diff --git a/_lastrevision b/_lastrevision
index 621dcf9..9eca5e8 100644
--- a/_lastrevision
+++ b/_lastrevision
@@ -1 +1 @@
-1a73678e768b896323b9d2d1f903a400e48e51e1
\ No newline at end of file
+73673e4ab1d13c4393183b8ad6066dfab39c7e63
\ No newline at end of file
diff --git a/_service b/_service
index 69d0f78..b060a52 100644
--- a/_service
+++ b/_service
@@ -3,7 +3,7 @@
https://github.com/openSUSE/salt-packaging.git
salt
package
- 3000.3
+ 3002.2
git
@@ -12,8 +12,8 @@
codeload.github.com
- openSUSE/salt/tar.gz/v3000.3-suse
- v3000.3.tar.gz
+ openSUSE/salt/tar.gz/v3002.2-suse
+ v3002.2.tar.gz
diff --git a/accumulated-changes-from-yomi-167.patch b/accumulated-changes-from-yomi-167.patch
index e8de9cb..46b7f0c 100644
--- a/accumulated-changes-from-yomi-167.patch
+++ b/accumulated-changes-from-yomi-167.patch
@@ -1,4 +1,4 @@
-From 951d2a385a40c5322155f952e08430e8402bfbde Mon Sep 17 00:00:00 2001
+From 828650500159fd7040d2fa76b2fc4d2b627f7065 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 22 Oct 2019 11:02:33 +0200
Subject: [PATCH] Accumulated changes from Yomi (#167)
@@ -17,190 +17,207 @@ This patch ignores this kind of issue during grains creation.
(cherry picked from commit b865491b74679140f7a71c5ba50d482db47b600f)
---
- salt/grains/core.py | 4 +++
- salt/modules/zypperpkg.py | 30 +++++++++++-----
- tests/unit/grains/test_core.py | 68 ++++++++++++++++++++++++++++++++++++
- tests/unit/modules/test_zypperpkg.py | 26 ++++++++++++++
- 4 files changed, 119 insertions(+), 9 deletions(-)
+ salt/grains/core.py | 6 +--
+ salt/modules/zypperpkg.py | 22 ----------
+ tests/unit/grains/test_core.py | 64 +++++++++++++++++++++++++++-
+ tests/unit/modules/test_zypperpkg.py | 38 +++++++++++++++++
+ 4 files changed, 103 insertions(+), 27 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 77ae99590f..68c43482d3 100644
+index 0dc1d97f97..a2983e388b 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -997,6 +997,10 @@ def _virtual(osdata):
- grains['virtual'] = 'gce'
- elif 'BHYVE' in output:
- grains['virtual'] = 'bhyve'
-+ except UnicodeDecodeError:
-+ # Some firmwares provide non-valid 'product_name'
-+ # files, ignore them
+@@ -1046,7 +1046,7 @@ def _virtual(osdata):
+ if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
+ try:
+ with salt.utils.files.fopen(
+- "/sys/devices/virtual/dmi/id/product_name", "rb"
++ "/sys/devices/virtual/dmi/id/product_name", "r"
+ ) as fhr:
+ output = salt.utils.stringutils.to_unicode(
+ fhr.read(), errors="replace"
+@@ -1066,9 +1066,7 @@ def _virtual(osdata):
+ except UnicodeDecodeError:
+ # Some firmwares provide non-valid 'product_name'
+ # files, ignore them
+- log.debug(
+- "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
+- )
+ pass
- except IOError:
+ except OSError:
pass
- elif osdata['kernel'] == 'FreeBSD':
+ elif osdata["kernel"] == "FreeBSD":
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index f7158e0810..5f3b6d6855 100644
+index 2daec0f380..b5621174a4 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -863,23 +863,35 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
- _ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
+@@ -958,28 +958,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+ }
+ ]
- for include in includes:
-+ if include == 'product':
-+ products = list_products(all=False, root=root)
-+ for product in products:
-+ extended_name = '{}:{}'.format(include, product['name'])
-+ _ret[extended_name] = [{
-+ 'epoch': product['epoch'],
-+ 'version': product['version'],
-+ 'release': product['release'],
-+ 'arch': product['arch'],
-+ 'install_date': None,
-+ 'install_date_time_t': None,
-+ }]
- if include in ('pattern', 'patch'):
- if include == 'pattern':
+- for include in includes:
+- if include in ("pattern", "patch"):
+- if include == "pattern":
- pkgs = list_installed_patterns(root=root)
-+ elements = list_installed_patterns(root=root)
- elif include == 'patch':
+- elif include == "patch":
- pkgs = list_installed_patches(root=root)
-+ elements = list_installed_patches(root=root)
- else:
+- else:
- pkgs = []
- for pkg in pkgs:
-- pkg_extended_name = '{}:{}'.format(include, pkg)
-- info = info_available(pkg_extended_name,
-+ elements = []
-+ for element in elements:
-+ extended_name = '{}:{}'.format(include, element)
-+ info = info_available(extended_name,
- refresh=False,
- root=root)
-- _ret[pkg_extended_name] = [{
-+ _ret[extended_name] = [{
- 'epoch': None,
-- 'version': info[pkg]['version'],
-+ 'version': info[element]['version'],
- 'release': None,
-- 'arch': info[pkg]['arch'],
-+ 'arch': info[element]['arch'],
- 'install_date': None,
- 'install_date_time_t': None,
- }]
+- pkg_extended_name = "{}:{}".format(include, pkg)
+- info = info_available(pkg_extended_name, refresh=False, root=root)
+- _ret[pkg_extended_name] = [
+- {
+- "epoch": None,
+- "version": info[pkg]["version"],
+- "release": None,
+- "arch": info[pkg]["arch"],
+- "install_date": None,
+- "install_date_time_t": None,
+- }
+- ]
+-
+ __context__[contextkey] = _ret
+
+ return __salt__["pkg_resource.format_pkg_list"](
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index e722bfab5b..33d6a9507f 100644
+index a5ceeb8317..0dc3423646 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -1559,3 +1559,71 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- assert len(info) == 2
- assert all([x is not None for x in info])
- assert all([isinstance(x, int) for x in info])
-+
-+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
+@@ -2047,13 +2047,74 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ result = core.path()
+ assert result == {"path": path, "systempath": comps}, result
+
++ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
++ @patch("os.path.exists")
++ @patch("salt.utils.platform.is_proxy")
+ def test_kernelparams_return(self):
+ expectations = [
-+ ('BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64',
-+ {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-3.10.0-693.2.2.el7.x86_64')]}),
-+ ('root=/dev/mapper/centos_daemon-root',
-+ {'kernelparams': [('root', '/dev/mapper/centos_daemon-root')]}),
-+ ('rhgb quiet ro',
-+ {'kernelparams': [('rhgb', None), ('quiet', None), ('ro', None)]}),
-+ ('param="value1"',
-+ {'kernelparams': [('param', 'value1')]}),
-+ ('param="value1 value2 value3"',
-+ {'kernelparams': [('param', 'value1 value2 value3')]}),
-+ ('param="value1 value2 value3" LANG="pl" ro',
-+ {'kernelparams': [('param', 'value1 value2 value3'), ('LANG', 'pl'), ('ro', None)]}),
-+ ('ipv6.disable=1',
-+ {'kernelparams': [('ipv6.disable', '1')]}),
-+ ('param="value1:value2:value3"',
-+ {'kernelparams': [('param', 'value1:value2:value3')]}),
-+ ('param="value1,value2,value3"',
-+ {'kernelparams': [('param', 'value1,value2,value3')]}),
-+ ('param="value1" param="value2" param="value3"',
-+ {'kernelparams': [('param', 'value1'), ('param', 'value2'), ('param', 'value3')]}),
++ (
++ "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64",
++ {
++ "kernelparams": [
++ ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64")
++ ]
++ },
++ ),
++ (
++ "root=/dev/mapper/centos_daemon-root",
++ {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]},
++ ),
++ (
++ "rhgb quiet ro",
++ {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]},
++ ),
++ ('param="value1"', {"kernelparams": [("param", "value1")]}),
++ (
++ 'param="value1 value2 value3"',
++ {"kernelparams": [("param", "value1 value2 value3")]},
++ ),
++ (
++ 'param="value1 value2 value3" LANG="pl" ro',
++ {
++ "kernelparams": [
++ ("param", "value1 value2 value3"),
++ ("LANG", "pl"),
++ ("ro", None),
++ ]
++ },
++ ),
++ ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}),
++ (
++ 'param="value1:value2:value3"',
++ {"kernelparams": [("param", "value1:value2:value3")]},
++ ),
++ (
++ 'param="value1,value2,value3"',
++ {"kernelparams": [("param", "value1,value2,value3")]},
++ ),
++ (
++ 'param="value1" param="value2" param="value3"',
++ {
++ "kernelparams": [
++ ("param", "value1"),
++ ("param", "value2"),
++ ("param", "value3"),
++ ]
++ },
++ ),
+ ]
+
+ for cmdline, expectation in expectations:
-+ with patch('salt.utils.files.fopen', mock_open(read_data=cmdline)):
++ with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)):
+ self.assertEqual(core.kernelparams(), expectation)
+
-+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
-+ @patch('os.path.exists')
-+ @patch('salt.utils.platform.is_proxy')
-+ def test__hw_data_linux_empty(self, is_proxy, exists):
-+ is_proxy.return_value = False
-+ exists.return_value = True
-+ with patch('salt.utils.files.fopen', mock_open(read_data='')):
-+ self.assertEqual(core._hw_data({'kernel': 'Linux'}), {
-+ 'biosreleasedate': '',
-+ 'biosversion': '',
-+ 'manufacturer': '',
-+ 'productname': '',
-+ 'serialnumber': '',
-+ 'uuid': ''
-+ })
-+
-+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
-+ @skipIf(six.PY2, 'UnicodeDecodeError is throw in Python 3')
-+ @patch('os.path.exists')
-+ @patch('salt.utils.platform.is_proxy')
-+ def test__hw_data_linux_unicode_error(self, is_proxy, exists):
-+ def _fopen(*args):
-+ class _File(object):
-+ def __enter__(self):
-+ return self
-+
-+ def __exit__(self, *args):
-+ pass
-+
-+ def read(self):
-+ raise UnicodeDecodeError('enconding', b'', 1, 2, 'reason')
-+
-+ return _File()
-+
-+ is_proxy.return_value = False
-+ exists.return_value = True
-+ with patch('salt.utils.files.fopen', _fopen):
-+ self.assertEqual(core._hw_data({'kernel': 'Linux'}), {})
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @patch("os.path.exists")
+ @patch("salt.utils.platform.is_proxy")
+ def test__hw_data_linux_empty(self, is_proxy, exists):
+ is_proxy.return_value = False
+ exists.return_value = True
+- with patch("salt.utils.files.fopen", mock_open(read_data=b"")):
++ with patch("salt.utils.files.fopen", mock_open(read_data="")):
+ self.assertEqual(
+ core._hw_data({"kernel": "Linux"}),
+ {
+@@ -2067,6 +2128,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ )
+
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
++ @skipIf(six.PY2, "UnicodeDecodeError is thrown in Python 3")
+ @patch("os.path.exists")
+ @patch("salt.utils.platform.is_proxy")
+ def test__hw_data_linux_unicode_error(self, is_proxy, exists):
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 6102043384..76937cc358 100644
+index 5d4e7766b6..1b62122e0e 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -944,6 +944,32 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with self.assertRaisesRegex(CommandExecutionError, '^Advisory id "SUSE-PATCH-XXX" not found$'):
- zypper.install(advisory_ids=['SUSE-PATCH-XXX'])
+@@ -1424,6 +1424,44 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
+ )
-+ @patch('salt.modules.zypperpkg._systemd_scope',
-+ MagicMock(return_value=False))
-+ @patch('salt.modules.zypperpkg.list_products',
-+ MagicMock(return_value={'openSUSE': {'installed': False, 'summary': 'test'}}))
-+ @patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"product:openSUSE": "15.2"},
-+ {"product:openSUSE": "15.3"}]))
++ @patch("salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False))
++ @patch(
++ "salt.modules.zypperpkg.list_products",
++ MagicMock(return_value={"openSUSE": {"installed": False, "summary": "test"}}),
++ )
++ @patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(
++ side_effect=[{"product:openSUSE": "15.2"}, {"product:openSUSE": "15.3"}]
++ ),
++ )
+ def test_install_product_ok(self):
-+ '''
++ """
+ Test successful product installation.
-+ '''
-+ with patch.dict(zypper.__salt__,
-+ {
-+ 'pkg_resource.parse_targets': MagicMock(
-+ return_value=(['product:openSUSE'], None))
-+ }):
-+ with patch('salt.modules.zypperpkg.__zypper__.noraise.call', MagicMock()) as zypper_mock:
-+ ret = zypper.install('product:openSUSE', includes=['product'])
-+ zypper_mock.assert_called_once_with(
-+ '--no-refresh',
-+ 'install',
-+ '--auto-agree-with-licenses',
-+ '--name',
-+ 'product:openSUSE'
++ """
++ with patch.dict(
++ zypper.__salt__,
++ {
++ "pkg_resource.parse_targets": MagicMock(
++ return_value=(["product:openSUSE"], None)
++ )
++ },
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ ret = zypper.install("product:openSUSE", includes=["product"])
++ zypper_mock.assert_called_once_with(
++ "--no-refresh",
++ "install",
++ "--auto-agree-with-licenses",
++ "--name",
++ "product:openSUSE",
++ )
++ self.assertDictEqual(
++ ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
+ )
-+ self.assertDictEqual(ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}})
+
def test_remove_purge(self):
- '''
+ """
Test package removal
--
-2.16.4
+2.29.2
diff --git a/accumulated-changes-required-for-yomi-165.patch b/accumulated-changes-required-for-yomi-165.patch
index 000ee98..29409fa 100644
--- a/accumulated-changes-required-for-yomi-165.patch
+++ b/accumulated-changes-required-for-yomi-165.patch
@@ -1,4 +1,4 @@
-From 9f29577b75cac1e79ec7c30a5dff0dff0ab9da3a Mon Sep 17 00:00:00 2001
+From 7d35fdba84b6e1b62a3abc71e518366a35efb662 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 30 Jul 2019 11:23:12 +0200
Subject: [PATCH] Accumulated changes required for Yomi (#165)
@@ -58,143 +58,60 @@ so the cached data will be separated too.
(cherry picked from commit 9c54bb3e8c93ba21fc583bdefbcadbe53cbcd7b5)
---
- salt/modules/cmdmod.py | 12 +++++++++---
- salt/modules/zypperpkg.py | 13 ++++++++++---
- tests/unit/modules/test_cmdmod.py | 16 ++++++++++++++++
- tests/unit/modules/test_zypperpkg.py | 21 +++++++++++++++++++++
- 4 files changed, 56 insertions(+), 6 deletions(-)
+ salt/modules/zypperpkg.py | 1 -
+ tests/unit/modules/test_zypperpkg.py | 22 +++++++++++++++++++++-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
-diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
-index eed7656a6d..0d2f720bbb 100644
---- a/salt/modules/cmdmod.py
-+++ b/salt/modules/cmdmod.py
-@@ -3094,13 +3094,19 @@ def run_chroot(root,
-
- if isinstance(cmd, (list, tuple)):
- cmd = ' '.join([six.text_type(i) for i in cmd])
-- cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))
-+
-+ # If runas and group are provided, we expect that the user lives
-+ # inside the chroot, not outside.
-+ if runas:
-+ userspec = '--userspec {}:{}'.format(runas, group if group else '')
-+ else:
-+ userspec = ''
-+
-+ cmd = 'chroot {} {} {} -c {}'.format(userspec, root, sh_, _cmd_quote(cmd))
-
- run_func = __context__.pop('cmd.run_chroot.func', run_all)
-
- ret = run_func(cmd,
-- runas=runas,
-- group=group,
- cwd=cwd,
- stdin=stdin,
- shell=shell,
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 3760b525e7..8179cd8c1d 100644
+index c996935bff..b099f3e5d7 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -449,8 +449,14 @@ def _clean_cache():
- '''
- Clean cached results
- '''
-+ keys = []
- for cache_name in ['pkg.list_pkgs', 'pkg.list_provides']:
-- __context__.pop(cache_name, None)
-+ for contextkey in __context__:
-+ if contextkey.startswith(cache_name):
-+ keys.append(contextkey)
-+
-+ for key in keys:
-+ __context__.pop(key, None)
-
-
- def list_upgrades(refresh=True, root=None, **kwargs):
-@@ -811,9 +817,10 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
-
- includes = includes if includes else []
-
-- contextkey = 'pkg.list_pkgs'
-+ # Results can be different if a different root or a different
-+ # inclusion types are passed
-+ contextkey = 'pkg.list_pkgs_{}_{}'.format(root, includes)
+@@ -879,7 +879,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+ # inclusion types are passed
+ contextkey = "pkg.list_pkgs_{}_{}".format(root, includes)
- # TODO(aplanas): this cached value depends on the parameters
if contextkey not in __context__:
ret = {}
- cmd = ['rpm']
-diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py
-index f8fba59294..8d763435f8 100644
---- a/tests/unit/modules/test_cmdmod.py
-+++ b/tests/unit/modules/test_cmdmod.py
-@@ -371,6 +371,22 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
- else:
- raise RuntimeError
-
-+ @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
-+ @skipIf(salt.utils.platform.is_darwin(), 'Do not run on MacOS')
-+ def test_run_cwd_in_combination_with_runas(self):
-+ '''
-+ cmd.run executes command in the cwd directory
-+ when the runas parameter is specified
-+ '''
-+ cmd = 'pwd'
-+ cwd = '/tmp'
-+ runas = os.getlogin()
-+
-+ with patch.dict(cmdmod.__grains__, {'os': 'Darwin',
-+ 'os_family': 'Solaris'}):
-+ stdout = cmdmod._run(cmd, cwd=cwd, runas=runas).get('stdout')
-+ self.assertEqual(stdout, cwd)
-+
- def test_run_all_binary_replace(self):
- '''
- Test for failed decoding of binary data, for instance when doing
+ cmd = ["rpm"]
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 12c22bfcb2..6102043384 100644
+index 032785395e..5d4e7766b6 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -571,6 +571,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}):
- pkgs = zypper.list_pkgs(versions_as_list=True)
- self.assertFalse(pkgs.get('gpg-pubkey', False))
-+ self.assertTrue('pkg.list_pkgs_None_[]' in zypper.__context__)
- for pkg_name, pkg_version in {
- 'jakarta-commons-discovery': ['0.4-129.686'],
- 'yast2-ftp-server': ['3.1.8-8.1'],
-@@ -613,6 +614,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': zypper.parse_arch_from_name}):
- pkgs = zypper.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
- self.assertFalse(pkgs.get('gpg-pubkey', False))
-+ self.assertTrue('pkg.list_pkgs_None_[]' in zypper.__context__)
- for pkg_name, pkg_attr in {
- 'jakarta-commons-discovery': [{
- 'version': '0.4',
-@@ -1456,3 +1458,22 @@ pattern() = package-c'''),
- 'summary': 'description b',
- },
+@@ -912,7 +912,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ ), patch.dict(
+ zypper.__salt__, {"pkg_resource.stringify": MagicMock()}
+ ), patch.dict(
+- pkg_resource.__salt__, {"pkg.parse_arch": zypper.parse_arch}
++ pkg_resource.__salt__,
++ {"pkg.parse_arch_from_name": zypper.parse_arch_from_name},
+ ):
+ pkgs = zypper.list_pkgs(
+ attr=["epoch", "release", "arch", "install_date_time_t"]
+@@ -1950,3 +1951,22 @@ pattern() = package-c"""
+ "package-a": {"installed": True, "summary": "description a",},
+ "package-b": {"installed": False, "summary": "description b",},
}
+
+ def test__clean_cache_empty(self):
-+ '''Test that an empty cached can be cleaned'''
++ """Test that an empty cached can be cleaned"""
+ context = {}
+ with patch.dict(zypper.__context__, context):
+ zypper._clean_cache()
+ assert context == {}
+
+ def test__clean_cache_filled(self):
-+ '''Test that a filled cached can be cleaned'''
++ """Test that a filled cached can be cleaned"""
+ context = {
-+ 'pkg.list_pkgs_/mnt_[]': None,
-+ 'pkg.list_pkgs_/mnt_[patterns]': None,
-+ 'pkg.list_provides': None,
-+ 'pkg.other_data': None,
++ "pkg.list_pkgs_/mnt_[]": None,
++ "pkg.list_pkgs_/mnt_[patterns]": None,
++ "pkg.list_provides": None,
++ "pkg.other_data": None,
+ }
+ with patch.dict(zypper.__context__, context):
+ zypper._clean_cache()
-+ self.assertEqual(zypper.__context__, {'pkg.other_data': None})
++ self.assertEqual(zypper.__context__, {"pkg.other_data": None})
--
-2.16.4
+2.29.2
diff --git a/activate-all-beacons-sources-config-pillar-grains.patch b/activate-all-beacons-sources-config-pillar-grains.patch
index bce3d30..9667639 100644
--- a/activate-all-beacons-sources-config-pillar-grains.patch
+++ b/activate-all-beacons-sources-config-pillar-grains.patch
@@ -1,26 +1,28 @@
-From 6df4cef549665aad5b9e2af50eb06124a2bb0997 Mon Sep 17 00:00:00 2001
+From c44b897eb1305c6b9c341fc16f729d2293ab24e4 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Tue, 17 Oct 2017 16:52:33 +0200
Subject: [PATCH] Activate all beacons sources: config/pillar/grains
---
- salt/minion.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ salt/minion.py | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/salt/minion.py b/salt/minion.py
-index 6a77d90185..457f485b0a 100644
+index c255f37c26..4da665a130 100644
--- a/salt/minion.py
+++ b/salt/minion.py
-@@ -483,7 +483,7 @@ class MinionBase(object):
+@@ -508,9 +508,7 @@ class MinionBase:
the pillar or grains changed
- '''
- if 'config.merge' in functions:
-- b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
-+ b_conf = functions['config.merge']('beacons', self.opts['beacons'])
+ """
+ if "config.merge" in functions:
+- b_conf = functions["config.merge"](
+- "beacons", self.opts["beacons"], omit_opts=True
+- )
++ b_conf = functions["config.merge"]("beacons", self.opts["beacons"])
if b_conf:
- return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
- return []
+ return self.beacons.process(
+ b_conf, self.opts["grains"]
--
-2.16.4
+2.29.2
diff --git a/add-all_versions-parameter-to-include-all-installed-.patch b/add-all_versions-parameter-to-include-all-installed-.patch
index e7045c0..6212a62 100644
--- a/add-all_versions-parameter-to-include-all-installed-.patch
+++ b/add-all_versions-parameter-to-include-all-installed-.patch
@@ -1,4 +1,4 @@
-From cd66b1e6636013440577a38a5a68729fec2f3f99 Mon Sep 17 00:00:00 2001
+From 2e300c770c227cf394929b7d5d025d5c52f1ae2c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 14 May 2018 11:33:13 +0100
@@ -19,21 +19,119 @@ Refactor: use dict.setdefault instead of an if-else statement
Allow removing only specific package versions with zypper and yum
---
- salt/states/pkg.py | 21 +++++++++++++++++++++
- 1 file changed, 21 insertions(+)
+ salt/states/pkg.py | 285 +++++++++++++++++++++++----------------------
+ 1 file changed, 146 insertions(+), 139 deletions(-)
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
-index a13d418400..c0fa2f6b69 100644
+index 51b5a06e8f..a1b2a122bb 100644
--- a/salt/states/pkg.py
+++ b/salt/states/pkg.py
-@@ -450,6 +450,16 @@ def _find_remove_targets(name=None,
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Installation of packages using OS package managers such as yum or apt-get
+ =========================================================================
+@@ -71,21 +70,16 @@ state module
+ used. This will be addressed in a future release of Salt.
+ """
- if __grains__['os'] == 'FreeBSD' and origin:
- cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]
-+ elif __grains__['os_family'] == 'Suse':
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import fnmatch
+ import logging
+ import os
+ import re
+
+-# Import Salt libs
+ import salt.utils.pkg
+ import salt.utils.platform
+ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+ from salt.modules.pkg_resource import _repack_pkgs
+ from salt.output import nested
+@@ -323,7 +317,7 @@ def _find_download_targets(
+ "name": name,
+ "changes": {},
+ "result": True,
+- "comment": "Version {0} of package '{1}' is already "
++ "comment": "Version {} of package '{}' is already "
+ "downloaded".format(version, name),
+ }
+
+@@ -334,7 +328,7 @@ def _find_download_targets(
+ "name": name,
+ "changes": {},
+ "result": True,
+- "comment": "Package {0} is already " "downloaded".format(name),
++ "comment": "Package {} is already " "downloaded".format(name),
+ }
+
+ version_spec = False
+@@ -349,13 +343,13 @@ def _find_download_targets(
+ comments.append(
+ "The following package(s) were not found, and no "
+ "possible matches were found in the package db: "
+- "{0}".format(", ".join(sorted(problems["no_suggest"])))
++ "{}".format(", ".join(sorted(problems["no_suggest"])))
+ )
+ if problems.get("suggest"):
+- for pkgname, suggestions in six.iteritems(problems["suggest"]):
++ for pkgname, suggestions in problems["suggest"].items():
+ comments.append(
+- "Package '{0}' not found (possible matches: "
+- "{1})".format(pkgname, ", ".join(suggestions))
++ "Package '{}' not found (possible matches: "
++ "{})".format(pkgname, ", ".join(suggestions))
+ )
+ if comments:
+ if len(comments) > 1:
+@@ -371,7 +365,7 @@ def _find_download_targets(
+ # Check current downloaded versions against specified versions
+ targets = {}
+ problems = []
+- for pkgname, pkgver in six.iteritems(to_download):
++ for pkgname, pkgver in to_download.items():
+ cver = cur_pkgs.get(pkgname, {})
+ # Package not yet downloaded, so add to targets
+ if not cver:
+@@ -401,7 +395,7 @@ def _find_download_targets(
+
+ if not targets:
+ # All specified packages are already downloaded
+- msg = "All specified packages{0} are already downloaded".format(
++ msg = "All specified packages{} are already downloaded".format(
+ " (matching specified versions)" if version_spec else ""
+ )
+ return {"name": name, "changes": {}, "result": True, "comment": msg}
+@@ -425,7 +419,7 @@ def _find_advisory_targets(name=None, advisory_ids=None, **kwargs):
+ "name": name,
+ "changes": {},
+ "result": True,
+- "comment": "Advisory patch {0} is already " "installed".format(name),
++ "comment": "Advisory patch {} is already " "installed".format(name),
+ }
+
+ # Find out which advisory patches will be targeted in the call to pkg.install
+@@ -477,12 +471,22 @@ def _find_remove_targets(
+ # Check current versions against specified versions
+ targets = []
+ problems = []
+- for pkgname, pkgver in six.iteritems(to_remove):
++ for pkgname, pkgver in to_remove.items():
+ # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
+ origin = bool(re.search("/", pkgname))
+
+ if __grains__["os"] == "FreeBSD" and origin:
+- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == pkgname]
++ cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname]
++ elif __grains__["os_family"] == "Suse":
+ # On SUSE systems. Zypper returns packages without "arch" in name
+ try:
-+ namepart, archpart = pkgname.rsplit('.', 1)
++ namepart, archpart = pkgname.rsplit(".", 1)
+ except ValueError:
+ cver = cur_pkgs.get(pkgname, [])
+ else:
@@ -43,14 +141,162 @@ index a13d418400..c0fa2f6b69 100644
else:
cver = cur_pkgs.get(pkgname, [])
-@@ -856,6 +866,17 @@ def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
- cver = new_pkgs.get(pkgname.split('%')[0])
- elif __grains__['os_family'] == 'Debian':
- cver = new_pkgs.get(pkgname.split('=')[0])
-+ elif __grains__['os_family'] == 'Suse':
+@@ -518,7 +522,7 @@ def _find_remove_targets(
+
+ if not targets:
+ # All specified packages are already absent
+- msg = "All specified packages{0} are already absent".format(
++ msg = "All specified packages{} are already absent".format(
+ " (matching specified versions)" if version_spec else ""
+ )
+ return {"name": name, "changes": {}, "result": True, "comment": msg}
+@@ -619,7 +623,7 @@ def _find_install_targets(
+ "name": name,
+ "changes": {},
+ "result": False,
+- "comment": "Invalidly formatted '{0}' parameter. See "
++ "comment": "Invalidly formatted '{}' parameter. See "
+ "minion log.".format("pkgs" if pkgs else "sources"),
+ }
+
+@@ -634,7 +638,7 @@ def _find_install_targets(
+ "name": name,
+ "changes": {},
+ "result": False,
+- "comment": "Package {0} not found in the "
++ "comment": "Package {} not found in the "
+ "repository.".format(name),
+ }
+ if version is None:
+@@ -656,7 +660,7 @@ def _find_install_targets(
+ origin = bool(re.search("/", name))
+
+ if __grains__["os"] == "FreeBSD" and origin:
+- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == name]
++ cver = [k for k, v in cur_pkgs.items() if v["origin"] == name]
+ else:
+ cver = cur_pkgs.get(name, [])
+
+@@ -667,7 +671,7 @@ def _find_install_targets(
+ "name": name,
+ "changes": {},
+ "result": True,
+- "comment": "Version {0} of package '{1}' is already "
++ "comment": "Version {} of package '{}' is already "
+ "installed".format(version, name),
+ }
+
+@@ -678,7 +682,7 @@ def _find_install_targets(
+ "name": name,
+ "changes": {},
+ "result": True,
+- "comment": "Package {0} is already " "installed".format(name),
++ "comment": "Package {} is already " "installed".format(name),
+ }
+
+ version_spec = False
+@@ -687,21 +691,19 @@ def _find_install_targets(
+ # enforced. Takes extra time. Disable for improved performance
+ if not skip_suggestions:
+ # Perform platform-specific pre-flight checks
+- not_installed = dict(
+- [
+- (name, version)
+- for name, version in desired.items()
+- if not (
+- name in cur_pkgs
+- and (
+- version is None
+- or _fulfills_version_string(
+- cur_pkgs[name], version, ignore_epoch=ignore_epoch
+- )
++ not_installed = {
++ name: version
++ for name, version in desired.items()
++ if not (
++ name in cur_pkgs
++ and (
++ version is None
++ or _fulfills_version_string(
++ cur_pkgs[name], version, ignore_epoch=ignore_epoch
+ )
+ )
+- ]
+- )
++ )
++ }
+ if not_installed:
+ try:
+ problems = _preflight_check(not_installed, **kwargs)
+@@ -713,13 +715,13 @@ def _find_install_targets(
+ comments.append(
+ "The following package(s) were not found, and no "
+ "possible matches were found in the package db: "
+- "{0}".format(", ".join(sorted(problems["no_suggest"])))
++ "{}".format(", ".join(sorted(problems["no_suggest"])))
+ )
+ if problems.get("suggest"):
+- for pkgname, suggestions in six.iteritems(problems["suggest"]):
++ for pkgname, suggestions in problems["suggest"].items():
+ comments.append(
+- "Package '{0}' not found (possible matches: "
+- "{1})".format(pkgname, ", ".join(suggestions))
++ "Package '{}' not found (possible matches: "
++ "{})".format(pkgname, ", ".join(suggestions))
+ )
+ if comments:
+ if len(comments) > 1:
+@@ -733,9 +735,7 @@ def _find_install_targets(
+
+ # Resolve the latest package version for any packages with "latest" in the
+ # package version
+- wants_latest = (
+- [] if sources else [x for x, y in six.iteritems(desired) if y == "latest"]
+- )
++ wants_latest = [] if sources else [x for x, y in desired.items() if y == "latest"]
+ if wants_latest:
+ resolved_latest = __salt__["pkg.latest_version"](
+ *wants_latest, refresh=refresh, **kwargs
+@@ -766,7 +766,7 @@ def _find_install_targets(
+ problems = []
+ warnings = []
+ failed_verify = False
+- for package_name, version_string in six.iteritems(desired):
++ for package_name, version_string in desired.items():
+ cver = cur_pkgs.get(package_name, [])
+ if resolve_capabilities and not cver and package_name in cur_prov:
+ cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])
+@@ -795,12 +795,12 @@ def _find_install_targets(
+ problems.append(err.format(version_string, "file not found"))
+ continue
+ elif not os.path.exists(cached_path):
+- problems.append("{0} does not exist on minion".format(version_string))
++ problems.append("{} does not exist on minion".format(version_string))
+ continue
+ source_info = __salt__["lowpkg.bin_pkg_info"](cached_path)
+ if source_info is None:
+ warnings.append(
+- "Failed to parse metadata for {0}".format(version_string)
++ "Failed to parse metadata for {}".format(version_string)
+ )
+ continue
+ else:
+@@ -923,13 +923,24 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None):
+ has_origin = "/" in pkgname
+
+ if __grains__["os"] == "FreeBSD" and has_origin:
+- cver = [k for k, v in six.iteritems(new_pkgs) if v["origin"] == pkgname]
++ cver = [k for k, v in new_pkgs.items() if v["origin"] == pkgname]
+ elif __grains__["os"] == "MacOS" and has_origin:
+ cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split("/")[-1]))
+ elif __grains__["os"] == "OpenBSD":
+ cver = new_pkgs.get(pkgname.split("%")[0])
+ elif __grains__["os_family"] == "Debian":
+ cver = new_pkgs.get(pkgname.split("=")[0])
++ elif __grains__["os_family"] == "Suse":
+ # On SUSE systems. Zypper returns packages without "arch" in name
+ try:
-+ namepart, archpart = pkgname.rsplit('.', 1)
++ namepart, archpart = pkgname.rsplit(".", 1)
+ except ValueError:
+ cver = new_pkgs.get(pkgname)
+ else:
@@ -61,7 +307,653 @@ index a13d418400..c0fa2f6b69 100644
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
+@@ -964,7 +975,7 @@ def _get_desired_pkg(name, desired):
+ oper = ""
+ else:
+ oper = "="
+- return "{0}{1}{2}".format(name, oper, "" if not desired[name] else desired[name])
++ return "{}{}{}".format(name, oper, "" if not desired[name] else desired[name])
+
+
+ def _preflight_check(desired, fromrepo, **kwargs):
+@@ -1709,8 +1720,8 @@ def installed(
+ "comment": "pkg.verify not implemented",
+ }
+
+- if not isinstance(version, six.string_types) and version is not None:
+- version = six.text_type(version)
++ if not isinstance(version, str) and version is not None:
++ version = str(version)
+
+ kwargs["allow_updates"] = allow_updates
+
+@@ -1754,7 +1765,7 @@ def installed(
+ "name": name,
+ "changes": {},
+ "result": False,
+- "comment": six.text_type(exc),
++ "comment": str(exc),
+ }
+
+ if "result" in hold_ret and not hold_ret["result"]:
+@@ -1763,7 +1774,7 @@ def installed(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while "
+- "holding/unholding package(s): {0}".format(hold_ret["comment"]),
++ "holding/unholding package(s): {}".format(hold_ret["comment"]),
+ }
+ else:
+ modified_hold = [
+@@ -1779,16 +1790,16 @@ def installed(
+ ]
+
+ for i in modified_hold:
+- result["comment"] += ".\n{0}".format(i["comment"])
++ result["comment"] += ".\n{}".format(i["comment"])
+ result["result"] = i["result"]
+ result["changes"][i["name"]] = i["changes"]
+
+ for i in not_modified_hold:
+- result["comment"] += ".\n{0}".format(i["comment"])
++ result["comment"] += ".\n{}".format(i["comment"])
+ result["result"] = i["result"]
+
+ for i in failed_hold:
+- result["comment"] += ".\n{0}".format(i["comment"])
++ result["comment"] += ".\n{}".format(i["comment"])
+ result["result"] = i["result"]
+ return result
+
+@@ -1805,8 +1816,8 @@ def installed(
+
+ # Remove any targets not returned by _find_install_targets
+ if pkgs:
+- pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
+- pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
++ pkgs = [dict([(x, y)]) for x, y in targets.items()]
++ pkgs.extend([dict([(x, y)]) for x, y in to_reinstall.items()])
+ elif sources:
+ oldsources = sources
+ sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets]
+@@ -1823,12 +1834,12 @@ def installed(
+ summary = ", ".join([_get_desired_pkg(x, targets) for x in targets])
+ comment.append(
+ "The following packages would be "
+- "installed/updated: {0}".format(summary)
++ "installed/updated: {}".format(summary)
+ )
+ if to_unpurge:
+ comment.append(
+ "The following packages would have their selection status "
+- "changed from 'purge' to 'install': {0}".format(", ".join(to_unpurge))
++ "changed from 'purge' to 'install': {}".format(", ".join(to_unpurge))
+ )
+ if to_reinstall:
+ # Add a comment for each package in to_reinstall with its
+@@ -1852,7 +1863,7 @@ def installed(
+ else:
+ pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
+ comment.append(
+- "Package '{0}' would be reinstalled because the "
++ "Package '{}' would be reinstalled because the "
+ "following files have been altered:".format(pkgstr)
+ )
+ comment.append(_nested_output(altered_files[reinstall_pkg]))
+@@ -1896,7 +1907,7 @@ def installed(
+ ret["changes"] = {}
+ ret["comment"] = (
+ "An error was encountered while installing "
+- "package(s): {0}".format(exc)
++ "package(s): {}".format(exc)
+ )
+ if warnings:
+ ret.setdefault("warnings", []).extend(warnings)
+@@ -1907,7 +1918,7 @@ def installed(
+
+ if isinstance(pkg_ret, dict):
+ changes["installed"].update(pkg_ret)
+- elif isinstance(pkg_ret, six.string_types):
++ elif isinstance(pkg_ret, str):
+ comment.append(pkg_ret)
+ # Code below will be looking for a dictionary. If this is a string
+ # it means that there was an exception raised and that no packages
+@@ -1921,7 +1932,7 @@ def installed(
+ action = "pkg.hold" if kwargs["hold"] else "pkg.unhold"
+ hold_ret = __salt__[action](name=name, pkgs=desired)
+ except (CommandExecutionError, SaltInvocationError) as exc:
+- comment.append(six.text_type(exc))
++ comment.append(str(exc))
+ ret = {
+ "name": name,
+ "changes": changes,
+@@ -1938,7 +1949,7 @@ def installed(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while "
+- "holding/unholding package(s): {0}".format(hold_ret["comment"]),
++ "holding/unholding package(s): {}".format(hold_ret["comment"]),
+ }
+ if warnings:
+ ret.setdefault("warnings", []).extend(warnings)
+@@ -1996,11 +2007,11 @@ def installed(
+ summary = ", ".join([_get_desired_pkg(x, desired) for x in modified])
+ if len(summary) < 20:
+ comment.append(
+- "The following packages were installed/updated: " "{0}".format(summary)
++ "The following packages were installed/updated: " "{}".format(summary)
+ )
+ else:
+ comment.append(
+- "{0} targeted package{1} {2} installed/updated.".format(
++ "{} targeted package{} {} installed/updated.".format(
+ len(modified),
+ "s" if len(modified) > 1 else "",
+ "were" if len(modified) > 1 else "was",
+@@ -2014,14 +2025,14 @@ def installed(
+ comment.append(i["comment"])
+ if len(changes[change_name]["new"]) > 0:
+ changes[change_name]["new"] += "\n"
+- changes[change_name]["new"] += "{0}".format(i["changes"]["new"])
++ changes[change_name]["new"] += "{}".format(i["changes"]["new"])
+ if len(changes[change_name]["old"]) > 0:
+ changes[change_name]["old"] += "\n"
+- changes[change_name]["old"] += "{0}".format(i["changes"]["old"])
++ changes[change_name]["old"] += "{}".format(i["changes"]["old"])
+ else:
+ comment.append(i["comment"])
+ changes[change_name] = {}
+- changes[change_name]["new"] = "{0}".format(i["changes"]["new"])
++ changes[change_name]["new"] = "{}".format(i["changes"]["new"])
+
+ # Any requested packages that were not targeted for install or reinstall
+ if not_modified:
+@@ -2031,11 +2042,11 @@ def installed(
+ summary = ", ".join([_get_desired_pkg(x, desired) for x in not_modified])
+ if len(not_modified) <= 20:
+ comment.append(
+- "The following packages were already installed: " "{0}".format(summary)
++ "The following packages were already installed: " "{}".format(summary)
+ )
+ else:
+ comment.append(
+- "{0} targeted package{1} {2} already installed".format(
++ "{} targeted package{} {} already installed".format(
+ len(not_modified),
+ "s" if len(not_modified) > 1 else "",
+ "were" if len(not_modified) > 1 else "was",
+@@ -2054,7 +2065,7 @@ def installed(
+ else:
+ summary = ", ".join([_get_desired_pkg(x, desired) for x in failed])
+ comment.insert(
+- 0, "The following packages failed to " "install/update: {0}".format(summary)
++ 0, "The following packages failed to " "install/update: {}".format(summary)
+ )
+ result = False
+
+@@ -2118,7 +2129,7 @@ def installed(
+ pkgstr = modified_pkg
+ else:
+ pkgstr = _get_desired_pkg(modified_pkg, desired)
+- msg = "Package {0} was reinstalled.".format(pkgstr)
++ msg = "Package {} was reinstalled.".format(pkgstr)
+ if modified_pkg in altered_files:
+ msg += " The following files were remediated:"
+ comment.append(msg)
+@@ -2133,7 +2144,7 @@ def installed(
+ pkgstr = failed_pkg
+ else:
+ pkgstr = _get_desired_pkg(failed_pkg, desired)
+- msg = "Reinstall was not successful for package {0}.".format(pkgstr)
++ msg = "Reinstall was not successful for package {}.".format(pkgstr)
+ if failed_pkg in altered_files:
+ msg += " The following files could not be remediated:"
+ comment.append(msg)
+@@ -2274,12 +2285,12 @@ def downloaded(
+ ret["result"] = False
+ ret[
+ "comment"
+- ] = "An error was encountered while checking targets: " "{0}".format(targets)
++ ] = "An error was encountered while checking targets: " "{}".format(targets)
+ return ret
+
+ if __opts__["test"]:
+ summary = ", ".join(targets)
+- ret["comment"] = "The following packages would be " "downloaded: {0}".format(
++ ret["comment"] = "The following packages would be " "downloaded: {}".format(
+ summary
+ )
+ return ret
+@@ -2306,7 +2317,7 @@ def downloaded(
+ ret["changes"] = {}
+ ret["comment"] = (
+ "An error was encountered while downloading "
+- "package(s): {0}".format(exc)
++ "package(s): {}".format(exc)
+ )
+ return ret
+
+@@ -2316,13 +2327,13 @@ def downloaded(
+ if failed:
+ summary = ", ".join([_get_desired_pkg(x, targets) for x in failed])
+ ret["result"] = False
+- ret["comment"] = "The following packages failed to " "download: {0}".format(
++ ret["comment"] = "The following packages failed to " "download: {}".format(
+ summary
+ )
+
+ if not ret["changes"] and not ret["comment"]:
+ ret["result"] = True
+- ret["comment"] = "Packages downloaded: " "{0}".format(", ".join(targets))
++ ret["comment"] = "Packages downloaded: " "{}".format(", ".join(targets))
+
+ return ret
+
+@@ -2382,14 +2393,14 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
+ ret["result"] = False
+ ret[
+ "comment"
+- ] = "An error was encountered while checking targets: " "{0}".format(targets)
++ ] = "An error was encountered while checking targets: " "{}".format(targets)
+ return ret
+
+ if __opts__["test"]:
+ summary = ", ".join(targets)
+ ret[
+ "comment"
+- ] = "The following advisory patches would be " "downloaded: {0}".format(summary)
++ ] = "The following advisory patches would be " "downloaded: {}".format(summary)
+ return ret
+
+ try:
+@@ -2408,7 +2419,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
+ ret["changes"] = {}
+ ret["comment"] = (
+ "An error was encountered while downloading "
+- "package(s): {0}".format(exc)
++ "package(s): {}".format(exc)
+ )
+ return ret
+
+@@ -2417,7 +2428,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
+ ret["result"] = True
+ ret["comment"] = (
+ "Advisory patch is not needed or related packages "
+- "are already {0}".format(status)
++ "are already {}".format(status)
+ )
+
+ return ret
+@@ -2674,7 +2685,7 @@ def latest(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while checking the "
+- "newest available version of package(s): {0}".format(exc),
++ "newest available version of package(s): {}".format(exc),
+ }
+
+ try:
+@@ -2683,9 +2694,9 @@ def latest(
+ return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
+
+ # Repack the cur/avail data if only a single package is being checked
+- if isinstance(cur, six.string_types):
++ if isinstance(cur, str):
+ cur = {desired_pkgs[0]: cur}
+- if isinstance(avail, six.string_types):
++ if isinstance(avail, str):
+ avail = {desired_pkgs[0]: avail}
+
+ targets = {}
+@@ -2695,7 +2706,7 @@ def latest(
+ # Package either a) is up-to-date, or b) does not exist
+ if not cur.get(pkg):
+ # Package does not exist
+- msg = "No information found for '{0}'.".format(pkg)
++ msg = "No information found for '{}'.".format(pkg)
+ log.error(msg)
+ problems.append(msg)
+ elif (
+@@ -2741,12 +2752,12 @@ def latest(
+ comments.append(
+ "The following packages are already up-to-date: "
+ + ", ".join(
+- ["{0} ({1})".format(x, cur[x]) for x in sorted(up_to_date)]
++ ["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)]
+ )
+ )
+ else:
+ comments.append(
+- "{0} packages are already up-to-date".format(up_to_date_count)
++ "{} packages are already up-to-date".format(up_to_date_count)
+ )
+
+ return {
+@@ -2784,7 +2795,7 @@ def latest(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while installing "
+- "package(s): {0}".format(exc),
++ "package(s): {}".format(exc),
+ }
+
+ if changes:
+@@ -2800,7 +2811,7 @@ def latest(
+
+ comments = []
+ if failed:
+- msg = "The following packages failed to update: " "{0}".format(
++ msg = "The following packages failed to update: " "{}".format(
+ ", ".join(sorted(failed))
+ )
+ comments.append(msg)
+@@ -2808,19 +2819,17 @@ def latest(
+ msg = (
+ "The following packages were successfully "
+ "installed/upgraded: "
+- "{0}".format(", ".join(sorted(successful)))
++ "{}".format(", ".join(sorted(successful)))
+ )
+ comments.append(msg)
+ if up_to_date:
+ if len(up_to_date) <= 10:
+ msg = (
+ "The following packages were already up-to-date: "
+- "{0}".format(", ".join(sorted(up_to_date)))
++ "{}".format(", ".join(sorted(up_to_date)))
+ )
+ else:
+- msg = "{0} packages were already up-to-date ".format(
+- len(up_to_date)
+- )
++ msg = "{} packages were already up-to-date ".format(len(up_to_date))
+ comments.append(msg)
+
+ return {
+@@ -2832,18 +2841,18 @@ def latest(
+ else:
+ if len(targets) > 10:
+ comment = (
+- "{0} targeted packages failed to update. "
++ "{} targeted packages failed to update. "
+ "See debug log for details.".format(len(targets))
+ )
+ elif len(targets) > 1:
+ comment = (
+ "The following targeted packages failed to update. "
+- "See debug log for details: ({0}).".format(
++ "See debug log for details: ({}).".format(
+ ", ".join(sorted(targets))
+ )
+ )
+ else:
+- comment = "Package {0} failed to " "update.".format(
++ comment = "Package {} failed to " "update.".format(
+ next(iter(list(targets.keys())))
+ )
+ if up_to_date:
+@@ -2851,10 +2860,10 @@ def latest(
+ comment += (
+ " The following packages were already "
+ "up-to-date: "
+- "{0}".format(", ".join(sorted(up_to_date)))
++ "{}".format(", ".join(sorted(up_to_date)))
+ )
+ else:
+- comment += "{0} packages were already " "up-to-date".format(
++ comment += "{} packages were already " "up-to-date".format(
+ len(up_to_date)
+ )
+
+@@ -2866,13 +2875,13 @@ def latest(
+ }
+ else:
+ if len(desired_pkgs) > 10:
+- comment = "All {0} packages are up-to-date.".format(len(desired_pkgs))
++ comment = "All {} packages are up-to-date.".format(len(desired_pkgs))
+ elif len(desired_pkgs) > 1:
+- comment = "All packages are up-to-date " "({0}).".format(
++ comment = "All packages are up-to-date " "({}).".format(
+ ", ".join(sorted(desired_pkgs))
+ )
+ else:
+- comment = "Package {0} is already " "up-to-date".format(desired_pkgs[0])
++ comment = "Package {} is already " "up-to-date".format(desired_pkgs[0])
+
+ return {"name": name, "changes": {}, "result": True, "comment": comment}
+
+@@ -2894,8 +2903,7 @@ def _uninstall(
+ "name": name,
+ "changes": {},
+ "result": False,
+- "comment": "Invalid action '{0}'. "
+- "This is probably a bug.".format(action),
++ "comment": "Invalid action '{}'. " "This is probably a bug.".format(action),
+ }
+
+ try:
+@@ -2908,7 +2916,7 @@ def _uninstall(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while parsing targets: "
+- "{0}".format(exc),
++ "{}".format(exc),
+ }
+ targets = _find_remove_targets(
+ name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs
+@@ -2921,7 +2929,7 @@ def _uninstall(
+ "changes": {},
+ "result": False,
+ "comment": "An error was encountered while checking targets: "
+- "{0}".format(targets),
++ "{}".format(targets),
+ }
+ if action == "purge":
+ old_removed = __salt__["pkg.list_pkgs"](
+@@ -2936,7 +2944,7 @@ def _uninstall(
+ "changes": {},
+ "result": True,
+ "comment": "None of the targeted packages are installed"
+- "{0}".format(" or partially installed" if action == "purge" else ""),
++ "{}".format(" or partially installed" if action == "purge" else ""),
+ }
+
+ if __opts__["test"]:
+@@ -2944,11 +2952,11 @@ def _uninstall(
+ "name": name,
+ "changes": {},
+ "result": None,
+- "comment": "The following packages will be {0}d: "
+- "{1}.".format(action, ", ".join(targets)),
++ "comment": "The following packages will be {}d: "
++ "{}.".format(action, ", ".join(targets)),
+ }
+
+- changes = __salt__["pkg.{0}".format(action)](
++ changes = __salt__["pkg.{}".format(action)](
+ name, pkgs=pkgs, version=version, **kwargs
+ )
+ new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
+@@ -2975,8 +2983,8 @@ def _uninstall(
+ "name": name,
+ "changes": changes,
+ "result": False,
+- "comment": "The following packages failed to {0}: "
+- "{1}.".format(action, ", ".join(failed)),
++ "comment": "The following packages failed to {}: "
++ "{}.".format(action, ", ".join(failed)),
+ }
+
+ comments = []
+@@ -2984,14 +2992,13 @@ def _uninstall(
+ if not_installed:
+ comments.append(
+ "The following packages were not installed: "
+- "{0}".format(", ".join(not_installed))
++ "{}".format(", ".join(not_installed))
+ )
+ comments.append(
+- "The following packages were {0}d: "
+- "{1}.".format(action, ", ".join(targets))
++ "The following packages were {}d: " "{}.".format(action, ", ".join(targets))
+ )
+ else:
+- comments.append("All targeted packages were {0}d.".format(action))
++ comments.append("All targeted packages were {}d.".format(action))
+
+ return {
+ "name": name,
+@@ -3089,7 +3096,7 @@ def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **
+ ret["changes"] = {}
+ ret[
+ "comment"
+- ] = "An error was encountered while removing " "package(s): {0}".format(exc)
++ ] = "An error was encountered while removing " "package(s): {}".format(exc)
+ return ret
+
+
+@@ -3181,7 +3188,7 @@ def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **k
+ ret["changes"] = {}
+ ret[
+ "comment"
+- ] = "An error was encountered while purging " "package(s): {0}".format(exc)
++ ] = "An error was encountered while purging " "package(s): {}".format(exc)
+ return ret
+
+
+@@ -3247,17 +3254,17 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
+ "new": pkgver,
+ "old": __salt__["pkg.version"](pkgname, **kwargs),
+ }
+- for pkgname, pkgver in six.iteritems(packages)
++ for pkgname, pkgver in packages.items()
+ }
+ if isinstance(pkgs, list):
+ packages = [pkg for pkg in packages if pkg in pkgs]
+ expected = {
+ pkgname: pkgver
+- for pkgname, pkgver in six.iteritems(expected)
++ for pkgname, pkgver in expected.items()
+ if pkgname in pkgs
+ }
+ except Exception as exc: # pylint: disable=broad-except
+- ret["comment"] = six.text_type(exc)
++ ret["comment"] = str(exc)
+ return ret
+ else:
+ ret["comment"] = "refresh must be either True or False"
+@@ -3284,16 +3291,16 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
+ ret["changes"] = {}
+ ret[
+ "comment"
+- ] = "An error was encountered while updating " "packages: {0}".format(exc)
++ ] = "An error was encountered while updating " "packages: {}".format(exc)
+ return ret
+
+ # If a package list was provided, ensure those packages were updated
+ missing = []
+ if isinstance(pkgs, list):
+- missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret["changes"]]
++ missing = [pkg for pkg in expected.keys() if pkg not in ret["changes"]]
+
+ if missing:
+- ret["comment"] = "The following package(s) failed to update: {0}".format(
++ ret["comment"] = "The following package(s) failed to update: {}".format(
+ ", ".join(missing)
+ )
+ ret["result"] = False
+@@ -3362,8 +3369,8 @@ def group_installed(name, skip=None, include=None, **kwargs):
+ ret["comment"] = "skip must be formatted as a list"
+ return ret
+ for idx, item in enumerate(skip):
+- if not isinstance(item, six.string_types):
+- skip[idx] = six.text_type(item)
++ if not isinstance(item, str):
++ skip[idx] = str(item)
+
+ if include is None:
+ include = []
+@@ -3372,15 +3379,15 @@ def group_installed(name, skip=None, include=None, **kwargs):
+ ret["comment"] = "include must be formatted as a list"
+ return ret
+ for idx, item in enumerate(include):
+- if not isinstance(item, six.string_types):
+- include[idx] = six.text_type(item)
++ if not isinstance(item, str):
++ include[idx] = str(item)
+
+ try:
+ diff = __salt__["pkg.group_diff"](name)
+ except CommandExecutionError as err:
+ ret["comment"] = (
+ "An error was encountered while installing/updating "
+- "group '{0}': {1}.".format(name, err)
++ "group '{}': {}.".format(name, err)
+ )
+ return ret
+
+@@ -3390,7 +3397,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
+ if invalid_skip:
+ ret[
+ "comment"
+- ] = "The following mandatory packages cannot be skipped: {0}".format(
++ ] = "The following mandatory packages cannot be skipped: {}".format(
+ ", ".join(invalid_skip)
+ )
+ return ret
+@@ -3401,7 +3408,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
+
+ if not targets:
+ ret["result"] = True
+- ret["comment"] = "Group '{0}' is already installed".format(name)
++ ret["comment"] = "Group '{}' is already installed".format(name)
+ return ret
+
+ partially_installed = (
+@@ -3415,9 +3422,9 @@ def group_installed(name, skip=None, include=None, **kwargs):
+ if partially_installed:
+ ret[
+ "comment"
+- ] = "Group '{0}' is partially installed and will be updated".format(name)
++ ] = "Group '{}' is partially installed and will be updated".format(name)
+ else:
+- ret["comment"] = "Group '{0}' will be installed".format(name)
++ ret["comment"] = "Group '{}' will be installed".format(name)
+ return ret
+
+ try:
+@@ -3432,19 +3439,19 @@ def group_installed(name, skip=None, include=None, **kwargs):
+ ret["changes"] = {}
+ ret["comment"] = (
+ "An error was encountered while "
+- "installing/updating group '{0}': {1}".format(name, exc)
++ "installing/updating group '{}': {}".format(name, exc)
+ )
+ return ret
+
+ failed = [x for x in targets if x not in __salt__["pkg.list_pkgs"](**kwargs)]
+ if failed:
+- ret["comment"] = "Failed to install the following packages: {0}".format(
++ ret["comment"] = "Failed to install the following packages: {}".format(
+ ", ".join(failed)
+ )
+ return ret
+
+ ret["result"] = True
+- ret["comment"] = "Group '{0}' was {1}".format(
++ ret["comment"] = "Group '{}' was {}".format(
+ name, "updated" if partially_installed else "installed"
+ )
+ return ret
+@@ -3561,6 +3568,6 @@ def mod_watch(name, **kwargs):
+ return {
+ "name": name,
+ "changes": {},
+- "comment": "pkg.{0} does not work with the watch requisite".format(sfun),
++ "comment": "pkg.{} does not work with the watch requisite".format(sfun),
+ "result": False,
+ }
--
-2.16.4
+2.29.2
diff --git a/add-astra-linux-common-edition-to-the-os-family-list.patch b/add-astra-linux-common-edition-to-the-os-family-list.patch
index 5d99070..1a98229 100644
--- a/add-astra-linux-common-edition-to-the-os-family-list.patch
+++ b/add-astra-linux-common-edition-to-the-os-family-list.patch
@@ -1,4 +1,4 @@
-From acf0b24353d831dcc2c5b292f99480938f5ecd93 Mon Sep 17 00:00:00 2001
+From d5569023c64a3fcec57a7aa6823ee94e8be91b3d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?=
Date: Wed, 12 Feb 2020 10:05:45 +0100
@@ -11,49 +11,49 @@ Subject: [PATCH] Add Astra Linux Common Edition to the OS Family list
2 files changed, 21 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 20950988d9..f410985198 100644
+index 5dff6ecfd4..5634327623 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -1523,6 +1523,7 @@ _OS_FAMILY_MAP = {
- 'Funtoo': 'Gentoo',
- 'AIX': 'AIX',
- 'TurnKey': 'Debian',
-+ 'AstraLinuxCE': 'Debian',
+@@ -1618,6 +1618,7 @@ _OS_FAMILY_MAP = {
+ "Funtoo": "Gentoo",
+ "AIX": "AIX",
+ "TurnKey": "Debian",
++ "AstraLinuxCE": "Debian",
}
# Matches any possible format:
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index b4ed9379e5..c276dee9f3 100644
+index 85d434dd9d..196dbcf83d 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -605,6 +605,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+@@ -728,6 +728,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests("ubuntu-17.10", _os_release_map, expectation)
-+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
++ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ def test_astralinuxce_2_os_grains(self):
-+ '''
++ """
+ Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel"
-+ '''
++ """
+ _os_release_map = {
-+ 'linux_distribution': ('AstraLinuxCE', '2.12.22', 'orel'),
++ "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"),
+ }
+ expectation = {
-+ 'os': 'AstraLinuxCE',
-+ 'os_family': 'Debian',
-+ 'oscodename': 'orel',
-+ 'osfullname': 'AstraLinuxCE',
-+ 'osrelease': '2.12.22',
-+ 'osrelease_info': (2, 12, 22),
-+ 'osmajorrelease': 2,
-+ 'osfinger': 'AstraLinuxCE-2',
++ "os": "AstraLinuxCE",
++ "os_family": "Debian",
++ "oscodename": "orel",
++ "osfullname": "AstraLinuxCE",
++ "osrelease": "2.12.22",
++ "osrelease_info": (2, 12, 22),
++ "osmajorrelease": 2,
++ "osfinger": "AstraLinuxCE-2",
+ }
+ self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation)
+
- @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows')
+ @skipIf(not salt.utils.platform.is_windows(), "System is not Windows")
def test_windows_platform_data(self):
- '''
+ """
--
-2.16.4
+2.29.2
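For context, a rough sketch (not Salt's actual grains code) of how the single _OS_FAMILY_MAP entry added above feeds the derived grains asserted in the test:

    _OS_FAMILY_MAP = {"AstraLinuxCE": "Debian"}
    os_name, osrelease = "AstraLinuxCE", "2.12.22"
    osrelease_info = tuple(int(p) for p in osrelease.split("."))
    grains = {
        "os": os_name,
        "os_family": _OS_FAMILY_MAP.get(os_name, os_name),
        "osmajorrelease": osrelease_info[0],
        "osfinger": "{}-{}".format(os_name, osrelease_info[0]),  # 'AstraLinuxCE-2'
    }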
diff --git a/add-batch_presence_ping_timeout-and-batch_presence_p.patch b/add-batch_presence_ping_timeout-and-batch_presence_p.patch
index 4fa802a..296a963 100644
--- a/add-batch_presence_ping_timeout-and-batch_presence_p.patch
+++ b/add-batch_presence_ping_timeout-and-batch_presence_p.patch
@@ -1,46 +1,55 @@
-From 376a7d2eeb6b3b215fac9322f1baee4497bdb339 Mon Sep 17 00:00:00 2001
+From 66f6c2540a151487b26c89a2bb66199d6c65c18d Mon Sep 17 00:00:00 2001
From: Marcelo Chiaradia
Date: Thu, 4 Apr 2019 13:57:38 +0200
Subject: [PATCH] Add 'batch_presence_ping_timeout' and
'batch_presence_ping_gather_job_timeout' parameters for synchronous batching
---
- salt/cli/batch.py | 7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
+ salt/cli/batch.py | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/salt/cli/batch.py b/salt/cli/batch.py
-index 36e66da1af..67f03c8a45 100644
+index 527cffdeb7..2bc5444aef 100644
--- a/salt/cli/batch.py
+++ b/salt/cli/batch.py
-@@ -83,6 +83,9 @@ def batch_get_opts(
+@@ -77,6 +77,13 @@ def batch_get_opts(
if key not in opts:
opts[key] = val
-+ opts['batch_presence_ping_timeout'] = kwargs.get('batch_presence_ping_timeout', opts['timeout'])
-+ opts['batch_presence_ping_gather_job_timeout'] = kwargs.get('batch_presence_ping_gather_job_timeout', opts['gather_job_timeout'])
++ opts["batch_presence_ping_timeout"] = kwargs.get(
++ "batch_presence_ping_timeout", opts["timeout"]
++ )
++ opts["batch_presence_ping_gather_job_timeout"] = kwargs.get(
++ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"]
++ )
+
return opts
-@@ -119,7 +122,7 @@ class Batch(object):
- args = [self.opts['tgt'],
- 'test.ping',
- [],
-- self.opts['timeout'],
-+ self.opts.get('batch_presence_ping_timeout', self.opts['timeout']),
- ]
+@@ -115,7 +122,7 @@ class Batch:
+ self.opts["tgt"],
+ "test.ping",
+ [],
+- self.opts["timeout"],
++ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]),
+ ]
- selected_target_option = self.opts.get('selected_target_option', None)
-@@ -130,7 +133,7 @@ class Batch(object):
+ selected_target_option = self.opts.get("selected_target_option", None)
+@@ -126,7 +133,12 @@ class Batch:
- self.pub_kwargs['yield_pub_data'] = True
- ping_gen = self.local.cmd_iter(*args,
-- gather_job_timeout=self.opts['gather_job_timeout'],
-+ gather_job_timeout=self.opts.get('batch_presence_ping_gather_job_timeout', self.opts['gather_job_timeout']),
- **self.pub_kwargs)
+ self.pub_kwargs["yield_pub_data"] = True
+ ping_gen = self.local.cmd_iter(
+- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs
++ *args,
++ gather_job_timeout=self.opts.get(
++ "batch_presence_ping_gather_job_timeout",
++ self.opts["gather_job_timeout"],
++ ),
++ **self.pub_kwargs
+ )
# Broadcast to targets
--
-2.16.4
+2.29.2
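A minimal sketch of the fallback semantics this patch adds: the presence-ping timeouts are taken from kwargs when supplied and otherwise inherit the regular batch timeouts (the values below are invented):

    opts = {"timeout": 5, "gather_job_timeout": 10}
    kwargs = {"batch_presence_ping_timeout": 3}
    opts["batch_presence_ping_timeout"] = kwargs.get(
        "batch_presence_ping_timeout", opts["timeout"]
    )
    opts["batch_presence_ping_gather_job_timeout"] = kwargs.get(
        "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"]
    )
    assert opts["batch_presence_ping_timeout"] == 3               # explicitly set
    assert opts["batch_presence_ping_gather_job_timeout"] == 10   # inherited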
diff --git a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch b/add-cpe_name-for-osversion-grain-parsing-u-49946.patch
index 23a7e7c..64228b2 100644
--- a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch
+++ b/add-cpe_name-for-osversion-grain-parsing-u-49946.patch
@@ -1,4 +1,4 @@
-From a90f35bc03b477a63aae20c58f8957c075569465 Mon Sep 17 00:00:00 2001
+From c845d56fdf1762586b1f210b1eb49193893d4312 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Tue, 9 Oct 2018 14:08:50 +0200
Subject: [PATCH] Add CPE_NAME for osversion* grain parsing (U#49946)
@@ -29,10 +29,10 @@ Fix proper part name in the string-bound CPE
1 file changed, 28 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 9c1b5d930e..7b7e328520 100644
+index 5535584d1b..bc3cf129cd 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -1642,6 +1642,34 @@ def _parse_cpe_name(cpe):
+@@ -1732,6 +1732,34 @@ def _parse_cpe_name(cpe):
return ret
@@ -65,9 +65,9 @@ index 9c1b5d930e..7b7e328520 100644
+
+
def os_data():
- '''
+ """
Return grains pertaining to the operating system
--
-2.16.4
+2.29.2
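The body of _parse_cpe_name is elided from the hunk above. As a hypothetical sketch only (parse_cpe_name_sketch and its field layout are assumptions, not the patched code), a string-bound CPE splits along ':' roughly like this:

    def parse_cpe_name_sketch(cpe):
        parts = cpe.split(":")
        if len(parts) < 5 or not parts[1].startswith("/"):
            return {}
        return {
            "part": parts[1].lstrip("/"),  # 'o' marks an operating system
            "vendor": parts[2],
            "product": parts[3],
            "version": parts[4],
        }

    assert parse_cpe_name_sketch("cpe:/o:suse:sles:12")["vendor"] == "suse"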
diff --git a/add-custom-suse-capabilities-as-grains.patch b/add-custom-suse-capabilities-as-grains.patch
index 1051ce0..395a9b5 100644
--- a/add-custom-suse-capabilities-as-grains.patch
+++ b/add-custom-suse-capabilities-as-grains.patch
@@ -1,4 +1,4 @@
-From e57dd3c2ae655422f0f6939825154ce5827d43c4 Mon Sep 17 00:00:00 2001
+From 713ccfdc5c6733495d3ce7f26a8cfeddb8e9e9c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 21 Jun 2018 11:57:57 +0100
@@ -9,10 +9,10 @@ Subject: [PATCH] Add custom SUSE capabilities as Grains
1 file changed, 7 insertions(+)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
-index 9ce644b766..1082b05dba 100644
+index 2fdbe6526a..ddc22293ea 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
-@@ -75,3 +75,10 @@ def config():
+@@ -66,3 +66,10 @@ def config():
log.warning("Bad syntax in grains file! Skipping.")
return {}
return {}
@@ -24,6 +24,6 @@ index 9ce644b766..1082b05dba 100644
+ '__suse_reserved_pkg_patches_support': True
+ }
--
-2.16.4
+2.29.2
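A short sketch of how a capability grain like this can be probed from other code (the helper name is invented; the grain key comes from the patch above):

    def supports_pkg_patches(grains):
        return bool(grains.get("__suse_reserved_pkg_patches_support"))

    assert supports_pkg_patches({"__suse_reserved_pkg_patches_support": True})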
diff --git a/add-docker-logout-237.patch b/add-docker-logout-237.patch
index 33bf399..7f878dc 100644
--- a/add-docker-logout-237.patch
+++ b/add-docker-logout-237.patch
@@ -1,4 +1,4 @@
-From 9e6bd24b07cd2424c3805777b07b9ea84adff416 Mon Sep 17 00:00:00 2001
+From 355e1e29e8f3286eeb13bc2d05089c096c9e01e3 Mon Sep 17 00:00:00 2001
From: Alexander Graul
Date: Mon, 18 May 2020 16:39:27 +0200
Subject: [PATCH] Add docker logout (#237)
@@ -13,10 +13,10 @@ interpreted as a list of docker registries to log out of.
2 files changed, 139 insertions(+)
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
-index 28a2107cec..119e9eb170 100644
+index 934038c927..176b4db926 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
-@@ -1481,6 +1481,86 @@ def login(*registries):
+@@ -1586,6 +1586,86 @@ def logout(*registries):
return ret
@@ -102,44 +102,15 @@ index 28a2107cec..119e9eb170 100644
+
# Functions for information gathering
def depends(name):
- '''
+ """
diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py
-index 191bfc123f..8f4ead2867 100644
+index 34e2e9c610..48526acb71 100644
--- a/tests/unit/modules/test_dockermod.py
+++ b/tests/unit/modules/test_dockermod.py
-@@ -164,6 +164,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
- self.assertIn('retcode', ret)
- self.assertNotEqual(ret['retcode'], 0)
+@@ -199,6 +199,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
+ output_loglevel="quiet",
+ )
-+ def test_logout_calls_docker_cli_logout_single(self):
-+ client = Mock()
-+ get_client_mock = MagicMock(return_value=client)
-+ ref_out = {"stdout": "", "stderr": "", "retcode": 0}
-+ registry_auth_data = {
-+ "portus.example.com:5000": {
-+ "username": "admin",
-+ "password": "linux12345",
-+ "email": "tux@example.com",
-+ }
-+ }
-+ docker_mock = MagicMock(return_value=ref_out)
-+ with patch.object(docker_mod, "_get_client", get_client_mock):
-+ dunder_salt = {
-+ "config.get": MagicMock(return_value=registry_auth_data),
-+ "cmd.run_all": docker_mock,
-+ "config.option": MagicMock(return_value={}),
-+ }
-+ with patch.dict(docker_mod.__salt__, dunder_salt):
-+ ret = docker_mod.logout("portus.example.com:5000")
-+ assert "retcode" in ret
-+ assert ret["retcode"] == 0
-+ docker_mock.assert_called_with(
-+ ["docker", "logout", "portus.example.com:5000"],
-+ python_shell=False,
-+ output_loglevel="quiet",
-+ )
-+
-+
+ def test_logout_calls_docker_cli_logout_all(self):
+ client = Mock()
+ get_client_mock = MagicMock(return_value=client)
@@ -170,10 +141,39 @@ index 191bfc123f..8f4ead2867 100644
+ assert ret["retcode"] == 0
+ assert docker_mock.call_count == 2
+
- def test_ps_with_host_true(self):
- '''
- Check that docker.ps called with host is ``True``,
++ def test_logout_calls_docker_cli_logout_single(self):
++ client = Mock()
++ get_client_mock = MagicMock(return_value=client)
++ ref_out = {"stdout": "", "stderr": "", "retcode": 0}
++ registry_auth_data = {
++ "portus.example.com:5000": {
++ "username": "admin",
++ "password": "linux12345",
++ "email": "tux@example.com",
++ }
++ }
++ docker_mock = MagicMock(return_value=ref_out)
++ with patch.object(docker_mod, "_get_client", get_client_mock):
++ dunder_salt = {
++ "config.get": MagicMock(return_value=registry_auth_data),
++ "cmd.run_all": docker_mock,
++ "config.option": MagicMock(return_value={}),
++ }
++ with patch.dict(docker_mod.__salt__, dunder_salt):
++ ret = docker_mod.logout("portus.example.com:5000")
++ assert "retcode" in ret
++ assert ret["retcode"] == 0
++ docker_mock.assert_called_with(
++ ["docker", "logout", "portus.example.com:5000"],
++ python_shell=False,
++ output_loglevel="quiet",
++ )
++
++
+ def test_logout_calls_docker_cli_logout_all(self):
+ client = Mock()
+ get_client_mock = MagicMock(return_value=client)
--
-2.26.2
+2.29.2
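Condensed sketch of the behavior the logout tests above exercise (logout_sketch is illustrative, not the module code): with no arguments, every registry found in the auth configuration is logged out of; otherwise only the named ones, each via 'docker logout <registry>':

    def logout_sketch(registry_auth_config, *registries):
        targets = list(registries) or list(registry_auth_config)
        return [["docker", "logout", r] for r in targets]

    cfg = {"portus.example.com:5000": {"username": "admin"}}
    assert logout_sketch(cfg) == [["docker", "logout", "portus.example.com:5000"]]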
diff --git a/add-environment-variable-to-know-if-yum-is-invoked-f.patch b/add-environment-variable-to-know-if-yum-is-invoked-f.patch
index ac344a6..a03e838 100644
--- a/add-environment-variable-to-know-if-yum-is-invoked-f.patch
+++ b/add-environment-variable-to-know-if-yum-is-invoked-f.patch
@@ -1,78 +1,83 @@
-From 874b1229babf5244debac141cd260f695ccc1e9d Mon Sep 17 00:00:00 2001
+From 7b2b5fc53d30397b8f7a11e59f5c7a57bcb63058 Mon Sep 17 00:00:00 2001
From: Marcelo Chiaradia
Date: Thu, 7 Jun 2018 10:29:41 +0200
-Subject: [PATCH] Add environment variable to know if yum is invoked from
- Salt(bsc#1057635)
+Subject: [PATCH] Add environment variable to know if yum is invoked
+ from Salt (bsc#1057635)
---
- salt/modules/yumpkg.py | 18 ++++++++++++------
- 1 file changed, 12 insertions(+), 6 deletions(-)
+ salt/modules/yumpkg.py | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index f7e4ac9753..c89d321a1b 100644
+index b547fe6be7..c58b3e4c70 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
-@@ -913,7 +913,8 @@ def list_repo_pkgs(*args, **kwargs):
- yum_version = None if _yum() != 'yum' else _LooseVersion(
- __salt__['cmd.run'](
- ['yum', '--version'],
-- python_shell=False
-+ python_shell=False,
-+ env={"SALT_RUNNING": '1'}
- ).splitlines()[0].strip()
- )
- # Really old version of yum; does not even have --showduplicates option
-@@ -2324,7 +2325,8 @@ def list_holds(pattern=__HOLD_PATTERN, full=True):
+@@ -917,7 +917,9 @@ def list_repo_pkgs(*args, **kwargs):
+ None
+ if _yum() != "yum"
+ else _LooseVersion(
+- __salt__["cmd.run"](["yum", "--version"], python_shell=False)
++ __salt__["cmd.run"](
++ ["yum", "--version"], python_shell=False, env={"SALT_RUNNING": "1"}
++ )
+ .splitlines()[0]
+ .strip()
+ )
+@@ -2347,7 +2349,9 @@ def list_holds(pattern=__HOLD_PATTERN, full=True):
+ """
_check_versionlock()
- out = __salt__['cmd.run']([_yum(), 'versionlock', 'list'],
-- python_shell=False)
-+ python_shell=False,
-+ env={"SALT_RUNNING": '1'})
+- out = __salt__["cmd.run"]([_yum(), "versionlock", "list"], python_shell=False)
++ out = __salt__["cmd.run"](
++ [_yum(), "versionlock", "list"], python_shell=False, env={"SALT_RUNNING": "1"}
++ )
ret = []
- for line in salt.utils.itertools.split(out, '\n'):
+ for line in salt.utils.itertools.split(out, "\n"):
match = _get_hold(line, pattern=pattern, full=full)
-@@ -2390,7 +2392,8 @@ def group_list():
- out = __salt__['cmd.run_stdout'](
- [_yum(), 'grouplist', 'hidden'],
- output_loglevel='trace',
-- python_shell=False
+@@ -2415,7 +2419,10 @@ def group_list():
+ }
+
+ out = __salt__["cmd.run_stdout"](
+- [_yum(), "grouplist", "hidden"], output_loglevel="trace", python_shell=False
++ [_yum(), "grouplist", "hidden"],
++ output_loglevel="trace",
+ python_shell=False,
-+ env={"SALT_RUNNING": '1'}
++ env={"SALT_RUNNING": "1"},
)
key = None
- for line in salt.utils.itertools.split(out, '\n'):
-@@ -2457,7 +2460,8 @@ def group_info(name, expand=False):
- out = __salt__['cmd.run_stdout'](
- cmd,
- output_loglevel='trace',
-- python_shell=False
-+ python_shell=False,
-+ env={"SALT_RUNNING": '1'}
- )
+ for line in salt.utils.itertools.split(out, "\n"):
+@@ -2486,7 +2493,9 @@ def group_info(name, expand=False, ignore_groups=None):
+ ret[pkgtype] = set()
+
+ cmd = [_yum(), "--quiet", "groupinfo", name]
+- out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
++ out = __salt__["cmd.run_stdout"](
++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"}
++ )
g_info = {}
-@@ -3134,7 +3138,8 @@ def download(*packages):
- __salt__['cmd.run'](
- cmd,
- output_loglevel='trace',
-- python_shell=False
-+ python_shell=False,
-+ env={"SALT_RUNNING": '1'}
- )
+ for line in salt.utils.itertools.split(out, "\n"):
+@@ -3203,7 +3212,9 @@ def download(*packages, **kwargs):
+
+ cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)]
+ cmd.extend(packages)
+- __salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
++ __salt__["cmd.run"](
++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"}
++ )
ret = {}
for dld_result in os.listdir(CACHE_DIR):
-@@ -3209,7 +3214,8 @@ def _get_patches(installed_only=False):
- cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'all']
- ret = __salt__['cmd.run_stdout'](
- cmd,
-- python_shell=False
-+ python_shell=False,
-+ env={"SALT_RUNNING": '1'}
- )
+ if not dld_result.endswith(".rpm"):
+@@ -3279,7 +3290,7 @@ def _get_patches(installed_only=False):
+ patches = {}
+
+ cmd = [_yum(), "--quiet", "updateinfo", "list", "all"]
+- ret = __salt__["cmd.run_stdout"](cmd, python_shell=False)
++ ret = __salt__["cmd.run_stdout"](cmd, python_shell=False, env={"SALT_RUNNING": "1"})
for line in salt.utils.itertools.split(ret, os.linesep):
- inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
+ inst, advisory_id, sev, pkg = re.match(
+ r"([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)", line
--
-2.16.4
+2.29.2
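The net effect of this patch is that every yum/dnf call made through these wrappers carries SALT_RUNNING=1 in its environment. A sketch of the consumer side (hypothetical plugin code, not part of the patch):

    import os

    def invoked_from_salt():
        # anything yum spawns can now detect that Salt drove the call
        return os.environ.get("SALT_RUNNING") == "1"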
diff --git a/add-hold-unhold-functions.patch b/add-hold-unhold-functions.patch
index dbb61a3..da32063 100644
--- a/add-hold-unhold-functions.patch
+++ b/add-hold-unhold-functions.patch
@@ -1,4 +1,4 @@
-From 666f62917bbc48cbee2ed0aa319a61afd1b1fcb2 Mon Sep 17 00:00:00 2001
+From 6176ef8aa39626dcb450a1665231a796e9544342 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Thu, 6 Dec 2018 16:26:23 +0100
Subject: [PATCH] Add hold/unhold functions
@@ -7,43 +7,324 @@ Add unhold function
Add warnings
---
- salt/modules/zypperpkg.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 87 insertions(+), 1 deletion(-)
+ salt/modules/zypperpkg.py | 186 +++++++++++++++++++++++++++-----------
+ 1 file changed, 131 insertions(+), 55 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 50279ccbd1..08a9c2ed4d 100644
+index 44bcbbf2f2..6fa6e3e0a1 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -41,6 +41,7 @@ import salt.utils.pkg
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Package support for openSUSE via the zypper package manager
+
+@@ -12,8 +11,6 @@ Package support for openSUSE via the zypper package manager
+
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import datetime
+ import fnmatch
+@@ -24,7 +21,6 @@ import time
+ from xml.dom import minidom as dom
+ from xml.parsers.expat import ExpatError
+
+-# Import salt libs
+ import salt.utils.data
+ import salt.utils.environment
+ import salt.utils.event
+@@ -35,9 +31,9 @@ import salt.utils.pkg
import salt.utils.pkg.rpm
import salt.utils.stringutils
import salt.utils.systemd
+import salt.utils.versions
- from salt.utils.versions import LooseVersion
- import salt.utils.environment
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
-@@ -1771,7 +1772,7 @@ def clean_locks():
+
+-# Import 3rd-party libs
+ # pylint: disable=import-error,redefined-builtin,no-name-in-module
+ from salt.ext import six
+ from salt.ext.six.moves import configparser
+@@ -51,8 +47,8 @@ log = logging.getLogger(__name__)
+
+ HAS_ZYPP = False
+ ZYPP_HOME = "/etc/zypp"
+-LOCKS = "{0}/locks".format(ZYPP_HOME)
+-REPOS = "{0}/repos.d".format(ZYPP_HOME)
++LOCKS = "{}/locks".format(ZYPP_HOME)
++REPOS = "{}/repos.d".format(ZYPP_HOME)
+ DEFAULT_PRIORITY = 99
+ PKG_ARCH_SEPARATOR = "."
+
+@@ -75,7 +71,7 @@ def __virtual__():
+ return __virtualname__
+
+
+-class _Zypper(object):
++class _Zypper:
+ """
+ Zypper parallel caller.
+ Validates the result and either raises an exception or reports an error.
+@@ -339,7 +335,7 @@ class _Zypper(object):
+ attrs=["pid", "name", "cmdline", "create_time"],
+ )
+ data["cmdline"] = " ".join(data["cmdline"])
+- data["info"] = "Blocking process created at {0}.".format(
++ data["info"] = "Blocking process created at {}.".format(
+ datetime.datetime.utcfromtimestamp(
+ data["create_time"]
+ ).isoformat()
+@@ -347,7 +343,7 @@ class _Zypper(object):
+ data["success"] = True
+ except Exception as err: # pylint: disable=broad-except
+ data = {
+- "info": "Unable to retrieve information about blocking process: {0}".format(
++ "info": "Unable to retrieve information about blocking process: {}".format(
+ err.message
+ ),
+ "success": False,
+@@ -382,7 +378,7 @@ class _Zypper(object):
+ )
+ if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure:
+ raise CommandExecutionError(
+- "Zypper command failure: {0}".format(self.error_msg)
++ "Zypper command failure: {}".format(self.error_msg)
+ )
+
+ return (
+@@ -397,7 +393,7 @@ class _Zypper(object):
+ __zypper__ = _Zypper()
+
+
+-class Wildcard(object):
++class Wildcard:
+ """
+ .. versionadded:: 2017.7.0
+
+@@ -439,7 +435,7 @@ class Wildcard(object):
+ for vrs in self._get_scope_versions(self._get_available_versions())
+ ]
+ )
+- return versions and "{0}{1}".format(self._op or "", versions[-1]) or None
++ return versions and "{}{}".format(self._op or "", versions[-1]) or None
+
+ def _get_available_versions(self):
+ """
+@@ -451,17 +447,15 @@ class Wildcard(object):
+ ).getElementsByTagName("solvable")
+ if not solvables:
+ raise CommandExecutionError(
+- "No packages found matching '{0}'".format(self.name)
++ "No packages found matching '{}'".format(self.name)
+ )
+
+ return sorted(
+- set(
+- [
+- slv.getAttribute(self._attr_solvable_version)
+- for slv in solvables
+- if slv.getAttribute(self._attr_solvable_version)
+- ]
+- )
++ {
++ slv.getAttribute(self._attr_solvable_version)
++ for slv in solvables
++ if slv.getAttribute(self._attr_solvable_version)
++ }
+ )
+
+ def _get_scope_versions(self, pkg_versions):
+@@ -489,7 +483,7 @@ class Wildcard(object):
+ self._op = version.replace(exact_version, "") or None
+ if self._op and self._op not in self.Z_OP:
+ raise CommandExecutionError(
+- 'Zypper do not supports operator "{0}".'.format(self._op)
++ 'Zypper does not support operator "{}".'.format(self._op)
+ )
+ self.version = exact_version
+
+@@ -539,14 +533,11 @@ def list_upgrades(refresh=True, root=None, **kwargs):
+ cmd = ["list-updates"]
+ if "fromrepo" in kwargs:
+ repos = kwargs["fromrepo"]
+- if isinstance(repos, six.string_types):
++ if isinstance(repos, str):
+ repos = [repos]
+ for repo in repos:
+ cmd.extend(
+- [
+- "--repo",
+- repo if isinstance(repo, six.string_types) else six.text_type(repo),
+- ]
++ ["--repo", repo if isinstance(repo, str) else str(repo),]
+ )
+ log.debug("Targeting repos: %s", repos)
+ for update_node in (
+@@ -610,7 +601,7 @@ def info_installed(*names, **kwargs):
+ for _nfo in pkg_nfo:
+ t_nfo = dict()
+ # Translate dpkg-specific keys to a common structure
+- for key, value in six.iteritems(_nfo):
++ for key, value in _nfo.items():
+ if key == "source_rpm":
+ t_nfo["source"] = value
+ else:
+@@ -1033,9 +1024,7 @@ def list_repo_pkgs(*args, **kwargs):
+ fromrepo = kwargs.pop("fromrepo", "") or ""
+ ret = {}
+
+- targets = [
+- arg if isinstance(arg, six.string_types) else six.text_type(arg) for arg in args
+- ]
++ targets = [arg if isinstance(arg, str) else str(arg) for arg in args]
+
+ def _is_match(pkgname):
+ """
+@@ -1124,7 +1113,7 @@ def _get_repo_info(alias, repos_cfg=None, root=None):
+ try:
+ meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
+ meta["alias"] = alias
+- for key, val in six.iteritems(meta):
++ for key, val in meta.items():
+ if val in ["0", "1"]:
+ meta[key] = int(meta[key]) == 1
+ elif val == "NONE":
+@@ -1197,7 +1186,7 @@ def del_repo(repo, root=None):
+ "message": msg[0].childNodes[0].nodeValue,
+ }
+
+- raise CommandExecutionError("Repository '{0}' not found.".format(repo))
++ raise CommandExecutionError("Repository '{}' not found.".format(repo))
+
+
+ def mod_repo(repo, **kwargs):
+@@ -1252,13 +1241,13 @@ def mod_repo(repo, **kwargs):
+ url = kwargs.get("url", kwargs.get("mirrorlist", kwargs.get("baseurl")))
+ if not url:
+ raise CommandExecutionError(
+- "Repository '{0}' not found, and neither 'baseurl' nor "
++ "Repository '{}' not found, and neither 'baseurl' nor "
+ "'mirrorlist' was specified".format(repo)
+ )
+
+ if not _urlparse(url).scheme:
+ raise CommandExecutionError(
+- "Repository '{0}' not found and URL for baseurl/mirrorlist "
++ "Repository '{}' not found and URL for baseurl/mirrorlist "
+ "is malformed".format(repo)
+ )
+
+@@ -1281,7 +1270,7 @@ def mod_repo(repo, **kwargs):
+
+ if new_url == base_url:
+ raise CommandExecutionError(
+- "Repository '{0}' already exists as '{1}'.".format(repo, alias)
++ "Repository '{}' already exists as '{}'.".format(repo, alias)
+ )
+
+ # Add new repo
+@@ -1291,7 +1280,7 @@ def mod_repo(repo, **kwargs):
+ repos_cfg = _get_configured_repos(root=root)
+ if repo not in repos_cfg.sections():
+ raise CommandExecutionError(
+- "Failed add new repository '{0}' for unspecified reason. "
++ "Failed add new repository '{}' for unspecified reason. "
+ "Please check zypper logs.".format(repo)
+ )
+ added = True
+@@ -1327,12 +1316,10 @@ def mod_repo(repo, **kwargs):
+ cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck")
+
+ if "priority" in kwargs:
+- cmd_opt.append(
+- "--priority={0}".format(kwargs.get("priority", DEFAULT_PRIORITY))
+- )
++ cmd_opt.append("--priority={}".format(kwargs.get("priority", DEFAULT_PRIORITY)))
+
+ if "humanname" in kwargs:
+- cmd_opt.append("--name='{0}'".format(kwargs.get("humanname")))
++ cmd_opt.append("--name='{}'".format(kwargs.get("humanname")))
+
+ if kwargs.get("gpgautoimport") is True:
+ global_cmd_opt.append("--gpg-auto-import-keys")
+@@ -1589,7 +1576,7 @@ def install(
+
+ if pkg_type == "repository":
+ targets = []
+- for param, version_num in six.iteritems(pkg_params):
++ for param, version_num in pkg_params.items():
+ if version_num is None:
+ log.debug("targeting package: %s", param)
+ targets.append(param)
+@@ -1597,7 +1584,7 @@ def install(
+ prefix, verstr = salt.utils.pkg.split_comparison(version_num)
+ if not prefix:
+ prefix = "="
+- target = "{0}{1}{2}".format(param, prefix, verstr)
++ target = "{}{}{}".format(param, prefix, verstr)
+ log.debug("targeting package: %s", target)
+ targets.append(target)
+ elif pkg_type == "advisory":
+@@ -1606,7 +1593,7 @@ def install(
+ for advisory_id in pkg_params:
+ if advisory_id not in cur_patches:
+ raise CommandExecutionError(
+- 'Advisory id "{0}" not found'.format(advisory_id)
++ 'Advisory id "{}" not found'.format(advisory_id)
+ )
+ else:
+ # If we add here the `patch:` prefix, the
+@@ -1703,7 +1690,7 @@ def install(
+
+ if errors:
+ raise CommandExecutionError(
+- "Problem encountered {0} package(s)".format(
++ "Problem encountered {} package(s)".format(
+ "downloading" if downloadonly else "installing"
+ ),
+ info={"errors": errors, "changes": ret},
+@@ -1797,7 +1784,7 @@ def upgrade(
+ cmd_update.append("--dry-run")
+
+ if fromrepo:
+- if isinstance(fromrepo, six.string_types):
++ if isinstance(fromrepo, str):
+ fromrepo = [fromrepo]
+ for repo in fromrepo:
+ cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
+@@ -2052,7 +2039,7 @@ def list_locks(root=None):
+ )
+ if lock.get("solvable_name"):
+ locks[lock.pop("solvable_name")] = lock
+- except IOError:
++ except OSError:
+ pass
+ except Exception: # pylint: disable=broad-except
+ log.warning("Detected a problem when accessing {}".format(_locks))
+@@ -2089,7 +2076,7 @@ def clean_locks(root=None):
return out
--def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+-def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
+def unhold(name=None, pkgs=None, **kwargs):
- '''
+ """
Remove specified package lock.
-@@ -1783,7 +1784,47 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+@@ -2104,8 +2091,50 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume
+ salt '*' pkg.remove_lock <package1>,<package2>,<package3>
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
- '''
+ """
+ ret = {}
+ if (not name and not pkgs) or (name and pkgs):
-+ raise CommandExecutionError('Name or packages must be specified.')
++ raise CommandExecutionError("Name or packages must be specified.")
+ elif name:
+ pkgs = [name]
+
+ locks = list_locks()
+ try:
-+ pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
++ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
+ except MinionError as exc:
+ raise CommandExecutionError(exc)
+
@@ -52,39 +333,44 @@ index 50279ccbd1..08a9c2ed4d 100644
+ for pkg in pkgs:
+ if locks.get(pkg):
+ removed.append(pkg)
-+ ret[pkg]['comment'] = 'Package {0} is no longer held.'.format(pkg)
++ ret[pkg]["comment"] = "Package {} is no longer held.".format(pkg)
+ else:
+ missing.append(pkg)
-+ ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg)
++ ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg)
+
+ if removed:
-+ __zypper__.call('rl', *removed)
++ __zypper__.call("rl", *removed)
+
+ return ret
+
+
+def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
-+ '''
++ """
+ Remove specified package lock.
+
+ CLI Example:
+
+ .. code-block:: bash
+- locks = list_locks(root)
+ salt '*' pkg.remove_lock <package name>
+ salt '*' pkg.remove_lock <package1>,<package2>,<package3>
+ salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
-+ '''
-+ salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use unhold() instead.')
- locks = list_locks()
++ """
++ salt.utils.versions.warn_until(
++ "Sodium", "This function is deprecated. Please use unhold() instead."
++ )
++ locks = list_locks()
try:
- packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
-@@ -1804,6 +1845,50 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
- return {'removed': len(removed), 'not_found': missing}
+ packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
+ except MinionError as exc:
+@@ -2125,7 +2154,51 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume
+ return {"removed": len(removed), "not_found": missing}
+-def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
+def hold(name=None, pkgs=None, **kwargs):
-+ '''
++ """
+ Add a package lock. Specify packages to lock by exact name.
+
+ CLI Example:
@@ -99,46 +385,85 @@ index 50279ccbd1..08a9c2ed4d 100644
+ :param pkgs:
+ :param kwargs:
+ :return:
-+ '''
++ """
+ ret = {}
+ if (not name and not pkgs) or (name and pkgs):
-+ raise CommandExecutionError('Name or packages must be specified.')
++ raise CommandExecutionError("Name or packages must be specified.")
+ elif name:
+ pkgs = [name]
+
+ locks = list_locks()
+ added = []
+ try:
-+ pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
++ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
+ except MinionError as exc:
+ raise CommandExecutionError(exc)
+
+ for pkg in pkgs:
-+ ret[pkg] = {'name': pkg, 'changes': {}, 'result': False, 'comment': ''}
++ ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""}
+ if not locks.get(pkg):
+ added.append(pkg)
-+ ret[pkg]['comment'] = 'Package {0} is now being held.'.format(pkg)
++ ret[pkg]["comment"] = "Package {} is now being held.".format(pkg)
+ else:
-+ ret[pkg]['comment'] = 'Package {0} is already set to be held.'.format(pkg)
++ ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg)
+
+ if added:
-+ __zypper__.call('al', *added)
++ __zypper__.call("al", *added)
+
+ return ret
+
+
- def add_lock(packages, **kwargs): # pylint: disable=unused-argument
- '''
++def add_lock(packages, **kwargs): # pylint: disable=unused-argument
+ """
Add a package lock. Specify packages to lock by exact name.
-@@ -1816,6 +1901,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
+
+@@ -2140,7 +2213,10 @@ def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.add_lock <package1>,<package2>,<package3>
salt '*' pkg.add_lock pkgs='["foo", "bar"]'
- '''
-+ salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.')
- locks = list_locks()
+ """
+- locks = list_locks(root)
++ salt.utils.versions.warn_until(
++ "Sodium", "This function is deprecated. Please use hold() instead."
++ )
++ locks = list_locks()
added = []
try:
+ packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
+@@ -2495,7 +2571,7 @@ def search(criteria, refresh=False, **kwargs):
+ .getElementsByTagName("solvable")
+ )
+ if not solvables:
+- raise CommandExecutionError("No packages found matching '{0}'".format(criteria))
++ raise CommandExecutionError("No packages found matching '{}'".format(criteria))
+
+ out = {}
+ for solvable in solvables:
+@@ -2649,13 +2725,13 @@ def download(*packages, **kwargs):
+ if failed:
+ pkg_ret[
+ "_error"
+- ] = "The following package(s) failed to download: {0}".format(
++ ] = "The following package(s) failed to download: {}".format(
+ ", ".join(failed)
+ )
+ return pkg_ret
+
+ raise CommandExecutionError(
+- "Unable to download packages: {0}".format(", ".join(packages))
++ "Unable to download packages: {}".format(", ".join(packages))
+ )
+
+
+@@ -2726,7 +2802,7 @@ def diff(*paths, **kwargs):
+
+ if pkg_to_paths:
+ local_pkgs = __salt__["pkg.download"](*pkg_to_paths.keys(), **kwargs)
+- for pkg, files in six.iteritems(pkg_to_paths):
++ for pkg, files in pkg_to_paths.items():
+ for path in files:
+ ret[path] = (
+ __salt__["lowpkg.diff"](local_pkgs[pkg]["path"], path)
--
-2.16.4
+2.29.2
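A minimal sketch of the bookkeeping hold() introduces above: one result dict per package, with the not-yet-locked names collected for a single zypper 'al' (add lock) call:

    def hold_sketch(pkgs, locks):
        ret, added = {}, []
        for pkg in pkgs:
            ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""}
            if not locks.get(pkg):
                added.append(pkg)
                ret[pkg]["comment"] = "Package {} is now being held.".format(pkg)
            else:
                ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg)
        return ret, added  # 'added' then goes to __zypper__.call("al", *added)

    ret, added = hold_sketch(["vim"], locks={})
    assert added == ["vim"]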
diff --git a/add-migrated-state-and-gpg-key-management-functions-.patch b/add-migrated-state-and-gpg-key-management-functions-.patch
index cbcc578..5a81fa5 100644
--- a/add-migrated-state-and-gpg-key-management-functions-.patch
+++ b/add-migrated-state-and-gpg-key-management-functions-.patch
@@ -1,4 +1,4 @@
-From 5254ec34316a0924edb4856f84e6092fafe479fa Mon Sep 17 00:00:00 2001
+From 57cab2d4e282f8b1d17610e6b4a0e772494bfcb1 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 20 Oct 2020 11:43:09 +0200
Subject: [PATCH] Add "migrated" state and GPG key management functions
@@ -20,17 +20,16 @@ same virtual package, based on the counterpart from rpm_lowpkg API.
---
changelog/58782.added | 1 +
salt/modules/aptpkg.py | 7 +-
- salt/modules/rpm_lowpkg.py | 151 ++++++++
+ salt/modules/rpm_lowpkg.py | 151 +++++++++
salt/modules/yumpkg.py | 88 +++++
- salt/modules/zypperpkg.py | 90 ++++-
- salt/states/pkgrepo.py | 208 ++++++++++
- tests/unit/modules/test_rpm_lowpkg.py | 215 +++++++++++
- tests/unit/modules/test_yumpkg.py | 43 ++-
- tests/unit/modules/test_zypperpkg.py | 40 +-
- tests/unit/states/test_pkgrepo.py | 527 ++++++++++++++++++++++++++
- 10 files changed, 1363 insertions(+), 7 deletions(-)
+ salt/modules/zypperpkg.py | 88 +++++
+ salt/states/pkgrepo.py | 207 ++++++++++++
+ tests/unit/modules/test_rpm_lowpkg.py | 236 ++++++++++++-
+ tests/unit/modules/test_yumpkg.py | 41 ++-
+ tests/unit/modules/test_zypperpkg.py | 40 ++-
+ tests/unit/states/test_pkgrepo.py | 468 +++++++++++++++++++++++++-
+ 10 files changed, 1301 insertions(+), 26 deletions(-)
create mode 100644 changelog/58782.added
- create mode 100644 tests/unit/states/test_pkgrepo.py
diff --git a/changelog/58782.added b/changelog/58782.added
new file mode 100644
@@ -41,43 +40,43 @@ index 0000000000..f9e69f64f2
+Add GPG key functions in "lowpkg" and a "migrated" function in the "pkgrepo" state for repository and GPG key migration.
\ No newline at end of file
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 765d69aff2..28b8597ef5 100644
+index e4a9872aad..e001d2f11c 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -1878,7 +1878,7 @@ def _convert_if_int(value):
+@@ -1908,7 +1908,7 @@ def _convert_if_int(value):
return value
-def get_repo_keys():
+def get_repo_keys(**kwargs):
- '''
+ """
.. versionadded:: 2017.7.0
-@@ -1950,7 +1950,9 @@ def get_repo_keys():
+@@ -1990,7 +1990,9 @@ def get_repo_keys():
return ret
--def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base'):
+-def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base"):
+def add_repo_key(
-+ path=None, text=None, keyserver=None, keyid=None, saltenv='base', **kwargs
++ path=None, text=None, keyserver=None, keyid=None, saltenv="base", **kwargs
+):
- '''
+ """
.. versionadded:: 2017.7.0
-@@ -1976,7 +1978,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base
+@@ -2016,7 +2018,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base
salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA'
- '''
- cmd = ['apt-key']
+ """
+ cmd = ["apt-key"]
- kwargs = {}
current_repo_keys = get_repo_keys()
diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py
-index c8a87276b2..fee0221a7c 100644
+index 393b0f453a..57f336bacf 100644
--- a/salt/modules/rpm_lowpkg.py
+++ b/salt/modules/rpm_lowpkg.py
-@@ -823,3 +823,154 @@ def checksum(*paths, **kwargs):
- python_shell=False))
+@@ -835,3 +835,154 @@ def checksum(*paths, **kwargs):
+ )
return ret
+
@@ -232,12 +231,12 @@ index c8a87276b2..fee0221a7c 100644
+ cmd.extend(["-e", key])
+ return __salt__["cmd.retcode"](cmd) == 0
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index 04ab240cd4..85a2dbd857 100644
+index c58b3e4c70..dd843f985b 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
-@@ -3271,3 +3271,91 @@ def list_installed_patches(**kwargs):
+@@ -3346,3 +3346,91 @@ def list_installed_patches(**kwargs):
salt '*' pkg.list_installed_patches
- '''
+ """
return _get_patches(installed_only=True)
+
+
@@ -328,19 +327,10 @@ index 04ab240cd4..85a2dbd857 100644
+ """
+ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index d84a6af6e0..fab7736701 100644
+index d06c265202..5e13c68708 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -1270,7 +1270,7 @@ def mod_repo(repo, **kwargs):
- cmd_opt.append("--priority={0}".format(kwargs.get('priority', DEFAULT_PRIORITY)))
-
- if 'humanname' in kwargs:
-- cmd_opt.append("--name='{0}'".format(kwargs.get('humanname')))
-+ cmd_opt.extend(["--name", kwargs.get("humanname")])
-
- if kwargs.get('gpgautoimport') is True:
- global_cmd_opt.append('--gpg-auto-import-keys')
-@@ -2879,3 +2879,91 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
+@@ -3004,3 +3004,91 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
else:
ret.append(name)
return ret
@@ -433,27 +423,19 @@ index d84a6af6e0..fab7736701 100644
+ """
+ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
-index c39e857580..6c42d17d32 100644
+index 70cb7a1c7e..d734bb9de9 100644
--- a/salt/states/pkgrepo.py
+++ b/salt/states/pkgrepo.py
-@@ -84,6 +84,7 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS:
+@@ -93,6 +93,7 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS:
+ """
+
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
+import os
import sys
- # Import salt libs
-@@ -96,6 +97,7 @@ import salt.utils.pkg.rpm
-
- # Import 3rd-party libs
- from salt.ext import six
-+import salt.utils.versions
-
-
- def __virtual__():
-@@ -643,3 +645,209 @@ def absent(name, **kwargs):
- ret['comment'] = 'Failed to remove repo {0}'.format(name)
+ import salt.utils.data
+@@ -679,3 +680,209 @@ def absent(name, **kwargs):
+ ret["comment"] = "Failed to remove repo {}".format(name)
return ret
+
@@ -663,21 +645,49 @@ index c39e857580..6c42d17d32 100644
+
+ return ret
diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py
-index b6cbd9e5cb..ff3678fde5 100644
+index ec9ecd40cb..84020263ea 100644
--- a/tests/unit/modules/test_rpm_lowpkg.py
+++ b/tests/unit/modules/test_rpm_lowpkg.py
-@@ -5,6 +5,7 @@
+@@ -2,6 +2,7 @@
+ :codeauthor: Jayesh Kariya
+ """
- # Import Python Libs
- from __future__ import absolute_import
+import datetime
- # Import Salt Testing Libs
+ import salt.modules.rpm_lowpkg as rpm
from tests.support.mixins import LoaderModuleMockMixin
-@@ -205,3 +206,217 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- with patch('salt.modules.rpm_lowpkg.rpm.labelCompare', MagicMock(return_value=0)), \
- patch('salt.modules.rpm_lowpkg.HAS_RPM', False):
- self.assertEqual(-1, rpm.version_cmp('1', '2')) # mock returns -1, a python implementation was called
+@@ -15,8 +16,8 @@ def _called_with_root(mock):
+
+
+ def _called_with_root(mock):
+- cmd = ' '.join(mock.call_args[0][0])
+- return cmd.startswith('rpm --root /')
++ cmd = " ".join(mock.call_args[0][0])
++ return cmd.startswith("rpm --root /")
+
+
+ class RpmTestCase(TestCase, LoaderModuleMockMixin):
+@@ -263,14 +264,223 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
+
+ :return:
+ """
+- self.assertEqual(-1, rpm.version_cmp("1", "2"))
+- self.assertEqual(mock_version_cmp.called, True)
+- self.assertEqual(mock_log.warning.called, True)
+- self.assertEqual(
+- mock_log.warning.mock_calls[0][1][0],
+- "Please install a package that provides rpm.labelCompare for more accurate version comparisons.",
+- )
+- self.assertEqual(
+- mock_log.warning.mock_calls[1][1][0],
+- "Falling back on salt.utils.versions.version_cmp() for version comparisons",
+- )
++ with patch(
++ "salt.modules.rpm_lowpkg.rpm.labelCompare", MagicMock(return_value=0)
++ ), patch("salt.modules.rpm_lowpkg.HAS_RPM", False):
++ self.assertEqual(
++ -1, rpm.version_cmp("1", "2")
++ ) # mock returns -1, a python implementation was called
+
+ def test_list_gpg_keys_no_info(self):
+ """
@@ -893,39 +903,31 @@ index b6cbd9e5cb..ff3678fde5 100644
+ self.assertTrue(rpm.remove_gpg_key("gpg-pubkey-1"))
+ self.assertFalse(_called_with_root(mock))
diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
-index 9fbe3d051e..dfe00a7181 100644
+index 4784160d25..e65a1f8b8b 100644
--- a/tests/unit/modules/test_yumpkg.py
+++ b/tests/unit/modules/test_yumpkg.py
-@@ -10,15 +10,17 @@ from tests.support.unit import TestCase, skipIf
- from tests.support.mock import (
- Mock,
- MagicMock,
-+ mock_open,
- patch,
- )
-
- # Import Salt libs
+@@ -5,9 +5,9 @@ import salt.modules.pkg_resource as pkg_resource
+ import salt.modules.rpm_lowpkg as rpm
+ import salt.modules.yumpkg as yumpkg
+ import salt.utils.platform
-from salt.exceptions import CommandExecutionError
+from salt.exceptions import CommandExecutionError, SaltInvocationError
- import salt.modules.rpm_lowpkg as rpm
- from salt.ext import six
- import salt.modules.yumpkg as yumpkg
- import salt.modules.pkg_resource as pkg_resource
-+import salt.utils.platform
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, Mock, patch
++from tests.support.mock import MagicMock, Mock, mock_open, patch
+ from tests.support.unit import TestCase, skipIf
try:
- import pytest
-@@ -799,8 +801,45 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
- with pytest.raises(CommandExecutionError):
- yumpkg._get_yum_config()
+@@ -1630,6 +1630,43 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
+ ret = yumpkg.get_repo(repo, **kwargs)
+ assert ret == expected, ret
+ def test_get_repo_keys(self):
+ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)}
+ with patch.dict(yumpkg.__salt__, salt_mock):
+ self.assertTrue(yumpkg.get_repo_keys(info=True, root="/mnt"))
+ salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt")
-
--@skipIf(pytest is None, 'PyTest is missing')
++
+ def test_add_repo_key_fail(self):
+ with self.assertRaises(SaltInvocationError):
+ yumpkg.add_repo_key()
@@ -957,25 +959,23 @@ index 9fbe3d051e..dfe00a7181 100644
+ self.assertTrue(yumpkg.del_repo_key(keyid="keyid", root="/mnt"))
+ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
+
-+
-+@skipIf(pytest is None, "PyTest is missing")
+
+ @skipIf(pytest is None, "PyTest is missing")
class YumUtilsTestCase(TestCase, LoaderModuleMockMixin):
- '''
- Yum/Dnf utils tests.
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 8cc84485b5..1f2a7dc4b2 100644
+index eaa4d9a76a..018c1ffbca 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -22,7 +22,7 @@ from tests.support.mock import (
- import salt.utils.files
+@@ -10,7 +10,7 @@ import salt.modules.pkg_resource as pkg_resource
import salt.modules.zypperpkg as zypper
- import salt.modules.pkg_resource as pkg_resource
+ import salt.utils.files
+ import salt.utils.pkg
-from salt.exceptions import CommandExecutionError
+from salt.exceptions import CommandExecutionError, SaltInvocationError
-
- # Import 3rd-party libs
+ from salt.ext import six
from salt.ext.six.moves import configparser
-@@ -1728,3 +1728,41 @@ pattern() = package-c"""
+ from tests.support.mixins import LoaderModuleMockMixin
+@@ -2175,3 +2175,41 @@ pattern() = package-c"""
python_shell=False,
env={"ZYPP_READONLY_HACK": "1"},
)
@@ -1018,79 +1018,33 @@ index 8cc84485b5..1f2a7dc4b2 100644
+ self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt"))
+ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
diff --git a/tests/unit/states/test_pkgrepo.py b/tests/unit/states/test_pkgrepo.py
-new file mode 100644
-index 0000000000..9d8d88abd9
---- /dev/null
+index b2be5b4da1..135e545220 100644
+--- a/tests/unit/states/test_pkgrepo.py
+++ b/tests/unit/states/test_pkgrepo.py
-@@ -0,0 +1,527 @@
-+"""
-+ :codeauthor: Tyler Johnson
-+"""
-+import salt.states.pkgrepo as pkgrepo
+@@ -1,17 +1,12 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Tyler Johnson
+ """
+-# Import Python libs
+-from __future__ import absolute_import
+
+-# Import Salt Libs
+ import salt.states.pkgrepo as pkgrepo
+-
+-# Import Salt Testing Libs
+import salt.utils.platform
-+from tests.support.mixins import LoaderModuleMockMixin
-+from tests.support.mock import MagicMock, patch
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+-from tests.support.unit import TestCase
+from tests.support.unit import TestCase, skipIf
-+
-+
-+class PkgrepoTestCase(TestCase, LoaderModuleMockMixin):
-+ """
-+ Test cases for salt.states.pkgrepo
-+ """
-+
-+ def setup_loader_modules(self):
-+ return {
-+ pkgrepo: {
-+ "__opts__": {"test": True},
-+ "__grains__": {"os": "", "os_family": ""},
-+ }
-+ }
-+
-+ def test_new_key_url(self):
-+ """
-+ Test when only the key_url is changed that a change is triggered
-+ """
-+ kwargs = {
-+ "name": "deb http://mock/ sid main",
-+ "disabled": False,
-+ }
-+ key_url = "http://mock/changed_gpg.key"
-+
-+ with patch.dict(
-+ pkgrepo.__salt__, {"pkg.get_repo": MagicMock(return_value=kwargs)}
-+ ):
-+ ret = pkgrepo.managed(key_url=key_url, **kwargs)
-+ self.assertDictEqual(
-+ {"key_url": {"old": None, "new": key_url}}, ret["changes"]
-+ )
-+
-+ def test_update_key_url(self):
-+ """
-+ Test when only the key_url is changed that a change is triggered
-+ """
-+ kwargs = {
-+ "name": "deb http://mock/ sid main",
-+ "gpgcheck": 1,
-+ "disabled": False,
-+ "key_url": "http://mock/gpg.key",
-+ }
-+ changed_kwargs = kwargs.copy()
-+ changed_kwargs["key_url"] = "http://mock/gpg2.key"
-+
-+ with patch.dict(
-+ pkgrepo.__salt__, {"pkg.get_repo": MagicMock(return_value=kwargs)}
-+ ):
-+ ret = pkgrepo.managed(**changed_kwargs)
-+ self.assertIn("key_url", ret["changes"], "Expected a change to key_url")
-+ self.assertDictEqual(
-+ {
-+ "key_url": {
-+ "old": kwargs["key_url"],
-+ "new": changed_kwargs["key_url"],
-+ }
-+ },
-+ ret["changes"],
-+ )
+
+
+ class PkgrepoTestCase(TestCase, LoaderModuleMockMixin):
+@@ -72,3 +67,462 @@ class PkgrepoTestCase(TestCase, LoaderModuleMockMixin):
+ },
+ ret["changes"],
+ )
+
+ def test__normalize_repo_suse(self):
+ repo = {
@@ -1551,6 +1505,6 @@ index 0000000000..9d8d88abd9
+ },
+ )
--
-2.29.1
+2.29.2
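The new GPG helpers in yumpkg and zypperpkg share one delegation pattern: the pkg-level function forwards straight to the lowpkg layer. A stub-based sketch (the fake dunder dict is for illustration only):

    def del_repo_key_sketch(salt_dunder, keyid, root=None):
        return salt_dunder["lowpkg.remove_gpg_key"](keyid, root)

    calls = []
    fake = {"lowpkg.remove_gpg_key": lambda keyid, root: calls.append((keyid, root)) or True}
    assert del_repo_key_sketch(fake, "keyid", "/mnt")
    assert calls == [("keyid", "/mnt")]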
diff --git a/add-missing-_utils-at-loader-grains_func.patch b/add-missing-_utils-at-loader-grains_func.patch
deleted file mode 100644
index f422eea..0000000
--- a/add-missing-_utils-at-loader-grains_func.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 082fa07e5301414b5b834b731aaa96bd5d966de7 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Tue, 10 Mar 2020 13:16:05 +0000
-Subject: [PATCH] Add missing _utils at loader grains_func
-
----
- salt/loader.py | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/salt/loader.py b/salt/loader.py
-index c68562988d..742b2f8e22 100644
---- a/salt/loader.py
-+++ b/salt/loader.py
-@@ -683,6 +683,7 @@ def grain_funcs(opts, proxy=None):
- __opts__ = salt.config.minion_config('/etc/salt/minion')
- grainfuncs = salt.loader.grain_funcs(__opts__)
- '''
-+ _utils = utils(opts)
- ret = LazyLoader(
- _module_dirs(
- opts,
---
-2.23.0
-
-
diff --git a/add-missing-fun-for-returns-from-wfunc-executions.patch b/add-missing-fun-for-returns-from-wfunc-executions.patch
deleted file mode 100644
index 585f69a..0000000
--- a/add-missing-fun-for-returns-from-wfunc-executions.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From 5c25babafd4e4bbe55626713851ea5d6345c43d1 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Wed, 9 Oct 2019 13:03:33 +0100
-Subject: [PATCH] Add missing 'fun' for returns from wfunc executions
-
----
- salt/client/ssh/__init__.py | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
-index 4881540837..1373274739 100644
---- a/salt/client/ssh/__init__.py
-+++ b/salt/client/ssh/__init__.py
-@@ -682,6 +682,8 @@ class SSH(object):
- data = {'return': data}
- if 'id' not in data:
- data['id'] = id_
-+ if 'fun' not in data:
-+ data['fun'] = fun
- data['jid'] = jid # make the jid in the payload the same as the jid in the tag
- self.event.fire_event(
- data,
-@@ -797,6 +799,8 @@ class SSH(object):
- data = {'return': data}
- if 'id' not in data:
- data['id'] = id_
-+ if 'fun' not in data:
-+ data['fun'] = fun
- data['jid'] = jid # make the jid in the payload the same as the jid in the tag
- self.event.fire_event(
- data,
---
-2.16.4
-
-
diff --git a/add-multi-file-support-and-globbing-to-the-filetree-.patch b/add-multi-file-support-and-globbing-to-the-filetree-.patch
index 04c883b..dae5602 100644
--- a/add-multi-file-support-and-globbing-to-the-filetree-.patch
+++ b/add-multi-file-support-and-globbing-to-the-filetree-.patch
@@ -1,4 +1,4 @@
-From 0a6b5e92a4a74dee94eb33a939600f8c2e429c01 Mon Sep 17 00:00:00 2001
+From c5e5dc304e897f8c1664cce29fe9ee63d84f3ae6 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Fri, 12 Oct 2018 16:20:40 +0200
Subject: [PATCH] Add multi-file support and globbing to the filetree
@@ -12,37 +12,37 @@ Collect system logs and boot logs
Support globbing in filetree
---
- salt/cli/support/intfunc.py | 49 +++++++++++++++++++++--------------
- salt/cli/support/profiles/default.yml | 7 +++++
+ salt/cli/support/intfunc.py | 49 ++++++++++++++++-----------
+ salt/cli/support/profiles/default.yml | 7 ++++
2 files changed, 37 insertions(+), 19 deletions(-)
diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py
-index 2727cd6394..f15f4d4097 100644
+index d3d8f83cb8..a9f76a6003 100644
--- a/salt/cli/support/intfunc.py
+++ b/salt/cli/support/intfunc.py
-@@ -6,6 +6,7 @@ Internal functions.
+@@ -3,6 +3,7 @@ Internal functions.
+ """
+ # Maybe this needs to be a modules in a future?
- from __future__ import absolute_import, print_function, unicode_literals
- import os
+import glob
- from salt.cli.support.console import MessagesOutput
- import salt.utils.files
+ import os
-@@ -13,7 +14,7 @@ import salt.utils.files
+ import salt.utils.files
+@@ -11,7 +12,7 @@ from salt.cli.support.console import MessagesOutput
out = MessagesOutput()
-def filetree(collector, path):
+def filetree(collector, *paths):
- '''
+ """
Add all files in the tree. If the "path" is a file,
only that file will be added.
-@@ -21,22 +22,32 @@ def filetree(collector, path):
+@@ -19,22 +20,32 @@ def filetree(collector, path):
:param path: File or directory
:return:
- '''
+ """
- if not path:
-- out.error('Path not defined', ident=2)
+- out.error("Path not defined", ident=2)
- else:
- # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
- # pylint: disable=W8470
@@ -50,7 +50,7 @@ index 2727cd6394..f15f4d4097 100644
- filename = os.path.basename(path)
- try:
- file_ref = salt.utils.files.fopen(path) # pylint: disable=W
-- out.put('Add {}'.format(filename), indent=2)
+- out.put("Add {}".format(filename), indent=2)
- collector.add(filename)
- collector.link(title=path, path=file_ref)
- except Exception as err:
@@ -62,9 +62,9 @@ index 2727cd6394..f15f4d4097 100644
+ _paths += glob.glob(path)
+ for path in set(_paths):
+ if not path:
-+ out.error('Path not defined', ident=2)
++ out.error("Path not defined", ident=2)
+ elif not os.path.exists(path):
-+ out.warning('Path {} does not exists'.format(path))
++ out.warning("Path {} does not exist".format(path))
else:
- for fname in os.listdir(path):
- fname = os.path.join(path, fname)
@@ -75,7 +75,7 @@ index 2727cd6394..f15f4d4097 100644
+ filename = os.path.basename(path)
+ try:
+ file_ref = salt.utils.files.fopen(path) # pylint: disable=W
-+ out.put('Add {}'.format(filename), indent=2)
++ out.put("Add {}".format(filename), indent=2)
+ collector.add(filename)
+ collector.link(title=path, path=file_ref)
+ except Exception as err:
@@ -111,6 +111,6 @@ index 01d9a26193..3defb5eef3 100644
+ - /var/log/messages
--
-2.16.4
+2.29.2
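A sketch of the path handling introduced above: every argument is glob-expanded, then de-duplicated before the files are collected (the example pattern is invented):

    import glob

    def expand_paths(*paths):
        _paths = []
        for path in paths:
            _paths += glob.glob(path)
        return set(_paths)

    # e.g. expand_paths("/var/log/messages*", "/var/log/boot.log")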
diff --git a/add-new-custom-suse-capability-for-saltutil-state-mo.patch b/add-new-custom-suse-capability-for-saltutil-state-mo.patch
index 1428332..537fcf9 100644
--- a/add-new-custom-suse-capability-for-saltutil-state-mo.patch
+++ b/add-new-custom-suse-capability-for-saltutil-state-mo.patch
@@ -1,4 +1,4 @@
-From ad1323b4f83fa8f2954c0a965f4acaf91575a59b Mon Sep 17 00:00:00 2001
+From 70d13dcc62286d5195bbf28b53aae61616cc0f8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 26 Mar 2020 13:08:16 +0000
@@ -10,10 +10,10 @@ Subject: [PATCH] Add new custom SUSE capability for saltutil state
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
-index 1082b05dba7830ee53078cff86b5183b5eea2829..b30ab0091fee7cda8f74b861e9e9f95f8ad85b39 100644
+index ddc22293ea..0eec27e628 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
-@@ -80,5 +80,6 @@ def config():
+@@ -71,5 +71,6 @@ def config():
def suse_backported_capabilities():
return {
'__suse_reserved_pkg_all_versions_support': True,
@@ -22,6 +22,6 @@ index 1082b05dba7830ee53078cff86b5183b5eea2829..b30ab0091fee7cda8f74b861e9e9f95f
+ '__suse_reserved_saltutil_states_support': True
}
--
-2.23.0
+2.29.2
diff --git a/add-patch-support-for-allow-vendor-change-option-wit.patch b/add-patch-support-for-allow-vendor-change-option-wit.patch
new file mode 100644
index 0000000..ae6333b
--- /dev/null
+++ b/add-patch-support-for-allow-vendor-change-option-wit.patch
@@ -0,0 +1,107 @@
+From cee4cc182b4740c912861c712dea7bc44eb70ffb Mon Sep 17 00:00:00 2001
+From: Martin Seidl
+Date: Mon, 7 Dec 2020 01:10:51 +0100
+Subject: [PATCH] Add patch support for allow vendor change option with
+ zypper
+
+---
+ salt/modules/zypperpkg.py | 46 +++++++++++++++++++++++++++------------
+ 1 file changed, 32 insertions(+), 14 deletions(-)
+
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 6f22994bf0..4a5cb85e7c 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -35,7 +35,6 @@ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+
+ # pylint: disable=import-error,redefined-builtin,no-name-in-module
+-from salt.ext import six
+ from salt.ext.six.moves import configparser
+ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
+ from salt.utils.versions import LooseVersion
+@@ -1431,6 +1430,7 @@ def install(
+ no_recommends=False,
+ root=None,
+ inclusion_detection=False,
++ novendorchange=True,
+ **kwargs
+ ):
+ """
+@@ -1478,6 +1478,10 @@ def install(
+ skip_verify
+ Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+
++
++ novendorchange
++ Disallow vendor change
++
+ version
+ Can be either a version number, or the combination of a comparison
+ operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
+@@ -1638,6 +1642,22 @@ def install(
+ cmd_install.append(
+ kwargs.get("resolve_capabilities") and "--capability" or "--name"
+ )
++ if novendorchange:
++ if __grains__["osrelease_info"][0] > 11:
++ cmd_install.append("--no-allow-vendor-change")
++ log.info("Disabling vendor changes")
++ else:
++ log.warning(
++ "Enabling/Disabling vendor changes is not supported on this Zypper version"
++ )
++ else:
++ if __grains__["osrelease_info"][0] > 11:
++ cmd_install.append("--allow-vendor-change")
++ log.info("Enabling vendor changes")
++ else:
++ log.warning(
++ "Enabling/Disabling vendor changes is not supported on this Zypper version"
++ )
+
+ if not refresh:
+ cmd_install.insert(0, "--no-refresh")
+@@ -1649,7 +1669,6 @@ def install(
+ cmd_install.extend(fromrepoopt)
+ if no_recommends:
+ cmd_install.append("--no-recommends")
+-
+ errors = []
+
+ # Split the targets into batches of 500 packages each, so that
+@@ -1793,19 +1812,18 @@ def upgrade(
+ cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
+ log.info("Targeting repos: %s", fromrepo)
+
+- if dist_upgrade:
+- # TODO: Grains validation should be moved to Zypper class
+- if __grains__["osrelease_info"][0] > 11:
+- if novendorchange:
+- cmd_update.append("--no-allow-vendor-change")
+- log.info("Disabling vendor changes")
+- else:
+- cmd_update.append("--allow-vendor-change")
+- log.info("Enabling vendor changes")
++ # TODO: Grains validation should be moved to Zypper class
++ if __grains__["osrelease_info"][0] > 11:
++ if novendorchange:
++ cmd_update.append("--no-allow-vendor-change")
++ log.info("Disabling vendor changes")
+ else:
+- log.warning(
+- "Enabling/Disabling vendor changes is not supported on this Zypper version"
+- )
++ cmd_update.append("--allow-vendor-change")
++ log.info("Enabling vendor changes")
++ else:
++ log.warning(
++ "Enabling/Disabling vendor changes is not supported on this Zypper version"
++ )
+
+ if no_recommends:
+ cmd_update.append("--no-recommends")
+--
+2.29.2
+
+
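Usage shape for the new install() flag, plus a sketch of the branch it adds (vendor_flag_sketch is illustrative; the real code appends to cmd_install and logs rather than returning):

    # CLI shape:  salt '*' pkg.install emacs novendorchange=False
    def vendor_flag_sketch(novendorchange=True, osmajor=15):
        if osmajor <= 11:
            return None  # zypper too old; the patch only logs a warning here
        return "--no-allow-vendor-change" if novendorchange else "--allow-vendor-change"

    assert vendor_flag_sketch() == "--no-allow-vendor-change"
    assert vendor_flag_sketch(novendorchange=False) == "--allow-vendor-change"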
diff --git a/add-pkg.services_need_restart-302.patch b/add-pkg.services_need_restart-302.patch
new file mode 100644
index 0000000..08ae31b
--- /dev/null
+++ b/add-pkg.services_need_restart-302.patch
@@ -0,0 +1,404 @@
+From c79f4a8619ff1275b2ec4400c1fb27d24c22a7eb Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 8 Dec 2020 15:35:49 +0100
+Subject: [PATCH] Add pkg.services_need_restart (#302)
+
+* Add utils.systemd.pid_to_service function
+
+This function translates a given PID to the systemd service name in case
+the process belongs to a running service. It uses DBUS for the
+translation if DBUS is available, falling back to parsing
+``systemctl status -o json'' output.
+
+* Add zypperpkg.services_need_restart
+
+pkg.services_need_restart returns a list of system services that were
+affected by package manager operations such as updates, downgrades or
+reinstallations without having been restarted. This might cause issues,
+e.g. when a shared object was loaded by a process and then
+replaced by the package manager.
+
+(cherry picked from commit b950fcdbd6cc8cb08e1413a0ed05e0ae21717cea)
+
+* Add aptpkg.services_need_restart
+
+pkg.services_need_restart returns a list of system services that were
+affected by package manager operations such as updates, downgrades or
+reinstallations without having been restarted. This might cause issues,
+e.g. when a shared object was loaded by a process and then
+replaced by the package manager.
+
+Requires checkrestart, which is part of the debian-goodies package and
+available from official Ubuntu and Debian repositories.
+
+(cherry picked from commit b981f6ecb1a551b98c5cebab4975fc09c6a55a22)
+
+* Add yumpkg.services_need_restart
+
+pkg.services_need_restart returns a list of system services that were
+affected by package manager operations such as updates, downgrades or
+reinstallations without having been restarted. This might cause issues,
+e.g. when a shared object was loaded by a process and then
+replaced by the package manager.
+
+Requires dnf with the needs-restarting plugin, which is part of
+dnf-plugins-core and installed by default on RHEL/CentOS/Fedora.
+Also requires systemd for the mapping between PIDs and systemd services.
+
+(cherry picked from commit 5e2be1095729c9f73394e852b82749950957e6fb)
+
+* Add changelog entry for issue #58261
+
+(cherry picked from commit 148877ed8ff7a47132c1186274739e648f7acf1c)
+
+* Simplify dnf needs-restarting output parsing
+
+Co-authored-by: Wayne Werner
+(cherry picked from commit beb5d60f3cc64b880ec25ca188f8a73f6ec493dd)
+---
+ changelog/58261.added | 1 +
+ salt/modules/aptpkg.py | 42 ++++++++++++++++-
+ salt/modules/yumpkg.py | 36 +++++++++++++++
+ salt/modules/zypperpkg.py | 25 ++++++++++
+ salt/utils/systemd.py | 69 ++++++++++++++++++++++++++++
+ tests/unit/modules/test_aptpkg.py | 22 ++++++++-
+ tests/unit/modules/test_yumpkg.py | 32 ++++++++++++-
+ tests/unit/modules/test_zypperpkg.py | 14 ++++++
+ 8 files changed, 238 insertions(+), 3 deletions(-)
+ create mode 100644 changelog/58261.added
+
+diff --git a/changelog/58261.added b/changelog/58261.added
+new file mode 100644
+index 0000000000..537a43e80d
+--- /dev/null
++++ b/changelog/58261.added
+@@ -0,0 +1 @@
++Added ``pkg.services_need_restart`` which lists system services that should be restarted after package management operations.
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index 03e99af733..a0e0cc30c1 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -38,7 +38,12 @@ import salt.utils.stringutils
+ import salt.utils.systemd
+ import salt.utils.versions
+ import salt.utils.yaml
+-from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
++from salt.exceptions import (
++ CommandExecutionError,
++ CommandNotFoundError,
++ MinionError,
++ SaltInvocationError,
++)
+ from salt.modules.cmdmod import _parse_env
+
+ log = logging.getLogger(__name__)
+@@ -3029,3 +3034,38 @@ def list_downloaded(root=None, **kwargs):
+ ).isoformat(),
+ }
+ return ret
++
++
++def services_need_restart(**kwargs):
++ """
++ .. versionadded:: NEXT
++
++ List services that use files which have been changed by the
++ package manager and may therefore need to be restarted.
++
++ Requires checkrestart from the debian-goodies package.
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.services_need_restart
++ """
++ if not salt.utils.path.which_bin(["checkrestart"]):
++ raise CommandNotFoundError(
++ "'checkrestart' is needed. It is part of the 'debian-goodies' "
++ "package which can be installed from official repositories."
++ )
++
++ cmd = ["checkrestart", "--machine"]
++ services = set()
++
++ cr_output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
++ for line in cr_output.split("\n"):
++ if not line.startswith("SERVICE:"):
++ continue
++ end_of_name = line.find(",")
++ service = line[8:end_of_name] # skip "SERVICE:"
++ services.add(service)
++
++ return list(services)
+diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
+index dd843f985b..df174e737d 100644
+--- a/salt/modules/yumpkg.py
++++ b/salt/modules/yumpkg.py
+@@ -3434,3 +3434,39 @@ def del_repo_key(keyid, root=None, **kwargs):
+
+ """
+ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
++
++
++def services_need_restart(**kwargs):
++ """
++ .. versionadded:: NEXT
++
++ List services that use files which have been changed by the
++ package manager and may therefore need to be restarted.
++
++ Requires systemd.
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.services_need_restart
++ """
++ if _yum() != "dnf":
++ raise CommandExecutionError("dnf is required to list outdated services.")
++ if not salt.utils.systemd.booted(__context__):
++ raise CommandExecutionError("systemd is required to list outdated services.")
++
++ cmd = ["dnf", "--quiet", "needs-restarting"]
++ dnf_output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
++ if not dnf_output:
++ return []
++
++ services = set()
++ for line in dnf_output.split("\n"):
++ pid, has_delim, _ = line.partition(":")
++ if has_delim:
++ service = salt.utils.systemd.pid_to_service(pid.strip())
++ if service:
++ services.add(service)
++
++ return list(services)
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 5e13c68708..6f22994bf0 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -3092,3 +3092,28 @@ def del_repo_key(keyid, root=None, **kwargs):
+
+ """
+ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
++
++
++def services_need_restart(root=None, **kwargs):
++ """
++ .. versionadded:: NEXT
++
++ List services that use files which have been changed by the
++ package manager and may therefore need to be restarted.
++
++ root
++ Operate on a different root directory.
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.services_need_restart
++
++ """
++ cmd = ["ps", "-sss"]
++
++ zypper_output = __zypper__(root=root).nolock.call(*cmd)
++ services = zypper_output.split()
++
++ return services
+diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py
+index 4d902bc920..f42d0421f8 100644
+--- a/salt/utils/systemd.py
++++ b/salt/utils/systemd.py
+@@ -11,6 +11,12 @@ import salt.utils.path
+ import salt.utils.stringutils
+ from salt.exceptions import SaltInvocationError
+
++try:
++ import dbus
++except ImportError:
++ dbus = None
++
++
+ log = logging.getLogger(__name__)
+
+
+@@ -114,3 +120,66 @@ def has_scope(context=None):
+ if _sd_version is None:
+ return False
+ return _sd_version >= 205
++
++
++def pid_to_service(pid):
++ """
++ Check if a PID belongs to a systemd service and return its name.
++ Return None if the PID does not belong to a service.
++
++ Uses DBUS if available.
++ """
++ if dbus:
++ return _pid_to_service_dbus(pid)
++ else:
++ return _pid_to_service_systemctl(pid)
++
++
++def _pid_to_service_systemctl(pid):
++ systemd_cmd = ["systemctl", "--output", "json", "status", str(pid)]
++ try:
++ systemd_output = subprocess.run(
++ systemd_cmd, check=True, text=True, capture_output=True
++ )
++ status_json = salt.utils.json.find_json(systemd_output.stdout)
++ except (ValueError, subprocess.CalledProcessError):
++ return None
++
++ name = status_json.get("_SYSTEMD_UNIT")
++ if name and name.endswith(".service"):
++ return _strip_suffix(name)
++ else:
++ return None
++
++
++def _pid_to_service_dbus(pid):
++ """
++ Use DBUS to check if a PID belongs to a running systemd service and return the service name if it does.
++ """
++ bus = dbus.SystemBus()
++ systemd_object = bus.get_object(
++ "org.freedesktop.systemd1", "/org/freedesktop/systemd1"
++ )
++ systemd = dbus.Interface(systemd_object, "org.freedesktop.systemd1.Manager")
++ try:
++ service_path = systemd.GetUnitByPID(pid)
++ service_object = bus.get_object("org.freedesktop.systemd1", service_path)
++ service_props = dbus.Interface(
++ service_object, "org.freedesktop.DBus.Properties"
++ )
++ service_name = service_props.Get("org.freedesktop.systemd1.Unit", "Id")
++ name = str(service_name)
++
++ if name and name.endswith(".service"):
++ return _strip_suffix(name)
++ else:
++ return None
++ except dbus.DBusException:
++ return None
++
++
++def _strip_suffix(service_name):
++ """
++ Strip ".service" suffix from a given service name.
++ """
++ return service_name[:-8]
+diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
+index eb3f9e2da7..1d4d2f7fdc 100644
+--- a/tests/unit/modules/test_aptpkg.py
++++ b/tests/unit/modules/test_aptpkg.py
+@@ -13,7 +13,6 @@ import textwrap
+ import pytest
+ import salt.modules.aptpkg as aptpkg
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+-from salt.ext import six
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, Mock, call, patch
+ from tests.support.unit import TestCase, skipIf
+@@ -1001,3 +1000,24 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
+ # We should attempt to call the cmd 5 times
+ self.assertEqual(cmd_mock.call_count, 5)
+ cmd_mock.has_calls(expected_calls)
++
++ @patch("salt.utils.path.which_bin", Mock(return_value="/usr/sbin/checkrestart"))
++ def test_services_need_restart(self):
++ """
++ Test that checkrestart output is parsed correctly
++ """
++ cr_output = """
++PROCESSES: 24
++PROGRAMS: 17
++PACKAGES: 8
++SERVICE:rsyslog,385,/usr/sbin/rsyslogd
++SERVICE:cups-daemon,390,/usr/sbin/cupsd
++ """
++
++ with patch.dict(
++ aptpkg.__salt__, {"cmd.run_stdout": Mock(return_value=cr_output)}
++ ):
++ assert sorted(aptpkg.services_need_restart()) == [
++ "cups-daemon",
++ "rsyslog",
++ ]
+diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
+index e65a1f8b8b..b97e82d307 100644
+--- a/tests/unit/modules/test_yumpkg.py
++++ b/tests/unit/modules/test_yumpkg.py
+@@ -7,7 +7,7 @@ import salt.modules.yumpkg as yumpkg
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, Mock, mock_open, patch
++from tests.support.mock import MagicMock, Mock, call, mock_open, patch
+ from tests.support.unit import TestCase, skipIf
+
+ try:
+@@ -1745,3 +1745,33 @@ class YumUtilsTestCase(TestCase, LoaderModuleMockMixin):
+ python_shell=True,
+ username="Darth Vader",
+ )
++
++ @skipIf(not salt.utils.systemd.booted(), "Requires systemd")
++ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf"))
++ def test_services_need_restart(self):
++ """
++ Test that dnf needs-restarting output is parsed and
++ salt.utils.systemd.pid_to_service is called as expected.
++ """
++ expected = ["firewalld", "salt-minion"]
++
++ dnf_mock = Mock(
++ return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n"
++ )
++ systemd_mock = Mock(side_effect=["firewalld", "salt-minion"])
++ with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch(
++ "salt.utils.systemd.pid_to_service", systemd_mock
++ ):
++ assert sorted(yumpkg.services_need_restart()) == expected
++ systemd_mock.assert_has_calls([call("123"), call("456")])
++
++ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf"))
++ def test_services_need_restart_requires_systemd(self):
++ """Test that yumpkg.services_need_restart raises an error if systemd is unavailable."""
++ with patch("salt.utils.systemd.booted", Mock(return_value=False)):
++ pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
++
++ @patch("salt.modules.yumpkg._yum", Mock(return_value="yum"))
++ def test_services_need_restart_requires_dnf(self):
++ """Test that yumpkg.services_need_restart raises an error if DNF is unavailable."""
++ pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
+diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
+index 018c1ffbca..9c4a224c55 100644
+--- a/tests/unit/modules/test_zypperpkg.py
++++ b/tests/unit/modules/test_zypperpkg.py
+@@ -2213,3 +2213,17 @@ pattern() = package-c"""
+ with patch.dict(zypper.__salt__, salt_mock):
+ self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt"))
+ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
++
++ def test_services_need_restart(self):
++ """
++ Test that zypper ps is used correctly to list services that need to
++ be restarted.
++ """
++ expected = ["salt-minion", "firewalld"]
++ zypper_output = "salt-minion\nfirewalld"
++ zypper_mock = Mock()
++ zypper_mock(root=None).nolock.call = Mock(return_value=zypper_output)
++
++ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock):
++ assert zypper.services_need_restart() == expected
++ zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss")
+--
+2.29.2
+
+
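When DBUS is not importable, ``pid_to_service()`` above falls back to ``systemctl --output json status <pid>`` and reads the ``_SYSTEMD_UNIT`` field. A self-contained sketch of that fallback, assuming plain ``json.loads`` where the patch uses ``salt.utils.json.find_json`` to skip any non-JSON preamble:

.. code-block:: python

    import json
    import subprocess


    def pid_to_service(pid):
        """Return the .service unit owning ``pid`` (suffix stripped), or None."""
        cmd = ["systemctl", "--output", "json", "status", str(pid)]
        try:
            proc = subprocess.run(cmd, check=True, text=True, capture_output=True)
            status = json.loads(proc.stdout)
        except (ValueError, OSError, subprocess.CalledProcessError):
            return None
        name = status.get("_SYSTEMD_UNIT", "")
        # Only .service units count; scopes, slices and the like are ignored,
        # just as _pid_to_service_systemctl() does in the patch above.
        if name.endswith(".service"):
            return name[: -len(".service")]
        return None
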
diff --git a/add-publish_batch-to-clearfuncs-exposed-methods.patch b/add-publish_batch-to-clearfuncs-exposed-methods.patch
index b918dd0..a572ef1 100644
--- a/add-publish_batch-to-clearfuncs-exposed-methods.patch
+++ b/add-publish_batch-to-clearfuncs-exposed-methods.patch
@@ -1,27 +1,26 @@
-From da936daeebd701e147707ad814c07bfc259d4be4 Mon Sep 17 00:00:00 2001
+From 2422d30358bcd0f96e399e623136f7984d136b38 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 28 May 2020 09:37:08 +0100
Subject: [PATCH] Add publish_batch to ClearFuncs exposed methods
---
- salt/master.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ salt/master.py | 1 +
+ 1 file changed, 1 insertion(+)
diff --git a/salt/master.py b/salt/master.py
-index 485c16029b12fc38fc88b54aba95f03aa95d14ee..7d7a094a1a212180bfb294df3ad8b38477981450 100644
+index ab85c7f5c6..59bb19ce75 100644
--- a/salt/master.py
+++ b/salt/master.py
-@@ -1906,7 +1906,7 @@ class ClearFuncs(TransportMethods):
- # These methods will be exposed to the transport layer by
- # MWorker._handle_clear
+@@ -2042,6 +2042,7 @@ class ClearFuncs(TransportMethods):
expose_methods = (
-- 'ping', 'publish', 'get_token', 'mk_token', 'wheel', 'runner',
-+ 'ping', 'publish', 'publish_batch', 'get_token', 'mk_token', 'wheel', 'runner',
- )
-
- # The ClearFuncs object encapsulates the functions that can be executed in
+ "ping",
+ "publish",
++ "publish_batch",
+ "get_token",
+ "mk_token",
+ "wheel",
--
-2.23.0
+2.29.2
diff --git a/add-saltssh-multi-version-support-across-python-inte.patch b/add-saltssh-multi-version-support-across-python-inte.patch
index c09d327..19c92ce 100644
--- a/add-saltssh-multi-version-support-across-python-inte.patch
+++ b/add-saltssh-multi-version-support-across-python-inte.patch
@@ -1,4 +1,4 @@
-From 369567107fa18187f8cbc5040728037d0774287b Mon Sep 17 00:00:00 2001
+From 99aa26e7ab4840cf38f54e7692d7d1eede3adeb4 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Mon, 12 Mar 2018 12:01:39 +0100
Subject: [PATCH] Add SaltSSH multi-version support across Python
@@ -254,10 +254,9 @@ Lintfix
Set master_top_first to False by default
---
- doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++++++++++
- salt/client/ssh/ssh_py_shim.py | 4 +
- salt/utils/thin.py | 1 +
- 3 files changed, 183 insertions(+)
+ doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++
+ salt/client/ssh/ssh_py_shim.py | 3 +
+ 2 files changed, 181 insertions(+)
create mode 100644 doc/topics/releases/fluorine.rst
diff --git a/doc/topics/releases/fluorine.rst b/doc/topics/releases/fluorine.rst
@@ -445,39 +444,20 @@ index 0000000000..40c69e25cc
+Salt version is also available on the Master machine, although it does not need to be directly
+installed together with the older Python interpreter.
diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
-index cd7549a178..95b3931a32 100644
+index c0ce0fd7de..5ddd282ed0 100644
--- a/salt/client/ssh/ssh_py_shim.py
+++ b/salt/client/ssh/ssh_py_shim.py
-@@ -165,6 +165,9 @@ def unpack_thin(thin_path):
+@@ -171,6 +171,9 @@ def unpack_thin(thin_path):
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
+ checksum_path = os.path.normpath(os.path.join(OPTIONS.saltdir, "thin_checksum"))
-+ with open(checksum_path, 'w') as chk:
-+ chk.write(OPTIONS.checksum + '\n')
++ with open(checksum_path, "w") as chk:
++ chk.write(OPTIONS.checksum + "\n")
os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
-@@ -358,5 +361,6 @@ def main(argv): # pylint: disable=W0613
- return retcode
-
-
-+
- if __name__ == '__main__':
- sys.exit(main(sys.argv))
-diff --git a/salt/utils/thin.py b/salt/utils/thin.py
-index 8496db9569..0ff31cef39 100644
---- a/salt/utils/thin.py
-+++ b/salt/utils/thin.py
-@@ -9,6 +9,7 @@ from __future__ import absolute_import, print_function, unicode_literals
- import copy
- import logging
- import os
-+import copy
- import shutil
- import subprocess
- import sys
--
-2.16.4
+2.29.2
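The shim hunk above records ``OPTIONS.checksum`` in ``<saltdir>/thin_checksum`` right after unpacking, so a later run can tell whether the deployed thin still matches the master's tarball without re-extracting it. A sketch of such a comparison; the hash algorithm and the helper name are illustrative assumptions, not something salt-ssh prescribes:

.. code-block:: python

    import hashlib
    import os


    def thin_matches(saltdir, thin_path):
        """Compare the recorded thin checksum against a freshly computed digest."""
        checksum_path = os.path.join(saltdir, "thin_checksum")
        if not os.path.exists(checksum_path):
            return False
        with open(checksum_path) as chk:
            recorded = chk.read().strip()
        digest = hashlib.sha256()  # assumed algorithm, for illustration only
        with open(thin_path, "rb") as tarball:
            for block in iter(lambda: tarball.read(65536), b""):
                digest.update(block)
        return digest.hexdigest() == recorded
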
diff --git a/add-standalone-configuration-file-for-enabling-packa.patch b/add-standalone-configuration-file-for-enabling-packa.patch
index a6634a5..5eb1546 100644
--- a/add-standalone-configuration-file-for-enabling-packa.patch
+++ b/add-standalone-configuration-file-for-enabling-packa.patch
@@ -1,9 +1,9 @@
-From 717c9bc6cb81994c5f23de87cfa91112fa7bf89c Mon Sep 17 00:00:00 2001
+From 8ad65d6fa39edc7fc1967e2df1f3db0aa7df4d11 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 22 May 2019 13:00:46 +0100
-Subject: [PATCH] Add standalone configuration file for enabling package
- formulas
+Subject: [PATCH] Add standalone configuration file for enabling
+ package formulas
---
conf/suse/standalone-formulas-configuration.conf | 4 ++++
@@ -21,6 +21,6 @@ index 0000000000..94d05fb2ee
+ - /usr/share/salt-formulas/states
+ - /srv/salt
--
-2.16.4
+2.29.2
diff --git a/add-supportconfig-module-for-remote-calls-and-saltss.patch b/add-supportconfig-module-for-remote-calls-and-saltss.patch
index 45e7eb0..d03d6fe 100644
--- a/add-supportconfig-module-for-remote-calls-and-saltss.patch
+++ b/add-supportconfig-module-for-remote-calls-and-saltss.patch
@@ -1,4 +1,4 @@
-From f4388ef82b5053e9996272b182c29a2da21a6258 Mon Sep 17 00:00:00 2001
+From 9fba801c1e1e6136808dca80ccd7524ed483250e Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Fri, 19 Oct 2018 15:44:47 +0200
Subject: [PATCH] Add supportconfig module for remote calls and SaltSSH
@@ -195,70 +195,98 @@ Remove unused import
Check last function by full name
---
+ doc/ref/modules/all/index.rst | 1 +
+ doc/ref/states/all/index.rst | 1 +
salt/cli/support/__init__.py | 2 +-
- salt/cli/support/collector.py | 12 +-
+ salt/cli/support/collector.py | 14 +-
salt/loader.py | 6 +-
- salt/modules/saltsupport.py | 381 +++++++++++++++++++++++++++++++
- salt/state.py | 34 ++-
- salt/states/saltsupport.py | 206 +++++++++++++++++
- salt/utils/args.py | 6 +-
- salt/utils/decorators/__init__.py | 24 ++
- tests/unit/modules/test_saltsupport.py | 394 +++++++++++++++++++++++++++++++++
- 9 files changed, 1044 insertions(+), 21 deletions(-)
+ salt/modules/saltsupport.py | 405 ++++++++++++++++++++
+ salt/state.py | 38 +-
+ salt/states/saltsupport.py | 225 +++++++++++
+ salt/utils/args.py | 23 +-
+ salt/utils/decorators/__init__.py | 68 ++--
+ tests/unit/modules/test_saltsupport.py | 496 +++++++++++++++++++++++++
+ 11 files changed, 1220 insertions(+), 59 deletions(-)
create mode 100644 salt/modules/saltsupport.py
create mode 100644 salt/states/saltsupport.py
create mode 100644 tests/unit/modules/test_saltsupport.py
+diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
+index 4c93972276..9fea7af07f 100644
+--- a/doc/ref/modules/all/index.rst
++++ b/doc/ref/modules/all/index.rst
+@@ -415,6 +415,7 @@ execution modules
+ salt_version
+ saltcheck
+ saltcloudmod
++ saltsupport
+ saltutil
+ schedule
+ scp_mod
+diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst
+index 2664b4ce45..052efe4582 100644
+--- a/doc/ref/states/all/index.rst
++++ b/doc/ref/states/all/index.rst
+@@ -281,6 +281,7 @@ state modules
+ rvm
+ salt_proxy
+ saltmod
++ saltsupport
+ saltutil
+ schedule
+ selinux
diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py
-index 6a98a2d656..0a48b0a081 100644
+index 4fdf44186f..59c2609e07 100644
--- a/salt/cli/support/__init__.py
+++ b/salt/cli/support/__init__.py
-@@ -40,7 +40,7 @@ def get_profile(profile, caller, runner):
+@@ -47,7 +47,7 @@ def get_profile(profile, caller, runner):
if os.path.exists(profile_path):
try:
rendered_template = _render_profile(profile_path, caller, runner)
-- log.trace('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template))
-+ log.debug('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template))
+- log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template))
++ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template))
data.update(yaml.load(rendered_template))
except Exception as ex:
log.debug(ex, exc_info=True)
diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py
-index a4343297b6..cbae189aea 100644
+index a08a0b8c6e..1879cc5220 100644
--- a/salt/cli/support/collector.py
+++ b/salt/cli/support/collector.py
-@@ -354,7 +354,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+@@ -362,7 +362,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
return data
- def collect_local_data(self):
+ def collect_local_data(self, profile=None, profile_source=None):
- '''
+ """
Collects master system data.
:return:
-@@ -375,7 +375,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
- '''
- return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
+@@ -388,8 +388,8 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+ self._local_run({"fun": func, "arg": args, "kwarg": kwargs})
+ )
-- scenario = salt.cli.support.get_profile(self.config['support_profile'], call, run)
-+ scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
+- scenario = salt.cli.support.get_profile(
+- self.config["support_profile"], call, run
++ scenario = profile_source or salt.cli.support.get_profile(
++ profile or self.config["support_profile"], call, run
+ )
for category_name in scenario:
self.out.put(category_name)
- self.collector.add(category_name)
-@@ -415,13 +415,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+@@ -441,13 +441,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
- return action_name.split(':')[0] or None
+ return action_name.split(":")[0] or None
- def collect_targets_data(self):
-- '''
+- """
- Collects minion targets data
- :return:
-- '''
+- """
- # TODO: remote collector?
-
def _cleanup(self):
- '''
+ """
Cleanup if crash/exception
-@@ -511,7 +504,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+@@ -551,7 +544,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
@@ -267,16 +295,16 @@ index a4343297b6..cbae189aea 100644
archive_path = self.collector.archive_path
diff --git a/salt/loader.py b/salt/loader.py
-index 428fb338c9..860162b791 100644
+index 8232ed632e..1ee40712e5 100644
--- a/salt/loader.py
+++ b/salt/loader.py
-@@ -1727,8 +1727,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- ))
+@@ -1843,8 +1843,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
+ }
- for attr in getattr(mod, '__load__', dir(mod)):
-- if attr.startswith('_'):
+ for attr in getattr(mod, "__load__", dir(mod)):
+- if attr.startswith("_"):
- # private functions are skipped
-+ if attr.startswith('_') and attr != '__call__':
++ if attr.startswith("_") and attr != "__call__":
+ # private functions are skipped,
+ # except __call__ which is default entrance
+ # for multi-function batch-like state syntax
@@ -285,11 +313,10 @@ index 428fb338c9..860162b791 100644
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
diff --git a/salt/modules/saltsupport.py b/salt/modules/saltsupport.py
new file mode 100644
-index 0000000000..750b2655d6
+index 0000000000..e800e3bf1f
--- /dev/null
+++ b/salt/modules/saltsupport.py
-@@ -0,0 +1,381 @@
-+# -*- coding: utf-8 -*-
+@@ -0,0 +1,405 @@
+#
+# Author: Bo Maryniuk
+#
@@ -305,50 +332,55 @@ index 0000000000..750b2655d6
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-+'''
++"""
+:codeauthor: :email:`Bo Maryniuk `
+
+Module to run salt-support within Salt.
-+'''
++"""
+# pylint: disable=W0231,W0221
+
-+from __future__ import unicode_literals, print_function, absolute_import
+
-+import tempfile
-+import re
-+import os
-+import sys
-+import time
+import datetime
+import logging
++import os
++import re
++import sys
++import tempfile
++import time
+
-+import salt.cli.support.intfunc
-+import salt.utils.decorators
-+import salt.utils.path
+import salt.cli.support
-+import salt.exceptions
-+import salt.utils.stringutils
++import salt.cli.support.intfunc
+import salt.defaults.exitcodes
-+import salt.utils.odict
++import salt.exceptions
++import salt.utils.decorators
+import salt.utils.dictupdate
-+
++import salt.utils.odict
++import salt.utils.path
++import salt.utils.stringutils
+from salt.cli.support.collector import SaltSupport, SupportDataCollector
+
-+__virtualname__ = 'support'
++__virtualname__ = "support"
+log = logging.getLogger(__name__)
+
+
-+class LogCollector(object):
-+ '''
++class LogCollector:
++ """
+ Output collector.
-+ '''
-+ INFO = 'info'
-+ WARNING = 'warning'
-+ ERROR = 'error'
++ """
++
++ INFO = "info"
++ WARNING = "warning"
++ ERROR = "error"
+
+ class MessagesList(list):
+ def append(self, obj):
-+ list.append(self, '{} - {}'.format(datetime.datetime.utcnow().strftime('%T.%f')[:-3], obj))
++ list.append(
++ self,
++ "{} - {}".format(
++ datetime.datetime.utcnow().strftime("%T.%f")[:-3], obj
++ ),
++ )
++
+ __call__ = append
+
+ def __init__(self):
@@ -359,9 +391,9 @@ index 0000000000..750b2655d6
+ }
+
+ def msg(self, message, *args, **kwargs):
-+ title = kwargs.get('title')
++ title = kwargs.get("title")
+ if title:
-+ message = '{}: {}'.format(title, message)
++ message = "{}: {}".format(title, message)
+ self.messages[self.INFO](message)
+
+ def info(self, message, *args, **kwargs):
@@ -381,64 +413,69 @@ index 0000000000..750b2655d6
+
+
+class SaltSupportModule(SaltSupport):
-+ '''
++ """
+ Salt Support module class.
-+ '''
++ """
++
+ def __init__(self):
-+ '''
++ """
+ Constructor
-+ '''
++ """
+ self.config = self.setup_config()
+
+ def setup_config(self):
-+ '''
++ """
+ Return current configuration
+ :return:
-+ '''
++ """
+ return __opts__
+
+ def _get_archive_name(self, archname=None):
-+ '''
++ """
+ Create default archive name.
+
+ :return:
-+ '''
-+ archname = re.sub('[^a-z0-9]', '', (archname or '').lower()) or 'support'
-+ for grain in ['fqdn', 'host', 'localhost', 'nodename']:
++ """
++ archname = re.sub("[^a-z0-9]", "", (archname or "").lower()) or "support"
++ for grain in ["fqdn", "host", "localhost", "nodename"]:
+ host = __grains__.get(grain)
+ if host:
+ break
+ if not host:
-+ host = 'localhost'
++ host = "localhost"
+
-+ return os.path.join(tempfile.gettempdir(),
-+ '{hostname}-{archname}-{date}-{time}.bz2'.format(archname=archname,
-+ hostname=host,
-+ date=time.strftime('%Y%m%d'),
-+ time=time.strftime('%H%M%S')))
++ return os.path.join(
++ tempfile.gettempdir(),
++ "{hostname}-{archname}-{date}-{time}.bz2".format(
++ archname=archname,
++ hostname=host,
++ date=time.strftime("%Y%m%d"),
++ time=time.strftime("%H%M%S"),
++ ),
++ )
+
+ @salt.utils.decorators.external
+ def profiles(self):
-+ '''
++ """
+ Get list of profiles.
+
+ :return:
-+ '''
++ """
+ return {
-+ 'standard': salt.cli.support.get_profiles(self.config),
-+ 'custom': [],
++ "standard": salt.cli.support.get_profiles(self.config),
++ "custom": [],
+ }
+
+ @salt.utils.decorators.external
+ def archives(self):
-+ '''
++ """
+ Get list of existing archives.
+ :return:
-+ '''
++ """
+ arc_files = []
+ tmpdir = tempfile.gettempdir()
+ for filename in os.listdir(tmpdir):
-+ mtc = re.match(r'\w+-\w+-\d+-\d+\.bz2', filename)
++ mtc = re.match(r"\w+-\w+-\d+-\d+\.bz2", filename)
+ if mtc and len(filename) == mtc.span()[-1]:
+ arc_files.append(os.path.join(tmpdir, filename))
+
@@ -446,29 +483,29 @@ index 0000000000..750b2655d6
+
+ @salt.utils.decorators.external
+ def last_archive(self):
-+ '''
++ """
+ Get the last available archive
+ :return:
-+ '''
++ """
+ archives = {}
+ for archive in self.archives():
-+ archives[int(archive.split('.')[0].split('-')[-1])] = archive
++ archives[int(archive.split(".")[0].split("-")[-1])] = archive
+
+ return archives and archives[max(archives)] or None
+
+ @salt.utils.decorators.external
+ def delete_archives(self, *archives):
-+ '''
++ """
+ Delete archives
+ :return:
-+ '''
++ """
+ # Remove paths
+ _archives = []
+ for archive in archives:
+ _archives.append(os.path.basename(archive))
+ archives = _archives[:]
+
-+ ret = {'files': {}, 'errors': {}}
++ ret = {"files": {}, "errors": {}}
+ for archive in self.archives():
+ arc_dir = os.path.dirname(archive)
+ archive = os.path.basename(archive)
@@ -476,43 +513,43 @@ index 0000000000..750b2655d6
+ archive = os.path.join(arc_dir, archive)
+ try:
+ os.unlink(archive)
-+ ret['files'][archive] = 'removed'
++ ret["files"][archive] = "removed"
+ except Exception as err:
-+ ret['errors'][archive] = str(err)
-+ ret['files'][archive] = 'left'
++ ret["errors"][archive] = str(err)
++ ret["files"][archive] = "left"
+
+ return ret
+
+ def format_sync_stats(self, cnt):
-+ '''
++ """
+ Format stats of the sync output.
+
+ :param cnt:
+ :return:
-+ '''
++ """
+ stats = salt.utils.odict.OrderedDict()
-+ if cnt.get('retcode') == salt.defaults.exitcodes.EX_OK:
-+ for line in cnt.get('stdout', '').split(os.linesep):
-+ line = line.split(': ')
++ if cnt.get("retcode") == salt.defaults.exitcodes.EX_OK:
++ for line in cnt.get("stdout", "").split(os.linesep):
++ line = line.split(": ")
+ if len(line) == 2:
-+ stats[line[0].lower().replace(' ', '_')] = line[1]
-+ cnt['transfer'] = stats
-+ del cnt['stdout']
++ stats[line[0].lower().replace(" ", "_")] = line[1]
++ cnt["transfer"] = stats
++ del cnt["stdout"]
+
+ # Remove empty
+ empty_sections = []
+ for section in cnt:
-+ if not cnt[section] and section != 'retcode':
++ if not cnt[section] and section != "retcode":
+ empty_sections.append(section)
+ for section in empty_sections:
+ del cnt[section]
+
+ return cnt
+
-+ @salt.utils.decorators.depends('rsync')
++ @salt.utils.decorators.depends("rsync")
+ @salt.utils.decorators.external
+ def sync(self, group, name=None, host=None, location=None, move=False, all=False):
-+ '''
++ """
+ Sync the latest archive to the host on given location.
+
+ CLI Example:
@@ -532,7 +569,7 @@ index 0000000000..750b2655d6
+ :param all: work with all available archives. Default is False (i.e. latest available)
+
+ :return:
-+ '''
++ """
+ tfh, tfn = tempfile.mkstemp()
+ processed_archives = []
+ src_uri = uri = None
@@ -550,7 +587,7 @@ index 0000000000..750b2655d6
+ for name in archives:
+ err = None
+ if not name:
-+ err = 'No support archive has been defined.'
++ err = "No support archive has been defined."
+ elif not os.path.exists(name):
+ err = 'Support archive "{}" was not found'.format(name)
+ if err is not None:
@@ -559,38 +596,46 @@ index 0000000000..750b2655d6
+
+ if not uri:
+ src_uri = os.path.dirname(name)
-+ uri = '{host}:{loc}'.format(host=host or __opts__['master'],
-+ loc=os.path.join(location or tempfile.gettempdir(), group))
++ uri = "{host}:{loc}".format(
++ host=host or __opts__["master"],
++ loc=os.path.join(location or tempfile.gettempdir(), group),
++ )
+
+ os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name)))
+ os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep))
+ processed_archives.append(name)
-+ log.debug('Syncing {filename} to {uri}'.format(filename=name, uri=uri))
++ log.debug("Syncing {filename} to {uri}".format(filename=name, uri=uri))
+ os.close(tfh)
+
+ if not processed_archives:
-+ raise salt.exceptions.SaltInvocationError('No archives found to transfer.')
++ raise salt.exceptions.SaltInvocationError("No archives found to transfer.")
+
-+ ret = __salt__['rsync.rsync'](src=src_uri, dst=uri, additional_opts=['--stats', '--files-from={}'.format(tfn)])
-+ ret['files'] = {}
++ ret = __salt__["rsync.rsync"](
++ src=src_uri,
++ dst=uri,
++ additional_opts=["--stats", "--files-from={}".format(tfn)],
++ )
++ ret["files"] = {}
+ for name in processed_archives:
+ if move:
+ salt.utils.dictupdate.update(ret, self.delete_archives(name))
-+ log.debug('Deleting {filename}'.format(filename=name))
-+ ret['files'][name] = 'moved'
++ log.debug("Deleting {filename}".format(filename=name))
++ ret["files"][name] = "moved"
+ else:
-+ ret['files'][name] = 'copied'
++ ret["files"][name] = "copied"
+
+ try:
+ os.unlink(tfn)
-+ except (OSError, IOError) as err:
-+ log.error('Cannot remove temporary rsync file {fn}: {err}'.format(fn=tfn, err=err))
++ except OSError as err:
++ log.error(
++ "Cannot remove temporary rsync file {fn}: {err}".format(fn=tfn, err=err)
++ )
+
+ return self.format_sync_stats(ret)
+
+ @salt.utils.decorators.external
-+ def run(self, profile='default', pillar=None, archive=None, output='nested'):
-+ '''
++ def run(self, profile="default", pillar=None, archive=None, output="nested"):
++ """
+ Run Salt Support on the minion.
+
+ profile
@@ -612,11 +657,13 @@ index 0000000000..750b2655d6
+ salt '*' support.run
+ salt '*' support.run profile=network
+ salt '*' support.run pillar=something_special
-+ '''
-+ class outputswitch(object):
-+ '''
++ """
++
++ class outputswitch:
++ """
+ Output switcher on context
-+ '''
++ """
++
+ def __init__(self, output_device):
+ self._tmp_out = output_device
+ self._orig_out = None
@@ -630,116 +677,124 @@ index 0000000000..750b2655d6
+
+ self.out = LogCollector()
+ with outputswitch(self.out):
-+ self.collector = SupportDataCollector(archive or self._get_archive_name(archname=archive), output)
++ self.collector = SupportDataCollector(
++ archive or self._get_archive_name(archname=archive), output
++ )
+ self.collector.out = self.out
+ self.collector.open()
-+ self.collect_local_data(profile=profile, profile_source=__pillar__.get(pillar))
++ self.collect_local_data(
++ profile=profile, profile_source=__pillar__.get(pillar)
++ )
+ self.collect_internal_data()
+ self.collector.close()
+
-+ return {'archive': self.collector.archive_path,
-+ 'messages': self.out.messages}
++ return {"archive": self.collector.archive_path, "messages": self.out.messages}
+
+
+def __virtual__():
-+ '''
++ """
+ Set method references as module function aliases
+ :return:
-+ '''
++ """
+ support = SaltSupportModule()
+
+ def _set_function(obj):
-+ '''
++ """
+ Create a Salt function for the SaltSupport class.
-+ '''
++ """
++
+ def _cmd(*args, **kwargs):
-+ '''
++ """
+ Call a support method as a regular Salt function.
-+ '''
++ """
+ _kwargs = {}
+ for kw in kwargs:
-+ if not kw.startswith('__'):
++ if not kw.startswith("__"):
+ _kwargs[kw] = kwargs[kw]
+ return obj(*args, **_kwargs)
++
+ _cmd.__doc__ = obj.__doc__
+ return _cmd
+
+ for m_name in dir(support):
+ obj = getattr(support, m_name)
-+ if getattr(obj, 'external', False):
++ if getattr(obj, "external", False):
+ setattr(sys.modules[__name__], m_name, _set_function(obj))
+
+ return __virtualname__
diff --git a/salt/state.py b/salt/state.py
-index 1db1c35c52..bc5277554e 100644
+index beab2cb16c..b1bce4e0cd 100644
--- a/salt/state.py
+++ b/salt/state.py
-@@ -1406,8 +1406,9 @@ class State(object):
+@@ -1547,7 +1547,9 @@ class State:
names = []
- if state.startswith('__'):
+ if state.startswith("__"):
continue
-- chunk = {'state': state,
-- 'name': name}
+- chunk = {"state": state, "name": name}
+ chunk = OrderedDict()
-+ chunk['state'] = state
-+ chunk['name'] = name
++ chunk["state"] = state
++ chunk["name"] = name
if orchestration_jid is not None:
- chunk['__orchestration_jid__'] = orchestration_jid
- if '__sls__' in body:
-@@ -1977,8 +1978,12 @@ class State(object):
+ chunk["__orchestration_jid__"] = orchestration_jid
+ if "__sls__" in body:
+@@ -2150,9 +2152,16 @@ class State:
ret = self.call_parallel(cdata, low)
else:
self.format_slots(cdata)
-- ret = self.states[cdata['full']](*cdata['args'],
-- **cdata['kwargs'])
-+ if cdata['full'].split('.')[-1] == '__call__':
+- ret = self.states[cdata["full"]](
+- *cdata["args"], **cdata["kwargs"]
+- )
++ if cdata["full"].split(".")[-1] == "__call__":
+ # __call__ requires OrderedDict to preserve state order
+ # kwargs are also invalid overall
-+ ret = self.states[cdata['full']](cdata['args'], module=None, state=cdata['kwargs'])
++ ret = self.states[cdata["full"]](
++ cdata["args"], module=None, state=cdata["kwargs"]
++ )
+ else:
-+ ret = self.states[cdata['full']](*cdata['args'], **cdata['kwargs'])
++ ret = self.states[cdata["full"]](
++ *cdata["args"], **cdata["kwargs"]
++ )
self.states.inject_globals = {}
- if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
- ret.update(self._run_check_cmd(low))
-@@ -2882,10 +2887,31 @@ class State(object):
+ if (
+ "check_cmd" in low
+@@ -3188,10 +3197,31 @@ class State:
running.update(errors)
return running
+ def inject_default_call(self, high):
-+ '''
++ """
+ Sets .call function to a state, if not there.
+
+ :param high:
+ :return:
-+ '''
++ """
+ for chunk in high:
+ state = high[chunk]
+ for state_ref in state:
+ needs_default = True
+ for argset in state[state_ref]:
-+ if isinstance(argset, six.string_types):
++ if isinstance(argset, str):
+ needs_default = False
+ break
+ if needs_default:
+ order = state[state_ref].pop(-1)
-+ state[state_ref].append('__call__')
++ state[state_ref].append("__call__")
+ state[state_ref].append(order)
+
def call_high(self, high, orchestration_jid=None):
- '''
+ """
Process a high data call and ensure the defined states.
- '''
+ """
+ self.inject_default_call(high)
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
diff --git a/salt/states/saltsupport.py b/salt/states/saltsupport.py
new file mode 100644
-index 0000000000..f245f7f137
+index 0000000000..fb0c9e0372
--- /dev/null
+++ b/salt/states/saltsupport.py
-@@ -0,0 +1,206 @@
-+# -*- coding: utf-8 -*-
+@@ -0,0 +1,225 @@
+#
+# Author: Bo Maryniuk
+#
@@ -756,7 +811,7 @@ index 0000000000..f245f7f137
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
-+r'''
++r"""
+:codeauthor: :email:`Bo Maryniuk `
+
+Execution of Salt Support from within states
@@ -774,27 +829,28 @@ index 0000000000..f245f7f137
+ - group: somewhere
+ - move: true
+
-+'''
-+from __future__ import absolute_import, print_function, unicode_literals
++"""
+import logging
+import os
+import tempfile
+
++import salt.exceptions
++
+# Import salt modules
+import salt.fileclient
+import salt.utils.decorators.path
-+import salt.exceptions
+import salt.utils.odict
+
+log = logging.getLogger(__name__)
-+__virtualname__ = 'support'
++__virtualname__ = "support"
+
+
-+class SaltSupportState(object):
-+ '''
++class SaltSupportState:
++ """
+ Salt-support.
-+ '''
-+ EXPORTED = ['collected', 'taken']
++ """
++
++ EXPORTED = ["collected", "taken"]
+
+ def get_kwargs(self, data):
+ kwargs = {}
@@ -804,57 +860,65 @@ index 0000000000..f245f7f137
+ return kwargs
+
+ def __call__(self, state):
-+ '''
++ """
+ Call support.
+
+ :param args:
+ :param kwargs:
+ :return:
-+ '''
++ """
+ ret = {
-+ 'name': state.pop('name'),
-+ 'changes': {},
-+ 'result': True,
-+ 'comment': '',
++ "name": state.pop("name"),
++ "changes": {},
++ "result": True,
++ "comment": "",
+ }
+
+ out = {}
-+ functions = ['Functions:']
++ functions = ["Functions:"]
+ try:
+ for ref_func, ref_kwargs in state.items():
+ if ref_func not in self.EXPORTED:
-+ raise salt.exceptions.SaltInvocationError('Function {} is not found'.format(ref_func))
++ raise salt.exceptions.SaltInvocationError(
++ "Function {} is not found".format(ref_func)
++ )
+ out[ref_func] = getattr(self, ref_func)(**self.get_kwargs(ref_kwargs))
-+ functions.append(' - {}'.format(ref_func))
-+ ret['comment'] = '\n'.join(functions)
++ functions.append(" - {}".format(ref_func))
++ ret["comment"] = "\n".join(functions)
+ except Exception as ex:
-+ ret['comment'] = str(ex)
-+ ret['result'] = False
-+ ret['changes'] = out
++ ret["comment"] = str(ex)
++ ret["result"] = False
++ ret["changes"] = out
+
+ return ret
+
+ def check_destination(self, location, group):
-+ '''
++ """
+ Check destination for the archives.
+ :return:
-+ '''
++ """
+ # Pre-create destination, since rsync will
+ # put one file named as group
+ try:
+ destination = os.path.join(location, group)
+ if os.path.exists(destination) and not os.path.isdir(destination):
-+ raise salt.exceptions.SaltException('Destination "{}" should be directory!'.format(destination))
++ raise salt.exceptions.SaltException(
++ 'Destination "{}" should be directory!'.format(destination)
++ )
+ if not os.path.exists(destination):
+ os.makedirs(destination)
-+ log.debug('Created destination directory for archives: %s', destination)
++ log.debug("Created destination directory for archives: %s", destination)
+ else:
-+ log.debug('Archives destination directory %s already exists', destination)
++ log.debug(
++ "Archives destination directory %s already exists", destination
++ )
+ except OSError as err:
+ log.error(err)
+
-+ def collected(self, group, filename=None, host=None, location=None, move=True, all=True):
-+ '''
++ def collected(
++ self, group, filename=None, host=None, location=None, move=True, all=True
++ ):
++ """
+ Sync archives to a central place.
+
+ :param name:
@@ -865,22 +929,23 @@ index 0000000000..f245f7f137
+ :param move:
+ :param all:
+ :return:
-+ '''
++ """
+ ret = {
-+ 'name': 'support.collected',
-+ 'changes': {},
-+ 'result': True,
-+ 'comment': '',
++ "name": "support.collected",
++ "changes": {},
++ "result": True,
++ "comment": "",
+ }
+ location = location or tempfile.gettempdir()
+ self.check_destination(location, group)
-+ ret['changes'] = __salt__['support.sync'](group, name=filename, host=host,
-+ location=location, move=move, all=all)
++ ret["changes"] = __salt__["support.sync"](
++ group, name=filename, host=host, location=location, move=move, all=all
++ )
+
+ return ret
+
-+ def taken(self, profile='default', pillar=None, archive=None, output='nested'):
-+ '''
++ def taken(self, profile="default", pillar=None, archive=None, output="nested"):
++ """
+ Takes minion support config data.
+
+ :param profile:
@@ -888,23 +953,29 @@ index 0000000000..f245f7f137
+ :param archive:
+ :param output:
+ :return:
-+ '''
++ """
+ ret = {
-+ 'name': 'support.taken',
-+ 'changes': {},
-+ 'result': True,
++ "name": "support.taken",
++ "changes": {},
++ "result": True,
+ }
+
-+ result = __salt__['support.run'](profile=profile, pillar=pillar, archive=archive, output=output)
-+ if result.get('archive'):
-+ ret['comment'] = 'Information about this system has been saved to {} file.'.format(result['archive'])
-+ ret['changes']['archive'] = result['archive']
-+ ret['changes']['messages'] = {}
-+ for key in ['info', 'error', 'warning']:
-+ if result.get('messages', {}).get(key):
-+ ret['changes']['messages'][key] = result['messages'][key]
++ result = __salt__["support.run"](
++ profile=profile, pillar=pillar, archive=archive, output=output
++ )
++ if result.get("archive"):
++ ret[
++ "comment"
++ ] = "Information about this system has been saved to {} file.".format(
++ result["archive"]
++ )
++ ret["changes"]["archive"] = result["archive"]
++ ret["changes"]["messages"] = {}
++ for key in ["info", "error", "warning"]:
++ if result.get("messages", {}).get(key):
++ ret["changes"]["messages"][key] = result["messages"][key]
+ else:
-+ ret['comment'] = ''
++ ret["comment"] = ""
+
+ return ret
+
@@ -913,7 +984,7 @@ index 0000000000..f245f7f137
+
+
+def __call__(*args, **kwargs):
-+ '''
++ """
+ SLS single-ID syntax processing.
+
+ module:
@@ -926,75 +997,289 @@ index 0000000000..f245f7f137
+ :param cdata:
+ :param kwargs:
+ :return:
-+ '''
-+ return _support_state(kwargs.get('state', {}))
++ """
++ return _support_state(kwargs.get("state", {}))
+
+
-+def taken(name, profile='default', pillar=None, archive=None, output='nested'):
-+ return _support_state.taken(profile=profile, pillar=pillar,
-+ archive=archive, output=output)
++def taken(name, profile="default", pillar=None, archive=None, output="nested"):
++ return _support_state.taken(
++ profile=profile, pillar=pillar, archive=archive, output=output
++ )
+
+
-+def collected(name, group, filename=None, host=None, location=None, move=True, all=True):
-+ return _support_state.collected(group=group, filename=filename,
-+ host=host, location=location, move=move, all=all)
++def collected(
++ name, group, filename=None, host=None, location=None, move=True, all=True
++):
++ return _support_state.collected(
++ group=group, filename=filename, host=host, location=location, move=move, all=all
++ )
+
+
+def __virtual__():
-+ '''
++ """
+ Salt Support state
-+ '''
++ """
+ return __virtualname__
diff --git a/salt/utils/args.py b/salt/utils/args.py
-index 8cc0f35196..666a502498 100644
+index 87afdd3597..102402500c 100644
--- a/salt/utils/args.py
+++ b/salt/utils/args.py
-@@ -20,9 +20,7 @@ import salt.utils.data
- import salt.utils.jid
+@@ -1,8 +1,6 @@
+-# -*- coding: utf-8 -*-
+ """
+ Functions used for CLI argument handling
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import copy
+ import fnmatch
+@@ -17,6 +15,7 @@ import salt.utils.jid
import salt.utils.versions
import salt.utils.yaml
--
--log = logging.getLogger(__name__)
--
+ from salt.exceptions import SaltInvocationError
+from salt.utils.odict import OrderedDict
- if six.PY3:
- KWARG_REGEX = re.compile(r'^([^\d\W][\w.-]*)=(?!=)(.*)$', re.UNICODE)
-@@ -423,7 +421,7 @@ def format_call(fun,
+ log = logging.getLogger(__name__)
+
+@@ -70,9 +69,9 @@ def invalid_kwargs(invalid_kwargs, raise_exc=True):
+ """
+ if invalid_kwargs:
+ if isinstance(invalid_kwargs, dict):
+- new_invalid = ["{0}={1}".format(x, y) for x, y in invalid_kwargs.items()]
++ new_invalid = ["{}={}".format(x, y) for x, y in invalid_kwargs.items()]
+ invalid_kwargs = new_invalid
+- msg = "The following keyword arguments are not valid: {0}".format(
++ msg = "The following keyword arguments are not valid: {}".format(
+ ", ".join(invalid_kwargs)
+ )
+ if raise_exc:
+@@ -259,7 +258,7 @@ def get_function_argspec(func, is_class_method=None):
+ and this is not always wanted.
+ """
+ if not callable(func):
+- raise TypeError("{0} is not a callable".format(func))
++ raise TypeError("{} is not a callable".format(func))
+
+ if hasattr(func, "__wrapped__"):
+ func = func.__wrapped__
+@@ -279,7 +278,7 @@ def get_function_argspec(func, is_class_method=None):
+ try:
+ sig = inspect.signature(func)
+ except TypeError:
+- raise TypeError("Cannot inspect argument list for '{0}'".format(func))
++ raise TypeError("Cannot inspect argument list for '{}'".format(func))
+ else:
+ # argspec-related functions are deprecated in Python 3 in favor of
+ # the new inspect.Signature class, and will be removed at some
+@@ -439,7 +438,7 @@ def format_call(
ret = initial_ret is not None and initial_ret or {}
- ret['args'] = []
-- ret['kwargs'] = {}
-+ ret['kwargs'] = OrderedDict()
+ ret["args"] = []
+- ret["kwargs"] = {}
++ ret["kwargs"] = OrderedDict()
aspec = get_function_argspec(fun, is_class_method=is_class_method)
+@@ -470,7 +469,7 @@ def format_call(
+ used_args_count = len(ret["args"]) + len(args)
+ args_count = used_args_count + len(missing_args)
+ raise SaltInvocationError(
+- "{0} takes at least {1} argument{2} ({3} given)".format(
++ "{} takes at least {} argument{} ({} given)".format(
+ fun.__name__, args_count, args_count > 1 and "s" or "", used_args_count
+ )
+ )
+@@ -506,18 +505,18 @@ def format_call(
+ # In case this is being called for a state module
+ "full",
+ # Not a state module, build the name
+- "{0}.{1}".format(fun.__module__, fun.__name__),
++ "{}.{}".format(fun.__module__, fun.__name__),
+ ),
+ )
+ else:
+- msg = "{0} and '{1}' are invalid keyword arguments for '{2}'".format(
+- ", ".join(["'{0}'".format(e) for e in extra][:-1]),
++ msg = "{} and '{}' are invalid keyword arguments for '{}'".format(
++ ", ".join(["'{}'".format(e) for e in extra][:-1]),
+ list(extra.keys())[-1],
+ ret.get(
+ # In case this is being called for a state module
+ "full",
+ # Not a state module, build the name
+- "{0}.{1}".format(fun.__module__, fun.__name__),
++ "{}.{}".format(fun.__module__, fun.__name__),
+ ),
+ )
+
diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py
-index 45d69072c7..b2abb15425 100644
+index 940d0a90f2..b06cf0abc8 100644
--- a/salt/utils/decorators/__init__.py
+++ b/salt/utils/decorators/__init__.py
-@@ -690,3 +690,27 @@ def ensure_unicode_args(function):
+@@ -1,10 +1,7 @@
+-# -*- coding: utf-8 -*-
+ """
+ Helpful decorators for module writing
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import errno
+ import inspect
+@@ -15,13 +12,10 @@ import time
+ from collections import defaultdict
+ from functools import wraps
+
+-# Import salt libs
+ import salt.utils.args
+ import salt.utils.data
+ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, SaltConfigurationError
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+ from salt.log import LOG_LEVELS
+
+@@ -32,7 +26,7 @@ if getattr(sys, "getwindowsversion", False):
+ log = logging.getLogger(__name__)
+
+
+-class Depends(object):
++class Depends:
+ """
+ This decorator will check the module when it is loaded and check that the
+ dependencies passed in are in the globals of the module. If not, it will
+@@ -121,7 +115,7 @@ class Depends(object):
+
+ @staticmethod
+ def run_command(dependency, mod_name, func_name):
+- full_name = "{0}.{1}".format(mod_name, func_name)
++ full_name = "{}.{}".format(mod_name, func_name)
+ log.trace("Running '%s' for '%s'", dependency, full_name)
+ if IS_WINDOWS:
+ args = salt.utils.args.shlex_split(dependency, posix=False)
+@@ -145,8 +139,8 @@ class Depends(object):
+ It will modify the "functions" dict and remove/replace modules that
+ are missing dependencies.
+ """
+- for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]):
+- for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict):
++ for dependency, dependent_dict in cls.dependency_dict[kind].items():
++ for (mod_name, func_name), (frame, params) in dependent_dict.items():
+ if mod_name != tgt_mod:
+ continue
+ # Imports from local context take precedence over those from the global context.
+@@ -232,7 +226,7 @@ class Depends(object):
+ except (AttributeError, KeyError):
+ pass
+
+- mod_key = "{0}.{1}".format(mod_name, func_name)
++ mod_key = "{}.{}".format(mod_name, func_name)
+
+ # if we don't have this module loaded, skip it!
+ if mod_key not in functions:
+@@ -267,9 +261,7 @@ def timing(function):
+ mod_name = function.__module__[16:]
else:
- return function(*args, **kwargs)
+ mod_name = function.__module__
+- fstr = "Function %s.%s took %.{0}f seconds to execute".format(
+- sys.float_info.dig
+- )
++ fstr = "Function %s.%s took %.{}f seconds to execute".format(sys.float_info.dig)
+ log.profile(fstr, mod_name, function.__name__, end_time - start_time)
+ return ret
+
+@@ -291,13 +283,13 @@ def memoize(func):
+ def _memoize(*args, **kwargs):
+ str_args = []
+ for arg in args:
+- if not isinstance(arg, six.string_types):
+- str_args.append(six.text_type(arg))
++ if not isinstance(arg, str):
++ str_args.append(str(arg))
+ else:
+ str_args.append(arg)
+
+ args_ = ",".join(
+- list(str_args) + ["{0}={1}".format(k, kwargs[k]) for k in sorted(kwargs)]
++ list(str_args) + ["{}={}".format(k, kwargs[k]) for k in sorted(kwargs)]
+ )
+ if args_ not in cache:
+ cache[args_] = func(*args, **kwargs)
+@@ -306,7 +298,7 @@ def memoize(func):
+ return _memoize
+
+
+-class _DeprecationDecorator(object):
++class _DeprecationDecorator:
+ """
+ Base mix-in class for the deprecation decorator.
+ Takes care of a common functionality, used in its derivatives.
+@@ -359,7 +351,7 @@ class _DeprecationDecorator(object):
+ try:
+ return self._function(*args, **kwargs)
+ except TypeError as error:
+- error = six.text_type(error).replace(
++ error = str(error).replace(
+ self._function, self._orig_f_name
+ ) # Hide hidden functions
+ log.error(
+@@ -374,7 +366,7 @@ class _DeprecationDecorator(object):
+ self._function.__name__,
+ error,
+ )
+- six.reraise(*sys.exc_info())
++ raise
+ else:
+ raise CommandExecutionError(
+ "Function is deprecated, but the successor function was not found."
+@@ -626,11 +618,11 @@ class _WithDeprecated(_DeprecationDecorator):
+
+ if use_deprecated and use_superseded:
+ raise SaltConfigurationError(
+- "Function '{0}' is mentioned both in deprecated "
++ "Function '{}' is mentioned both in deprecated "
+ "and superseded sections. Please remove any of that.".format(full_name)
+ )
+ old_function = self._globals.get(
+- self._with_name or "_{0}".format(function.__name__)
++ self._with_name or "_{}".format(function.__name__)
+ )
+ if self._policy == self.OPT_IN:
+ self._function = function if use_superseded else old_function
+@@ -782,12 +774,30 @@ def ensure_unicode_args(function):
+
+ @wraps(function)
+ def wrapped(*args, **kwargs):
+- if six.PY2:
+- return function(
+- *salt.utils.data.decode_list(args),
+- **salt.utils.data.decode_dict(kwargs)
+- )
+- else:
+- return function(*args, **kwargs)
++ return function(*args, **kwargs)
+
return wrapped
+
+
+def external(func):
-+ '''
++ """
+ Mark function as external.
+
+ :param func:
+ :return:
-+ '''
++ """
+
+ def f(*args, **kwargs):
-+ '''
++ """
+ Stub.
+
+ :param args:
+ :param kwargs:
+ :return:
-+ '''
++ """
+ return func(*args, **kwargs)
+
+ f.external = True
@@ -1003,405 +1288,507 @@ index 45d69072c7..b2abb15425 100644
+ return f
diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py
new file mode 100644
-index 0000000000..7bd652a90e
+index 0000000000..f9ce7be29a
--- /dev/null
+++ b/tests/unit/modules/test_saltsupport.py
-@@ -0,0 +1,394 @@
-+# -*- coding: utf-8 -*-
-+'''
+@@ -0,0 +1,496 @@
++"""
+ :codeauthor: Bo Maryniuk
-+'''
++"""
+
-+# Import Python libs
-+from __future__ import absolute_import, print_function, unicode_literals
+
-+# Import Salt Testing Libs
-+from tests.support.mixins import LoaderModuleMockMixin
-+from tests.support.unit import TestCase, skipIf
-+from tests.support.mock import patch, MagicMock, NO_MOCK, NO_MOCK_REASON
-+from salt.modules import saltsupport
-+import salt.exceptions
+import datetime
+
++import salt.exceptions
++from salt.modules import saltsupport
++from tests.support.mixins import LoaderModuleMockMixin
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.unit import TestCase, skipIf
++
+try:
+ import pytest
+except ImportError:
+ pytest = None
+
+
-+@skipIf(not bool(pytest), 'Pytest required')
++@skipIf(not bool(pytest), "Pytest required")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin):
-+ '''
++ """
+ Test cases for salt.modules.support::SaltSupportModule
-+ '''
++ """
++
+ def setup_loader_modules(self):
+ return {saltsupport: {}}
+
-+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage'))
-+ @patch('salt.modules.saltsupport.__grains__', {'fqdn': 'c-3po'})
-+ @patch('time.strftime', MagicMock(return_value='000'))
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"})
++ @patch("time.strftime", MagicMock(return_value="000"))
+ def test_get_archive_name(self):
-+ '''
++ """
+ Test archive name construction.
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ assert support._get_archive_name() == '/mnt/storage/c-3po-support-000-000.bz2'
++ assert support._get_archive_name() == "/mnt/storage/c-3po-support-000-000.bz2"
+
-+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage'))
-+ @patch('salt.modules.saltsupport.__grains__', {'fqdn': 'c-3po'})
-+ @patch('time.strftime', MagicMock(return_value='000'))
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"})
++ @patch("time.strftime", MagicMock(return_value="000"))
+ def test_get_custom_archive_name(self):
-+ '''
++ """
+ Test get custom archive name.
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ temp_name = support._get_archive_name(archname='Darth Wader')
-+ assert temp_name == '/mnt/storage/c-3po-darthwader-000-000.bz2'
-+ temp_name = support._get_archive_name(archname='Яйця з сіллю')
-+ assert temp_name == '/mnt/storage/c-3po-support-000-000.bz2'
-+ temp_name = support._get_archive_name(archname='!@#$%^&*()Fillip J. Fry')
-+ assert temp_name == '/mnt/storage/c-3po-fillipjfry-000-000.bz2'
++ temp_name = support._get_archive_name(archname="Darth Wader")
++ assert temp_name == "/mnt/storage/c-3po-darthwader-000-000.bz2"
++ temp_name = support._get_archive_name(archname="Яйця з сіллю")
++ assert temp_name == "/mnt/storage/c-3po-support-000-000.bz2"
++ temp_name = support._get_archive_name(archname="!@#$%^&*()Fillip J. Fry")
++ assert temp_name == "/mnt/storage/c-3po-fillipjfry-000-000.bz2"
+
-+ @patch('salt.cli.support.get_profiles', MagicMock(return_value={'message': 'Feature was not beta tested'}))
++ @patch(
++ "salt.cli.support.get_profiles",
++ MagicMock(return_value={"message": "Feature was not beta tested"}),
++ )
+ def test_profiles_format(self):
-+ '''
++ """
+ Test profiles format.
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
+ profiles = support.profiles()
-+ assert 'custom' in profiles
-+ assert 'standard' in profiles
-+ assert 'message' in profiles['standard']
-+ assert profiles['custom'] == []
-+ assert profiles['standard']['message'] == 'Feature was not beta tested'
++ assert "custom" in profiles
++ assert "standard" in profiles
++ assert "message" in profiles["standard"]
++ assert profiles["custom"] == []
++ assert profiles["standard"]["message"] == "Feature was not beta tested"
+
-+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage'))
-+ @patch('os.listdir', MagicMock(return_value=['one-support-000-000.bz2', 'two-support-111-111.bz2', 'trash.bz2',
-+ 'hostname-000-000.bz2', 'three-support-wrong222-222.bz2',
-+ '000-support-000-000.bz2']))
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch(
++ "os.listdir",
++ MagicMock(
++ return_value=[
++ "one-support-000-000.bz2",
++ "two-support-111-111.bz2",
++ "trash.bz2",
++ "hostname-000-000.bz2",
++ "three-support-wrong222-222.bz2",
++ "000-support-000-000.bz2",
++ ]
++ ),
++ )
+ def test_get_existing_archives(self):
-+ '''
++ """
+ Get list of existing archives.
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
+ out = support.archives()
+ assert len(out) == 3
-+ for name in ['/mnt/storage/one-support-000-000.bz2', '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/000-support-000-000.bz2']:
++ for name in [
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/000-support-000-000.bz2",
++ ]:
+ assert name in out
+
+ def test_last_archive(self):
-+ '''
++ """
+ Get last archive name
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
-+ assert support.last_archive() == '/mnt/storage/three-support-222-222.bz2'
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ assert support.last_archive() == "/mnt/storage/three-support-222-222.bz2"
+
-+ @patch('os.unlink', MagicMock(return_value=True))
++ @patch("os.unlink", MagicMock(return_value=True))
+ def test_delete_all_archives_success(self):
-+ '''
++ """
+ Test delete archives
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
+ ret = support.delete_archives()
-+ assert 'files' in ret
-+ assert 'errors' in ret
-+ assert not bool(ret['errors'])
-+ assert bool(ret['files'])
-+ assert isinstance(ret['errors'], dict)
-+ assert isinstance(ret['files'], dict)
++ assert "files" in ret
++ assert "errors" in ret
++ assert not bool(ret["errors"])
++ assert bool(ret["files"])
++ assert isinstance(ret["errors"], dict)
++ assert isinstance(ret["files"], dict)
+
+ for arc in support.archives():
-+ assert ret['files'][arc] == 'removed'
++ assert ret["files"][arc] == "removed"
+
-+ @patch('os.unlink', MagicMock(return_value=False, side_effect=[OSError('Decreasing electron flux'),
-+ OSError('Solar flares interference'),
-+ None]))
++ @patch(
++ "os.unlink",
++ MagicMock(
++ return_value=False,
++ side_effect=[
++ OSError("Decreasing electron flux"),
++ OSError("Solar flares interference"),
++ None,
++ ],
++ ),
++ )
+ def test_delete_all_archives_failure(self):
-+ '''
++ """
+ Test delete archives failure
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
+ ret = support.delete_archives()
-+ assert 'files' in ret
-+ assert 'errors' in ret
-+ assert bool(ret['errors'])
-+ assert bool(ret['files'])
-+ assert isinstance(ret['errors'], dict)
-+ assert isinstance(ret['files'], dict)
++ assert "files" in ret
++ assert "errors" in ret
++ assert bool(ret["errors"])
++ assert bool(ret["files"])
++ assert isinstance(ret["errors"], dict)
++ assert isinstance(ret["files"], dict)
+
-+ assert ret['files']['/mnt/storage/three-support-222-222.bz2'] == 'removed'
-+ assert ret['files']['/mnt/storage/one-support-000-000.bz2'] == 'left'
-+ assert ret['files']['/mnt/storage/two-support-111-111.bz2'] == 'left'
++ assert ret["files"]["/mnt/storage/three-support-222-222.bz2"] == "removed"
++ assert ret["files"]["/mnt/storage/one-support-000-000.bz2"] == "left"
++ assert ret["files"]["/mnt/storage/two-support-111-111.bz2"] == "left"
+
-+ assert len(ret['errors']) == 2
-+ assert ret['errors']['/mnt/storage/one-support-000-000.bz2'] == 'Decreasing electron flux'
-+ assert ret['errors']['/mnt/storage/two-support-111-111.bz2'] == 'Solar flares interference'
++ assert len(ret["errors"]) == 2
++ assert (
++ ret["errors"]["/mnt/storage/one-support-000-000.bz2"]
++ == "Decreasing electron flux"
++ )
++ assert (
++ ret["errors"]["/mnt/storage/two-support-111-111.bz2"]
++ == "Solar flares interference"
++ )
+
+ def test_format_sync_stats(self):
-+ '''
++ """
+ Test format rsync stats for preserving ordering of the keys
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ stats = '''
++ stats = """
+robot: Bender
+cute: Leela
+weird: Zoidberg
+professor: Farnsworth
-+ '''
-+ f_stats = support.format_sync_stats({'retcode': 0, 'stdout': stats})
-+ assert list(f_stats['transfer'].keys()) == ['robot', 'cute', 'weird', 'professor']
-+ assert list(f_stats['transfer'].values()) == ['Bender', 'Leela', 'Zoidberg', 'Farnsworth']
++ """
++ f_stats = support.format_sync_stats({"retcode": 0, "stdout": stats})
++ assert list(f_stats["transfer"].keys()) == [
++ "robot",
++ "cute",
++ "weird",
++ "professor",
++ ]
++ assert list(f_stats["transfer"].values()) == [
++ "Bender",
++ "Leela",
++ "Zoidberg",
++ "Farnsworth",
++ ]
+
-+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy')))
-+ @patch('os.close', MagicMock())
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.close", MagicMock())
+ def test_sync_no_archives_failure(self):
-+ '''
++ """
+ Test sync failed when no archives specified.
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
+ support.archives = MagicMock(return_value=[])
+
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
-+ support.sync('group-name')
-+ assert 'No archives found to transfer' in str(err)
++ support.sync("group-name")
++ assert "No archives found to transfer" in str(err)
+
-+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy')))
-+ @patch('os.path.exists', MagicMock(return_value=False))
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
+ def test_sync_last_picked_archive_not_found_failure(self):
-+ '''
++ """
+ Test sync failed when archive was not found (last picked)
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
+
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
-+ support.sync('group-name')
-+ assert ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found' in str(err)
++ support.sync("group-name")
++ assert (
++ ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found'
++ in str(err)
++ )
+
-+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy')))
-+ @patch('os.path.exists', MagicMock(return_value=False))
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
+ def test_sync_specified_archive_not_found_failure(self):
-+ '''
++ """
+ Test sync failed when archive was not found (last picked)
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
+
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
-+ support.sync('group-name', name='lost.bz2')
++ support.sync("group-name", name="lost.bz2")
+ assert ' Support archive "lost.bz2" was not found' in str(err)
+
-+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy')))
-+ @patch('os.path.exists', MagicMock(return_value=False))
-+ @patch('os.close', MagicMock())
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
++ @patch("os.close", MagicMock())
+ def test_sync_no_archive_to_transfer_failure(self):
-+ '''
++ """
+ Test sync failed when no archive was found to transfer
+
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
+ support.archives = MagicMock(return_value=[])
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
-+ support.sync('group-name', all=True)
-+ assert 'No archives found to transfer' in str(err)
++ support.sync("group-name", all=True)
++ assert "No archives found to transfer" in str(err)
+
-+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy')))
-+ @patch('os.path.exists', MagicMock(return_value=True))
-+ @patch('os.close', MagicMock())
-+ @patch('os.write', MagicMock())
-+ @patch('os.unlink', MagicMock())
-+ @patch('salt.modules.saltsupport.__salt__', {'rsync.rsync': MagicMock(return_value={})})
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=True))
++ @patch("os.close", MagicMock())
++ @patch("os.write", MagicMock())
++ @patch("os.unlink", MagicMock())
++ @patch(
++ "salt.modules.saltsupport.__salt__", {"rsync.rsync": MagicMock(return_value={})}
++ )
+ def test_sync_archives(self):
-+ '''
++ """
+ Test sync archives
+ :return:
-+ '''
++ """
+ support = saltsupport.SaltSupportModule()
-+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2',
-+ '/mnt/storage/two-support-111-111.bz2',
-+ '/mnt/storage/three-support-222-222.bz2'])
-+ out = support.sync('group-name', host='buzz', all=True, move=False)
-+ assert 'files' in out
-+ for arc_name in out['files']:
-+ assert out['files'][arc_name] == 'copied'
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ out = support.sync("group-name", host="buzz", all=True, move=False)
++ assert "files" in out
++ for arc_name in out["files"]:
++ assert out["files"][arc_name] == "copied"
+ assert saltsupport.os.unlink.call_count == 1
-+ assert saltsupport.os.unlink.call_args_list[0][0][0] == 'dummy'
++ assert saltsupport.os.unlink.call_args_list[0][0][0] == "dummy"
+ calls = []
+ for call in saltsupport.os.write.call_args_list:
+ assert len(call) == 2
+ calls.append(call[0])
-+ assert calls == [(0, b'one-support-000-000.bz2'),
-+ (0, b'\n'), (0, b'two-support-111-111.bz2'), (0, b'\n'),
-+ (0, b'three-support-222-222.bz2'), (0, b'\n')]
++ assert calls == [
++ (0, b"one-support-000-000.bz2"),
++ (0, b"\n"),
++ (0, b"two-support-111-111.bz2"),
++ (0, b"\n"),
++ (0, b"three-support-222-222.bz2"),
++ (0, b"\n"),
++ ]
+
-+ @patch('salt.modules.saltsupport.__pillar__', {})
-+ @patch('salt.modules.saltsupport.SupportDataCollector', MagicMock())
++ @patch("salt.modules.saltsupport.__pillar__", {})
++ @patch("salt.modules.saltsupport.SupportDataCollector", MagicMock())
+ def test_run_support(self):
-+ '''
++ """
+ Test run support
+ :return:
-+ '''
-+ saltsupport.SupportDataCollector(None, None).archive_path = 'dummy'
++ """
++ saltsupport.SupportDataCollector(None, None).archive_path = "dummy"
+ support = saltsupport.SaltSupportModule()
+ support.collect_internal_data = MagicMock()
+ support.collect_local_data = MagicMock()
+ out = support.run()
+
-+ for section in ['messages', 'archive']:
++ for section in ["messages", "archive"]:
+ assert section in out
-+ assert out['archive'] == 'dummy'
-+ for section in ['warning', 'error', 'info']:
-+ assert section in out['messages']
++ assert out["archive"] == "dummy"
++ for section in ["warning", "error", "info"]:
++ assert section in out["messages"]
+ ld_call = support.collect_local_data.call_args_list[0][1]
-+ assert 'profile' in ld_call
-+ assert ld_call['profile'] == 'default'
-+ assert 'profile_source' in ld_call
-+ assert ld_call['profile_source'] is None
++ assert "profile" in ld_call
++ assert ld_call["profile"] == "default"
++ assert "profile_source" in ld_call
++ assert ld_call["profile_source"] is None
+ assert support.collector.open.call_count == 1
+ assert support.collector.close.call_count == 1
+ assert support.collect_internal_data.call_count == 1
+
+
-+@skipIf(not bool(pytest), 'Pytest required')
++@skipIf(not bool(pytest), "Pytest required")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class LogCollectorTestCase(TestCase, LoaderModuleMockMixin):
-+ '''
++ """
+ Test cases for salt.modules.support::LogCollector
-+ '''
++ """
++
+ def setup_loader_modules(self):
+ return {saltsupport: {}}
+
+ def test_msg(self):
-+ '''
++ """
+ Test message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
-+ msg = 'Upgrading /dev/null device'
++ with patch("datetime.datetime", utcmock):
++ msg = "Upgrading /dev/null device"
+ out = saltsupport.LogCollector()
-+ out.msg(msg, title='Here')
++ out.msg(msg, title="Here")
+ assert saltsupport.LogCollector.INFO in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {0}: {1}'.format('Here', msg)]
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}: {}".format("Here", msg)
++ ]
+
+ def test_info_message(self):
-+ '''
++ """
+ Test info message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
-+ msg = 'SIMM crosstalk during tectonic stress'
++ with patch("datetime.datetime", utcmock):
++ msg = "SIMM crosstalk during tectonic stress"
+ out = saltsupport.LogCollector()
+ out.info(msg)
+ assert saltsupport.LogCollector.INFO in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {}'.format(msg)]
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
+
+ def test_put_message(self):
-+ '''
++ """
+ Test put message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
-+ msg = 'Webmaster kidnapped by evil cult'
++ with patch("datetime.datetime", utcmock):
++ msg = "Webmaster kidnapped by evil cult"
+ out = saltsupport.LogCollector()
+ out.put(msg)
+ assert saltsupport.LogCollector.INFO in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {}'.format(msg)]
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
+
+ def test_warning_message(self):
-+ '''
++ """
+ Test warning message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
-+ msg = 'Your e-mail is now being delivered by USPS'
++ with patch("datetime.datetime", utcmock):
++ msg = "Your e-mail is now being delivered by USPS"
+ out = saltsupport.LogCollector()
+ out.warning(msg)
+ assert saltsupport.LogCollector.WARNING in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.WARNING]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.WARNING] == ['00:00:00.000 - {}'.format(msg)]
++ assert (
++ type(out.messages[saltsupport.LogCollector.WARNING])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.WARNING] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
+
+ def test_error_message(self):
-+ '''
++ """
+ Test error message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
-+ msg = 'Learning curve appears to be fractal'
++ with patch("datetime.datetime", utcmock):
++ msg = "Learning curve appears to be fractal"
+ out = saltsupport.LogCollector()
+ out.error(msg)
+ assert saltsupport.LogCollector.ERROR in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.ERROR]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.ERROR] == ['00:00:00.000 - {}'.format(msg)]
++ assert (
++ type(out.messages[saltsupport.LogCollector.ERROR])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.ERROR] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
+
+ def test_hl_message(self):
-+ '''
++ """
+ Test highlighter message to the log collector.
+
+ :return:
-+ '''
++ """
+ utcmock = MagicMock()
+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
-+ with patch('datetime.datetime', utcmock):
++ with patch("datetime.datetime", utcmock):
+ out = saltsupport.LogCollector()
-+ out.highlight('The {} TTYs became {} TTYs and vice versa', 'real', 'pseudo')
++ out.highlight("The {} TTYs became {} TTYs and vice versa", "real", "pseudo")
+ assert saltsupport.LogCollector.INFO in out.messages
-+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList
-+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - The real TTYs became '
-+ 'pseudo TTYs and vice versa']
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - The real TTYs became " "pseudo TTYs and vice versa"
++ ]
--
-2.16.4
+2.29.2
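
A note on what `test_get_custom_archive_name` above pins down: ASCII letters and digits survive and are lowercased, everything else is stripped, and a name that loses all of its characters (the Ukrainian "Яйця з сіллю", "eggs with salt") falls back to `support`. Below is a minimal sketch of that rule, assuming a simple regex; the real method lives in `salt/modules/saltsupport.py` and its exact implementation may differ. The two mocked `time.strftime` calls in the tests are what produce the `-000-000` suffix in the expected paths.

```python
import re
import tempfile
import time


def get_archive_name(hostname, archname=None):
    """Sketch of the naming rule the tests pin down: keep ASCII
    alphanumerics, lowercase them, fall back to 'support' if nothing
    survives, then join hostname, name and two timestamps."""
    name = re.sub(r"[^a-zA-Z0-9]", "", archname or "").lower() or "support"
    return "{}/{}-{}-{}-{}.bz2".format(
        tempfile.gettempdir(), hostname, name,
        time.strftime("%Y%m%d"), time.strftime("%H%M%S"),
    )


print(get_archive_name("c-3po", "Darth Wader"))              # ...-darthwader-...
print(get_archive_name("c-3po", "Яйця з сіллю"))             # ...-support-...
print(get_archive_name("c-3po", "!@#$%^&*()Fillip J. Fry"))  # ...-fillipjfry-...
```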
diff --git a/add-virt.all_capabilities.patch b/add-virt.all_capabilities.patch
index a5dd125..794c5fd 100644
--- a/add-virt.all_capabilities.patch
+++ b/add-virt.all_capabilities.patch
@@ -1,4 +1,4 @@
-From 82ddc9d93f6c0d6bc1e8dc6ebd30d6809d9f4d8f Mon Sep 17 00:00:00 2001
+From ca2ad86438293af6715a9890b168f159ff4d9b9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Thu, 18 Oct 2018 13:32:59 +0200
Subject: [PATCH] Add virt.all_capabilities
@@ -10,100 +10,37 @@ before calling virt.domain_capabilities for each of them.
This commit embeds all this logic to get them all in a single
virt.all_capabilities call.
---
- salt/modules/virt.py | 107 +++++++++++++++++++++++++++++-----------
- tests/unit/modules/test_virt.py | 56 +++++++++++++++++++++
- 2 files changed, 134 insertions(+), 29 deletions(-)
+ salt/modules/virt.py | 73 +++++++++++++++++++++++++++++++--
+ tests/unit/modules/test_virt.py | 2 +-
+ 2 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index a2412bb745..3889238ecd 100644
+index 313181c49e..362c2a68b5 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -4254,37 +4254,10 @@ def _parse_caps_loader(node):
+@@ -5568,11 +5568,76 @@ def _parse_domain_caps(caps):
return result
--def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
+def _parse_domain_caps(caps):
- '''
-- Return the domain capabilities given an emulator, architecture, machine or virtualization type.
--
-- .. versionadded:: 2019.2.0
--
-- :param emulator: return the capabilities for the given emulator binary
-- :param arch: return the capabilities for the given CPU architecture
-- :param machine: return the capabilities for the given emulated machine type
-- :param domain: return the capabilities for the given virtualization type.
-- :param connection: libvirt connection URI, overriding defaults
-- :param username: username to connect with, overriding defaults
-- :param password: password to connect with, overriding defaults
--
-- The list of the possible emulator, arch, machine and domain can be found in
-- the host capabilities output.
--
-- If none of the parameters is provided the libvirt default domain capabilities
-- will be returned.
--
-- CLI Example:
--
-- .. code-block:: bash
--
-- salt '*' virt.domain_capabilities arch='x86_64' domain='kvm'
--
++ """
+ Parse the XML document of domain capabilities into a structure.
- '''
-- conn = __get_conn(**kwargs)
-- caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0))
-- conn.close()
--
- result = {
- 'emulator': caps.find('path').text if caps.find('path') is not None else None,
- 'domain': caps.find('domain').text if caps.find('domain') is not None else None,
-@@ -4324,6 +4297,82 @@ def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **k
- return result
-
-
-+def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
-+ '''
-+ Return the domain capabilities given an emulator, architecture, machine or virtualization type.
-+
-+ .. versionadded:: Fluorine
-+
-+ :param emulator: return the capabilities for the given emulator binary
-+ :param arch: return the capabilities for the given CPU architecture
-+ :param machine: return the capabilities for the given emulated machine type
-+ :param domain: return the capabilities for the given virtualization type.
-+ :param connection: libvirt connection URI, overriding defaults
-+ :param username: username to connect with, overriding defaults
-+ :param password: password to connect with, overriding defaults
-+
-+ The list of the possible emulator, arch, machine and domain can be found in
-+ the host capabilities output.
-+
-+ If none of the parameters is provided, the libvirt default one is returned.
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' virt.domain_capabilities arch='x86_64' domain='kvm'
-+
-+ '''
-+ conn = __get_conn(**kwargs)
-+ result = []
-+ try:
-+ caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0))
-+ result = _parse_domain_caps(caps)
-+ finally:
-+ conn.close()
-+
-+ return result
++ """
++ result = {
++ "emulator": caps.find("path").text if caps.find("path") is not None else None,
++ "domain": caps.find("domain").text if caps.find("domain") is not None else None,
++ "machine": caps.find("machine").text
++ if caps.find("machine") is not None
++ else None,
++ "arch": caps.find("arch").text if caps.find("arch") is not None else None,
++ }
+
+
+def all_capabilities(**kwargs):
-+ '''
++ """
+ Return the host and domain capabilities in a single call.
+
-+ .. versionadded:: Neon
++ .. versionadded:: 3001
+
+ :param connection: libvirt connection URI, overriding defaults
+ :param username: username to connect with, overriding defaults
@@ -115,100 +52,94 @@ index a2412bb745..3889238ecd 100644
+
+ salt '*' virt.all_capabilities
+
-+ '''
++ """
+ conn = __get_conn(**kwargs)
-+ result = {}
+ try:
+ host_caps = ElementTree.fromstring(conn.getCapabilities())
-+ domains = [[(guest.get('arch', {}).get('name', None), key)
-+ for key in guest.get('arch', {}).get('domains', {}).keys()]
-+ for guest in [_parse_caps_guest(guest) for guest in host_caps.findall('guest')]]
++ domains = [
++ [
++ (guest.get("arch", {}).get("name", None), key)
++ for key in guest.get("arch", {}).get("domains", {}).keys()
++ ]
++ for guest in [
++ _parse_caps_guest(guest) for guest in host_caps.findall("guest")
++ ]
++ ]
+ flattened = [pair for item in (x for x in domains) for pair in item]
+ result = {
-+ 'host': {
-+ 'host': _parse_caps_host(host_caps.find('host')),
-+ 'guests': [_parse_caps_guest(guest) for guest in host_caps.findall('guest')]
-+ },
-+ 'domains': [_parse_domain_caps(ElementTree.fromstring(
-+ conn.getDomainCapabilities(None, arch, None, domain)))
-+ for (arch, domain) in flattened]}
++ "host": {
++ "host": _parse_caps_host(host_caps.find("host")),
++ "guests": [
++ _parse_caps_guest(guest) for guest in host_caps.findall("guest")
++ ],
++ },
++ "domains": [
++ _parse_domain_caps(
++ ElementTree.fromstring(
++ conn.getDomainCapabilities(None, arch, None, domain)
++ )
++ )
++ for (arch, domain) in flattened
++ ],
++ }
++ return result
+ finally:
+ conn.close()
+
++
+ def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
+ """
+ Return the domain capabilities given an emulator, architecture, machine or virtualization type.
+
+- .. versionadded:: 2019.2.0
++ .. versionadded:: Fluorine
+
+ :param emulator: return the capabilities for the given emulator binary
+ :param arch: return the capabilities for the given CPU architecture
+@@ -5611,7 +5676,7 @@ def all_capabilities(**kwargs):
+ """
+ Return the host and domain capabilities in a single call.
+
+- .. versionadded:: 3001
++ .. versionadded:: Neon
+
+ :param connection: libvirt connection URI, overriding defaults
+ :param username: username to connect with, overriding defaults
+@@ -5625,6 +5690,7 @@ def all_capabilities(**kwargs):
+
+ """
+ conn = __get_conn(**kwargs)
++ result = {}
+ try:
+ host_caps = ElementTree.fromstring(conn.getCapabilities())
+ domains = [
+@@ -5653,10 +5719,11 @@ def all_capabilities(**kwargs):
+ for (arch, domain) in flattened
+ ],
+ }
+- return result
+ finally:
+ conn.close()
+
+ return result
+
-+
- def cpu_baseline(full=False, migratable=False, out='libvirt', **kwargs):
- '''
- Return the optimal 'custom' CPU baseline config for VM's on this minion
+
+ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
+ """
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index 32f4302e5f..94372c6d72 100644
+index cce107c9e4..e9e73d7b5d 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -2216,6 +2216,62 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4063,7 +4063,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"]
+ )
+ self.assertEqual(
+- {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]},
++ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]}
+ )
- self.assertEqual(expected, caps)
-
-+ def test_all_capabilities(self):
-+ '''
-+ Test the virt.domain_capabilities default output
-+ '''
-+ domainXml = '''
-+
-+ /usr/bin/qemu-system-x86_64
-+ kvm
-+ virt-2.12
-+ x86_64
-+
-+
-+
-+ '''
-+ hostXml = '''
-+
-+
-+ 44454c4c-3400-105a-8033-b3c04f4b344a
-+
-+ x86_64
-+ Nehalem
-+ Intel
-+
-+
-+
-+
-+
-+ hvm
-+
-+ 64
-+ /usr/bin/qemu-system-x86_64
-+ pc-i440fx-2.6
-+ pc
-+ pc-0.12
-+
-+
-+ /usr/bin/qemu-kvm
-+ pc-i440fx-2.6
-+ pc
-+ pc-0.12
-+
-+
-+
-+
-+ '''
-+
-+ # pylint: disable=no-member
-+ self.mock_conn.getCapabilities.return_value = hostXml
-+ self.mock_conn.getDomainCapabilities.side_effect = [
-+ domainXml, domainXml.replace('kvm', 'qemu')]
-+ # pylint: enable=no-member
-+
-+ caps = virt.all_capabilities()
-+ self.assertEqual('44454c4c-3400-105a-8033-b3c04f4b344a', caps['host']['host']['uuid'])
-+ self.assertEqual(set(['qemu', 'kvm']), set([domainCaps['domain'] for domainCaps in caps['domains']]))
-+
def test_network_tag(self):
- '''
- Test virt._get_net_xml() with VLAN tag
--
-2.16.4
+2.29.2
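
The heart of the refactored `virt.all_capabilities` is the flattening step: read the host capabilities once, collect every (arch, domain) pair the guests advertise, then call `getDomainCapabilities` once per pair. A stripped-down sketch of just that flattening, using a hand-written XML snippet in place of a live libvirt connection:

```python
from xml.etree import ElementTree

# Stand-in for conn.getCapabilities() output; heavily abridged.
HOST_XML = """
<capabilities>
  <host><uuid>44454c4c-3400-105a-8033-b3c04f4b344a</uuid></host>
  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <domain type='qemu'/>
      <domain type='kvm'/>
    </arch>
  </guest>
</capabilities>
"""


def flatten_guest_domains(host_caps):
    """Yield one (arch, domain) pair per domain each guest arch offers,
    mirroring the nested comprehension in all_capabilities above."""
    for guest in host_caps.findall("guest"):
        arch = guest.find("arch")
        for dom in arch.findall("domain"):
            yield arch.get("name"), dom.get("type")


pairs = list(flatten_guest_domains(ElementTree.fromstring(HOST_XML)))
print(pairs)  # [('x86_64', 'qemu'), ('x86_64', 'kvm')]
# The real function then calls conn.getDomainCapabilities(None, arch, None,
# domain) for each pair and parses the result with _parse_domain_caps().
```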
diff --git a/adds-explicit-type-cast-for-port.patch b/adds-explicit-type-cast-for-port.patch
index abb13cb..176d530 100644
--- a/adds-explicit-type-cast-for-port.patch
+++ b/adds-explicit-type-cast-for-port.patch
@@ -1,4 +1,4 @@
-From 2182f2cbc835fee8a95101ce0c722d582b7456aa Mon Sep 17 00:00:00 2001
+From 12d67e0cfa54399f3a0b6ae0d4faa09793fa2b0f Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Wed, 1 Apr 2020 16:13:23 +0200
Subject: [PATCH] Adds explicit type cast for port
@@ -12,22 +12,22 @@ The type casting to int solves this issue.
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/utils/network.py b/salt/utils/network.py
-index d6543ff160..def997f3dc 100644
+index 25b2d06758..1705a5809d 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
-@@ -1457,9 +1457,9 @@ def _netlink_tool_remote_on(port, which_end):
- local_host, local_port = chunks[3].rsplit(':', 1)
- remote_host, remote_port = chunks[4].rsplit(':', 1)
+@@ -1626,9 +1626,9 @@ def _netlink_tool_remote_on(port, which_end):
+ local_host, local_port = chunks[3].rsplit(":", 1)
+ remote_host, remote_port = chunks[4].rsplit(":", 1)
-- if which_end == 'remote_port' and int(remote_port) != port:
-+ if which_end == 'remote_port' and int(remote_port) != int(port):
+- if which_end == "remote_port" and int(remote_port) != port:
++ if which_end == "remote_port" and int(remote_port) != int(port):
continue
-- if which_end == 'local_port' and int(local_port) != port:
-+ if which_end == 'local_port' and int(local_port) != int(port):
+- if which_end == "local_port" and int(local_port) != port:
++ if which_end == "local_port" and int(local_port) != int(port):
continue
remotes.add(remote_host.strip("[]"))
--
-2.16.4
+2.29.2
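
A two-line repro of the bug this patch fixes: when `port` reaches `_netlink_tool_remote_on` as a string (for example straight from user input), comparing it to the `int()` parsed from the `ss` output can never be equal on Python 3, so every connection line was skipped:

```python
port = "4505"         # port as it may arrive from the caller: a string
remote_port = "4505"  # field split out of an ss/netlink output line

print(int(remote_port) != port)       # True  -- int vs str, never equal
print(int(remote_port) != int(port))  # False -- the fixed comparison
```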
diff --git a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch b/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
index f3ccc21..22fdf66 100644
--- a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
+++ b/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
@@ -1,4 +1,4 @@
-From 206a2f7c4c1104f2f35dfa2c0b775bef4adc5b91 Mon Sep 17 00:00:00 2001
+From 125f973014b8d5ffa13ae7dd231043e39af75ea0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 3 Jul 2019 09:34:50 +0100
@@ -7,62 +7,23 @@ Subject: [PATCH] Allow passing kwargs to pkg.list_downloaded
Add unit test for pkg.list_downloaded with kwargs
---
- salt/modules/zypperpkg.py | 2 +-
- tests/unit/modules/test_zypperpkg.py | 27 +++++++++++++++++++++++++++
- 2 files changed, 28 insertions(+), 1 deletion(-)
+ salt/modules/zypperpkg.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 582caffb59..3760b525e7 100644
+index 75cb5ce4a8..c996935bff 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -2557,7 +2557,7 @@ def download(*packages, **kwargs):
+@@ -2754,7 +2754,7 @@ def download(*packages, **kwargs):
)
-def list_downloaded(root=None):
+def list_downloaded(root=None, **kwargs):
- '''
+ """
.. versionadded:: 2017.7.0
-diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 3a6466f061..12c22bfcb2 100644
---- a/tests/unit/modules/test_zypperpkg.py
-+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -767,6 +767,33 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- self.assertEqual(len(list_patches), 3)
- self.assertDictEqual(list_patches, PATCHES_RET)
-
-+ @patch('salt.utils.path.os_walk', MagicMock(return_value=[('test', 'test', 'test')]))
-+ @patch('os.path.getsize', MagicMock(return_value=123456))
-+ @patch('os.path.getctime', MagicMock(return_value=1234567890.123456))
-+ @patch('fnmatch.filter', MagicMock(return_value=['/var/cache/zypper/packages/foo/bar/test_package.rpm']))
-+ def test_list_downloaded_with_kwargs(self):
-+ '''
-+ Test downloaded packages listing.
-+
-+ :return:
-+ '''
-+ DOWNLOADED_RET = {
-+ 'test-package': {
-+ '1.0': {
-+ 'path': '/var/cache/zypper/packages/foo/bar/test_package.rpm',
-+ 'size': 123456,
-+ 'creation_date_time_t': 1234567890,
-+ 'creation_date_time': '2009-02-13T23:31:30',
-+ }
-+ }
-+ }
-+
-+ with patch.dict(zypper.__salt__, {'lowpkg.bin_pkg_info': MagicMock(return_value={'name': 'test-package',
-+ 'version': '1.0'})}):
-+ list_downloaded = zypper.list_downloaded(kw1=True, kw2=False)
-+ self.assertEqual(len(list_downloaded), 1)
-+ self.assertDictEqual(list_downloaded, DOWNLOADED_RET)
-+
- @patch('salt.utils.path.os_walk', MagicMock(return_value=[('test', 'test', 'test')]))
- @patch('os.path.getsize', MagicMock(return_value=123456))
- @patch('os.path.getctime', MagicMock(return_value=1234567890.123456))
--
-2.16.4
+2.29.2
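
The one-line signature change is load-bearing: without `**kwargs`, a caller that forwards extra keyword arguments (as Salt's module-call plumbing can) hits a TypeError instead of a package listing. A minimal illustration, with a hypothetical `saltenv` extra argument:

```python
def list_downloaded_old(root=None):
    return {"root": root}


def list_downloaded_new(root=None, **kwargs):  # extra kwargs are ignored
    return {"root": root}


extra = {"saltenv": "base"}  # hypothetical forwarded argument

try:
    list_downloaded_old(**extra)
except TypeError as err:
    print("old signature:", err)  # unexpected keyword argument 'saltenv'

print("new signature:", list_downloaded_new(**extra))  # {'root': None}
```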
diff --git a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch b/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
index d5fd856..e11f91a 100644
--- a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
+++ b/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
@@ -1,4 +1,4 @@
-From e1b4dda1eed90b4c6495b7a1fb047052f2cc5d5c Mon Sep 17 00:00:00 2001
+From 6111853f13c9c1e8eaaa1acd521cd3abfbfff766 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 13 Aug 2020 13:49:16 +0100
@@ -15,1810 +15,77 @@ Add new unit test for ansible.playbooks
Add unit tests for ansible.playbooks state
---
- salt/modules/ansiblegate.py | 10 +-
- salt/states/ansiblegate.py | 51 +-
- .../unit/files/playbooks/failed_example.json | 748 ++++++++++++++++
- .../unit/files/playbooks/success_example.json | 803 ++++++++++++++++++
- tests/unit/modules/test_ansiblegate.py | 15 +
- tests/unit/states/test_ansiblegate.py | 113 +++
- 6 files changed, 1717 insertions(+), 23 deletions(-)
- create mode 100644 tests/unit/files/playbooks/failed_example.json
- create mode 100644 tests/unit/files/playbooks/success_example.json
- create mode 100644 tests/unit/states/test_ansiblegate.py
+ tests/unit/modules/test_ansiblegate.py | 12 ++++++++++++
+ tests/unit/states/test_ansiblegate.py | 7 ++++---
+ 2 files changed, 16 insertions(+), 3 deletions(-)
-diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py
-index 8e28fcafa3..e76809d4ba 100644
---- a/salt/modules/ansiblegate.py
-+++ b/salt/modules/ansiblegate.py
-@@ -381,9 +381,9 @@ def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None,
- 'cwd': rundir,
- 'cmd': ' '.join(command)
- }
-- ret = __salt__['cmd.run_all'](**cmd_kwargs)
-- log.debug('Ansible Playbook Return: %s', ret)
-- retdata = json.loads(ret['stdout'])
-- if ret['retcode']:
-- __context__['retcode'] = ret['retcode']
-+ ret = __salt__["cmd.run_all"](**cmd_kwargs)
-+ log.debug("Ansible Playbook Return: %s", ret)
-+ retdata = json.loads(ret["stdout"])
-+ if 'retcode' in ret:
-+ __context__["retcode"] = retdata["retcode"] = ret["retcode"]
- return retdata
-diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py
-index b42dc02938..d268e492e2 100644
---- a/salt/states/ansiblegate.py
-+++ b/salt/states/ansiblegate.py
-@@ -120,9 +120,11 @@ def _changes(plays):
- task_changes = {}
- for task in play['tasks']:
- host_changes = {}
-- for host, data in six.iteritems(task['hosts']):
-- if data['changed'] is True:
-- host_changes[host] = data.get('diff', data.get('changes', {}))
-+ for host, data in six.iteritems(task["hosts"]):
-+ if data["changed"] is True:
-+ host_changes[host] = data.get("diff", data.get("changes", {}))
-+ elif any(x in data for x in ["failed", "skipped", "unreachable"]):
-+ host_changes[host] = data.get("results", data.get("msg", {}))
- if host_changes:
- task_changes[task['task']['name']] = host_changes
- if task_changes:
-@@ -177,20 +179,33 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
- if not isinstance(ansible_kwargs, dict):
- log.debug('Setting ansible_kwargs to empty dict: %s', ansible_kwargs)
- ansible_kwargs = {}
-- checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True, diff=True, **ansible_kwargs)
-- if all(not check['changed'] for check in six.itervalues(checks['stats'])):
-- ret['comment'] = 'No changes to be made from playbook {0}'.format(name)
-- ret['result'] = True
-- elif __opts__['test']:
-- ret['comment'] = 'Changes will be made from playbook {0}'.format(name)
-- ret['result'] = None
-- ret['changes'] = _changes(checks)
-+ if __opts__["test"]:
-+ checks = __salt__["ansible.playbooks"](name, rundir=rundir, check=True, diff=True, **ansible_kwargs)
-+ if all(not check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(checks["stats"])):
-+ ret["comment"] = "No changes to be made from playbook {0}".format(name)
-+ ret["result"] = True
-+ elif any(check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(checks["stats"])):
-+ ret["comment"] = "Changes will be made from playbook {0}".format(name)
-+ ret["result"] = None
-+ ret["changes"] = _changes(checks)
-+ else:
-+ ret["comment"] = "There were some issues running the playbook {0}".format(name)
-+ ret["result"] = False
-+ ret["changes"] = _changes(checks)
- else:
-- results = __salt__['ansible.playbooks'](name, rundir=rundir, diff=True, **ansible_kwargs)
-- ret['comment'] = 'Changes were made by playbook {0}'.format(name)
-- ret['changes'] = _changes(results)
-- ret['result'] = all(
-- not check['failures'] and not check['unreachable']
-- for check in six.itervalues(checks['stats'])
-- )
-+ results = __salt__["ansible.playbooks"](name, rundir=rundir, diff=True, **ansible_kwargs)
-+ if all(not check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(results["stats"])):
-+ ret["comment"] = "No changes to be made from playbook {0}".format(name)
-+ ret["result"] = True
-+ ret["changes"] = _changes(results)
-+ else:
-+ ret["changes"] = _changes(results)
-+ ret["result"] = all(
-+ not check["failures"] and not check["unreachable"] and not check["skipped"]
-+ for check in six.itervalues(results["stats"])
-+ )
-+ if ret["result"]:
-+ ret["comment"] = "Changes were made by playbook {0}".format(name)
-+ else:
-+ ret["comment"] = "There were some issues running the playbook {0}".format(name)
- return ret
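
The reworked `_changes()` hunk above reduces to a per-host scan: report a diff when the host changed, and surface the task's `results` or `msg` when it failed, was skipped, or was unreachable. A self-contained sketch of that scan with made-up host data:

```python
def host_changes(task_hosts):
    """Per-host scan mirroring the patched _changes() loop above."""
    changes = {}
    for host, data in task_hosts.items():
        if data.get("changed") is True:
            changes[host] = data.get("diff", data.get("changes", {}))
        elif any(x in data for x in ("failed", "skipped", "unreachable")):
            changes[host] = data.get("results", data.get("msg", {}))
    return changes


print(host_changes({
    "web1": {"changed": True, "diff": {"before": "a", "after": "b"}},
    "web2": {"changed": False, "failed": True, "msg": "No package rsyndc"},
    "web3": {"changed": False},
}))
# {'web1': {'before': 'a', 'after': 'b'}, 'web2': 'No package rsyndc'}
```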
-diff --git a/tests/unit/files/playbooks/failed_example.json b/tests/unit/files/playbooks/failed_example.json
-new file mode 100644
-index 0000000000..9ee8ba25b7
---- /dev/null
-+++ b/tests/unit/files/playbooks/failed_example.json
-@@ -0,0 +1,748 @@
-+{
-+ "custom_stats": {},
-+ "global_custom_stats": {},
-+ "plays": [
-+ {
-+ "play": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:33.889442Z",
-+ "start": "2020-08-14T11:55:30.460145Z"
-+ },
-+ "id": "5254001e-9fce-297d-21cd-000000000007",
-+ "name": "py2hosts"
-+ },
-+ "tasks": [
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "_ansible_verbose_override": true,
-+ "action": "gather_facts",
-+ "ansible_facts": {
-+ "ansible_all_ipv4_addresses": [
-+ "192.168.122.29"
-+ ],
-+ "ansible_all_ipv6_addresses": [
-+ "fe80::5054:ff:fe3e:4ce"
-+ ],
-+ "ansible_apparmor": {
-+ "status": "disabled"
-+ },
-+ "ansible_architecture": "x86_64",
-+ "ansible_bios_date": "04/01/2014",
-+ "ansible_bios_version": "rel-1.13.0-0-gf21b5a4-rebuilt.opensuse.org",
-+ "ansible_cmdline": {
-+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64",
-+ "LANG": "en_US.UTF-8",
-+ "console": "ttyS0,115200",
-+ "crashkernel": "auto",
-+ "quiet": true,
-+ "rhgb": true,
-+ "ro": true,
-+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "ansible_date_time": {
-+ "date": "2020-08-14",
-+ "day": "14",
-+ "epoch": "1597406131",
-+ "hour": "13",
-+ "iso8601": "2020-08-14T11:55:31Z",
-+ "iso8601_basic": "20200814T135531991936",
-+ "iso8601_basic_short": "20200814T135531",
-+ "iso8601_micro": "2020-08-14T11:55:31.992035Z",
-+ "minute": "55",
-+ "month": "08",
-+ "second": "31",
-+ "time": "13:55:31",
-+ "tz": "CEST",
-+ "tz_offset": "+0200",
-+ "weekday": "Friday",
-+ "weekday_number": "5",
-+ "weeknumber": "32",
-+ "year": "2020"
-+ },
-+ "ansible_default_ipv4": {
-+ "address": "192.168.122.29",
-+ "alias": "eth0",
-+ "broadcast": "192.168.122.255",
-+ "gateway": "192.168.122.1",
-+ "interface": "eth0",
-+ "macaddress": "52:54:00:3e:04:ce",
-+ "mtu": 1500,
-+ "netmask": "255.255.255.0",
-+ "network": "192.168.122.0",
-+ "type": "ether"
-+ },
-+ "ansible_default_ipv6": {},
-+ "ansible_device_links": {
-+ "ids": {},
-+ "labels": {},
-+ "masters": {},
-+ "uuids": {
-+ "vda1": [
-+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ ],
-+ "vda2": [
-+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ ],
-+ "vda3": [
-+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ ],
-+ "vda5": [
-+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ ]
-+ }
-+ },
-+ "ansible_devices": {
-+ "vda": {
-+ "holders": [],
-+ "host": "",
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": []
-+ },
-+ "model": null,
-+ "partitions": {
-+ "vda1": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ ]
-+ },
-+ "sectors": "2097152",
-+ "sectorsize": 512,
-+ "size": "1.00 GB",
-+ "start": "2048",
-+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ },
-+ "vda2": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ ]
-+ },
-+ "sectors": "4196352",
-+ "sectorsize": 512,
-+ "size": "2.00 GB",
-+ "start": "2099200",
-+ "uuid": "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ },
-+ "vda3": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ ]
-+ },
-+ "sectors": "104857600",
-+ "sectorsize": 512,
-+ "size": "50.00 GB",
-+ "start": "6295552",
-+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "vda4": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": []
-+ },
-+ "sectors": "2",
-+ "sectorsize": 512,
-+ "size": "1.00 KB",
-+ "start": "111153152",
-+ "uuid": null
-+ },
-+ "vda5": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ ]
-+ },
-+ "sectors": "308275200",
-+ "sectorsize": 512,
-+ "size": "147.00 GB",
-+ "start": "111155200",
-+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ }
-+ },
-+ "removable": "0",
-+ "rotational": "1",
-+ "sas_address": null,
-+ "sas_device_handle": null,
-+ "scheduler_mode": "mq-deadline",
-+ "sectors": "419430400",
-+ "sectorsize": "512",
-+ "size": "200.00 GB",
-+ "support_discard": "0",
-+ "vendor": "0x1af4",
-+ "virtual": 1
-+ }
-+ },
-+ "ansible_distribution": "CentOS",
-+ "ansible_distribution_file_parsed": true,
-+ "ansible_distribution_file_path": "/etc/redhat-release",
-+ "ansible_distribution_file_variety": "RedHat",
-+ "ansible_distribution_major_version": "7",
-+ "ansible_distribution_release": "Core",
-+ "ansible_distribution_version": "7.5",
-+ "ansible_dns": {
-+ "nameservers": [
-+ "192.168.122.1"
-+ ]
-+ },
-+ "ansible_domain": "tf.local",
-+ "ansible_effective_group_id": 0,
-+ "ansible_effective_user_id": 0,
-+ "ansible_env": {
-+ "HOME": "/root",
-+ "LANG": "es_ES.utf8",
-+ "LC_ADDRESS": "C",
-+ "LC_COLLATE": "C",
-+ "LC_CTYPE": "C",
-+ "LC_IDENTIFICATION": "C",
-+ "LC_MEASUREMENT": "C",
-+ "LC_MESSAGES": "C",
-+ "LC_MONETARY": "C",
-+ "LC_NAME": "C",
-+ "LC_NUMERIC": "C",
-+ "LC_PAPER": "C",
-+ "LC_TELEPHONE": "C",
-+ "LC_TIME": "C",
-+ "LESSOPEN": "||/usr/bin/lesspipe.sh %s",
-+ "LOGNAME": "root",
-+ "LS_COLORS": "rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:",
-+ "MAIL": "/var/mail/root",
-+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin",
-+ "PWD": "/root",
-+ "SHELL": "/bin/bash",
-+ "SHLVL": "2",
-+ "SSH_CLIENT": "192.168.122.179 55766 22",
-+ "SSH_CONNECTION": "192.168.122.179 55766 192.168.122.29 22",
-+ "SSH_TTY": "/dev/pts/0",
-+ "TERM": "xterm-256color",
-+ "USER": "root",
-+ "XDG_RUNTIME_DIR": "/run/user/0",
-+ "XDG_SESSION_ID": "110",
-+ "_": "/usr/bin/python"
-+ },
-+ "ansible_eth0": {
-+ "active": true,
-+ "device": "eth0",
-+ "features": {
-+ "busy_poll": "off [fixed]",
-+ "fcoe_mtu": "off [fixed]",
-+ "generic_receive_offload": "on",
-+ "generic_segmentation_offload": "on",
-+ "highdma": "on [fixed]",
-+ "hw_tc_offload": "off [fixed]",
-+ "l2_fwd_offload": "off [fixed]",
-+ "large_receive_offload": "off [fixed]",
-+ "loopback": "off [fixed]",
-+ "netns_local": "off [fixed]",
-+ "ntuple_filters": "off [fixed]",
-+ "receive_hashing": "off [fixed]",
-+ "rx_all": "off [fixed]",
-+ "rx_checksumming": "on [fixed]",
-+ "rx_fcs": "off [fixed]",
-+ "rx_udp_tunnel_port_offload": "off [fixed]",
-+ "rx_vlan_filter": "on [fixed]",
-+ "rx_vlan_offload": "off [fixed]",
-+ "rx_vlan_stag_filter": "off [fixed]",
-+ "rx_vlan_stag_hw_parse": "off [fixed]",
-+ "scatter_gather": "on",
-+ "tcp_segmentation_offload": "on",
-+ "tx_checksum_fcoe_crc": "off [fixed]",
-+ "tx_checksum_ip_generic": "on",
-+ "tx_checksum_ipv4": "off [fixed]",
-+ "tx_checksum_ipv6": "off [fixed]",
-+ "tx_checksum_sctp": "off [fixed]",
-+ "tx_checksumming": "on",
-+ "tx_fcoe_segmentation": "off [fixed]",
-+ "tx_gre_csum_segmentation": "off [fixed]",
-+ "tx_gre_segmentation": "off [fixed]",
-+ "tx_gso_partial": "off [fixed]",
-+ "tx_gso_robust": "off [fixed]",
-+ "tx_ipip_segmentation": "off [fixed]",
-+ "tx_lockless": "off [fixed]",
-+ "tx_nocache_copy": "off",
-+ "tx_scatter_gather": "on",
-+ "tx_scatter_gather_fraglist": "off [fixed]",
-+ "tx_sctp_segmentation": "off [fixed]",
-+ "tx_sit_segmentation": "off [fixed]",
-+ "tx_tcp6_segmentation": "on",
-+ "tx_tcp_ecn_segmentation": "on",
-+ "tx_tcp_mangleid_segmentation": "off",
-+ "tx_tcp_segmentation": "on",
-+ "tx_udp_tnl_csum_segmentation": "off [fixed]",
-+ "tx_udp_tnl_segmentation": "off [fixed]",
-+ "tx_vlan_offload": "off [fixed]",
-+ "tx_vlan_stag_hw_insert": "off [fixed]",
-+ "udp_fragmentation_offload": "on",
-+ "vlan_challenged": "off [fixed]"
-+ },
-+ "hw_timestamp_filters": [],
-+ "ipv4": {
-+ "address": "192.168.122.29",
-+ "broadcast": "192.168.122.255",
-+ "netmask": "255.255.255.0",
-+ "network": "192.168.122.0"
-+ },
-+ "ipv6": [
-+ {
-+ "address": "fe80::5054:ff:fe3e:4ce",
-+ "prefix": "64",
-+ "scope": "link"
-+ }
-+ ],
-+ "macaddress": "52:54:00:3e:04:ce",
-+ "module": "virtio_net",
-+ "mtu": 1500,
-+ "pciid": "virtio0",
-+ "promisc": false,
-+ "timestamping": [
-+ "rx_software",
-+ "software"
-+ ],
-+ "type": "ether"
-+ },
-+ "ansible_fibre_channel_wwn": [],
-+ "ansible_fips": false,
-+ "ansible_form_factor": "Other",
-+ "ansible_fqdn": "centos7-host1.tf.local",
-+ "ansible_hostname": "centos7-host1",
-+ "ansible_hostnqn": "",
-+ "ansible_interfaces": [
-+ "lo",
-+ "eth0"
-+ ],
-+ "ansible_is_chroot": false,
-+ "ansible_iscsi_iqn": "",
-+ "ansible_kernel": "3.10.0-862.el7.x86_64",
-+ "ansible_kernel_version": "#1 SMP Fri Apr 20 16:44:24 UTC 2018",
-+ "ansible_lo": {
-+ "active": true,
-+ "device": "lo",
-+ "features": {
-+ "busy_poll": "off [fixed]",
-+ "fcoe_mtu": "off [fixed]",
-+ "generic_receive_offload": "on",
-+ "generic_segmentation_offload": "on",
-+ "highdma": "on [fixed]",
-+ "hw_tc_offload": "off [fixed]",
-+ "l2_fwd_offload": "off [fixed]",
-+ "large_receive_offload": "off [fixed]",
-+ "loopback": "on [fixed]",
-+ "netns_local": "on [fixed]",
-+ "ntuple_filters": "off [fixed]",
-+ "receive_hashing": "off [fixed]",
-+ "rx_all": "off [fixed]",
-+ "rx_checksumming": "on [fixed]",
-+ "rx_fcs": "off [fixed]",
-+ "rx_udp_tunnel_port_offload": "off [fixed]",
-+ "rx_vlan_filter": "off [fixed]",
-+ "rx_vlan_offload": "off [fixed]",
-+ "rx_vlan_stag_filter": "off [fixed]",
-+ "rx_vlan_stag_hw_parse": "off [fixed]",
-+ "scatter_gather": "on",
-+ "tcp_segmentation_offload": "on",
-+ "tx_checksum_fcoe_crc": "off [fixed]",
-+ "tx_checksum_ip_generic": "on [fixed]",
-+ "tx_checksum_ipv4": "off [fixed]",
-+ "tx_checksum_ipv6": "off [fixed]",
-+ "tx_checksum_sctp": "on [fixed]",
-+ "tx_checksumming": "on",
-+ "tx_fcoe_segmentation": "off [fixed]",
-+ "tx_gre_csum_segmentation": "off [fixed]",
-+ "tx_gre_segmentation": "off [fixed]",
-+ "tx_gso_partial": "off [fixed]",
-+ "tx_gso_robust": "off [fixed]",
-+ "tx_ipip_segmentation": "off [fixed]",
-+ "tx_lockless": "on [fixed]",
-+ "tx_nocache_copy": "off [fixed]",
-+ "tx_scatter_gather": "on [fixed]",
-+ "tx_scatter_gather_fraglist": "on [fixed]",
-+ "tx_sctp_segmentation": "on",
-+ "tx_sit_segmentation": "off [fixed]",
-+ "tx_tcp6_segmentation": "on",
-+ "tx_tcp_ecn_segmentation": "on",
-+ "tx_tcp_mangleid_segmentation": "on",
-+ "tx_tcp_segmentation": "on",
-+ "tx_udp_tnl_csum_segmentation": "off [fixed]",
-+ "tx_udp_tnl_segmentation": "off [fixed]",
-+ "tx_vlan_offload": "off [fixed]",
-+ "tx_vlan_stag_hw_insert": "off [fixed]",
-+ "udp_fragmentation_offload": "on",
-+ "vlan_challenged": "on [fixed]"
-+ },
-+ "hw_timestamp_filters": [],
-+ "ipv4": {
-+ "address": "127.0.0.1",
-+ "broadcast": "host",
-+ "netmask": "255.0.0.0",
-+ "network": "127.0.0.0"
-+ },
-+ "ipv6": [
-+ {
-+ "address": "::1",
-+ "prefix": "128",
-+ "scope": "host"
-+ }
-+ ],
-+ "mtu": 65536,
-+ "promisc": false,
-+ "timestamping": [
-+ "rx_software",
-+ "software"
-+ ],
-+ "type": "loopback"
-+ },
-+ "ansible_local": {},
-+ "ansible_lsb": {},
-+ "ansible_machine": "x86_64",
-+ "ansible_machine_id": "d5f025e24919a00e864180785ebaa8c9",
-+ "ansible_memfree_mb": 717,
-+ "ansible_memory_mb": {
-+ "nocache": {
-+ "free": 893,
-+ "used": 98
-+ },
-+ "real": {
-+ "free": 717,
-+ "total": 991,
-+ "used": 274
-+ },
-+ "swap": {
-+ "cached": 0,
-+ "free": 2048,
-+ "total": 2048,
-+ "used": 0
-+ }
-+ },
-+ "ansible_memtotal_mb": 991,
-+ "ansible_mounts": [
-+ {
-+ "block_available": 243103,
-+ "block_size": 4096,
-+ "block_total": 259584,
-+ "block_used": 16481,
-+ "device": "/dev/vda1",
-+ "fstype": "xfs",
-+ "inode_available": 523998,
-+ "inode_total": 524288,
-+ "inode_used": 290,
-+ "mount": "/boot",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 995749888,
-+ "size_total": 1063256064,
-+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ },
-+ {
-+ "block_available": 12902656,
-+ "block_size": 4096,
-+ "block_total": 13100800,
-+ "block_used": 198144,
-+ "device": "/dev/vda3",
-+ "fstype": "xfs",
-+ "inode_available": 26189994,
-+ "inode_total": 26214400,
-+ "inode_used": 24406,
-+ "mount": "/",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 52849278976,
-+ "size_total": 53660876800,
-+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ {
-+ "block_available": 38507349,
-+ "block_size": 4096,
-+ "block_total": 38515585,
-+ "block_used": 8236,
-+ "device": "/dev/vda5",
-+ "fstype": "xfs",
-+ "inode_available": 77068797,
-+ "inode_total": 77068800,
-+ "inode_used": 3,
-+ "mount": "/home",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 157726101504,
-+ "size_total": 157759836160,
-+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ }
-+ ],
-+ "ansible_nodename": "centos7-host1",
-+ "ansible_os_family": "RedHat",
-+ "ansible_pkg_mgr": "yum",
-+ "ansible_proc_cmdline": {
-+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64",
-+ "LANG": "en_US.UTF-8",
-+ "console": "ttyS0,115200",
-+ "crashkernel": "auto",
-+ "quiet": true,
-+ "rhgb": true,
-+ "ro": true,
-+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "ansible_processor": [
-+ "0",
-+ "GenuineIntel",
-+ "QEMU Virtual CPU version 2.5+"
-+ ],
-+ "ansible_processor_cores": 1,
-+ "ansible_processor_count": 1,
-+ "ansible_processor_threads_per_core": 1,
-+ "ansible_processor_vcpus": 1,
-+ "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)",
-+ "ansible_product_serial": "NA",
-+ "ansible_product_uuid": "18FEBA4D-2060-45E8-87AF-AD6574F522CC",
-+ "ansible_product_version": "pc-i440fx-4.2",
-+ "ansible_python": {
-+ "executable": "/usr/bin/python",
-+ "has_sslcontext": true,
-+ "type": "CPython",
-+ "version": {
-+ "major": 2,
-+ "micro": 5,
-+ "minor": 7,
-+ "releaselevel": "final",
-+ "serial": 0
-+ },
-+ "version_info": [
-+ 2,
-+ 7,
-+ 5,
-+ "final",
-+ 0
-+ ]
-+ },
-+ "ansible_python_version": "2.7.5",
-+ "ansible_real_group_id": 0,
-+ "ansible_real_user_id": 0,
-+ "ansible_selinux": {
-+ "status": "disabled"
-+ },
-+ "ansible_selinux_python_present": true,
-+ "ansible_service_mgr": "systemd",
-+ "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3bXHUHyjmlbxE6LCP2ohRTr0pTX7sq89g0yKvovFK1qhP1rsBvy2jW8wjo2P8mlBWhL7obRGl8B+i3cMxZdrc=",
-+ "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIHv4wovK7u1Est8e1rMvQifupxLPpxtNEJIvKHq/iIVF",
-+ "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDPW4spvldGYXFraJCWJAqkuyQQRogSL+aECRU0hAG+IwESq3ceVkUZrvMVnhxmVImcRGWLCP24wmiMC2G/sDMHfBIhQIc4ySvLLyVd20VIsQHWiODQsSZTKCWkIwNmWuUD/8FcIpHm4YKlzZdHRVPwx9oIkdzoxgGyGZ3em7QwhryPZ+GiK8P9dEE2xy2lfAMXCFEL6Eyw/WF1AS0KLZiKl5ct9aYedUZN1rWkWW1Kb9S+OsZ+qzjdZbU2EfQI8SnP8kkvKt1E/B1UnsfZ5R0nlsyIX6Bh8oCluqJrxXrsTBf/s4Pe76/Q7JH/QHp2Yw+sQb+l7wXhlNmDRTpqXDdR",
-+ "ansible_swapfree_mb": 2048,
-+ "ansible_swaptotal_mb": 2048,
-+ "ansible_system": "Linux",
-+ "ansible_system_capabilities": [
-+ "cap_chown",
-+ "cap_dac_override",
-+ "cap_dac_read_search",
-+ "cap_fowner",
-+ "cap_fsetid",
-+ "cap_kill",
-+ "cap_setgid",
-+ "cap_setuid",
-+ "cap_setpcap",
-+ "cap_linux_immutable",
-+ "cap_net_bind_service",
-+ "cap_net_broadcast",
-+ "cap_net_admin",
-+ "cap_net_raw",
-+ "cap_ipc_lock",
-+ "cap_ipc_owner",
-+ "cap_sys_module",
-+ "cap_sys_rawio",
-+ "cap_sys_chroot",
-+ "cap_sys_ptrace",
-+ "cap_sys_pacct",
-+ "cap_sys_admin",
-+ "cap_sys_boot",
-+ "cap_sys_nice",
-+ "cap_sys_resource",
-+ "cap_sys_time",
-+ "cap_sys_tty_config",
-+ "cap_mknod",
-+ "cap_lease",
-+ "cap_audit_write",
-+ "cap_audit_control",
-+ "cap_setfcap",
-+ "cap_mac_override",
-+ "cap_mac_admin",
-+ "cap_syslog",
-+ "35",
-+ "36+ep"
-+ ],
-+ "ansible_system_capabilities_enforced": "True",
-+ "ansible_system_vendor": "QEMU",
-+ "ansible_uptime_seconds": 178555,
-+ "ansible_user_dir": "/root",
-+ "ansible_user_gecos": "root",
-+ "ansible_user_gid": 0,
-+ "ansible_user_id": "root",
-+ "ansible_user_shell": "/bin/bash",
-+ "ansible_user_uid": 0,
-+ "ansible_userspace_architecture": "x86_64",
-+ "ansible_userspace_bits": "64",
-+ "ansible_virtualization_role": "guest",
-+ "ansible_virtualization_type": "kvm",
-+ "discovered_interpreter_python": "/usr/bin/python",
-+ "gather_subset": [
-+ "all"
-+ ],
-+ "module_setup": true
-+ },
-+ "changed": false,
-+ "deprecations": [],
-+ "warnings": []
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:31.760375Z",
-+ "start": "2020-08-14T11:55:30.470536Z"
-+ },
-+ "id": "5254001e-9fce-297d-21cd-00000000000f",
-+ "name": "Gathering Facts"
-+ }
-+ },
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "action": "yum",
-+ "changed": false,
-+ "invocation": {
-+ "module_args": {
-+ "allow_downgrade": false,
-+ "autoremove": false,
-+ "bugfix": false,
-+ "conf_file": null,
-+ "disable_excludes": null,
-+ "disable_gpg_check": false,
-+ "disable_plugin": [],
-+ "disablerepo": [],
-+ "download_dir": null,
-+ "download_only": false,
-+ "enable_plugin": [],
-+ "enablerepo": [],
-+ "exclude": [],
-+ "install_repoquery": true,
-+ "install_weak_deps": true,
-+ "installroot": "/",
-+ "list": null,
-+ "lock_timeout": 30,
-+ "name": [
-+ "httpd"
-+ ],
-+ "releasever": null,
-+ "security": false,
-+ "skip_broken": false,
-+ "state": "present",
-+ "update_cache": false,
-+ "update_only": false,
-+ "use_backend": "auto",
-+ "validate_certs": true
-+ }
-+ },
-+ "msg": "",
-+ "rc": 0,
-+ "results": [
-+ "httpd-2.4.6-93.el7.centos.x86_64 providing httpd is already installed"
-+ ]
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:32.952644Z",
-+ "start": "2020-08-14T11:55:31.776073Z"
-+ },
-+ "id": "5254001e-9fce-297d-21cd-000000000009",
-+ "name": "yum"
-+ }
-+ },
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "action": "yum",
-+ "changed": false,
-+ "failed": true,
-+ "invocation": {
-+ "module_args": {
-+ "allow_downgrade": false,
-+ "autoremove": false,
-+ "bugfix": false,
-+ "conf_file": null,
-+ "disable_excludes": null,
-+ "disable_gpg_check": false,
-+ "disable_plugin": [],
-+ "disablerepo": [],
-+ "download_dir": null,
-+ "download_only": false,
-+ "enable_plugin": [],
-+ "enablerepo": [],
-+ "exclude": [],
-+ "install_repoquery": true,
-+ "install_weak_deps": true,
-+ "installroot": "/",
-+ "list": null,
-+ "lock_timeout": 30,
-+ "name": [
-+ "rsyndc"
-+ ],
-+ "releasever": null,
-+ "security": false,
-+ "skip_broken": false,
-+ "state": "present",
-+ "update_cache": false,
-+ "update_only": false,
-+ "use_backend": "auto",
-+ "validate_certs": true
-+ }
-+ },
-+ "msg": "No package matching 'rsyndc' found available, installed or updated",
-+ "rc": 126,
-+ "results": [
-+ "No package matching 'rsyndc' found available, installed or updated"
-+ ]
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:33.889442Z",
-+ "start": "2020-08-14T11:55:32.969762Z"
-+ },
-+ "id": "5254001e-9fce-297d-21cd-00000000000a",
-+ "name": "yum"
-+ }
-+ }
-+ ]
-+ }
-+ ],
-+ "stats": {
-+ "centos7-host1.tf.local": {
-+ "changed": 0,
-+ "failures": 1,
-+ "ignored": 0,
-+ "ok": 2,
-+ "rescued": 0,
-+ "skipped": 0,
-+ "unreachable": 0
-+ }
-+ },
-+ "retcode": 2
-+}
-diff --git a/tests/unit/files/playbooks/success_example.json b/tests/unit/files/playbooks/success_example.json
-new file mode 100644
-index 0000000000..8a9f3ad868
---- /dev/null
-+++ b/tests/unit/files/playbooks/success_example.json
-@@ -0,0 +1,803 @@
-+{
-+ "custom_stats": {},
-+ "global_custom_stats": {},
-+ "plays": [
-+ {
-+ "play": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:58.334076Z",
-+ "start": "2020-08-14T11:55:54.295001Z"
-+ },
-+ "id": "5254001e-9fce-f8b5-c66a-000000000007",
-+ "name": "py2hosts"
-+ },
-+ "tasks": [
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "_ansible_verbose_override": true,
-+ "action": "gather_facts",
-+ "ansible_facts": {
-+ "ansible_all_ipv4_addresses": [
-+ "192.168.122.29"
-+ ],
-+ "ansible_all_ipv6_addresses": [
-+ "fe80::5054:ff:fe3e:4ce"
-+ ],
-+ "ansible_apparmor": {
-+ "status": "disabled"
-+ },
-+ "ansible_architecture": "x86_64",
-+ "ansible_bios_date": "04/01/2014",
-+ "ansible_bios_version": "rel-1.13.0-0-gf21b5a4-rebuilt.opensuse.org",
-+ "ansible_cmdline": {
-+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64",
-+ "LANG": "en_US.UTF-8",
-+ "console": "ttyS0,115200",
-+ "crashkernel": "auto",
-+ "quiet": true,
-+ "rhgb": true,
-+ "ro": true,
-+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "ansible_date_time": {
-+ "date": "2020-08-14",
-+ "day": "14",
-+ "epoch": "1597406155",
-+ "hour": "13",
-+ "iso8601": "2020-08-14T11:55:55Z",
-+ "iso8601_basic": "20200814T135555808955",
-+ "iso8601_basic_short": "20200814T135555",
-+ "iso8601_micro": "2020-08-14T11:55:55.809048Z",
-+ "minute": "55",
-+ "month": "08",
-+ "second": "55",
-+ "time": "13:55:55",
-+ "tz": "CEST",
-+ "tz_offset": "+0200",
-+ "weekday": "Friday",
-+ "weekday_number": "5",
-+ "weeknumber": "32",
-+ "year": "2020"
-+ },
-+ "ansible_default_ipv4": {
-+ "address": "192.168.122.29",
-+ "alias": "eth0",
-+ "broadcast": "192.168.122.255",
-+ "gateway": "192.168.122.1",
-+ "interface": "eth0",
-+ "macaddress": "52:54:00:3e:04:ce",
-+ "mtu": 1500,
-+ "netmask": "255.255.255.0",
-+ "network": "192.168.122.0",
-+ "type": "ether"
-+ },
-+ "ansible_default_ipv6": {},
-+ "ansible_device_links": {
-+ "ids": {},
-+ "labels": {},
-+ "masters": {},
-+ "uuids": {
-+ "vda1": [
-+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ ],
-+ "vda2": [
-+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ ],
-+ "vda3": [
-+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ ],
-+ "vda5": [
-+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ ]
-+ }
-+ },
-+ "ansible_devices": {
-+ "vda": {
-+ "holders": [],
-+ "host": "",
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": []
-+ },
-+ "model": null,
-+ "partitions": {
-+ "vda1": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ ]
-+ },
-+ "sectors": "2097152",
-+ "sectorsize": 512,
-+ "size": "1.00 GB",
-+ "start": "2048",
-+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ },
-+ "vda2": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ ]
-+ },
-+ "sectors": "4196352",
-+ "sectorsize": 512,
-+ "size": "2.00 GB",
-+ "start": "2099200",
-+ "uuid": "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24"
-+ },
-+ "vda3": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ ]
-+ },
-+ "sectors": "104857600",
-+ "sectorsize": 512,
-+ "size": "50.00 GB",
-+ "start": "6295552",
-+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "vda4": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": []
-+ },
-+ "sectors": "2",
-+ "sectorsize": 512,
-+ "size": "1.00 KB",
-+ "start": "111153152",
-+ "uuid": null
-+ },
-+ "vda5": {
-+ "holders": [],
-+ "links": {
-+ "ids": [],
-+ "labels": [],
-+ "masters": [],
-+ "uuids": [
-+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ ]
-+ },
-+ "sectors": "308275200",
-+ "sectorsize": 512,
-+ "size": "147.00 GB",
-+ "start": "111155200",
-+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ }
-+ },
-+ "removable": "0",
-+ "rotational": "1",
-+ "sas_address": null,
-+ "sas_device_handle": null,
-+ "scheduler_mode": "mq-deadline",
-+ "sectors": "419430400",
-+ "sectorsize": "512",
-+ "size": "200.00 GB",
-+ "support_discard": "0",
-+ "vendor": "0x1af4",
-+ "virtual": 1
-+ }
-+ },
-+ "ansible_distribution": "CentOS",
-+ "ansible_distribution_file_parsed": true,
-+ "ansible_distribution_file_path": "/etc/redhat-release",
-+ "ansible_distribution_file_variety": "RedHat",
-+ "ansible_distribution_major_version": "7",
-+ "ansible_distribution_release": "Core",
-+ "ansible_distribution_version": "7.5",
-+ "ansible_dns": {
-+ "nameservers": [
-+ "192.168.122.1"
-+ ]
-+ },
-+ "ansible_domain": "tf.local",
-+ "ansible_effective_group_id": 0,
-+ "ansible_effective_user_id": 0,
-+ "ansible_env": {
-+ "HOME": "/root",
-+ "LANG": "es_ES.utf8",
-+ "LC_ADDRESS": "C",
-+ "LC_COLLATE": "C",
-+ "LC_CTYPE": "C",
-+ "LC_IDENTIFICATION": "C",
-+ "LC_MEASUREMENT": "C",
-+ "LC_MESSAGES": "C",
-+ "LC_MONETARY": "C",
-+ "LC_NAME": "C",
-+ "LC_NUMERIC": "C",
-+ "LC_PAPER": "C",
-+ "LC_TELEPHONE": "C",
-+ "LC_TIME": "C",
-+ "LESSOPEN": "||/usr/bin/lesspipe.sh %s",
-+ "LOGNAME": "root",
-+ "LS_COLORS": "rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:",
-+ "MAIL": "/var/mail/root",
-+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin",
-+ "PWD": "/root",
-+ "SHELL": "/bin/bash",
-+ "SHLVL": "2",
-+ "SSH_CLIENT": "192.168.122.179 55766 22",
-+ "SSH_CONNECTION": "192.168.122.179 55766 192.168.122.29 22",
-+ "SSH_TTY": "/dev/pts/0",
-+ "TERM": "xterm-256color",
-+ "USER": "root",
-+ "XDG_RUNTIME_DIR": "/run/user/0",
-+ "XDG_SESSION_ID": "110",
-+ "_": "/usr/bin/python"
-+ },
-+ "ansible_eth0": {
-+ "active": true,
-+ "device": "eth0",
-+ "features": {
-+ "busy_poll": "off [fixed]",
-+ "fcoe_mtu": "off [fixed]",
-+ "generic_receive_offload": "on",
-+ "generic_segmentation_offload": "on",
-+ "highdma": "on [fixed]",
-+ "hw_tc_offload": "off [fixed]",
-+ "l2_fwd_offload": "off [fixed]",
-+ "large_receive_offload": "off [fixed]",
-+ "loopback": "off [fixed]",
-+ "netns_local": "off [fixed]",
-+ "ntuple_filters": "off [fixed]",
-+ "receive_hashing": "off [fixed]",
-+ "rx_all": "off [fixed]",
-+ "rx_checksumming": "on [fixed]",
-+ "rx_fcs": "off [fixed]",
-+ "rx_udp_tunnel_port_offload": "off [fixed]",
-+ "rx_vlan_filter": "on [fixed]",
-+ "rx_vlan_offload": "off [fixed]",
-+ "rx_vlan_stag_filter": "off [fixed]",
-+ "rx_vlan_stag_hw_parse": "off [fixed]",
-+ "scatter_gather": "on",
-+ "tcp_segmentation_offload": "on",
-+ "tx_checksum_fcoe_crc": "off [fixed]",
-+ "tx_checksum_ip_generic": "on",
-+ "tx_checksum_ipv4": "off [fixed]",
-+ "tx_checksum_ipv6": "off [fixed]",
-+ "tx_checksum_sctp": "off [fixed]",
-+ "tx_checksumming": "on",
-+ "tx_fcoe_segmentation": "off [fixed]",
-+ "tx_gre_csum_segmentation": "off [fixed]",
-+ "tx_gre_segmentation": "off [fixed]",
-+ "tx_gso_partial": "off [fixed]",
-+ "tx_gso_robust": "off [fixed]",
-+ "tx_ipip_segmentation": "off [fixed]",
-+ "tx_lockless": "off [fixed]",
-+ "tx_nocache_copy": "off",
-+ "tx_scatter_gather": "on",
-+ "tx_scatter_gather_fraglist": "off [fixed]",
-+ "tx_sctp_segmentation": "off [fixed]",
-+ "tx_sit_segmentation": "off [fixed]",
-+ "tx_tcp6_segmentation": "on",
-+ "tx_tcp_ecn_segmentation": "on",
-+ "tx_tcp_mangleid_segmentation": "off",
-+ "tx_tcp_segmentation": "on",
-+ "tx_udp_tnl_csum_segmentation": "off [fixed]",
-+ "tx_udp_tnl_segmentation": "off [fixed]",
-+ "tx_vlan_offload": "off [fixed]",
-+ "tx_vlan_stag_hw_insert": "off [fixed]",
-+ "udp_fragmentation_offload": "on",
-+ "vlan_challenged": "off [fixed]"
-+ },
-+ "hw_timestamp_filters": [],
-+ "ipv4": {
-+ "address": "192.168.122.29",
-+ "broadcast": "192.168.122.255",
-+ "netmask": "255.255.255.0",
-+ "network": "192.168.122.0"
-+ },
-+ "ipv6": [
-+ {
-+ "address": "fe80::5054:ff:fe3e:4ce",
-+ "prefix": "64",
-+ "scope": "link"
-+ }
-+ ],
-+ "macaddress": "52:54:00:3e:04:ce",
-+ "module": "virtio_net",
-+ "mtu": 1500,
-+ "pciid": "virtio0",
-+ "promisc": false,
-+ "timestamping": [
-+ "rx_software",
-+ "software"
-+ ],
-+ "type": "ether"
-+ },
-+ "ansible_fibre_channel_wwn": [],
-+ "ansible_fips": false,
-+ "ansible_form_factor": "Other",
-+ "ansible_fqdn": "centos7-host1.tf.local",
-+ "ansible_hostname": "centos7-host1",
-+ "ansible_hostnqn": "",
-+ "ansible_interfaces": [
-+ "lo",
-+ "eth0"
-+ ],
-+ "ansible_is_chroot": false,
-+ "ansible_iscsi_iqn": "",
-+ "ansible_kernel": "3.10.0-862.el7.x86_64",
-+ "ansible_kernel_version": "#1 SMP Fri Apr 20 16:44:24 UTC 2018",
-+ "ansible_lo": {
-+ "active": true,
-+ "device": "lo",
-+ "features": {
-+ "busy_poll": "off [fixed]",
-+ "fcoe_mtu": "off [fixed]",
-+ "generic_receive_offload": "on",
-+ "generic_segmentation_offload": "on",
-+ "highdma": "on [fixed]",
-+ "hw_tc_offload": "off [fixed]",
-+ "l2_fwd_offload": "off [fixed]",
-+ "large_receive_offload": "off [fixed]",
-+ "loopback": "on [fixed]",
-+ "netns_local": "on [fixed]",
-+ "ntuple_filters": "off [fixed]",
-+ "receive_hashing": "off [fixed]",
-+ "rx_all": "off [fixed]",
-+ "rx_checksumming": "on [fixed]",
-+ "rx_fcs": "off [fixed]",
-+ "rx_udp_tunnel_port_offload": "off [fixed]",
-+ "rx_vlan_filter": "off [fixed]",
-+ "rx_vlan_offload": "off [fixed]",
-+ "rx_vlan_stag_filter": "off [fixed]",
-+ "rx_vlan_stag_hw_parse": "off [fixed]",
-+ "scatter_gather": "on",
-+ "tcp_segmentation_offload": "on",
-+ "tx_checksum_fcoe_crc": "off [fixed]",
-+ "tx_checksum_ip_generic": "on [fixed]",
-+ "tx_checksum_ipv4": "off [fixed]",
-+ "tx_checksum_ipv6": "off [fixed]",
-+ "tx_checksum_sctp": "on [fixed]",
-+ "tx_checksumming": "on",
-+ "tx_fcoe_segmentation": "off [fixed]",
-+ "tx_gre_csum_segmentation": "off [fixed]",
-+ "tx_gre_segmentation": "off [fixed]",
-+ "tx_gso_partial": "off [fixed]",
-+ "tx_gso_robust": "off [fixed]",
-+ "tx_ipip_segmentation": "off [fixed]",
-+ "tx_lockless": "on [fixed]",
-+ "tx_nocache_copy": "off [fixed]",
-+ "tx_scatter_gather": "on [fixed]",
-+ "tx_scatter_gather_fraglist": "on [fixed]",
-+ "tx_sctp_segmentation": "on",
-+ "tx_sit_segmentation": "off [fixed]",
-+ "tx_tcp6_segmentation": "on",
-+ "tx_tcp_ecn_segmentation": "on",
-+ "tx_tcp_mangleid_segmentation": "on",
-+ "tx_tcp_segmentation": "on",
-+ "tx_udp_tnl_csum_segmentation": "off [fixed]",
-+ "tx_udp_tnl_segmentation": "off [fixed]",
-+ "tx_vlan_offload": "off [fixed]",
-+ "tx_vlan_stag_hw_insert": "off [fixed]",
-+ "udp_fragmentation_offload": "on",
-+ "vlan_challenged": "on [fixed]"
-+ },
-+ "hw_timestamp_filters": [],
-+ "ipv4": {
-+ "address": "127.0.0.1",
-+ "broadcast": "host",
-+ "netmask": "255.0.0.0",
-+ "network": "127.0.0.0"
-+ },
-+ "ipv6": [
-+ {
-+ "address": "::1",
-+ "prefix": "128",
-+ "scope": "host"
-+ }
-+ ],
-+ "mtu": 65536,
-+ "promisc": false,
-+ "timestamping": [
-+ "rx_software",
-+ "software"
-+ ],
-+ "type": "loopback"
-+ },
-+ "ansible_local": {},
-+ "ansible_lsb": {},
-+ "ansible_machine": "x86_64",
-+ "ansible_machine_id": "d5f025e24919a00e864180785ebaa8c9",
-+ "ansible_memfree_mb": 717,
-+ "ansible_memory_mb": {
-+ "nocache": {
-+ "free": 893,
-+ "used": 98
-+ },
-+ "real": {
-+ "free": 717,
-+ "total": 991,
-+ "used": 274
-+ },
-+ "swap": {
-+ "cached": 0,
-+ "free": 2048,
-+ "total": 2048,
-+ "used": 0
-+ }
-+ },
-+ "ansible_memtotal_mb": 991,
-+ "ansible_mounts": [
-+ {
-+ "block_available": 243103,
-+ "block_size": 4096,
-+ "block_total": 259584,
-+ "block_used": 16481,
-+ "device": "/dev/vda1",
-+ "fstype": "xfs",
-+ "inode_available": 523998,
-+ "inode_total": 524288,
-+ "inode_used": 290,
-+ "mount": "/boot",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 995749888,
-+ "size_total": 1063256064,
-+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345"
-+ },
-+ {
-+ "block_available": 12902661,
-+ "block_size": 4096,
-+ "block_total": 13100800,
-+ "block_used": 198139,
-+ "device": "/dev/vda3",
-+ "fstype": "xfs",
-+ "inode_available": 26189994,
-+ "inode_total": 26214400,
-+ "inode_used": 24406,
-+ "mount": "/",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 52849299456,
-+ "size_total": 53660876800,
-+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ {
-+ "block_available": 38507349,
-+ "block_size": 4096,
-+ "block_total": 38515585,
-+ "block_used": 8236,
-+ "device": "/dev/vda5",
-+ "fstype": "xfs",
-+ "inode_available": 77068797,
-+ "inode_total": 77068800,
-+ "inode_used": 3,
-+ "mount": "/home",
-+ "options": "rw,relatime,attr2,inode64,noquota",
-+ "size_available": 157726101504,
-+ "size_total": 157759836160,
-+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249"
-+ }
-+ ],
-+ "ansible_nodename": "centos7-host1",
-+ "ansible_os_family": "RedHat",
-+ "ansible_pkg_mgr": "yum",
-+ "ansible_proc_cmdline": {
-+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64",
-+ "LANG": "en_US.UTF-8",
-+ "console": "ttyS0,115200",
-+ "crashkernel": "auto",
-+ "quiet": true,
-+ "rhgb": true,
-+ "ro": true,
-+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c"
-+ },
-+ "ansible_processor": [
-+ "0",
-+ "GenuineIntel",
-+ "QEMU Virtual CPU version 2.5+"
-+ ],
-+ "ansible_processor_cores": 1,
-+ "ansible_processor_count": 1,
-+ "ansible_processor_threads_per_core": 1,
-+ "ansible_processor_vcpus": 1,
-+ "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)",
-+ "ansible_product_serial": "NA",
-+ "ansible_product_uuid": "18FEBA4D-2060-45E8-87AF-AD6574F522CC",
-+ "ansible_product_version": "pc-i440fx-4.2",
-+ "ansible_python": {
-+ "executable": "/usr/bin/python",
-+ "has_sslcontext": true,
-+ "type": "CPython",
-+ "version": {
-+ "major": 2,
-+ "micro": 5,
-+ "minor": 7,
-+ "releaselevel": "final",
-+ "serial": 0
-+ },
-+ "version_info": [
-+ 2,
-+ 7,
-+ 5,
-+ "final",
-+ 0
-+ ]
-+ },
-+ "ansible_python_version": "2.7.5",
-+ "ansible_real_group_id": 0,
-+ "ansible_real_user_id": 0,
-+ "ansible_selinux": {
-+ "status": "disabled"
-+ },
-+ "ansible_selinux_python_present": true,
-+ "ansible_service_mgr": "systemd",
-+ "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3bXHUHyjmlbxE6LCP2ohRTr0pTX7sq89g0yKvovFK1qhP1rsBvy2jW8wjo2P8mlBWhL7obRGl8B+i3cMxZdrc=",
-+ "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIHv4wovK7u1Est8e1rMvQifupxLPpxtNEJIvKHq/iIVF",
-+ "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDPW4spvldGYXFraJCWJAqkuyQQRogSL+aECRU0hAG+IwESq3ceVkUZrvMVnhxmVImcRGWLCP24wmiMC2G/sDMHfBIhQIc4ySvLLyVd20VIsQHWiODQsSZTKCWkIwNmWuUD/8FcIpHm4YKlzZdHRVPwx9oIkdzoxgGyGZ3em7QwhryPZ+GiK8P9dEE2xy2lfAMXCFEL6Eyw/WF1AS0KLZiKl5ct9aYedUZN1rWkWW1Kb9S+OsZ+qzjdZbU2EfQI8SnP8kkvKt1E/B1UnsfZ5R0nlsyIX6Bh8oCluqJrxXrsTBf/s4Pe76/Q7JH/QHp2Yw+sQb+l7wXhlNmDRTpqXDdR",
-+ "ansible_swapfree_mb": 2048,
-+ "ansible_swaptotal_mb": 2048,
-+ "ansible_system": "Linux",
-+ "ansible_system_capabilities": [
-+ "cap_chown",
-+ "cap_dac_override",
-+ "cap_dac_read_search",
-+ "cap_fowner",
-+ "cap_fsetid",
-+ "cap_kill",
-+ "cap_setgid",
-+ "cap_setuid",
-+ "cap_setpcap",
-+ "cap_linux_immutable",
-+ "cap_net_bind_service",
-+ "cap_net_broadcast",
-+ "cap_net_admin",
-+ "cap_net_raw",
-+ "cap_ipc_lock",
-+ "cap_ipc_owner",
-+ "cap_sys_module",
-+ "cap_sys_rawio",
-+ "cap_sys_chroot",
-+ "cap_sys_ptrace",
-+ "cap_sys_pacct",
-+ "cap_sys_admin",
-+ "cap_sys_boot",
-+ "cap_sys_nice",
-+ "cap_sys_resource",
-+ "cap_sys_time",
-+ "cap_sys_tty_config",
-+ "cap_mknod",
-+ "cap_lease",
-+ "cap_audit_write",
-+ "cap_audit_control",
-+ "cap_setfcap",
-+ "cap_mac_override",
-+ "cap_mac_admin",
-+ "cap_syslog",
-+ "35",
-+ "36+ep"
-+ ],
-+ "ansible_system_capabilities_enforced": "True",
-+ "ansible_system_vendor": "QEMU",
-+ "ansible_uptime_seconds": 178578,
-+ "ansible_user_dir": "/root",
-+ "ansible_user_gecos": "root",
-+ "ansible_user_gid": 0,
-+ "ansible_user_id": "root",
-+ "ansible_user_shell": "/bin/bash",
-+ "ansible_user_uid": 0,
-+ "ansible_userspace_architecture": "x86_64",
-+ "ansible_userspace_bits": "64",
-+ "ansible_virtualization_role": "guest",
-+ "ansible_virtualization_type": "kvm",
-+ "discovered_interpreter_python": "/usr/bin/python",
-+ "gather_subset": [
-+ "all"
-+ ],
-+ "module_setup": true
-+ },
-+ "changed": false,
-+ "deprecations": [],
-+ "warnings": []
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:55.578128Z",
-+ "start": "2020-08-14T11:55:54.313122Z"
-+ },
-+ "id": "5254001e-9fce-f8b5-c66a-00000000000f",
-+ "name": "Gathering Facts"
-+ }
-+ },
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "action": "yum",
-+ "changed": false,
-+ "invocation": {
-+ "module_args": {
-+ "allow_downgrade": false,
-+ "autoremove": false,
-+ "bugfix": false,
-+ "conf_file": null,
-+ "disable_excludes": null,
-+ "disable_gpg_check": false,
-+ "disable_plugin": [],
-+ "disablerepo": [],
-+ "download_dir": null,
-+ "download_only": false,
-+ "enable_plugin": [],
-+ "enablerepo": [],
-+ "exclude": [],
-+ "install_repoquery": true,
-+ "install_weak_deps": true,
-+ "installroot": "/",
-+ "list": null,
-+ "lock_timeout": 30,
-+ "name": [
-+ "httpd"
-+ ],
-+ "releasever": null,
-+ "security": false,
-+ "skip_broken": false,
-+ "state": "present",
-+ "update_cache": false,
-+ "update_only": false,
-+ "use_backend": "auto",
-+ "validate_certs": true
-+ }
-+ },
-+ "msg": "",
-+ "rc": 0,
-+ "results": [
-+ "httpd-2.4.6-93.el7.centos.x86_64 providing httpd is already installed"
-+ ]
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:56.737921Z",
-+ "start": "2020-08-14T11:55:55.596293Z"
-+ },
-+ "id": "5254001e-9fce-f8b5-c66a-000000000009",
-+ "name": "yum"
-+ }
-+ },
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "action": "yum",
-+ "changed": false,
-+ "invocation": {
-+ "module_args": {
-+ "allow_downgrade": false,
-+ "autoremove": false,
-+ "bugfix": false,
-+ "conf_file": null,
-+ "disable_excludes": null,
-+ "disable_gpg_check": false,
-+ "disable_plugin": [],
-+ "disablerepo": [],
-+ "download_dir": null,
-+ "download_only": false,
-+ "enable_plugin": [],
-+ "enablerepo": [],
-+ "exclude": [],
-+ "install_repoquery": true,
-+ "install_weak_deps": true,
-+ "installroot": "/",
-+ "list": null,
-+ "lock_timeout": 30,
-+ "name": [
-+ "rsync"
-+ ],
-+ "releasever": null,
-+ "security": false,
-+ "skip_broken": false,
-+ "state": "present",
-+ "update_cache": false,
-+ "update_only": false,
-+ "use_backend": "auto",
-+ "validate_certs": true
-+ }
-+ },
-+ "msg": "",
-+ "rc": 0,
-+ "results": [
-+ "rsync-3.1.2-10.el7.x86_64 providing rsync is already installed"
-+ ]
-+ }
-+ },
-+ "task": {
-+ "duration": {
-+ "end": "2020-08-14T11:55:57.609670Z",
-+ "start": "2020-08-14T11:55:56.755620Z"
-+ },
-+ "id": "5254001e-9fce-f8b5-c66a-00000000000a",
-+ "name": "yum"
-+ }
-+ },
-+ {
-+ "hosts": {
-+ "centos7-host1.tf.local": {
-+ "_ansible_no_log": false,
-+ "action": "synchronize",
-+ "changed": true,
-+ "cmd": "/usr/bin/rsync --delay-updates -F --compress --delete-after --archive --rsh=/usr/bin/ssh -S none -i /etc/ansible/keys/mykey.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null --out-format=<>%i %n%L /root/myfiles/ centos7-host1.tf.local:/var/www/html/",
-+ "invocation": {
-+ "module_args": {
-+ "_local_rsync_password": null,
-+ "_local_rsync_path": "rsync",
-+ "_substitute_controller": false,
-+ "archive": true,
-+ "checksum": false,
-+ "compress": true,
-+ "copy_links": false,
-+ "delete": true,
-+ "dest": "centos7-host1.tf.local:/var/www/html/",
-+ "dest_port": null,
-+ "dirs": false,
-+ "existing_only": false,
-+ "group": null,
-+ "link_dest": null,
-+ "links": null,
-+ "mode": "push",
-+ "owner": null,
-+ "partial": false,
-+ "perms": null,
-+ "private_key": "/etc/ansible/keys/mykey.pem",
-+ "recursive": null,
-+ "rsync_opts": [],
-+ "rsync_path": null,
-+ "rsync_timeout": 0,
-+ "set_remote_user": true,
-+ "src": "/root/myfiles/",
-+ "ssh_args": null,
-+ "times": null,
-+ "verify_host": false
-+ }
-+ },
-+ "msg": "
Date: Mon, 17 Feb 2020 15:34:00 +1100
Subject: [PATCH] Apply patch from upstream to support Python 3.8
@@ -7,15 +7,12 @@ Apply saltstack/salt#56031 to support Python 3.8, which removed a
deprecated module and changed some behaviour. Add a {Build,}Requires on
python-distro, since it is now required.
---
- pkg/suse/salt.spec | 2 ++
- salt/config/__init__.py | 4 +++-
- salt/grains/core.py | 16 ++++++++--------
- salt/renderers/stateconf.py | 8 ++++----
- tests/unit/modules/test_virt.py | 2 +-
- 5 files changed, 18 insertions(+), 14 deletions(-)
+ pkg/suse/salt.spec | 2 ++
+ salt/renderers/stateconf.py | 49 ++++++++++++++++---------------------
+ 2 files changed, 23 insertions(+), 28 deletions(-)
diff --git a/pkg/suse/salt.spec b/pkg/suse/salt.spec
-index e3e678af3b..0f6a9bc012 100644
+index a17d2381ce..0df9d6c283 100644
--- a/pkg/suse/salt.spec
+++ b/pkg/suse/salt.spec
@@ -62,6 +62,7 @@ BuildRequires: python-psutil
@@ -34,95 +31,205 @@ index e3e678af3b..0f6a9bc012 100644
%if 0%{?suse_version}
# requirements/opt.txt (not all)
Recommends: python-MySQL-python
-diff --git a/salt/config/__init__.py b/salt/config/__init__.py
-index 0ebe1181dd..f484d94e7e 100644
---- a/salt/config/__init__.py
-+++ b/salt/config/__init__.py
-@@ -3196,7 +3196,9 @@ def apply_cloud_providers_config(overrides, defaults=None):
- # Merge provided extends
- keep_looping = False
- for alias, entries in six.iteritems(providers.copy()):
-- for driver, details in six.iteritems(entries):
-+ for driver in list(six.iterkeys(entries)):
-+ # Don't use iteritems, because the values of the dictionary will be changed
-+ details = entries[driver]
-
- if 'extends' not in details:
- # Extends resolved or non existing, continue!
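
The hunk being dropped above carried a subtle Python 3 fix: you cannot add or remove dict keys while iterating a live view without risking `RuntimeError: dictionary changed size during iteration`. A toy illustration with invented data, outside the diff, of the key-snapshot pattern it used:

```python
# Snapshot the keys up front, then look each value up inside the loop; the
# loop body is then free to replace values or resolve "extends" entries.
entries = {"prod": {"extends": "base"}, "base": {"minion": "master"}}

for driver in list(entries):          # snapshot, not entries.items()
    details = entries[driver]         # fetched fresh; may have been replaced
    if "extends" not in details:
        continue                      # extends resolved or missing, skip
    parent = entries[details.pop("extends")]
    entries[driver] = {**parent, **details}   # safe: loop iterates the snapshot

print(entries["prod"])  # {'minion': 'master'}
```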
-diff --git a/salt/grains/core.py b/salt/grains/core.py
-index f410985198..358b66fdb0 100644
---- a/salt/grains/core.py
-+++ b/salt/grains/core.py
-@@ -40,20 +40,20 @@ except ImportError:
- __proxyenabled__ = ['*']
- __FQDN__ = None
-
--# Extend the default list of supported distros. This will be used for the
--# /etc/DISTRO-release checking that is part of linux_distribution()
--from platform import _supported_dists
--_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
-- 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
--
- # linux_distribution deprecated in py3.7
- try:
- from platform import linux_distribution as _deprecated_linux_distribution
-
-+ # Extend the default list of supported distros. This will be used for the
-+ # /etc/DISTRO-release checking that is part of linux_distribution()
-+ from platform import _supported_dists
-+ _supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
-+ 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
-+
- def linux_distribution(**kwargs):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
-- return _deprecated_linux_distribution(**kwargs)
-+ return _deprecated_linux_distribution(supported_dists=_supported_dists, **kwargs)
- except ImportError:
- from distro import linux_distribution
-
-@@ -1976,7 +1976,7 @@ def os_data():
- )
- (osname, osrelease, oscodename) = \
- [x.strip('"').strip("'") for x in
-- linux_distribution(supported_dists=_supported_dists)]
-+ linux_distribution()]
- # Try to assign these three names based on the lsb info, they tend to
- # be more accurate than what python gets from /etc/DISTRO-release.
- # It's worth noting that Ubuntu has patched their Python distribution
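
The removed fallback is easier to follow outside diff-of-diff form. A minimal sketch of the import dance it implemented (assumes the `distro` package is installed on Python 3.8+, where `platform.linux_distribution` is gone):

```python
import warnings

try:
    # Python 3.5-3.7: the function still exists but emits a DeprecationWarning.
    from platform import linux_distribution as _deprecated_linux_distribution

    def linux_distribution(**kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # silence the deprecation notice
            return _deprecated_linux_distribution(**kwargs)

except ImportError:
    # Python 3.8 removed platform.linux_distribution entirely; use distro.
    from distro import linux_distribution
```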
diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py
-index cfce9e6926..5c8a8322ed 100644
+index 298ae28338..f0527d51d7 100644
--- a/salt/renderers/stateconf.py
+++ b/salt/renderers/stateconf.py
-@@ -224,10 +224,10 @@ def render(input, saltenv='base', sls='', argline='', **kws):
- tmplctx = STATE_CONF.copy()
- if tmplctx:
- prefix = sls + '::'
-- for k in six.iterkeys(tmplctx): # iterate over a copy of keys
-- if k.startswith(prefix):
-- tmplctx[k[len(prefix):]] = tmplctx[k]
-- del tmplctx[k]
-+ tmplctx = {
-+ k[len(prefix):] if k.startswith(prefix) else k: v
-+ for k, v in six.iteritems(tmplctx)
-+ }
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ A flexible renderer that takes a templating engine and a data format
+
+@@ -26,8 +25,6 @@ A flexible renderer that takes a templating engine and a data format
+ # - apache: >= 0.1.0
+ #
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import copy
+ import getopt
+@@ -36,12 +33,9 @@ import os
+ import re
+ from itertools import chain
+
+-# Import salt libs
+ import salt.utils.files
+ import salt.utils.stringutils
+ from salt.exceptions import SaltRenderError
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+ from salt.ext.six.moves import StringIO # pylint: disable=import-error
+
+@@ -135,7 +129,7 @@ def render(input, saltenv="base", sls="", argline="", **kws):
+ sid = has_names_decls(data)
+ if sid:
+ raise SaltRenderError(
+- "'names' declaration(found in state id: {0}) is "
++ "'names' declaration(found in state id: {}) is "
+ "not supported with implicitly ordered states! You "
+ "should generate the states in a template for-loop "
+ "instead.".format(sid)
+@@ -203,11 +197,11 @@ def render(input, saltenv="base", sls="", argline="", **kws):
+ name, rt_argline = (args[1] + " ").split(" ", 1)
+ render_template = renderers[name] # e.g., the mako renderer
+ except KeyError as err:
+- raise SaltRenderError("Renderer: {0} is not available!".format(err))
++ raise SaltRenderError("Renderer: {} is not available!".format(err))
+ except IndexError:
+ raise INVALID_USAGE_ERROR
+
+- if isinstance(input, six.string_types):
++ if isinstance(input, str):
+ with salt.utils.files.fopen(input, "r") as ifile:
+ sls_templ = salt.utils.stringutils.to_unicode(ifile.read())
+ else: # assume file-like
+@@ -227,7 +221,7 @@ def render(input, saltenv="base", sls="", argline="", **kws):
+ prefix = sls + "::"
+ tmplctx = {
+ k[len(prefix) :] if k.startswith(prefix) else k: v
+- for k, v in six.iteritems(tmplctx)
++ for k, v in tmplctx.items()
+ }
else:
tmplctx = {}
+@@ -262,8 +256,8 @@ def rewrite_single_shorthand_state_decl(data): # pylint: disable=C0103
+ state_id_decl:
+ state.func: []
+ """
+- for sid, states in six.iteritems(data):
+- if isinstance(states, six.string_types):
++ for sid, states in data.items():
++ if isinstance(states, str):
+ data[sid] = {states: []}
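
The rewritten comprehension above is dense; a toy run with invented keys shows what it does — keys namespaced with the sls prefix are shortened, everything else passes through unchanged:

```python
# Hypothetical template context for an sls named "webserver".
sls = "webserver"
prefix = sls + "::"
tmplctx = {"webserver::docroot": "/srv/www", "common::user": "wwwrun"}

tmplctx = {
    k[len(prefix):] if k.startswith(prefix) else k: v
    for k, v in tmplctx.items()
}
print(tmplctx)  # {'docroot': '/srv/www', 'common::user': 'wwwrun'}
```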
-diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index 94372c6d72..d762dcc479 100644
---- a/tests/unit/modules/test_virt.py
-+++ b/tests/unit/modules/test_virt.py
-@@ -1256,7 +1256,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
@@ -917,7 +847,7 @@ index 6e61544a1f..ca5e80d2d2 100644
domain_mock_boot = self.set_mock_vm("vm_with_boot_param", xml_boot)
domain_mock_boot.OSType = MagicMock(return_value="hvm")
define_mock_boot = MagicMock(return_value=True)
-@@ -2697,6 +2787,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2694,6 +2786,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(setxml.find("os").find("loader"), None)
self.assertEqual(setxml.find("os").find("nvram"), None)
@@ -1137,32 +1067,18 @@ index 6e61544a1f..ca5e80d2d2 100644
"""
Test virt._nic_profile with mixed dictionaries and lists as input.
diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
-index f03159334b..1923ae5c0f 100644
+index 8fe892f607..1923ae5c0f 100644
--- a/tests/unit/states/test_virt.py
+++ b/tests/unit/states/test_virt.py
-@@ -1,21 +1,15 @@
- """
- :codeauthor: Jayesh Kariya
- """
--# Import Python libs
-
- import shutil
- import tempfile
-
--# Import Salt Libs
+@@ -8,7 +8,6 @@ import tempfile
import salt.states.virt as virt
import salt.utils.files
from salt.exceptions import CommandExecutionError, SaltInvocationError
--
--# Import 3rd-party libs
+-from salt.ext import six
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, mock_open, patch
--
--# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
- from tests.support.unit import TestCase
-
-@@ -351,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -346,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
install=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -1170,7 +1086,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -376,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -371,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
start=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -1178,7 +1094,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -489,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -484,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=False,
@@ -1186,7 +1102,7 @@ index f03159334b..1923ae5c0f 100644
)
# Failed definition update case
-@@ -559,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -554,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
install=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -1194,7 +1110,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -601,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -596,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
boot=None,
test=True,
boot_dev=None,
@@ -1202,7 +1118,7 @@ index f03159334b..1923ae5c0f 100644
)
# No changes case
-@@ -636,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -631,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
boot=None,
test=True,
boot_dev=None,
@@ -1210,7 +1126,7 @@ index f03159334b..1923ae5c0f 100644
)
def test_running(self):
-@@ -713,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -708,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
pub_key=None,
priv_key=None,
boot_dev=None,
@@ -1218,7 +1134,7 @@ index f03159334b..1923ae5c0f 100644
connection=None,
username=None,
password=None,
-@@ -775,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -770,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
boot_dev="network hd",
@@ -1226,7 +1142,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -800,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -795,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
boot_dev="network hd",
@@ -1234,7 +1150,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -945,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -940,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
boot=None,
test=False,
boot_dev=None,
@@ -1242,7 +1158,7 @@ index f03159334b..1923ae5c0f 100644
)
# Failed definition update case
-@@ -1018,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1013,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
install=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -1250,7 +1166,7 @@ index f03159334b..1923ae5c0f 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -1064,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1059,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
boot=None,
test=True,
boot_dev=None,
@@ -1258,7 +1174,7 @@ index f03159334b..1923ae5c0f 100644
)
start_mock.assert_not_called()
-@@ -1101,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1096,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
boot=None,
test=True,
boot_dev=None,
@@ -1267,6 +1183,6 @@ index f03159334b..1923ae5c0f 100644
def test_stopped(self):
--
-2.28.0
+2.29.2
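
The run of one-line hunks above threads a new `boot_dev` keyword through every expected `virt.update` call in the state tests. A self-contained sketch of that assertion pattern, using a `MagicMock` stand-in rather than the real state module:

```python
from unittest.mock import MagicMock

# Stand-in for the mocked salt.modules.virt.update used by the state tests.
update_mock = MagicMock(return_value={"definition": False})

# What the state layer is now expected to do: forward boot_dev, default None.
update_mock("vm1", boot_dev=None, test=True)
update_mock.assert_called_with("vm1", boot_dev=None, test=True)
```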
diff --git a/backport-virt-patches-from-3001-256.patch b/backport-virt-patches-from-3001-256.patch
index 0e6785a..4a32152 100644
--- a/backport-virt-patches-from-3001-256.patch
+++ b/backport-virt-patches-from-3001-256.patch
@@ -1,4 +1,4 @@
-From 5bd071081ccb8ae3938643831d2e5632712b48b7 Mon Sep 17 00:00:00 2001
+From 32559016ba2bd306a3a027a2191857f24258fc46 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Mon, 7 Sep 2020 15:00:40 +0200
Subject: [PATCH] Backport virt patches from 3001+ (#256)
@@ -411,26 +411,20 @@ Co-authored-by: xeacott
Co-authored-by: Frode Gundersen
Co-authored-by: Daniel A. Wozniak
---
- changelog/56454.fixed | 1 +
- changelog/57544.added | 1 +
- changelog/58331.fixed | 1 +
- salt/modules/virt.py | 442 ++++---
- salt/states/virt.py | 171 ++-
- salt/templates/virt/libvirt_domain.jinja | 2 +-
- salt/utils/data.py | 976 +++++++++------
- salt/utils/xmlutil.py | 251 +++-
- tests/pytests/unit/utils/test_data.py | 57 +
- tests/pytests/unit/utils/test_xmlutil.py | 169 +++
- tests/unit/modules/test_virt.py | 218 ++--
- tests/unit/states/test_virt.py | 98 +-
- tests/unit/utils/test_data.py | 1399 ++++++++++++----------
- tests/unit/utils/test_xmlutil.py | 164 +--
- 14 files changed, 2588 insertions(+), 1362 deletions(-)
+ changelog/56454.fixed | 1 +
+ changelog/57544.added | 1 +
+ changelog/58331.fixed | 1 +
+ salt/modules/virt.py | 270 +++++++++++++----------
+ salt/states/virt.py | 88 ++++++--
+ salt/templates/virt/libvirt_domain.jinja | 29 +--
+ salt/utils/xmlutil.py | 4 +-
+ tests/unit/modules/test_virt.py | 159 +++++++++----
+ tests/unit/states/test_virt.py | 93 +++++++-
+ tests/unit/utils/test_data.py | 32 ---
+ 10 files changed, 441 insertions(+), 237 deletions(-)
create mode 100644 changelog/56454.fixed
create mode 100644 changelog/57544.added
create mode 100644 changelog/58331.fixed
- create mode 100644 tests/pytests/unit/utils/test_data.py
- create mode 100644 tests/pytests/unit/utils/test_xmlutil.py
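
Per the diffstat, most of the churn lands in salt/modules/virt.py and the libvirt domain template; the headline addition is a `boot_dev` parameter taking a space-separated device list sorted by decreasing priority. A rough Python sketch (not the patch's actual jinja template) of the XML it expands to:

```python
from xml.etree import ElementTree


def boot_dev_elements(boot_dev="hd"):
    # One <boot dev="..."/> per space-separated entry, highest priority first.
    os_node = ElementTree.Element("os")
    for dev in boot_dev.split():
        ElementTree.SubElement(os_node, "boot", {"dev": dev})
    return ElementTree.tostring(os_node, encoding="unicode")


print(boot_dev_elements("network hd"))
# <os><boot dev="network" /><boot dev="hd" /></os>
```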
diff --git a/changelog/56454.fixed b/changelog/56454.fixed
new file mode 100644
@@ -454,41 +448,11 @@ index 0000000000..4b8f78dd53
@@ -0,0 +1 @@
+Leave boot parameters untouched if boot parameter is set to None in virt.update
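
A hedged sketch of the semantics behind this changelog line: only parameters that were actually set are folded into the update data, so `boot=None` means "leave the existing boot configuration alone" rather than "clear it". (Simplified here to an is-not-None filter; the patch itself filters on truthiness.)

```python
def collect_updates(cpu=None, mem=None, boot=None):
    # Drop unset parameters so they never reach the XML-diffing step.
    candidates = {"cpu": cpu, "mem": mem, "boot": boot}
    return {k: v for k, v in candidates.items() if v is not None}

print(collect_updates(cpu=2))                             # boot untouched
print(collect_updates(boot={"kernel": "/boot/vmlinuz"}))  # boot updated
```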
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index a78c21e323..cd80fbe608 100644
+index fb27397baa..ec40f08359 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -1,8 +1,11 @@
--# -*- coding: utf-8 -*-
- """
- Work with virtual machines managed by libvirt
-
--:depends: libvirt Python module
-+:depends:
-+ * libvirt Python module
-+ * libvirt client
-+ * qemu-img
-+ * grep
-
- Connection
- ==========
-@@ -73,7 +76,7 @@ The calls not using the libvirt connection setup are:
- # of his in the virt func module have been used
-
- # Import python libs
--from __future__ import absolute_import, print_function, unicode_literals
-+
- import base64
- import copy
- import datetime
-@@ -89,23 +92,19 @@ from xml.etree import ElementTree
- from xml.sax import saxutils
-
- # Import third party libs
--import jinja2
+@@ -94,17 +94,13 @@ from xml.sax import saxutils
import jinja2.exceptions
-
- # Import salt libs
-+import salt.utils.data
import salt.utils.files
import salt.utils.json
-import salt.utils.network
@@ -505,72 +469,7 @@ index a78c21e323..cd80fbe608 100644
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
from salt.utils.virt import check_remote, download_remote
-@@ -227,8 +226,8 @@ def __get_conn(**kwargs):
- )
- except Exception: # pylint: disable=broad-except
- raise CommandExecutionError(
-- "Sorry, {0} failed to open a connection to the hypervisor "
-- "software at {1}".format(__grains__["fqdn"], conn_str)
-+ "Sorry, {} failed to open a connection to the hypervisor "
-+ "software at {}".format(__grains__["fqdn"], conn_str)
- )
- return conn
-
-@@ -405,7 +404,7 @@ def _get_nics(dom):
- # driver, source, and match can all have optional attributes
- if re.match("(driver|source|address)", v_node.tag):
- temp = {}
-- for key, value in six.iteritems(v_node.attrib):
-+ for key, value in v_node.attrib.items():
- temp[key] = value
- nic[v_node.tag] = temp
- # virtualport needs to be handled separately, to pick up the
-@@ -413,7 +412,7 @@ def _get_nics(dom):
- if v_node.tag == "virtualport":
- temp = {}
- temp["type"] = v_node.get("type")
-- for key, value in six.iteritems(v_node.attrib):
-+ for key, value in v_node.attrib.items():
- temp[key] = value
- nic["virtualport"] = temp
- if "mac" not in nic:
-@@ -435,7 +434,7 @@ def _get_graphics(dom):
- }
- doc = ElementTree.fromstring(dom.XMLDesc(0))
- for g_node in doc.findall("devices/graphics"):
-- for key, value in six.iteritems(g_node.attrib):
-+ for key, value in g_node.attrib.items():
- out[key] = value
- return out
-
-@@ -448,7 +447,7 @@ def _get_loader(dom):
- doc = ElementTree.fromstring(dom.XMLDesc(0))
- for g_node in doc.findall("os/loader"):
- out["path"] = g_node.text
-- for key, value in six.iteritems(g_node.attrib):
-+ for key, value in g_node.attrib.items():
- out[key] = value
- return out
-
-@@ -503,7 +502,7 @@ def _get_disks(conn, dom):
- qemu_target = source.get("protocol")
- source_name = source.get("name")
- if source_name:
-- qemu_target = "{0}:{1}".format(qemu_target, source_name)
-+ qemu_target = "{}:{}".format(qemu_target, source_name)
-
- # Reverse the magic for the rbd and gluster pools
- if source.get("protocol") in ["rbd", "gluster"]:
-@@ -633,7 +632,7 @@ def _get_target(target, ssh):
- proto = "qemu"
- if ssh:
- proto += "+ssh"
-- return " {0}://{1}/{2}".format(proto, target, "system")
-+ return " {}://{}/{}".format(proto, target, "system")
-
-
- def _gen_xml(
-@@ -648,6 +647,7 @@ def _gen_xml(
+@@ -647,6 +643,7 @@ def _gen_xml(
arch,
graphics=None,
boot=None,
@@ -578,18 +477,7 @@ index a78c21e323..cd80fbe608 100644
**kwargs
):
"""
-@@ -657,8 +657,8 @@ def _gen_xml(
- context = {
- "hypervisor": hypervisor,
- "name": name,
-- "cpu": six.text_type(cpu),
-- "mem": six.text_type(mem),
-+ "cpu": str(cpu),
-+ "mem": str(mem),
- }
- if hypervisor in ["qemu", "kvm"]:
- context["controller_model"] = False
-@@ -681,15 +681,17 @@ def _gen_xml(
+@@ -680,15 +677,17 @@ def _gen_xml(
graphics = None
context["graphics"] = graphics
@@ -613,213 +501,7 @@ index a78c21e323..cd80fbe608 100644
if os_type == "xen":
# Compute the Xen PV boot method
if __grains__["os_family"] == "Suse":
-@@ -720,7 +722,7 @@ def _gen_xml(
- "target_dev": _get_disk_target(targets, len(diskp), prefix),
- "disk_bus": disk["model"],
- "format": disk.get("format", "raw"),
-- "index": six.text_type(i),
-+ "index": str(i),
- }
- targets.append(disk_context["target_dev"])
- if disk.get("source_file"):
-@@ -825,8 +827,8 @@ def _gen_vol_xml(
- "name": name,
- "target": {"permissions": permissions, "nocow": nocow},
- "format": format,
-- "size": six.text_type(size),
-- "allocation": six.text_type(int(allocation) * 1024),
-+ "size": str(size),
-+ "allocation": str(int(allocation) * 1024),
- "backingStore": backing_store,
- }
- fn_ = "libvirt_volume.jinja"
-@@ -978,31 +980,29 @@ def _zfs_image_create(
- """
- if not disk_image_name and not disk_size:
- raise CommandExecutionError(
-- "Unable to create new disk {0}, please specify"
-+ "Unable to create new disk {}, please specify"
- " the disk image name or disk size argument".format(disk_name)
- )
-
- if not pool:
- raise CommandExecutionError(
-- "Unable to create new disk {0}, please specify"
-+ "Unable to create new disk {}, please specify"
- " the disk pool name".format(disk_name)
- )
-
-- destination_fs = os.path.join(pool, "{0}.{1}".format(vm_name, disk_name))
-+ destination_fs = os.path.join(pool, "{}.{}".format(vm_name, disk_name))
- log.debug("Image destination will be %s", destination_fs)
-
- existing_disk = __salt__["zfs.list"](name=pool)
- if "error" in existing_disk:
- raise CommandExecutionError(
-- "Unable to create new disk {0}. {1}".format(
-+ "Unable to create new disk {}. {}".format(
- destination_fs, existing_disk["error"]
- )
- )
- elif destination_fs in existing_disk:
- log.info(
-- "ZFS filesystem {0} already exists. Skipping creation".format(
-- destination_fs
-- )
-+ "ZFS filesystem {} already exists. Skipping creation".format(destination_fs)
- )
- blockdevice_path = os.path.join("/dev/zvol", pool, vm_name)
- return blockdevice_path
-@@ -1025,7 +1025,7 @@ def _zfs_image_create(
- )
-
- blockdevice_path = os.path.join(
-- "/dev/zvol", pool, "{0}.{1}".format(vm_name, disk_name)
-+ "/dev/zvol", pool, "{}.{}".format(vm_name, disk_name)
- )
- log.debug("Image path will be %s", blockdevice_path)
- return blockdevice_path
-@@ -1042,7 +1042,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
-
- if not disk_size and not disk_image:
- raise CommandExecutionError(
-- "Unable to create new disk {0}, please specify"
-+ "Unable to create new disk {}, please specify"
- " disk size and/or disk image argument".format(disk["filename"])
- )
-
-@@ -1066,7 +1066,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
- if create_overlay and qcow2:
- log.info("Cloning qcow2 image %s using copy on write", sfn)
- __salt__["cmd.run"](
-- 'qemu-img create -f qcow2 -o backing_file="{0}" "{1}"'.format(
-+ 'qemu-img create -f qcow2 -o backing_file="{}" "{}"'.format(
- sfn, img_dest
- ).split()
- )
-@@ -1079,16 +1079,16 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
- if disk_size and qcow2:
- log.debug("Resize qcow2 image to %sM", disk_size)
- __salt__["cmd.run"](
-- 'qemu-img resize "{0}" {1}M'.format(img_dest, disk_size)
-+ 'qemu-img resize "{}" {}M'.format(img_dest, disk_size)
- )
-
- log.debug("Apply umask and remove exec bit")
- mode = (0o0777 ^ mask) & 0o0666
- os.chmod(img_dest, mode)
-
-- except (IOError, OSError) as err:
-+ except OSError as err:
- raise CommandExecutionError(
-- "Problem while copying image. {0} - {1}".format(disk_image, err)
-+ "Problem while copying image. {} - {}".format(disk_image, err)
- )
-
- else:
-@@ -1099,13 +1099,13 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
- if disk_size:
- log.debug("Create empty image with size %sM", disk_size)
- __salt__["cmd.run"](
-- 'qemu-img create -f {0} "{1}" {2}M'.format(
-+ 'qemu-img create -f {} "{}" {}M'.format(
- disk.get("format", "qcow2"), img_dest, disk_size
- )
- )
- else:
- raise CommandExecutionError(
-- "Unable to create new disk {0},"
-+ "Unable to create new disk {},"
- " please specify argument".format(img_dest)
- )
-
-@@ -1113,9 +1113,9 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
- mode = (0o0777 ^ mask) & 0o0666
- os.chmod(img_dest, mode)
-
-- except (IOError, OSError) as err:
-+ except OSError as err:
- raise CommandExecutionError(
-- "Problem while creating volume {0} - {1}".format(img_dest, err)
-+ "Problem while creating volume {} - {}".format(img_dest, err)
- )
-
- return img_dest
-@@ -1252,7 +1252,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name):
- __salt__["config.get"]("virt:disk", {}).get(profile, default)
- )
-
-- # Transform the list to remove one level of dictionnary and add the name as a property
-+ # Transform the list to remove one level of dictionary and add the name as a property
- disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()]
-
- # Merge with the user-provided disks definitions
-@@ -1274,7 +1274,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name):
- disk["model"] = "ide"
-
- # Add the missing properties that have defaults
-- for key, val in six.iteritems(overlay):
-+ for key, val in overlay.items():
- if key not in disk:
- disk[key] = val
-
-@@ -1296,7 +1296,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
- Compute the disk file name and update it in the disk value.
- """
- # Compute the filename without extension since it may not make sense for some pool types
-- disk["filename"] = "{0}_{1}".format(vm_name, disk["name"])
-+ disk["filename"] = "{}_{}".format(vm_name, disk["name"])
-
- # Compute the source file path
- base_dir = disk.get("pool", None)
-@@ -1311,7 +1311,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
- # For path-based disks, keep the qcow2 default format
- if not disk.get("format"):
- disk["format"] = "qcow2"
-- disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"])
-+ disk["filename"] = "{}.{}".format(disk["filename"], disk["format"])
- disk["source_file"] = os.path.join(base_dir, disk["filename"])
- else:
- if "pool" not in disk:
-@@ -1365,7 +1365,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
- disk["format"] = volume_options.get("default_format", None)
-
- elif hypervisor == "bhyve" and vm_name:
-- disk["filename"] = "{0}.{1}".format(vm_name, disk["name"])
-+ disk["filename"] = "{}.{}".format(vm_name, disk["name"])
- disk["source_file"] = os.path.join(
- "/dev/zvol", base_dir or "", disk["filename"]
- )
-@@ -1373,8 +1373,8 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
- elif hypervisor in ["esxi", "vmware"]:
- if not base_dir:
- base_dir = __salt__["config.get"]("virt:storagepool", "[0] ")
-- disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"])
-- disk["source_file"] = "{0}{1}".format(base_dir, disk["filename"])
-+ disk["filename"] = "{}.{}".format(disk["filename"], disk["format"])
-+ disk["source_file"] = "{}{}".format(base_dir, disk["filename"])
-
-
- def _complete_nics(interfaces, hypervisor):
-@@ -1422,7 +1422,7 @@ def _complete_nics(interfaces, hypervisor):
- """
- Apply the default overlay to attributes
- """
-- for key, value in six.iteritems(overlays[hypervisor]):
-+ for key, value in overlays[hypervisor].items():
- if key not in attributes or not attributes[key]:
- attributes[key] = value
-
-@@ -1449,7 +1449,7 @@ def _nic_profile(profile_name, hypervisor):
- """
- Append dictionary profile data to interfaces list
- """
-- for interface_name, attributes in six.iteritems(profile_dict):
-+ for interface_name, attributes in profile_dict.items():
- attributes["name"] = interface_name
- interfaces.append(attributes)
-
-@@ -1522,17 +1522,24 @@ def _handle_remote_boot_params(orig_boot):
+@@ -1519,17 +1518,24 @@ def _handle_remote_boot_params(orig_boot):
new_boot = orig_boot.copy()
keys = orig_boot.keys()
cases = [
@@ -847,7 +529,7 @@ index a78c21e323..cd80fbe608 100644
if saltinst_dir is None:
os.makedirs(CACHE_DIR)
saltinst_dir = CACHE_DIR
-@@ -1540,12 +1547,41 @@ def _handle_remote_boot_params(orig_boot):
+@@ -1537,12 +1543,41 @@ def _handle_remote_boot_params(orig_boot):
return new_boot
else:
raise SaltInvocationError(
@@ -890,7 +572,7 @@ index a78c21e323..cd80fbe608 100644
def init(
name,
cpu,
-@@ -1566,6 +1602,7 @@ def init(
+@@ -1563,6 +1598,7 @@ def init(
os_type=None,
arch=None,
boot=None,
@@ -898,7 +580,7 @@ index a78c21e323..cd80fbe608 100644
**kwargs
):
"""
-@@ -1635,7 +1672,8 @@ def init(
+@@ -1632,7 +1668,8 @@ def init(
This is an optional parameter, all of the keys are optional within the dictionary. The structure of
the dictionary is documented in :ref:`init-boot-def`. If a remote path is provided to kernel or initrd,
salt will handle the downloading of the specified remote file and modify the XML accordingly.
@@ -908,7 +590,7 @@ index a78c21e323..cd80fbe608 100644
.. versionadded:: 3000
-@@ -1649,6 +1687,12 @@ def init(
+@@ -1646,6 +1683,12 @@ def init(
'nvram': '/usr/share/OVMF/OVMF_VARS.ms.fd'
}
@@ -921,7 +603,7 @@ index a78c21e323..cd80fbe608 100644
.. _init-boot-def:
.. rubric:: Boot parameters definition
-@@ -1674,6 +1718,11 @@ def init(
+@@ -1671,6 +1714,11 @@ def init(
.. versionadded:: sodium
@@ -933,16 +615,7 @@ index a78c21e323..cd80fbe608 100644
.. _init-nic-def:
.. rubric:: Network Interfaces Definitions
-@@ -1797,7 +1846,7 @@ def init(
-
- .. rubric:: Graphics Definition
-
-- The graphics dictionnary can have the following properties:
-+ The graphics dictionary can have the following properties:
-
- type
- Graphics type. The possible values are ``none``, ``'spice'``, ``'vnc'`` and other values
-@@ -1858,6 +1907,8 @@ def init(
+@@ -1855,6 +1903,8 @@ def init(
for x in y
}
)
@@ -951,27 +624,7 @@ index a78c21e323..cd80fbe608 100644
virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0]
# esxi used to be a possible value for the hypervisor: map it to vmware since it's the same
-@@ -1890,8 +1941,8 @@ def init(
- else:
- # assume libvirt manages disks for us
- log.debug("Generating libvirt XML for %s", _disk)
-- volume_name = "{0}/{1}".format(name, _disk["name"])
-- filename = "{0}.{1}".format(volume_name, _disk["format"])
-+ volume_name = "{}/{}".format(name, _disk["name"])
-+ filename = "{}.{}".format(volume_name, _disk["format"])
- vol_xml = _gen_vol_xml(
- filename, _disk["size"], format=_disk["format"]
- )
-@@ -1939,7 +1990,7 @@ def init(
- else:
- # Unknown hypervisor
- raise SaltInvocationError(
-- "Unsupported hypervisor when handling disk image: {0}".format(
-+ "Unsupported hypervisor when handling disk image: {}".format(
- virt_hypervisor
- )
- )
-@@ -1965,8 +2016,10 @@ def init(
+@@ -1962,8 +2012,10 @@ def init(
arch,
graphics,
boot,
@@ -982,7 +635,7 @@ index a78c21e323..cd80fbe608 100644
conn.defineXML(vm_xml)
except libvirt.libvirtError as err:
conn.close()
-@@ -2192,6 +2245,7 @@ def update(
+@@ -2189,6 +2241,7 @@ def update(
live=True,
boot=None,
test=False,
@@ -990,7 +643,7 @@ index a78c21e323..cd80fbe608 100644
**kwargs
):
"""
-@@ -2234,11 +2288,28 @@ def update(
+@@ -2248,11 +2301,28 @@ def update(
Refer to :ref:`init-boot-def` for the complete boot parameter description.
@@ -1021,23 +674,8 @@ index a78c21e323..cd80fbe608 100644
:param test: run in dry-run mode if set to True
.. versionadded:: sodium
-@@ -2286,6 +2357,8 @@ def update(
-
- if boot is not None:
- boot = _handle_remote_boot_params(boot)
-+ if boot.get("efi", None) is not None:
-+ need_update = _handle_efi_param(boot, desc)
-
- new_desc = ElementTree.fromstring(
- _gen_xml(
-@@ -2307,76 +2380,58 @@ def update(
- # Update the cpu
- cpu_node = desc.find("vcpu")
- if cpu and int(cpu_node.text) != cpu:
-- cpu_node.text = six.text_type(cpu)
-- cpu_node.set("current", six.text_type(cpu))
-+ cpu_node.text = str(cpu)
-+ cpu_node.set("current", str(cpu))
+@@ -2327,67 +2397,54 @@ def update(
+ cpu_node.set("current", str(cpu))
need_update = True
- # Update the kernel boot parameters
@@ -1062,31 +700,25 @@ index a78c21e323..cd80fbe608 100644
- parent_tag.remove(found_tag)
- else:
- found_tag.text = boot_tag_value
+-
+- # If the existing tag is loader or nvram, we need to update the corresponding attribute
+- if found_tag.tag == "loader" and boot_tag_value is not None:
+- found_tag.set("readonly", "yes")
+- found_tag.set("type", "pflash")
+-
+- if found_tag.tag == "nvram" and boot_tag_value is not None:
+- found_tag.set("template", found_tag.text)
+- found_tag.text = None
+ def _set_loader(node, value):
+ salt.utils.xmlutil.set_node_text(node, value)
+ if value is not None:
+ node.set("readonly", "yes")
+ node.set("type", "pflash")
-- # If the existing tag is loader or nvram, we need to update the corresponding attribute
-- if found_tag.tag == "loader" and boot_tag_value is not None:
-- found_tag.set("readonly", "yes")
-- found_tag.set("type", "pflash")
+- need_update = True
+ def _set_nvram(node, value):
+ node.set("template", value)
-- if found_tag.tag == "nvram" and boot_tag_value is not None:
-- found_tag.set("template", found_tag.text)
-- found_tag.text = None
-+ def _set_with_mib_unit(node, value):
-+ node.text = str(value)
-+ node.set("unit", "MiB")
-
-- need_update = True
--
-- # Existing tag is not found, but value is not None
-- elif found_tag is None and boot_tag_value is not None:
--
- # Need to check for parent tag, and add it if it does not exist.
- # Add a subelement and set the value to the new value, and then
- # mark for update.
@@ -1102,12 +734,13 @@ index a78c21e323..cd80fbe608 100644
- if child_tag.tag == "loader":
- child_tag.set("readonly", "yes")
- child_tag.set("type", "pflash")
--
++ def _set_with_mib_unit(node, value):
++ node.text = str(value)
++ node.set("unit", "MiB")
+
- if child_tag.tag == "nvram":
- child_tag.set("template", child_tag.text)
- child_tag.text = None
--
-- need_update = True
+ # Update the kernel boot parameters
+ params_mapping = [
+ {"path": "boot:kernel", "xpath": "os/kernel"},
@@ -1141,7 +774,7 @@ index a78c21e323..cd80fbe608 100644
- for mem_node_name in ["memory", "currentMemory"]:
- mem_node = desc.find(mem_node_name)
- if mem and int(mem_node.text) != mem * 1024:
-- mem_node.text = six.text_type(mem)
+- mem_node.text = str(mem)
- mem_node.set("unit", "MiB")
- need_update = True
+ data = {k: v for k, v in locals().items() if bool(v)}
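The setter helpers introduced above replace the removed per-tag branches with callbacks applied over a declarative path-to-XPath mapping. A standalone sketch of the same pattern using plain ElementTree (structure trimmed down, firmware path hypothetical):

.. code-block:: python

    import xml.etree.ElementTree as ET

    def set_loader(node, value):
        # mirrors _set_loader above: set the text, then the pflash attributes
        node.text = value
        if value is not None:
            node.set("readonly", "yes")
            node.set("type", "pflash")

    desc = ET.fromstring("<domain><os/></domain>")
    os_node = desc.find("os")
    loader = os_node.find("loader")
    if loader is None:
        loader = ET.SubElement(os_node, "loader")  # create the tag on demand
    set_loader(loader, "/usr/share/OVMF/OVMF_CODE.fd")
    print(ET.tostring(desc).decode())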
@@ -1153,18 +786,7 @@ index a78c21e323..cd80fbe608 100644
# Update the XML definition with the new disks and diff changes
devices_node = desc.find("devices")
-@@ -2395,8 +2450,8 @@ def update(
- if func_locals.get(param, None) is not None
- ]:
- old = devices_node.findall(dev_type)
-- new = new_desc.findall("devices/{0}".format(dev_type))
-- changes[dev_type] = globals()["_diff_{0}_lists".format(dev_type)](old, new)
-+ new = new_desc.findall("devices/{}".format(dev_type))
-+ changes[dev_type] = globals()["_diff_{}_lists".format(dev_type)](old, new)
- if changes[dev_type]["deleted"] or changes[dev_type]["new"]:
- for item in old:
- devices_node.remove(item)
-@@ -2423,9 +2478,9 @@ def update(
+@@ -2434,9 +2491,9 @@ def update(
_disk_volume_create(conn, all_disks[idx])
if not test:
@@ -1177,25 +799,7 @@ index a78c21e323..cd80fbe608 100644
status["definition"] = True
except libvirt.libvirtError as err:
conn.close()
-@@ -2554,7 +2609,7 @@ def update(
- except libvirt.libvirtError as err:
- if "errors" not in status:
- status["errors"] = []
-- status["errors"].append(six.text_type(err))
-+ status["errors"].append(str(err))
-
- conn.close()
- return status
-@@ -2768,7 +2823,7 @@ def _node_info(conn):
- info = {
- "cpucores": raw[6],
- "cpumhz": raw[3],
-- "cpumodel": six.text_type(raw[0]),
-+ "cpumodel": str(raw[0]),
- "cpus": raw[2],
- "cputhreads": raw[7],
- "numanodes": raw[4],
-@@ -3207,24 +3262,21 @@ def get_profiles(hypervisor=None, **kwargs):
+@@ -3218,24 +3275,19 @@ def get_profiles(hypervisor=None, **kwargs):
for x in y
}
)
@@ -1208,14 +812,13 @@ index a78c21e323..cd80fbe608 100644
+ hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0]
virtconf = __salt__["config.get"]("virt", {})
for typ in ["disk", "nic"]:
-- _func = getattr(sys.modules[__name__], "_{0}_profile".format(typ))
-+ _func = getattr(sys.modules[__name__], "_{}_profile".format(typ))
- ret[typ] = {
+ _func = getattr(sys.modules[__name__], "_{}_profile".format(typ))
+- ret[typ] = {
- "default": _func(
- "default", hypervisor if hypervisor else default_hypervisor
- )
-+ "default": _func("default", hypervisor)
- }
+- }
++ ret[typ] = {"default": _func("default", hypervisor)}
if typ in virtconf:
ret.setdefault(typ, {})
for prf in virtconf[typ]:
@@ -1226,60 +829,7 @@ index a78c21e323..cd80fbe608 100644
return ret
-@@ -3506,7 +3558,7 @@ def create_xml_path(path, **kwargs):
- return create_xml_str(
- salt.utils.stringutils.to_unicode(fp_.read()), **kwargs
- )
-- except (OSError, IOError):
-+ except OSError:
- return False
-
-
-@@ -3564,7 +3616,7 @@ def define_xml_path(path, **kwargs):
- return define_xml_str(
- salt.utils.stringutils.to_unicode(fp_.read()), **kwargs
- )
-- except (OSError, IOError):
-+ except OSError:
- return False
-
-
-@@ -3576,7 +3628,7 @@ def _define_vol_xml_str(conn, xml, pool=None): # pylint: disable=redefined-oute
- poolname = (
- pool if pool else __salt__["config.get"]("virt:storagepool", default_pool)
- )
-- pool = conn.storagePoolLookupByName(six.text_type(poolname))
-+ pool = conn.storagePoolLookupByName(str(poolname))
- ret = pool.createXML(xml, 0) is not None
- return ret
-
-@@ -3660,7 +3712,7 @@ def define_vol_xml_path(path, pool=None, **kwargs):
- return define_vol_xml_str(
- salt.utils.stringutils.to_unicode(fp_.read()), pool=pool, **kwargs
- )
-- except (OSError, IOError):
-+ except OSError:
- return False
-
-
-@@ -3777,7 +3829,7 @@ def seed_non_shared_migrate(disks, force=False):
-
- salt '*' virt.seed_non_shared_migrate
- """
-- for _, data in six.iteritems(disks):
-+ for _, data in disks.items():
- fn_ = data["file"]
- form = data["file format"]
- size = data["virtual size"].split()[1][1:]
-@@ -3921,14 +3973,14 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
- # TODO create solution for 'dataset is busy'
- time.sleep(3)
- fs_name = disks[disk]["file"][len("/dev/zvol/") :]
-- log.info("Destroying VM ZFS volume {0}".format(fs_name))
-+ log.info("Destroying VM ZFS volume {}".format(fs_name))
- __salt__["zfs.destroy"](name=fs_name, force=True)
- elif os.path.exists(disks[disk]["file"]):
- os.remove(disks[disk]["file"])
+@@ -4043,7 +4095,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
@@ -1288,74 +838,7 @@ index a78c21e323..cd80fbe608 100644
if matcher:
pool_name = matcher.group("pool")
pool = None
-@@ -3975,7 +4027,7 @@ def _is_kvm_hyper():
- with salt.utils.files.fopen("/proc/modules") as fp_:
- if "kvm_" not in salt.utils.stringutils.to_unicode(fp_.read()):
- return False
-- except IOError:
-+ except OSError:
- # No /proc/modules? Are we on Windows? Or Solaris?
- return False
- return "libvirtd" in __salt__["cmd.run"](__grains__["ps"])
-@@ -3995,7 +4047,7 @@ def _is_xen_hyper():
- with salt.utils.files.fopen("/proc/modules") as fp_:
- if "xen_" not in salt.utils.stringutils.to_unicode(fp_.read()):
- return False
-- except (OSError, IOError):
-+ except OSError:
- # No /proc/modules? Are we on Windows? Or Solaris?
- return False
- return "libvirtd" in __salt__["cmd.run"](__grains__["ps"])
-@@ -4110,7 +4162,7 @@ def vm_cputime(vm_=None, **kwargs):
- cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
- return {
- "cputime": int(raw[4]),
-- "cputime_percent": int("{0:.0f}".format(cputime_percent)),
-+ "cputime_percent": int("{:.0f}".format(cputime_percent)),
- }
-
- info = {}
-@@ -4180,7 +4232,7 @@ def vm_netstats(vm_=None, **kwargs):
- "tx_errs": 0,
- "tx_drop": 0,
- }
-- for attrs in six.itervalues(nics):
-+ for attrs in nics.values():
- if "target" in attrs:
- dev = attrs["target"]
- stats = dom.interfaceStats(dev)
-@@ -4508,7 +4560,7 @@ def revert_snapshot(name, vm_snapshot=None, cleanup=False, **kwargs):
- conn.close()
- raise CommandExecutionError(
- snapshot
-- and 'Snapshot "{0}" not found'.format(vm_snapshot)
-+ and 'Snapshot "{}" not found'.format(vm_snapshot)
- or "No more previous snapshots available"
- )
- elif snap.isCurrent():
-@@ -5102,10 +5154,10 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
- ]
-
- if not cpu_specs:
-- raise ValueError("Model {0} not found in CPU map".format(cpu_model))
-+ raise ValueError("Model {} not found in CPU map".format(cpu_model))
- elif len(cpu_specs) > 1:
- raise ValueError(
-- "Multiple models {0} found in CPU map".format(cpu_model)
-+ "Multiple models {} found in CPU map".format(cpu_model)
- )
-
- cpu_specs = cpu_specs[0]
-@@ -5126,7 +5178,7 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
- "vendor": cpu.find("vendor").text,
- "features": [feature.get("name") for feature in cpu.findall("feature")],
- }
-- return cpu.toxml()
-+ return ElementTree.tostring(cpu)
-
-
- def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **kwargs):
-@@ -5250,7 +5302,7 @@ def list_networks(**kwargs):
+@@ -5431,7 +5483,7 @@ def list_networks(**kwargs):
def network_info(name=None, **kwargs):
"""
@@ -1364,40 +847,7 @@ index a78c21e323..cd80fbe608 100644
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
-@@ -5446,20 +5498,20 @@ def _parse_pools_caps(doc):
- for option_kind in ["pool", "vol"]:
- options = {}
- default_format_node = pool.find(
-- "{0}Options/defaultFormat".format(option_kind)
-+ "{}Options/defaultFormat".format(option_kind)
- )
- if default_format_node is not None:
- options["default_format"] = default_format_node.get("type")
- options_enums = {
- enum.get("name"): [value.text for value in enum.findall("value")]
-- for enum in pool.findall("{0}Options/enum".format(option_kind))
-+ for enum in pool.findall("{}Options/enum".format(option_kind))
- }
- if options_enums:
- options.update(options_enums)
- if options:
- if "options" not in pool_caps:
- pool_caps["options"] = {}
-- kind = option_kind if option_kind is not "vol" else "volume"
-+ kind = option_kind if option_kind != "vol" else "volume"
- pool_caps["options"][kind] = options
- return pool_caps
-
-@@ -5695,7 +5747,7 @@ def pool_define(
- keys. The path is the qualified name for iSCSI devices.
-
- Report to `this libvirt page `_
-- for more informations on the use of ``part_separator``
-+ for more information on the use of ``part_separator``
- :param source_dir:
- Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``.
- (Default: ``None``)
-@@ -5847,15 +5899,19 @@ def _pool_set_secret(
+@@ -6028,15 +6080,19 @@ def _pool_set_secret(
if secret_type:
# Get the previously defined secret if any
secret = None
@@ -1426,16 +876,7 @@ index a78c21e323..cd80fbe608 100644
# Create secret if needed
if not secret:
-@@ -5918,7 +5974,7 @@ def pool_update(
- keys. The path is the qualified name for iSCSI devices.
-
- Report to `this libvirt page `_
-- for more informations on the use of ``part_separator``
-+ for more information on the use of ``part_separator``
- :param source_dir:
- Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``.
- (Default: ``None``)
-@@ -6107,7 +6163,7 @@ def list_pools(**kwargs):
+@@ -6288,7 +6344,7 @@ def list_pools(**kwargs):
def pool_info(name=None, **kwargs):
"""
@@ -1444,30 +885,7 @@ index a78c21e323..cd80fbe608 100644
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
-@@ -6283,6 +6339,22 @@ def pool_undefine(name, **kwargs):
- conn = __get_conn(**kwargs)
- try:
- pool = conn.storagePoolLookupByName(name)
-+ desc = ElementTree.fromstring(pool.XMLDesc())
-+
-+ # Is there a secret that we generated and would need to be removed?
-+ # Don't remove the other secrets
-+ auth_node = desc.find("source/auth")
-+ if auth_node is not None:
-+ auth_types = {
-+ "ceph": libvirt.VIR_SECRET_USAGE_TYPE_CEPH,
-+ "iscsi": libvirt.VIR_SECRET_USAGE_TYPE_ISCSI,
-+ }
-+ secret_type = auth_types[auth_node.get("type")]
-+ secret_usage = auth_node.find("secret").get("usage")
-+ if secret_type and "pool_{}".format(name) == secret_usage:
-+ secret = conn.secretLookupByUsage(secret_type, secret_usage)
-+ secret.undefine()
-+
- return not bool(pool.undefine())
- finally:
- conn.close()
-@@ -6308,22 +6380,6 @@ def pool_delete(name, **kwargs):
+@@ -6505,22 +6561,6 @@ def pool_delete(name, **kwargs):
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
@@ -1490,55 +908,11 @@ index a78c21e323..cd80fbe608 100644
return not bool(pool.delete(libvirt.VIR_STORAGE_POOL_DELETE_NORMAL))
finally:
conn.close()
-@@ -6768,7 +6824,7 @@ def _volume_upload(conn, pool, volume, file, offset=0, length=0, sparse=False):
- stream.abort()
- if ret:
- raise CommandExecutionError(
-- "Failed to close file: {0}".format(err.strerror)
-+ "Failed to close file: {}".format(err.strerror)
- )
- if stream:
- try:
-@@ -6776,7 +6832,7 @@ def _volume_upload(conn, pool, volume, file, offset=0, length=0, sparse=False):
- except libvirt.libvirtError as err:
- if ret:
- raise CommandExecutionError(
-- "Failed to finish stream: {0}".format(err.get_error_message())
-+ "Failed to finish stream: {}".format(err.get_error_message())
- )
- return ret
-
diff --git a/salt/states/virt.py b/salt/states/virt.py
-index fdef002293..3d99fd53c8 100644
+index cb15d57d8f..b45cf72ed3 100644
--- a/salt/states/virt.py
+++ b/salt/states/virt.py
-@@ -1,4 +1,3 @@
--# -*- coding: utf-8 -*-
- """
- Manage virt
- ===========
-@@ -13,9 +12,9 @@ for the generation and signing of certificates for systems running libvirt:
- """
-
- # Import Python libs
--from __future__ import absolute_import, print_function, unicode_literals
-
- import fnmatch
-+import logging
- import os
-
- # Import Salt libs
-@@ -25,9 +24,6 @@ import salt.utils.stringutils
- import salt.utils.versions
- from salt.exceptions import CommandExecutionError, SaltInvocationError
-
--# Import 3rd-party libs
--from salt.ext import six
--
- try:
- import libvirt # pylint: disable=import-error
-
-@@ -38,6 +34,8 @@ except ImportError:
+@@ -33,6 +33,8 @@ except ImportError:
__virtualname__ = "virt"
@@ -1547,56 +921,7 @@ index fdef002293..3d99fd53c8 100644
def __virtual__():
"""
-@@ -99,8 +97,8 @@ def keys(name, basepath="/etc/pki", **kwargs):
- # rename them to something hopefully unique to avoid
- # overriding anything existing
- pillar_kwargs = {}
-- for key, value in six.iteritems(kwargs):
-- pillar_kwargs["ext_pillar_virt.{0}".format(key)] = value
-+ for key, value in kwargs.items():
-+ pillar_kwargs["ext_pillar_virt.{}".format(key)] = value
-
- pillar = __salt__["pillar.ext"]({"libvirt": "_"}, pillar_kwargs)
- paths = {
-@@ -112,7 +110,7 @@ def keys(name, basepath="/etc/pki", **kwargs):
- }
-
- for key in paths:
-- p_key = "libvirt.{0}.pem".format(key)
-+ p_key = "libvirt.{}.pem".format(key)
- if p_key not in pillar:
- continue
- if not os.path.exists(os.path.dirname(paths[key])):
-@@ -134,7 +132,7 @@ def keys(name, basepath="/etc/pki", **kwargs):
- for key in ret["changes"]:
- with salt.utils.files.fopen(paths[key], "w+") as fp_:
- fp_.write(
-- salt.utils.stringutils.to_str(pillar["libvirt.{0}.pem".format(key)])
-+ salt.utils.stringutils.to_str(pillar["libvirt.{}.pem".format(key)])
- )
-
- ret["comment"] = "Updated libvirt certs and keys"
-@@ -176,7 +174,7 @@ def _virt_call(
- domain_state = __salt__["virt.vm_state"](targeted_domain)
- action_needed = domain_state.get(targeted_domain) != state
- if action_needed:
-- response = __salt__["virt.{0}".format(function)](
-+ response = __salt__["virt.{}".format(function)](
- targeted_domain,
- connection=connection,
- username=username,
-@@ -189,9 +187,7 @@ def _virt_call(
- else:
- noaction_domains.append(targeted_domain)
- except libvirt.libvirtError as err:
-- ignored_domains.append(
-- {"domain": targeted_domain, "issue": six.text_type(err)}
-- )
-+ ignored_domains.append({"domain": targeted_domain, "issue": str(err)})
- if not changed_domains:
- ret["result"] = not ignored_domains and bool(targeted_domains)
- ret["comment"] = "No changes had happened"
-@@ -292,6 +288,7 @@ def defined(
+@@ -285,6 +287,7 @@ def defined(
arch=None,
boot=None,
update=True,
@@ -1604,7 +929,7 @@ index fdef002293..3d99fd53c8 100644
):
"""
Starts an existing guest, or defines and starts a new VM with specified arguments.
-@@ -352,6 +349,14 @@ def defined(
+@@ -345,6 +348,14 @@ def defined(
.. deprecated:: sodium
@@ -1619,7 +944,7 @@ index fdef002293..3d99fd53c8 100644
.. rubric:: Example States
Make sure a virtual machine called ``domain_name`` is defined:
-@@ -362,6 +367,7 @@ def defined(
+@@ -355,6 +366,7 @@ def defined(
virt.defined:
- cpu: 2
- mem: 2048
@@ -1627,7 +952,7 @@ index fdef002293..3d99fd53c8 100644
- disk_profile: prod
- disks:
- name: system
-@@ -414,17 +420,18 @@ def defined(
+@@ -407,6 +419,7 @@ def defined(
password=password,
boot=boot,
test=__opts__["test"],
@@ -1635,37 +960,15 @@ index fdef002293..3d99fd53c8 100644
)
ret["changes"][name] = status
if not status.get("definition"):
-- ret["comment"] = "Domain {0} unchanged".format(name)
-+ ret["comment"] = "Domain {} unchanged".format(name)
- ret["result"] = True
- elif status.get("errors"):
- ret[
- "comment"
-- ] = "Domain {0} updated with live update(s) failures".format(name)
-+ ] = "Domain {} updated with live update(s) failures".format(name)
- else:
-- ret["comment"] = "Domain {0} updated".format(name)
-+ ret["comment"] = "Domain {} updated".format(name)
- else:
- if not __opts__["test"]:
- __salt__["virt.init"](
-@@ -448,12 +455,13 @@ def defined(
+@@ -441,6 +454,7 @@ def defined(
password=password,
boot=boot,
start=False,
+ boot_dev=boot_dev,
)
ret["changes"][name] = {"definition": True}
-- ret["comment"] = "Domain {0} defined".format(name)
-+ ret["comment"] = "Domain {} defined".format(name)
- except libvirt.libvirtError as err:
- # Something bad happened when defining / updating the VM, report it
-- ret["comment"] = six.text_type(err)
-+ ret["comment"] = str(err)
- ret["result"] = False
-
- return ret
-@@ -480,6 +488,7 @@ def running(
+ ret["comment"] = "Domain {} defined".format(name)
+@@ -473,6 +487,7 @@ def running(
os_type=None,
arch=None,
boot=None,
@@ -1673,7 +976,7 @@ index fdef002293..3d99fd53c8 100644
):
"""
Starts an existing guest, or defines and starts a new VM with specified arguments.
-@@ -591,6 +600,14 @@ def running(
+@@ -584,6 +599,14 @@ def running(
.. versionadded:: 3000
@@ -1688,15 +991,7 @@ index fdef002293..3d99fd53c8 100644
.. rubric:: Example States
Make sure an already-defined virtual machine called ``domain_name`` is running:
-@@ -609,6 +626,7 @@ def running(
- - cpu: 2
- - mem: 2048
- - disk_profile: prod
-+ - boot_dev: network hd
- - disks:
- - name: system
- size: 8192
-@@ -657,6 +675,7 @@ def running(
+@@ -651,6 +674,7 @@ def running(
arch=arch,
boot=boot,
update=update,
@@ -1704,91 +999,7 @@ index fdef002293..3d99fd53c8 100644
connection=connection,
username=username,
password=password,
-@@ -681,11 +700,11 @@ def running(
- ret["comment"] = comment
- ret["changes"][name]["started"] = True
- elif not changed:
-- ret["comment"] = "Domain {0} exists and is running".format(name)
-+ ret["comment"] = "Domain {} exists and is running".format(name)
-
- except libvirt.libvirtError as err:
- # Something bad happened when starting / updating the VM, report it
-- ret["comment"] = six.text_type(err)
-+ ret["comment"] = str(err)
- ret["result"] = False
-
- return ret
-@@ -830,7 +849,7 @@ def reverted(
- try:
- domains = fnmatch.filter(__salt__["virt.list_domains"](), name)
- if not domains:
-- ret["comment"] = 'No domains found for criteria "{0}"'.format(name)
-+ ret["comment"] = 'No domains found for criteria "{}"'.format(name)
- else:
- ignored_domains = list()
- if len(domains) > 1:
-@@ -848,9 +867,7 @@ def reverted(
- }
- except CommandExecutionError as err:
- if len(domains) > 1:
-- ignored_domains.append(
-- {"domain": domain, "issue": six.text_type(err)}
-- )
-+ ignored_domains.append({"domain": domain, "issue": str(err)})
- if len(domains) > 1:
- if result:
- ret["changes"]["reverted"].append(result)
-@@ -860,7 +877,7 @@ def reverted(
-
- ret["result"] = len(domains) != len(ignored_domains)
- if ret["result"]:
-- ret["comment"] = "Domain{0} has been reverted".format(
-+ ret["comment"] = "Domain{} has been reverted".format(
- len(domains) > 1 and "s" or ""
- )
- if ignored_domains:
-@@ -868,9 +885,9 @@ def reverted(
- if not ret["changes"]["reverted"]:
- ret["changes"].pop("reverted")
- except libvirt.libvirtError as err:
-- ret["comment"] = six.text_type(err)
-+ ret["comment"] = str(err)
- except CommandExecutionError as err:
-- ret["comment"] = six.text_type(err)
-+ ret["comment"] = str(err)
-
- return ret
-
-@@ -955,7 +972,7 @@ def network_defined(
- name, connection=connection, username=username, password=password
- )
- if info and info[name]:
-- ret["comment"] = "Network {0} exists".format(name)
-+ ret["comment"] = "Network {} exists".format(name)
- ret["result"] = True
- else:
- if not __opts__["test"]:
-@@ -974,7 +991,7 @@ def network_defined(
- password=password,
- )
- ret["changes"][name] = "Network defined"
-- ret["comment"] = "Network {0} defined".format(name)
-+ ret["comment"] = "Network {} defined".format(name)
- except libvirt.libvirtError as err:
- ret["result"] = False
- ret["comment"] = err.get_error_message()
-@@ -1108,6 +1125,10 @@ def network_running(
- return ret
-
-
-+# Some of the libvirt storage drivers do not support the build action
-+BUILDABLE_POOL_TYPES = {"disk", "fs", "netfs", "dir", "logical", "vstorage", "zfs"}
-+
-+
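``BUILDABLE_POOL_TYPES`` is consulted further down so that ``virt.pool_build`` is only attempted for pool types whose libvirt driver implements the build action; a trivial standalone sketch of the guard (function name hypothetical):

.. code-block:: python

    BUILDABLE_POOL_TYPES = {"disk", "fs", "netfs", "dir", "logical", "vstorage", "zfs"}

    def needs_build(ptype):
        # iscsi, rbd and the like have no build step in libvirt
        return ptype in BUILDABLE_POOL_TYPES

    assert needs_build("netfs")
    assert not needs_build("iscsi")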
- def pool_defined(
- name,
- ptype=None,
-@@ -1222,25 +1243,35 @@ def pool_defined(
+@@ -1218,14 +1242,24 @@ def pool_defined(
action = ""
if info[name]["state"] != "running":
@@ -1821,21 +1032,7 @@ index fdef002293..3d99fd53c8 100644
action = (
"{}, autostart flag changed".format(action)
- if needs_autostart
- else action
- )
-- ret["changes"][name] = "Pool updated{0}".format(action)
-- ret["comment"] = "Pool {0} updated{1}".format(name, action)
-+ ret["changes"][name] = "Pool updated{}".format(action)
-+ ret["comment"] = "Pool {} updated{}".format(name, action)
-
- else:
-- ret["comment"] = "Pool {0} unchanged".format(name)
-+ ret["comment"] = "Pool {} unchanged".format(name)
- ret["result"] = True
- else:
- needs_autostart = autostart
-@@ -1265,15 +1296,28 @@ def pool_defined(
+@@ -1261,9 +1295,22 @@ def pool_defined(
password=password,
)
@@ -1860,16 +1057,8 @@ index fdef002293..3d99fd53c8 100644
+ )
if needs_autostart:
ret["changes"][name] = "Pool defined, marked for autostart"
-- ret["comment"] = "Pool {0} defined, marked for autostart".format(name)
-+ ret["comment"] = "Pool {} defined, marked for autostart".format(name)
- else:
- ret["changes"][name] = "Pool defined"
-- ret["comment"] = "Pool {0} defined".format(name)
-+ ret["comment"] = "Pool {} defined".format(name)
-
- if needs_autostart:
- if not __opts__["test"]:
-@@ -1374,7 +1418,7 @@ def pool_running(
+ ret["comment"] = "Pool {} defined, marked for autostart".format(name)
+@@ -1370,7 +1417,7 @@ def pool_running(
is_running = info.get(name, {}).get("state", "stopped") == "running"
if is_running:
if updated:
@@ -1878,7 +1067,7 @@ index fdef002293..3d99fd53c8 100644
if not __opts__["test"]:
__salt__["virt.pool_stop"](
name,
-@@ -1382,13 +1426,16 @@ def pool_running(
+@@ -1378,13 +1425,16 @@ def pool_running(
username=username,
password=password,
)
@@ -1902,2323 +1091,73 @@ index fdef002293..3d99fd53c8 100644
else:
action = "already running"
result = True
-@@ -1402,16 +1449,16 @@ def pool_running(
- password=password,
- )
-
-- comment = "Pool {0}".format(name)
-+ comment = "Pool {}".format(name)
- change = "Pool"
- if name in ret["changes"]:
-- comment = "{0},".format(ret["comment"])
-- change = "{0},".format(ret["changes"][name])
-+ comment = "{},".format(ret["comment"])
-+ change = "{},".format(ret["changes"][name])
-
- if action != "already running":
-- ret["changes"][name] = "{0} {1}".format(change, action)
-+ ret["changes"][name] = "{} {}".format(change, action)
-
-- ret["comment"] = "{0} {1}".format(comment, action)
-+ ret["comment"] = "{} {}".format(comment, action)
- ret["result"] = result
-
- except libvirt.libvirtError as err:
-@@ -1539,15 +1586,13 @@ def pool_deleted(name, purge=False, connection=None, username=None, password=Non
- ret["result"] = None
-
- if unsupported:
-- ret[
-- "comment"
-- ] = 'Unsupported actions for pool of type "{0}": {1}'.format(
-+ ret["comment"] = 'Unsupported actions for pool of type "{}": {}'.format(
- info[name]["type"], ", ".join(unsupported)
- )
- else:
-- ret["comment"] = "Storage pool could not be found: {0}".format(name)
-+ ret["comment"] = "Storage pool could not be found: {}".format(name)
- except libvirt.libvirtError as err:
-- ret["comment"] = "Failed deleting pool: {0}".format(err.get_error_message())
-+ ret["comment"] = "Failed deleting pool: {}".format(err.get_error_message())
- ret["result"] = False
-
- return ret
diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
-index aac6283eb0..04a61ffa78 100644
+index 439ed83f7f..2a2f5e4141 100644
--- a/salt/templates/virt/libvirt_domain.jinja
+++ b/salt/templates/virt/libvirt_domain.jinja
-@@ -3,7 +3,7 @@
+@@ -2,32 +2,9 @@
+
+ <name>{{ name }}</name>
<vcpu>{{ cpu }}</vcpu>
- <memory unit='KiB'>{{ mem }}</memory>
- <currentMemory unit='KiB'>{{ mem }}</currentMemory>
--
+- {%- if mem.max %}
+- <maxMemory unit='KiB'>{{ mem.max }}</maxMemory>
+- {%- endif %}
+- {%- if mem.boot %}
+- <memory unit='KiB'>{{ mem.boot }}</memory>
+- {%- endif %}
+- {%- if mem.current %}
+- <currentMemory unit='KiB'>{{ mem.current }}</currentMemory>
+- {%- endif %}
+- {%- if mem %}
+- <memtune>
+- {%- if 'hard_limit' in mem and mem.hard_limit %}
+- <hard_limit unit='KiB'>{{ mem.hard_limit }}</hard_limit>
+- {%- endif %}
+- {%- if 'soft_limit' in mem and mem.soft_limit %}
+- <soft_limit unit='KiB'>{{ mem.soft_limit }}</soft_limit>
+- {%- endif %}
+- {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %}
+- <swap_hard_limit unit='KiB'>{{ mem.swap_hard_limit }}</swap_hard_limit>
+- {%- endif %}
+- {%- if 'min_guarantee' in mem and mem.min_guarantee %}
+- <min_guarantee unit='KiB'>{{ mem.min_guarantee }}</min_guarantee>
+- {%- endif %}
+- </memtune>
+- {%- endif %}
+-
++ <memory unit='KiB'>{{ mem }}</memory>
++ <currentMemory unit='KiB'>{{ mem }}</currentMemory>
+
<type arch='{{ arch }}'>{{ os_type }}</type>
{% if boot %}
{% if 'kernel' in boot %}
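With the dictionary-valued ``mem`` support stripped back out, the template again expects a single amount that the caller has converted to KiB; a tiny jinja2 sketch of the surviving line (fragment only, unit assumed KiB as in the upstream template):

.. code-block:: python

    import jinja2

    fragment = "<memory unit='KiB'>{{ mem }}</memory>"
    # a 2048 MiB guest, converted to KiB before rendering
    print(jinja2.Template(fragment).render(mem=2048 * 1024))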
-diff --git a/salt/utils/data.py b/salt/utils/data.py
-index 8f84c2ea42..1c4c22efb3 100644
---- a/salt/utils/data.py
-+++ b/salt/utils/data.py
-@@ -1,22 +1,16 @@
- # -*- coding: utf-8 -*-
--'''
-+"""
- Functions for manipulating, inspecting, or otherwise working with data types
- and data structures.
--'''
-+"""
-
--from __future__ import absolute_import, print_function, unicode_literals
-
- # Import Python libs
- import copy
- import fnmatch
-+import functools
- import logging
- import re
--import functools
--
--try:
-- from collections.abc import Mapping, MutableMapping, Sequence
--except ImportError:
-- from collections import Mapping, MutableMapping, Sequence
-
- # Import Salt libs
- import salt.utils.dictupdate
-@@ -24,13 +18,22 @@ import salt.utils.stringutils
- import salt.utils.yaml
- from salt.defaults import DEFAULT_TARGET_DELIM
- from salt.exceptions import SaltException
--from salt.utils.decorators.jinja import jinja_filter
--from salt.utils.odict import OrderedDict
-+from salt.ext import six
-
- # Import 3rd-party libs
--from salt.ext.six.moves import zip # pylint: disable=redefined-builtin
--from salt.ext import six
- from salt.ext.six.moves import range # pylint: disable=redefined-builtin
-+from salt.ext.six.moves import zip # pylint: disable=redefined-builtin
-+from salt.utils.decorators.jinja import jinja_filter
-+from salt.utils.odict import OrderedDict
-+
-+try:
-+ from collections.abc import Mapping, MutableMapping, Sequence
-+except ImportError:
-+ # pylint: disable=no-name-in-module
-+ from collections import Mapping, MutableMapping, Sequence
-+
-+ # pylint: enable=no-name-in-module
-+
-
- try:
- import jmespath
-@@ -41,15 +44,16 @@ log = logging.getLogger(__name__)
-
-
- class CaseInsensitiveDict(MutableMapping):
-- '''
-+ """
- Inspired by requests' case-insensitive dict implementation, but works with
- non-string keys as well.
-- '''
-+ """
-+
- def __init__(self, init=None, **kwargs):
-- '''
-+ """
- Force internal dict to be ordered to ensure a consistent iteration
- order, irrespective of case.
-- '''
-+ """
- self._data = OrderedDict()
- self.update(init or {}, **kwargs)
-
-@@ -67,7 +71,7 @@ class CaseInsensitiveDict(MutableMapping):
- return self._data[to_lowercase(key)][1]
-
- def __iter__(self):
-- return (item[0] for item in six.itervalues(self._data))
-+ return (item[0] for item in self._data.values())
-
- def __eq__(self, rval):
- if not isinstance(rval, Mapping):
-@@ -76,28 +80,28 @@ class CaseInsensitiveDict(MutableMapping):
- return dict(self.items_lower()) == dict(CaseInsensitiveDict(rval).items_lower())
-
- def __repr__(self):
-- return repr(dict(six.iteritems(self)))
-+ return repr(dict(self.items()))
-
- def items_lower(self):
-- '''
-+ """
- Returns a generator iterating over keys and values, with the keys all
- being lowercase.
-- '''
-- return ((key, val[1]) for key, val in six.iteritems(self._data))
-+ """
-+ return ((key, val[1]) for key, val in self._data.items())
-
- def copy(self):
-- '''
-+ """
- Returns a copy of the object
-- '''
-- return CaseInsensitiveDict(six.iteritems(self._data))
-+ """
-+ return CaseInsensitiveDict(self._data.items())
-
-
- def __change_case(data, attr, preserve_dict_class=False):
-- '''
-+ """
- Calls data.attr() if data has an attribute/method called attr.
- Processes data recursively if data is a Mapping or Sequence.
- For Mapping, processes both keys and values.
-- '''
-+ """
- try:
- return getattr(data, attr)()
- except AttributeError:
-@@ -107,73 +111,120 @@ def __change_case(data, attr, preserve_dict_class=False):
-
- if isinstance(data, Mapping):
- return (data_type if preserve_dict_class else dict)(
-- (__change_case(key, attr, preserve_dict_class),
-- __change_case(val, attr, preserve_dict_class))
-- for key, val in six.iteritems(data)
-+ (
-+ __change_case(key, attr, preserve_dict_class),
-+ __change_case(val, attr, preserve_dict_class),
-+ )
-+ for key, val in data.items()
- )
- if isinstance(data, Sequence):
- return data_type(
-- __change_case(item, attr, preserve_dict_class) for item in data)
-+ __change_case(item, attr, preserve_dict_class) for item in data
-+ )
- return data
-
-
- def to_lowercase(data, preserve_dict_class=False):
-- '''
-+ """
- Recursively changes everything in data to lowercase.
-- '''
-- return __change_case(data, 'lower', preserve_dict_class)
-+ """
-+ return __change_case(data, "lower", preserve_dict_class)
-
-
- def to_uppercase(data, preserve_dict_class=False):
-- '''
-+ """
- Recursively changes everything in data to uppercase.
-- '''
-- return __change_case(data, 'upper', preserve_dict_class)
-+ """
-+ return __change_case(data, "upper", preserve_dict_class)
-
-
--@jinja_filter('compare_dicts')
-+@jinja_filter("compare_dicts")
- def compare_dicts(old=None, new=None):
-- '''
-+ """
- Compare before and after results from various salt functions, returning a
- dict describing the changes that were made.
-- '''
-+ """
- ret = {}
-- for key in set((new or {})).union((old or {})):
-+ for key in set(new or {}).union(old or {}):
- if key not in old:
- # New key
-- ret[key] = {'old': '',
-- 'new': new[key]}
-+ ret[key] = {"old": "", "new": new[key]}
- elif key not in new:
- # Key removed
-- ret[key] = {'new': '',
-- 'old': old[key]}
-+ ret[key] = {"new": "", "old": old[key]}
- elif new[key] != old[key]:
- # Key modified
-- ret[key] = {'old': old[key],
-- 'new': new[key]}
-+ ret[key] = {"old": old[key], "new": new[key]}
- return ret
-
-
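A quick sketch of ``compare_dicts`` as reformatted above (package names invented):

.. code-block:: python

    import salt.utils.data

    old = {"vim": "8.0", "git": "2.26"}
    new = {"vim": "8.2", "curl": "7.66"}
    print(salt.utils.data.compare_dicts(old, new))
    # {'vim': {'old': '8.0', 'new': '8.2'},
    #  'git': {'new': '', 'old': '2.26'},
    #  'curl': {'old': '', 'new': '7.66'}}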
--@jinja_filter('compare_lists')
-+@jinja_filter("compare_lists")
- def compare_lists(old=None, new=None):
-- '''
-+ """
- Compare before and after results from various salt functions, returning a
- dict describing the changes that were made
-- '''
-+ """
- ret = {}
- for item in new:
- if item not in old:
-- ret.setdefault('new', []).append(item)
-+ ret.setdefault("new", []).append(item)
- for item in old:
- if item not in new:
-- ret.setdefault('old', []).append(item)
-+ ret.setdefault("old", []).append(item)
- return ret
-
-
--def decode(data, encoding=None, errors='strict', keep=False,
-- normalize=False, preserve_dict_class=False, preserve_tuples=False,
-- to_str=False):
-- '''
-+def _remove_circular_refs(ob, _seen=None):
-+ """
-+ Generic method to remove circular references from objects.
-+ This has been taken from Martijn Pieters' answer at
-+ https://stackoverflow.com/questions/44777369/
-+ remove-circular-references-in-dicts-lists-tuples/44777477#44777477
-+ :param ob: dict, list, tuple, set, or frozenset
-+ Standard Python object
-+ :param object _seen:
-+ Object that has circular reference
-+ :returns:
-+ Cleaned Python object
-+ :rtype:
-+ type(ob)
-+ """
-+ if _seen is None:
-+ _seen = set()
-+ if id(ob) in _seen:
-+ # Here we caught a circular reference.
-+ # Alert user and cleanup to continue.
-+ log.exception(
-+ "Caught a circular reference in data structure below."
-+ "Cleaning and continuing execution.\n%r\n",
-+ ob,
-+ )
-+ return None
-+ _seen.add(id(ob))
-+ res = ob
-+ if isinstance(ob, dict):
-+ res = {
-+ _remove_circular_refs(k, _seen): _remove_circular_refs(v, _seen)
-+ for k, v in ob.items()
-+ }
-+ elif isinstance(ob, (list, tuple, set, frozenset)):
-+ res = type(ob)(_remove_circular_refs(v, _seen) for v in ob)
-+ # remove id again; only *nested* references count
-+ _seen.remove(id(ob))
-+ return res
-+
-+
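A short usage sketch of the new helper (assuming this patched ``salt.utils.data`` is importable):

.. code-block:: python

    import salt.utils.data

    d = {"name": "minion"}
    d["self"] = d  # deliberate circular reference
    clean = salt.utils.data._remove_circular_refs(d)
    assert clean["self"] is None  # the nested self-reference is dropped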
-+def decode(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ normalize=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+ to_str=False,
-+):
-+ """
- Generic function which will decode whichever type is passed, if necessary.
- Optionally use to_str=True to ensure strings are str types and not unicode
- on Python 2.
-@@ -199,22 +250,55 @@ def decode(data, encoding=None, errors='strict', keep=False,
- two strings above, in which "й" is represented as two code points (i.e. one
- for the base character, and one for the breve mark). Normalizing allows for
- a more reliable test case.
-- '''
-- _decode_func = salt.utils.stringutils.to_unicode \
-- if not to_str \
-+
-+ """
-+ # Clean data object before decoding to avoid circular references
-+ data = _remove_circular_refs(data)
-+
-+ _decode_func = (
-+ salt.utils.stringutils.to_unicode
-+ if not to_str
- else salt.utils.stringutils.to_str
-+ )
- if isinstance(data, Mapping):
-- return decode_dict(data, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ return decode_dict(
-+ data,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- if isinstance(data, list):
-- return decode_list(data, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ return decode_list(
-+ data,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- if isinstance(data, tuple):
-- return decode_tuple(data, encoding, errors, keep, normalize,
-- preserve_dict_class, to_str) \
-- if preserve_tuples \
-- else decode_list(data, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ return (
-+ decode_tuple(
-+ data, encoding, errors, keep, normalize, preserve_dict_class, to_str
-+ )
-+ if preserve_tuples
-+ else decode_list(
-+ data,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
-+ )
- try:
- data = _decode_func(data, encoding, errors, normalize)
- except TypeError:
-@@ -228,25 +312,48 @@ def decode(data, encoding=None, errors='strict', keep=False,
- return data
-
-
--def decode_dict(data, encoding=None, errors='strict', keep=False,
-- normalize=False, preserve_dict_class=False,
-- preserve_tuples=False, to_str=False):
-- '''
-+def decode_dict(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ normalize=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+ to_str=False,
-+):
-+ """
- Decode all string values to Unicode. Optionally use to_str=True to ensure
- strings are str types and not unicode on Python 2.
-- '''
-- _decode_func = salt.utils.stringutils.to_unicode \
-- if not to_str \
-+ """
-+ # Clean data object before decoding to avoid circular references
-+ data = _remove_circular_refs(data)
-+
-+ _decode_func = (
-+ salt.utils.stringutils.to_unicode
-+ if not to_str
- else salt.utils.stringutils.to_str
-+ )
- # Make sure we preserve OrderedDicts
- ret = data.__class__() if preserve_dict_class else {}
-- for key, value in six.iteritems(data):
-+ for key, value in data.items():
- if isinstance(key, tuple):
-- key = decode_tuple(key, encoding, errors, keep, normalize,
-- preserve_dict_class, to_str) \
-- if preserve_tuples \
-- else decode_list(key, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ key = (
-+ decode_tuple(
-+ key, encoding, errors, keep, normalize, preserve_dict_class, to_str
-+ )
-+ if preserve_tuples
-+ else decode_list(
-+ key,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
-+ )
- else:
- try:
- key = _decode_func(key, encoding, errors, normalize)
-@@ -260,17 +367,50 @@ def decode_dict(data, encoding=None, errors='strict', keep=False,
- raise
-
- if isinstance(value, list):
-- value = decode_list(value, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ value = decode_list(
-+ value,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- elif isinstance(value, tuple):
-- value = decode_tuple(value, encoding, errors, keep, normalize,
-- preserve_dict_class, to_str) \
-- if preserve_tuples \
-- else decode_list(value, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ value = (
-+ decode_tuple(
-+ value,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ to_str,
-+ )
-+ if preserve_tuples
-+ else decode_list(
-+ value,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
-+ )
- elif isinstance(value, Mapping):
-- value = decode_dict(value, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ value = decode_dict(
-+ value,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- else:
- try:
- value = _decode_func(value, encoding, errors, normalize)
-@@ -287,30 +427,69 @@ def decode_dict(data, encoding=None, errors='strict', keep=False,
- return ret
-
-
--def decode_list(data, encoding=None, errors='strict', keep=False,
-- normalize=False, preserve_dict_class=False,
-- preserve_tuples=False, to_str=False):
-- '''
-+def decode_list(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ normalize=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+ to_str=False,
-+):
-+ """
- Decode all string values to Unicode. Optionally use to_str=True to ensure
- strings are str types and not unicode on Python 2.
-- '''
-- _decode_func = salt.utils.stringutils.to_unicode \
-- if not to_str \
-+ """
-+ # Clean data object before decoding to avoid circular references
-+ data = _remove_circular_refs(data)
-+
-+ _decode_func = (
-+ salt.utils.stringutils.to_unicode
-+ if not to_str
- else salt.utils.stringutils.to_str
-+ )
- ret = []
- for item in data:
- if isinstance(item, list):
-- item = decode_list(item, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ item = decode_list(
-+ item,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- elif isinstance(item, tuple):
-- item = decode_tuple(item, encoding, errors, keep, normalize,
-- preserve_dict_class, to_str) \
-- if preserve_tuples \
-- else decode_list(item, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ item = (
-+ decode_tuple(
-+ item, encoding, errors, keep, normalize, preserve_dict_class, to_str
-+ )
-+ if preserve_tuples
-+ else decode_list(
-+ item,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
-+ )
- elif isinstance(item, Mapping):
-- item = decode_dict(item, encoding, errors, keep, normalize,
-- preserve_dict_class, preserve_tuples, to_str)
-+ item = decode_dict(
-+ item,
-+ encoding,
-+ errors,
-+ keep,
-+ normalize,
-+ preserve_dict_class,
-+ preserve_tuples,
-+ to_str,
-+ )
- else:
- try:
- item = _decode_func(item, encoding, errors, normalize)
-@@ -327,21 +506,35 @@ def decode_list(data, encoding=None, errors='strict', keep=False,
- return ret
-
-
--def decode_tuple(data, encoding=None, errors='strict', keep=False,
-- normalize=False, preserve_dict_class=False, to_str=False):
-- '''
-+def decode_tuple(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ normalize=False,
-+ preserve_dict_class=False,
-+ to_str=False,
-+):
-+ """
- Decode all string values to Unicode. Optionally use to_str=True to ensure
- strings are str types and not unicode on Python 2.
-- '''
-+ """
- return tuple(
-- decode_list(data, encoding, errors, keep, normalize,
-- preserve_dict_class, True, to_str)
-+ decode_list(
-+ data, encoding, errors, keep, normalize, preserve_dict_class, True, to_str
-+ )
- )
-
-
--def encode(data, encoding=None, errors='strict', keep=False,
-- preserve_dict_class=False, preserve_tuples=False):
-- '''
-+def encode(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+):
-+ """
- Generic function which will encode whichever type is passed, if necessary
-
- If `strict` is True, and `keep` is False, and we fail to encode, a
-@@ -349,18 +542,27 @@ def encode(data, encoding=None, errors='strict', keep=False,
- original value to silently be returned in cases where encoding fails. This
- can be useful for cases where the data passed to this function is likely to
- contain binary blobs.
-- '''
-+
-+ """
-+ # Clean data object before encoding to avoid circular references
-+ data = _remove_circular_refs(data)
-+
- if isinstance(data, Mapping):
-- return encode_dict(data, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ return encode_dict(
-+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- if isinstance(data, list):
-- return encode_list(data, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ return encode_list(
-+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- if isinstance(data, tuple):
-- return encode_tuple(data, encoding, errors, keep, preserve_dict_class) \
-- if preserve_tuples \
-- else encode_list(data, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ return (
-+ encode_tuple(data, encoding, errors, keep, preserve_dict_class)
-+ if preserve_tuples
-+ else encode_list(
-+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
-+ )
- try:
- return salt.utils.stringutils.to_bytes(data, encoding, errors)
- except TypeError:
-@@ -374,20 +576,31 @@ def encode(data, encoding=None, errors='strict', keep=False,
- return data
-
-
--@jinja_filter('json_decode_dict') # Remove this for Aluminium
--@jinja_filter('json_encode_dict')
--def encode_dict(data, encoding=None, errors='strict', keep=False,
-- preserve_dict_class=False, preserve_tuples=False):
-- '''
-+@jinja_filter("json_decode_dict") # Remove this for Aluminium
-+@jinja_filter("json_encode_dict")
-+def encode_dict(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+):
-+ """
- Encode all string values to bytes
-- '''
-+ """
-+ # Clean data object before encoding to avoid circular references
-+ data = _remove_circular_refs(data)
- ret = data.__class__() if preserve_dict_class else {}
-- for key, value in six.iteritems(data):
-+ for key, value in data.items():
- if isinstance(key, tuple):
-- key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \
-- if preserve_tuples \
-- else encode_list(key, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ key = (
-+ encode_tuple(key, encoding, errors, keep, preserve_dict_class)
-+ if preserve_tuples
-+ else encode_list(
-+ key, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
-+ )
- else:
- try:
- key = salt.utils.stringutils.to_bytes(key, encoding, errors)
-@@ -401,16 +614,21 @@ def encode_dict(data, encoding=None, errors='strict', keep=False,
- raise
-
- if isinstance(value, list):
-- value = encode_list(value, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ value = encode_list(
-+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- elif isinstance(value, tuple):
-- value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \
-- if preserve_tuples \
-- else encode_list(value, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ value = (
-+ encode_tuple(value, encoding, errors, keep, preserve_dict_class)
-+ if preserve_tuples
-+ else encode_list(
-+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
-+ )
- elif isinstance(value, Mapping):
-- value = encode_dict(value, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ value = encode_dict(
-+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- else:
- try:
- value = salt.utils.stringutils.to_bytes(value, encoding, errors)
-@@ -427,26 +645,40 @@ def encode_dict(data, encoding=None, errors='strict', keep=False,
- return ret
-
-
--@jinja_filter('json_decode_list') # Remove this for Aluminium
--@jinja_filter('json_encode_list')
--def encode_list(data, encoding=None, errors='strict', keep=False,
-- preserve_dict_class=False, preserve_tuples=False):
-- '''
-+@jinja_filter("json_decode_list") # Remove this for Aluminium
-+@jinja_filter("json_encode_list")
-+def encode_list(
-+ data,
-+ encoding=None,
-+ errors="strict",
-+ keep=False,
-+ preserve_dict_class=False,
-+ preserve_tuples=False,
-+):
-+ """
- Encode all string values to bytes
-- '''
-+ """
-+ # Clean data object before encoding to avoid circular references
-+ data = _remove_circular_refs(data)
-+
- ret = []
- for item in data:
- if isinstance(item, list):
-- item = encode_list(item, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ item = encode_list(
-+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- elif isinstance(item, tuple):
-- item = encode_tuple(item, encoding, errors, keep, preserve_dict_class) \
-- if preserve_tuples \
-- else encode_list(item, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ item = (
-+ encode_tuple(item, encoding, errors, keep, preserve_dict_class)
-+ if preserve_tuples
-+ else encode_list(
-+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
-+ )
- elif isinstance(item, Mapping):
-- item = encode_dict(item, encoding, errors, keep,
-- preserve_dict_class, preserve_tuples)
-+ item = encode_dict(
-+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples
-+ )
- else:
- try:
- item = salt.utils.stringutils.to_bytes(item, encoding, errors)
-@@ -463,42 +695,37 @@ def encode_list(data, encoding=None, errors='strict', keep=False,
- return ret
-
-
--def encode_tuple(data, encoding=None, errors='strict', keep=False,
-- preserve_dict_class=False):
-- '''
-+def encode_tuple(
-+ data, encoding=None, errors="strict", keep=False, preserve_dict_class=False
-+):
-+ """
- Encode all string values to Unicode
-- '''
-- return tuple(
-- encode_list(data, encoding, errors, keep, preserve_dict_class, True))
-+ """
-+ return tuple(encode_list(data, encoding, errors, keep, preserve_dict_class, True))
-
-
--@jinja_filter('exactly_n_true')
-+@jinja_filter("exactly_n_true")
- def exactly_n(iterable, amount=1):
-- '''
-+ """
- Tests that exactly N items in an iterable are "truthy" (neither None,
- False, nor 0).
-- '''
-+ """
- i = iter(iterable)
- return all(any(i) for j in range(amount)) and not any(i)
-
-
--@jinja_filter('exactly_one_true')
-+@jinja_filter("exactly_one_true")
- def exactly_one(iterable):
-- '''
-+ """
- Check if only one item is not None, False, or 0 in an iterable.
-- '''
-+ """
- return exactly_n(iterable)
-
-
--def filter_by(lookup_dict,
-- lookup,
-- traverse,
-- merge=None,
-- default='default',
-- base=None):
-- '''
-+def filter_by(lookup_dict, lookup, traverse, merge=None, default="default", base=None):
-+ """
- Common code to filter data structures like grains and pillar
-- '''
-+ """
- ret = None
- # Default value would be an empty list if lookup not found
- val = traverse_dict_and_list(traverse, lookup, [])
-@@ -507,10 +734,8 @@ def filter_by(lookup_dict,
- # lookup_dict keys
- for each in val if isinstance(val, list) else [val]:
- for key in lookup_dict:
-- test_key = key if isinstance(key, six.string_types) \
-- else six.text_type(key)
-- test_each = each if isinstance(each, six.string_types) \
-- else six.text_type(each)
-+ test_key = key if isinstance(key, str) else str(key)
-+ test_each = each if isinstance(each, str) else str(each)
- if fnmatch.fnmatchcase(test_each, test_key):
- ret = lookup_dict[key]
- break
-@@ -528,14 +753,13 @@ def filter_by(lookup_dict,
- elif isinstance(base_values, Mapping):
- if not isinstance(ret, Mapping):
- raise SaltException(
-- 'filter_by default and look-up values must both be '
-- 'dictionaries.')
-+ "filter_by default and look-up values must both be " "dictionaries."
-+ )
- ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
-
- if merge:
- if not isinstance(merge, Mapping):
-- raise SaltException(
-- 'filter_by merge argument must be a dictionary.')
-+ raise SaltException("filter_by merge argument must be a dictionary.")
-
- if ret is None:
- ret = merge
-@@ -546,12 +770,12 @@ def filter_by(lookup_dict,
-
-
- def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
-- '''
-+ """
- Traverse a dict using a colon-delimited (or otherwise delimited, using the
- 'delimiter' param) target string. The target 'foo:bar:baz' will return
- data['foo']['bar']['baz'] if this value exists, and will otherwise return
- the dict in the default argument.
-- '''
-+ """
- ptr = data
- try:
- for each in key.split(delimiter):
-@@ -562,9 +786,9 @@ def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
- return ptr
-
-
--@jinja_filter('traverse')
-+@jinja_filter("traverse")
- def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
-- '''
-+ """
- Traverse a dict or list using a colon-delimited (or otherwise delimited,
- using the 'delimiter' param) target string. The target 'foo:bar:0' will
- return data['foo']['bar'][0] if this value exists, and will otherwise
-@@ -573,7 +797,7 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL
- The target 'foo:bar:0' will return data['foo']['bar'][0] if the data is like
- {'foo':{'bar':['baz']}}; if the data is like {'foo':{'bar':{'0':'baz'}}},
- then it returns data['foo']['bar']['0']
-- '''
-+ """
- ptr = data
- for each in key.split(delimiter):
- if isinstance(ptr, list):
-@@ -605,18 +829,17 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL
- return ptr
-
-
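A worked example of the list-aware traversal described in the docstring above:

.. code-block:: python

    import salt.utils.data

    data = {"foo": {"bar": ["baz", {"qux": 1}]}}
    # dicts are walked by key, lists by integer index
    print(salt.utils.data.traverse_dict_and_list(data, "foo:bar:0"))         # baz
    print(salt.utils.data.traverse_dict_and_list(data, "foo:bar:1:qux"))     # 1
    print(salt.utils.data.traverse_dict_and_list(data, "foo:missing", "x"))  # x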
--def subdict_match(data,
-- expr,
-- delimiter=DEFAULT_TARGET_DELIM,
-- regex_match=False,
-- exact_match=False):
-- '''
-+def subdict_match(
-+ data, expr, delimiter=DEFAULT_TARGET_DELIM, regex_match=False, exact_match=False
-+):
-+ """
- Check for a match in a dictionary using a delimiter character to denote
- levels of subdicts, and also allowing the delimiter character to be
- matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
- data['foo']['bar'] == 'baz'. The latter would take priority over the
- former, as more deeply-nested matches are tried first.
-- '''
-+ """
-+
- def _match(target, pattern, regex_match=False, exact_match=False):
- # The reason for using six.text_type first and _then_ using
- # to_unicode as a fallback is because we want to eventually have
-@@ -628,11 +851,11 @@ def subdict_match(data,
- # begin with is that (by design) to_unicode will raise a TypeError if a
- # non-string/bytestring/bytearray value is passed.
- try:
-- target = six.text_type(target).lower()
-+ target = str(target).lower()
- except UnicodeDecodeError:
- target = salt.utils.stringutils.to_unicode(target).lower()
- try:
-- pattern = six.text_type(pattern).lower()
-+ pattern = str(pattern).lower()
- except UnicodeDecodeError:
- pattern = salt.utils.stringutils.to_unicode(pattern).lower()
-
-@@ -640,48 +863,54 @@ def subdict_match(data,
- try:
- return re.match(pattern, target)
- except Exception: # pylint: disable=broad-except
-- log.error('Invalid regex \'%s\' in match', pattern)
-+ log.error("Invalid regex '%s' in match", pattern)
- return False
- else:
-- return target == pattern if exact_match \
-- else fnmatch.fnmatch(target, pattern)
-+ return (
-+ target == pattern if exact_match else fnmatch.fnmatch(target, pattern)
-+ )
-
- def _dict_match(target, pattern, regex_match=False, exact_match=False):
- ret = False
-- wildcard = pattern.startswith('*:')
-+ wildcard = pattern.startswith("*:")
- if wildcard:
- pattern = pattern[2:]
-
-- if pattern == '*':
-+ if pattern == "*":
- # We are just checking that the key exists
- ret = True
- if not ret and pattern in target:
- # We might want to search for a key
- ret = True
-- if not ret and subdict_match(target,
-- pattern,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if not ret and subdict_match(
-+ target, pattern, regex_match=regex_match, exact_match=exact_match
-+ ):
- ret = True
- if not ret and wildcard:
- for key in target:
- if isinstance(target[key], dict):
-- if _dict_match(target[key],
-- pattern,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if _dict_match(
-+ target[key],
-+ pattern,
-+ regex_match=regex_match,
-+ exact_match=exact_match,
-+ ):
- return True
- elif isinstance(target[key], list):
- for item in target[key]:
-- if _match(item,
-- pattern,
-- regex_match=regex_match,
-- exact_match=exact_match):
-- return True
-- elif _match(target[key],
-+ if _match(
-+ item,
- pattern,
- regex_match=regex_match,
-- exact_match=exact_match):
-+ exact_match=exact_match,
-+ ):
-+ return True
-+ elif _match(
-+ target[key],
-+ pattern,
-+ regex_match=regex_match,
-+ exact_match=exact_match,
-+ ):
- return True
- return ret
-
-@@ -695,7 +924,7 @@ def subdict_match(data,
- # want to use are 3, 2, and 1, in that order.
- for idx in range(num_splits - 1, 0, -1):
- key = delimiter.join(splits[:idx])
-- if key == '*':
-+ if key == "*":
- # We are matching on everything under the top level, so we need to
- # treat the match as the entire data being passed in
- matchstr = expr
-@@ -703,54 +932,55 @@ def subdict_match(data,
- else:
- matchstr = delimiter.join(splits[idx:])
- match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
-- log.debug("Attempting to match '%s' in '%s' using delimiter '%s'",
-- matchstr, key, delimiter)
-+ log.debug(
-+ "Attempting to match '%s' in '%s' using delimiter '%s'",
-+ matchstr,
-+ key,
-+ delimiter,
-+ )
- if match == {}:
- continue
- if isinstance(match, dict):
-- if _dict_match(match,
-- matchstr,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if _dict_match(
-+ match, matchstr, regex_match=regex_match, exact_match=exact_match
-+ ):
- return True
- continue
- if isinstance(match, (list, tuple)):
- # We are matching a single component to a single list member
- for member in match:
- if isinstance(member, dict):
-- if _dict_match(member,
-- matchstr,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if _dict_match(
-+ member,
-+ matchstr,
-+ regex_match=regex_match,
-+ exact_match=exact_match,
-+ ):
- return True
-- if _match(member,
-- matchstr,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if _match(
-+ member, matchstr, regex_match=regex_match, exact_match=exact_match
-+ ):
- return True
- continue
-- if _match(match,
-- matchstr,
-- regex_match=regex_match,
-- exact_match=exact_match):
-+ if _match(match, matchstr, regex_match=regex_match, exact_match=exact_match):
- return True
- return False
-
-
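A worked example of the matching rules described in the docstring above:

.. code-block:: python

    import salt.utils.data

    grains = {"os": "SUSE", "roles": ["web", "db"]}
    # list members are tried one by one, so 'roles:web' matches
    assert salt.utils.data.subdict_match(grains, "roles:web")
    # globbing applies to the leaf value
    assert salt.utils.data.subdict_match(grains, "os:SU*")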
--@jinja_filter('substring_in_list')
-+@jinja_filter("substring_in_list")
- def substr_in_list(string_to_search_for, list_to_search):
-- '''
-+ """
- Return a boolean value that indicates whether or not a given
- string is present in any of the strings which comprise a list
-- '''
-+ """
- return any(string_to_search_for in s for s in list_to_search)
-
-
- def is_dictlist(data):
-- '''
-+ """
- Returns True if data is a list of one-element dicts (as found in many SLS
- schemas), otherwise returns False
-- '''
-+ """
- if isinstance(data, list):
- for element in data:
- if isinstance(element, dict):
-@@ -762,16 +992,12 @@ def is_dictlist(data):
- return False
-
-
--def repack_dictlist(data,
-- strict=False,
-- recurse=False,
-- key_cb=None,
-- val_cb=None):
-- '''
-+def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None):
-+ """
- Takes a list of one-element dicts (as found in many SLS schemas) and
- repacks into a single dictionary.
-- '''
-- if isinstance(data, six.string_types):
-+ """
-+ if isinstance(data, str):
- try:
- data = salt.utils.yaml.safe_load(data)
- except salt.utils.yaml.parser.ParserError as err:
-@@ -783,7 +1009,7 @@ def repack_dictlist(data,
- if val_cb is None:
- val_cb = lambda x, y: y
-
-- valid_non_dict = (six.string_types, six.integer_types, float)
-+ valid_non_dict = ((str,), (int,), float)
- if isinstance(data, list):
- for element in data:
- if isinstance(element, valid_non_dict):
-@@ -791,21 +1017,21 @@ def repack_dictlist(data,
- if isinstance(element, dict):
- if len(element) != 1:
- log.error(
-- 'Invalid input for repack_dictlist: key/value pairs '
-- 'must contain only one element (data passed: %s).',
-- element
-+ "Invalid input for repack_dictlist: key/value pairs "
-+ "must contain only one element (data passed: %s).",
-+ element,
- )
- return {}
- else:
- log.error(
-- 'Invalid input for repack_dictlist: element %s is '
-- 'not a string/dict/numeric value', element
-+ "Invalid input for repack_dictlist: element %s is "
-+ "not a string/dict/numeric value",
-+ element,
- )
- return {}
- else:
- log.error(
-- 'Invalid input for repack_dictlist, data passed is not a list '
-- '(%s)', data
-+ "Invalid input for repack_dictlist, data passed is not a list " "(%s)", data
- )
- return {}
-
-@@ -821,8 +1047,8 @@ def repack_dictlist(data,
- ret[key_cb(key)] = repack_dictlist(val, recurse=recurse)
- elif strict:
- log.error(
-- 'Invalid input for repack_dictlist: nested dictlist '
-- 'found, but recurse is set to False'
-+ "Invalid input for repack_dictlist: nested dictlist "
-+ "found, but recurse is set to False"
- )
- return {}
- else:
-@@ -832,17 +1058,17 @@ def repack_dictlist(data,
- return ret
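
As the docstring says, repack_dictlist flattens SLS-style lists of one-element dicts; a small sketch with illustrative keys:

import salt.utils.data

# A list of one-element dicts, as produced by many SLS schemas
packed = [{"user": "root"}, {"group": "wheel"}]
assert salt.utils.data.repack_dictlist(packed) == {"user": "root", "group": "wheel"}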
-
-
--@jinja_filter('is_list')
-+@jinja_filter("is_list")
- def is_list(value):
-- '''
-+ """
- Check if a variable is a list.
-- '''
-+ """
- return isinstance(value, list)
-
-
--@jinja_filter('is_iter')
--def is_iter(thing, ignore=six.string_types):
-- '''
-+@jinja_filter("is_iter")
-+def is_iter(thing, ignore=(str,)):
-+ """
- Test if an object is iterable, but not a string type.
-
- Test if an object is an iterator or is iterable itself. By default this
-@@ -853,7 +1079,7 @@ def is_iter(thing, ignore=six.string_types):
- dictionaries or named tuples.
-
- Based on https://bitbucket.org/petershinners/yter
-- '''
-+ """
- if ignore and isinstance(thing, ignore):
- return False
- try:
-@@ -863,9 +1089,9 @@ def is_iter(thing, ignore=six.string_types):
- return False
-
-
--@jinja_filter('sorted_ignorecase')
-+@jinja_filter("sorted_ignorecase")
- def sorted_ignorecase(to_sort):
-- '''
-+ """
- Sort a list of strings ignoring case.
-
- >>> L = ['foo', 'Foo', 'bar', 'Bar']
-@@ -874,19 +1100,19 @@ def sorted_ignorecase(to_sort):
- >>> sorted(L, key=lambda x: x.lower())
- ['bar', 'Bar', 'foo', 'Foo']
- >>>
-- '''
-+ """
- return sorted(to_sort, key=lambda x: x.lower())
-
-
- def is_true(value=None):
-- '''
-+ """
- Returns a boolean value representing the "truth" of the value passed. The
- rules for what is a "True" value are:
-
- 1. Integer/float values greater than 0
- 2. The string values "True" and "true"
- 3. Any object for which bool(obj) returns True
-- '''
-+ """
- # First, try int/float conversion
- try:
- value = int(value)
-@@ -898,26 +1124,26 @@ def is_true(value=None):
- pass
-
- # Now check for truthiness
-- if isinstance(value, (six.integer_types, float)):
-+ if isinstance(value, ((int,), float)):
- return value > 0
-- if isinstance(value, six.string_types):
-- return six.text_type(value).lower() == 'true'
-+ if isinstance(value, str):
-+ return str(value).lower() == "true"
- return bool(value)
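
A few concrete cases for the three truth rules listed in the is_true docstring:

import salt.utils.data

assert salt.utils.data.is_true(1)          # rule 1: number greater than 0
assert salt.utils.data.is_true("true")     # rule 2: case-insensitive "true"
assert not salt.utils.data.is_true(0)      # converts to int, not greater than 0
assert not salt.utils.data.is_true("no")   # a non-"true" string is false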
-
-
--@jinja_filter('mysql_to_dict')
-+@jinja_filter("mysql_to_dict")
- def mysql_to_dict(data, key):
-- '''
-+ """
- Convert MySQL-style output to a python dictionary
-- '''
-+ """
- ret = {}
-- headers = ['']
-+ headers = [""]
- for line in data:
- if not line:
- continue
-- if line.startswith('+'):
-+ if line.startswith("+"):
- continue
-- comps = line.split('|')
-+ comps = line.split("|")
- for comp in range(len(comps)):
- comps[comp] = comps[comp].strip()
- if len(headers) > 1:
-@@ -934,14 +1160,14 @@ def mysql_to_dict(data, key):
-
-
- def simple_types_filter(data):
-- '''
-+ """
- Convert the data list, dictionary into simple types, i.e., int, float, string,
- bool, etc.
-- '''
-+ """
- if data is None:
- return data
-
-- simpletypes_keys = (six.string_types, six.text_type, six.integer_types, float, bool)
-+ simpletypes_keys = ((str,), str, (int,), float, bool)
- simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple])
-
- if isinstance(data, (list, tuple)):
-@@ -957,7 +1183,7 @@ def simple_types_filter(data):
-
- if isinstance(data, dict):
- simpledict = {}
-- for key, value in six.iteritems(data):
-+ for key, value in data.items():
- if key is not None and not isinstance(key, simpletypes_keys):
- key = repr(key)
- if value is not None and isinstance(value, (dict, list, tuple)):
-@@ -971,23 +1197,23 @@ def simple_types_filter(data):
-
-
- def stringify(data):
-- '''
-+ """
- Given an iterable, returns its items as a list, with any non-string items
- converted to unicode strings.
-- '''
-+ """
- ret = []
- for item in data:
- if six.PY2 and isinstance(item, str):
- item = salt.utils.stringutils.to_unicode(item)
-- elif not isinstance(item, six.string_types):
-- item = six.text_type(item)
-+ elif not isinstance(item, str):
-+ item = str(item)
- ret.append(item)
- return ret
-
-
--@jinja_filter('json_query')
-+@jinja_filter("json_query")
- def json_query(data, expr):
-- '''
-+ """
- Query data using JMESPath language (http://jmespath.org).
-
- Requires the https://github.com/jmespath/jmespath.py library.
-@@ -1009,16 +1235,16 @@ def json_query(data, expr):
- .. code-block:: text
-
- [80, 25, 22]
-- '''
-+ """
- if jmespath is None:
-- err = 'json_query requires jmespath module installed'
-+ err = "json_query requires jmespath module installed"
- log.error(err)
- raise RuntimeError(err)
- return jmespath.search(expr, data)
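
Assuming the jmespath dependency is available, the [80, 25, 22] sample output quoted in the docstring corresponds to a query along these lines (the data shape is illustrative):

import salt.utils.data

data = {"services": [
    {"name": "http", "port": 80},
    {"name": "smtp", "port": 25},
    {"name": "ssh", "port": 22},
]}
# Flatten the projection over the list and collect each port
assert salt.utils.data.json_query(data, "services[].port") == [80, 25, 22]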
-
-
- def _is_not_considered_falsey(value, ignore_types=()):
-- '''
-+ """
- Helper function for filter_falsey to determine if something is not to be
- considered falsey.
-
-@@ -1026,12 +1252,12 @@ def _is_not_considered_falsey(value, ignore_types=()):
- :param list ignore_types: The types to ignore when considering the value.
-
- :return bool
-- '''
-+ """
- return isinstance(value, bool) or type(value) in ignore_types or value
-
-
- def filter_falsey(data, recurse_depth=None, ignore_types=()):
-- '''
-+ """
- Helper function to remove items from an iterable with falsey value.
- Removes ``None``, ``{}`` and ``[]``, 0, '' (but does not remove ``False``).
- Recurses into sub-iterables if ``recurse`` is set to ``True``.
-@@ -1045,37 +1271,42 @@ def filter_falsey(data, recurse_depth=None, ignore_types=()):
- :return type(data)
-
- .. versionadded:: 3000
-- '''
-+ """
- filter_element = (
-- functools.partial(filter_falsey,
-- recurse_depth=recurse_depth-1,
-- ignore_types=ignore_types)
-- if recurse_depth else lambda x: x
-+ functools.partial(
-+ filter_falsey, recurse_depth=recurse_depth - 1, ignore_types=ignore_types
-+ )
-+ if recurse_depth
-+ else lambda x: x
- )
-
- if isinstance(data, dict):
-- processed_elements = [(key, filter_element(value)) for key, value in six.iteritems(data)]
-- return type(data)([
-- (key, value)
-- for key, value in processed_elements
-- if _is_not_considered_falsey(value, ignore_types=ignore_types)
-- ])
-+ processed_elements = [
-+ (key, filter_element(value)) for key, value in data.items()
-+ ]
-+ return type(data)(
-+ [
-+ (key, value)
-+ for key, value in processed_elements
-+ if _is_not_considered_falsey(value, ignore_types=ignore_types)
-+ ]
-+ )
- if is_iter(data):
- processed_elements = (filter_element(value) for value in data)
-- return type(data)([
-- value for value in processed_elements
-- if _is_not_considered_falsey(value, ignore_types=ignore_types)
-- ])
-+ return type(data)(
-+ [
-+ value
-+ for value in processed_elements
-+ if _is_not_considered_falsey(value, ignore_types=ignore_types)
-+ ]
-+ )
- return data
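
The reformatted comprehensions above keep filter_falsey's semantics; a sketch of the recurse_depth behavior (note that ``False`` is deliberately preserved):

import salt.utils.data

data = {"a": 0, "b": {"c": "", "d": "keep"}, "e": False}
# Default: only the top level is filtered; False survives by design
assert salt.utils.data.filter_falsey(data) == {"b": {"c": "", "d": "keep"}, "e": False}
# recurse_depth=1 also filters one level inside nested iterables
assert salt.utils.data.filter_falsey(data, recurse_depth=1) == {
    "b": {"d": "keep"},
    "e": False,
}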
-
-
- def recursive_diff(
-- old,
-- new,
-- ignore_keys=None,
-- ignore_order=False,
-- ignore_missing_keys=False):
-- '''
-+ old, new, ignore_keys=None, ignore_order=False, ignore_missing_keys=False
-+):
-+ """
- Performs a recursive diff on mappings and/or iterables and returns the result
- in a {'old': values, 'new': values}-style.
- Compares dicts and sets unordered (obviously), OrderedDicts and Lists ordered
-@@ -1090,12 +1321,16 @@ def recursive_diff(
- but missing in ``new``. Only works for regular dicts.
-
- :return dict: Returns dict with keys 'old' and 'new' containing the differences.
-- '''
-+ """
- ignore_keys = ignore_keys or []
- res = {}
- ret_old = copy.deepcopy(old)
- ret_new = copy.deepcopy(new)
-- if isinstance(old, OrderedDict) and isinstance(new, OrderedDict) and not ignore_order:
-+ if (
-+ isinstance(old, OrderedDict)
-+ and isinstance(new, OrderedDict)
-+ and not ignore_order
-+ ):
- append_old, append_new = [], []
- if len(old) != len(new):
- min_length = min(len(old), len(new))
-@@ -1114,13 +1349,14 @@ def recursive_diff(
- new[key_new],
- ignore_keys=ignore_keys,
- ignore_order=ignore_order,
-- ignore_missing_keys=ignore_missing_keys)
-+ ignore_missing_keys=ignore_missing_keys,
-+ )
- if not res: # Equal
- del ret_old[key_old]
- del ret_new[key_new]
- else:
-- ret_old[key_old] = res['old']
-- ret_new[key_new] = res['new']
-+ ret_old[key_old] = res["old"]
-+ ret_new[key_new] = res["new"]
- else:
- if key_old in ignore_keys:
- del ret_old[key_old]
-@@ -1131,7 +1367,7 @@ def recursive_diff(
- ret_old[item] = old[item]
- for item in append_new:
- ret_new[item] = new[item]
-- ret = {'old': ret_old, 'new': ret_new} if ret_old or ret_new else {}
-+ ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {}
- elif isinstance(old, Mapping) and isinstance(new, Mapping):
- # Compare unordered
- for key in set(list(old) + list(new)):
-@@ -1146,16 +1382,17 @@ def recursive_diff(
- new[key],
- ignore_keys=ignore_keys,
- ignore_order=ignore_order,
-- ignore_missing_keys=ignore_missing_keys)
-+ ignore_missing_keys=ignore_missing_keys,
-+ )
- if not res: # Equal
- del ret_old[key]
- del ret_new[key]
- else:
-- ret_old[key] = res['old']
-- ret_new[key] = res['new']
-- ret = {'old': ret_old, 'new': ret_new} if ret_old or ret_new else {}
-+ ret_old[key] = res["old"]
-+ ret_new[key] = res["new"]
-+ ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {}
- elif isinstance(old, set) and isinstance(new, set):
-- ret = {'old': old - new, 'new': new - old} if old - new or new - old else {}
-+ ret = {"old": old - new, "new": new - old} if old - new or new - old else {}
- elif is_iter(old) and is_iter(new):
- # Create a list so we can edit on an index-basis.
- list_old = list(ret_old)
-@@ -1168,7 +1405,8 @@ def recursive_diff(
- item_new,
- ignore_keys=ignore_keys,
- ignore_order=ignore_order,
-- ignore_missing_keys=ignore_missing_keys)
-+ ignore_missing_keys=ignore_missing_keys,
-+ )
- if not res:
- list_old.remove(item_old)
- list_new.remove(item_new)
-@@ -1181,19 +1419,87 @@ def recursive_diff(
- iter_new,
- ignore_keys=ignore_keys,
- ignore_order=ignore_order,
-- ignore_missing_keys=ignore_missing_keys)
-+ ignore_missing_keys=ignore_missing_keys,
-+ )
- if not res: # Equal
- remove_indices.append(index)
- else:
-- list_old[index] = res['old']
-- list_new[index] = res['new']
-+ list_old[index] = res["old"]
-+ list_new[index] = res["new"]
- for index in reversed(remove_indices):
- list_old.pop(index)
- list_new.pop(index)
- # Instantiate a new whatever-it-was using the list as iterable source.
- # This may not be the most optimized in way of speed and memory usage,
- # but it will work for all iterable types.
-- ret = {'old': type(old)(list_old), 'new': type(new)(list_new)} if list_old or list_new else {}
-+ ret = (
-+ {"old": type(old)(list_old), "new": type(new)(list_new)}
-+ if list_old or list_new
-+ else {}
-+ )
- else:
-- ret = {} if old == new else {'old': ret_old, 'new': ret_new}
-+ ret = {} if old == new else {"old": ret_old, "new": ret_new}
- return ret
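
A hedged sketch of the {'old': ..., 'new': ...} result shape for plain (unordered) dicts:

import salt.utils.data

old = {"a": 1, "b": 2}
new = {"a": 1, "b": 3, "c": 4}
# Equal keys are dropped; differing and extra keys are reported
assert salt.utils.data.recursive_diff(old, new) == {
    "old": {"b": 2},
    "new": {"b": 3, "c": 4},
}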
-+
-+
-+def get_value(obj, path, default=None):
-+ """
-+ Get the values for a given path.
-+
-+ :param path:
-+ keys of the properties in the tree separated by colons.
-+ One segment in the path can be replaced by an id surrounded by curly braces.
-+ This will match all items in a list or a dictionary.
-+
-+ :param default:
-+ default value to return when no value is found
-+
-+ :return:
-+ a list of dictionaries, with at least the "value" key providing the actual value.
-+ If a placeholder was used, the placeholder id will be a key providing the replacement for it.
-+ Note that a value that wasn't found in the tree will be an empty list.
-+ This ensures we can distinguish it from a None value set by the user.
-+ """
-+ res = [{"value": obj}]
-+ if path:
-+ key = path[: path.find(":")] if ":" in path else path
-+ next_path = path[path.find(":") + 1 :] if ":" in path else None
-+
-+ if key.startswith("{") and key.endswith("}"):
-+ placeholder_name = key[1:-1]
-+ # There will be multiple values to get here
-+ items = []
-+ if obj is None:
-+ return res
-+ if isinstance(obj, dict):
-+ items = obj.items()
-+ elif isinstance(obj, list):
-+ items = enumerate(obj)
-+
-+ def _append_placeholder(value_dict, key):
-+ value_dict[placeholder_name] = key
-+ return value_dict
-+
-+ values = [
-+ [
-+ _append_placeholder(item, key)
-+ for item in get_value(val, next_path, default)
-+ ]
-+ for key, val in items
-+ ]
-+
-+ # flatten the list
-+ values = [y for x in values for y in x]
-+ return values
-+ elif isinstance(obj, dict):
-+ if key not in obj.keys():
-+ return [{"value": default}]
-+
-+ value = obj.get(key)
-+ if res is not None:
-+ res = get_value(value, next_path, default)
-+ else:
-+ res = [{"value": value}]
-+ else:
-+ return [{"value": default if obj is not None else obj}]
-+ return res
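
The placeholder fan-out described in the docstring is easiest to see with a list; this mirrors the new tests/pytests/unit/utils/test_data.py added below:

import salt.utils.data

data = {"a": [{"name": "foo"}, {"name": "bar"}]}
# {id} captures each list index (or dict key) alongside the value
assert salt.utils.data.get_value(data, "a:{id}:name") == [
    {"value": "foo", "id": 0},
    {"value": "bar", "id": 1},
]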
diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
-index 6d8d74fd3f..2b9c7bf43f 100644
+index e5c8ad4eec..b9f047820b 100644
--- a/salt/utils/xmlutil.py
+++ b/salt/utils/xmlutil.py
-@@ -1,30 +1,34 @@
--# -*- coding: utf-8 -*-
--'''
-+"""
- Various XML utilities
--'''
-+"""
-
- # Import Python libs
--from __future__ import absolute_import, print_function, unicode_literals
-+import re
-+import string # pylint: disable=deprecated-module
-+from xml.etree import ElementTree
-+
-+# Import salt libs
-+import salt.utils.data
-
-
- def _conv_name(x):
-- '''
-+ """
- If this XML tree has an xmlns attribute, then etree will add it
- to the beginning of the tag, like: "{http://path}tag".
-- '''
-- if '}' in x:
-- comps = x.split('}')
-+ """
-+ if "}" in x:
-+ comps = x.split("}")
- name = comps[1]
- return name
- return x
-
-
- def _to_dict(xmltree):
-- '''
-+ """
+@@ -25,7 +25,7 @@ def _to_dict(xmltree):
+ """
Converts an XML ElementTree to a dictionary that only contains items.
This is the default behavior in version 2017.7. This will default to prevent
- unexpected parsing issues on modules dependant on this.
-- '''
-+ """
+- unexpected parsing issues on modules dependent on this.
++ unexpected parsing issues on modules dependant on this.
+ """
# If this object has no children, the for..loop below will return nothing
# for it, so just return a single dict representing it.
- if len(xmltree.getchildren()) < 1:
-@@ -51,9 +55,9 @@ def _to_dict(xmltree):
+@@ -298,7 +298,7 @@ def change_xml(doc, data, mapping):
+ if convert_fn:
+ new_value = convert_fn(new_value)
-
- def _to_full_dict(xmltree):
-- '''
-+ """
- Returns the full XML dictionary including attributes.
-- '''
-+ """
- xmldict = {}
-
- for attrName, attrValue in xmltree.attrib.items():
-@@ -87,15 +91,234 @@ def _to_full_dict(xmltree):
-
-
- def to_dict(xmltree, attr=False):
-- '''
-+ """
- Convert an XML tree into a dict. The tree that is passed in must be an
- ElementTree object.
- Args:
- xmltree: An ElementTree object.
- attr: If true, attributes will be parsed. If false, they will be ignored.
-
-- '''
-+ """
- if attr:
- return _to_full_dict(xmltree)
- else:
- return _to_dict(xmltree)
-+
-+
-+def get_xml_node(node, xpath):
-+ """
-+ Get an XML node using a path (super simple xpath showing complete node ancestry).
-+ This also creates the missing nodes.
-+
-+ The supported XPath can contain elements filtering using [@attr='value'].
-+
-+ Args:
-+ node: an Element object
-+ xpath: simple XPath to look for.
-+ """
-+ if not xpath.startswith("./"):
-+ xpath = "./{}".format(xpath)
-+ res = node.find(xpath)
-+ if res is None:
-+ parent_xpath = xpath[: xpath.rfind("/")]
-+ parent = node.find(parent_xpath)
-+ if parent is None:
-+ parent = get_xml_node(node, parent_xpath)
-+ segment = xpath[xpath.rfind("/") + 1 :]
-+ # We may have [] filter in the segment
-+ matcher = re.match(
-+ r"""(?P[^[]+)(?:\[@(?P\w+)=["'](?P[^"']+)["']])?""",
-+ segment,
-+ )
-+ attrib = (
-+ {matcher.group("attr"): matcher.group("value")}
-+ if matcher.group("attr") and matcher.group("value")
-+ else {}
-+ )
-+ res = ElementTree.SubElement(parent, matcher.group("tag"), attrib)
-+ return res
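
A minimal sketch of the create-on-miss behavior, including the [@attr='value'] filter form:

from xml.etree import ElementTree

import salt.utils.xmlutil as xml

doc = ElementTree.fromstring("<domain><name>test</name></domain>")
# Neither <vcpus> nor the filtered <vcpu> exist yet; both get created
node = xml.get_xml_node(doc, "vcpus/vcpu[@id='1']")
node.set("enabled", "yes")
assert doc.find("vcpus/vcpu[@id='1']").get("enabled") == "yes"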
-+
-+
-+def set_node_text(node, value):
-+ """
-+ Function to use as the ``set`` value in the :py:func:`change_xml` mapping items to set the text.
-+ This is the default.
-+
-+ :param node: the node to set the text to
-+ :param value: the value to set
-+ """
-+ node.text = str(value)
-+
-+
-+def clean_node(parent_map, node, ignored=None):
-+ """
-+ Remove the node from its parent if it has no attributes other than the ignored ones, no text, and no children.
-+ Recursively called up to the document root to ensure no empty node is left.
-+
-+ :param parent_map: dictionary mapping each node to its parent
-+ :param node: the node to clean
-+ :param ignored: a list of ignored attributes.
-+ """
-+ has_text = node.text is not None and node.text.strip()
-+ parent = parent_map.get(node)
-+ if (
-+ len(node.attrib.keys() - (ignored or [])) == 0
-+ and not list(node)
-+ and not has_text
-+ ):
-+ parent.remove(node)
-+ # Clean parent nodes if needed
-+ if parent is not None:
-+ clean_node(parent_map, parent, ignored)
-+
-+
-+def del_text(parent_map, node):
-+ """
-+ Function to use as ``del`` value in the :py:func:`change_xml` mapping items to remove the text.
-+ This is the default function.
-+ Calls :py:func:`clean_node` before returning.
-+ """
-+ parent = parent_map[node]
-+ parent.remove(node)
-+ clean_node(parent, node)
-+
-+
-+def del_attribute(attribute, ignored=None):
-+ """
-+ Helper returning a function to use as ``del`` value in the :py:func:`change_xml` mapping items to
-+ remove an attribute.
-+
-+ The generated function calls :py:func:`clean_node` before returning.
-+
-+ :param attribute: the name of the attribute to remove
-+ :param ignored: the list of attributes to ignore during the cleanup
-+
-+ :return: the function called by :py:func:`change_xml`.
-+ """
-+
-+ def _do_delete(parent_map, node):
-+ if attribute not in node.keys():
-+ return
-+ node.attrib.pop(attribute)
-+ clean_node(parent_map, node, ignored)
-+
-+ return _do_delete
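
Note that del_attribute is a factory rather than the callback itself; the closure it returns has the (parent_map, node) signature that change_xml expects for ``del``:

import salt.utils.xmlutil as xml

# Strips 'enabled' from matched nodes, ignoring 'id' in the emptiness check
delete_enabled = xml.del_attribute("enabled", ignored=["id"])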
-+
-+
-+def change_xml(doc, data, mapping):
-+ """
-+ Change an XML ElementTree document according to the given data and mapping.
-+
-+ :param doc: the ElementTree parsed XML document to modify
-+ :param data: the dictionary of values used to modify the XML.
-+ :param mapping: a list of items describing how to modify the XML document.
-+ Each item is a dictionary containing the following keys:
-+
-+ .. glossary::
-+ path
-+ the path to the value to set or remove in the ``data`` parameter.
-+ See :py:func:`salt.utils.data.get_value <salt.utils.data.get_value>` for the format
-+ of the value.
-+
-+ xpath
-+ Simplified XPath expression used to locate the change in the XML tree.
-+ See :py:func:`get_xml_node` documentation for details on the supported XPath syntax
-+
-+ get
-+ function getting the value from the XML.
-+ Takes a single parameter for the XML node found by the XPath expression.
-+ Default returns the node text value.
-+ This may be used to return an attribute or to perform value transformation.
-+
-+ set
-+ function setting the value in the XML.
-+ Takes two parameters for the XML node and the value to set.
-+ Default is to set the text value.
-+
-+ del
-+ function deleting the value in the XML.
-+ Takes two parameters for the parent node and the node matched by the XPath.
-+ Default is to remove the text value.
-+ More cleanup may be performed, see the :py:func:`clean_node` function for details.
-+
-+ convert
-+ function modifying the user-provided value right before comparing it with the one from the XML.
-+ Takes the value as single parameter.
-+ Default is to apply no conversion.
-+
-+ :return: ``True`` if the XML has been modified, ``False`` otherwise.
-+ """
-+ need_update = False
-+ for param in mapping:
-+ # Get the value from the function parameter using the path-like description
-+ # Using an empty list as a default value will cause values not provided by the user
-+ # to be left untouched, as opposed to an explicit None, which unsets the value
-+ values = salt.utils.data.get_value(data, param["path"], [])
-+ xpath = param["xpath"]
-+ # Prepend the xpath with ./ to handle the root more easily
-+ if not xpath.startswith("./"):
-+ xpath = "./{}".format(xpath)
-+
-+ placeholders = [
-+ s[1:-1]
-+ for s in param["path"].split(":")
-+ if s.startswith("{") and s.endswith("}")
-+ ]
-+
-+ ctx = {placeholder: "$$$" for placeholder in placeholders}
-+ all_nodes_xpath = string.Template(xpath).substitute(ctx)
-+ all_nodes_xpath = re.sub(
-+ r"""(?:=['"]\$\$\$["'])|(?:\[\$\$\$\])""", "", all_nodes_xpath
-+ )
-+
-+ # Store the nodes that are not removed for later cleanup
-+ kept_nodes = set()
-+
-+ for value_item in values:
-+ new_value = value_item["value"]
-+
-+ # Only handle simple type values. Use multiple entries or a custom get for dict or lists
-+ if isinstance(new_value, list) or isinstance(new_value, dict):
-+ continue
-+
-+ if new_value is not None:
-+ ctx = {
-+ placeholder: value_item.get(placeholder, "")
-+ for placeholder in placeholders
-+ }
-+ node_xpath = string.Template(xpath).substitute(ctx)
-+ node = get_xml_node(doc, node_xpath)
-+
-+ kept_nodes.add(node)
-+
-+ get_fn = param.get("get", lambda n: n.text)
-+ set_fn = param.get("set", set_node_text)
-+ current_value = get_fn(node)
-+
-+ # Do we need to apply some conversion to the user-provided value?
-+ convert_fn = param.get("convert")
-+ if convert_fn:
-+ new_value = convert_fn(new_value)
-+
+- if str(current_value) != str(new_value):
+ if current_value != new_value:
-+ set_fn(node, new_value)
-+ need_update = True
-+ else:
-+ nodes = doc.findall(all_nodes_xpath)
-+ del_fn = param.get("del", del_text)
-+ parent_map = {c: p for p in doc.iter() for c in p}
-+ for node in nodes:
-+ del_fn(parent_map, node)
-+ need_update = True
-+
-+ # Clean the leftover XML elements if there were placeholders
-+ if placeholders and values[0].get("value") != []:
-+ all_nodes = set(doc.findall(all_nodes_xpath))
-+ to_remove = all_nodes - kept_nodes
-+ del_fn = param.get("del", del_text)
-+ parent_map = {c: p for p in doc.iter() for c in p}
-+ for node in to_remove:
-+ del_fn(parent_map, node)
-+ need_update = True
-+
-+ return need_update
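
Putting the mapping keys together, a sketch that mirrors test_change_xml_text in the new tests/pytests/unit/utils/test_xmlutil.py below:

from xml.etree import ElementTree

import salt.utils.xmlutil as xml

doc = ElementTree.fromstring("<domain><name>test01</name></domain>")
changed = xml.change_xml(
    doc,
    {"name": "test02"},
    [{"path": "name", "xpath": "name"}],
)
assert changed
assert doc.find("name").text == "test02"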
-diff --git a/tests/pytests/unit/utils/test_data.py b/tests/pytests/unit/utils/test_data.py
-new file mode 100644
-index 0000000000..b3f0ba04ae
---- /dev/null
-+++ b/tests/pytests/unit/utils/test_data.py
-@@ -0,0 +1,57 @@
-+import salt.utils.data
-+
-+
-+def test_get_value_simple_path():
-+ data = {"a": {"b": {"c": "foo"}}}
-+ assert [{"value": "foo"}] == salt.utils.data.get_value(data, "a:b:c")
-+
-+
-+def test_get_value_placeholder_dict():
-+ data = {"a": {"b": {"name": "foo"}, "c": {"name": "bar"}}}
-+ assert [
-+ {"value": "foo", "id": "b"},
-+ {"value": "bar", "id": "c"},
-+ ] == salt.utils.data.get_value(data, "a:{id}:name")
-+
-+
-+def test_get_value_placeholder_list():
-+ data = {"a": [{"name": "foo"}, {"name": "bar"}]}
-+ assert [
-+ {"value": "foo", "id": 0},
-+ {"value": "bar", "id": 1},
-+ ] == salt.utils.data.get_value(data, "a:{id}:name")
-+
-+
-+def test_get_value_nested_placeholder():
-+ data = {
-+ "a": {
-+ "b": {"b1": {"name": "foo1"}, "b2": {"name": "foo2"}},
-+ "c": {"c1": {"name": "bar"}},
-+ }
-+ }
-+ assert [
-+ {"value": "foo1", "id": "b", "sub": "b1"},
-+ {"value": "foo2", "id": "b", "sub": "b2"},
-+ {"value": "bar", "id": "c", "sub": "c1"},
-+ ] == salt.utils.data.get_value(data, "a:{id}:{sub}:name")
-+
-+
-+def test_get_value_nested_notfound():
-+ data = {"a": {"b": {"c": "foo"}}}
-+ assert [{"value": []}] == salt.utils.data.get_value(data, "a:b:d", [])
-+
-+
-+def test_get_value_not_found():
-+ assert [{"value": []}] == salt.utils.data.get_value({}, "a", [])
-+
-+
-+def test_get_value_none():
-+ assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a")
-+
-+
-+def test_get_value_simple_type_path():
-+ assert [{"value": []}] == salt.utils.data.get_value({"a": 1024}, "a:b", [])
-+
-+
-+def test_get_value_None_path():
-+ assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a:b", [])
-diff --git a/tests/pytests/unit/utils/test_xmlutil.py b/tests/pytests/unit/utils/test_xmlutil.py
-new file mode 100644
-index 0000000000..081cc64193
---- /dev/null
-+++ b/tests/pytests/unit/utils/test_xmlutil.py
-@@ -0,0 +1,169 @@
-+import pytest
-+import salt.utils.xmlutil as xml
-+from salt._compat import ElementTree as ET
-+
-+
-+@pytest.fixture
-+def xml_doc():
-+ return ET.fromstring(
-+ """
-+ <domain>
-+ <name>test01</name>
-+ <memory>1024</memory>
-+ <cpu>
-+ <topology sockets="1"/>
-+ </cpu>
-+ <vcpus>
-+ <vcpu id="1" enabled="yes"/>
-+ </vcpus>
-+ </domain>
-+ """
-+ )
-+
-+
-+def test_change_xml_text(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc, {"name": "test02"}, [{"path": "name", "xpath": "name"}]
-+ )
-+ assert ret
-+ assert "test02" == xml_doc.find("name").text
-+
-+
-+def test_change_xml_text_nochange(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc, {"name": "test01"}, [{"path": "name", "xpath": "name"}]
-+ )
-+ assert not ret
-+
-+
-+def test_change_xml_text_notdefined(xml_doc):
-+ ret = xml.change_xml(xml_doc, {}, [{"path": "name", "xpath": "name"}])
-+ assert not ret
-+
-+
-+def test_change_xml_text_removed(xml_doc):
-+ ret = xml.change_xml(xml_doc, {"name": None}, [{"path": "name", "xpath": "name"}])
-+ assert ret
-+ assert xml_doc.find("name") is None
-+
-+
-+def test_change_xml_text_add(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"vendor": "ACME"}},
-+ [{"path": "cpu:vendor", "xpath": "cpu/vendor"}],
-+ )
-+ assert ret
-+ assert "ACME" == xml_doc.find("cpu/vendor").text
-+
-+
-+def test_change_xml_convert(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"mem": 2},
-+ [{"path": "mem", "xpath": "memory", "convert": lambda v: v * 1024}],
-+ )
-+ assert ret
-+ assert "2048" == xml_doc.find("memory").text
-+
-+
-+def test_change_xml_attr(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"topology": {"cores": 4}}},
-+ [
-+ {
-+ "path": "cpu:topology:cores",
-+ "xpath": "cpu/topology",
-+ "get": lambda n: int(n.get("cores")) if n.get("cores") else None,
-+ "set": lambda n, v: n.set("cores", str(v)),
-+ "del": xml.del_attribute("cores"),
-+ }
-+ ],
-+ )
-+ assert ret
-+ assert "4" == xml_doc.find("cpu/topology").get("cores")
-+
-+
-+def test_change_xml_attr_unchanged(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"topology": {"sockets": 1}}},
-+ [
-+ {
-+ "path": "cpu:topology:sockets",
-+ "xpath": "cpu/topology",
-+ "get": lambda n: int(n.get("sockets")) if n.get("sockets") else None,
-+ "set": lambda n, v: n.set("sockets", str(v)),
-+ "del": xml.del_attribute("sockets"),
-+ }
-+ ],
-+ )
-+ assert not ret
-+
-+
-+def test_change_xml_attr_remove(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"topology": {"sockets": None}}},
-+ [
-+ {
-+ "path": "cpu:topology:sockets",
-+ "xpath": "./cpu/topology",
-+ "get": lambda n: int(n.get("sockets")) if n.get("sockets") else None,
-+ "set": lambda n, v: n.set("sockets", str(v)),
-+ "del": xml.del_attribute("sockets"),
-+ }
-+ ],
-+ )
-+ assert ret
-+ assert xml_doc.find("cpu") is None
-+
-+
-+def test_change_xml_not_simple_value(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"topology": {"sockets": None}}},
-+ [{"path": "cpu", "xpath": "vcpu", "get": lambda n: int(n.text)}],
-+ )
-+ assert not ret
-+
-+
-+def test_change_xml_template(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"vcpus": {2: {"enabled": True}, 4: {"enabled": False}}}},
-+ [
-+ {
-+ "path": "cpu:vcpus:{id}:enabled",
-+ "xpath": "vcpus/vcpu[@id='$id']",
-+ "convert": lambda v: "yes" if v else "no",
-+ "get": lambda n: n.get("enabled"),
-+ "set": lambda n, v: n.set("enabled", v),
-+ "del": xml.del_attribute("enabled", ["id"]),
-+ },
-+ ],
-+ )
-+ assert ret
-+ assert xml_doc.find("vcpus/vcpu[@id='1']") is None
-+ assert "yes" == xml_doc.find("vcpus/vcpu[@id='2']").get("enabled")
-+ assert "no" == xml_doc.find("vcpus/vcpu[@id='4']").get("enabled")
-+
-+
-+def test_change_xml_template_remove(xml_doc):
-+ ret = xml.change_xml(
-+ xml_doc,
-+ {"cpu": {"vcpus": None}},
-+ [
-+ {
-+ "path": "cpu:vcpus:{id}:enabled",
-+ "xpath": "vcpus/vcpu[@id='$id']",
-+ "convert": lambda v: "yes" if v else "no",
-+ "get": lambda n: n.get("enabled"),
-+ "set": lambda n, v: n.set("enabled", v),
-+ "del": xml.del_attribute("enabled", ["id"]),
-+ },
-+ ],
-+ )
-+ assert ret
-+ assert xml_doc.find("vcpus") is None
+ set_fn(node, new_value)
+ need_update = True
+ else:
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index d3988464f6..5ec8de77e7 100644
+index f53b4a85c1..4775fec31f 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -1,4 +1,3 @@
--# -*- coding: utf-8 -*-
- """
- virt execution module unit tests
- """
-@@ -6,7 +5,6 @@ virt execution module unit tests
- # pylint: disable=3rd-party-module-not-gated
-
- # Import python libs
--from __future__ import absolute_import, print_function, unicode_literals
-
- import datetime
- import os
-@@ -23,9 +21,6 @@ import salt.utils.yaml
- from salt._compat import ElementTree as ET
- from salt.exceptions import CommandExecutionError, SaltInvocationError
-
--# Import third party libs
--from salt.ext import six
--
- # pylint: disable=import-error
- from salt.ext.six.moves import range # pylint: disable=redefined-builtin
-
-@@ -136,7 +131,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- "model": "virtio",
- "filename": "myvm_system.qcow2",
- "image": "/path/to/image",
-- "source_file": "{0}{1}myvm_system.qcow2".format(root_dir, os.sep),
-+ "source_file": "{}{}myvm_system.qcow2".format(root_dir, os.sep),
- },
- {
- "name": "data",
-@@ -145,7 +140,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- "format": "raw",
- "model": "virtio",
- "filename": "myvm_data.raw",
-- "source_file": "{0}{1}myvm_data.raw".format(root_dir, os.sep),
-+ "source_file": "{}{}myvm_data.raw".format(root_dir, os.sep),
- },
- ],
- disks,
-@@ -582,8 +577,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.assertIsNone(root.get("type"))
- self.assertEqual(root.find("name").text, "vmname/system.vmdk")
- self.assertEqual(root.find("capacity").attrib["unit"], "KiB")
-- self.assertEqual(root.find("capacity").text, six.text_type(8192 * 1024))
-- self.assertEqual(root.find("allocation").text, six.text_type(0))
-+ self.assertEqual(root.find("capacity").text, str(8192 * 1024))
-+ self.assertEqual(root.find("allocation").text, str(0))
- self.assertEqual(root.find("target/format").get("type"), "vmdk")
- self.assertIsNone(root.find("target/permissions"))
- self.assertIsNone(root.find("target/nocow"))
-@@ -615,9 +610,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.assertIsNone(root.find("target/path"))
- self.assertEqual(root.find("target/format").get("type"), "qcow2")
- self.assertEqual(root.find("capacity").attrib["unit"], "KiB")
-- self.assertEqual(root.find("capacity").text, six.text_type(8192 * 1024))
-+ self.assertEqual(root.find("capacity").text, str(8192 * 1024))
- self.assertEqual(root.find("capacity").attrib["unit"], "KiB")
-- self.assertEqual(root.find("allocation").text, six.text_type(4096 * 1024))
-+ self.assertEqual(root.find("allocation").text, str(4096 * 1024))
- self.assertEqual(root.find("target/permissions/mode").text, "0775")
- self.assertEqual(root.find("target/permissions/owner").text, "123")
- self.assertEqual(root.find("target/permissions/group").text, "456")
-@@ -638,7 +633,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- root = ET.fromstring(xml_data)
- self.assertEqual(root.attrib["type"], "kvm")
- self.assertEqual(root.find("vcpu").text, "1")
-- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024))
-+ self.assertEqual(root.find("memory").text, str(512 * 1024))
- self.assertEqual(root.find("memory").attrib["unit"], "KiB")
-
- disks = root.findall(".//disk")
-@@ -671,7 +666,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- root = ET.fromstring(xml_data)
- self.assertEqual(root.attrib["type"], "vmware")
- self.assertEqual(root.find("vcpu").text, "1")
-- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024))
-+ self.assertEqual(root.find("memory").text, str(512 * 1024))
- self.assertEqual(root.find("memory").attrib["unit"], "KiB")
-
- disks = root.findall(".//disk")
-@@ -714,7 +709,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- root = ET.fromstring(xml_data)
- self.assertEqual(root.attrib["type"], "xen")
- self.assertEqual(root.find("vcpu").text, "1")
-- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024))
-+ self.assertEqual(root.find("memory").text, str(512 * 1024))
- self.assertEqual(root.find("memory").attrib["unit"], "KiB")
- self.assertEqual(
- root.find(".//kernel").text, "/usr/lib/grub2/x86_64-xen/grub.xen"
-@@ -768,7 +763,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- root = ET.fromstring(xml_data)
- self.assertEqual(root.attrib["type"], "vmware")
- self.assertEqual(root.find("vcpu").text, "1")
-- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024))
-+ self.assertEqual(root.find("memory").text, str(512 * 1024))
- self.assertEqual(root.find("memory").attrib["unit"], "KiB")
- self.assertTrue(len(root.findall(".//disk")) == 2)
- self.assertTrue(len(root.findall(".//interface")) == 2)
-@@ -801,7 +796,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- root = ET.fromstring(xml_data)
- self.assertEqual(root.attrib["type"], "kvm")
- self.assertEqual(root.find("vcpu").text, "1")
-- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024))
-+ self.assertEqual(root.find("memory").text, str(512 * 1024))
- self.assertEqual(root.find("memory").attrib["unit"], "KiB")
- disks = root.findall(".//disk")
- self.assertTrue(len(disks) == 2)
-@@ -1635,7 +1630,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.assertIsNone(definition.find("./devices/disk[2]/source"))
- self.assertEqual(
- mock_run.call_args[0][0],
-- 'qemu-img create -f qcow2 "{0}" 10240M'.format(expected_disk_path),
-+ 'qemu-img create -f qcow2 "{}" 10240M'.format(expected_disk_path),
- )
- self.assertEqual(mock_chmod.call_args[0][0], expected_disk_path)
-
-@@ -1729,11 +1724,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- 1
-
- hvm
-+
-
-
-
-
--
-+
-
-
-
-@@ -1850,17 +1846,36 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1843,17 +1843,36 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
}
@@ -4263,7 +1202,7 @@ index d3988464f6..5ec8de77e7 100644
self.assertEqual(
{
"definition": True,
-@@ -1884,6 +1899,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1877,6 +1896,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"console=ttyS0 ks=http://example.com/f8-i386/os/",
)
@@ -4275,7 +1214,7 @@ index d3988464f6..5ec8de77e7 100644
self.assertEqual(
{
"definition": True,
-@@ -1903,9 +1923,28 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1896,9 +1920,28 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"/usr/share/OVMF/OVMF_VARS.ms.fd",
)
@@ -4304,16 +1243,7 @@ index d3988464f6..5ec8de77e7 100644
# Update memory case
setmem_mock = MagicMock(return_value=0)
domain_mock.setMemoryFlags = setmem_mock
-@@ -1955,7 +1994,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- ) # pylint: disable=no-member
- self.assertEqual(
- mock_run.call_args[0][0],
-- 'qemu-img create -f qcow2 "{0}" 2048M'.format(added_disk_path),
-+ 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path),
- )
- self.assertEqual(mock_chmod.call_args[0][0], added_disk_path)
- self.assertListEqual(
-@@ -2397,6 +2436,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2390,6 +2433,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
],
)
@@ -4357,7 +1287,7 @@ index d3988464f6..5ec8de77e7 100644
def test_update_existing_boot_params(self):
"""
Test virt.update() with existing boot parameters.
-@@ -2537,6 +2613,18 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2530,6 +2610,18 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(setxml.find("os").find("initrd"), None)
self.assertEqual(setxml.find("os").find("cmdline"), None)
@@ -4376,49 +1306,7 @@ index d3988464f6..5ec8de77e7 100644
self.assertEqual(
{
"definition": True,
-@@ -2582,7 +2670,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- salt.modules.config.__opts__, mock_config # pylint: disable=no-member
- ):
-
-- for name in six.iterkeys(mock_config["virt"]["nic"]):
-+ for name in mock_config["virt"]["nic"].keys():
- profile = salt.modules.virt._nic_profile(name, "kvm")
- self.assertEqual(len(profile), 2)
-
-@@ -3592,8 +3680,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- "44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"]
- )
- self.assertEqual(
-- set(["qemu", "kvm"]),
-- set([domainCaps["domain"] for domainCaps in caps["domains"]]),
-+ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]},
- )
-
- def test_network_tag(self):
-@@ -3694,9 +3781,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- for i in range(2):
- net_mock = MagicMock()
-
-- net_mock.name.return_value = "net{0}".format(i)
-+ net_mock.name.return_value = "net{}".format(i)
- net_mock.UUIDString.return_value = "some-uuid"
-- net_mock.bridgeName.return_value = "br{0}".format(i)
-+ net_mock.bridgeName.return_value = "br{}".format(i)
- net_mock.autostart.return_value = True
- net_mock.isActive.return_value = False
- net_mock.isPersistent.return_value = True
-@@ -4156,8 +4243,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- pool_mocks = []
- for i in range(2):
- pool_mock = MagicMock()
-- pool_mock.name.return_value = "pool{0}".format(i)
-- pool_mock.UUIDString.return_value = "some-uuid-{0}".format(i)
-+ pool_mock.name.return_value = "pool{}".format(i)
-+ pool_mock.UUIDString.return_value = "some-uuid-{}".format(i)
- pool_mock.info.return_value = [0, 1234, 5678, 123]
- pool_mock.autostart.return_value = True
- pool_mock.isPersistent.return_value = True
-@@ -4257,7 +4344,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4248,7 +4340,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"""
mock_pool = MagicMock()
mock_pool.delete = MagicMock(return_value=0)
@@ -4426,7 +1314,7 @@ index d3988464f6..5ec8de77e7 100644
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool)
res = virt.pool_delete("test-pool")
-@@ -4271,12 +4357,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4262,12 +4353,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL
)
@@ -4442,7 +1330,7 @@ index d3988464f6..5ec8de77e7 100644
mock_pool.XMLDesc.return_value = """
test-ses
-@@ -4293,16 +4379,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4284,16 +4375,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
mock_undefine = MagicMock(return_value=0)
self.mock_conn.secretLookupByUsage.return_value.undefine = mock_undefine
@@ -4461,7 +1349,7 @@ index d3988464f6..5ec8de77e7 100644
self.mock_conn.secretLookupByUsage.assert_called_once_with(
self.mock_libvirt.VIR_SECRET_USAGE_TYPE_CEPH, "pool_test-ses"
-@@ -4571,24 +4652,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4562,24 +4648,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"""
@@ -4486,7 +1374,7 @@ index d3988464f6..5ec8de77e7 100644
mock_secret = MagicMock()
self.mock_conn.secretLookupByUUIDString = MagicMock(return_value=mock_secret)
-@@ -4609,6 +4672,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -4600,6 +4668,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.mock_conn.storagePoolDefineXML.assert_not_called()
mock_secret.setValue.assert_called_once_with(b"secret")
@@ -4510,80 +1398,11 @@ index d3988464f6..5ec8de77e7 100644
def test_pool_update_password_create(self):
"""
Test the pool_update function, where the password only is changed
-@@ -4695,11 +4775,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- for idx, disk in enumerate(vms_disks):
- vm = MagicMock()
- # pylint: disable=no-member
-- vm.name.return_value = "vm{0}".format(idx)
-+ vm.name.return_value = "vm{}".format(idx)
- vm.XMLDesc.return_value = """
-
-- vm{0}
-- {1}
-+ vm{}
-+ {}
-
- """.format(
- idx, disk
-@@ -4760,7 +4840,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- # pylint: disable=no-member
- mock_volume.name.return_value = vol_data["name"]
- mock_volume.key.return_value = vol_data["key"]
-- mock_volume.path.return_value = "/path/to/{0}.qcow2".format(
-+ mock_volume.path.return_value = "/path/to/{}.qcow2".format(
- vol_data["name"]
- )
- if vol_data["info"]:
-@@ -4769,7 +4849,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- """
-
-
-- {0}
-+ {}
-
- """.format(
- vol_data["backingStore"]
-@@ -5234,7 +5314,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
- def create_mock_vm(idx):
- mock_vm = MagicMock()
-- mock_vm.name.return_value = "vm{0}".format(idx)
-+ mock_vm.name.return_value = "vm{}".format(idx)
- return mock_vm
-
- mock_vms = [create_mock_vm(idx) for idx in range(3)]
diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
-index c76f8a5fc0..f03159334b 100644
+index 6d38829870..8fe892f607 100644
--- a/tests/unit/states/test_virt.py
+++ b/tests/unit/states/test_virt.py
-@@ -1,9 +1,7 @@
--# -*- coding: utf-8 -*-
- """
- :codeauthor: Jayesh Kariya
- """
- # Import Python libs
--from __future__ import absolute_import, print_function, unicode_literals
-
- import shutil
- import tempfile
-@@ -14,7 +12,6 @@ import salt.utils.files
- from salt.exceptions import CommandExecutionError, SaltInvocationError
-
- # Import 3rd-party libs
--from salt.ext import six
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.mock import MagicMock, mock_open, patch
-
-@@ -37,7 +34,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
- """
- Fake function return error message
- """
-- return six.text_type(self)
-+ return str(self)
-
-
- class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-@@ -341,6 +338,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -333,6 +333,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
"myvm",
cpu=2,
mem=2048,
@@ -4591,7 +1410,7 @@ index c76f8a5fc0..f03159334b 100644
os_type="linux",
arch="i686",
vm_type="qemu",
-@@ -363,6 +361,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -355,6 +356,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
"myvm",
cpu=2,
mem=2048,
@@ -4599,7 +1418,7 @@ index c76f8a5fc0..f03159334b 100644
os_type="linux",
arch="i686",
disk="prod",
-@@ -471,10 +470,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -463,10 +465,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
"comment": "Domain myvm updated with live update(s) failures",
}
)
@@ -4614,7 +1433,7 @@ index c76f8a5fc0..f03159334b 100644
mem=None,
disk_profile=None,
disks=None,
-@@ -598,6 +600,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -590,6 +595,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=True,
@@ -4622,7 +1441,7 @@ index c76f8a5fc0..f03159334b 100644
)
# No changes case
-@@ -632,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -624,6 +630,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=True,
@@ -4630,7 +1449,7 @@ index c76f8a5fc0..f03159334b 100644
)
def test_running(self):
-@@ -708,6 +712,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -700,6 +707,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
install=True,
pub_key=None,
priv_key=None,
@@ -4638,7 +1457,7 @@ index c76f8a5fc0..f03159334b 100644
connection=None,
username=None,
password=None,
-@@ -769,6 +774,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -761,6 +769,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
install=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -4646,7 +1465,7 @@ index c76f8a5fc0..f03159334b 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -793,6 +799,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -785,6 +794,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
start=False,
pub_key="/path/to/key.pub",
priv_key="/path/to/key",
@@ -4654,7 +1473,7 @@ index c76f8a5fc0..f03159334b 100644
connection="someconnection",
username="libvirtuser",
password="supersecret",
-@@ -937,6 +944,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -929,6 +939,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=False,
@@ -4662,7 +1481,7 @@ index c76f8a5fc0..f03159334b 100644
)
# Failed definition update case
-@@ -1055,6 +1063,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1047,6 +1058,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=True,
@@ -4670,7 +1489,7 @@ index c76f8a5fc0..f03159334b 100644
)
start_mock.assert_not_called()
-@@ -1091,6 +1100,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1083,6 +1095,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password=None,
boot=None,
test=True,
@@ -4678,7 +1497,7 @@ index c76f8a5fc0..f03159334b 100644
)
def test_stopped(self):
-@@ -1978,6 +1988,72 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1970,6 +1983,72 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
password="secret",
)
@@ -4751,7 +1570,7 @@ index c76f8a5fc0..f03159334b 100644
mocks["update"] = MagicMock(return_value=False)
for mock in mocks:
mocks[mock].reset_mock()
-@@ -2027,6 +2103,9 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2019,6 +2098,9 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
for mock in mocks:
mocks[mock].reset_mock()
mocks["update"] = MagicMock(return_value=True)
@@ -4761,7 +1580,7 @@ index c76f8a5fc0..f03159334b 100644
with patch.dict(
virt.__salt__,
{ # pylint: disable=no-member
-@@ -2130,6 +2209,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2122,6 +2204,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
),
ret,
)
@@ -4769,7 +1588,7 @@ index c76f8a5fc0..f03159334b 100644
mocks["update"].assert_called_with(
"mypool",
ptype="logical",
-@@ -2477,8 +2557,8 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2469,8 +2552,8 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
):
ret.update(
{
@@ -4780,7 +1599,7 @@ index c76f8a5fc0..f03159334b 100644
"result": True,
}
)
-@@ -2504,9 +2584,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2496,9 +2579,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
mocks["start"].assert_called_with(
"mypool", connection=None, username=None, password=None
)
@@ -4792,2321 +1611,49 @@ index c76f8a5fc0..f03159334b 100644
"mypool",
ptype="logical",
diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py
-index 8fa352321c..8a6956d442 100644
+index 9206979284..aff7384232 100644
--- a/tests/unit/utils/test_data.py
+++ b/tests/unit/utils/test_data.py
-@@ -1,38 +1,38 @@
--# -*- coding: utf-8 -*-
--'''
-+"""
- Tests for salt.utils.data
--'''
-+"""
+@@ -220,38 +220,6 @@ class DataTestCase(TestCase):
+ ),
+ )
- # Import Python libs
--from __future__ import absolute_import, print_function, unicode_literals
-+
- import logging
-
- # Import Salt libs
- import salt.utils.data
- import salt.utils.stringutils
--from salt.utils.odict import OrderedDict
--from tests.support.unit import TestCase, LOREM_IPSUM
--from tests.support.mock import patch
-
- # Import 3rd party libs
--from salt.ext.six.moves import builtins # pylint: disable=import-error,redefined-builtin
--from salt.ext import six
-+from salt.ext.six.moves import ( # pylint: disable=import-error,redefined-builtin
-+ builtins,
-+)
-+from salt.utils.odict import OrderedDict
-+from tests.support.mock import patch
-+from tests.support.unit import LOREM_IPSUM, TestCase
-
- log = logging.getLogger(__name__)
--_b = lambda x: x.encode('utf-8')
-+_b = lambda x: x.encode("utf-8")
- _s = lambda x: salt.utils.stringutils.to_str(x, normalize=True)
- # Some randomized data that will not decode
--BYTES = b'1\x814\x10'
-+BYTES = b"1\x814\x10"
-
- # This is an example of a unicode string with й constructed using two separate
- # code points. Do not modify it.
--EGGS = '\u044f\u0438\u0306\u0446\u0430'
-+EGGS = "\u044f\u0438\u0306\u0446\u0430"
-
-
- class DataTestCase(TestCase):
- test_data = [
-- 'unicode_str',
-- _b('питон'),
-+ "unicode_str",
-+ _b("питон"),
- 123,
- 456.789,
- True,
-@@ -40,71 +40,79 @@ class DataTestCase(TestCase):
- None,
- EGGS,
- BYTES,
-- [123, 456.789, _b('спам'), True, False, None, EGGS, BYTES],
-- (987, 654.321, _b('яйца'), EGGS, None, (True, EGGS, BYTES)),
-- {_b('str_key'): _b('str_val'),
-- None: True,
-- 123: 456.789,
-- EGGS: BYTES,
-- _b('subdict'): {'unicode_key': EGGS,
-- _b('tuple'): (123, 'hello', _b('world'), True, EGGS, BYTES),
-- _b('list'): [456, _b('спам'), False, EGGS, BYTES]}},
-- OrderedDict([(_b('foo'), 'bar'), (123, 456), (EGGS, BYTES)])
-+ [123, 456.789, _b("спам"), True, False, None, EGGS, BYTES],
-+ (987, 654.321, _b("яйца"), EGGS, None, (True, EGGS, BYTES)),
-+ {
-+ _b("str_key"): _b("str_val"),
-+ None: True,
-+ 123: 456.789,
-+ EGGS: BYTES,
-+ _b("subdict"): {
-+ "unicode_key": EGGS,
-+ _b("tuple"): (123, "hello", _b("world"), True, EGGS, BYTES),
-+ _b("list"): [456, _b("спам"), False, EGGS, BYTES],
-+ },
-+ },
-+ OrderedDict([(_b("foo"), "bar"), (123, 456), (EGGS, BYTES)]),
- ]
-
- def test_sorted_ignorecase(self):
-- test_list = ['foo', 'Foo', 'bar', 'Bar']
-- expected_list = ['bar', 'Bar', 'foo', 'Foo']
+- # Traverse and match integer key in a nested dict
+- # https://github.com/saltstack/salt/issues/56444
- self.assertEqual(
-- salt.utils.data.sorted_ignorecase(test_list), expected_list)
-+ test_list = ["foo", "Foo", "bar", "Bar"]
-+ expected_list = ["bar", "Bar", "foo", "Foo"]
-+ self.assertEqual(salt.utils.data.sorted_ignorecase(test_list), expected_list)
-
- def test_mysql_to_dict(self):
-- test_mysql_output = ['+----+------+-----------+------+---------+------+-------+------------------+',
-- '| Id | User | Host | db | Command | Time | State | Info |',
-- '+----+------+-----------+------+---------+------+-------+------------------+',
-- '| 7 | root | localhost | NULL | Query | 0 | init | show processlist |',
-- '+----+------+-----------+------+---------+------+-------+------------------+']
-+ test_mysql_output = [
-+ "+----+------+-----------+------+---------+------+-------+------------------+",
-+ "| Id | User | Host | db | Command | Time | State | Info |",
-+ "+----+------+-----------+------+---------+------+-------+------------------+",
-+ "| 7 | root | localhost | NULL | Query | 0 | init | show processlist |",
-+ "+----+------+-----------+------+---------+------+-------+------------------+",
-+ ]
-
-- ret = salt.utils.data.mysql_to_dict(test_mysql_output, 'Info')
-+ ret = salt.utils.data.mysql_to_dict(test_mysql_output, "Info")
- expected_dict = {
-- 'show processlist': {'Info': 'show processlist', 'db': 'NULL', 'State': 'init', 'Host': 'localhost',
-- 'Command': 'Query', 'User': 'root', 'Time': 0, 'Id': 7}}
-+ "show processlist": {
-+ "Info": "show processlist",
-+ "db": "NULL",
-+ "State": "init",
-+ "Host": "localhost",
-+ "Command": "Query",
-+ "User": "root",
-+ "Time": 0,
-+ "Id": 7,
-+ }
-+ }
-
- self.assertDictEqual(ret, expected_dict)
-
- def test_subdict_match(self):
-- test_two_level_dict = {'foo': {'bar': 'baz'}}
-- test_two_level_comb_dict = {'foo': {'bar': 'baz:woz'}}
-+ test_two_level_dict = {"foo": {"bar": "baz"}}
-+ test_two_level_comb_dict = {"foo": {"bar": "baz:woz"}}
- test_two_level_dict_and_list = {
-- 'abc': ['def', 'ghi', {'lorem': {'ipsum': [{'dolor': 'sit'}]}}],
-+ "abc": ["def", "ghi", {"lorem": {"ipsum": [{"dolor": "sit"}]}}],
- }
-- test_three_level_dict = {'a': {'b': {'c': 'v'}}}
-+ test_three_level_dict = {"a": {"b": {"c": "v"}}}
-
- self.assertTrue(
-- salt.utils.data.subdict_match(
-- test_two_level_dict, 'foo:bar:baz'
-- )
-+ salt.utils.data.subdict_match(test_two_level_dict, "foo:bar:baz")
- )
- # In test_two_level_comb_dict, 'foo:bar' corresponds to 'baz:woz', not
- # 'baz'. This match should return False.
- self.assertFalse(
-- salt.utils.data.subdict_match(
-- test_two_level_comb_dict, 'foo:bar:baz'
-- )
-+ salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz")
- )
- # This tests matching with the delimiter in the value part (in other
- # words, that the path 'foo:bar' corresponds to the string 'baz:woz').
- self.assertTrue(
-- salt.utils.data.subdict_match(
-- test_two_level_comb_dict, 'foo:bar:baz:woz'
-- )
-+ salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz:woz")
- )
- # This would match if test_two_level_comb_dict['foo']['bar'] was equal
- # to 'baz:woz:wiz', or if there was more deep nesting. But it does not,
- # so this should return False.
- self.assertFalse(
- salt.utils.data.subdict_match(
-- test_two_level_comb_dict, 'foo:bar:baz:woz:wiz'
-+ test_two_level_comb_dict, "foo:bar:baz:woz:wiz"
- )
- )
- # This tests for cases when a key path corresponds to a list. The
-@@ -115,189 +123,171 @@ class DataTestCase(TestCase):
- # salt.utils.traverse_list_and_dict() so this particular assertion is a
- # sanity check.
- self.assertTrue(
-- salt.utils.data.subdict_match(
-- test_two_level_dict_and_list, 'abc:ghi'
-- )
-+ salt.utils.data.subdict_match(test_two_level_dict_and_list, "abc:ghi")
- )
- # This tests the use case of a dict embedded in a list, embedded in a
- # list, embedded in a dict. This is a rather absurd case, but it
- # confirms that match recursion works properly.
- self.assertTrue(
- salt.utils.data.subdict_match(
-- test_two_level_dict_and_list, 'abc:lorem:ipsum:dolor:sit'
-+ test_two_level_dict_and_list, "abc:lorem:ipsum:dolor:sit"
- )
- )
- # Test four level dict match for reference
-- self.assertTrue(
-- salt.utils.data.subdict_match(
-- test_three_level_dict, 'a:b:c:v'
-- )
+- "it worked",
+- salt.utils.data.traverse_dict_and_list(
+- {"foo": {1234: "it worked"}}, "foo:1234", "it didn't work",
+- ),
- )
-+ self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:b:c:v"))
- # Test regression in 2015.8 where 'a:c:v' would match 'a:b:c:v'
-- self.assertFalse(
-- salt.utils.data.subdict_match(
-- test_three_level_dict, 'a:c:v'
-- )
+- # Make sure that we properly return the default value when the initial
+- # attempt fails and YAML-loading the target key doesn't change its
+- # value.
+- self.assertEqual(
+- "default",
+- salt.utils.data.traverse_dict_and_list(
+- {"foo": {"baz": "didn't work"}}, "foo:bar", "default",
+- ),
- )
-+ self.assertFalse(salt.utils.data.subdict_match(test_three_level_dict, "a:c:v"))
- # Test wildcard match
-- self.assertTrue(
-- salt.utils.data.subdict_match(
-- test_three_level_dict, 'a:*:c:v'
-- )
-- )
-+ self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:*:c:v"))
-
- def test_subdict_match_with_wildcards(self):
-- '''
-+ """
- Tests subdict matching when wildcards are used in the expression
-- '''
-- data = {
-- 'a': {
-- 'b': {
-- 'ç': 'd',
-- 'é': ['eff', 'gee', '8ch'],
-- 'ĩ': {'j': 'k'}
-- }
-- }
+-
+- def test_issue_39709(self):
+- test_two_level_dict_and_list = {
+- "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}]
- }
-- assert salt.utils.data.subdict_match(data, '*:*:*:*')
-- assert salt.utils.data.subdict_match(data, 'a:*:*:*')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:*')
-- assert salt.utils.data.subdict_match(data, 'a:b:ç:*')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:d')
-- assert salt.utils.data.subdict_match(data, 'a:*:ç:d')
-- assert salt.utils.data.subdict_match(data, '*:b:ç:d')
-- assert salt.utils.data.subdict_match(data, '*:*:ç:d')
-- assert salt.utils.data.subdict_match(data, '*:*:*:d')
-- assert salt.utils.data.subdict_match(data, 'a:*:*:d')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:ef*')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:g*')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:j:*')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:j:k')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:*:k')
-- assert salt.utils.data.subdict_match(data, 'a:b:*:*:*')
-+ """
-+ data = {"a": {"b": {"ç": "d", "é": ["eff", "gee", "8ch"], "ĩ": {"j": "k"}}}}
-+ assert salt.utils.data.subdict_match(data, "*:*:*:*")
-+ assert salt.utils.data.subdict_match(data, "a:*:*:*")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:*")
-+ assert salt.utils.data.subdict_match(data, "a:b:ç:*")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:d")
-+ assert salt.utils.data.subdict_match(data, "a:*:ç:d")
-+ assert salt.utils.data.subdict_match(data, "*:b:ç:d")
-+ assert salt.utils.data.subdict_match(data, "*:*:ç:d")
-+ assert salt.utils.data.subdict_match(data, "*:*:*:d")
-+ assert salt.utils.data.subdict_match(data, "a:*:*:d")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:ef*")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:g*")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:j:*")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:j:k")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:*:k")
-+ assert salt.utils.data.subdict_match(data, "a:b:*:*:*")
-
- def test_traverse_dict(self):
-- test_two_level_dict = {'foo': {'bar': 'baz'}}
-+ test_two_level_dict = {"foo": {"bar": "baz"}}
-
- self.assertDictEqual(
-- {'not_found': 'nope'},
-+ {"not_found": "nope"},
- salt.utils.data.traverse_dict(
-- test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'}
-- )
-+ test_two_level_dict, "foo:bar:baz", {"not_found": "nope"}
-+ ),
- )
- self.assertEqual(
-- 'baz',
-+ "baz",
- salt.utils.data.traverse_dict(
-- test_two_level_dict, 'foo:bar', {'not_found': 'not_found'}
-- )
-+ test_two_level_dict, "foo:bar", {"not_found": "not_found"}
-+ ),
- )
-
- def test_traverse_dict_and_list(self):
-- test_two_level_dict = {'foo': {'bar': 'baz'}}
-+ test_two_level_dict = {"foo": {"bar": "baz"}}
- test_two_level_dict_and_list = {
-- 'foo': ['bar', 'baz', {'lorem': {'ipsum': [{'dolor': 'sit'}]}}]
-+ "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}]
- }
-
- # Check traversing too far: salt.utils.data.traverse_dict_and_list() returns
- # the value corresponding to a given key path, and baz is a value
- # corresponding to the key path foo:bar.
- self.assertDictEqual(
-- {'not_found': 'nope'},
-+ {"not_found": "nope"},
- salt.utils.data.traverse_dict_and_list(
-- test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'}
-- )
-+ test_two_level_dict, "foo:bar:baz", {"not_found": "nope"}
-+ ),
- )
- # Now check to ensure that foo:bar corresponds to baz
- self.assertEqual(
-- 'baz',
-+ "baz",
- salt.utils.data.traverse_dict_and_list(
-- test_two_level_dict, 'foo:bar', {'not_found': 'not_found'}
-- )
-+ test_two_level_dict, "foo:bar", {"not_found": "not_found"}
-+ ),
- )
- # Check traversing too far
- self.assertDictEqual(
-- {'not_found': 'nope'},
-+ {"not_found": "nope"},
- salt.utils.data.traverse_dict_and_list(
-- test_two_level_dict_and_list, 'foo:bar', {'not_found': 'nope'}
-- )
-+ test_two_level_dict_and_list, "foo:bar", {"not_found": "nope"}
-+ ),
- )
- # Check index 1 (2nd element) of list corresponding to path 'foo'
- self.assertEqual(
-- 'baz',
-+ "baz",
- salt.utils.data.traverse_dict_and_list(
-- test_two_level_dict_and_list, 'foo:1', {'not_found': 'not_found'}
-- )
-+ test_two_level_dict_and_list, "foo:1", {"not_found": "not_found"}
-+ ),
- )
- # Traverse a couple times into dicts embedded in lists
- self.assertEqual(
-- 'sit',
-+ "sit",
- salt.utils.data.traverse_dict_and_list(
- test_two_level_dict_and_list,
-- 'foo:lorem:ipsum:dolor',
-- {'not_found': 'not_found'}
-- )
-+ "foo:lorem:ipsum:dolor",
-+ {"not_found": "not_found"},
-+ ),
- )
-
- def test_compare_dicts(self):
-- ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'bar'})
-+ ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"})
- self.assertEqual(ret, {})
-
-- ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'woz'})
-- expected_ret = {'foo': {'new': 'woz', 'old': 'bar'}}
-+ ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "woz"})
-+ expected_ret = {"foo": {"new": "woz", "old": "bar"}}
- self.assertDictEqual(ret, expected_ret)
-
- def test_compare_lists_no_change(self):
-- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'],
-- new=[1, 2, 3, 'a', 'b', 'c'])
-+ ret = salt.utils.data.compare_lists(
-+ old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3, "a", "b", "c"]
-+ )
- expected = {}
- self.assertDictEqual(ret, expected)
-
- def test_compare_lists_changes(self):
-- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'],
-- new=[1, 2, 4, 'x', 'y', 'z'])
-- expected = {'new': [4, 'x', 'y', 'z'], 'old': [3, 'a', 'b', 'c']}
-+ ret = salt.utils.data.compare_lists(
-+ old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 4, "x", "y", "z"]
-+ )
-+ expected = {"new": [4, "x", "y", "z"], "old": [3, "a", "b", "c"]}
- self.assertDictEqual(ret, expected)
-
- def test_compare_lists_changes_new(self):
-- ret = salt.utils.data.compare_lists(old=[1, 2, 3],
-- new=[1, 2, 3, 'x', 'y', 'z'])
-- expected = {'new': ['x', 'y', 'z']}
-+ ret = salt.utils.data.compare_lists(old=[1, 2, 3], new=[1, 2, 3, "x", "y", "z"])
-+ expected = {"new": ["x", "y", "z"]}
- self.assertDictEqual(ret, expected)
-
- def test_compare_lists_changes_old(self):
-- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'],
-- new=[1, 2, 3])
-- expected = {'old': ['a', 'b', 'c']}
-+ ret = salt.utils.data.compare_lists(old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3])
-+ expected = {"old": ["a", "b", "c"]}
- self.assertDictEqual(ret, expected)
-
- def test_decode(self):
-- '''
-+ """
- Companion to test_decode_to_str, they should both be kept up-to-date
- with one another.
-
- NOTE: This uses the lambda "_b" defined above in the global scope,
- which encodes a string to a bytestring, assuming utf-8.
-- '''
-+ """
- expected = [
-- 'unicode_str',
-- 'питон',
-+ "unicode_str",
-+ "питон",
- 123,
- 456.789,
- True,
- False,
- None,
-- 'яйца',
-+ "яйца",
- BYTES,
-- [123, 456.789, 'спам', True, False, None, 'яйца', BYTES],
-- (987, 654.321, 'яйца', 'яйца', None, (True, 'яйца', BYTES)),
-- {'str_key': 'str_val',
-- None: True,
-- 123: 456.789,
-- 'яйца': BYTES,
-- 'subdict': {'unicode_key': 'яйца',
-- 'tuple': (123, 'hello', 'world', True, 'яйца', BYTES),
-- 'list': [456, 'спам', False, 'яйца', BYTES]}},
-- OrderedDict([('foo', 'bar'), (123, 456), ('яйца', BYTES)])
-+ [123, 456.789, "спам", True, False, None, "яйца", BYTES],
-+ (987, 654.321, "яйца", "яйца", None, (True, "яйца", BYTES)),
-+ {
-+ "str_key": "str_val",
-+ None: True,
-+ 123: 456.789,
-+ "яйца": BYTES,
-+ "subdict": {
-+ "unicode_key": "яйца",
-+ "tuple": (123, "hello", "world", True, "яйца", BYTES),
-+ "list": [456, "спам", False, "яйца", BYTES],
-+ },
-+ },
-+ OrderedDict([("foo", "bar"), (123, 456), ("яйца", BYTES)]),
- ]
-
- ret = salt.utils.data.decode(
-@@ -305,7 +295,8 @@ class DataTestCase(TestCase):
- keep=True,
- normalize=True,
- preserve_dict_class=True,
-- preserve_tuples=True)
-+ preserve_tuples=True,
-+ )
- self.assertEqual(ret, expected)
-
- # The binary data in the data structure should fail to decode, even
-@@ -317,74 +308,100 @@ class DataTestCase(TestCase):
- keep=False,
- normalize=True,
- preserve_dict_class=True,
-- preserve_tuples=True)
-+ preserve_tuples=True,
-+ )
-
- # Now munge the expected data so that we get what we would expect if we
- # disable preservation of dict class and tuples
-- expected[10] = [987, 654.321, 'яйца', 'яйца', None, [True, 'яйца', BYTES]]
-- expected[11]['subdict']['tuple'] = [123, 'hello', 'world', True, 'яйца', BYTES]
-- expected[12] = {'foo': 'bar', 123: 456, 'яйца': BYTES}
-+ expected[10] = [987, 654.321, "яйца", "яйца", None, [True, "яйца", BYTES]]
-+ expected[11]["subdict"]["tuple"] = [123, "hello", "world", True, "яйца", BYTES]
-+ expected[12] = {"foo": "bar", 123: 456, "яйца": BYTES}
-
- ret = salt.utils.data.decode(
- self.test_data,
- keep=True,
- normalize=True,
- preserve_dict_class=False,
-- preserve_tuples=False)
-+ preserve_tuples=False,
-+ )
- self.assertEqual(ret, expected)
-
- # Now test single non-string, non-data-structure items, these should
- # return the same value when passed to this function
- for item in (123, 4.56, True, False, None):
-- log.debug('Testing decode of %s', item)
-+ log.debug("Testing decode of %s", item)
- self.assertEqual(salt.utils.data.decode(item), item)
-
- # Test single strings (not in a data structure)
-- self.assertEqual(salt.utils.data.decode('foo'), 'foo')
-- self.assertEqual(salt.utils.data.decode(_b('bar')), 'bar')
-- self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), 'яйца')
-+ self.assertEqual(salt.utils.data.decode("foo"), "foo")
-+ self.assertEqual(salt.utils.data.decode(_b("bar")), "bar")
-+ self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), "яйца")
- self.assertEqual(salt.utils.data.decode(EGGS, normalize=False), EGGS)
-
- # Test binary blob
- self.assertEqual(salt.utils.data.decode(BYTES, keep=True), BYTES)
-- self.assertRaises(
-- UnicodeDecodeError,
-- salt.utils.data.decode,
-- BYTES,
-- keep=False)
-+ self.assertRaises(UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False)
-+
-+ def test_circular_refs_dicts(self):
-+ test_dict = {"key": "value", "type": "test1"}
-+ test_dict["self"] = test_dict
-+ ret = salt.utils.data._remove_circular_refs(ob=test_dict)
-+ self.assertDictEqual(ret, {"key": "value", "type": "test1", "self": None})
-+
-+ def test_circular_refs_lists(self):
-+ test_list = {
-+ "foo": [],
-+ }
-+ test_list["foo"].append((test_list,))
-+ ret = salt.utils.data._remove_circular_refs(ob=test_list)
-+ self.assertDictEqual(ret, {"foo": [(None,)]})
-+
-+ def test_circular_refs_tuple(self):
-+ test_dup = {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1}
-+ ret = salt.utils.data._remove_circular_refs(ob=test_dup)
-+ self.assertDictEqual(
-+ ret, {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1}
-+ )
-
- def test_decode_to_str(self):
-- '''
-+ """
- Companion to test_decode, they should both be kept up-to-date with one
- another.
-
- NOTE: This uses the lambda "_s" defined above in the global scope,
- which converts the string/bytestring to a str type.
-- '''
-+ """
- expected = [
-- _s('unicode_str'),
-- _s('питон'),
-+ _s("unicode_str"),
-+ _s("питон"),
- 123,
- 456.789,
- True,
- False,
- None,
-- _s('яйца'),
-+ _s("яйца"),
- BYTES,
-- [123, 456.789, _s('спам'), True, False, None, _s('яйца'), BYTES],
-- (987, 654.321, _s('яйца'), _s('яйца'), None, (True, _s('яйца'), BYTES)),
-+ [123, 456.789, _s("спам"), True, False, None, _s("яйца"), BYTES],
-+ (987, 654.321, _s("яйца"), _s("яйца"), None, (True, _s("яйца"), BYTES)),
- {
-- _s('str_key'): _s('str_val'),
-+ _s("str_key"): _s("str_val"),
- None: True,
- 123: 456.789,
-- _s('яйца'): BYTES,
-- _s('subdict'): {
-- _s('unicode_key'): _s('яйца'),
-- _s('tuple'): (123, _s('hello'), _s('world'), True, _s('яйца'), BYTES),
-- _s('list'): [456, _s('спам'), False, _s('яйца'), BYTES]
-- }
-+ _s("яйца"): BYTES,
-+ _s("subdict"): {
-+ _s("unicode_key"): _s("яйца"),
-+ _s("tuple"): (
-+ 123,
-+ _s("hello"),
-+ _s("world"),
-+ True,
-+ _s("яйца"),
-+ BYTES,
-+ ),
-+ _s("list"): [456, _s("спам"), False, _s("яйца"), BYTES],
-+ },
- },
-- OrderedDict([(_s('foo'), _s('bar')), (123, 456), (_s('яйца'), BYTES)])
-+ OrderedDict([(_s("foo"), _s("bar")), (123, 456), (_s("яйца"), BYTES)]),
- ]
-
- ret = salt.utils.data.decode(
-@@ -393,27 +410,42 @@ class DataTestCase(TestCase):
- normalize=True,
- preserve_dict_class=True,
- preserve_tuples=True,
-- to_str=True)
-+ to_str=True,
-+ )
- self.assertEqual(ret, expected)
-
-- if six.PY3:
-- # The binary data in the data structure should fail to decode, even
-- # using the fallback, and raise an exception.
-- self.assertRaises(
-- UnicodeDecodeError,
-- salt.utils.data.decode,
-- self.test_data,
-- keep=False,
-- normalize=True,
-- preserve_dict_class=True,
-- preserve_tuples=True,
-- to_str=True)
-+ # The binary data in the data structure should fail to decode, even
-+ # using the fallback, and raise an exception.
-+ self.assertRaises(
-+ UnicodeDecodeError,
-+ salt.utils.data.decode,
-+ self.test_data,
-+ keep=False,
-+ normalize=True,
-+ preserve_dict_class=True,
-+ preserve_tuples=True,
-+ to_str=True,
-+ )
-
- # Now munge the expected data so that we get what we would expect if we
- # disable preservation of dict class and tuples
-- expected[10] = [987, 654.321, _s('яйца'), _s('яйца'), None, [True, _s('яйца'), BYTES]]
-- expected[11][_s('subdict')][_s('tuple')] = [123, _s('hello'), _s('world'), True, _s('яйца'), BYTES]
-- expected[12] = {_s('foo'): _s('bar'), 123: 456, _s('яйца'): BYTES}
-+ expected[10] = [
-+ 987,
-+ 654.321,
-+ _s("яйца"),
-+ _s("яйца"),
-+ None,
-+ [True, _s("яйца"), BYTES],
-+ ]
-+ expected[11][_s("subdict")][_s("tuple")] = [
-+ 123,
-+ _s("hello"),
-+ _s("world"),
-+ True,
-+ _s("яйца"),
-+ BYTES,
-+ ]
-+ expected[12] = {_s("foo"): _s("bar"), 123: 456, _s("яйца"): BYTES}
-
- ret = salt.utils.data.decode(
- self.test_data,
-@@ -421,47 +453,41 @@ class DataTestCase(TestCase):
- normalize=True,
- preserve_dict_class=False,
- preserve_tuples=False,
-- to_str=True)
-+ to_str=True,
-+ )
- self.assertEqual(ret, expected)
-
- # Now test single non-string, non-data-structure items, these should
- # return the same value when passed to this function
- for item in (123, 4.56, True, False, None):
-- log.debug('Testing decode of %s', item)
-+ log.debug("Testing decode of %s", item)
- self.assertEqual(salt.utils.data.decode(item, to_str=True), item)
-
- # Test single strings (not in a data structure)
-- self.assertEqual(salt.utils.data.decode('foo', to_str=True), _s('foo'))
-- self.assertEqual(salt.utils.data.decode(_b('bar'), to_str=True), _s('bar'))
-+ self.assertEqual(salt.utils.data.decode("foo", to_str=True), _s("foo"))
-+ self.assertEqual(salt.utils.data.decode(_b("bar"), to_str=True), _s("bar"))
-
- # Test binary blob
+-
- self.assertEqual(
-- salt.utils.data.decode(BYTES, keep=True, to_str=True),
-- BYTES
-+ self.assertEqual(salt.utils.data.decode(BYTES, keep=True, to_str=True), BYTES)
-+ self.assertRaises(
-+ UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False, to_str=True,
- )
-- if six.PY3:
-- self.assertRaises(
-- UnicodeDecodeError,
-- salt.utils.data.decode,
-- BYTES,
-- keep=False,
-- to_str=True)
-
- def test_decode_fallback(self):
-- '''
-+ """
- Test fallback to utf-8
-- '''
-- with patch.object(builtins, '__salt_system_encoding__', 'ascii'):
-- self.assertEqual(salt.utils.data.decode(_b('яйца')), 'яйца')
-+ """
-+ with patch.object(builtins, "__salt_system_encoding__", "ascii"):
-+ self.assertEqual(salt.utils.data.decode(_b("яйца")), "яйца")
-
- def test_encode(self):
-- '''
-+ """
- NOTE: This uses the lambda "_b" defined above in the global scope,
- which encodes a string to a bytestring, assuming utf-8.
-- '''
-+ """
- expected = [
-- _b('unicode_str'),
-- _b('питон'),
-+ _b("unicode_str"),
-+ _b("питон"),
- 123,
- 456.789,
- True,
-@@ -469,67 +495,71 @@ class DataTestCase(TestCase):
- None,
- _b(EGGS),
- BYTES,
-- [123, 456.789, _b('спам'), True, False, None, _b(EGGS), BYTES],
-- (987, 654.321, _b('яйца'), _b(EGGS), None, (True, _b(EGGS), BYTES)),
-+ [123, 456.789, _b("спам"), True, False, None, _b(EGGS), BYTES],
-+ (987, 654.321, _b("яйца"), _b(EGGS), None, (True, _b(EGGS), BYTES)),
- {
-- _b('str_key'): _b('str_val'),
-+ _b("str_key"): _b("str_val"),
- None: True,
- 123: 456.789,
- _b(EGGS): BYTES,
-- _b('subdict'): {
-- _b('unicode_key'): _b(EGGS),
-- _b('tuple'): (123, _b('hello'), _b('world'), True, _b(EGGS), BYTES),
-- _b('list'): [456, _b('спам'), False, _b(EGGS), BYTES]
-- }
-+ _b("subdict"): {
-+ _b("unicode_key"): _b(EGGS),
-+ _b("tuple"): (123, _b("hello"), _b("world"), True, _b(EGGS), BYTES),
-+ _b("list"): [456, _b("спам"), False, _b(EGGS), BYTES],
-+ },
- },
-- OrderedDict([(_b('foo'), _b('bar')), (123, 456), (_b(EGGS), BYTES)])
-+ OrderedDict([(_b("foo"), _b("bar")), (123, 456), (_b(EGGS), BYTES)]),
- ]
-
- # Both keep=True and keep=False should work because the BYTES data is
- # already bytes.
- ret = salt.utils.data.encode(
-- self.test_data,
-- keep=True,
-- preserve_dict_class=True,
-- preserve_tuples=True)
-+ self.test_data, keep=True, preserve_dict_class=True, preserve_tuples=True
-+ )
- self.assertEqual(ret, expected)
- ret = salt.utils.data.encode(
-- self.test_data,
-- keep=False,
-- preserve_dict_class=True,
-- preserve_tuples=True)
-+ self.test_data, keep=False, preserve_dict_class=True, preserve_tuples=True
-+ )
- self.assertEqual(ret, expected)
-
- # Now munge the expected data so that we get what we would expect if we
- # disable preservation of dict class and tuples
-- expected[10] = [987, 654.321, _b('яйца'), _b(EGGS), None, [True, _b(EGGS), BYTES]]
-- expected[11][_b('subdict')][_b('tuple')] = [
-- 123, _b('hello'), _b('world'), True, _b(EGGS), BYTES
-+ expected[10] = [
-+ 987,
-+ 654.321,
-+ _b("яйца"),
-+ _b(EGGS),
-+ None,
-+ [True, _b(EGGS), BYTES],
- ]
-- expected[12] = {_b('foo'): _b('bar'), 123: 456, _b(EGGS): BYTES}
-+ expected[11][_b("subdict")][_b("tuple")] = [
-+ 123,
-+ _b("hello"),
-+ _b("world"),
-+ True,
-+ _b(EGGS),
-+ BYTES,
-+ ]
-+ expected[12] = {_b("foo"): _b("bar"), 123: 456, _b(EGGS): BYTES}
-
- ret = salt.utils.data.encode(
-- self.test_data,
-- keep=True,
-- preserve_dict_class=False,
-- preserve_tuples=False)
-+ self.test_data, keep=True, preserve_dict_class=False, preserve_tuples=False
-+ )
- self.assertEqual(ret, expected)
- ret = salt.utils.data.encode(
-- self.test_data,
-- keep=False,
-- preserve_dict_class=False,
-- preserve_tuples=False)
-+ self.test_data, keep=False, preserve_dict_class=False, preserve_tuples=False
-+ )
- self.assertEqual(ret, expected)
-
- # Now test single non-string, non-data-structure items, these should
- # return the same value when passed to this function
- for item in (123, 4.56, True, False, None):
-- log.debug('Testing encode of %s', item)
-+ log.debug("Testing encode of %s", item)
- self.assertEqual(salt.utils.data.encode(item), item)
-
- # Test single strings (not in a data structure)
-- self.assertEqual(salt.utils.data.encode('foo'), _b('foo'))
-- self.assertEqual(salt.utils.data.encode(_b('bar')), _b('bar'))
-+ self.assertEqual(salt.utils.data.encode("foo"), _b("foo"))
-+ self.assertEqual(salt.utils.data.encode(_b("bar")), _b("bar"))
-
- # Test binary blob, nothing should happen even when keep=False since
- # the data is already bytes
-@@ -537,41 +567,43 @@ class DataTestCase(TestCase):
- self.assertEqual(salt.utils.data.encode(BYTES, keep=False), BYTES)
-
- def test_encode_keep(self):
-- '''
-+ """
- Whereas we tested the keep argument in test_decode, it is much easier
- to do a more comprehensive test of keep in its own function where we
- can force the encoding.
-- '''
-- unicode_str = 'питон'
-- encoding = 'ascii'
-+ """
-+ unicode_str = "питон"
-+ encoding = "ascii"
-
- # Test single string
- self.assertEqual(
-- salt.utils.data.encode(unicode_str, encoding, keep=True),
-- unicode_str)
-+ salt.utils.data.encode(unicode_str, encoding, keep=True), unicode_str
-+ )
- self.assertRaises(
- UnicodeEncodeError,
- salt.utils.data.encode,
- unicode_str,
- encoding,
-- keep=False)
-+ keep=False,
-+ )
-
- data = [
- unicode_str,
-- [b'foo', [unicode_str], {b'key': unicode_str}, (unicode_str,)],
-- {b'list': [b'foo', unicode_str],
-- b'dict': {b'key': unicode_str},
-- b'tuple': (b'foo', unicode_str)},
-- ([b'foo', unicode_str], {b'key': unicode_str}, (unicode_str,))
-+ [b"foo", [unicode_str], {b"key": unicode_str}, (unicode_str,)],
-+ {
-+ b"list": [b"foo", unicode_str],
-+ b"dict": {b"key": unicode_str},
-+ b"tuple": (b"foo", unicode_str),
-+ },
-+ ([b"foo", unicode_str], {b"key": unicode_str}, (unicode_str,)),
- ]
-
- # Since everything was a bytestring aside from the bogus data, the
- # return data should be identical. We don't need to test recursive
- # decoding, that has already been tested in test_encode.
- self.assertEqual(
-- salt.utils.data.encode(data, encoding,
-- keep=True, preserve_tuples=True),
-- data
-+ salt.utils.data.encode(data, encoding, keep=True, preserve_tuples=True),
-+ data,
- )
- self.assertRaises(
- UnicodeEncodeError,
-@@ -579,13 +611,15 @@ class DataTestCase(TestCase):
- data,
- encoding,
- keep=False,
-- preserve_tuples=True)
-+ preserve_tuples=True,
-+ )
-
- for index, _ in enumerate(data):
- self.assertEqual(
-- salt.utils.data.encode(data[index], encoding,
-- keep=True, preserve_tuples=True),
-- data[index]
-+ salt.utils.data.encode(
-+ data[index], encoding, keep=True, preserve_tuples=True
-+ ),
-+ data[index],
- )
- self.assertRaises(
- UnicodeEncodeError,
-@@ -593,31 +627,36 @@ class DataTestCase(TestCase):
- data[index],
- encoding,
- keep=False,
-- preserve_tuples=True)
-+ preserve_tuples=True,
-+ )
-
- def test_encode_fallback(self):
-- '''
-+ """
- Test fallback to utf-8
-- '''
-- with patch.object(builtins, '__salt_system_encoding__', 'ascii'):
-- self.assertEqual(salt.utils.data.encode('яйца'), _b('яйца'))
-- with patch.object(builtins, '__salt_system_encoding__', 'CP1252'):
-- self.assertEqual(salt.utils.data.encode('Ψ'), _b('Ψ'))
-+ """
-+ with patch.object(builtins, "__salt_system_encoding__", "ascii"):
-+ self.assertEqual(salt.utils.data.encode("яйца"), _b("яйца"))
-+ with patch.object(builtins, "__salt_system_encoding__", "CP1252"):
-+ self.assertEqual(salt.utils.data.encode("Ψ"), _b("Ψ"))
-
- def test_repack_dict(self):
-- list_of_one_element_dicts = [{'dict_key_1': 'dict_val_1'},
-- {'dict_key_2': 'dict_val_2'},
-- {'dict_key_3': 'dict_val_3'}]
-- expected_ret = {'dict_key_1': 'dict_val_1',
-- 'dict_key_2': 'dict_val_2',
-- 'dict_key_3': 'dict_val_3'}
-+ list_of_one_element_dicts = [
-+ {"dict_key_1": "dict_val_1"},
-+ {"dict_key_2": "dict_val_2"},
-+ {"dict_key_3": "dict_val_3"},
-+ ]
-+ expected_ret = {
-+ "dict_key_1": "dict_val_1",
-+ "dict_key_2": "dict_val_2",
-+ "dict_key_3": "dict_val_3",
-+ }
- ret = salt.utils.data.repack_dictlist(list_of_one_element_dicts)
- self.assertDictEqual(ret, expected_ret)
-
- # Try with yaml
-- yaml_key_val_pair = '- key1: val1'
-+ yaml_key_val_pair = "- key1: val1"
- ret = salt.utils.data.repack_dictlist(yaml_key_val_pair)
-- self.assertDictEqual(ret, {'key1': 'val1'})
-+ self.assertDictEqual(ret, {"key1": "val1"})
-
- # Make sure we handle non-yaml junk data
- ret = salt.utils.data.repack_dictlist(LOREM_IPSUM)
-@@ -626,43 +665,47 @@ class DataTestCase(TestCase):
- def test_stringify(self):
- self.assertRaises(TypeError, salt.utils.data.stringify, 9)
- self.assertEqual(
-- salt.utils.data.stringify(['one', 'two', str('three'), 4, 5]), # future lint: disable=blacklisted-function
-- ['one', 'two', 'three', '4', '5']
-+ salt.utils.data.stringify(
-+ ["one", "two", "three", 4, 5]
-+ ), # future lint: disable=blacklisted-function
-+ ["one", "two", "three", "4", "5"],
- )
-
- def test_json_query(self):
- # Raises exception if jmespath module is not found
-- with patch('salt.utils.data.jmespath', None):
-+ with patch("salt.utils.data.jmespath", None):
- self.assertRaisesRegex(
-- RuntimeError, 'requires jmespath',
-- salt.utils.data.json_query, {}, '@'
-+ RuntimeError, "requires jmespath", salt.utils.data.json_query, {}, "@"
- )
-
- # Test search
- user_groups = {
-- 'user1': {'groups': ['group1', 'group2', 'group3']},
-- 'user2': {'groups': ['group1', 'group2']},
-- 'user3': {'groups': ['group3']},
-+ "user1": {"groups": ["group1", "group2", "group3"]},
-+ "user2": {"groups": ["group1", "group2"]},
-+ "user3": {"groups": ["group3"]},
- }
-- expression = '*.groups[0]'
-- primary_groups = ['group1', 'group1', 'group3']
-+ expression = "*.groups[0]"
-+ primary_groups = ["group1", "group1", "group3"]
- self.assertEqual(
-- sorted(salt.utils.data.json_query(user_groups, expression)),
-- primary_groups
-+ sorted(salt.utils.data.json_query(user_groups, expression)), primary_groups
- )
-
-
- class FilterFalseyTestCase(TestCase):
-- '''
-+ """
- Test suite for salt.utils.data.filter_falsey
-- '''
-+ """
-
- def test_nop(self):
-- '''
-+ """
- Test cases where nothing will be done.
-- '''
-+ """
- # Test with dictionary without recursion
-- old_dict = {'foo': 'bar', 'bar': {'baz': {'qux': 'quux'}}, 'baz': ['qux', {'foo': 'bar'}]}
-+ old_dict = {
-+ "foo": "bar",
-+ "bar": {"baz": {"qux": "quux"}},
-+ "baz": ["qux", {"foo": "bar"}],
-+ }
- new_dict = salt.utils.data.filter_falsey(old_dict)
- self.assertEqual(old_dict, new_dict)
- # Check returned type equality
-@@ -671,23 +714,25 @@ class FilterFalseyTestCase(TestCase):
- new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
- self.assertEqual(old_dict, new_dict)
- # Test with list
-- old_list = ['foo', 'bar']
-+ old_list = ["foo", "bar"]
- new_list = salt.utils.data.filter_falsey(old_list)
- self.assertEqual(old_list, new_list)
- # Check returned type equality
- self.assertIs(type(old_list), type(new_list))
- # Test with set
-- old_set = set(['foo', 'bar'])
-+ old_set = {"foo", "bar"}
- new_set = salt.utils.data.filter_falsey(old_set)
- self.assertEqual(old_set, new_set)
- # Check returned type equality
- self.assertIs(type(old_set), type(new_set))
- # Test with OrderedDict
-- old_dict = OrderedDict([
-- ('foo', 'bar'),
-- ('bar', OrderedDict([('qux', 'quux')])),
-- ('baz', ['qux', OrderedDict([('foo', 'bar')])])
-- ])
-+ old_dict = OrderedDict(
-+ [
-+ ("foo", "bar"),
-+ ("bar", OrderedDict([("qux", "quux")])),
-+ ("baz", ["qux", OrderedDict([("foo", "bar")])]),
-+ ]
-+ )
- new_dict = salt.utils.data.filter_falsey(old_dict)
- self.assertEqual(old_dict, new_dict)
- self.assertIs(type(old_dict), type(new_dict))
-@@ -696,8 +741,8 @@ class FilterFalseyTestCase(TestCase):
- new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type(0)])
- self.assertEqual(old_list, new_list)
- # Test excluding str (or unicode) (or both)
-- old_list = ['']
-- new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type('')])
-+ old_list = [""]
-+ new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type("")])
- self.assertEqual(old_list, new_list)
- # Test excluding list
- old_list = [[]]
-@@ -709,185 +754,264 @@ class FilterFalseyTestCase(TestCase):
- self.assertEqual(old_list, new_list)
-
- def test_filter_dict_no_recurse(self):
-- '''
-+ """
- Test filtering a dictionary without recursing.
- This will only filter out key-values where the values are falsey.
-- '''
-- old_dict = {'foo': None,
-- 'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}},
-- 'baz': ['qux'],
-- 'qux': {},
-- 'quux': []}
-+ """
-+ old_dict = {
-+ "foo": None,
-+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}},
-+ "baz": ["qux"],
-+ "qux": {},
-+ "quux": [],
-+ }
- new_dict = salt.utils.data.filter_falsey(old_dict)
-- expect_dict = {'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}}, 'baz': ['qux']}
-+ expect_dict = {
-+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}},
-+ "baz": ["qux"],
-+ }
- self.assertEqual(expect_dict, new_dict)
- self.assertIs(type(expect_dict), type(new_dict))
-
- def test_filter_dict_recurse(self):
-- '''
-+ """
- Test filtering a dictionary with recursing.
- This will filter out any key-values where the values are falsey or when
- the values *become* falsey after filtering their contents (in case they
- are lists or dicts).
-- '''
-- old_dict = {'foo': None,
-- 'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}},
-- 'baz': ['qux'],
-- 'qux': {},
-- 'quux': []}
-+ """
-+ old_dict = {
-+ "foo": None,
-+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}},
-+ "baz": ["qux"],
-+ "qux": {},
-+ "quux": [],
-+ }
- new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
-- expect_dict = {'baz': ['qux']}
-+ expect_dict = {"baz": ["qux"]}
- self.assertEqual(expect_dict, new_dict)
- self.assertIs(type(expect_dict), type(new_dict))
-
- def test_filter_list_no_recurse(self):
-- '''
-+ """
- Test filtering a list without recursing.
- This will only filter out items which are falsey.
-- '''
-- old_list = ['foo', None, [], {}, 0, '']
-+ """
-+ old_list = ["foo", None, [], {}, 0, ""]
- new_list = salt.utils.data.filter_falsey(old_list)
-- expect_list = ['foo']
-+ expect_list = ["foo"]
- self.assertEqual(expect_list, new_list)
- self.assertIs(type(expect_list), type(new_list))
- # Ensure nested values are *not* filtered out.
- old_list = [
-- 'foo',
-- ['foo'],
-- ['foo', None],
-- {'foo': 0},
-- {'foo': 'bar', 'baz': []},
-- [{'foo': ''}],
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
- ]
- new_list = salt.utils.data.filter_falsey(old_list)
- self.assertEqual(old_list, new_list)
- self.assertIs(type(old_list), type(new_list))
-
- def test_filter_list_recurse(self):
-- '''
-+ """
- Test filtering a list with recursing.
- This will filter out any items which are falsey, or which become falsey
- after filtering their contents (in case they are lists or dicts).
-- '''
-+ """
- old_list = [
-- 'foo',
-- ['foo'],
-- ['foo', None],
-- {'foo': 0},
-- {'foo': 'bar', 'baz': []},
-- [{'foo': ''}]
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
- ]
- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3)
-- expect_list = ['foo', ['foo'], ['foo'], {'foo': 'bar'}]
-+ expect_list = ["foo", ["foo"], ["foo"], {"foo": "bar"}]
- self.assertEqual(expect_list, new_list)
- self.assertIs(type(expect_list), type(new_list))
-
- def test_filter_set_no_recurse(self):
-- '''
-+ """
- Test filtering a set without recursing.
- Note that a set cannot contain unhashable types, so recursion is not possible.
-- '''
-- old_set = set([
-- 'foo',
-- None,
-- 0,
-- '',
-- ])
-+ """
-+ old_set = {"foo", None, 0, ""}
- new_set = salt.utils.data.filter_falsey(old_set)
-- expect_set = set(['foo'])
-+ expect_set = {"foo"}
- self.assertEqual(expect_set, new_set)
- self.assertIs(type(expect_set), type(new_set))
-
- def test_filter_ordereddict_no_recurse(self):
-- '''
-+ """
- Test filtering an OrderedDict without recursing.
-- '''
-- old_dict = OrderedDict([
-- ('foo', None),
-- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])),
-- ('baz', ['qux']),
-- ('qux', {}),
-- ('quux', [])
-- ])
-+ """
-+ old_dict = OrderedDict(
-+ [
-+ ("foo", None),
-+ (
-+ "bar",
-+ OrderedDict(
-+ [
-+ (
-+ "baz",
-+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
-+ )
-+ ]
-+ ),
-+ ),
-+ ("baz", ["qux"]),
-+ ("qux", {}),
-+ ("quux", []),
-+ ]
-+ )
- new_dict = salt.utils.data.filter_falsey(old_dict)
-- expect_dict = OrderedDict([
-- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])),
-- ('baz', ['qux']),
-- ])
-+ expect_dict = OrderedDict(
-+ [
-+ (
-+ "bar",
-+ OrderedDict(
-+ [
-+ (
-+ "baz",
-+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
-+ )
-+ ]
-+ ),
-+ ),
-+ ("baz", ["qux"]),
-+ ]
-+ )
- self.assertEqual(expect_dict, new_dict)
- self.assertIs(type(expect_dict), type(new_dict))
-
- def test_filter_ordereddict_recurse(self):
-- '''
-+ """
- Test filtering an OrderedDict with recursing.
-- '''
-- old_dict = OrderedDict([
-- ('foo', None),
-- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])),
-- ('baz', ['qux']),
-- ('qux', {}),
-- ('quux', [])
-- ])
-+ """
-+ old_dict = OrderedDict(
-+ [
-+ ("foo", None),
-+ (
-+ "bar",
-+ OrderedDict(
-+ [
-+ (
-+ "baz",
-+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
-+ )
-+ ]
-+ ),
-+ ),
-+ ("baz", ["qux"]),
-+ ("qux", {}),
-+ ("quux", []),
-+ ]
-+ )
- new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
-- expect_dict = OrderedDict([
-- ('baz', ['qux']),
-- ])
-+ expect_dict = OrderedDict([("baz", ["qux"])])
- self.assertEqual(expect_dict, new_dict)
- self.assertIs(type(expect_dict), type(new_dict))
-
- def test_filter_list_recurse_limit(self):
-- '''
-+ """
- Test filtering a list with recursing, but with a limited depth.
- Note that the top-level is always processed, so a recursion depth of 2
- means that two *additional* levels are processed.
-- '''
-+ """
- old_list = [None, [None, [None, [None]]]]
- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=2)
- self.assertEqual([[[[None]]]], new_list)
-
- def test_filter_dict_recurse_limit(self):
-- '''
-+ """
- Test filtering a dict with recursing, but with a limited depth.
- Note that the top-level is always processed, so a recursion depth of 2
- means that two *additional* levels are processed.
-- '''
-- old_dict = {'one': None,
-- 'foo': {'two': None, 'bar': {'three': None, 'baz': {'four': None}}}}
-+ """
-+ old_dict = {
-+ "one": None,
-+ "foo": {"two": None, "bar": {"three": None, "baz": {"four": None}}},
-+ }
- new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=2)
-- self.assertEqual({'foo': {'bar': {'baz': {'four': None}}}}, new_dict)
-+ self.assertEqual({"foo": {"bar": {"baz": {"four": None}}}}, new_dict)
-
- def test_filter_exclude_types(self):
-- '''
-+ """
- Test filtering a list recursively, but also ignoring (i.e. not filtering)
- out certain types that can be falsey.
-- '''
-+ """
- # Ignore int, unicode
-- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]]
-- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type(0), type('')])
-- self.assertEqual(['foo', ['foo'], ['foo'], {'foo': 0}, {'foo': 'bar'}, [{'foo': ''}]], new_list)
-+ old_list = [
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
-+ ]
-+ new_list = salt.utils.data.filter_falsey(
-+ old_list, recurse_depth=3, ignore_types=[type(0), type("")]
-+ )
-+ self.assertEqual(
-+ ["foo", ["foo"], ["foo"], {"foo": 0}, {"foo": "bar"}, [{"foo": ""}]],
-+ new_list,
-+ )
- # Ignore list
-- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]]
-- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type([])])
-- self.assertEqual(['foo', ['foo'], ['foo'], {'foo': 'bar', 'baz': []}, []], new_list)
-+ old_list = [
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
-+ ]
-+ new_list = salt.utils.data.filter_falsey(
-+ old_list, recurse_depth=3, ignore_types=[type([])]
-+ )
-+ self.assertEqual(
-+ ["foo", ["foo"], ["foo"], {"foo": "bar", "baz": []}, []], new_list
-+ )
- # Ignore dict
-- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]]
-- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type({})])
-- self.assertEqual(['foo', ['foo'], ['foo'], {}, {'foo': 'bar'}, [{}]], new_list)
-+ old_list = [
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
-+ ]
-+ new_list = salt.utils.data.filter_falsey(
-+ old_list, recurse_depth=3, ignore_types=[type({})]
-+ )
-+ self.assertEqual(["foo", ["foo"], ["foo"], {}, {"foo": "bar"}, [{}]], new_list)
- # Ignore NoneType
-- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]]
-- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type(None)])
-- self.assertEqual(['foo', ['foo'], ['foo', None], {'foo': 'bar'}], new_list)
-+ old_list = [
-+ "foo",
-+ ["foo"],
-+ ["foo", None],
-+ {"foo": 0},
-+ {"foo": "bar", "baz": []},
-+ [{"foo": ""}],
-+ ]
-+ new_list = salt.utils.data.filter_falsey(
-+ old_list, recurse_depth=3, ignore_types=[type(None)]
-+ )
-+ self.assertEqual(["foo", ["foo"], ["foo", None], {"foo": "bar"}], new_list)
-
-
- class FilterRecursiveDiff(TestCase):
-- '''
-+ """
- Test suite for salt.utils.data.recursive_diff
-- '''
-+ """
-
- def test_list_equality(self):
-- '''
-+ """
- Test cases where equal lists are compared.
-- '''
-+ """
- test_list = [0, 1, 2]
- self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list))
-
-@@ -895,392 +1019,455 @@ class FilterRecursiveDiff(TestCase):
- self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list))
-
- def test_dict_equality(self):
-- '''
-+ """
- Test cases where equal dicts are compared.
-- '''
-- test_dict = {'foo': 'bar', 'bar': {'baz': {'qux': 'quux'}}, 'frop': 0}
-+ """
-+ test_dict = {"foo": "bar", "bar": {"baz": {"qux": "quux"}}, "frop": 0}
- self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict))
-
- def test_ordereddict_equality(self):
-- '''
-+ """
- Test cases where equal OrderedDicts are compared.
-- '''
-- test_dict = OrderedDict([
-- ('foo', 'bar'),
-- ('bar', OrderedDict([('baz', OrderedDict([('qux', 'quux')]))])),
-- ('frop', 0)])
-+ """
-+ test_dict = OrderedDict(
-+ [
-+ ("foo", "bar"),
-+ ("bar", OrderedDict([("baz", OrderedDict([("qux", "quux")]))])),
-+ ("frop", 0),
-+ ]
-+ )
- self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict))
-
- def test_mixed_equality(self):
-- '''
-+ """
- Test cases where mixed nested lists and dicts are compared.
-- '''
-+ """
- test_data = {
-- 'foo': 'bar',
-- 'baz': [0, 1, 2],
-- 'bar': {'baz': [{'qux': 'quux'}, {'froop', 0}]}
-+ "foo": "bar",
-+ "baz": [0, 1, 2],
-+ "bar": {"baz": [{"qux": "quux"}, {"froop", 0}]},
- }
- self.assertEqual({}, salt.utils.data.recursive_diff(test_data, test_data))
-
- def test_set_equality(self):
-- '''
-+ """
- Test cases where equal sets are compared.
-- '''
-- test_set = set([0, 1, 2, 3, 'foo'])
-+ """
-+ test_set = {0, 1, 2, 3, "foo"}
- self.assertEqual({}, salt.utils.data.recursive_diff(test_set, test_set))
-
- # This is a bit of an oddity, as python seems to sort the sets in memory
- # so both sets end up with the same ordering (0..3).
-- set_one = set([0, 1, 2, 3])
-- set_two = set([3, 2, 1, 0])
-+ set_one = {0, 1, 2, 3}
-+ set_two = {3, 2, 1, 0}
- self.assertEqual({}, salt.utils.data.recursive_diff(set_one, set_two))
-
- def test_tuple_equality(self):
-- '''
-+ """
- Test cases where equal tuples are compared.
-- '''
-- test_tuple = (0, 1, 2, 3, 'foo')
-+ """
-+ test_tuple = (0, 1, 2, 3, "foo")
- self.assertEqual({}, salt.utils.data.recursive_diff(test_tuple, test_tuple))
-
- def test_list_inequality(self):
-- '''
-+ """
- Test cases where two unequal lists are compared.
-- '''
-+ """
- list_one = [0, 1, 2]
-- list_two = ['foo', 'bar', 'baz']
-- expected_result = {'old': list_one, 'new': list_two}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two))
-- expected_result = {'new': list_one, 'old': list_two}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one))
+- "sit",
+- salt.utils.data.traverse_dict_and_list(
+- test_two_level_dict_and_list,
+- ["foo", "lorem", "ipsum", "dolor"],
+- {"not_found": "not_found"},
+- ),
+- )
-
-- list_one = [0, 'foo', 1, 'bar']
-- list_two = [1, 'foo', 1, 'qux']
-- expected_result = {'old': [0, 'bar'], 'new': [1, 'qux']}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two))
-- expected_result = {'new': [0, 'bar'], 'old': [1, 'qux']}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one))
-+ list_two = ["foo", "bar", "baz"]
-+ expected_result = {"old": list_one, "new": list_two}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_one, list_two)
-+ )
-+ expected_result = {"new": list_one, "old": list_two}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_two, list_one)
-+ )
-+
-+ list_one = [0, "foo", 1, "bar"]
-+ list_two = [1, "foo", 1, "qux"]
-+ expected_result = {"old": [0, "bar"], "new": [1, "qux"]}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_one, list_two)
-+ )
-+ expected_result = {"new": [0, "bar"], "old": [1, "qux"]}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_two, list_one)
-+ )
-
- list_one = [0, 1, [2, 3]]
-- list_two = [0, 1, ['foo', 'bar']]
-- expected_result = {'old': [[2, 3]], 'new': [['foo', 'bar']]}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two))
-- expected_result = {'new': [[2, 3]], 'old': [['foo', 'bar']]}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one))
-+ list_two = [0, 1, ["foo", "bar"]]
-+ expected_result = {"old": [[2, 3]], "new": [["foo", "bar"]]}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_one, list_two)
-+ )
-+ expected_result = {"new": [[2, 3]], "old": [["foo", "bar"]]}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_two, list_one)
-+ )
-
- def test_dict_inequality(self):
-- '''
-+ """
- Test cases where two unequal dicts are compared.
-- '''
-- dict_one = {'foo': 1, 'bar': 2, 'baz': 3}
-- dict_two = {'foo': 2, 1: 'bar', 'baz': 3}
-- expected_result = {'old': {'foo': 1, 'bar': 2}, 'new': {'foo': 2, 1: 'bar'}}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two))
-- expected_result = {'new': {'foo': 1, 'bar': 2}, 'old': {'foo': 2, 1: 'bar'}}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one))
--
-- dict_one = {'foo': {'bar': {'baz': 1}}}
-- dict_two = {'foo': {'qux': {'baz': 1}}}
-- expected_result = {'old': dict_one, 'new': dict_two}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two))
-- expected_result = {'new': dict_one, 'old': dict_two}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one))
-+ """
-+ dict_one = {"foo": 1, "bar": 2, "baz": 3}
-+ dict_two = {"foo": 2, 1: "bar", "baz": 3}
-+ expected_result = {"old": {"foo": 1, "bar": 2}, "new": {"foo": 2, 1: "bar"}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
-+ )
-+ expected_result = {"new": {"foo": 1, "bar": 2}, "old": {"foo": 2, 1: "bar"}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
-+ )
-+
-+ dict_one = {"foo": {"bar": {"baz": 1}}}
-+ dict_two = {"foo": {"qux": {"baz": 1}}}
-+ expected_result = {"old": dict_one, "new": dict_two}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
-+ )
-+ expected_result = {"new": dict_one, "old": dict_two}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
-+ )
-
- def test_ordereddict_inequality(self):
-- '''
-+ """
- Test cases where two unequal OrderedDicts are compared.
-- '''
-- odict_one = OrderedDict([('foo', 'bar'), ('bar', 'baz')])
-- odict_two = OrderedDict([('bar', 'baz'), ('foo', 'bar')])
-- expected_result = {'old': odict_one, 'new': odict_two}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(odict_one, odict_two))
-+ """
-+ odict_one = OrderedDict([("foo", "bar"), ("bar", "baz")])
-+ odict_two = OrderedDict([("bar", "baz"), ("foo", "bar")])
-+ expected_result = {"old": odict_one, "new": odict_two}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(odict_one, odict_two)
-+ )
-
- def test_set_inequality(self):
-- '''
-+ """
- Test cases where two unequal sets are compared.
- Tricky as the sets are compared zipped, so shuffled sets of equal values
- are considered different.
-- '''
-- set_one = set([0, 1, 2, 4])
-- set_two = set([0, 1, 3, 4])
-- expected_result = {'old': set([2]), 'new': set([3])}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(set_one, set_two))
-- expected_result = {'new': set([2]), 'old': set([3])}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(set_two, set_one))
-+ """
-+ set_one = {0, 1, 2, 4}
-+ set_two = {0, 1, 3, 4}
-+ expected_result = {"old": {2}, "new": {3}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(set_one, set_two)
-+ )
-+ expected_result = {"new": {2}, "old": {3}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(set_two, set_one)
-+ )
-
- # It is unknown how different python versions will store sets in memory.
- # Python 2.7 seems to sort it (i.e. set_one below becomes {0, 1, 'foo', 'bar'}
- # However Python 3.6.8 stores it differently each run.
- # So just test for "not equal" here.
-- set_one = set([0, 'foo', 1, 'bar'])
-- set_two = set(['foo', 1, 'bar', 2])
-+ set_one = {0, "foo", 1, "bar"}
-+ set_two = {"foo", 1, "bar", 2}
- expected_result = {}
-- self.assertNotEqual(expected_result, salt.utils.data.recursive_diff(set_one, set_two))
-+ self.assertNotEqual(
-+ expected_result, salt.utils.data.recursive_diff(set_one, set_two)
-+ )
-
- def test_mixed_inequality(self):
-- '''
-+ """
- Test cases where two mixed dicts/iterables that are different are compared.
-- '''
-- dict_one = {'foo': [1, 2, 3]}
-- dict_two = {'foo': [3, 2, 1]}
-- expected_result = {'old': {'foo': [1, 3]}, 'new': {'foo': [3, 1]}}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two))
-- expected_result = {'new': {'foo': [1, 3]}, 'old': {'foo': [3, 1]}}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one))
--
-- list_one = [1, 2, {'foo': ['bar', {'foo': 1, 'bar': 2}]}]
-- list_two = [3, 4, {'foo': ['qux', {'foo': 1, 'bar': 2}]}]
-- expected_result = {'old': [1, 2, {'foo': ['bar']}], 'new': [3, 4, {'foo': ['qux']}]}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two))
-- expected_result = {'new': [1, 2, {'foo': ['bar']}], 'old': [3, 4, {'foo': ['qux']}]}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one))
--
-- mixed_one = {'foo': set([0, 1, 2]), 'bar': [0, 1, 2]}
-- mixed_two = {'foo': set([1, 2, 3]), 'bar': [1, 2, 3]}
-+ """
-+ dict_one = {"foo": [1, 2, 3]}
-+ dict_two = {"foo": [3, 2, 1]}
-+ expected_result = {"old": {"foo": [1, 3]}, "new": {"foo": [3, 1]}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
-+ )
-+ expected_result = {"new": {"foo": [1, 3]}, "old": {"foo": [3, 1]}}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
-+ )
-+
-+ list_one = [1, 2, {"foo": ["bar", {"foo": 1, "bar": 2}]}]
-+ list_two = [3, 4, {"foo": ["qux", {"foo": 1, "bar": 2}]}]
- expected_result = {
-- 'old': {'foo': set([0]), 'bar': [0, 1, 2]},
-- 'new': {'foo': set([3]), 'bar': [1, 2, 3]}
-+ "old": [1, 2, {"foo": ["bar"]}],
-+ "new": [3, 4, {"foo": ["qux"]}],
- }
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two))
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_one, list_two)
-+ )
-+ expected_result = {
-+ "new": [1, 2, {"foo": ["bar"]}],
-+ "old": [3, 4, {"foo": ["qux"]}],
-+ }
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(list_two, list_one)
-+ )
-+
-+ mixed_one = {"foo": {0, 1, 2}, "bar": [0, 1, 2]}
-+ mixed_two = {"foo": {1, 2, 3}, "bar": [1, 2, 3]}
- expected_result = {
-- 'new': {'foo': set([0]), 'bar': [0, 1, 2]},
-- 'old': {'foo': set([3]), 'bar': [1, 2, 3]}
-+ "old": {"foo": {0}, "bar": [0, 1, 2]},
-+ "new": {"foo": {3}, "bar": [1, 2, 3]},
- }
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one))
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
-+ )
-+ expected_result = {
-+ "new": {"foo": {0}, "bar": [0, 1, 2]},
-+ "old": {"foo": {3}, "bar": [1, 2, 3]},
-+ }
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)
-+ )
-
- def test_tuple_inequality(self):
-- '''
-+ """
- Test cases where two tuples that are different are compared.
-- '''
-+ """
- tuple_one = (1, 2, 3)
- tuple_two = (3, 2, 1)
-- expected_result = {'old': (1, 3), 'new': (3, 1)}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two))
-+ expected_result = {"old": (1, 3), "new": (3, 1)}
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two)
-+ )
-
- def test_list_vs_set(self):
-- '''
-+ """
- Test case comparing a list with a set, will be compared unordered.
-- '''
-+ """
- mixed_one = [1, 2, 3]
-- mixed_two = set([3, 2, 1])
-+ mixed_two = {3, 2, 1}
- expected_result = {}
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two))
-- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one))
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
-+ )
-+ self.assertEqual(
-+ expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)
-+ )
-
- def test_dict_vs_ordereddict(self):
-- '''
-+ """
- Test case comparing a dict with an ordereddict, will be compared unordered.
-- '''
-- test_dict = {'foo': 'bar', 'bar': 'baz'}
-- test_odict = OrderedDict([('foo', 'bar'), ('bar', 'baz')])
-+ """
-+ test_dict = {"foo": "bar", "bar": "baz"}
-+ test_odict = OrderedDict([("foo", "bar"), ("bar", "baz")])
- self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict))
- self.assertEqual({}, salt.utils.data.recursive_diff(test_odict, test_dict))
-
-- test_odict2 = OrderedDict([('bar', 'baz'), ('foo', 'bar')])
-+ test_odict2 = OrderedDict([("bar", "baz"), ("foo", "bar")])
- self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict2))
- self.assertEqual({}, salt.utils.data.recursive_diff(test_odict2, test_dict))
-
- def test_list_ignore_ignored(self):
-- '''
-+ """
- Test case comparing two lists with ignore-list supplied (which is not used
- when comparing lists).
-- '''
-+ """
- list_one = [1, 2, 3]
- list_two = [3, 2, 1]
-- expected_result = {'old': [1, 3], 'new': [3, 1]}
-+ expected_result = {"old": [1, 3], "new": [3, 1]}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(list_one, list_two, ignore_keys=[1, 3])
-+ salt.utils.data.recursive_diff(list_one, list_two, ignore_keys=[1, 3]),
- )
-
- def test_dict_ignore(self):
-- '''
-+ """
- Test case comparing two dicts with ignore-list supplied.
-- '''
-- dict_one = {'foo': 1, 'bar': 2, 'baz': 3}
-- dict_two = {'foo': 3, 'bar': 2, 'baz': 1}
-- expected_result = {'old': {'baz': 3}, 'new': {'baz': 1}}
-+ """
-+ dict_one = {"foo": 1, "bar": 2, "baz": 3}
-+ dict_two = {"foo": 3, "bar": 2, "baz": 1}
-+ expected_result = {"old": {"baz": 3}, "new": {"baz": 1}}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=['foo'])
-+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]),
- )
-
- def test_ordereddict_ignore(self):
-- '''
-+ """
- Test case comparing two OrderedDicts with ignore-list supplied.
-- '''
-- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)])
-- odict_two = OrderedDict([('baz', 1), ('bar', 2), ('foo', 3)])
-+ """
-+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
-+ odict_two = OrderedDict([("baz", 1), ("bar", 2), ("foo", 3)])
- # The key 'foo' will be ignored, which means the key from the other OrderedDict
- # will always be considered "different" since OrderedDicts are compared ordered.
-- expected_result = {'old': OrderedDict([('baz', 3)]), 'new': OrderedDict([('baz', 1)])}
-+ expected_result = {
-+ "old": OrderedDict([("baz", 3)]),
-+ "new": OrderedDict([("baz", 1)]),
-+ }
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_keys=['foo'])
-+ salt.utils.data.recursive_diff(odict_one, odict_two, ignore_keys=["foo"]),
- )
-
- def test_dict_vs_ordereddict_ignore(self):
-- '''
-+ """
- Test case comparing a dict with an OrderedDict with ignore-list supplied.
-- '''
-- dict_one = {'foo': 1, 'bar': 2, 'baz': 3}
-- odict_two = OrderedDict([('foo', 3), ('bar', 2), ('baz', 1)])
-- expected_result = {'old': {'baz': 3}, 'new': OrderedDict([('baz', 1)])}
-+ """
-+ dict_one = {"foo": 1, "bar": 2, "baz": 3}
-+ odict_two = OrderedDict([("foo", 3), ("bar", 2), ("baz", 1)])
-+ expected_result = {"old": {"baz": 3}, "new": OrderedDict([("baz", 1)])}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, odict_two, ignore_keys=['foo'])
-+ salt.utils.data.recursive_diff(dict_one, odict_two, ignore_keys=["foo"]),
- )
-
- def test_mixed_nested_ignore(self):
-- '''
-+ """
- Test case comparing mixed, nested items with ignore-list supplied.
-- '''
-- dict_one = {'foo': [1], 'bar': {'foo': 1, 'bar': 2}, 'baz': 3}
-- dict_two = {'foo': [2], 'bar': {'foo': 3, 'bar': 2}, 'baz': 1}
-- expected_result = {'old': {'baz': 3}, 'new': {'baz': 1}}
-+ """
-+ dict_one = {"foo": [1], "bar": {"foo": 1, "bar": 2}, "baz": 3}
-+ dict_two = {"foo": [2], "bar": {"foo": 3, "bar": 2}, "baz": 1}
-+ expected_result = {"old": {"baz": 3}, "new": {"baz": 1}}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=['foo'])
-+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]),
- )
-
- def test_ordered_dict_unequal_length(self):
-- '''
-+ """
- Test case comparing two OrderedDicts of unequal length.
-- '''
-- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)])
-- odict_two = OrderedDict([('foo', 1), ('bar', 2)])
-- expected_result = {'old': OrderedDict([('baz', 3)]), 'new': {}}
-+ """
-+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
-+ odict_two = OrderedDict([("foo", 1), ("bar", 2)])
-+ expected_result = {"old": OrderedDict([("baz", 3)]), "new": {}}
- self.assertEqual(
-- expected_result,
-- salt.utils.data.recursive_diff(odict_one, odict_two)
-+ expected_result, salt.utils.data.recursive_diff(odict_one, odict_two)
- )
-
- def test_list_unequal_length(self):
-- '''
-+ """
- Test case comparing two lists of unequal length.
-- '''
-+ """
- list_one = [1, 2, 3]
- list_two = [1, 2, 3, 4]
-- expected_result = {'old': [], 'new': [4]}
-+ expected_result = {"old": [], "new": [4]}
- self.assertEqual(
-- expected_result,
-- salt.utils.data.recursive_diff(list_one, list_two)
-+ expected_result, salt.utils.data.recursive_diff(list_one, list_two)
- )
-
- def test_set_unequal_length(self):
-- '''
-+ """
- Test case comparing two sets of unequal length.
- This does not do anything special, as it is unordered.
-- '''
-- set_one = set([1, 2, 3])
-- set_two = set([4, 3, 2, 1])
-- expected_result = {'old': set([]), 'new': set([4])}
-+ """
-+ set_one = {1, 2, 3}
-+ set_two = {4, 3, 2, 1}
-+ expected_result = {"old": set(), "new": {4}}
- self.assertEqual(
-- expected_result,
-- salt.utils.data.recursive_diff(set_one, set_two)
-+ expected_result, salt.utils.data.recursive_diff(set_one, set_two)
- )
-
- def test_tuple_unequal_length(self):
-- '''
-+ """
- Test case comparing two tuples of unequal length.
- This should be the same as comparing two ordered lists.
-- '''
-+ """
- tuple_one = (1, 2, 3)
- tuple_two = (1, 2, 3, 4)
-- expected_result = {'old': (), 'new': (4,)}
-+ expected_result = {"old": (), "new": (4,)}
- self.assertEqual(
-- expected_result,
-- salt.utils.data.recursive_diff(tuple_one, tuple_two)
-+ expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two)
- )
-
- def test_list_unordered(self):
-- '''
-+ """
- Test case comparing two lists unordered.
-- '''
-+ """
- list_one = [1, 2, 3, 4]
- list_two = [4, 3, 2]
-- expected_result = {'old': [1], 'new': []}
-+ expected_result = {"old": [1], "new": []}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(list_one, list_two, ignore_order=True)
-+ salt.utils.data.recursive_diff(list_one, list_two, ignore_order=True),
- )
-
- def test_mixed_nested_unordered(self):
-- '''
-+ """
- Test case comparing nested dicts/lists unordered.
-- '''
-- dict_one = {'foo': {'bar': [1, 2, 3]}, 'bar': [{'foo': 4}, 0]}
-- dict_two = {'foo': {'bar': [3, 2, 1]}, 'bar': [0, {'foo': 4}]}
-+ """
-+ dict_one = {"foo": {"bar": [1, 2, 3]}, "bar": [{"foo": 4}, 0]}
-+ dict_two = {"foo": {"bar": [3, 2, 1]}, "bar": [0, {"foo": 4}]}
- expected_result = {}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_order=True)
-+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_order=True),
- )
- expected_result = {
-- 'old': {'foo': {'bar': [1, 3]}, 'bar': [{'foo': 4}, 0]},
-- 'new': {'foo': {'bar': [3, 1]}, 'bar': [0, {'foo': 4}]},
-+ "old": {"foo": {"bar": [1, 3]}, "bar": [{"foo": 4}, 0]},
-+ "new": {"foo": {"bar": [3, 1]}, "bar": [0, {"foo": 4}]},
- }
- self.assertEqual(
-- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two)
-+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
- )
-
- def test_ordered_dict_unordered(self):
-- '''
-+ """
- Test case comparing OrderedDicts unordered.
-- '''
-- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)])
-- odict_two = OrderedDict([('baz', 3), ('bar', 2), ('foo', 1)])
-+ """
-+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
-+ odict_two = OrderedDict([("baz", 3), ("bar", 2), ("foo", 1)])
- expected_result = {}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_order=True)
-+ salt.utils.data.recursive_diff(odict_one, odict_two, ignore_order=True),
- )
-
- def test_ignore_missing_keys_dict(self):
-- '''
-+ """
- Test case ignoring missing keys on a comparison of dicts.
-- '''
-- dict_one = {'foo': 1, 'bar': 2, 'baz': 3}
-- dict_two = {'bar': 3}
-- expected_result = {'old': {'bar': 2}, 'new': {'bar': 3}}
-+ """
-+ dict_one = {"foo": 1, "bar": 2, "baz": 3}
-+ dict_two = {"bar": 3}
-+ expected_result = {"old": {"bar": 2}, "new": {"bar": 3}}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True)
-+ salt.utils.data.recursive_diff(
-+ dict_one, dict_two, ignore_missing_keys=True
-+ ),
- )
-
- def test_ignore_missing_keys_ordered_dict(self):
-- '''
-+ """
- Test case not ignoring missing keys on a comparison of OrderedDicts.
-- '''
-- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)])
-- odict_two = OrderedDict([('bar', 3)])
-- expected_result = {'old': odict_one, 'new': odict_two}
-+ """
-+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
-+ odict_two = OrderedDict([("bar", 3)])
-+ expected_result = {"old": odict_one, "new": odict_two}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_missing_keys=True)
-+ salt.utils.data.recursive_diff(
-+ odict_one, odict_two, ignore_missing_keys=True
-+ ),
- )
-
- def test_ignore_missing_keys_recursive(self):
-- '''
-+ """
- Test case ignoring missing keys on a comparison of nested dicts.
-- '''
-- dict_one = {'foo': {'bar': 2, 'baz': 3}}
-- dict_two = {'foo': {'baz': 3}}
-+ """
-+ dict_one = {"foo": {"bar": 2, "baz": 3}}
-+ dict_two = {"foo": {"baz": 3}}
- expected_result = {}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True)
-+ salt.utils.data.recursive_diff(
-+ dict_one, dict_two, ignore_missing_keys=True
-+ ),
- )
- # Compare from dict-in-dict
- dict_two = {}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True)
-+ salt.utils.data.recursive_diff(
-+ dict_one, dict_two, ignore_missing_keys=True
-+ ),
- )
- # Compare from dict-in-list
-- dict_one = {'foo': ['bar', {'baz': 3}]}
-- dict_two = {'foo': ['bar', {}]}
-+ dict_one = {"foo": ["bar", {"baz": 3}]}
-+ dict_two = {"foo": ["bar", {}]}
- self.assertEqual(
- expected_result,
-- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True)
-+ salt.utils.data.recursive_diff(
-+ dict_one, dict_two, ignore_missing_keys=True
-+ ),
- )
-diff --git a/tests/unit/utils/test_xmlutil.py b/tests/unit/utils/test_xmlutil.py
-index c04f39498e..cbf73861e5 100644
---- a/tests/unit/utils/test_xmlutil.py
-+++ b/tests/unit/utils/test_xmlutil.py
-@@ -1,148 +1,170 @@
--# -*- coding: utf-8 -*-
--'''
-+"""
- tests.unit.xmlutil_test
- ~~~~~~~~~~~~~~~~~~~~
--'''
--from __future__ import absolute_import, print_function, unicode_literals
--# Import Salt Testing libs
--from tests.support.unit import TestCase
-+"""
-+import salt.utils.xmlutil as xml
-
- # Import Salt libs
- from salt._compat import ElementTree as ET
--import salt.utils.xmlutil as xml
-+
-+# Import Salt Testing libs
-+from tests.support.unit import TestCase
-
-
- class XMLUtilTestCase(TestCase):
-- '''
-+ """
- Tests that salt.utils.xmlutil properly parses XML data and returns as a properly formatted
- dictionary. The default method of parsing will ignore attributes and return only the child
- items. The full method will include parsing attributes.
-- '''
-+ """
-
- def setUp(self):
-
- # Populate our use cases for specific XML formats.
- self.cases = {
-- 'a': {
-- 'xml': '<parent>data</parent>',
-- 'legacy': {'parent': 'data'},
-- 'full': 'data'
-+ "a": {
-+ "xml": "<parent>data</parent>",
-+ "legacy": {"parent": "data"},
-+ "full": "data",
- },
-- 'b': {
-- 'xml': '<parent value="data">data</parent>',
-- 'legacy': {'parent': 'data'},
-- 'full': {'parent': 'data', 'value': 'data'}
-+ "b": {
-+ "xml": '<parent value="data">data</parent>',
-+ "legacy": {"parent": "data"},
-+ "full": {"parent": "data", "value": "data"},
- },
-- 'c': {
-- 'xml': '<parent><child>data</child><child value="data">data</child>'
-- '<child value="data"/><child/></parent>',
-- 'legacy': {'child': ['data', {'child': 'data'}, {'child': None}, {'child': None}]},
-- 'full': {'child': ['data', {'child': 'data', 'value': 'data'}, {'value': 'data'}, None]}
-+ "c": {
-+ "xml": '<parent><child>data</child><child value="data">data</child>'
-+ '<child value="data"/><child/></parent>',
-+ "legacy": {
-+ "child": [
-+ "data",
-+ {"child": "data"},
-+ {"child": None},
-+ {"child": None},
-+ ]
-+ },
-+ "full": {
-+ "child": [
-+ "data",
-+ {"child": "data", "value": "data"},
-+ {"value": "data"},
-+ None,
-+ ]
-+ },
- },
-- 'd': {
-- 'xml': '<parent value="data" another="data"><child>data</child></parent>',
-- 'legacy': {'child': 'data'},
-- 'full': {'child': 'data', 'another': 'data', 'value': 'data'}
-+ "d": {
-+ "xml": '<parent value="data" another="data"><child>data</child></parent>',
-+ "legacy": {"child": "data"},
-+ "full": {"child": "data", "another": "data", "value": "data"},
- },
-- 'e': {
-- 'xml': '<parent value="data" another="data"><child value="data">data</child></parent>',
-- 'legacy': {'child': 'data'},
-- 'full': {'child': {'child': 'data', 'value': 'data'}, 'another': 'data', 'value': 'data'}
-+ "e": {
-+ "xml": '<parent value="data" another="data"><child value="data">data</child></parent>',
-+ "legacy": {"child": "data"},
-+ "full": {
-+ "child": {"child": "data", "value": "data"},
-+ "another": "data",
-+ "value": "data",
-+ },
- },
-- 'f': {
-- 'xml': '<parent><child><sub-child value="data">data</sub-child></child>'
-- '<child>data</child></parent>',
-- 'legacy': {'child': [{'sub-child': 'data'}, {'child': 'data'}]},
-- 'full': {'child': [{'sub-child': {'value': 'data', 'sub-child': 'data'}}, 'data']}
-+ "f": {
-+ "xml": '<parent><child><sub-child value="data">data</sub-child></child>'
-+ "<child>data</child></parent>",
-+ "legacy": {"child": [{"sub-child": "data"}, {"child": "data"}]},
-+ "full": {
-+ "child": [
-+ {"sub-child": {"value": "data", "sub-child": "data"}},
-+ "data",
-+ ]
-+ },
- },
- }
-
- def test_xml_case_a(self):
-- xmldata = ET.fromstring(self.cases['a']['xml'])
-+ xmldata = ET.fromstring(self.cases["a"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['a']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["a"]["legacy"])
-
- def test_xml_case_a_legacy(self):
-- xmldata = ET.fromstring(self.cases['a']['xml'])
-+ xmldata = ET.fromstring(self.cases["a"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['a']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["a"]["legacy"])
-
- def test_xml_case_a_full(self):
-- xmldata = ET.fromstring(self.cases['a']['xml'])
-+ xmldata = ET.fromstring(self.cases["a"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['a']['full'])
-+ self.assertEqual(defaultdict, self.cases["a"]["full"])
-
- def test_xml_case_b(self):
-- xmldata = ET.fromstring(self.cases['b']['xml'])
-+ xmldata = ET.fromstring(self.cases["b"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['b']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["b"]["legacy"])
-
- def test_xml_case_b_legacy(self):
-- xmldata = ET.fromstring(self.cases['b']['xml'])
-+ xmldata = ET.fromstring(self.cases["b"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['b']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["b"]["legacy"])
-
- def test_xml_case_b_full(self):
-- xmldata = ET.fromstring(self.cases['b']['xml'])
-+ xmldata = ET.fromstring(self.cases["b"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['b']['full'])
-+ self.assertEqual(defaultdict, self.cases["b"]["full"])
-
- def test_xml_case_c(self):
-- xmldata = ET.fromstring(self.cases['c']['xml'])
-+ xmldata = ET.fromstring(self.cases["c"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['c']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["c"]["legacy"])
-
- def test_xml_case_c_legacy(self):
-- xmldata = ET.fromstring(self.cases['c']['xml'])
-+ xmldata = ET.fromstring(self.cases["c"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['c']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["c"]["legacy"])
-
- def test_xml_case_c_full(self):
-- xmldata = ET.fromstring(self.cases['c']['xml'])
-+ xmldata = ET.fromstring(self.cases["c"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['c']['full'])
-+ self.assertEqual(defaultdict, self.cases["c"]["full"])
-
- def test_xml_case_d(self):
-- xmldata = ET.fromstring(self.cases['d']['xml'])
-+ xmldata = ET.fromstring(self.cases["d"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['d']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["d"]["legacy"])
-
- def test_xml_case_d_legacy(self):
-- xmldata = ET.fromstring(self.cases['d']['xml'])
-+ xmldata = ET.fromstring(self.cases["d"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['d']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["d"]["legacy"])
-
- def test_xml_case_d_full(self):
-- xmldata = ET.fromstring(self.cases['d']['xml'])
-+ xmldata = ET.fromstring(self.cases["d"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['d']['full'])
-+ self.assertEqual(defaultdict, self.cases["d"]["full"])
-
- def test_xml_case_e(self):
-- xmldata = ET.fromstring(self.cases['e']['xml'])
-+ xmldata = ET.fromstring(self.cases["e"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['e']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["e"]["legacy"])
-
- def test_xml_case_e_legacy(self):
-- xmldata = ET.fromstring(self.cases['e']['xml'])
-+ xmldata = ET.fromstring(self.cases["e"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['e']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["e"]["legacy"])
-
- def test_xml_case_e_full(self):
-- xmldata = ET.fromstring(self.cases['e']['xml'])
-+ xmldata = ET.fromstring(self.cases["e"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['e']['full'])
-+ self.assertEqual(defaultdict, self.cases["e"]["full"])
-
- def test_xml_case_f(self):
-- xmldata = ET.fromstring(self.cases['f']['xml'])
-+ xmldata = ET.fromstring(self.cases["f"]["xml"])
- defaultdict = xml.to_dict(xmldata)
-- self.assertEqual(defaultdict, self.cases['f']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["f"]["legacy"])
-
- def test_xml_case_f_legacy(self):
-- xmldata = ET.fromstring(self.cases['f']['xml'])
-+ xmldata = ET.fromstring(self.cases["f"]["xml"])
- defaultdict = xml.to_dict(xmldata, False)
-- self.assertEqual(defaultdict, self.cases['f']['legacy'])
-+ self.assertEqual(defaultdict, self.cases["f"]["legacy"])
-
- def test_xml_case_f_full(self):
-- xmldata = ET.fromstring(self.cases['f']['xml'])
-+ xmldata = ET.fromstring(self.cases["f"]["xml"])
- defaultdict = xml.to_dict(xmldata, True)
-- self.assertEqual(defaultdict, self.cases['f']['full'])
-+ self.assertEqual(defaultdict, self.cases["f"]["full"])
+ def test_compare_dicts(self):
+ ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"})
+ self.assertEqual(ret, {})
--
-2.28.0
+2.29.2
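
The test refresh above only reformats; the behaviour of salt.utils.data.recursive_diff it asserts is unchanged. A minimal sketch of that behaviour, assuming a Salt checkout on PYTHONPATH (values mirror the test cases):

    import salt.utils.data

    # Unequal-length sequences report the surplus items under 'new'.
    print(salt.utils.data.recursive_diff([1, 2, 3], [1, 2, 3, 4]))
    # {'old': [], 'new': [4]}

    # ignore_order=True makes nested ordering differences vanish.
    print(salt.utils.data.recursive_diff(
        {"foo": {"bar": [1, 2, 3]}}, {"foo": {"bar": [3, 2, 1]}}, ignore_order=True
    ))
    # {}
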
diff --git a/batch-async-catch-exceptions-and-safety-unregister-a.patch b/batch-async-catch-exceptions-and-safety-unregister-a.patch
index c87e6bc..8b84c50 100644
--- a/batch-async-catch-exceptions-and-safety-unregister-a.patch
+++ b/batch-async-catch-exceptions-and-safety-unregister-a.patch
@@ -1,42 +1,34 @@
-From c5edf396ffd66b6ac1479aa01367aae3eff7683d Mon Sep 17 00:00:00 2001
+From 1606379714f4776e2b529fb1d45891266985c896 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Fri, 28 Feb 2020 15:11:53 +0000
-Subject: [PATCH] Batch Async: Catch exceptions and safety unregister and
- close instances
+Subject: [PATCH] Batch Async: Catch exceptions and safety unregister
+ and close instances
---
- salt/cli/batch_async.py | 156 +++++++++++++++++++++++-----------------
- 1 file changed, 89 insertions(+), 67 deletions(-)
+ salt/cli/batch_async.py | 160 ++++++++++++++++++++++++----------------
+ 1 file changed, 96 insertions(+), 64 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index da069b64bd..b8f272ed67 100644
+index 1e2ac5b0d3..3dc04826d1 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -13,7 +13,6 @@ import salt.client
-
- # pylint: enable=import-error,no-name-in-module,redefined-builtin
- import logging
--import fnmatch
-
- log = logging.getLogger(__name__)
-
-@@ -104,22 +103,25 @@ class BatchAsync(object):
+@@ -107,22 +107,25 @@ class BatchAsync:
def __event_handler(self, raw):
if not self.event:
return
- mtag, data = self.event.unpack(raw, self.event.serial)
- for (pattern, op) in self.patterns:
- if mtag.startswith(pattern[:-1]):
-- minion = data['id']
-- if op == 'ping_return':
+- minion = data["id"]
+- if op == "ping_return":
- self.minions.add(minion)
- if self.targeted_minions == self.minions:
- self.event.io_loop.spawn_callback(self.start_batch)
-- elif op == 'find_job_return':
+- elif op == "find_job_return":
- if data.get("return", None):
- self.find_job_returned.add(minion)
-- elif op == 'batch_run':
+- elif op == "batch_run":
- if minion in self.active:
- self.active.remove(minion)
- self.done_minions.add(minion)
@@ -45,25 +37,25 @@ index da069b64bd..b8f272ed67 100644
+ mtag, data = self.event.unpack(raw, self.event.serial)
+ for (pattern, op) in self.patterns:
+ if mtag.startswith(pattern[:-1]):
-+ minion = data['id']
-+ if op == 'ping_return':
++ minion = data["id"]
++ if op == "ping_return":
+ self.minions.add(minion)
+ if self.targeted_minions == self.minions:
+ self.event.io_loop.spawn_callback(self.start_batch)
-+ elif op == 'find_job_return':
++ elif op == "find_job_return":
+ if data.get("return", None):
+ self.find_job_returned.add(minion)
-+ elif op == 'batch_run':
++ elif op == "batch_run":
+ if minion in self.active:
+ self.active.remove(minion)
+ self.done_minions.add(minion)
+ self.event.io_loop.spawn_callback(self.schedule_next)
+ except Exception as ex:
-+ log.error("Exception occurred while processing event: {}".format(ex))
++ log.error("Exception occurred while processing event: {}".format(ex))
def _get_next(self):
- to_run = self.minions.difference(
-@@ -146,54 +148,59 @@ class BatchAsync(object):
+ to_run = (
+@@ -154,53 +157,67 @@ class BatchAsync:
if timedout_minions:
self.schedule_next()
@@ -74,112 +66,118 @@ index da069b64bd..b8f272ed67 100644
@tornado.gen.coroutine
def find_job(self, minions):
-- not_done = minions.difference(self.done_minions).difference(self.timedout_minions)
+- not_done = minions.difference(self.done_minions).difference(
+- self.timedout_minions
+- )
-
- if not_done:
- jid = self.jid_gen()
-- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
+- find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
- self.patterns.add((find_job_return_pattern, "find_job_return"))
-- self.event.subscribe(find_job_return_pattern, match_type='glob')
+- self.event.subscribe(find_job_return_pattern, match_type="glob")
-
- ret = yield self.local.run_job_async(
- not_done,
-- 'saltutil.find_job',
+- "saltutil.find_job",
- [self.batch_jid],
-- 'list',
-- gather_job_timeout=self.opts['gather_job_timeout'],
+- "list",
+- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=jid,
-- **self.eauth)
-- yield tornado.gen.sleep(self.opts['gather_job_timeout'])
-- self.event.io_loop.spawn_callback(
-- self.check_find_job,
-- not_done,
-- jid)
+- **self.eauth
+ if self.event:
-+ not_done = minions.difference(self.done_minions).difference(self.timedout_minions)
++ not_done = minions.difference(self.done_minions).difference(
++ self.timedout_minions
+ )
+- yield tornado.gen.sleep(self.opts["gather_job_timeout"])
+- self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid)
+ try:
+ if not_done:
+ jid = self.jid_gen()
-+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+ self.patterns.add((find_job_return_pattern, "find_job_return"))
-+ self.event.subscribe(find_job_return_pattern, match_type='glob')
++ self.event.subscribe(find_job_return_pattern, match_type="glob")
+ ret = yield self.local.run_job_async(
+ not_done,
-+ 'saltutil.find_job',
++ "saltutil.find_job",
+ [self.batch_jid],
-+ 'list',
-+ gather_job_timeout=self.opts['gather_job_timeout'],
++ "list",
++ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=jid,
-+ **self.eauth)
-+ yield tornado.gen.sleep(self.opts['gather_job_timeout'])
++ **self.eauth
++ )
++ yield tornado.gen.sleep(self.opts["gather_job_timeout"])
+ if self.event:
+ self.event.io_loop.spawn_callback(
-+ self.check_find_job,
-+ not_done,
-+ jid)
++ self.check_find_job, not_done, jid
++ )
+ except Exception as ex:
-+ log.error("Exception occurred handling batch async: {}. Aborting execution.".format(ex))
++ log.error(
++ "Exception occurred handling batch async: {}. Aborting execution.".format(
++ ex
++ )
++ )
+ self.close_safe()
@tornado.gen.coroutine
def start(self):
- self.__set_event_handler()
- ping_return = yield self.local.run_job_async(
-- self.opts['tgt'],
-- 'test.ping',
+- self.opts["tgt"],
+- "test.ping",
- [],
-- self.opts.get(
-- 'selected_target_option',
-- self.opts.get('tgt_type', 'glob')
-- ),
-- gather_job_timeout=self.opts['gather_job_timeout'],
+- self.opts.get("selected_target_option", self.opts.get("tgt_type", "glob")),
+- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.ping_jid,
- metadata=self.metadata,
-- **self.eauth)
-- self.targeted_minions = set(ping_return['minions'])
-- #start batching even if not all minions respond to ping
-- yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout'])
+- **self.eauth
+- )
+- self.targeted_minions = set(ping_return["minions"])
+- # start batching even if not all minions respond to ping
+- yield tornado.gen.sleep(
+- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
+- )
- self.event.io_loop.spawn_callback(self.start_batch)
--
+ if self.event:
+ self.__set_event_handler()
+ ping_return = yield self.local.run_job_async(
-+ self.opts['tgt'],
-+ 'test.ping',
++ self.opts["tgt"],
++ "test.ping",
+ [],
+ self.opts.get(
-+ 'selected_target_option',
-+ self.opts.get('tgt_type', 'glob')
++ "selected_target_option", self.opts.get("tgt_type", "glob")
+ ),
-+ gather_job_timeout=self.opts['gather_job_timeout'],
++ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=self.ping_jid,
+ metadata=self.metadata,
-+ **self.eauth)
-+ self.targeted_minions = set(ping_return['minions'])
-+ #start batching even if not all minions respond to ping
-+ yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout'])
++ **self.eauth
++ )
++ self.targeted_minions = set(ping_return["minions"])
++ # start batching even if not all minions respond to ping
++ yield tornado.gen.sleep(
++ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
++ )
+ if self.event:
+ self.event.io_loop.spawn_callback(self.start_batch)
@tornado.gen.coroutine
def start_batch(self):
-@@ -206,7 +213,8 @@ class BatchAsync(object):
- "metadata": self.metadata
- }
- ret = self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
+@@ -215,7 +232,8 @@ class BatchAsync:
+ ret = self.event.fire_event(
+ data, "salt/batch/{}/start".format(self.batch_jid)
+ )
- self.event.io_loop.spawn_callback(self.run_next)
+ if self.event:
+ self.event.io_loop.spawn_callback(self.run_next)
@tornado.gen.coroutine
def end_batch(self):
-@@ -221,11 +229,21 @@ class BatchAsync(object):
- "metadata": self.metadata
+@@ -232,11 +250,21 @@ class BatchAsync:
+ "metadata": self.metadata,
}
- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- for (pattern, label) in self.patterns:
- if label in ["ping_return", "batch_run"]:
-- self.event.unsubscribe(pattern, match_type='glob')
+- self.event.unsubscribe(pattern, match_type="glob")
- del self
- gc.collect()
+
@@ -189,18 +187,18 @@ index da069b64bd..b8f272ed67 100644
+ self.close_safe()
+
+ def close_safe(self):
-+ for (pattern, label) in self.patterns:
-+ self.event.unsubscribe(pattern, match_type='glob')
-+ self.event.remove_event_handler(self.__event_handler)
-+ self.event = None
-+ self.local = None
-+ self.ioloop = None
-+ del self
-+ gc.collect()
++ for (pattern, label) in self.patterns:
++ self.event.unsubscribe(pattern, match_type="glob")
++ self.event.remove_event_handler(self.__event_handler)
++ self.event = None
++ self.local = None
++ self.ioloop = None
++ del self
++ gc.collect()
@tornado.gen.coroutine
def schedule_next(self):
-@@ -233,7 +251,8 @@ class BatchAsync(object):
+@@ -244,7 +272,8 @@ class BatchAsync:
self.scheduled = True
# call later so that we maybe gather more returns
yield tornado.gen.sleep(self.batch_delay)
@@ -210,10 +208,10 @@ index da069b64bd..b8f272ed67 100644
@tornado.gen.coroutine
def run_next(self):
-@@ -254,17 +273,20 @@ class BatchAsync(object):
- metadata=self.metadata)
+@@ -266,17 +295,20 @@ class BatchAsync:
+ )
- yield tornado.gen.sleep(self.opts['timeout'])
+ yield tornado.gen.sleep(self.opts["timeout"])
- self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
+
+ # The batch can be done already at this point, which means no self.event
@@ -235,6 +233,6 @@ index da069b64bd..b8f272ed67 100644
self.ioloop = None
gc.collect()
--
-2.23.0
+2.29.2
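
The shape of the change above is easier to see outside the diff: every coroutine re-checks self.event before touching the IO loop, broad exceptions are caught, and all failure paths funnel into one close_safe() cleanup. A simplified, hypothetical reduction (not the real BatchAsync class; event.process is a stand-in):

    import gc
    import logging

    log = logging.getLogger(__name__)

    class EventConsumer:
        def __init__(self, event):
            self.event = event  # may be torn down while callbacks are in flight

        def handle(self, raw):
            if not self.event:
                return
            try:
                self.event.process(raw)
            except Exception as ex:  # pylint: disable=broad-except
                log.error("Exception occurred while processing event: %s", ex)
                self.close_safe()

        def close_safe(self):
            # drop the references that keep the consumer alive, then collect
            self.event = None
            gc.collect()
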
diff --git a/batch.py-avoid-exception-when-minion-does-not-respon.patch b/batch.py-avoid-exception-when-minion-does-not-respon.patch
index c7e4ea0..6471863 100644
--- a/batch.py-avoid-exception-when-minion-does-not-respon.patch
+++ b/batch.py-avoid-exception-when-minion-does-not-respon.patch
@@ -1,9 +1,9 @@
-From bbd2e622f7e165a6e16fd5edf5f4596764748208 Mon Sep 17 00:00:00 2001
+From 03f0aa44f6963e09a92dd3ea2090ef9ee463cb94 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 5 Jun 2019 15:15:04 +0100
-Subject: [PATCH] batch.py: avoid exception when minion does not respond
- (bsc#1135507)
+Subject: [PATCH] batch.py: avoid exception when minion does not
+ respond (bsc#1135507)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@@ -18,26 +18,29 @@ bsc#1135507
Signed-off-by: José Guilherme Vanz
---
- salt/cli/batch.py | 5 +++++
- 1 file changed, 5 insertions(+)
+ salt/cli/batch.py | 8 ++++++++
+ 1 file changed, 8 insertions(+)
diff --git a/salt/cli/batch.py b/salt/cli/batch.py
-index 67f03c8a45..10fc81a5f4 100644
+index 2bc5444aef..6285a45434 100644
--- a/salt/cli/batch.py
+++ b/salt/cli/batch.py
-@@ -318,6 +318,11 @@ class Batch(object):
- if self.opts.get('failhard') and data['retcode'] > 0:
+@@ -348,6 +348,14 @@ class Batch:
+ if self.opts.get("failhard") and data["retcode"] > 0:
failhard = True
+ # avoid an exception if the minion does not respond.
+ if data.get("failed") is True:
-+ log.debug('Minion %s failed to respond: data=%s', minion, data)
-+ data = {'ret': 'Minion did not return. [Failed]', 'retcode': salt.defaults.exitcodes.EX_GENERIC}
++ log.debug("Minion %s failed to respond: data=%s", minion, data)
++ data = {
++ "ret": "Minion did not return. [Failed]",
++ "retcode": salt.defaults.exitcodes.EX_GENERIC,
++ }
+
- if self.opts.get('raw'):
+ if self.opts.get("raw"):
ret[minion] = data
yield data
--
-2.16.4
+2.29.2
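
The fix is a defensive substitution: when a minion's payload carries failed: True, a synthetic return takes its place so later code can index data["ret"] and data["retcode"] safely. Standalone sketch (the hardcoded 1 stands in for salt.defaults.exitcodes.EX_GENERIC used by the real code):

    EX_GENERIC = 1  # stand-in for salt.defaults.exitcodes.EX_GENERIC

    def normalize_return(data):
        # Mirror the patch: replace a failed minion's payload outright.
        if data.get("failed") is True:
            return {"ret": "Minion did not return. [Failed]", "retcode": EX_GENERIC}
        return data

    print(normalize_return({"failed": True}))
    # {'ret': 'Minion did not return. [Failed]', 'retcode': 1}
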
diff --git a/batch_async-avoid-using-fnmatch-to-match-event-217.patch b/batch_async-avoid-using-fnmatch-to-match-event-217.patch
index b26a2d6..d97e8cf 100644
--- a/batch_async-avoid-using-fnmatch-to-match-event-217.patch
+++ b/batch_async-avoid-using-fnmatch-to-match-event-217.patch
@@ -1,26 +1,27 @@
-From bd20cd2655a1141fe9ea892e974e40988c3fb83c Mon Sep 17 00:00:00 2001
+From 31fedcb3173f73fbffc3b053bc64c94a7b608118 Mon Sep 17 00:00:00 2001
From: Silvio Moioli
Date: Mon, 2 Mar 2020 11:23:59 +0100
-Subject: [PATCH] batch_async: avoid using fnmatch to match event (#217)
+Subject: [PATCH] batch_async: avoid using fnmatch to match event
+ (#217)
---
salt/cli/batch_async.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index c4545e3ebc..da069b64bd 100644
+index 8d2601e636..1e2ac5b0d3 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -106,7 +106,7 @@ class BatchAsync(object):
+@@ -109,7 +109,7 @@ class BatchAsync:
return
mtag, data = self.event.unpack(raw, self.event.serial)
for (pattern, op) in self.patterns:
- if fnmatch.fnmatch(mtag, pattern):
+ if mtag.startswith(pattern[:-1]):
- minion = data['id']
- if op == 'ping_return':
+ minion = data["id"]
+ if op == "ping_return":
self.minions.add(minion)
--
-2.23.0
+2.29.2
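
The replacement is safe because every subscribed pattern ends in "*" (e.g. salt/job/<jid>/ret/*), so a plain prefix test is equivalent to the glob while skipping fnmatch's per-event pattern translation. Illustration with made-up tags:

    import fnmatch

    pattern = "salt/job/20190104120000000000/ret/*"  # example jid
    for mtag in ("salt/job/20190104120000000000/ret/minion1",
                 "salt/job/other/ret/minion1"):
        assert fnmatch.fnmatch(mtag, pattern) == mtag.startswith(pattern[:-1])
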
diff --git a/bsc-1176024-fix-file-directory-user-and-group-owners.patch b/bsc-1176024-fix-file-directory-user-and-group-owners.patch
index d4c5bda..a6f6811 100644
--- a/bsc-1176024-fix-file-directory-user-and-group-owners.patch
+++ b/bsc-1176024-fix-file-directory-user-and-group-owners.patch
@@ -1,4 +1,4 @@
-From 8973063f6ad24fd5b3788292aa8cc341221d7fb5 Mon Sep 17 00:00:00 2001
+From 60b8f6cdaab10a12973a074678608b86a34e23b7 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Tue, 6 Oct 2020 12:36:41 +0300
Subject: [PATCH] bsc#1176024: Fix file/directory user and group
@@ -12,22 +12,22 @@ Subject: [PATCH] bsc#1176024: Fix file/directory user and group
Co-authored-by: Victor Zhestkov
---
- salt/modules/file.py | 18 +++++++++---------
- salt/states/file.py | 4 ++--
- 2 files changed, 11 insertions(+), 11 deletions(-)
+ salt/modules/file.py | 26 +++++++++++++++++---------
+ salt/states/file.py | 12 ++++++++++--
+ 2 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/salt/modules/file.py b/salt/modules/file.py
-index b5b70e2d4c..0b516aff05 100644
+index 989a7ad92d..b830b390d3 100644
--- a/salt/modules/file.py
+++ b/salt/modules/file.py
-@@ -256,7 +256,7 @@ def group_to_gid(group):
+@@ -252,7 +252,7 @@ def group_to_gid(group):
try:
if isinstance(group, int):
return group
- return grp.getgrnam(group).gr_gid
+ return grp.getgrnam(salt.utils.stringutils.to_str(group)).gr_gid
except KeyError:
- return ''
+ return ""
@@ -344,7 +344,7 @@ def user_to_uid(user):
try:
@@ -36,77 +36,91 @@ index b5b70e2d4c..0b516aff05 100644
- return pwd.getpwnam(user).pw_uid
+ return pwd.getpwnam(salt.utils.stringutils.to_str(user)).pw_uid
except KeyError:
- return ''
+ return ""
-@@ -4574,7 +4574,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
- if (salt.utils.platform.is_windows() and
- user_to_uid(user) != user_to_uid(perms['luser'])
- ) or (
-- not salt.utils.platform.is_windows() and user != perms['luser']
-+ not salt.utils.platform.is_windows() and salt.utils.stringutils.to_str(user) != perms['luser']
- ):
- perms['cuser'] = user
+@@ -4977,7 +4977,10 @@ def check_perms(
+ if (
+ salt.utils.platform.is_windows()
+ and user_to_uid(user) != user_to_uid(perms["luser"])
+- ) or (not salt.utils.platform.is_windows() and user != perms["luser"]):
++ ) or (
++ not salt.utils.platform.is_windows()
++ and salt.utils.stringutils.to_str(user) != perms["luser"]
++ ):
+ perms["cuser"] = user
-@@ -4584,7 +4584,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
- if (salt.utils.platform.is_windows() and
- group_to_gid(group) != group_to_gid(perms['lgroup'])
- ) or (
-- not salt.utils.platform.is_windows() and group != perms['lgroup']
-+ not salt.utils.platform.is_windows() and salt.utils.stringutils.to_str(group) != perms['lgroup']
- ):
- perms['cgroup'] = group
+ if group:
+@@ -4986,7 +4989,10 @@ def check_perms(
+ if (
+ salt.utils.platform.is_windows()
+ and group_to_gid(group) != group_to_gid(perms["lgroup"])
+- ) or (not salt.utils.platform.is_windows() and group != perms["lgroup"]):
++ ) or (
++ not salt.utils.platform.is_windows()
++ and salt.utils.stringutils.to_str(group) != perms["lgroup"]
++ ):
+ perms["cgroup"] = group
-@@ -4615,7 +4615,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
- user != ''
- ) or (
- not salt.utils.platform.is_windows() and
-- user != get_user(name, follow_symlinks=follow_symlinks) and
-+ salt.utils.stringutils.to_str(user) != get_user(name, follow_symlinks=follow_symlinks) and
- user != ''
+ if "cuser" in perms or "cgroup" in perms:
+@@ -5017,7 +5023,8 @@ def check_perms(
+ and user != ""
+ ) or (
+ not salt.utils.platform.is_windows()
+- and user != get_user(name, follow_symlinks=follow_symlinks)
++ and salt.utils.stringutils.to_str(user)
++ != get_user(name, follow_symlinks=follow_symlinks)
+ and user != ""
):
- if __opts__['test'] is True:
-@@ -4633,10 +4633,10 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
- if (salt.utils.platform.is_windows() and
- group_to_gid(group) != group_to_gid(
- get_group(name, follow_symlinks=follow_symlinks)) and
-- user != '') or (
-+ group != '') or (
- not salt.utils.platform.is_windows() and
-- group != get_group(name, follow_symlinks=follow_symlinks) and
-- user != ''
-+ salt.utils.stringutils.to_str(group) != get_group(name, follow_symlinks=follow_symlinks) and
-+ group != ''
+ if __opts__["test"] is True:
+@@ -5035,18 +5042,19 @@ def check_perms(
+ salt.utils.platform.is_windows()
+ and group_to_gid(group)
+ != group_to_gid(get_group(name, follow_symlinks=follow_symlinks))
+- and user != ""
++ and group != ""
+ ) or (
+ not salt.utils.platform.is_windows()
+- and group != get_group(name, follow_symlinks=follow_symlinks)
+- and user != ""
++ and salt.utils.stringutils.to_str(group)
++ != get_group(name, follow_symlinks=follow_symlinks)
++ and group != ""
):
- if __opts__['test'] is True:
- ret['changes']['group'] = group
-@@ -4644,7 +4644,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
- ret['result'] = False
- ret['comment'].append('Failed to change group to {0}'
- .format(group))
-- elif 'cgroup' in perms and user != '':
-+ elif 'cgroup' in perms and group != '':
- ret['changes']['group'] = group
+ if __opts__["test"] is True:
+ ret["changes"]["group"] = group
+ else:
+ ret["result"] = False
+ ret["comment"].append("Failed to change group to {}".format(group))
+- elif "cgroup" in perms and user != "":
++ elif "cgroup" in perms and group != "":
+ ret["changes"]["group"] = group
# Mode changes if needed
diff --git a/salt/states/file.py b/salt/states/file.py
-index 0e925bb2ed..f21e0d12fc 100644
+index 9e24e389d8..89c70eb454 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
-@@ -960,11 +960,11 @@ def _check_dir_meta(name,
- changes['directory'] = 'new'
+@@ -989,9 +989,17 @@ def _check_dir_meta(name, user, group, mode, follow_symlinks=False):
+ if not stats:
+ changes["directory"] = "new"
return changes
- if (user is not None
-- and user != stats['user']
-+ and salt.utils.stringutils.to_str(user) != stats['user']
- and user != stats.get('uid')):
- changes['user'] = user
- if (group is not None
-- and group != stats['group']
-+ and salt.utils.stringutils.to_str(group) != stats['group']
- and group != stats.get('gid')):
- changes['group'] = group
+- if user is not None and user != stats["user"] and user != stats.get("uid"):
++ if (
++ user is not None
++ and salt.utils.stringutils.to_str(user) != stats["user"]
++ and user != stats.get("uid")
++ ):
+ changes["user"] = user
+- if group is not None and group != stats["group"] and group != stats.get("gid"):
++ if (
++ group is not None
++ and salt.utils.stringutils.to_str(group) != stats["group"]
++ and group != stats.get("gid")
++ ):
+ changes["group"] = group
# Normalize the dir mode
+ smode = salt.utils.files.normalize_mode(stats["mode"])
--
-2.28.0
+2.29.2
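
Two distinct fixes ride in this patch: the copy-paste error in the group branch (user != '' had to be group != ''), and normalization of the desired owner through salt.utils.stringutils.to_str before comparing, since it can arrive as bytes while the stat lookup returns str. A hedged sketch of the type mismatch (assumes a Salt checkout):

    import salt.utils.stringutils

    desired = b"root"  # e.g. decoded from pillar data as bytes
    current = "root"   # as reported by the stat/pwd lookup

    print(desired != current)  # True: a spurious "ownership change"
    print(salt.utils.stringutils.to_str(desired) != current)  # False
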
diff --git a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch b/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
index 9ac3008..eff0ef5 100644
--- a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
+++ b/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
@@ -1,4 +1,4 @@
-From 07f5a1d984b5a86c24620503f5e373ea0f11484a Mon Sep 17 00:00:00 2001
+From d9362f10681a2dfdb057939eee1ffae3a35d4a8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Fri, 12 Apr 2019 16:47:03 +0100
@@ -7,54 +7,81 @@ Subject: [PATCH] Calculate FQDNs in parallel to avoid blockings
Fix pylint issue
---
- salt/grains/core.py | 31 ++++++++++++++++++++++++++-----
- 1 file changed, 26 insertions(+), 5 deletions(-)
+ salt/grains/core.py | 55 +++++++++++++++++++++++++++++++++------------
+ 1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 309e4c9c4a..4600f055dd 100644
+index 006878f806..883e3ebe09 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -20,12 +20,15 @@ import platform
- import logging
- import locale
+@@ -20,8 +20,10 @@ import socket
+ import sys
+ import time
import uuid
-+import time
++import warnings
import zlib
from errno import EACCES, EPERM
- import datetime
- import warnings
- import time
-
+from multiprocessing.dummy import Pool as ThreadPool
+
+ import distro
+ import salt.exceptions
+@@ -44,6 +46,14 @@ import salt.utils.versions
+ from salt.ext.six.moves import range
+ from salt.utils.network import _get_interfaces
+
++# pylint: disable=import-error
++try:
++ import dateutil.tz
+
- # pylint: disable=import-error
- try:
- import dateutil.tz
-@@ -2275,13 +2278,10 @@ def fqdns():
++ _DATEUTIL_TZ = True
++except ImportError:
++ _DATEUTIL_TZ = False
++
+
+ # rewrite distro.linux_distribution to allow best=True kwarg in version(), needed to get the minor version numbers in CentOS
+ def _linux_distribution():
+@@ -2402,22 +2412,12 @@ def fqdns():
grains = {}
fqdns = set()
-- addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
-- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
-- err_message = 'Exception during resolving address: %s'
+- addresses = salt.utils.network.ip_addrs(
+- include_loopback=False, interface_data=_get_interfaces()
+- )
+- addresses.extend(
+- salt.utils.network.ip_addrs6(
+- include_loopback=False, interface_data=_get_interfaces()
+- )
+- )
+- err_message = "Exception during resolving address: %s"
- for ip in addresses:
+ def _lookup_fqdn(ip):
try:
name, aliaslist, addresslist = socket.gethostbyaddr(ip)
-- fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)])
-+ return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
+- fqdns.update(
+- [socket.getfqdn(name)]
+- + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
+- )
++ return [socket.getfqdn(name)] + [
++ als for als in aliaslist if salt.utils.network.is_fqdn(als)
++ ]
except socket.herror as err:
if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
# No FQDN for this IP address, so we don't need to know this all the time.
-@@ -2291,6 +2291,27 @@ def fqdns():
- except (socket.error, socket.gaierror, socket.timeout) as err:
+@@ -2427,6 +2427,33 @@ def fqdns():
+ except (OSError, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
+ start = time.time()
+
-+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
-+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
-+ err_message = 'Exception during resolving address: %s'
++ addresses = salt.utils.network.ip_addrs(
++ include_loopback=False, interface_data=_get_interfaces()
++ )
++ addresses.extend(
++ salt.utils.network.ip_addrs6(
++ include_loopback=False, interface_data=_get_interfaces()
++ )
++ )
++ err_message = "Exception during resolving address: %s"
+
+ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
+ # This avoids blocking the execution when the "fqdn" is not defined for certain IP addresses, which was causing
@@ -69,12 +96,12 @@ index 309e4c9c4a..4600f055dd 100644
+ fqdns.update(item)
+
+ elapsed = time.time() - start
-+ log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed))
++ log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed))
+
return {"fqdns": sorted(list(fqdns))}
--
-2.16.4
+2.29.2
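
The essential mechanic is a small thread pool over the blocking socket.gethostbyaddr call, so one address with no reverse record no longer stalls the whole grain for its full timeout. Simplified, hypothetical version (8 workers, as in the patch; the address list is a stand-in for the grains' interface scan):

    import socket
    from multiprocessing.dummy import Pool as ThreadPool

    def _lookup_fqdn(ip):
        try:
            name, aliaslist, _ = socket.gethostbyaddr(ip)
            return [socket.getfqdn(name)] + aliaslist
        except OSError:  # covers socket.herror/gaierror on failed lookups
            return []

    addresses = ["127.0.0.1", "192.0.2.1"]  # stand-in values
    pool = ThreadPool(8)
    results = pool.map(_lookup_fqdn, addresses)
    pool.close()
    pool.join()

    fqdns = set()
    for item in results:
        fqdns.update(item)
    print(sorted(fqdns))
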
diff --git a/changed-imports-to-vendored-tornado.patch b/changed-imports-to-vendored-tornado.patch
index 0226156..403546a 100644
--- a/changed-imports-to-vendored-tornado.patch
+++ b/changed-imports-to-vendored-tornado.patch
@@ -1,38 +1,36 @@
-From 0cf1a655aa9353b22ae011e492a33aa52d780f83 Mon Sep 17 00:00:00 2001
+From 5db9ccdb4f557cdbff670b18c45e55124e29c57c Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Tue, 10 Mar 2020 14:02:17 +0100
Subject: [PATCH] Changed imports to vendored Tornado
---
- salt/cli/batch_async.py | 26 ++++++++++++------------
+ salt/cli/batch_async.py | 25 ++++++++++++-----------
salt/master.py | 2 +-
- salt/transport/ipc.py | 4 ++--
tests/unit/cli/test_batch_async.py | 32 +++++++++++++++---------------
- 4 files changed, 32 insertions(+), 32 deletions(-)
+ 3 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index b8f272ed67..08eeb34f1c 100644
+index 3dc04826d1..09aa85258b 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -6,7 +6,7 @@ Execute a job on the targeted minions by using a moving window of fixed size `ba
- # Import python libs
- from __future__ import absolute_import, print_function, unicode_literals
- import gc
--import tornado
-+import salt.ext.tornado
+@@ -8,6 +8,7 @@ import gc
+ import logging
- # Import salt libs
import salt.client
-@@ -50,7 +50,7 @@ class BatchAsync(object):
- }
- '''
++import salt.ext.tornado
+ import tornado
+ from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum
+
+@@ -46,7 +47,7 @@ class BatchAsync:
+ """
+
def __init__(self, parent_opts, jid_gen, clear_load):
- ioloop = tornado.ioloop.IOLoop.current()
+ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
- self.local = salt.client.get_local_client(parent_opts['conf_file'], io_loop=ioloop)
- if 'gather_job_timeout' in clear_load['kwargs']:
- clear_load['gather_job_timeout'] = clear_load['kwargs'].pop('gather_job_timeout')
-@@ -152,7 +152,7 @@ class BatchAsync(object):
+ self.local = salt.client.get_local_client(
+ parent_opts["conf_file"], io_loop=ioloop
+ )
+@@ -161,7 +162,7 @@ class BatchAsync:
self.find_job_returned = self.find_job_returned.difference(running)
self.event.io_loop.spawn_callback(self.find_job, running)
@@ -40,18 +38,18 @@ index b8f272ed67..08eeb34f1c 100644
+ @salt.ext.tornado.gen.coroutine
def find_job(self, minions):
if self.event:
- not_done = minions.difference(self.done_minions).difference(self.timedout_minions)
-@@ -170,7 +170,7 @@ class BatchAsync(object):
- gather_job_timeout=self.opts['gather_job_timeout'],
+ not_done = minions.difference(self.done_minions).difference(
+@@ -182,7 +183,7 @@ class BatchAsync:
jid=jid,
- **self.eauth)
-- yield tornado.gen.sleep(self.opts['gather_job_timeout'])
-+ yield salt.ext.tornado.gen.sleep(self.opts['gather_job_timeout'])
+ **self.eauth
+ )
+- yield tornado.gen.sleep(self.opts["gather_job_timeout"])
++ yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"])
if self.event:
self.event.io_loop.spawn_callback(
- self.check_find_job,
-@@ -180,7 +180,7 @@ class BatchAsync(object):
- log.error("Exception occured handling batch async: {}. Aborting execution.".format(ex))
+ self.check_find_job, not_done, jid
+@@ -195,7 +196,7 @@ class BatchAsync:
+ )
self.close_safe()
- @tornado.gen.coroutine
@@ -59,12 +57,14 @@ index b8f272ed67..08eeb34f1c 100644
def start(self):
if self.event:
self.__set_event_handler()
-@@ -198,11 +198,11 @@ class BatchAsync(object):
- **self.eauth)
- self.targeted_minions = set(ping_return['minions'])
- #start batching even if not all minions respond to ping
-- yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout'])
-+ yield salt.ext.tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout'])
+@@ -213,13 +214,13 @@ class BatchAsync:
+ )
+ self.targeted_minions = set(ping_return["minions"])
+ # start batching even if not all minions respond to ping
+- yield tornado.gen.sleep(
++ yield salt.ext.tornado.gen.sleep(
+ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
+ )
if self.event:
self.event.io_loop.spawn_callback(self.start_batch)
@@ -73,16 +73,16 @@ index b8f272ed67..08eeb34f1c 100644
def start_batch(self):
if not self.initialized:
self.batch_size = get_bnum(self.opts, self.minions, True)
-@@ -216,7 +216,7 @@ class BatchAsync(object):
+@@ -235,7 +236,7 @@ class BatchAsync:
if self.event:
self.event.io_loop.spawn_callback(self.run_next)
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def end_batch(self):
- left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions))
- if not left and not self.ended:
-@@ -232,7 +232,7 @@ class BatchAsync(object):
+ left = self.minions.symmetric_difference(
+ self.done_minions.union(self.timedout_minions)
+@@ -253,7 +254,7 @@ class BatchAsync:
# release to the IOLoop to allow the event to be published
# before closing batch async execution
@@ -91,9 +91,9 @@ index b8f272ed67..08eeb34f1c 100644
self.close_safe()
def close_safe(self):
-@@ -245,16 +245,16 @@ class BatchAsync(object):
- del self
- gc.collect()
+@@ -266,16 +267,16 @@ class BatchAsync:
+ del self
+ gc.collect()
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
@@ -111,66 +111,44 @@ index b8f272ed67..08eeb34f1c 100644
def run_next(self):
self.scheduled = False
next_batch = self._get_next()
-@@ -272,7 +272,7 @@ class BatchAsync(object):
- jid=self.batch_jid,
- metadata=self.metadata)
+@@ -294,7 +295,7 @@ class BatchAsync:
+ metadata=self.metadata,
+ )
-- yield tornado.gen.sleep(self.opts['timeout'])
-+ yield salt.ext.tornado.gen.sleep(self.opts['timeout'])
+- yield tornado.gen.sleep(self.opts["timeout"])
++ yield salt.ext.tornado.gen.sleep(self.opts["timeout"])
# The batch can be done already at this point, which means no self.event
if self.event:
diff --git a/salt/master.py b/salt/master.py
-index 3abf7ae60b..3a9d12999d 100644
+index 7a99af357a..ab85c7f5c6 100644
--- a/salt/master.py
+++ b/salt/master.py
-@@ -2049,7 +2049,7 @@ class ClearFuncs(object):
+@@ -2237,7 +2237,7 @@ class ClearFuncs(TransportMethods):
functools.partial(self._prep_jid, clear_load, {}),
- batch_load
+ batch_load,
)
- ioloop = tornado.ioloop.IOLoop.current()
+ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
ioloop.add_callback(batch.start)
return {
-diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
-index d2b295a633..33ee3d4182 100644
---- a/salt/transport/ipc.py
-+++ b/salt/transport/ipc.py
-@@ -697,7 +697,7 @@ class IPCMessageSubscriber(IPCClient):
- for callback in self.callbacks:
- self.io_loop.spawn_callback(callback, raw)
-
-- @tornado.gen.coroutine
-+ @salt.ext.tornado.gen.coroutine
- def read_async(self):
- '''
- Asynchronously read messages and invoke a callback when they are ready.
-@@ -712,7 +712,7 @@ class IPCMessageSubscriber(IPCClient):
- yield salt.ext.tornado.gen.sleep(1)
- except Exception as exc: # pylint: disable=broad-except
- log.error('Exception occurred while Subscriber connecting: %s', exc)
-- yield tornado.gen.sleep(1)
-+ yield salt.ext.tornado.gen.sleep(1)
- yield self._read(None, self.__run_callbacks)
-
- def close(self):
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index e1ce60859b..635dc689a8 100644
+index dcee9a87bd..82a712b15b 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -5,8 +5,8 @@ from __future__ import absolute_import
- # Import Salt Libs
- from salt.cli.batch_async import BatchAsync
-
+@@ -1,8 +1,8 @@
-import tornado
--from tornado.testing import AsyncTestCase
+import salt.ext.tornado
+ from salt.cli.batch_async import BatchAsync
+from salt.ext.tornado.testing import AsyncTestCase
- from tests.support.unit import skipIf, TestCase
- from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
+ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
+-from tornado.testing import AsyncTestCase
-@@ -59,10 +59,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+
+ @skipIf(NO_MOCK, NO_MOCK_REASON)
+@@ -52,10 +52,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.start_batch()
self.assertEqual(self.batch.batch_size, 2)
@@ -180,12 +158,12 @@ index e1ce60859b..635dc689a8 100644
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
- future.set_result({'minions': ['foo', 'bar']})
+ future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
ret = self.batch.start()
-@@ -78,10 +78,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -71,10 +71,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# assert targeted_minions == all minions matched by tgt
- self.assertEqual(self.batch.targeted_minions, set(['foo', 'bar']))
+ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
@@ -193,11 +171,11 @@ index e1ce60859b..635dc689a8 100644
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
- future.set_result({'minions': ['foo', 'bar']})
+ future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
self.batch.batch_presence_ping_timeout = None
-@@ -109,7 +109,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- )
+@@ -103,7 +103,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ ),
)
- @tornado.testing.gen_test
@@ -205,26 +183,26 @@ index e1ce60859b..635dc689a8 100644
def test_start_batch_calls_next(self):
self.batch.run_next = MagicMock(return_value=MagicMock())
self.batch.event = MagicMock()
-@@ -165,14 +165,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.assertEqual(
- len(event.remove_event_handler.mock_calls), 1)
+@@ -160,14 +160,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(len(event.unsubscribe.mock_calls), 2)
+ self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_next(self):
self.batch.event = MagicMock()
- self.batch.opts['fun'] = 'my.fun'
- self.batch.opts['arg'] = []
- self.batch._get_next = MagicMock(return_value={'foo', 'bar'})
+ self.batch.opts["fun"] = "my.fun"
+ self.batch.opts["arg"] = []
+ self.batch._get_next = MagicMock(return_value={"foo", "bar"})
self.batch.batch_size = 2
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
- future.set_result({'minions': ['foo', 'bar']})
+ future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
self.batch.run_next()
-@@ -284,38 +284,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -290,38 +290,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch._BatchAsync__event_handler(MagicMock())
- self.assertEqual(self.batch.find_job_returned, {'foo'})
+ self.assertEqual(self.batch.find_job_returned, {"foo"})
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
@@ -242,33 +220,33 @@ index e1ce60859b..635dc689a8 100644
+ future = salt.ext.tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
- self.batch.minions = set(['foo', 'bar'])
+ self.batch.minions = {"foo", "bar"}
self.batch.jid_gen = MagicMock(return_value="1234")
- tornado.gen.sleep = MagicMock(return_value=future)
+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
- self.batch.find_job({'foo', 'bar'})
+ self.batch.find_job({"foo", "bar"})
self.assertEqual(
self.batch.event.io_loop.spawn_callback.call_args[0],
- (self.batch.check_find_job, {'foo', 'bar'}, "1234")
+ (self.batch.check_find_job, {"foo", "bar"}, "1234"),
)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_find_job_with_done_minions(self):
- self.batch.done_minions = {'bar'}
+ self.batch.done_minions = {"bar"}
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
- self.batch.minions = set(['foo', 'bar'])
+ self.batch.minions = {"foo", "bar"}
self.batch.jid_gen = MagicMock(return_value="1234")
- tornado.gen.sleep = MagicMock(return_value=future)
+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
- self.batch.find_job({'foo', 'bar'})
+ self.batch.find_job({"foo", "bar"})
self.assertEqual(
self.batch.event.io_loop.spawn_callback.call_args[0],
--
-2.23.0
+2.29.2
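
The mechanical rule of the patch: Tornado symbols are imported from the copy vendored under salt/ext rather than the system package, so Salt controls which Tornado version its coroutines run against. Minimal usage sketch, assuming a Salt checkout:

    import salt.ext.tornado.gen
    import salt.ext.tornado.ioloop

    @salt.ext.tornado.gen.coroutine
    def pause():
        # same API as upstream tornado.gen, just a pinned copy
        yield salt.ext.tornado.gen.sleep(0.1)

    salt.ext.tornado.ioloop.IOLoop.current().run_sync(pause)
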
diff --git a/debian-info_installed-compatibility-50453.patch b/debian-info_installed-compatibility-50453.patch
index dbb7665..7c45e35 100644
--- a/debian-info_installed-compatibility-50453.patch
+++ b/debian-info_installed-compatibility-50453.patch
@@ -1,4 +1,4 @@
-From 068eecfba4b2a14b334ff17a295d4005d17491f3 Mon Sep 17 00:00:00 2001
+From 36f4465d22f8cdf05be20ba72756757f5725e509 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Tue, 20 Nov 2018 16:06:31 +0100
Subject: [PATCH] Debian info_installed compatibility (#50453)
@@ -49,17 +49,17 @@ Adjust architecture getter according to the lowpkg info
Fix wrong Git merge: missing function signature
---
- salt/modules/aptpkg.py | 20 ++++-
- salt/modules/dpkg_lowpkg.py | 93 +++++++++++++++++---
- tests/unit/modules/test_aptpkg.py | 153 +++++++++++++++++++++------------
- tests/unit/modules/test_dpkg_lowpkg.py | 127 ++++++++++++++-------------
- 4 files changed, 263 insertions(+), 130 deletions(-)
+ salt/modules/aptpkg.py | 24 ++-
+ salt/modules/dpkg_lowpkg.py | 110 ++++++++++--
+ tests/unit/modules/test_aptpkg.py | 235 ++++++++++++++++++-------
+ tests/unit/modules/test_dpkg_lowpkg.py | 189 +++++++++++---------
+ 4 files changed, 396 insertions(+), 162 deletions(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 8f4d95a195..4ec9158476 100644
+index 70e173806a..bf90d0614f 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -2825,6 +2825,15 @@ def info_installed(*names, **kwargs):
+@@ -2902,6 +2902,15 @@ def info_installed(*names, **kwargs):
.. versionadded:: 2016.11.3
@@ -75,24 +75,28 @@ index 8f4d95a195..4ec9158476 100644
CLI example:
.. code-block:: bash
-@@ -2835,11 +2844,15 @@ def info_installed(*names, **kwargs):
- '''
+@@ -2912,11 +2921,19 @@ def info_installed(*names, **kwargs):
+ """
kwargs = salt.utils.args.clean_kwargs(**kwargs)
- failhard = kwargs.pop('failhard', True)
-+ kwargs.pop('errors', None) # Only for compatibility with RPM
-+ attr = kwargs.pop('attr', None) # Package attributes to return
-+ all_versions = kwargs.pop('all_versions', False) # This is for backward compatible structure only
+ failhard = kwargs.pop("failhard", True)
++ kwargs.pop("errors", None) # Only for compatibility with RPM
++ attr = kwargs.pop("attr", None) # Package attributes to return
++ all_versions = kwargs.pop(
++ "all_versions", False
++ ) # This is for backward compatible structure only
+
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
-- for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard).items():
-+ for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard, attr=attr).items():
+- for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items():
++ for pkg_name, pkg_nfo in __salt__["lowpkg.info"](
++ *names, failhard=failhard, attr=attr
++ ).items():
t_nfo = dict()
- if pkg_nfo.get('status', 'ii')[1] != 'i':
- continue # return only packages that are really installed
-@@ -2860,7 +2873,10 @@ def info_installed(*names, **kwargs):
+ if pkg_nfo.get("status", "ii")[1] != "i":
+ continue # return only packages that are really installed
+@@ -2937,7 +2954,10 @@ def info_installed(*names, **kwargs):
else:
t_nfo[key] = value
@@ -105,38 +109,58 @@ index 8f4d95a195..4ec9158476 100644
return ret
diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
-index 4ac8efd2f2..b78e844830 100644
+index d569e04995..7447637774 100644
--- a/salt/modules/dpkg_lowpkg.py
+++ b/salt/modules/dpkg_lowpkg.py
-@@ -252,6 +252,38 @@ def file_dict(*packages):
- return {'errors': errors, 'packages': ret}
+@@ -2,13 +2,11 @@
+ Support for DEB packages
+ """
+
+-# Import python libs
+ import datetime
+ import logging
+ import os
+ import re
+
+-# Import salt libs
+ import salt.utils.args
+ import salt.utils.data
+ import salt.utils.files
+@@ -236,6 +234,44 @@ def file_dict(*packages, **kwargs):
+ return {"errors": errors, "packages": ret}
+def _get_pkg_build_time(name):
-+ '''
++ """
+ Get package build time, if possible.
+
+ :param name:
+ :return:
-+ '''
++ """
+ iso_time = iso_time_t = None
-+ changelog_dir = os.path.join('/usr/share/doc', name)
++ changelog_dir = os.path.join("/usr/share/doc", name)
+ if os.path.exists(changelog_dir):
+ for fname in os.listdir(changelog_dir):
+ try:
+ iso_time_t = int(os.path.getmtime(os.path.join(changelog_dir, fname)))
-+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z'
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
+ break
+ except OSError:
+ pass
+
+ # Packager doesn't care about Debian standards, therefore Plan B: brute-force it.
+ if not iso_time:
-+ for pkg_f_path in __salt__['cmd.run']('dpkg-query -L {}'.format(name)).splitlines():
-+ if 'changelog' in pkg_f_path.lower() and os.path.exists(pkg_f_path):
++ for pkg_f_path in __salt__["cmd.run"](
++ "dpkg-query -L {}".format(name)
++ ).splitlines():
++ if "changelog" in pkg_f_path.lower() and os.path.exists(pkg_f_path):
+ try:
+ iso_time_t = int(os.path.getmtime(pkg_f_path))
-+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z'
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
+ break
+ except OSError:
+ pass
@@ -145,67 +169,76 @@ index 4ac8efd2f2..b78e844830 100644
+
+
def _get_pkg_info(*packages, **kwargs):
- '''
+ """
Return list of package information. If 'packages' parameter is empty,
-@@ -274,7 +306,7 @@ def _get_pkg_info(*packages, **kwargs):
- ret = []
- cmd = "dpkg-query -W -f='package:" + bin_var + "\\n" \
- "revision:${binary:Revision}\\n" \
-- "architecture:${Architecture}\\n" \
-+ "arch:${Architecture}\\n" \
- "maintainer:${Maintainer}\\n" \
- "summary:${Summary}\\n" \
- "source:${source:Package}\\n" \
-@@ -308,9 +340,14 @@ def _get_pkg_info(*packages, **kwargs):
+@@ -259,7 +295,7 @@ def _get_pkg_info(*packages, **kwargs):
+ cmd = (
+ "dpkg-query -W -f='package:" + bin_var + "\\n"
+ "revision:${binary:Revision}\\n"
+- "architecture:${Architecture}\\n"
++ "arch:${Architecture}\\n"
+ "maintainer:${Maintainer}\\n"
+ "summary:${Summary}\\n"
+ "source:${source:Package}\\n"
+@@ -298,9 +334,16 @@ def _get_pkg_info(*packages, **kwargs):
key, value = pkg_info_line.split(":", 1)
if value:
pkg_data[key] = value
-- install_date = _get_pkg_install_time(pkg_data.get('package'))
+- install_date = _get_pkg_install_time(pkg_data.get("package"))
- if install_date:
-- pkg_data['install_date'] = install_date
-+ install_date, install_date_t = _get_pkg_install_time(pkg_data.get('package'), pkg_data.get('arch'))
+- pkg_data["install_date"] = install_date
++ install_date, install_date_t = _get_pkg_install_time(
++ pkg_data.get("package"), pkg_data.get("arch")
++ )
+ if install_date:
-+ pkg_data['install_date'] = install_date
-+ pkg_data['install_date_time_t'] = install_date_t # Unix ticks
-+ build_date, build_date_t = _get_pkg_build_time(pkg_data.get('package'))
++ pkg_data["install_date"] = install_date
++ pkg_data["install_date_time_t"] = install_date_t # Unix ticks
++ build_date, build_date_t = _get_pkg_build_time(pkg_data.get("package"))
+ if build_date:
-+ pkg_data['build_date'] = build_date
-+ pkg_data['build_date_time_t'] = build_date_t
- pkg_data['description'] = pkg_descr.split(":", 1)[-1]
++ pkg_data["build_date"] = build_date
++ pkg_data["build_date_time_t"] = build_date_t
+ pkg_data["description"] = pkg_descr.split(":", 1)[-1]
ret.append(pkg_data)
-@@ -336,19 +373,32 @@ def _get_pkg_license(pkg):
+@@ -326,24 +369,34 @@ def _get_pkg_license(pkg):
return ", ".join(sorted(licenses))
-def _get_pkg_install_time(pkg):
+def _get_pkg_install_time(pkg, arch):
- '''
+ """
Return package install time, based on the /var/lib/dpkg/info/.list
:return:
- '''
+ """
- iso_time = None
+ iso_time = iso_time_t = None
-+ loc_root = '/var/lib/dpkg/info'
++ loc_root = "/var/lib/dpkg/info"
if pkg is not None:
-- location = "/var/lib/dpkg/info/{0}.list".format(pkg)
+- location = "/var/lib/dpkg/info/{}.list".format(pkg)
- if os.path.exists(location):
-- iso_time = datetime.datetime.utcfromtimestamp(int(os.path.getmtime(location))).isoformat() + "Z"
+- iso_time = (
+- datetime.datetime.utcfromtimestamp(
+- int(os.path.getmtime(location))
+- ).isoformat()
+- + "Z"
+- )
+ locations = []
-+ if arch is not None and arch != 'all':
-+ locations.append(os.path.join(loc_root, '{0}:{1}.list'.format(pkg, arch)))
-+
-+ locations.append(os.path.join(loc_root, '{0}.list'.format(pkg)))
++ if arch is not None and arch != "all":
++ locations.append(os.path.join(loc_root, "{}:{}.list".format(pkg, arch)))
+
+- return iso_time
++ locations.append(os.path.join(loc_root, "{}.list".format(pkg)))
+ for location in locations:
+ try:
+ iso_time_t = int(os.path.getmtime(location))
-+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z'
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
+ break
+ except OSError:
+ pass
-
-- return iso_time
++
+ if iso_time is None:
+ log.debug('Unable to get package installation time for package "%s".', pkg)
+
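
The multi-arch probe order that the new _get_pkg_install_time(pkg, arch) introduces can be reproduced standalone. A minimal sketch, assuming dpkg's default database layout; candidate_list_files is a name invented here for illustration:

    import os

    def candidate_list_files(pkg, arch, root="/var/lib/dpkg/info"):
        # Multi-arch packages register their file list as "name:arch.list",
        # so that variant is probed first; "name.list" stays as the fallback.
        paths = []
        if arch is not None and arch != "all":
            paths.append(os.path.join(root, "{}:{}.list".format(pkg, arch)))
        paths.append(os.path.join(root, "{}.list".format(pkg)))
        return paths

    print(candidate_list_files("wget", "amd64"))
    # ['/var/lib/dpkg/info/wget:amd64.list', '/var/lib/dpkg/info/wget.list']
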
@@ -213,7 +246,7 @@ index 4ac8efd2f2..b78e844830 100644
def _get_pkg_ds_avail():
-@@ -398,6 +448,15 @@ def info(*packages, **kwargs):
+@@ -393,6 +446,15 @@ def info(*packages, **kwargs):
.. versionadded:: 2016.11.3
@@ -229,25 +262,25 @@ index 4ac8efd2f2..b78e844830 100644
CLI example:
.. code-block:: bash
-@@ -412,6 +471,10 @@ def info(*packages, **kwargs):
+@@ -407,6 +469,10 @@ def info(*packages, **kwargs):
kwargs = salt.utils.args.clean_kwargs(**kwargs)
- failhard = kwargs.pop('failhard', True)
-+ attr = kwargs.pop('attr', None) or None
+ failhard = kwargs.pop("failhard", True)
++ attr = kwargs.pop("attr", None) or None
+ if attr:
-+ attr = attr.split(',')
++ attr = attr.split(",")
+
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
-@@ -431,6 +494,14 @@ def info(*packages, **kwargs):
- lic = _get_pkg_license(pkg['package'])
+@@ -434,6 +500,14 @@ def info(*packages, **kwargs):
+ lic = _get_pkg_license(pkg["package"])
if lic:
- pkg['license'] = lic
-- ret[pkg['package']] = pkg
+ pkg["license"] = lic
+- ret[pkg["package"]] = pkg
+
+ # Remove keys that aren't in attrs
-+ pkg_name = pkg['package']
++ pkg_name = pkg["package"]
+ if attr:
+ for k in list(pkg.keys())[:]:
+ if k not in attr:
@@ -257,400 +290,558 @@ index 4ac8efd2f2..b78e844830 100644
return ret
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
-index e1b6602df5..10e960f090 100644
+index a7b7a34166..77d8b84896 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
-@@ -20,6 +20,8 @@ from tests.support.mock import Mock, MagicMock, patch
- from salt.ext import six
- from salt.exceptions import CommandExecutionError, SaltInvocationError
+@@ -13,6 +13,7 @@ import textwrap
+ import pytest
import salt.modules.aptpkg as aptpkg
-+import pytest
-+import textwrap
-
- try:
- import pytest
-@@ -166,51 +168,39 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
++from salt.ext import six
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, Mock, call, patch
+ from tests.support.unit import TestCase, skipIf
+@@ -182,49 +183,54 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
- return {aptpkg: {}}
+ return {aptpkg: {"__grains__": {}}}
-+ @patch('salt.modules.aptpkg.__salt__',
-+ {'pkg_resource.version': MagicMock(return_value=LOWPKG_INFO['wget']['version'])})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {
++ "pkg_resource.version": MagicMock(
++ return_value=LOWPKG_INFO["wget"]["version"]
++ )
++ },
++ )
def test_version(self):
- '''
+ """
Test - Returns a string representing the package version or an empty string if
not installed.
- '''
-- version = LOWPKG_INFO['wget']['version']
+ """
+- version = LOWPKG_INFO["wget"]["version"]
- mock = MagicMock(return_value=version)
-- with patch.dict(aptpkg.__salt__, {'pkg_resource.version': mock}):
-- self.assertEqual(aptpkg.version(*['wget']), version)
-+ assert aptpkg.version(*['wget']) == aptpkg.__salt__['pkg_resource.version']()
+- with patch.dict(aptpkg.__salt__, {"pkg_resource.version": mock}):
+- self.assertEqual(aptpkg.version(*["wget"]), version)
++ assert aptpkg.version(*["wget"]) == aptpkg.__salt__["pkg_resource.version"]()
-+ @patch('salt.modules.aptpkg.latest_version', MagicMock(return_value=''))
++ @patch("salt.modules.aptpkg.latest_version", MagicMock(return_value=""))
def test_upgrade_available(self):
- '''
+ """
Test - Check whether or not an upgrade is available for a given package.
- '''
-- with patch('salt.modules.aptpkg.latest_version',
-- MagicMock(return_value='')):
-- self.assertFalse(aptpkg.upgrade_available('wget'))
-+ assert not aptpkg.upgrade_available('wget')
+ """
+- with patch("salt.modules.aptpkg.latest_version", MagicMock(return_value="")):
+- self.assertFalse(aptpkg.upgrade_available("wget"))
++ assert not aptpkg.upgrade_available("wget")
-+ @patch('salt.modules.aptpkg.get_repo_keys', MagicMock(return_value=REPO_KEYS))
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': 'OK'})})
++ @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS))
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})},
++ )
def test_add_repo_key(self):
- '''
+ """
Test - Add a repo key.
- '''
-- with patch('salt.modules.aptpkg.get_repo_keys',
-- MagicMock(return_value=REPO_KEYS)):
-- mock = MagicMock(return_value={
-- 'retcode': 0,
-- 'stdout': 'OK'
-- })
-- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}):
-- self.assertTrue(aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com',
-- keyid='FBB75451'))
-+ assert aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com', keyid='FBB75451')
+ """
+- with patch(
+- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)
+- ):
+- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"})
+- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}):
+- self.assertTrue(
+- aptpkg.add_repo_key(
+- keyserver="keyserver.ubuntu.com", keyid="FBB75451"
+- )
+- )
++ assert aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com", keyid="FBB75451")
-+ @patch('salt.modules.aptpkg.get_repo_keys', MagicMock(return_value=REPO_KEYS))
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': 'OK'})})
++ @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS))
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})},
++ )
def test_add_repo_key_failed(self):
- '''
+ """
Test - Add a repo key using incomplete input data.
- '''
-- with patch('salt.modules.aptpkg.get_repo_keys',
-- MagicMock(return_value=REPO_KEYS)):
-- kwargs = {'keyserver': 'keyserver.ubuntu.com'}
-- mock = MagicMock(return_value={
-- 'retcode': 0,
-- 'stdout': 'OK'
-- })
-- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}):
+ """
+- with patch(
+- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)
+- ):
+- kwargs = {"keyserver": "keyserver.ubuntu.com"}
+- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"})
+- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}):
- self.assertRaises(SaltInvocationError, aptpkg.add_repo_key, **kwargs)
+ with pytest.raises(SaltInvocationError) as ex:
-+ aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com')
-+ assert ' No keyid or keyid too short for keyserver: keyserver.ubuntu.com' in str(ex)
++ aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com")
++ assert (
++ " No keyid or keyid too short for keyserver: keyserver.ubuntu.com"
++ in str(ex)
++ )
def test_get_repo_keys(self):
- '''
-@@ -223,35 +213,31 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}):
+ """
+@@ -234,35 +240,48 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}):
self.assertEqual(aptpkg.get_repo_keys(), REPO_KEYS)
-+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.file_dict': MagicMock(return_value=LOWPKG_FILES)})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"lowpkg.file_dict": MagicMock(return_value=LOWPKG_FILES)},
++ )
def test_file_dict(self):
- '''
+ """
Test - List the files that belong to a package, grouped by package.
- '''
+ """
- mock = MagicMock(return_value=LOWPKG_FILES)
-- with patch.dict(aptpkg.__salt__, {'lowpkg.file_dict': mock}):
-- self.assertEqual(aptpkg.file_dict('wget'), LOWPKG_FILES)
-+ assert aptpkg.file_dict('wget') == LOWPKG_FILES
+- with patch.dict(aptpkg.__salt__, {"lowpkg.file_dict": mock}):
+- self.assertEqual(aptpkg.file_dict("wget"), LOWPKG_FILES)
++ assert aptpkg.file_dict("wget") == LOWPKG_FILES
-+ @patch('salt.modules.aptpkg.__salt__', {
-+ 'lowpkg.file_list': MagicMock(return_value={'errors': LOWPKG_FILES['errors'],
-+ 'files': LOWPKG_FILES['packages']['wget']})})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {
++ "lowpkg.file_list": MagicMock(
++ return_value={
++ "errors": LOWPKG_FILES["errors"],
++ "files": LOWPKG_FILES["packages"]["wget"],
++ }
++ )
++ },
++ )
def test_file_list(self):
- '''
+ """
- Test - List the files that belong to a package.
+ Test the 'file_list' function, which is just an alias for the lowpkg 'file_list'.
+
- '''
+ """
- files = {
-- 'errors': LOWPKG_FILES['errors'],
-- 'files': LOWPKG_FILES['packages']['wget'],
+- "errors": LOWPKG_FILES["errors"],
+- "files": LOWPKG_FILES["packages"]["wget"],
- }
- mock = MagicMock(return_value=files)
-- with patch.dict(aptpkg.__salt__, {'lowpkg.file_list': mock}):
-- self.assertEqual(aptpkg.file_list('wget'), files)
-+ assert aptpkg.file_list('wget') == aptpkg.__salt__['lowpkg.file_list']()
+- with patch.dict(aptpkg.__salt__, {"lowpkg.file_list": mock}):
+- self.assertEqual(aptpkg.file_list("wget"), files)
++ assert aptpkg.file_list("wget") == aptpkg.__salt__["lowpkg.file_list"]()
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_stdout': MagicMock(return_value='wget\t\t\t\t\t\tinstall')})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"cmd.run_stdout": MagicMock(return_value="wget\t\t\t\t\t\tinstall")},
++ )
def test_get_selections(self):
- '''
+ """
Test - View package state from the dpkg database.
- '''
-- selections = {'install': ['wget']}
-- mock = MagicMock(return_value='wget\t\t\t\t\t\tinstall')
-- with patch.dict(aptpkg.__salt__, {'cmd.run_stdout': mock}):
-- self.assertEqual(aptpkg.get_selections('wget'), selections)
-+ assert aptpkg.get_selections('wget') == {'install': ['wget']}
+ """
+- selections = {"install": ["wget"]}
+- mock = MagicMock(return_value="wget\t\t\t\t\t\tinstall")
+- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}):
+- self.assertEqual(aptpkg.get_selections("wget"), selections)
++ assert aptpkg.get_selections("wget") == {"install": ["wget"]}
-+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)},
++ )
def test_info_installed(self):
- '''
+ """
Test - Return the information of the named package(s) installed on the system.
-@@ -267,21 +253,72 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
- if installed['wget'].get(names[name], False):
- installed['wget'][name] = installed['wget'].pop(names[name])
+@@ -274,21 +293,101 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ if installed["wget"].get(names[name], False):
+ installed["wget"][name] = installed["wget"].pop(names[name])
- mock = MagicMock(return_value=LOWPKG_INFO)
-- with patch.dict(aptpkg.__salt__, {'lowpkg.info': mock}):
-- del installed['wget']['status']
-- self.assertEqual(aptpkg.info_installed('wget'), installed)
+- with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}):
+- del installed["wget"]["status"]
+- self.assertEqual(aptpkg.info_installed("wget"), installed)
- self.assertEqual(len(aptpkg.info_installed()), 1)
-+ assert aptpkg.info_installed('wget') == installed
-+
-+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)})
++ assert aptpkg.info_installed("wget") == installed
+
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)},
++ )
+ def test_info_installed_attr(self):
-+ '''
++ """
+ Test info_installed 'attr'.
+ This doesn't test 'attr' behaviour per se, since the underlying function is in dpkg.
+ The test should simply not raise exceptions for an invalid parameter.
+
+ :return:
-+ '''
-+ ret = aptpkg.info_installed('emacs', attr='foo,bar')
++ """
++ ret = aptpkg.info_installed("emacs", attr="foo,bar")
+ assert isinstance(ret, dict)
-+ assert 'wget' in ret
-+ assert isinstance(ret['wget'], dict)
++ assert "wget" in ret
++ assert isinstance(ret["wget"], dict)
+
-+ wget_pkg = ret['wget']
-+ expected_pkg = {'url': 'http://www.gnu.org/software/wget/',
-+ 'packager': 'Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>', 'name': 'wget',
-+ 'install_date': '2016-08-30T22:20:15Z', 'description': 'retrieves files from the web',
-+ 'version': '1.15-1ubuntu1.14.04.2', 'architecture': 'amd64', 'group': 'web', 'source': 'wget'}
++ wget_pkg = ret["wget"]
++ expected_pkg = {
++ "url": "http://www.gnu.org/software/wget/",
++ "packager": "Ubuntu Developers ",
++ "name": "wget",
++ "install_date": "2016-08-30T22:20:15Z",
++ "description": "retrieves files from the web",
++ "version": "1.15-1ubuntu1.14.04.2",
++ "architecture": "amd64",
++ "group": "web",
++ "source": "wget",
++ }
+ for k in wget_pkg:
+ assert k in expected_pkg
+ assert wget_pkg[k] == expected_pkg[k]
+
-+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)})
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)},
++ )
+ def test_info_installed_all_versions(self):
-+ '''
++ """
+ Test info_installed 'all_versions'.
+ Since Debian won't report multiple installed versions of the same package,
+ this should just return a different structure, backward compatible with
+ the RPM equivalents.
+
+ :return:
-+ '''
++ """
-+ ret = aptpkg.info_installed('emacs', all_versions=True)
++ ret = aptpkg.info_installed("emacs", all_versions=True)
+ assert isinstance(ret, dict)
-+ assert 'wget' in ret
-+ assert isinstance(ret['wget'], list)
++ assert "wget" in ret
++ assert isinstance(ret["wget"], list)
+
-+ pkgs = ret['wget']
++ pkgs = ret["wget"]
+
+ assert len(pkgs) == 1
+ assert isinstance(pkgs[0], dict)
+
+ wget_pkg = pkgs[0]
-+ expected_pkg = {'url': 'http://www.gnu.org/software/wget/',
-+ 'packager': 'Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>', 'name': 'wget',
-+ 'install_date': '2016-08-30T22:20:15Z', 'description': 'retrieves files from the web',
-+ 'version': '1.15-1ubuntu1.14.04.2', 'architecture': 'amd64', 'group': 'web', 'source': 'wget'}
++ expected_pkg = {
++ "url": "http://www.gnu.org/software/wget/",
++ "packager": "Ubuntu Developers ",
++ "name": "wget",
++ "install_date": "2016-08-30T22:20:15Z",
++ "description": "retrieves files from the web",
++ "version": "1.15-1ubuntu1.14.04.2",
++ "architecture": "amd64",
++ "group": "web",
++ "source": "wget",
++ }
+ for k in wget_pkg:
+ assert k in expected_pkg
+ assert wget_pkg[k] == expected_pkg[k]
-
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_stdout': MagicMock(return_value='wget: /usr/bin/wget')})
++
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {"cmd.run_stdout": MagicMock(return_value="wget: /usr/bin/wget")},
++ )
def test_owner(self):
- '''
+ """
Test - Return the name of the package that owns the file.
- '''
-- paths = ['/usr/bin/wget']
-- mock = MagicMock(return_value='wget: /usr/bin/wget')
-- with patch.dict(aptpkg.__salt__, {'cmd.run_stdout': mock}):
-- self.assertEqual(aptpkg.owner(*paths), 'wget')
-+ assert aptpkg.owner('/usr/bin/wget') == 'wget'
+ """
+- paths = ["/usr/bin/wget"]
+- mock = MagicMock(return_value="wget: /usr/bin/wget")
+- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}):
+- self.assertEqual(aptpkg.owner(*paths), "wget")
++ assert aptpkg.owner("/usr/bin/wget") == "wget"
-+ @patch('salt.utils.pkg.clear_rtag', MagicMock())
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0,
-+ 'stdout': APT_Q_UPDATE}),
-+ 'config.get': MagicMock(return_value=False)})
++ @patch("salt.utils.pkg.clear_rtag", MagicMock())
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {
++ "cmd.run_all": MagicMock(
++ return_value={"retcode": 0, "stdout": APT_Q_UPDATE}
++ ),
++ "config.get": MagicMock(return_value=False),
++ },
++ )
def test_refresh_db(self):
- '''
+ """
Test - Updates the APT database to latest packages based upon repositories.
-@@ -301,6 +338,10 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock, 'config.get': MagicMock(return_value=False)}):
+@@ -308,6 +407,16 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ ):
self.assertEqual(aptpkg.refresh_db(), refresh_db)
-+ @patch('salt.utils.pkg.clear_rtag', MagicMock())
-+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0,
-+ 'stdout': APT_Q_UPDATE_ERROR}),
-+ 'config.get': MagicMock(return_value=False)})
++ @patch("salt.utils.pkg.clear_rtag", MagicMock())
++ @patch(
++ "salt.modules.aptpkg.__salt__",
++ {
++ "cmd.run_all": MagicMock(
++ return_value={"retcode": 0, "stdout": APT_Q_UPDATE_ERROR}
++ ),
++ "config.get": MagicMock(return_value=False),
++ },
++ )
def test_refresh_db_failed(self):
- '''
+ """
Test - Update the APT database using unreachable repositories.
-@@ -332,22 +373,24 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+@@ -340,29 +449,33 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
assert aptpkg.autoremove(list_only=True) == []
assert aptpkg.autoremove(list_only=True, purge=True) == []
-+ @patch('salt.modules.aptpkg._uninstall', MagicMock(return_value=UNINSTALL))
+- def test_install(self):
+- """
+- Test - Install packages.
+- """
+- with patch("salt.modules.aptpkg.install", MagicMock(return_value=INSTALL)):
+- self.assertEqual(aptpkg.install(name="tmux"), INSTALL)
+- kwargs = {"force_conf_new": True}
+- self.assertEqual(aptpkg.install(name="tmux", **kwargs), INSTALL)
+-
++ @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL))
def test_remove(self):
- '''
+ """
Test - Remove packages.
- '''
-- with patch('salt.modules.aptpkg._uninstall',
-- MagicMock(return_value=UNINSTALL)):
-- self.assertEqual(aptpkg.remove(name='tmux'), UNINSTALL)
-+ assert aptpkg.remove(name='tmux') == UNINSTALL
+ """
+- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)):
+- self.assertEqual(aptpkg.remove(name="tmux"), UNINSTALL)
++ assert aptpkg.remove(name="tmux") == UNINSTALL
-+ @patch('salt.modules.aptpkg._uninstall', MagicMock(return_value=UNINSTALL))
++ @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL))
def test_purge(self):
- '''
+ """
Test - Remove packages along with all configuration files.
- '''
-- with patch('salt.modules.aptpkg._uninstall',
-- MagicMock(return_value=UNINSTALL)):
-- self.assertEqual(aptpkg.purge(name='tmux'), UNINSTALL)
-+ assert aptpkg.purge(name='tmux') == UNINSTALL
-
-+ @patch('salt.utils.pkg.clear_rtag', MagicMock())
-+ @patch('salt.modules.aptpkg.list_pkgs', MagicMock(return_value=UNINSTALL))
-+ @patch.multiple(aptpkg, **{'__salt__': {'config.get': MagicMock(return_value=True),
-+ 'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': UPGRADE})}})
+ """
+- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)):
+- self.assertEqual(aptpkg.purge(name="tmux"), UNINSTALL)
+-
++ assert aptpkg.purge(name="tmux") == UNINSTALL
++
++ @patch("salt.utils.pkg.clear_rtag", MagicMock())
++ @patch("salt.modules.aptpkg.list_pkgs", MagicMock(return_value=UNINSTALL))
++ @patch.multiple(
++ aptpkg,
++ **{
++ "__salt__": {
++ "config.get": MagicMock(return_value=True),
++ "cmd.run_all": MagicMock(
++ return_value={"retcode": 0, "stdout": UPGRADE}
++ ),
++ }
++ }
++ )
def test_upgrade(self):
- '''
+ """
Test - Upgrades all packages.
diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py
-index 6c07a75417..a0b3346f9d 100644
+index 071c0f0742..160bbcd5b1 100644
--- a/tests/unit/modules/test_dpkg_lowpkg.py
+++ b/tests/unit/modules/test_dpkg_lowpkg.py
-@@ -23,6 +23,30 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
- '''
- Test cases for salt.modules.dpkg
- '''
+@@ -1,18 +1,12 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
+ """
+
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import os
+
+-# Import Salt Libs
+ import salt.modules.dpkg_lowpkg as dpkg
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
+@@ -65,6 +59,51 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
+ package = cmd[2]
+ return DPKG_L_OUTPUT[package]
+
+ dselect_pkg = {
-+ 'emacs': {'priority': 'optional', 'filename': 'pool/main/e/emacs-defaults/emacs_46.1_all.deb',
-+ 'description': 'GNU Emacs editor (metapackage)', 'md5sum': '766eb2cee55ba0122dac64c4cea04445',
-+ 'sha256': 'd172289b9a1608820eddad85c7ffc15f346a6e755c3120de0f64739c4bbc44ce',
-+ 'description-md5': '21fb7da111336097a2378959f6d6e6a8',
-+ 'bugs': 'https://bugs.launchpad.net/springfield/+filebug',
-+ 'depends': 'emacs24 | emacs24-lucid | emacs24-nox', 'origin': 'Simpsons', 'version': '46.1',
-+ 'task': 'ubuntu-usb, edubuntu-usb', 'original-maintainer': 'Homer Simpson <homer@springfield.org>',
-+ 'package': 'emacs', 'architecture': 'all', 'size': '1692',
-+ 'sha1': '9271bcec53c1f7373902b1e594d9fc0359616407', 'source': 'emacs-defaults',
-+ 'maintainer': 'Simpsons Developers <simpsons-devel-discuss@lists.springfield.org>', 'supported': '9m',
-+ 'section': 'editors', 'installed-size': '25'}
++ "emacs": {
++ "priority": "optional",
++ "filename": "pool/main/e/emacs-defaults/emacs_46.1_all.deb",
++ "description": "GNU Emacs editor (metapackage)",
++ "md5sum": "766eb2cee55ba0122dac64c4cea04445",
++ "sha256": "d172289b9a1608820eddad85c7ffc15f346a6e755c3120de0f64739c4bbc44ce",
++ "description-md5": "21fb7da111336097a2378959f6d6e6a8",
++ "bugs": "https://bugs.launchpad.net/springfield/+filebug",
++ "depends": "emacs24 | emacs24-lucid | emacs24-nox",
++ "origin": "Simpsons",
++ "version": "46.1",
++ "task": "ubuntu-usb, edubuntu-usb",
++ "original-maintainer": "Homer Simpson ",
++ "package": "emacs",
++ "architecture": "all",
++ "size": "1692",
++ "sha1": "9271bcec53c1f7373902b1e594d9fc0359616407",
++ "source": "emacs-defaults",
++ "maintainer": "Simpsons Developers ",
++ "supported": "9m",
++ "section": "editors",
++ "installed-size": "25",
++ }
+ }
+
+ pkgs_info = [
-+ {'version': '46.1', 'arch': 'all', 'build_date': '2014-08-07T16:51:48Z', 'install_date_time_t': 1481745778,
-+ 'section': 'editors', 'description': 'GNU Emacs editor (metapackage)\n GNU Emacs is the extensible '
-+ 'self-documenting text editor.\n This is a metapackage that will always '
-+ 'depend on the latest\n recommended Emacs release.\n',
-+ 'package': 'emacs', 'source': 'emacs-defaults',
-+ 'maintainer': 'Simpsons Developers <simpsons-devel-discuss@lists.springfield.org>',
-+ 'build_date_time_t': 1407430308, 'installed_size': '25', 'install_date': '2016-12-14T20:02:58Z'}
++ {
++ "version": "46.1",
++ "arch": "all",
++ "build_date": "2014-08-07T16:51:48Z",
++ "install_date_time_t": 1481745778,
++ "section": "editors",
++ "description": "GNU Emacs editor (metapackage)\n GNU Emacs is the extensible "
++ "self-documenting text editor.\n This is a metapackage that will always "
++ "depend on the latest\n recommended Emacs release.\n",
++ "package": "emacs",
++ "source": "emacs-defaults",
++ "maintainer": "Simpsons Developers ",
++ "build_date_time_t": 1407430308,
++ "installed_size": "25",
++ "install_date": "2016-12-14T20:02:58Z",
++ }
+ ]
+
def setup_loader_modules(self):
return {dpkg: {}}
-@@ -101,68 +125,47 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}):
- self.assertEqual(dpkg.file_dict('httpd'), 'Error: error')
+@@ -269,83 +308,71 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
+ dpkg.bin_pkg_info("package.deb")["name"], "package_name"
+ )
-+ @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-+ @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-+ @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
++ @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
++ @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
++ @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
def test_info(self):
- '''
+ """
- Test package info
+ Test info
+ :return:
- '''
-- mock = MagicMock(return_value={'retcode': 0,
-- 'stderr': '',
-- 'stdout':
-- os.linesep.join([
-- 'package:bash',
-- 'revision:',
-- 'architecture:amd64',
-- 'maintainer:Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>',
-- 'summary:',
-- 'source:bash',
-- 'version:4.4.18-2ubuntu1',
-- 'section:shells',
-- 'installed_size:1588',
-- 'size:',
-- 'MD5:',
-- 'SHA1:',
-- 'SHA256:',
-- 'origin:',
-- 'homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html',
-- 'status:ii ',
-- '======',
-- 'description:GNU Bourne Again SHell',
-- ' Bash is an sh-compatible command language interpreter that executes',
-- ' commands read from the standard input or from a file. Bash also',
-- ' incorporates useful features from the Korn and C shells (ksh and csh).',
-- ' .',
-- ' Bash is ultimately intended to be a conformant implementation of the',
-- ' IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).',
-- ' .',
-- ' The Programmable Completion Code, by Ian Macdonald, is now found in',
-- ' the bash-completion package.',
-- '------'
-- ])})
--
-- with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}), \
-- patch.dict(dpkg.__grains__, {'os': 'Ubuntu', 'osrelease_info': (18, 4)}), \
-- patch('salt.utils.path.which', MagicMock(return_value=False)), \
-- patch('os.path.exists', MagicMock(return_value=False)),\
-- patch('os.path.getmtime', MagicMock(return_value=1560199259.0)):
-- self.assertDictEqual(dpkg.info('bash'),
-- {'bash': {'architecture': 'amd64',
-- 'description': os.linesep.join([
-- 'GNU Bourne Again SHell',
-- ' Bash is an sh-compatible command language interpreter that executes',
-- ' commands read from the standard input or from a file. Bash also',
-- ' incorporates useful features from the Korn and C shells (ksh and csh).',
-- ' .',
-- ' Bash is ultimately intended to be a conformant implementation of the',
-- ' IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).',
-- ' .',
-- ' The Programmable Completion Code, by Ian Macdonald, is now found in',
-- ' the bash-completion package.' + os.linesep
-- ]),
-- 'homepage': 'http://tiswww.case.edu/php/chet/bash/bashtop.html',
-- 'maintainer': 'Ubuntu Developers '
-- '<ubuntu-devel-discuss@lists.ubuntu.com>',
-- 'package': 'bash',
-- 'section': 'shells',
-- 'source': 'bash',
-- 'status': 'ii',
-- 'version': '4.4.18-2ubuntu1'}})
-+ ret = dpkg.info('emacs')
+ """
+- mock = MagicMock(
+- return_value={
+- "retcode": 0,
+- "stderr": "",
+- "stdout": os.linesep.join(
+- [
+- "package:bash",
+- "revision:",
+- "architecture:amd64",
+- "maintainer:Ubuntu Developers ",
+- "summary:",
+- "source:bash",
+- "version:4.4.18-2ubuntu1",
+- "section:shells",
+- "installed_size:1588",
+- "size:",
+- "MD5:",
+- "SHA1:",
+- "SHA256:",
+- "origin:",
+- "homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html",
+- "status:ii ",
+- "======",
+- "description:GNU Bourne Again SHell",
+- " Bash is an sh-compatible command language interpreter that executes",
+- " commands read from the standard input or from a file. Bash also",
+- " incorporates useful features from the Korn and C shells (ksh and csh).",
+- " .",
+- " Bash is ultimately intended to be a conformant implementation of the",
+- " IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).",
+- " .",
+- " The Programmable Completion Code, by Ian Macdonald, is now found in",
+- " the bash-completion package.",
+- "------",
+- ]
+- ),
+- }
++ ret = dpkg.info("emacs")
+
+ assert isinstance(ret, dict)
+ assert len(ret.keys()) == 1
-+ assert 'emacs' in ret
++ assert "emacs" in ret
+
-+ pkg_data = ret['emacs']
++ pkg_data = ret["emacs"]
+
+ assert isinstance(pkg_data, dict)
-+ for pkg_section in ['section', 'architecture', 'original-maintainer', 'maintainer', 'package', 'installed-size',
-+ 'build_date_time_t', 'sha256', 'origin', 'build_date', 'size', 'source', 'version',
-+ 'install_date_time_t', 'license', 'priority', 'description', 'md5sum', 'supported',
-+ 'filename', 'sha1', 'install_date', 'arch']:
++ for pkg_section in [
++ "section",
++ "architecture",
++ "original-maintainer",
++ "maintainer",
++ "package",
++ "installed-size",
++ "build_date_time_t",
++ "sha256",
++ "origin",
++ "build_date",
++ "size",
++ "source",
++ "version",
++ "install_date_time_t",
++ "license",
++ "priority",
++ "description",
++ "md5sum",
++ "supported",
++ "filename",
++ "sha1",
++ "install_date",
++ "arch",
++ ]:
+ assert pkg_section in pkg_data
+
-+ assert pkg_data['section'] == 'editors'
-+ assert pkg_data['maintainer'] == 'Simpsons Developers <simpsons-devel-discuss@lists.springfield.org>'
-+ assert pkg_data['license'] == 'BSD v3'
-+
-+ @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-+ @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-+ @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
++ assert pkg_data["section"] == "editors"
++ assert (
++ pkg_data["maintainer"]
++ == "Simpsons Developers "
+ )
++ assert pkg_data["license"] == "BSD v3"
+
+- with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}), patch.dict(
+- dpkg.__grains__, {"os": "Ubuntu", "osrelease_info": (18, 4)}
+- ), patch("salt.utils.path.which", MagicMock(return_value=False)), patch(
+- "os.path.exists", MagicMock(return_value=False)
+- ), patch(
+- "os.path.getmtime", MagicMock(return_value=1560199259.0)
+- ):
+- self.assertDictEqual(
+- dpkg.info("bash"),
+- {
+- "bash": {
+- "architecture": "amd64",
+- "description": os.linesep.join(
+- [
+- "GNU Bourne Again SHell",
+- " Bash is an sh-compatible command language interpreter that executes",
+- " commands read from the standard input or from a file. Bash also",
+- " incorporates useful features from the Korn and C shells (ksh and csh).",
+- " .",
+- " Bash is ultimately intended to be a conformant implementation of the",
+- " IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).",
+- " .",
+- " The Programmable Completion Code, by Ian Macdonald, is now found in",
+- " the bash-completion package." + os.linesep,
+- ]
+- ),
+- "homepage": "http://tiswww.case.edu/php/chet/bash/bashtop.html",
+- "maintainer": "Ubuntu Developers "
+- "",
+- "package": "bash",
+- "section": "shells",
+- "source": "bash",
+- "status": "ii",
+- "version": "4.4.18-2ubuntu1",
+- }
+- },
+- )
++ @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
++ @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
++ @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
+ def test_info_attr(self):
-+ '''
++ """
+ Test info with 'attr' parameter
+ :return:
-+ '''
-+ ret = dpkg.info('emacs', attr='arch,license,version')
++ """
++ ret = dpkg.info("emacs", attr="arch,license,version")
+ assert isinstance(ret, dict)
-+ assert 'emacs' in ret
-+ for attr in ['arch', 'license', 'version']:
-+ assert attr in ret['emacs']
++ assert "emacs" in ret
++ for attr in ["arch", "license", "version"]:
++ assert attr in ret["emacs"]
+
-+ assert ret['emacs']['arch'] == 'all'
-+ assert ret['emacs']['license'] == 'BSD v3'
-+ assert ret['emacs']['version'] == '46.1'
++ assert ret["emacs"]["arch"] == "all"
++ assert ret["emacs"]["license"] == "BSD v3"
++ assert ret["emacs"]["version"] == "46.1"
--
-2.16.4
+2.29.2
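
The 'attr' handling that this patch threads through aptpkg and dpkg reduces to one pattern: split the comma-separated string, then drop every key outside it. A minimal sketch of that pattern; filter_attrs and the sample data are illustrative, not part of the patch:

    def filter_attrs(pkg_data, attr=None):
        # attr arrives as a comma-separated string, e.g. "arch,license,version".
        if attr:
            wanted = attr.split(",")
            for key in list(pkg_data):
                if key not in wanted:
                    del pkg_data[key]
        return pkg_data

    print(filter_attrs({"arch": "all", "version": "46.1", "size": "1692"},
                       attr="arch,version"))
    # {'arch': 'all', 'version': '46.1'}
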
diff --git a/decide-if-the-source-should-be-actually-skipped.patch b/decide-if-the-source-should-be-actually-skipped.patch
deleted file mode 100644
index 3f625c4..0000000
--- a/decide-if-the-source-should-be-actually-skipped.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 615a8f8dfa8ef12eeb4c387e48309cc466b8597d Mon Sep 17 00:00:00 2001
-From: Bo Maryniuk <bo@suse.de>
-Date: Tue, 4 Dec 2018 16:39:08 +0100
-Subject: [PATCH] Decide if the source should be actually skipped
-
----
- salt/modules/aptpkg.py | 23 ++++++++++++++++++++++-
- 1 file changed, 22 insertions(+), 1 deletion(-)
-
-diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 4ec9158476..3b0d8423db 100644
---- a/salt/modules/aptpkg.py
-+++ b/salt/modules/aptpkg.py
-@@ -1620,6 +1620,27 @@ def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
- return ret
-
-
-+def _skip_source(source):
-+ '''
-+ Decide to skip source or not.
-+
-+ :param source:
-+ :return:
-+ '''
-+ if source.invalid:
-+ if source.uri and source.type and source.type in ("deb", "deb-src", "rpm", "rpm-src"):
-+ pieces = source.mysplit(source.line)
-+ if pieces[1].strip()[0] == "[":
-+ options = pieces.pop(1).strip("[]").split()
-+ if len(options) > 0:
-+ log.debug("Source %s will be included although is marked invalid", source.uri)
-+ return False
-+ return True
-+ else:
-+ return True
-+ return False
-+
-+
- def list_repos():
- '''
- Lists all repos in the sources.list (and sources.lists.d) files
-@@ -1635,7 +1656,7 @@ def list_repos():
- repos = {}
- sources = sourceslist.SourcesList()
- for source in sources.list:
-- if source.invalid:
-+ if _skip_source(source):
- continue
- repo = {}
- repo['file'] = source.file
---
-2.16.4
-
-
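
For reference, the deleted helper kept an apt source that python-apt flags invalid only because the line carries an option block. The trigger condition can be sketched over a raw sources.list line; this is a simplified stand-in for python-apt's own parsing, not the removed code:

    def has_option_block(line):
        # "deb [arch=amd64] http://..." is valid apt syntax, but the bracketed
        # options after the type are what make python-apt mark it invalid.
        pieces = line.split()
        return len(pieces) > 1 and pieces[1].startswith("[")

    print(has_option_block("deb [arch=amd64] http://repo.example/ stable main"))  # True
    print(has_option_block("deb http://repo.example/ stable main"))               # False
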
diff --git a/do-not-break-repo-files-with-multiple-line-values-on.patch b/do-not-break-repo-files-with-multiple-line-values-on.patch
index cf5bd69..5db42ff 100644
--- a/do-not-break-repo-files-with-multiple-line-values-on.patch
+++ b/do-not-break-repo-files-with-multiple-line-values-on.patch
@@ -1,4 +1,4 @@
-From f81a5b92d691c1d511a814f9344104dd37466bc3 Mon Sep 17 00:00:00 2001
+From e986ed8fc0d5da74374d9ded82e10c16fc984ca8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= <psuarezhernandez@suse.com>
Date: Wed, 29 May 2019 11:03:16 +0100
@@ -6,42 +6,45 @@ Subject: [PATCH] Do not break repo files with multiple line values on
yumpkg (bsc#1135360)
---
- tests/integration/modules/test_pkg.py | 48 +++++++++++++++++++++++++++++++++++
- 1 file changed, 48 insertions(+)
+ tests/integration/modules/test_pkg.py | 51 +++++++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
-index e8374db2c0..61748f9477 100644
+index 7a720523da..e32013800d 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
-@@ -182,6 +182,54 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+@@ -194,6 +194,57 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
if repo is not None:
- self.run_function('pkg.del_repo', [repo])
+ self.run_function("pkg.del_repo", [repo])
+ def test_mod_del_repo_multiline_values(self):
-+ '''
++ """
+ test modifying and deleting a software repository defined with multiline values
-+ '''
-+ os_grain = self.run_function('grains.item', ['os'])['os']
++ """
++ os_grain = self.run_function("grains.item", ["os"])["os"]
+ repo = None
+ try:
-+ if os_grain in ['CentOS', 'RedHat', 'SUSE']:
-+ my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/'
-+ expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/'
-+ major_release = int(
-+ self.run_function(
-+ 'grains.item',
-+ ['osmajorrelease']
-+ )['osmajorrelease']
++ if os_grain in ["CentOS", "RedHat", "SUSE"]:
++ my_baseurl = (
++ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
+ )
-+ repo = 'fakerepo'
-+ name = 'Fake repo for RHEL/CentOS/SUSE'
++ expected_get_repo_baseurl = (
++ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
++ )
++ major_release = int(
++ self.run_function("grains.item", ["osmajorrelease"])[
++ "osmajorrelease"
++ ]
++ )
++ repo = "fakerepo"
++ name = "Fake repo for RHEL/CentOS/SUSE"
+ baseurl = my_baseurl
-+ gpgkey = 'https://my.fake.repo/foo/bar/MY-GPG-KEY.pub'
-+ failovermethod = 'priority'
++ gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub"
++ failovermethod = "priority"
+ gpgcheck = 1
+ enabled = 1
+ ret = self.run_function(
-+ 'pkg.mod_repo',
++ "pkg.mod_repo",
+ [repo],
+ name=name,
+ baseurl=baseurl,
@@ -55,20 +58,20 @@ index e8374db2c0..61748f9477 100644
+ self.assertNotEqual(ret, {})
+ repo_info = ret[next(iter(ret))]
+ self.assertIn(repo, repo_info)
-+ self.assertEqual(repo_info[repo]['baseurl'], my_baseurl)
-+ ret = self.run_function('pkg.get_repo', [repo])
-+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
-+ self.run_function('pkg.mod_repo', [repo])
-+ ret = self.run_function('pkg.get_repo', [repo])
-+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
++ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
++ ret = self.run_function("pkg.get_repo", [repo])
++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
++ self.run_function("pkg.mod_repo", [repo])
++ ret = self.run_function("pkg.get_repo", [repo])
++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
+ finally:
+ if repo is not None:
-+ self.run_function('pkg.del_repo', [repo])
++ self.run_function("pkg.del_repo", [repo])
+
- @requires_salt_modules('pkg.owner')
+ @requires_salt_modules("pkg.owner")
def test_owner(self):
- '''
+ """
--
-2.16.4
+2.29.2
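
The test above pins down one expectation worth spelling out: a multi-line baseurl keeps its newlines when read back, while the leading indentation on continuation lines is stripped. A minimal sketch of that normalization, as an illustration of the expected behaviour rather than yumpkg's actual code path:

    def normalize_multiline(value):
        return "\n".join(part.strip() for part in value.splitlines())

    # "url\n url" as written to the repo file comes back as "url\nurl".
    assert normalize_multiline(
        "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
    ) == "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
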
diff --git a/do-not-crash-when-there-are-ipv6-established-connect.patch b/do-not-crash-when-there-are-ipv6-established-connect.patch
index 5c10d80..2af9dca 100644
--- a/do-not-crash-when-there-are-ipv6-established-connect.patch
+++ b/do-not-crash-when-there-are-ipv6-established-connect.patch
@@ -1,4 +1,4 @@
-From bfee3a7c47786bb860663de97fca26725101f1d0 Mon Sep 17 00:00:00 2001
+From 998136ffd4c8442e0c3a7030af3d8196abec6be1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= <psuarezhernandez@suse.com>
Date: Tue, 7 May 2019 15:33:51 +0100
@@ -11,24 +11,24 @@ Add unit test for '_netlink_tool_remote_on'
1 file changed, 5 insertions(+)
diff --git a/salt/utils/network.py b/salt/utils/network.py
-index 2ae2e213b7..307cab885f 100644
+index dd7fceb91a..d253ded3ab 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
-@@ -1442,8 +1442,13 @@ def _netlink_tool_remote_on(port, which_end):
- elif 'ESTAB' not in line:
+@@ -1623,8 +1623,13 @@ def _netlink_tool_remote_on(port, which_end):
+ elif "ESTAB" not in line:
continue
chunks = line.split()
-+ local_host, local_port = chunks[3].rsplit(':', 1)
- remote_host, remote_port = chunks[4].rsplit(':', 1)
++ local_host, local_port = chunks[3].rsplit(":", 1)
+ remote_host, remote_port = chunks[4].rsplit(":", 1)
-+ if which_end == 'remote_port' and int(remote_port) != port:
++ if which_end == "remote_port" and int(remote_port) != port:
+ continue
-+ if which_end == 'local_port' and int(local_port) != port:
++ if which_end == "local_port" and int(local_port) != port:
+ continue
remotes.add(remote_host.strip("[]"))
if valid is False:
--
-2.23.0
+2.29.2
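
The fix leans on the column layout of ss output: column 4 holds the local address and port, column 5 the peer. The per-line filter can be checked in isolation; the sample line below is fabricated for illustration:

    port, which_end = 4506, "local_port"
    line = "ESTAB 0 0 [::1]:4506 [::1]:52914"  # fabricated ss output line
    chunks = line.split()
    # rsplit on ":" keeps IPv6 brackets intact and splits off only the port;
    # the added checks skip rows whose queried end does not match the port.
    local_host, local_port = chunks[3].rsplit(":", 1)
    remote_host, remote_port = chunks[4].rsplit(":", 1)
    if which_end == "local_port" and int(local_port) == port:
        print(remote_host.strip("[]"))  # ::1
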
diff --git a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
index 2c00607..f4925d1 100644
--- a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
+++ b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
@@ -1,4 +1,4 @@
-From 3d5d89428ca333caa2c2259f679f8fffd7110ba6 Mon Sep 17 00:00:00 2001
+From 57f9da0bd7727c46eab866941fee46a3eaf8c8ea Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Fri, 21 Sep 2018 17:31:39 +0200
Subject: [PATCH] Do not load pip state if there is no 3rd party
@@ -6,40 +6,355 @@ Subject: [PATCH] Do not load pip state if there is no 3rd party
Safe import 3rd party dependency
---
- salt/modules/pip.py | 12 ++++++++++--
- 1 file changed, 10 insertions(+), 2 deletions(-)
+ salt/modules/pip.py | 93 ++++++++++++++++++++++++---------------------
+ 1 file changed, 50 insertions(+), 43 deletions(-)
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
-index 0a0773a8f4..f19593ed1a 100644
+index f7c101f6e4..742e0dd48a 100644
--- a/salt/modules/pip.py
+++ b/salt/modules/pip.py
-@@ -82,7 +82,10 @@ from __future__ import absolute_import, print_function, unicode_literals
- # Import python libs
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ r"""
+ Install Python packages with pip to either the system or a virtualenv
+
+@@ -77,9 +76,7 @@ of the 2015.5 branch:
+ The issue is described here: https://github.com/saltstack/salt/issues/46163
+
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+
+-# Import python libs
import logging
import os
--import pkg_resources
+ import re
+@@ -89,7 +86,6 @@ import tempfile
+
+ import pkg_resources # pylint: disable=3rd-party-module-not-gated
+
+-# Import Salt libs
+ import salt.utils.data
+ import salt.utils.files
+ import salt.utils.json
+@@ -101,6 +97,12 @@ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, CommandNotFoundError
+ from salt.ext import six
+
+try:
+ import pkg_resources
+except ImportError:
+ pkg_resources = None
- import re
- import shutil
- import sys
-@@ -121,7 +124,12 @@ def __virtual__():
++
++
+ # This needs to be named logger so we don't shadow it in pip.install
+ logger = logging.getLogger(__name__) # pylint: disable=C0103
+
+@@ -118,7 +120,12 @@ def __virtual__():
entire filesystem. If it's not installed in a conventional location, the
user is required to provide the location of pip each time it is used.
- '''
-- return 'pip'
+ """
+- return "pip"
+ if pkg_resources is None:
+ ret = False, 'Package dependency "pkg_resource" is missing'
+ else:
-+ ret = 'pip'
++ ret = "pip"
+
+ return ret
def _pip_bin_env(cwd, bin_env):
+@@ -140,7 +147,7 @@ def _clear_context(bin_env=None):
+ """
+ contextkey = "pip.version"
+ if bin_env is not None:
+- contextkey = "{0}.{1}".format(contextkey, bin_env)
++ contextkey = "{}.{}".format(contextkey, bin_env)
+ __context__.pop(contextkey, None)
+
+
+@@ -196,7 +203,7 @@ def _get_pip_bin(bin_env):
+ bin_path,
+ )
+ raise CommandNotFoundError(
+- "Could not find a pip binary in virtualenv {0}".format(bin_env)
++ "Could not find a pip binary in virtualenv {}".format(bin_env)
+ )
+
+ # bin_env is the python or pip binary
+@@ -209,11 +216,11 @@ def _get_pip_bin(bin_env):
+ return [os.path.normpath(bin_env)]
+
+ raise CommandExecutionError(
+- "Could not find a pip binary within {0}".format(bin_env)
++ "Could not find a pip binary within {}".format(bin_env)
+ )
+ else:
+ raise CommandNotFoundError(
+- "Access denied to {0}, could not find a pip binary".format(bin_env)
++ "Access denied to {}, could not find a pip binary".format(bin_env)
+ )
+
+
+@@ -283,7 +290,7 @@ def _resolve_requirements_chain(requirements):
+
+ chain = []
+
+- if isinstance(requirements, six.string_types):
++ if isinstance(requirements, str):
+ requirements = [requirements]
+
+ for req_file in requirements:
+@@ -300,7 +307,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
+ cleanup_requirements = []
+
+ if requirements is not None:
+- if isinstance(requirements, six.string_types):
++ if isinstance(requirements, str):
+ requirements = [r.strip() for r in requirements.split(",")]
+ elif not isinstance(requirements, list):
+ raise TypeError("requirements must be a string or list")
+@@ -314,7 +321,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
+ if not cached_requirements:
+ ret = {
+ "result": False,
+- "comment": "pip requirements file '{0}' not found".format(
++ "comment": "pip requirements file '{}' not found".format(
+ requirement
+ ),
+ }
+@@ -412,15 +419,15 @@ def _format_env_vars(env_vars):
+ ret = {}
+ if env_vars:
+ if isinstance(env_vars, dict):
+- for key, val in six.iteritems(env_vars):
+- if not isinstance(key, six.string_types):
++ for key, val in env_vars.items():
++ if not isinstance(key, str):
+ key = str(key) # future lint: disable=blacklisted-function
+- if not isinstance(val, six.string_types):
++ if not isinstance(val, str):
+ val = str(val) # future lint: disable=blacklisted-function
+ ret[key] = val
+ else:
+ raise CommandExecutionError(
+- "env_vars {0} is not a dictionary".format(env_vars)
++ "env_vars {} is not a dictionary".format(env_vars)
+ )
+ return ret
+
+@@ -762,9 +769,9 @@ def install(
+
+ if log:
+ if os.path.isdir(log):
+- raise IOError("'{0}' is a directory. Use --log path_to_file".format(log))
++ raise OSError("'{}' is a directory. Use --log path_to_file".format(log))
+ elif not os.access(log, os.W_OK):
+- raise IOError("'{0}' is not writeable".format(log))
++ raise OSError("'{}' is not writeable".format(log))
+
+ cmd.extend(["--log", log])
+
+@@ -790,12 +797,12 @@ def install(
+ int(timeout)
+ except ValueError:
+ raise ValueError(
+- "'{0}' is not a valid timeout, must be an integer".format(timeout)
++ "'{}' is not a valid timeout, must be an integer".format(timeout)
+ )
+ cmd.extend(["--timeout", timeout])
+
+ if find_links:
+- if isinstance(find_links, six.string_types):
++ if isinstance(find_links, str):
+ find_links = [l.strip() for l in find_links.split(",")]
+
+ for link in find_links:
+@@ -803,7 +810,7 @@ def install(
+ salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link)
+ ):
+ raise CommandExecutionError(
+- "'{0}' is not a valid URL or path".format(link)
++ "'{}' is not a valid URL or path".format(link)
+ )
+ cmd.extend(["--find-links", link])
+
+@@ -815,13 +822,13 @@ def install(
+
+ if index_url:
+ if not salt.utils.url.validate(index_url, VALID_PROTOS):
+- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url))
++ raise CommandExecutionError("'{}' is not a valid URL".format(index_url))
+ cmd.extend(["--index-url", index_url])
+
+ if extra_index_url:
+ if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
+ raise CommandExecutionError(
+- "'{0}' is not a valid URL".format(extra_index_url)
++ "'{}' is not a valid URL".format(extra_index_url)
+ )
+ cmd.extend(["--extra-index-url", extra_index_url])
+
+@@ -836,13 +843,13 @@ def install(
+ " use index_url and/or extra_index_url instead"
+ )
+
+- if isinstance(mirrors, six.string_types):
++ if isinstance(mirrors, str):
+ mirrors = [m.strip() for m in mirrors.split(",")]
+
+ cmd.append("--use-mirrors")
+ for mirror in mirrors:
+ if not mirror.startswith("http://"):
+- raise CommandExecutionError("'{0}' is not a valid URL".format(mirror))
++ raise CommandExecutionError("'{}' is not a valid URL".format(mirror))
+ cmd.extend(["--mirrors", mirror])
+
+ if disable_version_check:
+@@ -883,7 +890,7 @@ def install(
+ if exists_action.lower() not in ("s", "i", "w", "b"):
+ raise CommandExecutionError(
+ "The exists_action pip option only supports the values "
+- "s, i, w, and b. '{0}' is not valid.".format(exists_action)
++ "s, i, w, and b. '{}' is not valid.".format(exists_action)
+ )
+ cmd.extend(["--exists-action", exists_action])
+
+@@ -911,14 +918,14 @@ def install(
+ cmd.extend(["--cert", cert])
+
+ if global_options:
+- if isinstance(global_options, six.string_types):
++ if isinstance(global_options, str):
+ global_options = [go.strip() for go in global_options.split(",")]
+
+ for opt in global_options:
+ cmd.extend(["--global-option", opt])
+
+ if install_options:
+- if isinstance(install_options, six.string_types):
++ if isinstance(install_options, str):
+ install_options = [io.strip() for io in install_options.split(",")]
+
+ for opt in install_options:
+@@ -929,7 +936,7 @@ def install(
+ try:
+ pkgs = [p.strip() for p in pkgs.split(",")]
+ except AttributeError:
+- pkgs = [p.strip() for p in six.text_type(pkgs).split(",")]
++ pkgs = [p.strip() for p in str(pkgs).split(",")]
+ pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs))
+
+ # It's possible we replaced version-range commas with semicolons so
+@@ -945,7 +952,7 @@ def install(
+
+ if editable:
+ egg_match = re.compile(r"(?:#|#.*?&)egg=([^&]*)")
+- if isinstance(editable, six.string_types):
++ if isinstance(editable, str):
+ editable = [e.strip() for e in editable.split(",")]
+
+ for entry in editable:
+@@ -964,14 +971,14 @@ def install(
+ cmd.append("--allow-all-external")
+
+ if allow_external:
+- if isinstance(allow_external, six.string_types):
++ if isinstance(allow_external, str):
+ allow_external = [p.strip() for p in allow_external.split(",")]
+
+ for pkg in allow_external:
+ cmd.extend(["--allow-external", pkg])
+
+ if allow_unverified:
+- if isinstance(allow_unverified, six.string_types):
++ if isinstance(allow_unverified, str):
+ allow_unverified = [p.strip() for p in allow_unverified.split(",")]
+
+ for pkg in allow_unverified:
+@@ -1106,8 +1113,8 @@ def uninstall(
+ try:
+ # TODO make this check if writeable
+ os.path.exists(log)
+- except IOError:
+- raise IOError("'{0}' is not writeable".format(log))
++ except OSError:
++ raise OSError("'{}' is not writeable".format(log))
+
+ cmd.extend(["--log", log])
+
+@@ -1133,12 +1140,12 @@ def uninstall(
+ int(timeout)
+ except ValueError:
+ raise ValueError(
+- "'{0}' is not a valid timeout, must be an integer".format(timeout)
++ "'{}' is not a valid timeout, must be an integer".format(timeout)
+ )
+ cmd.extend(["--timeout", timeout])
+
+ if pkgs:
+- if isinstance(pkgs, six.string_types):
++ if isinstance(pkgs, str):
+ pkgs = [p.strip() for p in pkgs.split(",")]
+ if requirements:
+ for requirement in requirements:
+@@ -1323,7 +1330,7 @@ def version(bin_env=None, cwd=None, user=None):
+ cwd = _pip_bin_env(cwd, bin_env)
+ contextkey = "pip.version"
+ if bin_env is not None:
+- contextkey = "{0}.{1}".format(contextkey, bin_env)
++ contextkey = "{}.{}".format(contextkey, bin_env)
+
+ if contextkey in __context__:
+ return __context__[contextkey]
+@@ -1402,7 +1409,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None):
+ if match:
+ name, version_ = match.groups()
+ else:
+- logger.error("Can't parse line '{0}'".format(line))
++ logger.error("Can't parse line '{}'".format(line))
+ continue
+ packages[name] = version_
+
+@@ -1414,7 +1421,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None):
+ raise CommandExecutionError("Invalid JSON", info=result)
+
+ for pkg in pkgs:
+- packages[pkg["name"]] = "{0} [{1}]".format(
++ packages[pkg["name"]] = "{} [{}]".format(
+ pkg["latest_version"], pkg["latest_filetype"]
+ )
+
+@@ -1602,17 +1609,17 @@ def list_all_versions(
+ """
+ cwd = _pip_bin_env(cwd, bin_env)
+ cmd = _get_pip_bin(bin_env)
+- cmd.extend(["install", "{0}==versions".format(pkg)])
++ cmd.extend(["install", "{}==versions".format(pkg)])
+
+ if index_url:
+ if not salt.utils.url.validate(index_url, VALID_PROTOS):
+- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url))
++ raise CommandExecutionError("'{}' is not a valid URL".format(index_url))
+ cmd.extend(["--index-url", index_url])
+
+ if extra_index_url:
+ if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
+ raise CommandExecutionError(
+- "'{0}' is not a valid URL".format(extra_index_url)
++ "'{}' is not a valid URL".format(extra_index_url)
+ )
+ cmd.extend(["--extra-index-url", extra_index_url])
+
+@@ -1632,7 +1639,7 @@ def list_all_versions(
+ if not include_rc:
+ filtered.append("rc")
+ if filtered:
+- excludes = re.compile(r"^((?!{0}).)*$".format("|".join(filtered)))
++ excludes = re.compile(r"^((?!{}).)*$".format("|".join(filtered)))
+ else:
+ excludes = re.compile(r"")
+
--
-2.16.4
+2.29.2
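
The core of this patch is the usual Salt loader guard: soft-import the third-party dependency at module scope and let __virtual__ report the failure, instead of letting an ImportError kill module loading. The pattern in isolation, lifted from the hunks above:

    try:
        import pkg_resources
    except ImportError:
        pkg_resources = None

    def __virtual__():
        # Returning (False, reason) tells the Salt loader to skip this module
        # with a readable message rather than a traceback.
        if pkg_resources is None:
            return False, 'Package dependency "pkg_resource" is missing'
        return "pip"
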
diff --git a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch b/do-not-make-ansiblegate-to-crash-on-python3-minions.patch
index 6725766..b3a8df1 100644
--- a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch
+++ b/do-not-make-ansiblegate-to-crash-on-python3-minions.patch
@@ -1,4 +1,4 @@
-From 235cca81be2f64ed3feb48ed42bfa3f9196bff39 Mon Sep 17 00:00:00 2001
+From 5d465a5b392efa1b4df7870161b32e0125efa4af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= <psuarezhernandez@suse.com>
Date: Fri, 28 Jun 2019 15:17:56 +0100
@@ -10,77 +10,315 @@ Move MockTimedProc implementation to tests.support.mock
Add unit test for ansible caller
---
- salt/modules/ansiblegate.py | 14 +++++++++---
- tests/support/mock.py | 31 +++++++++++++++++++++++++
- tests/unit/modules/test_ansiblegate.py | 41 ++++++++++++++++++++++++++++++++++
- tests/unit/modules/test_cmdmod.py | 35 ++---------------------------
- 4 files changed, 85 insertions(+), 36 deletions(-)
+ salt/modules/ansiblegate.py | 7 +-
+ tests/support/mock.py | 128 +++++++++-------
+ tests/unit/modules/test_ansiblegate.py | 201 +++++++++++++++++++++++++
+ tests/unit/modules/test_cmdmod.py | 1 +
+ 4 files changed, 280 insertions(+), 57 deletions(-)
+ create mode 100644 tests/unit/modules/test_ansiblegate.py
diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py
-index 6b903c2b94..8e28fcafa3 100644
+index 0279a26017..5d4b986ec2 100644
--- a/salt/modules/ansiblegate.py
+++ b/salt/modules/ansiblegate.py
-@@ -147,6 +147,10 @@ class AnsibleModuleCaller(object):
+@@ -160,6 +160,7 @@ class AnsibleModuleCaller:
:param kwargs: keywords to the module
:return:
- '''
-+ if six.PY3:
-+ python_exec = 'python3'
-+ else:
-+ python_exec = 'python'
+ """
++ python_exec = "python3"
module = self._resolver.load_module(module)
- if not hasattr(module, 'main'):
-@@ -162,9 +166,13 @@ class AnsibleModuleCaller(object):
- ["echo", "{0}".format(js_args)],
- stdout=subprocess.PIPE, timeout=self.timeout)
+ if not hasattr(module, "main"):
+@@ -182,9 +183,9 @@ class AnsibleModuleCaller:
+ timeout=self.timeout,
+ )
proc_out.run()
-+ if six.PY3:
-+ proc_out_stdout = proc_out.stdout.decode()
-+ else:
-+ proc_out_stdout = proc_out.stdout
+- proc_out_stdout = salt.utils.stringutils.to_str(proc_out.stdout)
++ proc_out_stdout = proc_out.stdout.decode()
proc_exc = salt.utils.timed_subprocess.TimedProc(
-- ['python', module.__file__],
-- stdin=proc_out.stdout, stdout=subprocess.PIPE, timeout=self.timeout)
+- [sys.executable, module.__file__],
+ [python_exec, module.__file__],
-+ stdin=proc_out_stdout, stdout=subprocess.PIPE, timeout=self.timeout)
- proc_exc.run()
-
- try:
-@@ -263,7 +271,7 @@ def help(module=None, *args):
- description = doc.get('description') or ''
- del doc['description']
- ret['Description'] = description
-- ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys()
-+ ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = [i for i in doc.keys()]
+ stdin=proc_out_stdout,
+ stdout=subprocess.PIPE,
+ timeout=self.timeout,
+@@ -298,7 +299,7 @@ def help(module=None, *args):
+ 'Available sections on module "{}"'.format(
+ module.__name__.replace("ansible.modules.", "")
+ )
+- ] = list(doc)
++ ] = [i for i in doc.keys()]
else:
for arg in args:
info = doc.get(arg)
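
The caller above builds a two-stage pipeline: echo the JSON arguments, then feed them on stdin to the module file run under python3. The same shape with plain subprocess, as a sketch rather than the TimedProc API; the JSON payload is illustrative:

    import subprocess

    js_args = '{"ANSIBLE_MODULE_ARGS": {}}'  # illustrative payload
    stage1 = subprocess.run(["echo", js_args], stdout=subprocess.PIPE, check=True)
    # stage1.stdout is bytes on Python 3, hence the .decode() the patch adds
    # before the value is handed to the second stage.
    stage2 = subprocess.run(
        ["python3", "-c", "import sys; print(sys.stdin.read().strip())"],
        input=stage1.stdout, stdout=subprocess.PIPE, check=True,
    )
    print(stage2.stdout.decode())  # {"ANSIBLE_MODULE_ARGS": {}}
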
diff --git a/tests/support/mock.py b/tests/support/mock.py
-index 805a60377c..67ecb4838a 100644
+index 7ef02e0701..87d052c399 100644
--- a/tests/support/mock.py
+++ b/tests/support/mock.py
-@@ -461,6 +461,37 @@ class MockOpen(object):
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Pedro Algarvio (pedro@algarvio.me)
+
+@@ -14,7 +13,6 @@
+ """
+ # pylint: disable=unused-import,function-redefined,blacklisted-module,blacklisted-external-module
+
+-from __future__ import absolute_import
+
+ import collections
+ import copy
+@@ -42,8 +40,6 @@ from mock import (
+ patch,
+ sentinel,
+ )
+-
+-# Import salt libs
+ from salt.ext import six
+
+ # pylint: disable=no-name-in-module,no-member
+@@ -57,7 +53,7 @@ if sys.version_info < (3, 6) and __mock_version < (2,):
+ raise ImportError("Please install mock>=2.0.0")
+
+
+-class MockFH(object):
++class MockFH:
+ def __init__(self, filename, read_data, *args, **kwargs):
+ self.filename = filename
+ self.read_data = read_data
+@@ -89,7 +85,7 @@ class MockFH(object):
+ """
+ # Newline will always be a bytestring on PY2 because mock_open will have
+ # normalized it to one.
+- newline = b"\n" if isinstance(read_data, six.binary_type) else "\n"
++ newline = b"\n" if isinstance(read_data, bytes) else "\n"
+
+ read_data = [line + newline for line in read_data.split(newline)]
+
+@@ -103,8 +99,7 @@ class MockFH(object):
+ # newline that we added in the list comprehension.
+ read_data[-1] = read_data[-1][:-1]
+
+- for line in read_data:
+- yield line
++ yield from read_data
+
+ @property
+ def write_calls(self):
+@@ -126,18 +121,18 @@ class MockFH(object):
+ def __check_read_data(self):
+ if not self.__read_data_ok:
+ if self.binary_mode:
+- if not isinstance(self.read_data, six.binary_type):
++ if not isinstance(self.read_data, bytes):
+ raise TypeError(
+- "{0} opened in binary mode, expected read_data to be "
+- "bytes, not {1}".format(
++ "{} opened in binary mode, expected read_data to be "
++ "bytes, not {}".format(
+ self.filename, type(self.read_data).__name__
+ )
+ )
+ else:
+ if not isinstance(self.read_data, str):
+ raise TypeError(
+- "{0} opened in non-binary mode, expected read_data to "
+- "be str, not {1}".format(
++ "{} opened in non-binary mode, expected read_data to "
++ "be str, not {}".format(
+ self.filename, type(self.read_data).__name__
+ )
+ )
+@@ -147,8 +142,8 @@ class MockFH(object):
+ def _read(self, size=0):
+ self.__check_read_data()
+ if not self.read_mode:
+- raise IOError("File not open for reading")
+- if not isinstance(size, six.integer_types) or size < 0:
++ raise OSError("File not open for reading")
++ if not isinstance(size, int) or size < 0:
+ raise TypeError("a positive integer is required")
+
+ joined = self.empty_string.join(self.read_data_iter)
+@@ -169,7 +164,7 @@ class MockFH(object):
+ # TODO: Implement "size" argument
+ self.__check_read_data()
+ if not self.read_mode:
+- raise IOError("File not open for reading")
++ raise OSError("File not open for reading")
+ ret = list(self.read_data_iter)
+ self.__loc += sum(len(x) for x in ret)
+ return ret
+@@ -178,7 +173,7 @@ class MockFH(object):
+ # TODO: Implement "size" argument
+ self.__check_read_data()
+ if not self.read_mode:
+- raise IOError("File not open for reading")
++ raise OSError("File not open for reading")
+ try:
+ ret = next(self.read_data_iter)
+ self.__loc += len(ret)
+@@ -189,7 +184,7 @@ class MockFH(object):
+ def __iter__(self):
+ self.__check_read_data()
+ if not self.read_mode:
+- raise IOError("File not open for reading")
++ raise OSError("File not open for reading")
+ while True:
+ try:
+ ret = next(self.read_data_iter)
+@@ -200,30 +195,22 @@ class MockFH(object):
+
+ def _write(self, content):
+ if not self.write_mode:
+- raise IOError("File not open for writing")
+- if six.PY2:
+- if isinstance(content, six.text_type):
+- # encoding intentionally not specified to force a
+- # UnicodeEncodeError when non-ascii unicode type is passed
+- content.encode()
+- else:
+- content_type = type(content)
+- if self.binary_mode and content_type is not bytes:
+- raise TypeError(
+- "a bytes-like object is required, not '{0}'".format(
+- content_type.__name__
+- )
+- )
+- elif not self.binary_mode and content_type is not str:
+- raise TypeError(
+- "write() argument must be str, not {0}".format(
+- content_type.__name__
+- )
++ raise OSError("File not open for writing")
++ content_type = type(content)
++ if self.binary_mode and content_type is not bytes:
++ raise TypeError(
++ "a bytes-like object is required, not '{}'".format(
++ content_type.__name__
+ )
++ )
++ elif not self.binary_mode and content_type is not str:
++ raise TypeError(
++ "write() argument must be str, not {}".format(content_type.__name__)
++ )
+
+ def _writelines(self, lines):
+ if not self.write_mode:
+- raise IOError("File not open for writing")
++ raise OSError("File not open for writing")
+ for line in lines:
+ self._write(line)
+
+@@ -234,26 +221,24 @@ class MockFH(object):
+ pass
+
+
+-class MockCall(object):
++class MockCall:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ def __repr__(self):
+ # future lint: disable=blacklisted-function
+- ret = str("MockCall(")
++ ret = "MockCall("
+ for arg in self.args:
+- ret += repr(arg) + str(", ")
++ ret += repr(arg) + ", "
+ if not self.kwargs:
+ if self.args:
+ # Remove trailing ', '
+ ret = ret[:-2]
+ else:
+- for key, val in six.iteritems(self.kwargs):
+- ret += str("{0}={1}").format(
+- salt.utils.stringutils.to_str(key), repr(val)
+- )
+- ret += str(")")
++ for key, val in self.kwargs.items():
++ ret += "{}={}".format(salt.utils.stringutils.to_str(key), repr(val))
++ ret += ")"
+ return ret
+ # future lint: enable=blacklisted-function
+
+@@ -264,7 +249,7 @@ class MockCall(object):
+ return self.args == other.args and self.kwargs == other.kwargs
+
+
+-class MockOpen(object):
++class MockOpen:
+ r'''
+ This class can be used to mock the use of ``open()``.
+
+@@ -379,7 +364,7 @@ class MockOpen(object):
+ # .__class__() used here to preserve the dict class in the event that
+ # an OrderedDict was used.
+ new_read_data = read_data.__class__()
+- for key, val in six.iteritems(read_data):
++ for key, val in read_data.items():
+ try:
+ val = salt.utils.data.decode(val, to_str=True)
+ except TypeError:
+@@ -424,7 +409,7 @@ class MockOpen(object):
+ except IndexError:
+ # We've run out of file contents, abort!
+ raise RuntimeError(
+- "File matching expression '{0}' opened more times than "
++ "File matching expression '{}' opened more times than "
+ "expected".format(matched_pattern)
+ )
+
+@@ -443,7 +428,7 @@ class MockOpen(object):
+ except KeyError:
+ # No matching glob in read_data, treat this as a file that does
+ # not exist and raise the appropriate exception.
+- raise IOError(errno.ENOENT, "No such file or directory", name)
++ raise OSError(errno.ENOENT, "No such file or directory", name)
+
+ def write_calls(self, path=None):
+ """
+@@ -451,7 +436,7 @@ class MockOpen(object):
+ the results to files matching a given pattern.
+ """
+ ret = []
+- for filename, handles in six.iteritems(self.filehandles):
++ for filename, handles in self.filehandles.items():
+ if path is None or fnmatch.fnmatch(filename, path):
+ for fh_ in handles:
+ ret.extend(fh_.write_calls)
+@@ -463,19 +448,54 @@ class MockOpen(object):
+ narrow the results to files matching a given pattern.
+ """
+ ret = []
+- for filename, handles in six.iteritems(self.filehandles):
++ for filename, handles in self.filehandles.items():
+ if path is None or fnmatch.fnmatch(filename, path):
+ for fh_ in handles:
ret.extend(fh_.writelines_calls)
return ret
-+class MockTimedProc(object):
-+ '''
+
+-class MockTimedProc(object):
++class MockTimedProc:
++ """
+ Class used as a stand-in for salt.utils.timed_subprocess.TimedProc
-+ '''
-+ class _Process(object):
-+ '''
++ """
++
++ class _Process:
++ """
+ Used to provide a dummy "process" attribute
-+ '''
++ """
++
+ def __init__(self, returncode=0, pid=12345):
+ self.returncode = returncode
+ self.pid = pid
+
+ def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345):
+ if stdout is not None and not isinstance(stdout, bytes):
-+ raise TypeError('Must pass stdout to MockTimedProc as bytes')
++ raise TypeError("Must pass stdout to MockTimedProc as bytes")
+ if stderr is not None and not isinstance(stderr, bytes):
-+ raise TypeError('Must pass stderr to MockTimedProc as bytes')
++ raise TypeError("Must pass stderr to MockTimedProc as bytes")
+ self._stdout = stdout
+ self._stderr = stderr
+ self.process = self._Process(returncode=returncode, pid=pid)
@@ -95,124 +333,238 @@ index 805a60377c..67ecb4838a 100644
+ @property
+ def stderr(self):
+ return self._stderr
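++
++# Illustrative note (not from the original patch): MockTimedProc mirrors the
++# salt.utils.timed_subprocess.TimedProc interface closely enough for tests:
++#
++#     proc = MockTimedProc(stdout=b'{"ok": true}')
++#     proc.run()
++#     assert proc.stdout == b'{"ok": true}' and proc.process.returncode == 0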
++
++
++class MockTimedProc:
+ """
+ Class used as a stand-in for salt.utils.timed_subprocess.TimedProc
+ """
- # reimplement mock_open to support multiple filehandles
- mock_open = MockOpen
+- class _Process(object):
++ class _Process:
+ """
+ Used to provide a dummy "process" attribute
+ """
diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py
-index 5613a0e79b..b7b43efda4 100644
---- a/tests/unit/modules/test_ansiblegate.py
+new file mode 100644
+index 0000000000..61aad44b5c
+--- /dev/null
+++ b/tests/unit/modules/test_ansiblegate.py
-@@ -29,11 +29,13 @@ from tests.support.unit import TestCase, skipIf
- from tests.support.mock import (
- patch,
- MagicMock,
-+ MockTimedProc,
- )
-
- import salt.modules.ansiblegate as ansible
- import salt.utils.platform
- from salt.exceptions import LoaderError
+@@ -0,0 +1,201 @@
++#
++# Author: Bo Maryniuk
++#
++# Copyright 2017 SUSE LLC
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++import os
++
++import salt.modules.ansiblegate as ansible
++import salt.utils.platform
++from salt.exceptions import LoaderError
+from salt.ext import six
-
-
- @skipIf(NO_PYTEST, False)
-@@ -134,3 +136,42 @@ description:
- '''
- with patch('salt.modules.ansiblegate.ansible', None):
- assert ansible.__virtual__() == 'ansible'
++from tests.support.mixins import LoaderModuleMockMixin
++from tests.support.mock import MagicMock, MockTimedProc, patch
++from tests.support.unit import TestCase, skipIf
++
++try:
++ import pytest
++except ImportError as import_error:
++ pytest = None
++NO_PYTEST = not bool(pytest)
++
++
++@skipIf(NO_PYTEST, False)
++@skipIf(salt.utils.platform.is_windows(), "Not supported on Windows")
++class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
++ def setUp(self):
++ self.resolver = ansible.AnsibleModuleResolver({})
++ self.resolver._modules_map = {
++ "one.two.three": os.sep + os.path.join("one", "two", "three.py"),
++ "four.five.six": os.sep + os.path.join("four", "five", "six.py"),
++ "three.six.one": os.sep + os.path.join("three", "six", "one.py"),
++ }
++
++ def tearDown(self):
++ self.resolver = None
++
++ def setup_loader_modules(self):
++ return {ansible: {}}
++
++ def test_ansible_module_help(self):
++ """
++ Test help extraction from the module
++ :return:
++ """
++
++ class Module:
++ """
++ An ansible module mock.
++ """
++
++ __name__ = "foo"
++ DOCUMENTATION = """
++---
++one:
++ text here
++---
++two:
++ text here
++description:
++ describe the second part
++ """
++
++ with patch.object(ansible, "_resolver", self.resolver), patch.object(
++ ansible._resolver, "load_module", MagicMock(return_value=Module())
++ ):
++ ret = ansible.help("dummy")
++ assert sorted(
++ ret.get('Available sections on module "{}"'.format(Module().__name__))
++ ) == ["one", "two"]
++ assert ret.get("Description") == "describe the second part"
++
++ def test_module_resolver_modlist(self):
++ """
++ Test Ansible resolver modules list.
++ :return:
++ """
++ assert self.resolver.get_modules_list() == [
++ "four.five.six",
++ "one.two.three",
++ "three.six.one",
++ ]
++ for ptr in ["five", "fi", "ve"]:
++ assert self.resolver.get_modules_list(ptr) == ["four.five.six"]
++ for ptr in ["si", "ix", "six"]:
++ assert self.resolver.get_modules_list(ptr) == [
++ "four.five.six",
++ "three.six.one",
++ ]
++ assert self.resolver.get_modules_list("one") == [
++ "one.two.three",
++ "three.six.one",
++ ]
++ assert self.resolver.get_modules_list("one.two") == ["one.two.three"]
++ assert self.resolver.get_modules_list("four") == ["four.five.six"]
++
++ def test_resolver_module_loader_failure(self):
++ """
++ Test Ansible module loader.
++ :return:
++ """
++ mod = "four.five.six"
++ with pytest.raises(ImportError) as import_error:
++ self.resolver.load_module(mod)
++
++ mod = "i.even.do.not.exist.at.all"
++ with pytest.raises(LoaderError) as loader_error:
++ self.resolver.load_module(mod)
++
++ def test_resolver_module_loader(self):
++ """
++ Test Ansible module loader.
++ :return:
++ """
++ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch(
++ "salt.modules.ansiblegate.importlib.import_module", lambda x: x
++ ):
++ assert (
++ self.resolver.load_module("four.five.six")
++ == "ansible.modules.four.five.six"
++ )
++
++ def test_resolver_module_loader_import_failure(self):
++ """
++ Test Ansible module loader failure.
++ :return:
++ """
++ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch(
++ "salt.modules.ansiblegate.importlib.import_module", lambda x: x
++ ):
++ with pytest.raises(LoaderError) as loader_error:
++ self.resolver.load_module("something.strange")
++
++ def test_virtual_function(self):
++ """
++ Test Ansible module __virtual__ when ansible is not installed on the minion.
++ :return:
++ """
++ with patch("salt.modules.ansiblegate.ansible", None):
++ assert ansible.__virtual__() == "ansible"
+
+ def test_ansible_module_call(self):
-+ '''
++ """
+ Test Ansible module call from ansible gate module
+
+ :return:
-+ '''
++ """
+
-+ class Module(object):
-+ '''
++ class Module:
++ """
+ An ansible module mock.
-+ '''
-+ __name__ = 'one.two.three'
-+ __file__ = 'foofile'
++ """
++
++ __name__ = "one.two.three"
++ __file__ = "foofile"
+
+ def main():
+ pass
+
+ ANSIBLE_MODULE_ARGS = '{"ANSIBLE_MODULE_ARGS": ["arg_1", {"kwarg1": "foobar"}]}'
+
-+ proc = MagicMock(side_effect=[
-+ MockTimedProc(
-+ stdout=ANSIBLE_MODULE_ARGS.encode(),
-+ stderr=None),
-+ MockTimedProc(stdout='{"completed": true}'.encode(), stderr=None)
-+ ])
++ proc = MagicMock(
++ side_effect=[
++ MockTimedProc(stdout=ANSIBLE_MODULE_ARGS.encode(), stderr=None),
++ MockTimedProc(stdout=b'{"completed": true}', stderr=None),
++ ]
++ )
+
-+ with patch.object(ansible, '_resolver', self.resolver), \
-+ patch.object(ansible._resolver, 'load_module', MagicMock(return_value=Module())):
++ with patch.object(ansible, "_resolver", self.resolver), patch.object(
++ ansible._resolver, "load_module", MagicMock(return_value=Module())
++ ):
+ _ansible_module_caller = ansible.AnsibleModuleCaller(ansible._resolver)
-+ with patch('salt.utils.timed_subprocess.TimedProc', proc):
-+ ret = _ansible_module_caller.call("one.two.three", "arg_1", kwarg1="foobar")
-+ if six.PY3:
-+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}'], stdout=-1, timeout=1200)
-+ proc.assert_any_call(['python3', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200)
-+ else:
-+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}'], stdout=-1, timeout=1200)
-+ proc.assert_any_call(['python', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200)
++ with patch("salt.utils.timed_subprocess.TimedProc", proc):
++ ret = _ansible_module_caller.call(
++ "one.two.three", "arg_1", kwarg1="foobar"
++ )
++ proc.assert_any_call(
++ [
++ "echo",
++ '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}',
++ ],
++ stdout=-1,
++ timeout=1200,
++ )
++ proc.assert_any_call(
++ ["python3", "foofile"],
++ stdin=ANSIBLE_MODULE_ARGS,
++ stdout=-1,
++ timeout=1200,
++ )
+ assert ret == {"completed": True, "timeout": 1200}
diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py
-index 8170a56b4e..f8fba59294 100644
+index 15b97f8568..f3348bc379 100644
--- a/tests/unit/modules/test_cmdmod.py
+++ b/tests/unit/modules/test_cmdmod.py
-@@ -26,6 +26,7 @@ from tests.support.helpers import TstSuiteLoggingHandler
- from tests.support.mock import (
- mock_open,
- Mock,
-+ MockTimedProc,
- MagicMock,
- patch
- )
-@@ -36,39 +37,7 @@ MOCK_SHELL_FILE = '# List of acceptable shells\n' \
- '/bin/bash\n'
+@@ -24,6 +24,7 @@ DEFAULT_SHELL = "foo/bar"
+ MOCK_SHELL_FILE = "# List of acceptable shells\n" "\n" "/bin/bash\n"
--class MockTimedProc(object):
-- '''
-- Class used as a stand-in for salt.utils.timed_subprocess.TimedProc
-- '''
-- class _Process(object):
-- '''
-- Used to provide a dummy "process" attribute
-- '''
-- def __init__(self, returncode=0, pid=12345):
-- self.returncode = returncode
-- self.pid = pid
--
-- def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345):
-- if stdout is not None and not isinstance(stdout, bytes):
-- raise TypeError('Must pass stdout to MockTimedProc as bytes')
-- if stderr is not None and not isinstance(stderr, bytes):
-- raise TypeError('Must pass stderr to MockTimedProc as bytes')
-- self._stdout = stdout
-- self._stderr = stderr
-- self.process = self._Process(returncode=returncode, pid=pid)
--
-- def run(self):
-- pass
--
-- @property
-- def stdout(self):
-- return self._stdout
--
-- @property
-- def stderr(self):
-- return self._stderr
--
--
+@skipIf(NO_MOCK, NO_MOCK_REASON)
class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ """
Unit tests for the salt.modules.cmdmod module
--
-2.16.4
+2.29.2
diff --git a/do-not-raise-streamclosederror-traceback-but-only-lo.patch b/do-not-raise-streamclosederror-traceback-but-only-lo.patch
index bf180aa..b9cba9d 100644
--- a/do-not-raise-streamclosederror-traceback-but-only-lo.patch
+++ b/do-not-raise-streamclosederror-traceback-but-only-lo.patch
@@ -1,4 +1,4 @@
-From b651c2cd8b719a72e66b63afd9061739624763e1 Mon Sep 17 00:00:00 2001
+From 81d0105b0c0464c375070ffbc863a020a67e7965 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 26 Aug 2020 10:24:58 +0100
@@ -10,18 +10,18 @@ Subject: [PATCH] Do not raise StreamClosedError traceback but only log
1 file changed, 1 deletion(-)
diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
-index 33ee3d4182..624eca5a9c 100644
+index f411907da2..5ff0956dde 100644
--- a/salt/transport/ipc.py
+++ b/salt/transport/ipc.py
-@@ -667,7 +667,6 @@ class IPCMessageSubscriber(IPCClient):
+@@ -688,7 +688,6 @@ class IPCMessageSubscriber(IPCClient):
except StreamClosedError as exc:
- log.trace('Subscriber disconnected from IPC %s', self.socket_path)
+ log.trace("Subscriber disconnected from IPC %s", self.socket_path)
self._read_stream_future = None
- exc_to_raise = exc
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception occurred in Subscriber while handling stream: %s', exc)
+ log.error("Exception occurred in Subscriber while handling stream: %s", exc)
self._read_stream_future = None
--
-2.28.0
+2.29.2
diff --git a/do-not-report-patches-as-installed-when-not-all-the-.patch b/do-not-report-patches-as-installed-when-not-all-the-.patch
deleted file mode 100644
index aaa0af0..0000000
--- a/do-not-report-patches-as-installed-when-not-all-the-.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 7e9adda8dfd53050756d0ac0cf64570b76ce7365 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Wed, 13 Mar 2019 16:14:07 +0000
-Subject: [PATCH] Do not report patches as installed when not all the
- related packages are installed (bsc#1128061)
-
-Co-authored-by: Mihai Dinca
----
- salt/modules/yumpkg.py | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index b1257d0de0..3ddf989511 100644
---- a/salt/modules/yumpkg.py
-+++ b/salt/modules/yumpkg.py
-@@ -3220,7 +3220,11 @@ def _get_patches(installed_only=False):
- for line in salt.utils.itertools.split(ret, os.linesep):
- inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
- line).groups()
-+<<<<<<< HEAD
- if advisory_id not in patches:
-+=======
-+ if not advisory_id in patches:
-+>>>>>>> Do not report patches as installed when not all the related packages are installed (bsc#1128061)
- patches[advisory_id] = {
- 'installed': True if inst == 'i' else False,
- 'summary': [pkg]
---
-2.16.4
-
-
diff --git a/don-t-call-zypper-with-more-than-one-no-refresh.patch b/don-t-call-zypper-with-more-than-one-no-refresh.patch
index 0929110..819bd79 100644
--- a/don-t-call-zypper-with-more-than-one-no-refresh.patch
+++ b/don-t-call-zypper-with-more-than-one-no-refresh.patch
@@ -1,4 +1,4 @@
-From c1f5e6332bf025394b81868bf1edc6ae44944a7c Mon Sep 17 00:00:00 2001
+From 421988aea296ced1f8c63cfa4b517b25eedfb00c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Tue, 29 Jan 2019 09:44:03 +0100
Subject: [PATCH] Don't call zypper with more than one --no-refresh
@@ -11,32 +11,32 @@ passed twice. Make sure we won't hit this.
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 04a6a6872d..37428cf67c 100644
+index 6fa6e3e0a1..dfaaf420a1 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -282,7 +282,7 @@ class _Zypper(object):
+@@ -300,7 +300,7 @@ class _Zypper:
self.__called = True
if self.__xml:
- self.__cmd.append('--xmlout')
+ self.__cmd.append("--xmlout")
- if not self.__refresh:
-+ if not self.__refresh and '--no-refresh' not in args:
- self.__cmd.append('--no-refresh')
-
- self.__cmd.extend(args)
++ if not self.__refresh and "--no-refresh" not in args:
+ self.__cmd.append("--no-refresh")
+ if self.__root:
+ self.__cmd.extend(["--root", self.__root])
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index b3162f10cd..956902eab3 100644
+index 7bff7065c6..b07f9a3af7 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -135,7 +135,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- self.assertEqual(zypper.__zypper__.call('foo'), stdout_xml_snippet)
+@@ -136,7 +136,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(zypper.__zypper__.call("foo"), stdout_xml_snippet)
self.assertEqual(len(sniffer.calls), 1)
-- zypper.__zypper__.call('bar')
-+ zypper.__zypper__.call('--no-refresh', 'bar')
+- zypper.__zypper__.call("bar")
++ zypper.__zypper__.call("--no-refresh", "bar")
self.assertEqual(len(sniffer.calls), 2)
- self.assertEqual(sniffer.calls[0]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'foo'])
- self.assertEqual(sniffer.calls[1]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'bar'])
+ self.assertEqual(
+ sniffer.calls[0]["args"][0],
--
-2.16.4
+2.29.2
diff --git a/drop-wrong-mock-from-chroot-unit-test.patch b/drop-wrong-mock-from-chroot-unit-test.patch
index 7f56f14..e9eb834 100644
--- a/drop-wrong-mock-from-chroot-unit-test.patch
+++ b/drop-wrong-mock-from-chroot-unit-test.patch
@@ -1,4 +1,4 @@
-From e2c3b1cb72b796fe12f94af64baa2e64cbe5db0b Mon Sep 17 00:00:00 2001
+From 3dc61b426cee5c40976ee25a0357fd07244a630b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Tue, 13 Oct 2020 12:02:00 +0100
@@ -9,11 +9,11 @@ Subject: [PATCH] Drop wrong mock from chroot unit test
1 file changed, 1 deletion(-)
diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py
-index 62808ed680..045d56c5b0 100644
+index 196e3ad27f..a0f3f8e6af 100644
--- a/tests/unit/modules/test_chroot.py
+++ b/tests/unit/modules/test_chroot.py
-@@ -83,7 +83,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
- self.assertTrue(chroot.create('/chroot'))
+@@ -71,7 +71,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertTrue(chroot.create("/chroot"))
makedirs.assert_called()
- @patch("salt.modules.chroot.exist")
@@ -21,6 +21,6 @@ index 62808ed680..045d56c5b0 100644
def test_in_chroot(self, fopen):
"""
--
-2.28.0
+2.29.2
diff --git a/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch b/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch
new file mode 100644
index 0000000..fd02a36
--- /dev/null
+++ b/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch
@@ -0,0 +1,99 @@
+From 79ae019ac7515614c6fbc620e66575f015bc447d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 5 Jan 2021 09:34:45 +0000
+Subject: [PATCH] Drop wrong virt capabilities code after rebasing
+ patches
+
+---
+ salt/modules/virt.py | 66 --------------------------------------------
+ 1 file changed, 66 deletions(-)
+
+diff --git a/salt/modules/virt.py b/salt/modules/virt.py
+index e3960a5a90..786bfa1e58 100644
+--- a/salt/modules/virt.py
++++ b/salt/modules/virt.py
+@@ -143,7 +143,6 @@ import salt.utils.xmlutil as xmlutil
+ import salt.utils.yaml
+ from salt._compat import ElementTree, ipaddress, saxutils
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+-from salt.ext import six
+ from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
+ from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
+ from salt.utils.virt import check_remote, download_remote
+@@ -5416,71 +5415,6 @@ def _parse_domain_caps(caps):
+ return result
+
+
+-def _parse_domain_caps(caps):
+- """
+- Parse the XML document of domain capabilities into a structure.
+- """
+- result = {
+- "emulator": caps.find("path").text if caps.find("path") is not None else None,
+- "domain": caps.find("domain").text if caps.find("domain") is not None else None,
+- "machine": caps.find("machine").text
+- if caps.find("machine") is not None
+- else None,
+- "arch": caps.find("arch").text if caps.find("arch") is not None else None,
+- }
+-
+-
+-def all_capabilities(**kwargs):
+- """
+- Return the host and domain capabilities in a single call.
+-
+- .. versionadded:: 3001
+-
+- :param connection: libvirt connection URI, overriding defaults
+- :param username: username to connect with, overriding defaults
+- :param password: password to connect with, overriding defaults
+-
+- CLI Example:
+-
+- .. code-block:: bash
+-
+- salt '*' virt.all_capabilities
+-
+- """
+- conn = __get_conn(**kwargs)
+- try:
+- host_caps = ElementTree.fromstring(conn.getCapabilities())
+- domains = [
+- [
+- (guest.get("arch", {}).get("name", None), key)
+- for key in guest.get("arch", {}).get("domains", {}).keys()
+- ]
+- for guest in [
+- _parse_caps_guest(guest) for guest in host_caps.findall("guest")
+- ]
+- ]
+- flattened = [pair for item in (x for x in domains) for pair in item]
+- result = {
+- "host": {
+- "host": _parse_caps_host(host_caps.find("host")),
+- "guests": [
+- _parse_caps_guest(guest) for guest in host_caps.findall("guest")
+- ],
+- },
+- "domains": [
+- _parse_domain_caps(
+- ElementTree.fromstring(
+- conn.getDomainCapabilities(None, arch, None, domain)
+- )
+- )
+- for (arch, domain) in flattened
+- ],
+- }
+- return result
+- finally:
+- conn.close()
+-
+-
+ def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
+ """
+ Return the domain capabilities given an emulator, architecture, machine or virtualization type.
+--
+2.29.2
+
+
diff --git a/early-feature-support-config.patch b/early-feature-support-config.patch
index db7592a..297c074 100644
--- a/early-feature-support-config.patch
+++ b/early-feature-support-config.patch
@@ -1,4 +1,4 @@
-From 33a85b16a4740f3dd803fd0e47e26819afeecdd7 Mon Sep 17 00:00:00 2001
+From 550db5157741b0a252bfc684f3496a7fd6d674ad Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Tue, 10 Jul 2018 12:06:33 +0200
Subject: [PATCH] early feature: support-config
@@ -291,12 +291,12 @@ Remove development stub. Ughh...
Removed blacklist of pkg_resources
---
- salt/cli/support/__init__.py | 65 +++
- salt/cli/support/collector.py | 495 ++++++++++++++++++++++
- salt/cli/support/console.py | 165 ++++++++
- salt/cli/support/intfunc.py | 42 ++
- salt/cli/support/localrunner.py | 34 ++
- salt/cli/support/profiles/default.yml | 71 ++++
+ salt/cli/support/__init__.py | 76 +++
+ salt/cli/support/collector.py | 538 +++++++++++++++++++++
+ salt/cli/support/console.py | 184 +++++++
+ salt/cli/support/intfunc.py | 40 ++
+ salt/cli/support/localrunner.py | 33 ++
+ salt/cli/support/profiles/default.yml | 71 +++
salt/cli/support/profiles/jobs-active.yml | 3 +
salt/cli/support/profiles/jobs-last.yml | 3 +
salt/cli/support/profiles/jobs-trace.yml | 7 +
@@ -304,12 +304,12 @@ Removed blacklist of pkg_resources
salt/cli/support/profiles/postgres.yml | 11 +
salt/cli/support/profiles/salt.yml | 9 +
salt/cli/support/profiles/users.yml | 22 +
- salt/scripts.py | 14 +
- salt/utils/parsers.py | 65 +++
+ salt/scripts.py | 15 +
+ salt/utils/parsers.py | 114 +++++
scripts/salt-support | 11 +
setup.py | 2 +
- tests/unit/cli/test_support.py | 477 +++++++++++++++++++++
- 18 files changed, 1523 insertions(+)
+ tests/unit/cli/test_support.py | 553 ++++++++++++++++++++++
+ 18 files changed, 1719 insertions(+)
create mode 100644 salt/cli/support/__init__.py
create mode 100644 salt/cli/support/collector.py
create mode 100644 salt/cli/support/console.py
@@ -328,168 +328,172 @@ Removed blacklist of pkg_resources
diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py
new file mode 100644
-index 0000000000000000000000000000000000000000..6a98a2d65656c0ad89d921b6842067a7399eab2c
+index 0000000000..4fdf44186f
--- /dev/null
+++ b/salt/cli/support/__init__.py
-@@ -0,0 +1,65 @@
-+# coding=utf-8
-+'''
+@@ -0,0 +1,76 @@
++"""
+Get the default support scenario.
-+'''
-+from __future__ import print_function, unicode_literals, absolute_import
-+import yaml
-+import os
-+import salt.exceptions
-+import jinja2
++"""
+import logging
++import os
++
++import jinja2
++import salt.exceptions
++import yaml
+
+log = logging.getLogger(__name__)
+
+
+def _render_profile(path, caller, runner):
-+ '''
++ """
+ Render profile as Jinja2.
+ :param path:
+ :return:
-+ '''
-+ env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False)
-+ return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip()
++ """
++ env = jinja2.Environment(
++ loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False
++ )
++ return (
++ env.get_template(os.path.basename(path))
++ .render(salt=caller, runners=runner)
++ .strip()
++ )
+
+
+def get_profile(profile, caller, runner):
-+ '''
++ """
+ Get profile.
+
+ :param profile:
+ :return:
-+ '''
-+ profiles = profile.split(',')
++ """
++ profiles = profile.split(",")
+ data = {}
+ for profile in profiles:
+ if os.path.basename(profile) == profile:
-+ profile = profile.split('.')[0] # Trim extension if someone added it
-+ profile_path = os.path.join(os.path.dirname(__file__), 'profiles', profile + '.yml')
++ profile = profile.split(".")[0] # Trim extension if someone added it
++ profile_path = os.path.join(
++ os.path.dirname(__file__), "profiles", profile + ".yml"
++ )
+ else:
+ profile_path = profile
+ if os.path.exists(profile_path):
+ try:
+ rendered_template = _render_profile(profile_path, caller, runner)
-+ log.trace('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template))
++ log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template))
+ data.update(yaml.load(rendered_template))
+ except Exception as ex:
+ log.debug(ex, exc_info=True)
-+ raise salt.exceptions.SaltException('Rendering profile failed: {}'.format(ex))
++ raise salt.exceptions.SaltException(
++ "Rendering profile failed: {}".format(ex)
++ )
+ else:
-+ raise salt.exceptions.SaltException('Profile "{}" is not found.'.format(profile))
++ raise salt.exceptions.SaltException(
++ 'Profile "{}" is not found.'.format(profile)
++ )
+
+ return data
+
+
+def get_profiles(config):
-+ '''
++ """
+ Get available profiles.
+
+ :return:
-+ '''
++ """
+ profiles = []
-+ for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), 'profiles')):
-+ if profile_name.endswith('.yml'):
-+ profiles.append(profile_name.split('.')[0])
++ for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), "profiles")):
++ if profile_name.endswith(".yml"):
++ profiles.append(profile_name.split(".")[0])
+
+ return sorted(profiles)
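++
++
++# Minimal usage sketch (illustrative; "call" and "run" are assumed wrappers
++# around an execution-module caller and a runner, as built in collector.py):
++#
++#     import salt.cli.support
++#     scenario = salt.cli.support.get_profile("default", call, run)
++#     print(salt.cli.support.get_profiles(config))  # names of bundled profiles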
diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py
new file mode 100644
-index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc45441ff259fb
+index 0000000000..bfbf491f5b
--- /dev/null
+++ b/salt/cli/support/collector.py
-@@ -0,0 +1,495 @@
-+# coding=utf-8
-+from __future__ import absolute_import, print_function, unicode_literals
-+import os
-+import sys
+@@ -0,0 +1,538 @@
++import builtins as exceptions
+import copy
-+import yaml
+import json
+import logging
++import os
++import sys
+import tarfile
+import time
-+import salt.ext.six as six
-+
-+if six.PY2:
-+ import exceptions
-+else:
-+ import builtins as exceptions
-+ from io import IOBase as file
-+
+from io import BytesIO
++from io import IOBase as file
+
-+import salt.utils.stringutils
-+import salt.utils.parsers
-+import salt.utils.verify
-+import salt.utils.platform
-+import salt.utils.process
-+import salt.exceptions
-+import salt.defaults.exitcodes
+import salt.cli.caller
+import salt.cli.support
+import salt.cli.support.console
+import salt.cli.support.intfunc
+import salt.cli.support.localrunner
++import salt.defaults.exitcodes
++import salt.exceptions
++import salt.ext.six as six
+import salt.output.table_out
+import salt.runner
+import salt.utils.files
-+
++import salt.utils.parsers
++import salt.utils.platform
++import salt.utils.process
++import salt.utils.stringutils
++import salt.utils.verify
++import yaml
+
+salt.output.table_out.__opts__ = {}
+log = logging.getLogger(__name__)
+
+
-+class SupportDataCollector(object):
-+ '''
++class SupportDataCollector:
++ """
+ Data collector. It behaves just like another outputter,
+ except it writes the data to the archive files.
-+ '''
++ """
++
+ def __init__(self, name, output):
-+ '''
++ """
+ constructor of the data collector
+ :param name:
+ :param path:
+ :param format:
-+ '''
++ """
+ self.archive_path = name
+ self.__default_outputter = output
+ self.__format = format
+ self.__arch = None
+ self.__current_section = None
+ self.__current_section_name = None
-+ self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
++ self.__default_root = time.strftime("%Y.%m.%d-%H.%M.%S-snapshot")
+ self.out = salt.cli.support.console.MessagesOutput()
+
+ def open(self):
-+ '''
++ """
+ Opens archive.
+ :return:
-+ '''
++ """
+ if self.__arch is not None:
-+ raise salt.exceptions.SaltException('Archive already opened.')
-+ self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
++ raise salt.exceptions.SaltException("Archive already opened.")
++ self.__arch = tarfile.TarFile.bz2open(self.archive_path, "w")
+
+ def close(self):
-+ '''
++ """
+ Closes the archive.
+ :return:
-+ '''
++ """
+ if self.__arch is None:
-+ raise salt.exceptions.SaltException('Archive already closed')
++ raise salt.exceptions.SaltException("Archive already closed")
+ self._flush_content()
+ self.__arch.close()
+ self.__arch = None
+
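++ # Typical lifecycle (illustrative): open() -> add(section) -> write(...)
++ # -> close(); each buffered section is flushed into the tar.bz2 archive.
++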
+ def _flush_content(self):
-+ '''
++ """
+ Flush content to the archive
+ :return:
-+ '''
++ """
+ if self.__current_section is not None:
+ buff = BytesIO()
+ buff._dirty = False
@@ -499,51 +503,59 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+ self.out.put(ret_data.name, indent=4)
+ self.__arch.add(ret_data.name, arcname=ret_data.name)
+ else:
-+ buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
-+ buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
++ buff.write(salt.utils.stringutils.to_bytes(title + "\n"))
++ buff.write(
++ salt.utils.stringutils.to_bytes(("-" * len(title)) + "\n\n")
++ )
+ buff.write(salt.utils.stringutils.to_bytes(ret_data))
-+ buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
++ buff.write(salt.utils.stringutils.to_bytes("\n\n\n"))
+ buff._dirty = True
+ if buff._dirty:
+ buff.seek(0)
-+ tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
-+ if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
++ tar_info = tarfile.TarInfo(
++ name="{}/{}".format(
++ self.__default_root, self.__current_section_name
++ )
++ )
++ if not hasattr(buff, "getbuffer"): # Py2's BytesIO is older
+ buff.getbuffer = buff.getvalue
+ tar_info.size = len(buff.getbuffer())
+ self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
+
+ def add(self, name):
-+ '''
++ """
+ Start a new section.
+ :param name:
+ :return:
-+ '''
++ """
+ if self.__current_section:
+ self._flush_content()
+ self.discard_current(name)
+
+ def discard_current(self, name=None):
-+ '''
++ """
+ Discard current section
+ :return:
-+ '''
++ """
+ self.__current_section = []
+ self.__current_section_name = name
+
+ def write(self, title, data, output=None):
-+ '''
++ """
+ Add data to the currently opened section.
+ :return:
-+ '''
++ """
+ if not isinstance(data, (dict, list, tuple)):
-+ data = {'raw-content': str(data)}
++ data = {"raw-content": str(data)}
+ output = output or self.__default_outputter
+
-+ if output != 'null':
++ if output != "null":
+ try:
-+ if isinstance(data, dict) and 'return' in data:
-+ data = data['return']
-+ content = salt.output.try_printout(data, output, {'extension_modules': '', 'color': False})
++ if isinstance(data, dict) and "return" in data:
++ data = data["return"]
++ content = salt.output.try_printout(
++ data, output, {"extension_modules": "", "color": False}
++ )
+ except Exception: # Fall-back to just raw YAML
+ content = None
+ else:
@@ -551,20 +563,20 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+
+ if content is None:
+ data = json.loads(json.dumps(data))
-+ if isinstance(data, dict) and data.get('return'):
-+ data = data.get('return')
++ if isinstance(data, dict) and data.get("return"):
++ data = data.get("return")
+ content = yaml.safe_dump(data, default_flow_style=False, indent=4)
+
+ self.__current_section.append({title: content})
+
+ def link(self, title, path):
-+ '''
++ """
+ Add a static file on the file system.
+
+ :param title:
+ :param path:
+ :return:
-+ '''
++ """
+ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
+ # pylint: disable=W8470
+ if not isinstance(path, file):
@@ -574,188 +586,204 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+
+
+class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
-+ '''
++ """
+ Class to run Salt Support subsystem.
-+ '''
-+ RUNNER_TYPE = 'run'
-+ CALL_TYPE = 'call'
++ """
++
++ RUNNER_TYPE = "run"
++ CALL_TYPE = "call"
+
+ def _setup_fun_config(self, fun_conf):
-+ '''
++ """
+ Set up the function configuration.
+
+ :param fun_conf:
+ :return:
-+ '''
++ """
+ conf = copy.deepcopy(self.config)
-+ conf['file_client'] = 'local'
-+ conf['fun'] = ''
-+ conf['arg'] = []
-+ conf['kwarg'] = {}
-+ conf['cache_jobs'] = False
-+ conf['print_metadata'] = False
++ conf["file_client"] = "local"
++ conf["fun"] = ""
++ conf["arg"] = []
++ conf["kwarg"] = {}
++ conf["cache_jobs"] = False
++ conf["print_metadata"] = False
+ conf.update(fun_conf)
-+ conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
++ conf["fun"] = conf["fun"].split(":")[-1] # Discard typing prefix
+
+ return conf
+
+ def _get_runner(self, conf):
-+ '''
++ """
+ Get & setup runner.
+
+ :param conf:
+ :return:
-+ '''
++ """
+ conf = self._setup_fun_config(copy.deepcopy(conf))
-+ if not getattr(self, '_runner', None):
++ if not getattr(self, "_runner", None):
+ self._runner = salt.cli.support.localrunner.LocalRunner(conf)
+ else:
+ self._runner.opts = conf
+ return self._runner
+
+ def _get_caller(self, conf):
-+ '''
++ """
+ Get & setup caller from the factory.
+
+ :param conf:
+ :return:
-+ '''
++ """
+ conf = self._setup_fun_config(copy.deepcopy(conf))
-+ if not getattr(self, '_caller', None):
++ if not getattr(self, "_caller", None):
+ self._caller = salt.cli.caller.Caller.factory(conf)
+ else:
+ self._caller.opts = conf
+ return self._caller
+
+ def _local_call(self, call_conf):
-+ '''
++ """
+ Execute local call
-+ '''
++ """
+ try:
+ ret = self._get_caller(call_conf).call()
+ except SystemExit:
-+ ret = 'Data is not available at this moment'
++ ret = "Data is not available at this moment"
+ self.out.error(ret)
+ except Exception as ex:
-+ ret = 'Unhandled exception occurred: {}'.format(ex)
++ ret = "Unhandled exception occurred: {}".format(ex)
+ log.debug(ex, exc_info=True)
+ self.out.error(ret)
+
+ return ret
+
+ def _local_run(self, run_conf):
-+ '''
++ """
+ Execute local runner
+
+ :param run_conf:
+ :return:
-+ '''
++ """
+ try:
+ ret = self._get_runner(run_conf).run()
+ except SystemExit:
-+ ret = 'Runner is not available at this moment'
++ ret = "Runner is not available at this moment"
+ self.out.error(ret)
+ except Exception as ex:
-+ ret = 'Unhandled exception occurred: {}'.format(ex)
++ ret = "Unhandled exception occurred: {}".format(ex)
+ log.debug(ex, exc_info=True)
+
+ return ret
+
+ def _internal_function_call(self, call_conf):
-+ '''
++ """
+ Call internal function.
+
+ :param call_conf:
+ :return:
-+ '''
++ """
++
+ def stub(*args, **kwargs):
-+ message = 'Function {} is not available'.format(call_conf['fun'])
++ message = "Function {} is not available".format(call_conf["fun"])
+ self.out.error(message)
-+ log.debug('Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format(**call_conf))
++ log.debug(
++ 'Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format(
++ **call_conf
++ )
++ )
+ return message
+
-+ return getattr(salt.cli.support.intfunc,
-+ call_conf['fun'], stub)(self.collector,
-+ *call_conf['arg'],
-+ **call_conf['kwargs'])
++ return getattr(salt.cli.support.intfunc, call_conf["fun"], stub)(
++ self.collector, *call_conf["arg"], **call_conf["kwargs"]
++ )
+
+ def _get_action(self, action_meta):
-+ '''
++ """
+ Parse action and turn into a calling point.
+ :param action_meta:
+ :return:
-+ '''
++ """
+ conf = {
-+ 'fun': list(action_meta.keys())[0],
-+ 'arg': [],
-+ 'kwargs': {},
++ "fun": list(action_meta.keys())[0],
++ "arg": [],
++ "kwargs": {},
+ }
-+ if not len(conf['fun'].split('.')) - 1:
-+ conf['salt.int.intfunc'] = True
++ if not len(conf["fun"].split(".")) - 1:
++ conf["salt.int.intfunc"] = True
+
-+ action_meta = action_meta[conf['fun']]
-+ info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
-+ for arg in action_meta.get('args') or []:
++ action_meta = action_meta[conf["fun"]]
++ info = action_meta.get("info", "Action for {}".format(conf["fun"]))
++ for arg in action_meta.get("args") or []:
+ if not isinstance(arg, dict):
-+ conf['arg'].append(arg)
++ conf["arg"].append(arg)
+ else:
-+ conf['kwargs'].update(arg)
++ conf["kwargs"].update(arg)
+
-+ return info, action_meta.get('output'), conf
++ return info, action_meta.get("output"), conf
+
+ def collect_internal_data(self):
-+ '''
++ """
+ Dumps currently running pillars, configuration, etc.
+ :return:
-+ '''
-+ section = 'configuration'
++ """
++ section = "configuration"
+ self.out.put(section)
+ self.collector.add(section)
-+ self.out.put('Saving config', indent=2)
-+ self.collector.write('General Configuration', self.config)
-+ self.out.put('Saving pillars', indent=2)
-+ self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
++ self.out.put("Saving config", indent=2)
++ self.collector.write("General Configuration", self.config)
++ self.out.put("Saving pillars", indent=2)
++ self.collector.write(
++ "Active Pillars", self._local_call({"fun": "pillar.items"})
++ )
+
-+ section = 'highstate'
++ section = "highstate"
+ self.out.put(section)
+ self.collector.add(section)
-+ self.out.put('Saving highstate', indent=2)
-+ self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
++ self.out.put("Saving highstate", indent=2)
++ self.collector.write(
++ "Rendered highstate", self._local_call({"fun": "state.show_highstate"})
++ )
+
+ def _extract_return(self, data):
-+ '''
++ """
+ Extracts return data from the results.
+
+ :param data:
+ :return:
-+ '''
++ """
+ if isinstance(data, dict):
-+ data = data.get('return', data)
++ data = data.get("return", data)
+
+ return data
+
+ def collect_local_data(self):
-+ '''
++ """
+ Collects master system data.
+ :return:
-+ '''
++ """
++
+ def call(func, *args, **kwargs):
-+ '''
++ """
+ Call wrapper for templates
+ :param func:
+ :return:
-+ '''
-+ return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
++ """
++ return self._extract_return(
++ self._local_call({"fun": func, "arg": args, "kwarg": kwargs})
++ )
+
+ def run(func, *args, **kwargs):
-+ '''
++ """
+ Runner wrapper for templates
+ :param func:
+ :return:
-+ '''
-+ return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
++ """
++ return self._extract_return(
++ self._local_run({"fun": func, "arg": args, "kwarg": kwargs})
++ )
+
-+ scenario = salt.cli.support.get_profile(self.config['support_profile'], call, run)
++ scenario = salt.cli.support.get_profile(
++ self.config["support_profile"], call, run
++ )
+ for category_name in scenario:
+ self.out.put(category_name)
+ self.collector.add(category_name)
@@ -763,70 +791,89 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+ if not action:
+ continue
+ action_name = next(iter(action))
-+ if not isinstance(action[action_name], six.string_types):
++ if not isinstance(action[action_name], str):
+ info, output, conf = self._get_action(action)
-+ action_type = self._get_action_type(action) # run: for runners
++ action_type = self._get_action_type(
++ action
++ ) # run: for runners
+ if action_type == self.RUNNER_TYPE:
-+ self.out.put('Running {}'.format(info.lower()), indent=2)
++ self.out.put("Running {}".format(info.lower()), indent=2)
+ self.collector.write(info, self._local_run(conf), output=output)
+ elif action_type == self.CALL_TYPE:
-+ if not conf.get('salt.int.intfunc'):
-+ self.out.put('Collecting {}'.format(info.lower()), indent=2)
-+ self.collector.write(info, self._local_call(conf), output=output)
++ if not conf.get("salt.int.intfunc"):
++ self.out.put("Collecting {}".format(info.lower()), indent=2)
++ self.collector.write(
++ info, self._local_call(conf), output=output
++ )
+ else:
+ self.collector.discard_current()
+ self._internal_function_call(conf)
+ else:
-+ self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
++ self.out.error(
++ 'Unknown action type "{}" for action: {}'.format(
++ action_type, action
++ )
++ )
+ else:
+ # TODO: This needs to be moved then to the utils.
+ # But the code is not yet there (other PRs)
-+ self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
++ self.out.msg(
++ "\n".join(salt.cli.support.console.wrap(action[action_name])),
++ ident=2,
++ )
+
+ def _get_action_type(self, action):
-+ '''
++ """
+ Get action type.
+ :param action:
+ :return:
-+ '''
-+ action_name = next(iter(action or {'': None}))
-+ if ':' not in action_name:
-+ action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
++ """
++ action_name = next(iter(action or {"": None}))
++ if ":" not in action_name:
++ action_name = "{}:{}".format(self.CALL_TYPE, action_name)
+
-+ return action_name.split(':')[0] or None
++ return action_name.split(":")[0] or None
+
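++ # Illustrative mapping (not part of the original change):
++ # {"run:jobs.list_jobs": ...} -> "run" (runner action)
++ # {"pillar.items": ...} -> "call" (execution module, the default)
++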
+ def collect_targets_data(self):
-+ '''
++ """
+ Collects minion targets data
+ :return:
-+ '''
++ """
+ # TODO: remote collector?
+
+ def _cleanup(self):
-+ '''
++ """
+ Cleanup if crash/exception
+ :return:
-+ '''
-+ if (hasattr(self, 'config')
-+ and self.config.get('support_archive')
-+ and os.path.exists(self.config['support_archive'])):
-+ self.out.warning('Terminated earlier, cleaning up')
-+ os.unlink(self.config['support_archive'])
++ """
++ if (
++ hasattr(self, "config")
++ and self.config.get("support_archive")
++ and os.path.exists(self.config["support_archive"])
++ ):
++ self.out.warning("Terminated earlier, cleaning up")
++ os.unlink(self.config["support_archive"])
+
+ def _check_existing_archive(self):
-+ '''
++ """
+ Check if the archive exists. If it exists and --force was not specified,
+ bail out. Otherwise remove it and move on.
+
+ :return:
-+ '''
-+ if os.path.exists(self.config['support_archive']):
-+ if self.config['support_archive_force_overwrite']:
-+ self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
-+ os.unlink(self.config['support_archive'])
++ """
++ if os.path.exists(self.config["support_archive"]):
++ if self.config["support_archive_force_overwrite"]:
++ self.out.warning(
++ "Overwriting existing archive: {}".format(
++ self.config["support_archive"]
++ )
++ )
++ os.unlink(self.config["support_archive"])
+ ret = True
+ else:
-+ self.out.warning('File {} already exists.'.format(self.config['support_archive']))
++ self.out.warning(
++ "File {} already exists.".format(self.config["support_archive"])
++ )
+ ret = False
+ else:
+ ret = True
@@ -848,32 +895,36 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+ exit_code = salt.defaults.exitcodes.EX_GENERIC
+ self.out.error(ex)
+ else:
-+ if self.config['log_level'] not in ('quiet', ):
++ if self.config["log_level"] not in ("quiet",):
+ self.setup_logfile_logger()
+ salt.utils.verify.verify_log(self.config)
+ salt.cli.support.log = log # Pass update logger so trace is available
+
-+ if self.config['support_profile_list']:
-+ self.out.put('List of available profiles:')
-+ for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
-+ msg_template = ' {}. '.format(idx + 1) + '{}'
++ if self.config["support_profile_list"]:
++ self.out.put("List of available profiles:")
++ for idx, profile in enumerate(
++ salt.cli.support.get_profiles(self.config)
++ ):
++ msg_template = " {}. ".format(idx + 1) + "{}"
+ self.out.highlight(msg_template, profile)
+ exit_code = salt.defaults.exitcodes.EX_OK
-+ elif self.config['support_show_units']:
-+ self.out.put('List of available units:')
++ elif self.config["support_show_units"]:
++ self.out.put("List of available units:")
+ for idx, unit in enumerate(self.find_existing_configs(None)):
-+ msg_template = ' {}. '.format(idx + 1) + '{}'
++ msg_template = " {}. ".format(idx + 1) + "{}"
+ self.out.highlight(msg_template, unit)
+ exit_code = salt.defaults.exitcodes.EX_OK
+ else:
-+ if not self.config['support_profile']:
++ if not self.config["support_profile"]:
+ self.print_help()
+ raise SystemExit()
+
+ if self._check_existing_archive():
+ try:
-+ self.collector = SupportDataCollector(self.config['support_archive'],
-+ output=self.config['support_output_format'])
++ self.collector = SupportDataCollector(
++ self.config["support_archive"],
++ output=self.config["support_output_format"],
++ )
+ except Exception as ex:
+ self.out.error(ex)
+ exit_code = salt.defaults.exitcodes.EX_GENERIC
@@ -887,8 +938,11 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+ self.collector.close()
+
+ archive_path = self.collector.archive_path
-+ self.out.highlight('\nSupport data has been written to "{}" file.\n',
-+ archive_path, _main='YELLOW')
++ self.out.highlight(
++ '\nSupport data has been written to "{}" file.\n',
++ archive_path,
++ _main="YELLOW",
++ )
+ except Exception as ex:
+ self.out.error(ex)
+ log.debug(ex, exc_info=True)
@@ -900,61 +954,64 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544
+ sys.exit(exit_code)
diff --git a/salt/cli/support/console.py b/salt/cli/support/console.py
new file mode 100644
-index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7bd3720a9e
+index 0000000000..266b645479
--- /dev/null
+++ b/salt/cli/support/console.py
-@@ -0,0 +1,165 @@
-+# coding=utf-8
-+'''
+@@ -0,0 +1,184 @@
++"""
+Collection of tools to report messages to console.
+
+NOTE: This is expected to incorporate other formatting bits
+ from around the codebase and then be moved to utils.
-+'''
++"""
+
-+from __future__ import absolute_import, print_function, unicode_literals
+
-+import sys
+import os
-+import salt.utils.color
++import sys
+import textwrap
+
++import salt.utils.color
+
-+class IndentOutput(object):
-+ '''
++
++class IndentOutput:
++ """
+ Paint different indents in different colors.
-+ '''
++ """
++
+ def __init__(self, conf=None, device=sys.stdout):
+ if conf is None:
-+ conf = {0: 'CYAN', 2: 'GREEN', 4: 'LIGHT_BLUE', 6: 'BLUE'}
++ conf = {0: "CYAN", 2: "GREEN", 4: "LIGHT_BLUE", 6: "BLUE"}
+ self._colors_conf = conf
+ self._device = device
+ self._colors = salt.utils.color.get_colors()
-+ self._default_color = 'GREEN'
-+ self._default_hl_color = 'LIGHT_GREEN'
++ self._default_color = "GREEN"
++ self._default_hl_color = "LIGHT_GREEN"
+
+ def put(self, message, indent=0):
-+ '''
++ """
+ Print message with an indent.
+
+ :param message:
+ :param indent:
+ :return:
-+ '''
-+ color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color))
++ """
++ color = self._colors_conf.get(
++ indent + indent % 2, self._colors_conf.get(0, self._default_color)
++ )
+
-+ for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]:
++ for chunk in [" " * indent, self._colors[color], message, self._colors["ENDC"]]:
+ self._device.write(str(chunk))
+ self._device.write(os.linesep)
+ self._device.flush()
+
+
+class MessagesOutput(IndentOutput):
-+ '''
++ """
+ Messages output to the CLI.
-+ '''
-+ def msg(self, message, title=None, title_color=None, color='BLUE', ident=0):
-+ '''
++ """
++
++ def msg(self, message, title=None, title_color=None, color="BLUE", ident=0):
++ """
+ Hint message.
+
+ :param message:
@@ -963,7 +1020,7 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b
+ :param color:
+ :param ident:
+ :return:
-+ '''
++ """
+ if title and not title_color:
+ title_color = color
+ if title_color and not title:
@@ -972,49 +1029,55 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b
+ self.__colored_output(title, message, title_color, color, ident=ident)
+
+ def info(self, message, ident=0):
-+ '''
++ """
+ Write an info message to the CLI.
+
+ :param message:
+ :param ident:
+ :return:
-+ '''
-+ self.__colored_output('Info', message, 'GREEN', 'LIGHT_GREEN', ident=ident)
++ """
++ self.__colored_output("Info", message, "GREEN", "LIGHT_GREEN", ident=ident)
+
+ def warning(self, message, ident=0):
-+ '''
++ """
+ Write a warning message to the CLI.
+
+ :param message:
+ :param ident:
+ :return:
-+ '''
-+ self.__colored_output('Warning', message, 'YELLOW', 'LIGHT_YELLOW', ident=ident)
++ """
++ self.__colored_output("Warning", message, "YELLOW", "LIGHT_YELLOW", ident=ident)
+
+ def error(self, message, ident=0):
-+ '''
++ """
+ Write an error message to the CLI.
+
+ :param message:
+ :param ident:
+ :return:
-+ '''
-+ self.__colored_output('Error', message, 'RED', 'LIGHT_RED', ident=ident)
++ """
++ self.__colored_output("Error", message, "RED", "LIGHT_RED", ident=ident)
+
+ def __colored_output(self, title, message, title_color, message_color, ident=0):
-+ if title and not title.endswith(':'):
++ if title and not title.endswith(":"):
+ _linesep = title.endswith(os.linesep)
-+ title = '{}:{}'.format(title.strip(), _linesep and os.linesep or ' ')
++ title = "{}:{}".format(title.strip(), _linesep and os.linesep or " ")
+
-+ for chunk in [title_color and self._colors[title_color] or None, ' ' * ident,
-+ title, self._colors[message_color], message, self._colors['ENDC']]:
++ for chunk in [
++ title_color and self._colors[title_color] or None,
++ " " * ident,
++ title,
++ self._colors[message_color],
++ message,
++ self._colors["ENDC"],
++ ]:
+ if chunk:
+ self._device.write(str(chunk))
+ self._device.write(os.linesep)
+ self._device.flush()
+
+ def highlight(self, message, *values, **colors):
-+ '''
++ """
+ The highlighter treats the "message" parameter as a template,
+ "values" as the list of arguments substituted into it one after another,
+ and "colors" as designating either the highlight or an alternate color for each value.
@@ -1038,31 +1101,41 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b
+ :param formatted:
+ :param colors:
+ :return:
-+ '''
++ """
+
-+ m_color = colors.get('_main', self._default_color)
-+ h_color = colors.get('_highlight', self._default_hl_color)
++ m_color = colors.get("_main", self._default_color)
++ h_color = colors.get("_highlight", self._default_hl_color)
+
+ _values = []
+ for value in values:
-+ _values.append('{p}{c}{r}'.format(p=self._colors[colors.get(value, h_color)],
-+ c=value, r=self._colors[m_color]))
-+ self._device.write('{s}{m}{e}'.format(s=self._colors[m_color],
-+ m=message.format(*_values), e=self._colors['ENDC']))
++ _values.append(
++ "{p}{c}{r}".format(
++ p=self._colors[colors.get(value, h_color)],
++ c=value,
++ r=self._colors[m_color],
++ )
++ )
++ self._device.write(
++ "{s}{m}{e}".format(
++ s=self._colors[m_color],
++ m=message.format(*_values),
++ e=self._colors["ENDC"],
++ )
++ )
+ self._device.write(os.linesep)
+ self._device.flush()
+
+
+def wrap(txt, width=80, ident=0):
-+ '''
++ """
+ Wrap text to the required dimensions and clean it up, prepare for display.
+
+ :param txt:
+ :param width:
+ :return:
-+ '''
-+ ident = ' ' * ident
-+ txt = (txt or '').replace(os.linesep, ' ').strip()
++ """
++ ident = " " * ident
++ txt = (txt or "").replace(os.linesep, " ").strip()
+
+ wrapper = textwrap.TextWrapper()
+ wrapper.fix_sentence_endings = False
@@ -1071,35 +1144,33 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b
+ return wrapper.wrap(txt)
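
A note on the wrap() helper added above: it collapses hard newlines into spaces and delegates the actual width-splitting to textwrap. A minimal standalone sketch of the same idea (the indent configuration is an assumption, since part of the original body falls outside this hunk):

    import os
    import textwrap

    def wrap(txt, width=80, ident=0):
        # Collapse hard newlines so textwrap controls all line breaks.
        indent = " " * ident
        txt = (txt or "").replace(os.linesep, " ").strip()
        wrapper = textwrap.TextWrapper(width=width)
        wrapper.fix_sentence_endings = False
        # Assumption: the elided lines configure the indent roughly like this.
        wrapper.initial_indent = wrapper.subsequent_indent = indent
        return wrapper.wrap(txt)

    print(os.linesep.join(wrap("A long support message. " * 8, width=40, ident=4)))
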
diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py
new file mode 100644
-index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23856187c6
+index 0000000000..d3d8f83cb8
--- /dev/null
+++ b/salt/cli/support/intfunc.py
-@@ -0,0 +1,42 @@
-+# coding=utf-8
-+'''
+@@ -0,0 +1,40 @@
++"""
+Internal functions.
-+'''
++"""
+# Maybe this needs to become a module in the future?
+
-+from __future__ import absolute_import, print_function, unicode_literals
+import os
-+from salt.cli.support.console import MessagesOutput
-+import salt.utils.files
+
++import salt.utils.files
++from salt.cli.support.console import MessagesOutput
+
+out = MessagesOutput()
+
+
+def filetree(collector, path):
-+ '''
++ """
+ Add all files in the tree. If the "path" is a file,
+ only that file will be added.
+
+ :param path: File or directory
+ :return:
-+ '''
++ """
+ if not path:
-+ out.error('Path not defined', ident=2)
++ out.error("Path not defined", ident=2)
+ else:
+ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
+ # pylint: disable=W8470
@@ -1107,7 +1178,7 @@ index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23
+ filename = os.path.basename(path)
+ try:
+ file_ref = salt.utils.files.fopen(path) # pylint: disable=W
-+ out.put('Add {}'.format(filename), indent=2)
++ out.put("Add {}".format(filename), indent=2)
+ collector.add(filename)
+ collector.link(title=path, path=file_ref)
+ except Exception as err:
@@ -1119,47 +1190,46 @@ index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23
+ filetree(collector, fname)
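
The filetree() function added above walks a path recursively: a file is registered with the collector and linked by reference, while a directory recurses into each entry. A hypothetical sketch of the traversal with a stub collector (StubCollector is illustrative, not part of the patch):

    import os

    class StubCollector:
        # Stand-in for SupportDataCollector: just prints what it receives.
        def add(self, name):
            print("section:", name)

        def link(self, title, path):
            print("  linked:", title)

    def filetree(collector, path):
        if os.path.isfile(path):
            collector.add(os.path.basename(path))
            collector.link(title=path, path=path)
        elif os.path.isdir(path):
            for fname in os.listdir(path):
                filetree(collector, os.path.join(path, fname))

    filetree(StubCollector(), "/etc/hosts")
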
diff --git a/salt/cli/support/localrunner.py b/salt/cli/support/localrunner.py
new file mode 100644
-index 0000000000000000000000000000000000000000..26deb883bccc98079201d26deba01008cca72921
+index 0000000000..ad10eda0b0
--- /dev/null
+++ b/salt/cli/support/localrunner.py
-@@ -0,0 +1,34 @@
-+# coding=utf-8
-+'''
+@@ -0,0 +1,33 @@
++"""
+Local Runner
-+'''
++"""
++
++import logging
+
-+from __future__ import print_function, absolute_import, unicode_literals
+import salt.runner
+import salt.utils.platform
+import salt.utils.process
-+import logging
+
+log = logging.getLogger(__name__)
+
+
+class LocalRunner(salt.runner.Runner):
-+ '''
++ """
+ Runner class that changes its default behaviour.
-+ '''
++ """
+
+ def _proc_function(self, fun, low, user, tag, jid, daemonize=True):
-+ '''
++ """
+ Same as original _proc_function in AsyncClientMixin,
+ except it calls "low" without firing a print event.
-+ '''
++ """
+ if daemonize and not salt.utils.platform.is_windows():
+ salt.log.setup.shutdown_multiprocessing_logging()
+ salt.utils.process.daemonize()
+ salt.log.setup.setup_multiprocessing_logging()
+
-+ low['__jid__'] = jid
-+ low['__user__'] = user
-+ low['__tag__'] = tag
++ low["__jid__"] = jid
++ low["__user__"] = user
++ low["__tag__"] = tag
+
+ return self.low(fun, low, print_event=False, full_return=False)
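
The override above matters because the stock _proc_function fires a print event for every call, which would spam the console during a support dump. A sketch of the metadata injection it performs, stripped of the daemonize branch (enrich_low is a hypothetical helper name):

    def enrich_low(low, user, tag, jid):
        # Copy so the caller's dict is untouched, then tag the job.
        low = dict(low)
        low["__jid__"] = jid
        low["__user__"] = user
        low["__tag__"] = tag
        return low

    print(enrich_low({"fun": "jobs.list_jobs"}, "root", "salt/run", "20201201000000000000"))
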
diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..01d9a261933333a6246e932556cdd88f3adf1f82
+index 0000000000..01d9a26193
--- /dev/null
+++ b/salt/cli/support/profiles/default.yml
@@ -0,0 +1,71 @@
@@ -1236,7 +1306,7 @@ index 0000000000000000000000000000000000000000..01d9a261933333a6246e932556cdd88f
+
diff --git a/salt/cli/support/profiles/jobs-active.yml b/salt/cli/support/profiles/jobs-active.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..508c54ece79087c98ba2dd4bd0ee265b09520296
+index 0000000000..508c54ece7
--- /dev/null
+++ b/salt/cli/support/profiles/jobs-active.yml
@@ -0,0 +1,3 @@
@@ -1245,7 +1315,7 @@ index 0000000000000000000000000000000000000000..508c54ece79087c98ba2dd4bd0ee265b
+ info: List of all actively running jobs
diff --git a/salt/cli/support/profiles/jobs-last.yml b/salt/cli/support/profiles/jobs-last.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..e3b719f552d2288d15dc5af4d6c320e0386ed7d0
+index 0000000000..e3b719f552
--- /dev/null
+++ b/salt/cli/support/profiles/jobs-last.yml
@@ -0,0 +1,3 @@
@@ -1254,7 +1324,7 @@ index 0000000000000000000000000000000000000000..e3b719f552d2288d15dc5af4d6c320e0
+ info: List all detectable jobs and associated functions
diff --git a/salt/cli/support/profiles/jobs-trace.yml b/salt/cli/support/profiles/jobs-trace.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..00b28e0502e2e6dbd9fe3e990750cbc1f9a94a30
+index 0000000000..00b28e0502
--- /dev/null
+++ b/salt/cli/support/profiles/jobs-trace.yml
@@ -0,0 +1,7 @@
@@ -1267,7 +1337,7 @@ index 0000000000000000000000000000000000000000..00b28e0502e2e6dbd9fe3e990750cbc1
+ {% endfor %}
diff --git a/salt/cli/support/profiles/network.yml b/salt/cli/support/profiles/network.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..268f02e61fbe9b5f0870569a9343da6e778a7017
+index 0000000000..268f02e61f
--- /dev/null
+++ b/salt/cli/support/profiles/network.yml
@@ -0,0 +1,27 @@
@@ -1300,7 +1370,7 @@ index 0000000000000000000000000000000000000000..268f02e61fbe9b5f0870569a9343da6e
+ info: ARP table
diff --git a/salt/cli/support/profiles/postgres.yml b/salt/cli/support/profiles/postgres.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..2238752c7a90a09bddc9cd3cbf27acbbf2a85c1c
+index 0000000000..2238752c7a
--- /dev/null
+++ b/salt/cli/support/profiles/postgres.yml
@@ -0,0 +1,11 @@
@@ -1317,7 +1387,7 @@ index 0000000000000000000000000000000000000000..2238752c7a90a09bddc9cd3cbf27acbb
+ - /etc/postgresql
diff --git a/salt/cli/support/profiles/salt.yml b/salt/cli/support/profiles/salt.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..4b18d9887002f9a9efdd6f54870db4b74384a19e
+index 0000000000..4b18d98870
--- /dev/null
+++ b/salt/cli/support/profiles/salt.yml
@@ -0,0 +1,9 @@
@@ -1332,7 +1402,7 @@ index 0000000000000000000000000000000000000000..4b18d9887002f9a9efdd6f54870db4b7
+ - {{salt('config.get', 'log_file')}}
diff --git a/salt/cli/support/profiles/users.yml b/salt/cli/support/profiles/users.yml
new file mode 100644
-index 0000000000000000000000000000000000000000..391acdb606d2ebb35ac7cff10844fffd84d96915
+index 0000000000..391acdb606
--- /dev/null
+++ b/salt/cli/support/profiles/users.yml
@@ -0,0 +1,22 @@
@@ -1359,40 +1429,41 @@ index 0000000000000000000000000000000000000000..391acdb606d2ebb35ac7cff10844fffd
+ info: List of all available groups
+ output: table
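
Each profile above is a YAML document mapping section names to lists of collection actions; users.yml and jobs-trace.yml are additionally Jinja templates, rendered with a salt()/runners() callable before YAML parsing. A self-contained sketch of that render-then-parse pipeline (the template and the fake_salt stub are illustrative):

    import jinja2
    import yaml

    template = (
        "all-users:\n"
        "{% for user in salt('user.list_users') %}\n"
        "  - user.info:\n"
        "      info: Information about \"{{ user }}\"\n"
        "      args: [{{ user }}]\n"
        "{% endfor %}\n"
    )

    def fake_salt(fun, *args):
        # Stand-in for the real salt() callable exposed to the template.
        return ["pokemon"]

    rendered = jinja2.Environment().from_string(template).render(salt=fake_salt)
    print(yaml.safe_load(rendered))
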
diff --git a/salt/scripts.py b/salt/scripts.py
-index 5e623a578e8363b51e10af247b325069741064d5..401ec2055303dd3b342110ddbab155e30d5b4e31 100644
+index 8f3cde8477..e5c248f011 100644
--- a/salt/scripts.py
+++ b/salt/scripts.py
-@@ -579,3 +579,17 @@ def salt_unity():
+@@ -592,3 +592,18 @@ def salt_unity():
sys.argv.pop(1)
- s_fun = getattr(sys.modules[__name__], 'salt_{0}'.format(cmd))
+ s_fun = getattr(sys.modules[__name__], "salt_{}".format(cmd))
s_fun()
+
+
+def salt_support():
-+ '''
++ """
+ Run Salt Support, which collects system data, logs, etc. for debugging and support purposes.
+ :return:
-+ '''
++ """
+
+ import salt.cli.support.collector
-+ if '' in sys.path:
-+ sys.path.remove('')
++
++ if "" in sys.path:
++ sys.path.remove("")
+ client = salt.cli.support.collector.SaltSupport()
+ _install_signal_handlers(client)
+ client.run()
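
salt_support() is the console-script entry point: it drops the empty string from sys.path, wires up signal handlers, and runs the collector. The sys.path step in isolation (an empty string on sys.path means the current working directory, so removing it keeps files next to the invocation from shadowing Salt's own modules):

    import sys

    if "" in sys.path:
        sys.path.remove("")
    assert "" not in sys.path
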
diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
-index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa380495171 100644
+index 952f9aebc5..c1422a9556 100644
--- a/salt/utils/parsers.py
+++ b/salt/utils/parsers.py
-@@ -21,6 +21,7 @@ import getpass
- import logging
- import optparse
- import traceback
+@@ -17,6 +17,7 @@ import optparse
+ import os
+ import signal
+ import sys
+import tempfile
+ import traceback
+ import types
from functools import partial
-
-
-@@ -34,6 +35,7 @@ import salt.utils.args
+@@ -31,6 +32,7 @@ import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.jid
@@ -1400,49 +1471,98 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
-@@ -1913,6 +1915,69 @@ class SyndicOptionParser(six.with_metaclass(OptionParserMeta,
- self.get_config_file_path('minion'))
+@@ -2049,6 +2051,118 @@ class SyndicOptionParser(
+ return opts
-+class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser, ConfigDirMixIn,
-+ MergeConfigMixIn, LogLevelMixIn, TimeoutMixIn)):
++class SaltSupportOptionParser(
++ OptionParser,
++ ConfigDirMixIn,
++ MergeConfigMixIn,
++ LogLevelMixIn,
++ TimeoutMixIn,
++ metaclass=OptionParserMeta,
++):
+ default_timeout = 5
-+ description = 'Salt Support is a program to collect all support data: logs, system configuration etc.'
-+ usage = '%prog [options] \'\' [arguments]'
++ description = "Salt Support is a program to collect all support data: logs, system configuration etc."
++ usage = "%prog [options] '' [arguments]"
+ # ConfigDirMixIn config filename attribute
-+ _config_filename_ = 'master'
++ _config_filename_ = "master"
+
+ # LogLevelMixIn attributes
-+ _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
-+ _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']
++ _default_logging_level_ = config.DEFAULT_MASTER_OPTS["log_level"]
++ _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS["log_file"]
+
+ def _mixin_setup(self):
-+ self.add_option('-P', '--show-profiles', default=False, action='store_true',
-+ dest='support_profile_list', help='Show available profiles')
-+ self.add_option('-p', '--profile', default='', dest='support_profile',
-+ help='Specify support profile or comma-separated profiles, e.g.: "salt,network"')
-+ support_archive = '{t}/{h}-support.tar.bz2'.format(t=tempfile.gettempdir(),
-+ h=salt.utils.network.get_fqhostname())
-+ self.add_option('-a', '--archive', default=support_archive, dest='support_archive',
-+ help=('Specify name of the resulting support archive. '
-+ 'Default is "{f}".'.format(f=support_archive)))
-+ self.add_option('-u', '--unit', default='', dest='support_unit',
-+ help='Specify examined unit (default "master").')
-+ self.add_option('-U', '--show-units', default=False, action='store_true', dest='support_show_units',
-+ help='Show available units')
-+ self.add_option('-f', '--force', default=False, action='store_true', dest='support_archive_force_overwrite',
-+ help='Force overwrite existing archive, if exists')
-+ self.add_option('-o', '--out', default='null', dest='support_output_format',
-+ help=('Set the default output using the specified outputter, '
-+ 'unless profile does not overrides this. Default: "yaml".'))
++ self.add_option(
++ "-P",
++ "--show-profiles",
++ default=False,
++ action="store_true",
++ dest="support_profile_list",
++ help="Show available profiles",
++ )
++ self.add_option(
++ "-p",
++ "--profile",
++ default="",
++ dest="support_profile",
++ help='Specify support profile or comma-separated profiles, e.g.: "salt,network"',
++ )
++ support_archive = "{t}/{h}-support.tar.bz2".format(
++ t=tempfile.gettempdir(), h=salt.utils.network.get_fqhostname()
++ )
++ self.add_option(
++ "-a",
++ "--archive",
++ default=support_archive,
++ dest="support_archive",
++ help=(
++ "Specify name of the resulting support archive. "
++ 'Default is "{f}".'.format(f=support_archive)
++ ),
++ )
++ self.add_option(
++ "-u",
++ "--unit",
++ default="",
++ dest="support_unit",
++ help='Specify examined unit (default "master").',
++ )
++ self.add_option(
++ "-U",
++ "--show-units",
++ default=False,
++ action="store_true",
++ dest="support_show_units",
++ help="Show available units",
++ )
++ self.add_option(
++ "-f",
++ "--force",
++ default=False,
++ action="store_true",
++ dest="support_archive_force_overwrite",
++ help="Force overwrite existing archive, if exists",
++ )
++ self.add_option(
++ "-o",
++ "--out",
++ default="null",
++ dest="support_output_format",
++ help=(
++ "Set the default output using the specified outputter, "
++ 'unless the profile overrides it. Default: "yaml".'
++ ),
++ )
+
+ def find_existing_configs(self, default):
-+ '''
++ """
+ Find configuration files on the system.
+ :return:
-+ '''
++ """
+ configs = []
-+ for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']:
++ for cfg in [default, self._config_filename_, "minion", "proxy", "cloud", "spm"]:
+ if not cfg:
+ continue
+ config_path = self.get_config_file_path(cfg)
@@ -1450,15 +1570,15 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3
+ configs.append(cfg)
+
+ if default and default not in configs:
-+ raise SystemExit('Unknown configuration unit: {}'.format(default))
++ raise SystemExit("Unknown configuration unit: {}".format(default))
+
+ return configs
+
+ def setup_config(self, cfg=None):
-+ '''
++ """
+ Open suitable config file.
+ :return:
-+ '''
++ """
+ _opts, _args = optparse.OptionParser.parse_args(self)
+ configs = self.find_existing_configs(_opts.support_unit)
+ if cfg not in configs:
@@ -1467,72 +1587,70 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3
+ return config.master_config(self.get_config_file_path(cfg))
+
+
- class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
- OptionParser,
- ConfigDirMixIn,
+ class SaltCMDOptionParser(
+ OptionParser,
+ ConfigDirMixIn,
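
The parser added above resolves which daemon's configuration to load: find_existing_configs() probes each candidate name and keeps those whose file is present, and setup_config() falls back to the first hit when the requested unit has no config. A stub sketch of the probing logic (the EXISTING dict replaces the real get_config_file_path() lookup):

    EXISTING = {"master": "/etc/salt/master", "minion": "/etc/salt/minion"}

    def find_existing_configs(default):
        configs = []
        for cfg in [default, "master", "minion", "proxy", "cloud", "spm"]:
            if cfg and cfg in EXISTING and cfg not in configs:
                configs.append(cfg)
        if default and default not in configs:
            raise SystemExit("Unknown configuration unit: {}".format(default))
        return configs

    print(find_existing_configs("minion"))  # -> ['minion', 'master']
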
diff --git a/scripts/salt-support b/scripts/salt-support
new file mode 100755
-index 0000000000000000000000000000000000000000..48ce141c673aa390174f88f0e18b857c561ab6f5
+index 0000000000..4e0e79f3ea
--- /dev/null
+++ b/scripts/salt-support
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
-+'''
++"""
+Salt support collects logs,
+debug data, and system information
+for support purposes.
-+'''
++"""
+
+from salt.scripts import salt_support
+
-+if __name__ == '__main__':
++if __name__ == "__main__":
+ salt_support()
diff --git a/setup.py b/setup.py
-index 788664e14e1e93ffe51e9ace4409b48e9b4afeaf..06374647df5e82a21fc39b08d41c596f0483ff0c 100755
+index 08c84344ea..39a66fefba 100755
--- a/setup.py
+++ b/setup.py
-@@ -1058,6 +1058,7 @@ class SaltDistribution(distutils.dist.Distribution):
- 'scripts/salt-master',
- 'scripts/salt-minion',
- 'scripts/salt-proxy',
-+ 'scripts/salt-support',
- 'scripts/salt-ssh',
- 'scripts/salt-syndic',
- 'scripts/salt-unity',
-@@ -1093,6 +1094,7 @@ class SaltDistribution(distutils.dist.Distribution):
- 'salt-key = salt.scripts:salt_key',
- 'salt-master = salt.scripts:salt_master',
- 'salt-minion = salt.scripts:salt_minion',
-+ 'salt-support = salt.scripts:salt_support',
- 'salt-ssh = salt.scripts:salt_ssh',
- 'salt-syndic = salt.scripts:salt_syndic',
- 'salt-unity = salt.scripts:salt_unity',
+@@ -1253,6 +1253,7 @@ class SaltDistribution(distutils.dist.Distribution):
+ "scripts/salt-master",
+ "scripts/salt-minion",
+ "scripts/salt-proxy",
++ "scripts/salt-support",
+ "scripts/salt-ssh",
+ "scripts/salt-syndic",
+ "scripts/salt-unity",
+@@ -1299,6 +1300,7 @@ class SaltDistribution(distutils.dist.Distribution):
+ "salt-key = salt.scripts:salt_key",
+ "salt-master = salt.scripts:salt_master",
+ "salt-minion = salt.scripts:salt_minion",
++ "salt-support = salt.scripts:salt_support",
+ "salt-ssh = salt.scripts:salt_ssh",
+ "salt-syndic = salt.scripts:salt_syndic",
+ "salt-unity = salt.scripts:salt_unity",
diff --git a/tests/unit/cli/test_support.py b/tests/unit/cli/test_support.py
new file mode 100644
-index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c136438524008c7cd
+index 0000000000..dc0e99bb3d
--- /dev/null
+++ b/tests/unit/cli/test_support.py
-@@ -0,0 +1,477 @@
-+# -*- coding: utf-8 -*-
-+'''
+@@ -0,0 +1,553 @@
++"""
+ :codeauthor: Bo Maryniuk
-+'''
++"""
+
-+from __future__ import absolute_import, print_function, unicode_literals
+
-+from tests.support.unit import skipIf, TestCase
-+from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
++import os
+
++import jinja2
++import salt.cli.support.collector
++import salt.exceptions
++import salt.utils.files
++import yaml
++from salt.cli.support.collector import SaltSupport, SupportDataCollector
+from salt.cli.support.console import IndentOutput
-+from salt.cli.support.collector import SupportDataCollector, SaltSupport
+from salt.utils.color import get_colors
+from salt.utils.stringutils import to_bytes
-+import salt.exceptions
-+import salt.cli.support.collector
-+import salt.utils.files
-+import os
-+import yaml
-+import jinja2
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.unit import TestCase, skipIf
+
+try:
+ import pytest
@@ -1540,239 +1658,268 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+ pytest = None
+
+
-+@skipIf(not bool(pytest), 'Pytest needs to be installed')
++@skipIf(not bool(pytest), "Pytest needs to be installed")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class SaltSupportIndentOutputTestCase(TestCase):
-+ '''
++ """
+ Unit Tests for the salt-support indent output.
-+ '''
++ """
+
+ def setUp(self):
-+ '''
++ """
+ Setup test
+ :return:
-+ '''
++ """
+
-+ self.message = 'Stubborn processes on dumb terminal'
++ self.message = "Stubborn processes on dumb terminal"
+ self.device = MagicMock()
+ self.iout = IndentOutput(device=self.device)
+ self.colors = get_colors()
+
+ def tearDown(self):
-+ '''
++ """
+ Remove instances after test run
+ :return:
-+ '''
++ """
+ del self.message
+ del self.device
+ del self.iout
+ del self.colors
+
+ def test_standard_output(self):
-+ '''
++ """
+ Test console standard output.
-+ '''
++ """
+ self.iout.put(self.message)
+ assert self.device.write.called
+ assert self.device.write.call_count == 5
-+ for idx, data in enumerate(['', str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']):
++ for idx, data in enumerate(
++ ["", str(self.colors["CYAN"]), self.message, str(self.colors["ENDC"]), "\n"]
++ ):
+ assert self.device.write.call_args_list[idx][0][0] == data
+
+ def test_indent_output(self):
-+ '''
++ """
+ Test indent distance.
+ :return:
-+ '''
++ """
+ self.iout.put(self.message, indent=10)
-+ for idx, data in enumerate([' ' * 10, str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']):
++ for idx, data in enumerate(
++ [
++ " " * 10,
++ str(self.colors["CYAN"]),
++ self.message,
++ str(self.colors["ENDC"]),
++ "\n",
++ ]
++ ):
+ assert self.device.write.call_args_list[idx][0][0] == data
+
+ def test_color_config(self):
-+ '''
++ """
+ Test color config changes on each ident.
+ :return:
-+ '''
++ """
+
-+ conf = {0: 'MAGENTA', 2: 'RED', 4: 'WHITE', 6: 'YELLOW'}
++ conf = {0: "MAGENTA", 2: "RED", 4: "WHITE", 6: "YELLOW"}
+ self.iout = IndentOutput(conf=conf, device=self.device)
+ for indent in sorted(list(conf)):
+ self.iout.put(self.message, indent=indent)
+
+ step = 1
+ for ident_key in sorted(list(conf)):
-+ assert str(self.device.write.call_args_list[step][0][0]) == str(self.colors[conf[ident_key]])
++ assert str(self.device.write.call_args_list[step][0][0]) == str(
++ self.colors[conf[ident_key]]
++ )
+ step += 5
+
+
-+@skipIf(not bool(pytest), 'Pytest needs to be installed')
++@skipIf(not bool(pytest), "Pytest needs to be installed")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class SaltSupportCollectorTestCase(TestCase):
-+ '''
++ """
+ Collector tests.
-+ '''
++ """
++
+ def setUp(self):
-+ '''
++ """
+ Setup the test case
+ :return:
-+ '''
-+ self.archive_path = '/highway/to/hell'
++ """
++ self.archive_path = "/highway/to/hell"
+ self.output_device = MagicMock()
+ self.collector = SupportDataCollector(self.archive_path, self.output_device)
+
+ def tearDown(self):
-+ '''
++ """
+ Tear down the test case elements
+ :return:
-+ '''
++ """
+ del self.collector
+ del self.archive_path
+ del self.output_device
+
-+ @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock())
++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock())
+ def test_archive_open(self):
-+ '''
++ """
+ Test archive is opened.
+
+ :return:
-+ '''
++ """
+ self.collector.open()
+ assert self.collector.archive_path == self.archive_path
+ with pytest.raises(salt.exceptions.SaltException) as err:
+ self.collector.open()
-+ assert 'Archive already opened' in str(err)
++ assert "Archive already opened" in str(err)
+
-+ @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock())
++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock())
+ def test_archive_close(self):
-+ '''
++ """
+ Test archive is closed.
+
+ :return:
-+ '''
++ """
+ self.collector.open()
+ self.collector._flush_content = lambda: None
+ self.collector.close()
+ assert self.collector.archive_path == self.archive_path
+ with pytest.raises(salt.exceptions.SaltException) as err:
+ self.collector.close()
-+ assert 'Archive already closed' in str(err)
++ assert "Archive already closed" in str(err)
+
+ def test_archive_addwrite(self):
-+ '''
++ """
+ Test adding a section to the archive and writing to it.
+
+ :return:
-+ '''
++ """
+ archive = MagicMock()
-+ with patch('salt.cli.support.collector.tarfile.TarFile', archive):
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
+ self.collector.open()
-+ self.collector.add('foo')
-+ self.collector.write(title='title', data='data', output='null')
++ self.collector.add("foo")
++ self.collector.write(title="title", data="data", output="null")
+ self.collector._flush_content()
+
-+ assert (archive.bz2open().addfile.call_args[1]['fileobj'].read()
-+ == to_bytes('title\n-----\n\nraw-content: data\n\n\n\n'))
++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes(
++ "title\n-----\n\nraw-content: data\n\n\n\n"
++ )
+
-+ @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null'))
++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null"))
+ def test_archive_addlink(self):
-+ '''
++ """
+ Test adding a section to the archive and linking an external file or directory to it.
+
+ :return:
-+ '''
++ """
+ archive = MagicMock()
-+ with patch('salt.cli.support.collector.tarfile.TarFile', archive):
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
+ self.collector.open()
-+ self.collector.add('foo')
-+ self.collector.link(title='Backup Path', path='/path/to/backup.config')
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
+ self.collector._flush_content()
+
+ assert archive.bz2open().addfile.call_count == 1
-+ assert (archive.bz2open().addfile.call_args[1]['fileobj'].read()
-+ == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n'))
++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes(
++ "Backup Path\n-----------\n\npath=/dev/null\n\n\n"
++ )
+
-+ @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null'))
++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null"))
+ def test_archive_discard_section(self):
-+ '''
++ """
+ Test discarding a section from the archive.
+
+ :return:
-+ '''
++ """
+ archive = MagicMock()
-+ with patch('salt.cli.support.collector.tarfile.TarFile', archive):
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
+ self.collector.open()
-+ self.collector.add('solar-interference')
-+ self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config')
-+ self.collector.add('foo')
-+ self.collector.link(title='Backup Path', path='/path/to/backup.config')
++ self.collector.add("solar-interference")
++ self.collector.link(
++ title="Thermal anomaly", path="/path/to/another/great.config"
++ )
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
+ self.collector._flush_content()
+ assert archive.bz2open().addfile.call_count == 2
-+ assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read()
-+ == to_bytes('Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n'))
++ assert archive.bz2open().addfile.mock_calls[0][2][
++ "fileobj"
++ ].read() == to_bytes(
++ "Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n"
++ )
+ self.collector.close()
+
+ archive = MagicMock()
-+ with patch('salt.cli.support.collector.tarfile.TarFile', archive):
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
+ self.collector.open()
-+ self.collector.add('solar-interference')
-+ self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config')
++ self.collector.add("solar-interference")
++ self.collector.link(
++ title="Thermal anomaly", path="/path/to/another/great.config"
++ )
+ self.collector.discard_current()
-+ self.collector.add('foo')
-+ self.collector.link(title='Backup Path', path='/path/to/backup.config')
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
+ self.collector._flush_content()
+ assert archive.bz2open().addfile.call_count == 2
-+ assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read()
-+ == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n'))
++ assert archive.bz2open().addfile.mock_calls[0][2][
++ "fileobj"
++ ].read() == to_bytes("Backup Path\n-----------\n\npath=/dev/null\n\n\n")
+ self.collector.close()
+
+
-+@skipIf(not bool(pytest), 'Pytest needs to be installed')
++@skipIf(not bool(pytest), "Pytest needs to be installed")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class SaltSupportRunnerTestCase(TestCase):
-+ '''
++ """
+ Test runner class.
-+ '''
++ """
+
+ def setUp(self):
-+ '''
++ """
+ Set up test suite.
+ :return:
-+ '''
-+ self.archive_path = '/dev/null'
++ """
++ self.archive_path = "/dev/null"
+ self.output_device = MagicMock()
+ self.runner = SaltSupport()
-+ self.runner.collector = SupportDataCollector(self.archive_path, self.output_device)
++ self.runner.collector = SupportDataCollector(
++ self.archive_path, self.output_device
++ )
+
+ def tearDown(self):
-+ '''
++ """
+ Tear down.
+
+ :return:
-+ '''
++ """
+ del self.archive_path
+ del self.output_device
+ del self.runner
+
+ def test_function_config(self):
-+ '''
++ """
+ Test function config formation.
+
+ :return:
-+ '''
++ """
+ self.runner.config = {}
-+ msg = 'Electromagnetic energy loss'
-+ assert self.runner._setup_fun_config({'description': msg}) == {'print_metadata': False,
-+ 'file_client': 'local',
-+ 'fun': '', 'kwarg': {},
-+ 'description': msg,
-+ 'cache_jobs': False, 'arg': []}
++ msg = "Electromagnetic energy loss"
++ assert self.runner._setup_fun_config({"description": msg}) == {
++ "print_metadata": False,
++ "file_client": "local",
++ "fun": "",
++ "kwarg": {},
++ "description": msg,
++ "cache_jobs": False,
++ "arg": [],
++ }
+
+ def test_local_caller(self):
-+ '''
++ """
+ Test local caller.
+
+ :return:
-+ '''
-+ msg = 'Because of network lag due to too many people playing deathmatch'
++ """
++ msg = "Because of network lag due to too many people playing deathmatch"
+ caller = MagicMock()
+ caller().call = MagicMock(return_value=msg)
+
@@ -1781,19 +1928,22 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+ assert self.runner._local_call({}) == msg
+
+ caller().call = MagicMock(side_effect=SystemExit)
-+ assert self.runner._local_call({}) == 'Data is not available at this moment'
++ assert self.runner._local_call({}) == "Data is not available at this moment"
+
+ err_msg = "The UPS doesn't have a battery backup."
+ caller().call = MagicMock(side_effect=Exception(err_msg))
-+ assert self.runner._local_call({}) == "Unhandled exception occurred: The UPS doesn't have a battery backup."
++ assert (
++ self.runner._local_call({})
++ == "Unhandled exception occurred: The UPS doesn't have a battery backup."
++ )
+
+ def test_local_runner(self):
-+ '''
++ """
+ Test local runner.
+
+ :return:
-+ '''
-+ msg = 'Big to little endian conversion error'
++ """
++ msg = "Big to little endian conversion error"
+ runner = MagicMock()
+ runner().run = MagicMock(return_value=msg)
+
@@ -1802,163 +1952,202 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+ assert self.runner._local_run({}) == msg
+
+ runner().run = MagicMock(side_effect=SystemExit)
-+ assert self.runner._local_run({}) == 'Runner is not available at this moment'
++ assert self.runner._local_run({}) == "Runner is not available at this moment"
+
-+ err_msg = 'Trojan horse ran out of hay'
++ err_msg = "Trojan horse ran out of hay"
+ runner().run = MagicMock(side_effect=Exception(err_msg))
-+ assert self.runner._local_run({}) == 'Unhandled exception occurred: Trojan horse ran out of hay'
++ assert (
++ self.runner._local_run({})
++ == "Unhandled exception occurred: Trojan horse ran out of hay"
++ )
+
-+ @patch('salt.cli.support.intfunc', MagicMock(spec=[]))
++ @patch("salt.cli.support.intfunc", MagicMock(spec=[]))
+ def test_internal_function_call_stub(self):
-+ '''
++ """
+ Test missing internal function call is handled accordingly.
+
+ :return:
-+ '''
++ """
+ self.runner.out = MagicMock()
-+ out = self.runner._internal_function_call({'fun': 'everythingisawesome',
-+ 'arg': [], 'kwargs': {}})
-+ assert out == 'Function everythingisawesome is not available'
++ out = self.runner._internal_function_call(
++ {"fun": "everythingisawesome", "arg": [], "kwargs": {}}
++ )
++ assert out == "Function everythingisawesome is not available"
+
+ def test_internal_function_call(self):
-+ '''
++ """
+ Test missing internal function call is handled accordingly.
+
+ :return:
-+ '''
-+ msg = 'Internet outage'
++ """
++ msg = "Internet outage"
+ intfunc = MagicMock()
+ intfunc.everythingisawesome = MagicMock(return_value=msg)
+ self.runner.out = MagicMock()
-+ with patch('salt.cli.support.intfunc', intfunc):
-+ out = self.runner._internal_function_call({'fun': 'everythingisawesome',
-+ 'arg': [], 'kwargs': {}})
++ with patch("salt.cli.support.intfunc", intfunc):
++ out = self.runner._internal_function_call(
++ {"fun": "everythingisawesome", "arg": [], "kwargs": {}}
++ )
+ assert out == msg
+
+ def test_get_action(self):
-+ '''
++ """
+ Test action meta gets parsed.
+
+ :return:
-+ '''
-+ action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}}
-+ assert self.runner._get_action(action_meta) == ('List jobs filter', None,
-+ {'fun': 'run:jobs.list_jobs_filter', 'kwargs': {}, 'arg': [1]})
-+ action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}}
-+ assert self.runner._get_action(action_meta) == ('Information about "usbmux"', None,
-+ {'fun': 'user.info', 'kwargs': {}, 'arg': ['usbmux']})
++ """
++ action_meta = {
++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]}
++ }
++ assert self.runner._get_action(action_meta) == (
++ "List jobs filter",
++ None,
++ {"fun": "run:jobs.list_jobs_filter", "kwargs": {}, "arg": [1]},
++ )
++ action_meta = {
++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]}
++ }
++ assert self.runner._get_action(action_meta) == (
++ 'Information about "usbmux"',
++ None,
++ {"fun": "user.info", "kwargs": {}, "arg": ["usbmux"]},
++ )
+
+ def test_extract_return(self):
-+ '''
++ """
+ Test extract return from the output.
+
+ :return:
-+ '''
-+ out = {'key': 'value'}
++ """
++ out = {"key": "value"}
+ assert self.runner._extract_return(out) == out
-+ assert self.runner._extract_return({'return': out}) == out
++ assert self.runner._extract_return({"return": out}) == out
+
+ def test_get_action_type(self):
-+ '''
++ """
+ Test action meta determines action type.
+
+ :return:
-+ '''
-+ action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}}
-+ assert self.runner._get_action_type(action_meta) == 'run'
++ """
++ action_meta = {
++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]}
++ }
++ assert self.runner._get_action_type(action_meta) == "run"
+
-+ action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}}
-+ assert self.runner._get_action_type(action_meta) == 'call'
++ action_meta = {
++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]}
++ }
++ assert self.runner._get_action_type(action_meta) == "call"
+
-+ @patch('os.path.exists', MagicMock(return_value=True))
++ @patch("os.path.exists", MagicMock(return_value=True))
+ def test_cleanup(self):
-+ '''
++ """
+ Test cleanup routine.
+
+ :return:
-+ '''
-+ arch = '/tmp/killme.zip'
++ """
++ arch = "/tmp/killme.zip"
+ unlink = MagicMock()
-+ with patch('os.unlink', unlink):
-+ self.runner.config = {'support_archive': arch}
++ with patch("os.unlink", unlink):
++ self.runner.config = {"support_archive": arch}
+ self.runner.out = MagicMock()
+ self.runner._cleanup()
+
-+ assert self.runner.out.warning.call_args[0][0] == 'Terminated earlier, cleaning up'
++ assert (
++ self.runner.out.warning.call_args[0][0]
++ == "Terminated earlier, cleaning up"
++ )
+ unlink.assert_called_once_with(arch)
+
-+ @patch('os.path.exists', MagicMock(return_value=True))
++ @patch("os.path.exists", MagicMock(return_value=True))
+ def test_check_existing_archive(self):
-+ '''
++ """
+ Test check existing archive.
+
+ :return:
-+ '''
-+ arch = '/tmp/endothermal-recalibration.zip'
++ """
++ arch = "/tmp/endothermal-recalibration.zip"
+ unlink = MagicMock()
-+ with patch('os.unlink', unlink), patch('os.path.exists', MagicMock(return_value=False)):
-+ self.runner.config = {'support_archive': '',
-+ 'support_archive_force_overwrite': True}
++ with patch("os.unlink", unlink), patch(
++ "os.path.exists", MagicMock(return_value=False)
++ ):
++ self.runner.config = {
++ "support_archive": "",
++ "support_archive_force_overwrite": True,
++ }
+ self.runner.out = MagicMock()
+ assert self.runner._check_existing_archive()
+ assert self.runner.out.warning.call_count == 0
+
-+ with patch('os.unlink', unlink):
-+ self.runner.config = {'support_archive': arch,
-+ 'support_archive_force_overwrite': False}
++ with patch("os.unlink", unlink):
++ self.runner.config = {
++ "support_archive": arch,
++ "support_archive_force_overwrite": False,
++ }
+ self.runner.out = MagicMock()
+ assert not self.runner._check_existing_archive()
-+ assert self.runner.out.warning.call_args[0][0] == 'File {} already exists.'.format(arch)
++ assert self.runner.out.warning.call_args[0][
++ 0
++ ] == "File {} already exists.".format(arch)
+
-+ with patch('os.unlink', unlink):
-+ self.runner.config = {'support_archive': arch,
-+ 'support_archive_force_overwrite': True}
++ with patch("os.unlink", unlink):
++ self.runner.config = {
++ "support_archive": arch,
++ "support_archive_force_overwrite": True,
++ }
+ self.runner.out = MagicMock()
+ assert self.runner._check_existing_archive()
-+ assert self.runner.out.warning.call_args[0][0] == 'Overwriting existing archive: {}'.format(arch)
++ assert self.runner.out.warning.call_args[0][
++ 0
++ ] == "Overwriting existing archive: {}".format(arch)
+
+
-+@skipIf(not bool(pytest), 'Pytest needs to be installed')
++@skipIf(not bool(pytest), "Pytest needs to be installed")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class ProfileIntegrityTestCase(TestCase):
-+ '''
++ """
+ Default profile integrity
-+ '''
++ """
++
+ def setUp(self):
-+ '''
++ """
+ Set up test suite.
+
+ :return:
-+ '''
++ """
+ self.profiles = {}
-+ profiles = os.path.join(os.path.dirname(salt.cli.support.collector.__file__), 'profiles')
++ profiles = os.path.join(
++ os.path.dirname(salt.cli.support.collector.__file__), "profiles"
++ )
+ for profile in os.listdir(profiles):
-+ self.profiles[profile.split('.')[0]] = os.path.join(profiles, profile)
++ self.profiles[profile.split(".")[0]] = os.path.join(profiles, profile)
+
+ def tearDown(self):
-+ '''
++ """
+ Tear down test suite.
+
+ :return:
-+ '''
++ """
+ del self.profiles
+
+ def _render_template_to_yaml(self, name, *args, **kwargs):
-+ '''
++ """
+ Get a template reference for rendering.
+ :return:
-+ '''
++ """
+ with salt.utils.files.fopen(self.profiles[name]) as t_fh:
+ template = t_fh.read()
-+ return yaml.load(jinja2.Environment().from_string(template).render(*args, **kwargs))
++ return yaml.load(
++ jinja2.Environment().from_string(template).render(*args, **kwargs)
++ )
+
+ def test_non_template_profiles_parseable(self):
-+ '''
++ """
+ Test that the shipped non-template profiles are YAML-parseable.
+
+ :return:
-+ '''
-+ for t_name in ['default', 'jobs-active', 'jobs-last', 'network', 'postgres']:
++ """
++ for t_name in ["default", "jobs-active", "jobs-last", "network", "postgres"]:
+ with salt.utils.files.fopen(self.profiles[t_name]) as ref:
+ try:
+ yaml.load(ref)
@@ -1968,29 +2157,36 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+ assert parsed
+
+ def test_users_template_profile(self):
-+ '''
++ """
+ Test users template profile.
+
+ :return:
-+ '''
-+ users_data = self._render_template_to_yaml('users', salt=MagicMock(return_value=['pokemon']))
-+ assert len(users_data['all-users']) == 5
-+ for user_data in users_data['all-users']:
-+ for tgt in ['user.list_groups', 'shadow.info', 'cron.raw_cron']:
++ """
++ users_data = self._render_template_to_yaml(
++ "users", salt=MagicMock(return_value=["pokemon"])
++ )
++ assert len(users_data["all-users"]) == 5
++ for user_data in users_data["all-users"]:
++ for tgt in ["user.list_groups", "shadow.info", "cron.raw_cron"]:
+ if tgt in user_data:
-+ assert user_data[tgt]['args'] == ['pokemon']
++ assert user_data[tgt]["args"] == ["pokemon"]
+
+ def test_jobs_trace_template_profile(self):
-+ '''
++ """
+ Test jobs-trace template profile.
+
+ :return:
-+ '''
-+ jobs_trace = self._render_template_to_yaml('jobs-trace', runners=MagicMock(return_value=['0000']))
-+ assert len(jobs_trace['jobs-details']) == 1
-+ assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['info'] == 'Details on JID 0000'
-+ assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['args'] == [0]
++ """
++ jobs_trace = self._render_template_to_yaml(
++ "jobs-trace", runners=MagicMock(return_value=["0000"])
++ )
++ assert len(jobs_trace["jobs-details"]) == 1
++ assert (
++ jobs_trace["jobs-details"][0]["run:jobs.list_job"]["info"]
++ == "Details on JID 0000"
++ )
++ assert jobs_trace["jobs-details"][0]["run:jobs.list_job"]["args"] == [0]
--
-2.23.0
+2.29.2
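
One detail worth noting from the collector tests above: every archive section is serialized as a title, a dashed underline of matching length, and the rendered body. A sketch of that layout (serialize_section is an illustrative name, not the collector's actual method):

    def serialize_section(title, body):
        # Title, matching dashed underline, blank line, body.
        return "{t}\n{u}\n\n{b}\n\n".format(t=title, u="-" * len(title), b=body)

    print(serialize_section("Backup Path", "path=/dev/null"))
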
diff --git a/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
index 9c17720..519ef13 100644
--- a/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
+++ b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
@@ -1,4 +1,4 @@
-From cc3bd759bc0e4cc3414ccc5a2928c593fa2eee04 Mon Sep 17 00:00:00 2001
+From fec7f65b4debede8cf0eef335182fce2206e200d Mon Sep 17 00:00:00 2001
From: Maximilian Meister
Date: Thu, 3 May 2018 15:52:23 +0200
Subject: [PATCH] enable passing a unix_socket for mysql returners
@@ -15,14 +15,19 @@ the refactor is done upstream
Signed-off-by: Maximilian Meister
---
- salt/returners/mysql.py | 11 ++++++++---
- 1 file changed, 8 insertions(+), 3 deletions(-)
+ salt/returners/mysql.py | 63 ++++++++++++++++++++---------------------
+ 1 file changed, 30 insertions(+), 33 deletions(-)
diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py
-index 69599ec36a..ff9d380843 100644
+index b7bb05164f..4aa8aeddfa 100644
--- a/salt/returners/mysql.py
+++ b/salt/returners/mysql.py
-@@ -18,6 +18,7 @@ config. These are the defaults:
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Return data to a mysql server
+
+@@ -18,6 +17,7 @@ config. These are the defaults:
mysql.pass: 'salt'
mysql.db: 'salt'
mysql.port: 3306
@@ -30,7 +35,7 @@ index 69599ec36a..ff9d380843 100644
SSL is optional. The defaults are set to None. If you do not want to use SSL,
either exclude these options or set them to None.
-@@ -43,6 +44,7 @@ optional. The following ssl options are simply for illustration purposes:
+@@ -43,6 +43,7 @@ optional. The following ssl options are simply for illustration purposes:
alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem'
alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt'
alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key'
@@ -38,37 +43,195 @@ index 69599ec36a..ff9d380843 100644
Should you wish the returner data to be cleaned out every so often, set
`keep_jobs` to the number of hours for the jobs to live in the tables.
-@@ -198,7 +200,8 @@ def _get_options(ret=None):
- 'port': 3306,
- 'ssl_ca': None,
- 'ssl_cert': None,
-- 'ssl_key': None}
-+ 'ssl_key': None,
-+ 'unix_socket': '/tmp/mysql.sock'}
+@@ -138,22 +139,15 @@ To override individual configuration items, append --return_kwargs '{"key:": "va
+ salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}'
- attrs = {'host': 'host',
- 'user': 'user',
-@@ -207,7 +210,8 @@ def _get_options(ret=None):
- 'port': 'port',
- 'ssl_ca': 'ssl_ca',
- 'ssl_cert': 'ssl_cert',
-- 'ssl_key': 'ssl_key'}
-+ 'ssl_key': 'ssl_key',
-+ 'unix_socket': 'unix_socket'}
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
- _options = salt.returners.get_returner_options(__virtualname__,
- ret,
-@@ -261,7 +265,8 @@ def _get_serv(ret=None, commit=False):
- passwd=_options.get('pass'),
- db=_options.get('db'),
- port=_options.get('port'),
-- ssl=ssl_options)
-+ ssl=ssl_options,
-+ unix_socket=_options.get('unix_socket'))
+ import logging
+ import sys
+-
+-# Import python libs
+ from contextlib import contextmanager
+
+ import salt.exceptions
+-
+-# Import salt libs
+ import salt.returners
+ import salt.utils.jid
+ import salt.utils.json
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+
+ # Let's not allow PyLint complain about string substitution
+@@ -205,6 +199,7 @@ def _get_options(ret=None):
+ "ssl_ca": None,
+ "ssl_cert": None,
+ "ssl_key": None,
++ "unix_socket": "/tmp/mysql.sock",
+ }
+
+ attrs = {
+@@ -216,6 +211,7 @@ def _get_options(ret=None):
+ "ssl_ca": "ssl_ca",
+ "ssl_cert": "ssl_cert",
+ "ssl_key": "ssl_key",
++ "unix_socket": "unix_socket",
+ }
+
+ _options = salt.returners.get_returner_options(
+@@ -227,8 +223,8 @@ def _get_options(ret=None):
+ defaults=defaults,
+ )
+ # post processing
+- for k, v in six.iteritems(_options):
+- if isinstance(v, six.string_types) and v.lower() == "none":
++ for k, v in _options.items():
++ if isinstance(v, str) and v.lower() == "none":
+ # Ensure 'None' is rendered as None
+ _options[k] = None
+ if k == "port":
+@@ -274,6 +270,7 @@ def _get_serv(ret=None, commit=False):
+ db=_options.get("db"),
+ port=_options.get("port"),
+ ssl=ssl_options,
++ unix_socket=_options.get("unix_socket"),
+ )
try:
- __context__['mysql_returner_conn'] = conn
+@@ -291,9 +288,9 @@ def _get_serv(ret=None, commit=False):
+ yield cursor
+ except MySQLdb.DatabaseError as err:
+ error = err.args
+- sys.stderr.write(six.text_type(error))
++ sys.stderr.write(str(error))
+ cursor.execute("ROLLBACK")
+- six.reraise(*sys.exc_info())
++ raise
+ else:
+ if commit:
+ cursor.execute("COMMIT")
+@@ -515,8 +512,8 @@ def _purge_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to delete contents of table 'jids'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ try:
+ sql = "delete from `salt_returns` where alter_time < %s"
+@@ -526,8 +523,8 @@ def _purge_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to delete contents of table 'salt_returns'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ try:
+ sql = "delete from `salt_events` where alter_time < %s"
+@@ -537,8 +534,8 @@ def _purge_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to delete contents of table 'salt_events'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ return True
+
+@@ -556,7 +553,7 @@ def _archive_jobs(timestamp):
+ for table_name in source_tables:
+ try:
+ tmp_table_name = table_name + "_archive"
+- sql = "create table if not exists {0} like {1}".format(
++ sql = "create table if not exists {} like {}".format(
+ tmp_table_name, table_name
+ )
+ cur.execute(sql)
+@@ -566,11 +563,11 @@ def _archive_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to create the archive tables."
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ try:
+- sql = "insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)".format(
++ sql = "insert into `{}` select * from `{}` where jid in (select distinct jid from salt_returns where alter_time < %s)".format(
+ target_tables["jids"], "jids"
+ )
+ cur.execute(sql, (timestamp,))
+@@ -579,14 +576,14 @@ def _archive_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to copy contents of table 'jids'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+ except Exception as e: # pylint: disable=broad-except
+ log.error(e)
+ raise
+
+ try:
+- sql = "insert into `{0}` select * from `{1}` where alter_time < %s".format(
++ sql = "insert into `{}` select * from `{}` where alter_time < %s".format(
+ target_tables["salt_returns"], "salt_returns"
+ )
+ cur.execute(sql, (timestamp,))
+@@ -595,11 +592,11 @@ def _archive_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to copy contents of table 'salt_returns'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ try:
+- sql = "insert into `{0}` select * from `{1}` where alter_time < %s".format(
++ sql = "insert into `{}` select * from `{}` where alter_time < %s".format(
+ target_tables["salt_events"], "salt_events"
+ )
+ cur.execute(sql, (timestamp,))
+@@ -608,8 +605,8 @@ def _archive_jobs(timestamp):
+ log.error(
+ "mysql returner archiver was unable to copy contents of table 'salt_events'"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
+
+ return _purge_jobs(timestamp)
+
+@@ -623,7 +620,7 @@ def clean_old_jobs():
+ if __opts__.get("keep_jobs", False) and int(__opts__.get("keep_jobs", 0)) > 0:
+ try:
+ with _get_serv() as cur:
+- sql = "select date_sub(now(), interval {0} hour) as stamp;".format(
++ sql = "select date_sub(now(), interval {} hour) as stamp;".format(
+ __opts__["keep_jobs"]
+ )
+ cur.execute(sql)
+@@ -638,5 +635,5 @@ def clean_old_jobs():
+ log.error(
+ "Mysql returner was unable to get timestamp for purge/archive of jobs"
+ )
+- log.error(six.text_type(e))
+- raise salt.exceptions.SaltRunnerError(six.text_type(e))
++ log.error(str(e))
++ raise salt.exceptions.SaltRunnerError(str(e))
--
-2.16.4
+2.29.2
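
With the option plumbed through above, the returner can reach MySQL over a local UNIX socket instead of TCP. A hedged usage sketch (requires a running server plus the mysqlclient or PyMySQL driver; the socket path is the patch's default and varies by distribution):

    import MySQLdb  # PyMySQL works too, via pymysql.install_as_MySQLdb()

    conn = MySQLdb.connect(
        host="localhost",
        user="salt",
        passwd="salt",
        db="salt",
        unix_socket="/tmp/mysql.sock",  # bypasses TCP entirely
    )
    print(conn.get_server_info())
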
diff --git a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
index 000cd9d..fafe014 100644
--- a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
+++ b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
@@ -1,4 +1,4 @@
-From 173444cecc1e7b4867570f1f8764db1b7f82061e Mon Sep 17 00:00:00 2001
+From 1cea7d065d8da7c713af8136162c21187d5186f5 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Wed, 14 Oct 2020 12:39:16 +0200
Subject: [PATCH] Ensure virt.update stop_on_reboot is updated with its
@@ -14,22 +14,22 @@ this value.
2 files changed, 3 insertions(+)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index 87ab7ca12d..9bc7bc6093 100644
+index 8e2180608a..e3960a5a90 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -2742,6 +2742,7 @@ def update(
+@@ -2738,6 +2738,7 @@ def update(
]
- data = {k: v for k, v in six.iteritems(locals()) if bool(v)}
+ data = {k: v for k, v in locals().items() if bool(v)}
+ data["stop_on_reboot"] = stop_on_reboot
if boot_dev:
data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
- need_update = salt.utils.xmlutil.change_xml(
+ need_update = (
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index ca5e80d2d2..fbc03cf7a6 100644
+index fba821ea53..83152eda6e 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -1778,6 +1778,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1777,6 +1777,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
1048576
1048576
1
@@ -37,7 +37,7 @@ index ca5e80d2d2..fbc03cf7a6 100644
hvm
-@@ -2350,6 +2351,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2349,6 +2350,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
1048576
1048576
1
@@ -46,6 +46,6 @@ index ca5e80d2d2..fbc03cf7a6 100644
hvm
--
-2.28.0
+2.29.2
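
The one-line fix above exists because virt.update() builds its change dict from locals() and filters out falsy values, so stop_on_reboot=False silently disappeared; re-assigning it afterwards keeps the key present either way. A minimal reproduction of the pattern:

    def update(cpu=None, mem=None, stop_on_reboot=False):
        # bool(False) is falsy, so the comprehension drops the flag...
        data = {k: v for k, v in locals().items() if bool(v)}
        # ...and the fix re-adds it unconditionally.
        data["stop_on_reboot"] = stop_on_reboot
        return data

    print(update(cpu=2))  # -> {'cpu': 2, 'stop_on_reboot': False}
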
diff --git a/fall-back-to-pymysql.patch b/fall-back-to-pymysql.patch
index 4f3c55b..e3e0e9f 100644
--- a/fall-back-to-pymysql.patch
+++ b/fall-back-to-pymysql.patch
@@ -1,4 +1,4 @@
-From f0098b4b9e5abaaca7bbc6c17f5a60bb2129dda5 Mon Sep 17 00:00:00 2001
+From 188a97fc20c3e24950b82dc6fcd0da878509cf7a Mon Sep 17 00:00:00 2001
From: Maximilian Meister
Date: Thu, 5 Apr 2018 13:23:23 +0200
Subject: [PATCH] fall back to PyMySQL
@@ -11,10 +11,10 @@ Signed-off-by: Maximilian Meister
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py
-index 87e2361e28..e785e5219c 100644
+index fdfe35158a..385e4d92a3 100644
--- a/salt/modules/mysql.py
+++ b/salt/modules/mysql.py
-@@ -58,7 +58,7 @@ try:
+@@ -55,7 +55,7 @@ try:
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.constants import FIELD_TYPE, FLAG
@@ -23,7 +23,7 @@ index 87e2361e28..e785e5219c 100644
except ImportError:
try:
# MySQLdb import failed, try to import PyMySQL
-@@ -68,7 +68,7 @@ except ImportError:
+@@ -66,7 +66,7 @@ except ImportError:
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.constants import FIELD_TYPE, FLAG
@@ -33,6 +33,6 @@ index 87e2361e28..e785e5219c 100644
MySQLdb = None
--
-2.16.4
+2.29.2
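
The two-line change above widens the except clause so that when the C driver (mysqlclient) is absent, PyMySQL gets a chance to register itself as a drop-in replacement. The import pattern it relies on:

    try:
        import MySQLdb
    except ImportError:
        try:
            import pymysql

            # Makes "import MySQLdb" resolve to PyMySQL from here on.
            pymysql.install_as_MySQLdb()
            import MySQLdb
        except ImportError:
            MySQLdb = None

    print("MySQL driver available:", MySQLdb is not None)
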
diff --git a/fix-__mount_device-wrapper-254.patch b/fix-__mount_device-wrapper-254.patch
index c374831..d0a191d 100644
--- a/fix-__mount_device-wrapper-254.patch
+++ b/fix-__mount_device-wrapper-254.patch
@@ -1,4 +1,4 @@
-From 7ad2d6067400f55dc7b70745216fab20620f35fd Mon Sep 17 00:00:00 2001
+From 1e00e2b72321b5312efb7b8b426a037c8db72b79 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Wed, 29 Jul 2020 16:11:47 +0200
Subject: [PATCH] Fix __mount_device wrapper (#254)
@@ -17,9 +17,9 @@ Fix #58012
(cherry picked from commit 2089645e2478751dc795127cfd14d0385c2e0899)
---
changelog/58012.fixed | 1 +
- salt/states/btrfs.py | 6 +++---
+ salt/states/btrfs.py | 4 ++--
tests/unit/states/test_btrfs.py | 27 +++++++++++++++++++++++++++
- 3 files changed, 31 insertions(+), 3 deletions(-)
+ 3 files changed, 30 insertions(+), 2 deletions(-)
create mode 100644 changelog/58012.fixed
diff --git a/changelog/58012.fixed b/changelog/58012.fixed
@@ -31,27 +31,25 @@ index 0000000000..13a1ef747d
+Fix btrfs state decorator that produced exceptions when creating subvolumes.
\ No newline at end of file
diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py
-index af78c8ae00..d0d6095c46 100644
+index 1374bbffb4..62a3553758 100644
--- a/salt/states/btrfs.py
+++ b/salt/states/btrfs.py
-@@ -103,9 +103,9 @@ def __mount_device(action):
- '''
+@@ -103,8 +103,8 @@ def __mount_device(action):
+
@functools.wraps(action)
def wrapper(*args, **kwargs):
-- name = kwargs['name']
-- device = kwargs['device']
-- use_default = kwargs.get('use_default', False)
+- name = kwargs["name"]
+- device = kwargs["device"]
+ name = kwargs.get("name", args[0] if args else None)
+ device = kwargs.get("device", args[1] if len(args) > 1 else None)
-+ use_default = kwargs.get("use_default", False)
+ use_default = kwargs.get("use_default", False)
ret = {
- 'name': name,
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
-index c68f6279dc..c722630aef 100644
+index b8f70bccfe..dceb971aa1 100644
--- a/tests/unit/states/test_btrfs.py
+++ b/tests/unit/states/test_btrfs.py
-@@ -245,6 +245,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+@@ -231,6 +231,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
mount.assert_called_once()
umount.assert_called_once()
@@ -82,10 +80,10 @@ index c68f6279dc..c722630aef 100644
+ mount.assert_called_once()
+ umount.assert_called_once()
+
- @patch('salt.states.btrfs._umount')
- @patch('salt.states.btrfs._mount')
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
def test_subvolume_created_exists_test(self, mount, umount):
--
-2.27.0
+2.29.2
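
The wrapper fix above replaces the kwargs['name'] and kwargs['device'] lookups with kwargs.get() plus a positional fallback, so states invoked as subvolume_created('@/var', '/dev/sda1') no longer raise KeyError. A standalone sketch of the decorator shape, with the real mount/umount plumbing simplified away:

    import functools

    def __mount_device(action):
        @functools.wraps(action)
        def wrapper(*args, **kwargs):
            # Accept name/device both positionally and as keywords.
            name = kwargs.get("name", args[0] if args else None)
            device = kwargs.get("device", args[1] if len(args) > 1 else None)
            print("would mount", device, "for", name)
            return action(*args, **kwargs)
        return wrapper

    @__mount_device
    def subvolume_created(name, device, use_default=False):
        return {"name": name, "result": True}

    print(subvolume_created("@/var", "/dev/sda1"))  # positional args now work
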
diff --git a/fix-a-test-and-some-variable-names-229.patch b/fix-a-test-and-some-variable-names-229.patch
index 7d20047..74670da 100644
--- a/fix-a-test-and-some-variable-names-229.patch
+++ b/fix-a-test-and-some-variable-names-229.patch
@@ -1,4 +1,4 @@
-From c1e66b9953c753dc9eff3652aef316e19c22deb4 Mon Sep 17 00:00:00 2001
+From daf29460408a5e0eb042b3c234c7e21a6b994cf1 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 12 May 2020 14:16:23 +0200
Subject: [PATCH] Fix a test and some variable names (#229)
@@ -7,60 +7,23 @@ Subject: [PATCH] Fix a test and some variable names (#229)
* Fix test_core tests for fqdns errors
---
- salt/modules/network.py | 2 +-
- tests/unit/grains/test_core.py | 24 +++++++++++++-----------
- 2 files changed, 14 insertions(+), 12 deletions(-)
+ tests/unit/grains/test_core.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/salt/modules/network.py b/salt/modules/network.py
-index 880f4f8d5f..9e11eb816e 100644
---- a/salt/modules/network.py
-+++ b/salt/modules/network.py
-@@ -1946,4 +1946,4 @@ def fqdns():
- elapsed = time.time() - start
- log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed))
-
-- return {"fqdns": sorted(list(fqdns))}
-\ No newline at end of file
-+ return {"fqdns": sorted(list(fqdns))}
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index 94e4199814..36aa49f232 100644
+index 196dbcf83d..918a9155cb 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -1122,20 +1122,22 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
-
- for errno in (0, core.HOST_NOT_FOUND, core.NO_DATA):
- mock_log = MagicMock()
-+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}):
-+ with patch.object(socket, 'gethostbyaddr',
-+ side_effect=_gen_gethostbyaddr(errno)):
-+ with patch('salt.modules.network.log', mock_log):
-+ self.assertEqual(core.fqdns(), {'fqdns': []})
-+ mock_log.debug.assert_called()
-+ mock_log.error.assert_not_called()
-+
-+ mock_log = MagicMock()
-+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}):
- with patch.object(socket, 'gethostbyaddr',
-- side_effect=_gen_gethostbyaddr(errno)):
-- with patch('salt.grains.core.log', mock_log):
-+ side_effect=_gen_gethostbyaddr(-1)):
-+ with patch('salt.modules.network.log', mock_log):
- self.assertEqual(core.fqdns(), {'fqdns': []})
+@@ -1416,7 +1416,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ with patch("salt.modules.network.log", mock_log):
+ self.assertEqual(core.fqdns(), {"fqdns": []})
mock_log.debug.assert_called_once()
-- mock_log.error.assert_not_called()
--
-- mock_log = MagicMock()
-- with patch.object(socket, 'gethostbyaddr',
-- side_effect=_gen_gethostbyaddr(-1)):
-- with patch('salt.grains.core.log', mock_log):
-- self.assertEqual(core.fqdns(), {'fqdns': []})
-- mock_log.debug.assert_not_called()
-- mock_log.error.assert_called_once()
+- mock_log.error.assert_called()
+ mock_log.error.assert_called_once()
- @patch.object(salt.utils.platform, 'is_windows', MagicMock(return_value=False))
- @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8']))
+ @patch.object(salt.utils.platform, "is_windows", MagicMock(return_value=False))
+ @patch(
--
-2.26.2
+2.29.2
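
The assertion change above is stricter on purpose: MagicMock.assert_called() passes for any nonzero call count, while assert_called_once() fails unless the error path fired exactly once. In isolation:

    from unittest.mock import MagicMock

    log = MagicMock()
    log.error("boom")
    log.error.assert_called()       # >= 1 call: passes
    log.error.assert_called_once()  # exactly 1 call: passes

    log.error("boom again")
    log.error.assert_called()       # still passes
    # log.error.assert_called_once() would now raise AssertionError
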
diff --git a/fix-a-wrong-rebase-in-test_core.py-180.patch b/fix-a-wrong-rebase-in-test_core.py-180.patch
index c795da1..309c53e 100644
--- a/fix-a-wrong-rebase-in-test_core.py-180.patch
+++ b/fix-a-wrong-rebase-in-test_core.py-180.patch
@@ -1,4 +1,4 @@
-From 67830ea17ae1e87a6bffca2a9542788c200d7dd9 Mon Sep 17 00:00:00 2001
+From 3d5f3cff6b43d7aba35063e970d016401bb82921 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Fri, 25 Oct 2019 15:43:16 +0200
Subject: [PATCH] Fix a wrong rebase in test_core.py (#180)
@@ -17,89 +17,128 @@ This patch ignores this kind of issue during grain creation.
(cherry picked from commit bd0213bae00b737b24795bec3c030ebfe476e0d8)
---
- salt/grains/core.py | 4 ++--
- tests/unit/grains/test_core.py | 45 ------------------------------------------
- 2 files changed, 2 insertions(+), 47 deletions(-)
+ salt/grains/core.py | 8 +++-
+ tests/unit/grains/test_core.py | 80 ----------------------------------
+ 2 files changed, 6 insertions(+), 82 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 68c43482d3..20950988d9 100644
+index a2983e388b..5dff6ecfd4 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -1000,7 +1000,7 @@ def _virtual(osdata):
+@@ -1066,7 +1066,9 @@ def _virtual(osdata):
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
- pass
-+ log.debug('The content in /sys/devices/virtual/dmi/id/product_name is not valid')
- except IOError:
++ log.debug(
++ "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
++ )
+ except OSError:
pass
- elif osdata['kernel'] == 'FreeBSD':
-@@ -2568,7 +2568,7 @@ def _hw_data(osdata):
+ elif osdata["kernel"] == "FreeBSD":
+@@ -2716,7 +2718,9 @@ def _hw_data(osdata):
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
- pass
-+ log.debug('The content in /sys/devices/virtual/dmi/id/product_name is not valid')
- except (IOError, OSError) as err:
++ log.debug(
++ "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
++ )
+ except OSError as err:
# PermissionError is new to Python 3, but corresponds to the EACESS and
# EPERM error numbers. Use those instead here for PY2 compatibility.
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index 33d6a9507f..7fa2436e58 100644
+index 0dc3423646..85d434dd9d 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -1560,51 +1560,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- assert all([x is not None for x in info])
- assert all([isinstance(x, int) for x in info])
+@@ -2047,86 +2047,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ result = core.path()
+ assert result == {"path": path, "systempath": comps}, result
-- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
+- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+- @patch("os.path.exists")
+- @patch("salt.utils.platform.is_proxy")
- def test_kernelparams_return(self):
- expectations = [
-- ('BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64',
-- {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-3.10.0-693.2.2.el7.x86_64')]}),
-- ('root=/dev/mapper/centos_daemon-root',
-- {'kernelparams': [('root', '/dev/mapper/centos_daemon-root')]}),
-- ('rhgb quiet ro',
-- {'kernelparams': [('rhgb', None), ('quiet', None), ('ro', None)]}),
-- ('param="value1"',
-- {'kernelparams': [('param', 'value1')]}),
-- ('param="value1 value2 value3"',
-- {'kernelparams': [('param', 'value1 value2 value3')]}),
-- ('param="value1 value2 value3" LANG="pl" ro',
-- {'kernelparams': [('param', 'value1 value2 value3'), ('LANG', 'pl'), ('ro', None)]}),
-- ('ipv6.disable=1',
-- {'kernelparams': [('ipv6.disable', '1')]}),
-- ('param="value1:value2:value3"',
-- {'kernelparams': [('param', 'value1:value2:value3')]}),
-- ('param="value1,value2,value3"',
-- {'kernelparams': [('param', 'value1,value2,value3')]}),
-- ('param="value1" param="value2" param="value3"',
-- {'kernelparams': [('param', 'value1'), ('param', 'value2'), ('param', 'value3')]}),
+- (
+- "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64",
+- {
+- "kernelparams": [
+- ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64")
+- ]
+- },
+- ),
+- (
+- "root=/dev/mapper/centos_daemon-root",
+- {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]},
+- ),
+- (
+- "rhgb quiet ro",
+- {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]},
+- ),
+- ('param="value1"', {"kernelparams": [("param", "value1")]}),
+- (
+- 'param="value1 value2 value3"',
+- {"kernelparams": [("param", "value1 value2 value3")]},
+- ),
+- (
+- 'param="value1 value2 value3" LANG="pl" ro',
+- {
+- "kernelparams": [
+- ("param", "value1 value2 value3"),
+- ("LANG", "pl"),
+- ("ro", None),
+- ]
+- },
+- ),
+- ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}),
+- (
+- 'param="value1:value2:value3"',
+- {"kernelparams": [("param", "value1:value2:value3")]},
+- ),
+- (
+- 'param="value1,value2,value3"',
+- {"kernelparams": [("param", "value1,value2,value3")]},
+- ),
+- (
+- 'param="value1" param="value2" param="value3"',
+- {
+- "kernelparams": [
+- ("param", "value1"),
+- ("param", "value2"),
+- ("param", "value3"),
+- ]
+- },
+- ),
- ]
-
- for cmdline, expectation in expectations:
-- with patch('salt.utils.files.fopen', mock_open(read_data=cmdline)):
+- with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)):
- self.assertEqual(core.kernelparams(), expectation)
-
-- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
-- @patch('os.path.exists')
-- @patch('salt.utils.platform.is_proxy')
+- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+- @patch("os.path.exists")
+- @patch("salt.utils.platform.is_proxy")
- def test__hw_data_linux_empty(self, is_proxy, exists):
- is_proxy.return_value = False
- exists.return_value = True
-- with patch('salt.utils.files.fopen', mock_open(read_data='')):
-- self.assertEqual(core._hw_data({'kernel': 'Linux'}), {
-- 'biosreleasedate': '',
-- 'biosversion': '',
-- 'manufacturer': '',
-- 'productname': '',
-- 'serialnumber': '',
-- 'uuid': ''
-- })
+- with patch("salt.utils.files.fopen", mock_open(read_data="")):
+- self.assertEqual(
+- core._hw_data({"kernel": "Linux"}),
+- {
+- "biosreleasedate": "",
+- "biosversion": "",
+- "manufacturer": "",
+- "productname": "",
+- "serialnumber": "",
+- "uuid": "",
+- },
+- )
-
- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
- @skipIf(six.PY2, 'UnicodeDecodeError is throw in Python 3')
- @patch('os.path.exists')
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3")
+ @patch("os.path.exists")
--
-2.16.4
+2.29.2
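
Both hunks above restore the same debug message in the UnicodeDecodeError branch, once in _virtual() and once in _hw_data(). The surrounding control flow, reduced to a standalone sketch (the real code reads via salt.utils.files.fopen and feeds the result into the grains dict):

import logging

log = logging.getLogger(__name__)

def read_product_name(path="/sys/devices/virtual/dmi/id/product_name"):
    # Some firmware writes undecodable bytes into product_name; treat
    # that like a missing file, but leave a trace at debug level.
    try:
        with open(path, "rb") as fhr:
            return fhr.read().decode("utf-8").strip()
    except UnicodeDecodeError:
        log.debug("The content in %s is not valid", path)
    except OSError:
        pass
    return None
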
diff --git a/fix-aptpkg-systemd-call-bsc-1143301.patch b/fix-aptpkg-systemd-call-bsc-1143301.patch
index 8ef7ab3..0890e7f 100644
--- a/fix-aptpkg-systemd-call-bsc-1143301.patch
+++ b/fix-aptpkg-systemd-call-bsc-1143301.patch
@@ -1,40 +1,41 @@
-From c2989e749f04aa8477130df649e550f5349a9a1f Mon Sep 17 00:00:00 2001
+From 5dadda6822323f409c99112244c2c809e58126e1 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Wed, 31 Jul 2019 15:29:03 +0200
Subject: [PATCH] Fix aptpkg systemd call (bsc#1143301)
---
salt/modules/aptpkg.py | 2 +-
- tests/unit/modules/test_aptpkg.py | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
+ tests/unit/modules/test_aptpkg.py | 3 +--
+ 2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 13484c96bc..a5b039fc79 100644
+index bf90d0614f..c47ee852f4 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -168,7 +168,7 @@ def _call_apt(args, scope=True, **kwargs):
- '''
- cmd = []
- if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
-- cmd.extend(['systemd-run', '--scope'])
-+ cmd.extend(['systemd-run', '--scope', '--description "{0}"'.format(__name__)])
+@@ -160,7 +160,7 @@ def _call_apt(args, scope=True, **kwargs):
+ and salt.utils.systemd.has_scope(__context__)
+ and __salt__["config.get"]("systemd.scope", True)
+ ):
+- cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
++ cmd.extend(["systemd-run", "--scope", '--description "{}"'.format(__name__)])
cmd.extend(args)
- params = {'output_loglevel': 'trace',
+ params = {
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
-index 10e960f090..88eed062c4 100644
+index 77d8b84896..c3769a7df1 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
-@@ -645,7 +645,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(aptpkg.__salt__, {'cmd.run_all': MagicMock(), 'config.get': MagicMock(return_value=True)}):
- aptpkg._call_apt(['apt-get', 'purge', 'vim']) # pylint: disable=W0106
- aptpkg.__salt__['cmd.run_all'].assert_called_once_with(
-- ['systemd-run', '--scope', 'apt-get', 'purge', 'vim'], env={},
-+ ['systemd-run', '--scope', '--description "salt.modules.aptpkg"', 'apt-get', 'purge', 'vim'], env={},
- output_loglevel='trace', python_shell=False)
-
- def test_call_apt_with_kwargs(self):
+@@ -896,8 +896,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
+ [
+ "systemd-run",
+ "--scope",
+- "--description",
+- '"salt.modules.aptpkg"',
++ '--description "salt.modules.aptpkg"',
+ "apt-get",
+ "purge",
+ "vim",
--
-2.16.4
+2.29.2
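
The change above reverts upstream's four-element form and keeps --description and its quoted value as a single argv element, which is what the adjusted test then expects. A standalone sketch of the resulting command line (systemd-run availability and the scope decision are simplified to a boolean here):

def build_apt_cmd(args, scope=True, module_name="salt.modules.aptpkg"):
    # Run apt inside a transient systemd scope so the call survives a
    # salt-minion restart; note the single --description argument.
    cmd = []
    if scope:
        cmd.extend(
            ["systemd-run", "--scope", '--description "{}"'.format(module_name)]
        )
    cmd.extend(args)
    return cmd

assert build_apt_cmd(["apt-get", "purge", "vim"]) == [
    "systemd-run",
    "--scope",
    '--description "salt.modules.aptpkg"',
    "apt-get",
    "purge",
    "vim",
]
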
diff --git a/fix-aptpkg.normalize_name-when-package-arch-is-all.patch b/fix-aptpkg.normalize_name-when-package-arch-is-all.patch
new file mode 100644
index 0000000..85d98de
--- /dev/null
+++ b/fix-aptpkg.normalize_name-when-package-arch-is-all.patch
@@ -0,0 +1,42 @@
+From 763d63b72b9a20f22555b665033899e10f091b60 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Mon, 11 Jan 2021 15:45:28 +0000
+Subject: [PATCH] Fix aptpkg.normalize_name when package arch is 'all'
+
+Add a test case for a DEB package whose arch is 'all'
+---
+ salt/modules/aptpkg.py | 2 +-
+ tests/unit/modules/test_aptpkg.py | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index e001d2f11c..03e99af733 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -208,7 +208,7 @@ def normalize_name(name):
+ pkgname = name
+ pkgarch = __grains__["osarch"]
+
+- return pkgname if pkgarch in (__grains__["osarch"], "any") else name
++ return pkgname if pkgarch in (__grains__["osarch"], "all", "any") else name
+
+
+ def parse_arch(name):
+diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
+index 51dfce29eb..eb3f9e2da7 100644
+--- a/tests/unit/modules/test_aptpkg.py
++++ b/tests/unit/modules/test_aptpkg.py
+@@ -808,6 +808,8 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ assert result == "foo", result
+ result = aptpkg.normalize_name("foo:any")
+ assert result == "foo", result
++ result = aptpkg.normalize_name("foo:all")
++ assert result == "foo", result
+ result = aptpkg.normalize_name("foo:i386")
+ assert result == "foo:i386", result
+
+--
+2.29.2
+
+
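The behaviour this new patch pins down: an arch suffix that carries no information (the native arch, 'any', and now 'all') is stripped, while a real foreign arch such as i386 is kept. A self-contained sketch, with osarch passed as a parameter instead of being read from __grains__:

def normalize_name(name, osarch="amd64"):
    # Strip ":arch" when it is redundant: the native arch or the
    # arch-independent markers 'all' (DEB) and 'any'.
    try:
        pkgname, pkgarch = name.rsplit(":", 1)
    except ValueError:
        pkgname, pkgarch = name, osarch
    return pkgname if pkgarch in (osarch, "all", "any") else name

assert normalize_name("foo:amd64") == "foo"
assert normalize_name("foo:any") == "foo"
assert normalize_name("foo:all") == "foo"        # the fixed case
assert normalize_name("foo:i386") == "foo:i386"  # foreign arch is kept
assert normalize_name("foo") == "foo"
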
diff --git a/fix-async-batch-multiple-done-events.patch b/fix-async-batch-multiple-done-events.patch
index 633c571..d655138 100644
--- a/fix-async-batch-multiple-done-events.patch
+++ b/fix-async-batch-multiple-done-events.patch
@@ -1,55 +1,57 @@
-From 42d7e1de2c69d82447e73eab483e5d3c299d55f7 Mon Sep 17 00:00:00 2001
+From 85b8666b138cab170327f0217c799277371b2e80 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Tue, 7 May 2019 12:24:35 +0200
Subject: [PATCH] Fix async-batch multiple done events
---
- salt/cli/batch_async.py | 17 ++++++++++++-----
+ salt/cli/batch_async.py | 19 ++++++++++++-------
tests/unit/cli/test_batch_async.py | 20 +++++++++++++-------
- 2 files changed, 25 insertions(+), 12 deletions(-)
+ 2 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 9c20b2fc6e..8c8f481e34 100644
+index b0ab9d9f47..7225491228 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -84,6 +84,7 @@ class BatchAsync(object):
- listen=True,
+@@ -86,6 +86,7 @@ class BatchAsync:
io_loop=ioloop,
- keep_loop=True)
+ keep_loop=True,
+ )
+ self.scheduled = False
def __set_event_handler(self):
- ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid)
-@@ -116,8 +117,7 @@ class BatchAsync(object):
+ ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
+@@ -118,10 +119,7 @@ class BatchAsync:
if minion in self.active:
self.active.remove(minion)
self.done_minions.add(minion)
- # call later so that we maybe gather more returns
-- self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
+- self.event.io_loop.call_later(
+- self.batch_delay, self.schedule_next
+- )
+ self.schedule_next()
def _get_next(self):
- to_run = self.minions.difference(
-@@ -137,7 +137,7 @@ class BatchAsync(object):
- self.active = self.active.difference(self.timedout_minions)
- running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
+ to_run = (
+@@ -146,7 +144,7 @@ class BatchAsync:
+ self.timedout_minions
+ )
if timedout_minions:
- self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
+ self.schedule_next()
if running:
self.event.io_loop.add_callback(self.find_job, running)
-@@ -189,7 +189,7 @@ class BatchAsync(object):
- "metadata": self.metadata
+@@ -197,7 +195,7 @@ class BatchAsync:
+ "metadata": self.metadata,
}
- self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
+ self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
- yield self.schedule_next()
+ yield self.run_next()
def end_batch(self):
- left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions))
-@@ -204,8 +204,14 @@ class BatchAsync(object):
- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+ left = self.minions.symmetric_difference(
+@@ -214,8 +212,14 @@ class BatchAsync:
+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
self.event.remove_event_handler(self.__event_handler)
- @tornado.gen.coroutine
@@ -64,16 +66,16 @@ index 9c20b2fc6e..8c8f481e34 100644
next_batch = self._get_next()
if next_batch:
self.active = self.active.union(next_batch)
-@@ -225,3 +231,4 @@ class BatchAsync(object):
+@@ -238,3 +242,4 @@ class BatchAsync:
self.active = self.active.difference(next_batch)
else:
self.end_batch()
+ self.scheduled = False
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index d519157d92..441f9c58b9 100644
+index d6a4bfcf60..66332a548a 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -111,14 +111,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -105,14 +105,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
@tornado.testing.gen_test
def test_start_batch_calls_next(self):
@@ -90,27 +92,27 @@ index d519157d92..441f9c58b9 100644
+ self.assertEqual(len(self.batch.run_next.mock_calls), 1)
def test_batch_fire_done_event(self):
- self.batch.targeted_minions = {'foo', 'baz', 'bar'}
-@@ -154,7 +154,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.batch.targeted_minions = {"foo", "baz", "bar"}
+@@ -147,7 +147,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
- future.set_result({'minions': ['foo', 'bar']})
+ future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
- ret = self.batch.schedule_next().result()
+ ret = self.batch.run_next().result()
self.assertEqual(
self.batch.local.run_job_async.call_args[0],
- ({'foo', 'bar'}, 'my.fun', [], 'list')
-@@ -253,7 +253,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.assertEqual(self.batch.done_minions, {'foo'})
+ ({"foo", "bar"}, "my.fun", [], "list"),
+@@ -250,7 +250,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(self.batch.done_minions, {"foo"})
self.assertEqual(
self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.batch_delay, self.batch.schedule_next))
-+ (self.batch.batch_delay, self.batch.run_next))
+- (self.batch.batch_delay, self.batch.schedule_next),
++ (self.batch.batch_delay, self.batch.run_next),
+ )
def test_batch__event_handler_find_job_return(self):
- self.batch.event = MagicMock(
-@@ -263,10 +263,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.assertEqual(self.batch.find_job_returned, {'foo'})
+@@ -262,10 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(self.batch.find_job_returned, {"foo"})
@tornado.testing.gen_test
- def test_batch_schedule_next_end_batch_when_no_next(self):
@@ -122,9 +124,9 @@ index d519157d92..441f9c58b9 100644
self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
@tornado.testing.gen_test
-@@ -342,3 +342,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -345,3 +345,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.event.io_loop.add_callback.call_args[0],
- (self.batch.find_job, {'foo'})
+ (self.batch.find_job, {"foo"}),
)
+
+ def test_only_on_run_next_is_scheduled(self):
@@ -133,6 +135,6 @@ index d519157d92..441f9c58b9 100644
+ self.batch.schedule_next()
+ self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0)
--
-2.16.4
+2.29.2
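
The scheduled flag introduced above is a debounce: many minion returns may each ask for the next batch, but only one run_next may be pending at a time, which is what the new test_only_on_run_next_is_scheduled asserts. A reduced sketch of the pattern (the io_loop interface is narrowed to call_later):

from unittest.mock import MagicMock

class Batcher:
    def __init__(self, io_loop, batch_delay=1.0):
        self.io_loop = io_loop
        self.batch_delay = batch_delay
        self.scheduled = False

    def schedule_next(self):
        # Collapse bursts of requests into a single pending callback.
        if not self.scheduled:
            self.scheduled = True
            self.io_loop.call_later(self.batch_delay, self.run_next)

    def run_next(self):
        # ... dispatch the next set of minions here ...
        self.scheduled = False  # re-arm for the following round

loop = MagicMock()
batcher = Batcher(loop)
batcher.schedule_next()
batcher.schedule_next()  # second request while one is pending: no-op
assert len(loop.call_later.mock_calls) == 1
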
diff --git a/fix-async-batch-race-conditions.patch b/fix-async-batch-race-conditions.patch
index 6a114d2..f7ced1f 100644
--- a/fix-async-batch-race-conditions.patch
+++ b/fix-async-batch-race-conditions.patch
@@ -1,19 +1,19 @@
-From dc001cb47fd88a8e8a1bd82a1457325822d1220b Mon Sep 17 00:00:00 2001
+From 4b3badeb52a9de10d6085ee3cc7598a827d1e68f Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Thu, 11 Apr 2019 15:57:59 +0200
Subject: [PATCH] Fix async batch race conditions
Close batching when there is no next batch
---
- salt/cli/batch_async.py | 80 +++++++++++++++++++-------------------
- tests/unit/cli/test_batch_async.py | 35 +++++++----------
- 2 files changed, 54 insertions(+), 61 deletions(-)
+ salt/cli/batch_async.py | 96 +++++++++++++++---------------
+ tests/unit/cli/test_batch_async.py | 38 +++++-------
+ 2 files changed, 62 insertions(+), 72 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 3160d46d8b..9c20b2fc6e 100644
+index 1557e5105b..b0ab9d9f47 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -37,14 +37,14 @@ class BatchAsync(object):
+@@ -32,14 +32,14 @@ class BatchAsync:
- tag: salt/batch//start
- data: {
"available_minions": self.minions,
@@ -30,36 +30,38 @@ index 3160d46d8b..9c20b2fc6e 100644
"done_minions": self.done_minions,
"timedout_minions": self.timedout_minions
}
-@@ -67,7 +67,7 @@ class BatchAsync(object):
- self.eauth = batch_get_eauth(clear_load['kwargs'])
- self.metadata = clear_load['kwargs'].get('metadata', {})
+@@ -68,7 +68,7 @@ class BatchAsync:
+ self.eauth = batch_get_eauth(clear_load["kwargs"])
+ self.metadata = clear_load["kwargs"].get("metadata", {})
self.minions = set()
- self.down_minions = set()
+ self.targeted_minions = set()
self.timedout_minions = set()
self.done_minions = set()
self.active = set()
-@@ -108,8 +108,7 @@ class BatchAsync(object):
- minion = data['id']
- if op == 'ping_return':
+@@ -110,8 +110,7 @@ class BatchAsync:
+ minion = data["id"]
+ if op == "ping_return":
self.minions.add(minion)
- self.down_minions.remove(minion)
- if not self.down_minions:
+ if self.targeted_minions == self.minions:
self.event.io_loop.spawn_callback(self.start_batch)
- elif op == 'find_job_return':
+ elif op == "find_job_return":
self.find_job_returned.add(minion)
-@@ -120,9 +119,6 @@ class BatchAsync(object):
- # call later so that we maybe gather more returns
- self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
+@@ -124,11 +123,6 @@ class BatchAsync:
+ self.batch_delay, self.schedule_next
+ )
-- if self.initialized and self.done_minions == self.minions.difference(self.timedout_minions):
+- if self.initialized and self.done_minions == self.minions.difference(
+- self.timedout_minions
+- ):
- self.end_batch()
-
def _get_next(self):
- to_run = self.minions.difference(
- self.done_minions).difference(
-@@ -135,16 +131,13 @@ class BatchAsync(object):
+ to_run = (
+ self.minions.difference(self.done_minions)
+@@ -142,20 +136,17 @@ class BatchAsync:
return set(list(to_run)[:next_batch_size])
@tornado.gen.coroutine
@@ -72,35 +74,42 @@ index 3160d46d8b..9c20b2fc6e 100644
- if minion in self.active:
- self.active.remove(minion)
- self.timedout_minions.add(minion)
-- running = minions.difference(did_not_return).difference(self.done_minions).difference(self.timedout_minions)
+- running = (
+- minions.difference(did_not_return)
+- .difference(self.done_minions)
+- .difference(self.timedout_minions)
+ def check_find_job(self, batch_minions):
-+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions)
++ timedout_minions = batch_minions.difference(self.find_job_returned).difference(
++ self.done_minions
+ )
+ self.timedout_minions = self.timedout_minions.union(timedout_minions)
+ self.active = self.active.difference(self.timedout_minions)
-+ running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
++ running = batch_minions.difference(self.done_minions).difference(
++ self.timedout_minions
++ )
+ if timedout_minions:
+ self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
if running:
self.event.io_loop.add_callback(self.find_job, running)
-@@ -183,7 +176,7 @@ class BatchAsync(object):
- jid=self.ping_jid,
+@@ -193,7 +184,7 @@ class BatchAsync:
metadata=self.metadata,
- **self.eauth)
-- self.down_minions = set(ping_return['minions'])
-+ self.targeted_minions = set(ping_return['minions'])
+ **self.eauth
+ )
+- self.down_minions = set(ping_return["minions"])
++ self.targeted_minions = set(ping_return["minions"])
@tornado.gen.coroutine
def start_batch(self):
-@@ -192,36 +185,43 @@ class BatchAsync(object):
+@@ -202,39 +193,48 @@ class BatchAsync:
self.initialized = True
data = {
"available_minions": self.minions,
- "down_minions": self.down_minions,
+ "down_minions": self.targeted_minions.difference(self.minions),
- "metadata": self.metadata
+ "metadata": self.metadata,
}
- self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
+ self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
yield self.schedule_next()
def end_batch(self):
@@ -109,20 +118,22 @@ index 3160d46d8b..9c20b2fc6e 100644
- "down_minions": self.down_minions,
- "done_minions": self.done_minions,
- "timedout_minions": self.timedout_minions,
-- "metadata": self.metadata
+- "metadata": self.metadata,
- }
-- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+- self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- self.event.remove_event_handler(self.__event_handler)
-+ left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions))
++ left = self.minions.symmetric_difference(
++ self.done_minions.union(self.timedout_minions)
++ )
+ if not left:
+ data = {
+ "available_minions": self.minions,
+ "down_minions": self.targeted_minions.difference(self.minions),
+ "done_minions": self.done_minions,
+ "timedout_minions": self.timedout_minions,
-+ "metadata": self.metadata
++ "metadata": self.metadata,
+ }
-+ self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
++ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
+ self.event.remove_event_handler(self.__event_handler)
@tornado.gen.coroutine
@@ -131,116 +142,125 @@ index 3160d46d8b..9c20b2fc6e 100644
if next_batch:
- yield self.local.run_job_async(
- next_batch,
-- self.opts['fun'],
-- self.opts['arg'],
-- 'list',
-- raw=self.opts.get('raw', False),
-- ret=self.opts.get('return', ''),
-- gather_job_timeout=self.opts['gather_job_timeout'],
+- self.opts["fun"],
+- self.opts["arg"],
+- "list",
+- raw=self.opts.get("raw", False),
+- ret=self.opts.get("return", ""),
+- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.batch_jid,
-- metadata=self.metadata)
-- self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
+- metadata=self.metadata,
+- )
+- self.event.io_loop.call_later(
+- self.opts["timeout"], self.find_job, set(next_batch)
+- )
self.active = self.active.union(next_batch)
+ try:
+ yield self.local.run_job_async(
+ next_batch,
-+ self.opts['fun'],
-+ self.opts['arg'],
-+ 'list',
-+ raw=self.opts.get('raw', False),
-+ ret=self.opts.get('return', ''),
-+ gather_job_timeout=self.opts['gather_job_timeout'],
++ self.opts["fun"],
++ self.opts["arg"],
++ "list",
++ raw=self.opts.get("raw", False),
++ ret=self.opts.get("return", ""),
++ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=self.batch_jid,
-+ metadata=self.metadata)
-+ self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
++ metadata=self.metadata,
++ )
++ self.event.io_loop.call_later(
++ self.opts["timeout"], self.find_job, set(next_batch)
++ )
+ except Exception as ex:
+ self.active = self.active.difference(next_batch)
+ else:
+ self.end_batch()
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index f65b6a06c3..d519157d92 100644
+index 3f8626a2dd..d6a4bfcf60 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -75,8 +75,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.batch.local.run_job_async.call_args[0],
- ('*', 'test.ping', [], 'glob')
+@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(
+ self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob")
)
- # assert down_minions == all minions matched by tgt
-- self.assertEqual(self.batch.down_minions, set(['foo', 'bar']))
+- self.assertEqual(self.batch.down_minions, {"foo", "bar"})
+ # assert targeted_minions == all minions matched by tgt
-+ self.assertEqual(self.batch.targeted_minions, set(['foo', 'bar']))
++ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
@tornado.testing.gen_test
def test_batch_start_on_gather_job_timeout(self):
-@@ -121,7 +121,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -115,7 +115,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(len(self.batch.schedule_next.mock_calls), 1)
def test_batch_fire_done_event(self):
-+ self.batch.targeted_minions = {'foo', 'baz', 'bar'}
- self.batch.minions = set(['foo', 'bar'])
-+ self.batch.done_minions = {'foo'}
-+ self.batch.timedout_minions = {'bar'}
++ self.batch.targeted_minions = {"foo", "baz", "bar"}
+ self.batch.minions = {"foo", "bar"}
++ self.batch.done_minions = {"foo"}
++ self.batch.timedout_minions = {"bar"}
self.batch.event = MagicMock()
- self.batch.metadata = {'mykey': 'myvalue'}
+ self.batch.metadata = {"mykey": "myvalue"}
self.batch.end_batch()
-@@ -130,9 +133,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -124,9 +127,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
(
{
- 'available_minions': set(['foo', 'bar']),
-- 'done_minions': set(),
-- 'down_minions': set(),
-- 'timedout_minions': set(),
-+ 'done_minions': self.batch.done_minions,
-+ 'down_minions': {'baz'},
-+ 'timedout_minions': self.batch.timedout_minions,
- 'metadata': self.batch.metadata
+ "available_minions": {"foo", "bar"},
+- "done_minions": set(),
+- "down_minions": set(),
+- "timedout_minions": set(),
++ "done_minions": self.batch.done_minions,
++ "down_minions": {"baz"},
++ "timedout_minions": self.batch.timedout_minions,
+ "metadata": self.batch.metadata,
},
- "salt/batch/1235/done"
-@@ -212,7 +215,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ "salt/batch/1235/done",
+@@ -205,7 +208,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch._get_next(), set())
def test_batch__event_handler_ping_return(self):
-- self.batch.down_minions = {'foo'}
-+ self.batch.targeted_minions = {'foo'}
+- self.batch.down_minions = {"foo"}
++ self.batch.targeted_minions = {"foo"}
self.batch.event = MagicMock(
- unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
- self.batch.start()
-@@ -222,7 +225,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+ )
+@@ -216,7 +219,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.done_minions, set())
def test_batch__event_handler_call_start_batch_when_all_pings_return(self):
-- self.batch.down_minions = {'foo'}
-+ self.batch.targeted_minions = {'foo'}
+- self.batch.down_minions = {"foo"}
++ self.batch.targeted_minions = {"foo"}
self.batch.event = MagicMock(
- unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
- self.batch.start()
-@@ -232,7 +235,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- (self.batch.start_batch,))
+ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+ )
+@@ -228,7 +231,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ )
def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self):
-- self.batch.down_minions = {'foo', 'bar'}
-+ self.batch.targeted_minions = {'foo', 'bar'}
+- self.batch.down_minions = {"foo", "bar"}
++ self.batch.targeted_minions = {"foo", "bar"}
self.batch.event = MagicMock(
- unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
- self.batch.start()
-@@ -260,20 +263,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.assertEqual(self.batch.find_job_returned, {'foo'})
+ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+ )
+@@ -259,23 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(self.batch.find_job_returned, {"foo"})
@tornado.testing.gen_test
- def test_batch__event_handler_end_batch(self):
- self.batch.event = MagicMock(
-- unpack=MagicMock(return_value=('salt/job/not-my-jid/ret/foo', {'id': 'foo'})))
+- unpack=MagicMock(
+- return_value=("salt/job/not-my-jid/ret/foo", {"id": "foo"})
+- )
+- )
- future = tornado.gen.Future()
-- future.set_result({'minions': ['foo', 'bar', 'baz']})
+- future.set_result({"minions": ["foo", "bar", "baz"]})
- self.batch.local.run_job_async.return_value = future
- self.batch.start()
- self.batch.initialized = True
-- self.assertEqual(self.batch.down_minions, {'foo', 'bar', 'baz'})
+- self.assertEqual(self.batch.down_minions, {"foo", "bar", "baz"})
+ def test_batch_schedule_next_end_batch_when_no_next(self):
self.batch.end_batch = MagicMock()
-- self.batch.minions = {'foo', 'bar', 'baz'}
-- self.batch.done_minions = {'foo', 'bar'}
-- self.batch.timedout_minions = {'baz'}
+- self.batch.minions = {"foo", "bar", "baz"}
+- self.batch.done_minions = {"foo", "bar"}
+- self.batch.timedout_minions = {"baz"}
- self.batch._BatchAsync__event_handler(MagicMock())
+ self.batch._get_next = MagicMock(return_value={})
+ self.batch.schedule_next()
@@ -248,6 +268,6 @@ index f65b6a06c3..d519157d92 100644
@tornado.testing.gen_test
--
-2.16.4
+2.29.2
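
Two of the races closed above are visible in the hunks: end_batch may fire the done event only once every minion is accounted for (otherwise a late return would fire it a second time), and a failing run_job_async must take its minions back out of the active set. The first guard, as a pure-Python sketch:

def end_batch(minions, done_minions, timedout_minions, fire_event):
    # Fire salt/batch/<jid>/done only when nothing is left in flight.
    left = minions.symmetric_difference(done_minions.union(timedout_minions))
    if not left:
        fire_event(
            {"done_minions": done_minions, "timedout_minions": timedout_minions}
        )
        return True
    return False

fired = []
assert not end_batch({"foo", "bar"}, {"foo"}, set(), fired.append)  # bar in flight
assert end_batch({"foo", "bar"}, {"foo"}, {"bar"}, fired.append)    # all accounted
assert len(fired) == 1
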
diff --git a/fix-batch_async-obsolete-test.patch b/fix-batch_async-obsolete-test.patch
index d6e4544..86d5f8e 100644
--- a/fix-batch_async-obsolete-test.patch
+++ b/fix-batch_async-obsolete-test.patch
@@ -1,4 +1,4 @@
-From 49780d409630fe18293a077e767aabfd183ff823 Mon Sep 17 00:00:00 2001
+From 5a83801b7733f09c35a7ff0abb5aa32d4c857e4b Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Tue, 3 Dec 2019 11:22:42 +0100
Subject: [PATCH] Fix batch_async obsolete test
@@ -8,26 +8,25 @@ Subject: [PATCH] Fix batch_async obsolete test
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index 12dfe543bc..f1d36a81fb 100644
+index c18b42be57..b04965268a 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -140,8 +140,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- "salt/batch/1235/done"
- )
+@@ -134,7 +134,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ "salt/batch/1235/done",
+ ),
)
+- self.assertEqual(len(self.batch.event.remove_event_handler.mock_calls), 1)
+
+ def test_batch__del__(self):
+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+ event = MagicMock()
+ batch.event = event
+ batch.__del__()
- self.assertEqual(
-- len(self.batch.event.remove_event_handler.mock_calls), 1)
-+ len(event.remove_event_handler.mock_calls), 1)
++ self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
@tornado.testing.gen_test
def test_batch_next(self):
--
-2.16.4
+2.29.2
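
The rewritten test above moves the remove_event_handler assertion from the done-event path to object teardown. The shape it asserts, as a small sketch (BatchAsync's real constructor takes opts, a jid generator, and a client, all mocked away in the test):

from unittest.mock import MagicMock

class Batch:
    def __init__(self, event):
        self.event = event

    def __event_handler(self, raw):
        pass

    def __del__(self):
        # Detach from the event bus so a collected batch object no
        # longer receives callbacks.
        self.event.remove_event_handler(self.__event_handler)

event = MagicMock()
batch = Batch(event)
batch.__del__()  # the test calls __del__ explicitly, as above
assert len(event.remove_event_handler.mock_calls) == 1
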
diff --git a/fix-bsc-1065792.patch b/fix-bsc-1065792.patch
index 9994949..67d82e1 100644
--- a/fix-bsc-1065792.patch
+++ b/fix-bsc-1065792.patch
@@ -1,25 +1,42 @@
-From 4acbe70851e3ef7a04fc5ad0dc9a2519f6989c66 Mon Sep 17 00:00:00 2001
+From 1b9a160f578cf446f5ae622a450d23022e7e3ca5 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Thu, 14 Dec 2017 16:21:40 +0100
Subject: [PATCH] Fix bsc#1065792
---
- salt/states/service.py | 1 +
- 1 file changed, 1 insertion(+)
+ salt/states/service.py | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/salt/states/service.py b/salt/states/service.py
-index de7718ea49..987e37cd42 100644
+index d19c245756..4ea36a78f6 100644
--- a/salt/states/service.py
+++ b/salt/states/service.py
-@@ -80,6 +80,7 @@ def __virtual__():
+@@ -56,16 +56,12 @@ set the reload value to True:
+ :ref:`Requisites ` documentation.
+
+ """
+-# Import Python libs
+
+ import time
+
+-# Import Salt libs
+ import salt.utils.data
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+-
+-# Import 3rd-party libs
+ from salt.utils.args import get_function_argspec as _argspec
+ from salt.utils.systemd import booted
+
+@@ -79,6 +75,7 @@ def __virtual__():
Only make these states available if a service provider has been detected or
assigned for this minion
- '''
+ """
+ __salt__._load_all()
- if 'service.start' in __salt__:
+ if "service.start" in __salt__:
return __virtualname__
else:
--
-2.16.4
+2.29.2
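
The single added line above works around Salt's lazy loader: __salt__ is populated on demand, so at __virtual__ time a plain membership test can miss service.start even though the module would load fine. _load_all() forces everything in first. The effect, reduced to a toy lazy dict:

class LazyFuncs(dict):
    # Toy stand-in for Salt's LazyLoader: nothing is present until it
    # is loaded, so membership tests on a fresh instance come up empty.
    def __init__(self, loader):
        super().__init__()
        self._loader = loader

    def _load_all(self):
        self.update(self._loader())

funcs = LazyFuncs(lambda: {"service.start": lambda name: True})
assert "service.start" not in funcs  # the bsc#1065792 symptom
funcs._load_all()                    # what the patched __virtual__ adds
assert "service.start" in funcs
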
diff --git a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch b/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
index 940eb44..c27fedf 100644
--- a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
+++ b/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
@@ -1,4 +1,4 @@
-From e7514afcba4f57c5cb8599f561fcefdcc3db7314 Mon Sep 17 00:00:00 2001
+From bc7acab857b952353a959339b06c79d851a9d879 Mon Sep 17 00:00:00 2001
From: "Daniel A. Wozniak"
Date: Wed, 16 Sep 2020 00:25:10 +0000
Subject: [PATCH] Fix CVE-2020-25592 and add tests (bsc#1178319)
@@ -7,184 +7,17 @@ Properly validate eauth credentials and tokens on SSH calls made by Salt API
(bsc#1178319) (bsc#1178362) (bsc#1178361) (CVE-2020-25592) (CVE-2020-17490) (CVE-2020-16846)
---
- salt/client/ssh/shell.py | 26 ++-
- salt/modules/tls.py | 18 +-
- salt/netapi/__init__.py | 67 ++++++
- tests/integration/netapi/test_client.py | 296 +++++++++++++++++++++++-
- 4 files changed, 388 insertions(+), 19 deletions(-)
+ salt/netapi/__init__.py | 43 +++++++++++++++++++++++++
+ tests/integration/netapi/test_client.py | 13 ++++++--
+ 2 files changed, 53 insertions(+), 3 deletions(-)
-diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py
-index bd55c514ee..27aba7b382 100644
---- a/salt/client/ssh/shell.py
-+++ b/salt/client/ssh/shell.py
-@@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals
- import re
- import os
- import sys
-+import shlex
- import time
- import logging
- import subprocess
-@@ -43,10 +44,10 @@ def gen_key(path):
- '''
- Generate a key for use with salt-ssh
- '''
-- cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
-+ cmd = ["ssh-keygen", "-P", '""', "-f", path, "-t", "rsa", "-q"]
- if not os.path.isdir(os.path.dirname(path)):
- os.makedirs(os.path.dirname(path))
-- subprocess.call(cmd, shell=True)
-+ subprocess.call(cmd)
-
-
- def gen_shell(opts, **kwargs):
-@@ -289,8 +290,7 @@ class Shell(object):
- '''
- try:
- proc = salt.utils.nb_popen.NonBlockingPopen(
-- cmd,
-- shell=True,
-+ self._split_cmd(cmd),
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- )
-@@ -369,6 +369,21 @@ class Shell(object):
-
- return self._run_cmd(cmd)
-
-+ def _split_cmd(self, cmd):
-+ """
-+ Split a command string so that it is suitable to pass to Popen without
-+ shell=True. This prevents shell injection attacks in the options passed
-+ to ssh or some other command.
-+ """
-+ try:
-+ ssh_part, cmd_part = cmd.split("/bin/sh")
-+ except ValueError:
-+ cmd_lst = shlex.split(cmd)
-+ else:
-+ cmd_lst = shlex.split(ssh_part)
-+ cmd_lst.append("/bin/sh {}".format(cmd_part))
-+ return cmd_lst
-+
- def _run_cmd(self, cmd, key_accept=False, passwd_retries=3):
- '''
- Execute a shell command via VT. This is blocking and assumes that ssh
-@@ -378,8 +393,7 @@ class Shell(object):
- return '', 'No command or passphrase', 245
-
- term = salt.utils.vt.Terminal(
-- cmd,
-- shell=True,
-+ self._split_cmd(cmd),
- log_stdout=True,
- log_stdout_level='trace',
- log_stderr=True,
-diff --git a/salt/modules/tls.py b/salt/modules/tls.py
-index af845621a3..116b5fe379 100644
---- a/salt/modules/tls.py
-+++ b/salt/modules/tls.py
-@@ -798,12 +798,13 @@ def create_ca(ca_name,
- if old_key.strip() == keycontent.strip():
- write_key = False
- else:
-- log.info('Saving old CA ssl key in %s', bck)
-- with salt.utils.files.fopen(bck, 'w') as bckf:
-+ log.info('Saving old CA ssl key in {0}'.format(bck))
-+ fp = os.open(bck, os.O_CREAT | os.O_RDWR, 0o600)
-+ with os.fdopen(fp, 'w') as bckf:
- bckf.write(old_key)
-- os.chmod(bck, 0o600)
- if write_key:
-- with salt.utils.files.fopen(ca_keyp, 'wb') as ca_key:
-+ fp = os.open(ca_keyp, os.O_CREAT | os.O_RDWR, 0o600)
-+ with os.fdopen(fp, 'wb') as ca_key:
- ca_key.write(salt.utils.stringutils.to_bytes(keycontent))
-
- with salt.utils.files.fopen(certp, 'wb') as ca_crt:
-@@ -1115,9 +1116,9 @@ def create_csr(ca_name,
- req.sign(key, salt.utils.stringutils.to_str(digest))
-
- # Write private key and request
-- with salt.utils.files.fopen('{0}/{1}.key'.format(csr_path,
-- csr_filename),
-- 'wb+') as priv_key:
-+ priv_keyp = '{0}/{1}.key'.format(csr_path, csr_filename)
-+ fp = os.open(priv_keyp, os.O_CREAT | os.O_RDWR, 0o600)
-+ with os.fdopen(fp, 'wb+') as priv_key:
- priv_key.write(
- salt.utils.stringutils.to_bytes(
- OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
-@@ -1266,7 +1267,8 @@ def create_self_signed_cert(tls_dir='tls',
- priv_key_path = '{0}/{1}/certs/{2}.key'.format(cert_base_path(),
- tls_dir,
- cert_filename)
-- with salt.utils.files.fopen(priv_key_path, 'wb+') as priv_key:
-+ fp = os.open(priv_key_path, os.O_CREAT | os.O_RDWR, 0o600)
-+ with os.fdopen(fp, 'wb+') as priv_key:
- priv_key.write(
- salt.utils.stringutils.to_bytes(
- OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py
-index 31a24bb420..4e5b6b093a 100644
+index dec19b37ef..cba1ec574f 100644
--- a/salt/netapi/__init__.py
+++ b/salt/netapi/__init__.py
-@@ -3,24 +3,36 @@
- Make api awesomeness
- '''
- from __future__ import absolute_import, print_function, unicode_literals
-+
-+import copy
-+
- # Import Python libs
- import inspect
-+import logging
- import os
-
- # Import Salt libs
- import salt.log # pylint: disable=W0611
-+import salt.auth
- import salt.client
- import salt.config
-+import salt.daemons.masterapi
- import salt.runner
- import salt.syspaths
- import salt.wheel
- import salt.utils.args
- import salt.client.ssh.client
- import salt.exceptions
-+import salt.utils.args
-+import salt.utils.minions
-+import salt.wheel
-+from salt.defaults import DEFAULT_TARGET_DELIM
-
- # Import third party libs
- from salt.ext import six
-
-+log = logging.getLogger(__name__)
-+
-
- class NetapiClient(object):
- '''
-@@ -34,6 +46,15 @@ class NetapiClient(object):
-
- def __init__(self, opts):
- self.opts = opts
-+ apiopts = copy.deepcopy(self.opts)
-+ apiopts["enable_ssh_minions"] = True
-+ apiopts["cachedir"] = os.path.join(opts["cachedir"], "saltapi")
-+ if not os.path.exists(apiopts["cachedir"]):
-+ os.makedirs(apiopts["cachedir"])
-+ self.resolver = salt.auth.Resolver(apiopts)
-+ self.loadauth = salt.auth.LoadAuth(apiopts)
-+ self.key = salt.daemons.masterapi.access_keys(apiopts)
-+ self.ckminions = salt.utils.minions.CkMinions(apiopts)
-
- def _is_master_running(self):
- '''
-@@ -55,6 +76,49 @@ class NetapiClient(object):
- self.opts['sock_dir'],
- ipc_file))
+@@ -109,6 +109,49 @@ class NetapiClient:
+ "Authorization error occurred."
+ )
+ def _prep_auth_info(self, clear_load):
+ sensitive_load_keys = []
@@ -230,352 +63,58 @@ index 31a24bb420..4e5b6b093a 100644
+ )
+
def run(self, low):
- '''
+ """
Execute the specified function in the specified client by passing the
-@@ -80,6 +144,9 @@ class NetapiClient(object):
- raise salt.exceptions.EauthAuthenticationError(
- 'Raw shell option not allowed.')
-
-+ if low['client'] == 'ssh':
-+ self._authorize_ssh(low)
-+
- l_fun = getattr(self, low['client'])
- f_call = salt.utils.args.format_call(l_fun, low)
- return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
diff --git a/tests/integration/netapi/test_client.py b/tests/integration/netapi/test_client.py
-index 08030f31ec..b99bdfe313 100644
+index 70471d3148..9eb6e52920 100644
--- a/tests/integration/netapi/test_client.py
+++ b/tests/integration/netapi/test_client.py
-@@ -1,26 +1,30 @@
- # encoding: utf-8
--
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
-+import copy
- import logging
- import os
- import time
-
-+import salt.config
-+import salt.netapi
-+import salt.utils.files
-+import salt.utils.platform
-+import salt.utils.pycrypto
-+
- # Import Salt Testing libs
- from tests.support.paths import TMP_CONF_DIR, TMP
+@@ -15,10 +15,12 @@ from tests.support.helpers import (
+ SKIP_IF_NOT_RUNNING_PYTEST,
+ SaveRequestsPostHandler,
+ Webserver,
++ requires_sshd_server,
+ slowTest,
+ )
+ from tests.support.mixins import AdaptedConfigurationTestCaseMixin
+ from tests.support.mock import patch
++from tests.support.paths import TMP, TMP_CONF_DIR
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
- from tests.support.mock import patch
--from tests.support.case import SSHCase
-+from tests.support.case import ModuleCase, SSHCase
-+from salt.exceptions import EauthAuthenticationError
- from tests.support.helpers import (
- Webserver,
- SaveRequestsPostHandler,
- requires_sshd_server
- )
--# Import Salt libs
--import salt.config
--import salt.netapi
-
- from salt.exceptions import (
- EauthAuthenticationError
-@@ -174,6 +178,10 @@ class NetapiSSHClientTest(SSHCase):
- '''
- opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, 'master'))
+@@ -178,7 +180,12 @@ class NetapiSSHClientTest(SSHCase):
+ """
+ opts = AdaptedConfigurationTestCaseMixin.get_config("client_config").copy()
self.netapi = salt.netapi.NetapiClient(opts)
-+ opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master"))
-+ naopts = copy.deepcopy(opts)
-+ naopts["ignore_host_keys"] = True
-+ self.netapi = salt.netapi.NetapiClient(naopts)
-
- self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
- self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR)
-@@ -271,3 +279,281 @@ class NetapiSSHClientTest(SSHCase):
-
- self.assertEqual(ret, None)
- self.assertFalse(os.path.exists('badfile.txt'))
-+
-+ @staticmethod
-+ def cleanup_file(path):
-+ try:
-+ os.remove(path)
-+ except OSError:
-+ pass
-+
-+ @staticmethod
-+ def cleanup_dir(path):
-+ try:
-+ salt.utils.files.rm_rf(path)
-+ except OSError:
-+ pass
-+
-+ def test_shell_inject_ssh_priv(self):
-+ """
-+ Verify CVE-2020-16846 for ssh_priv variable
-+ """
-+ # ZDI-CAN-11143
-+ path = "/tmp/test-11143"
-+ self.addCleanup(self.cleanup_file, path)
-+ self.addCleanup(self.cleanup_file, "aaa")
-+ self.addCleanup(self.cleanup_file, "aaa.pub")
-+ self.addCleanup(self.cleanup_dir, "aaa|id>")
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "www.zerodayinitiative.com",
-+ "ssh_priv": "aaa|id>{} #".format(path),
-+ "fun": "test.ping",
-+ "eauth": "auto",
-+ "username": "saltdev_auto",
-+ "password": "saltdev",
-+ }
-+ ret = self.netapi.run(low)
-+ self.assertFalse(os.path.exists(path))
-+
-+ def test_shell_inject_tgt(self):
-+ """
-+ Verify CVE-2020-16846 for tgt variable
-+ """
-+ # ZDI-CAN-11167
-+ path = "/tmp/test-11167"
-+ self.addCleanup(self.cleanup_file, path)
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "root|id>{} #@127.0.0.1".format(path),
-+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
-+ "rosters": "/",
-+ "fun": "test.ping",
-+ "eauth": "auto",
-+ "username": "saltdev_auto",
-+ "password": "saltdev",
-+ }
-+ ret = self.netapi.run(low)
-+ self.assertFalse(os.path.exists(path))
-+
-+ def test_shell_inject_ssh_options(self):
-+ """
-+ Verify CVE-2020-16846 for ssh_options
-+ """
-+ # ZDI-CAN-11169
-+ path = "/tmp/test-11169"
-+ self.addCleanup(self.cleanup_file, path)
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "127.0.0.1",
-+ "renderer": "cheetah",
-+ "fun": "test.ping",
-+ "eauth": "auto",
-+ "username": "saltdev_auto",
-+ "password": "saltdev",
-+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
-+ "rosters": "/",
-+ "ssh_options": ["|id>{} #".format(path), "lol"],
-+ }
-+ ret = self.netapi.run(low)
-+ self.assertFalse(os.path.exists(path))
-+
-+ def test_shell_inject_ssh_port(self):
-+ """
-+ Verify CVE-2020-16846 for ssh_port variable
-+ """
-+ # ZDI-CAN-11172
-+ path = "/tmp/test-11172"
-+ self.addCleanup(self.cleanup_file, path)
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "127.0.0.1",
-+ "renderer": "cheetah",
-+ "fun": "test.ping",
-+ "eauth": "auto",
-+ "username": "saltdev_auto",
-+ "password": "saltdev",
-+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
-+ "rosters": "/",
-+ "ssh_port": "hhhhh|id>{} #".format(path),
-+ }
-+ ret = self.netapi.run(low)
-+ self.assertFalse(os.path.exists(path))
-+
-+ def test_shell_inject_remote_port_forwards(self):
-+ """
-+ Verify CVE-2020-16846 for remote_port_forwards variable
-+ """
-+ # ZDI-CAN-11173
-+ path = "/tmp/test-1173"
-+ self.addCleanup(self.cleanup_file, path)
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "127.0.0.1",
-+ "renderer": "cheetah",
-+ "fun": "test.ping",
-+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
-+ "rosters": "/",
-+ "ssh_remote_port_forwards": "hhhhh|id>{} #, lol".format(path),
-+ "eauth": "auto",
-+ "username": "saltdev_auto",
-+ "password": "saltdev",
-+ }
-+ ret = self.netapi.run(low)
-+ self.assertFalse(os.path.exists(path))
-+
-+
-+@requires_sshd_server
-+class NetapiSSHClientAuthTest(SSHCase):
-+
-+ USERA = "saltdev"
-+ USERA_PWD = "saltdev"
-+
-+ def setUp(self):
-+ """
-+ Set up a NetapiClient instance
-+ """
+- self.priv_file = os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key")
+ opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master"))
+ naopts = copy.deepcopy(opts)
+ naopts["ignore_host_keys"] = True
+ self.netapi = salt.netapi.NetapiClient(naopts)
+
+ self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
-+ self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR)
-+ # Initialize salt-ssh
-+ self.run_function("test.ping")
-+ self.mod_case = ModuleCase()
-+ try:
-+ add_user = self.mod_case.run_function(
-+ "user.add", [self.USERA], createhome=False
-+ )
-+ self.assertTrue(add_user)
-+ if salt.utils.platform.is_darwin():
-+ hashed_password = self.USERA_PWD
-+ else:
-+ hashed_password = salt.utils.pycrypto.gen_hash(password=self.USERA_PWD)
-+ add_pwd = self.mod_case.run_function(
-+ "shadow.set_password", [self.USERA, hashed_password],
-+ )
-+ self.assertTrue(add_pwd)
-+ except AssertionError:
-+ self.mod_case.run_function("user.delete", [self.USERA], remove=True)
-+ self.skipTest("Could not add user or password, skipping test")
-+
-+ def tearDown(self):
-+ del self.netapi
-+ self.mod_case.run_function("user.delete", [self.USERA], remove=True)
-+
-+ @classmethod
-+ def setUpClass(cls):
-+ cls.post_webserver = Webserver(handler=SaveRequestsPostHandler)
-+ cls.post_webserver.start()
-+ cls.post_web_root = cls.post_webserver.web_root
-+ cls.post_web_handler = cls.post_webserver.handler
-+
-+ @classmethod
-+ def tearDownClass(cls):
-+ cls.post_webserver.stop()
-+ del cls.post_webserver
-+
-+ def test_ssh_auth_bypass(self):
-+ """
-+ CVE-2020-25592 - Bogus eauth raises exception.
-+ """
-+ low = {
-+ "roster": "cache",
-+ "client": "ssh",
-+ "tgt": "127.0.0.1",
-+ "renderer": "cheetah",
-+ "fun": "test.ping",
+ self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR)
+ self.roster_file = os.path.join(self.rosters, "roster")
+
+@@ -325,7 +332,7 @@ class NetapiSSHClientTest(SSHCase):
+ "roster": "cache",
+ "client": "ssh",
+ "tgt": "root|id>{} #@127.0.0.1".format(path),
+- "roster_file": self.roster_file,
+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
-+ "rosters": "/",
-+ "eauth": "xx",
-+ }
-+ with self.assertRaises(salt.exceptions.EauthAuthenticationError):
-+ ret = self.netapi.run(low)
-+
-+ def test_ssh_auth_valid(self):
-+ """
-+ CVE-2020-25592 - Valid eauth works as expected.
-+ """
-+ low = {
-+ "client": "ssh",
-+ "tgt": "localhost",
-+ "fun": "test.ping",
-+ "roster_file": "roster",
-+ "rosters": [self.rosters],
-+ "ssh_priv": self.priv_file,
-+ "eauth": "pam",
-+ "username": "saltdev",
-+ "password": "saltdev",
-+ }
-+ ret = self.netapi.run(low)
-+ assert "localhost" in ret
-+ assert ret["localhost"]["return"] is True
-+
-+ def test_ssh_auth_invalid(self):
-+ """
-+ CVE-2020-25592 - Wrong password raises exception.
-+ """
-+ low = {
-+ "client": "ssh",
-+ "tgt": "localhost",
-+ "fun": "test.ping",
-+ "roster_file": "roster",
-+ "rosters": [self.rosters],
-+ "ssh_priv": self.priv_file,
-+ "eauth": "pam",
-+ "username": "saltdev",
-+ "password": "notvalidpassword",
-+ }
-+ with self.assertRaises(salt.exceptions.EauthAuthenticationError):
-+ ret = self.netapi.run(low)
-+
-+ def test_ssh_auth_invalid_acl(self):
-+ """
-+ CVE-2020-25592 - Eauth ACL enforced.
-+ """
-+ low = {
-+ "client": "ssh",
-+ "tgt": "localhost",
-+ "fun": "at.at",
-+ "args": ["12:05am", "echo foo"],
-+ "roster_file": "roster",
-+ "rosters": [self.rosters],
-+ "ssh_priv": self.priv_file,
-+ "eauth": "pam",
-+ "username": "saltdev",
-+ "password": "notvalidpassword",
-+ }
-+ with self.assertRaises(salt.exceptions.EauthAuthenticationError):
-+ ret = self.netapi.run(low)
-+
-+ def test_ssh_auth_token(self):
-+ """
-+ CVE-2020-25592 - Eauth tokens work as expected.
-+ """
-+ low = {
-+ "eauth": "pam",
-+ "username": "saltdev",
-+ "password": "saltdev",
-+ }
-+ ret = self.netapi.loadauth.mk_token(low)
-+ assert "token" in ret and ret["token"]
-+ low = {
-+ "client": "ssh",
-+ "tgt": "localhost",
-+ "fun": "test.ping",
-+ "roster_file": "roster",
-+ "rosters": [self.rosters],
-+ "ssh_priv": self.priv_file,
-+ "token": ret["token"],
-+ }
-+ ret = self.netapi.run(low)
-+ assert "localhost" in ret
-+ assert ret["localhost"]["return"] is True
+ "rosters": "/",
+ "fun": "test.ping",
+ "eauth": "auto",
+@@ -355,7 +362,7 @@ class NetapiSSHClientTest(SSHCase):
+ "eauth": "auto",
+ "username": "saltdev_auto",
+ "password": "saltdev",
+- "roster_file": self.roster_file,
++ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
+ "rosters": "/",
+ "ssh_options": ["|id>{} #".format(path), "lol"],
+ }
--
-2.28.0
+2.29.2
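
The netapi hunk above inserts an authentication gate in front of the ssh client, and the test changes exercise it through mk_token and through raw eauth credentials. A reduced sketch of the gate's contract (the token store and the password check are stand-ins for salt.auth.LoadAuth and the pam backend):

class EauthAuthenticationError(Exception):
    pass

def authorize_ssh(low, valid_tokens, check_creds):
    # An ssh low chunk must carry either a known token or eauth
    # credentials that authenticate; anything else is rejected.
    if "token" in low:
        if low["token"] not in valid_tokens:
            raise EauthAuthenticationError("Authentication failure of type 'token'.")
        return
    if not check_creds(low.get("eauth"), low.get("username"), low.get("password")):
        raise EauthAuthenticationError("Authentication failure of type 'eauth'.")

def pam_ok(eauth, user, pwd):
    return eauth == "pam" and (user, pwd) == ("saltdev", "saltdev")

authorize_ssh(
    {"client": "ssh", "eauth": "pam", "username": "saltdev", "password": "saltdev"},
    set(),
    pam_ok,
)
for bad in (
    {"client": "ssh", "eauth": "xx"},                  # bogus eauth backend
    {"client": "ssh", "token": "deadbeef"},            # unknown token
    {"client": "ssh", "eauth": "pam", "username": "saltdev", "password": "wrong"},
):
    try:
        authorize_ssh(bad, {"goodtoken"}, pam_ok)
    except EauthAuthenticationError:
        pass
    else:
        raise AssertionError("should have been rejected: {}".format(bad))
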
diff --git a/fix-failing-unit-tests-for-batch-async.patch b/fix-failing-unit-tests-for-batch-async.patch
index 852eddb..c5246fe 100644
--- a/fix-failing-unit-tests-for-batch-async.patch
+++ b/fix-failing-unit-tests-for-batch-async.patch
@@ -1,4 +1,4 @@
-From e6f6b38c75027c4c4f6395117b734dce6fb7433e Mon Sep 17 00:00:00 2001
+From 3b96edd8d23c65c6788a9980114a7e1c220c9640 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Fri, 4 Oct 2019 15:00:50 +0100
@@ -6,49 +6,49 @@ Subject: [PATCH] Fix failing unit tests for batch async
---
salt/cli/batch_async.py | 2 +-
- tests/unit/cli/test_batch_async.py | 57 ++++++++++++++++++++++----------------
- 2 files changed, 34 insertions(+), 25 deletions(-)
+ tests/unit/cli/test_batch_async.py | 66 +++++++++++++++++-------------
+ 2 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index f9e736f804..6d0dca1da5 100644
+index 89405ba917..b2d04f9d4d 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -88,7 +88,7 @@ class BatchAsync(object):
- io_loop=ioloop,
- keep_loop=True)
+@@ -91,7 +91,7 @@ class BatchAsync:
+ keep_loop=True,
+ )
self.scheduled = False
- self.patterns = {}
+ self.patterns = set()
def __set_event_handler(self):
- ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid)
+ ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index 441f9c58b9..12dfe543bc 100644
+index 66332a548a..c18b42be57 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -61,8 +61,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
ret = self.batch.start()
# assert start_batch is called later with batch_presence_ping_timeout as param
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.batch_presence_ping_timeout, self.batch.start_batch))
+- (self.batch.batch_presence_ping_timeout, self.batch.start_batch),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.start_batch,))
++ (self.batch.start_batch,),
+ )
# assert test.ping called
self.assertEqual(
- self.batch.local.run_job_async.call_args[0],
-@@ -88,8 +88,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -81,8 +81,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
ret = self.batch.start()
# assert start_batch is called later with gather_job_timeout as param
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.opts['gather_job_timeout'], self.batch.start_batch))
+- (self.batch.opts["gather_job_timeout"], self.batch.start_batch),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.start_batch,))
++ (self.batch.start_batch,),
+ )
def test_batch_fire_start_event(self):
- self.batch.minions = set(['foo', 'bar'])
-@@ -113,12 +113,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -107,12 +107,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
def test_start_batch_calls_next(self):
self.batch.run_next = MagicMock(return_value=MagicMock())
self.batch.event = MagicMock()
@@ -59,127 +59,138 @@ index 441f9c58b9..12dfe543bc 100644
self.assertEqual(self.batch.initialized, True)
- self.assertEqual(len(self.batch.run_next.mock_calls), 1)
+ self.assertEqual(
-+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.run_next,))
++ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,)
++ )
def test_batch_fire_done_event(self):
- self.batch.targeted_minions = {'foo', 'baz', 'bar'}
-@@ -154,14 +153,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.batch.targeted_minions = {"foo", "baz", "bar"}
+@@ -147,14 +146,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
- future.set_result({'minions': ['foo', 'bar']})
+ future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
- ret = self.batch.run_next().result()
+ self.batch.run_next()
self.assertEqual(
self.batch.local.run_job_async.call_args[0],
- ({'foo', 'bar'}, 'my.fun', [], 'list')
+ ({"foo", "bar"}, "my.fun", [], "list"),
)
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.opts['timeout'], self.batch.find_job, {'foo', 'bar'})
+- (self.batch.opts["timeout"], self.batch.find_job, {"foo", "bar"}),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.find_job, {'foo', 'bar'})
++ (self.batch.find_job, {"foo", "bar"}),
)
- self.assertEqual(self.batch.active, {'bar', 'foo'})
+ self.assertEqual(self.batch.active, {"bar", "foo"})
-@@ -252,13 +251,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -249,15 +248,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.active, set())
- self.assertEqual(self.batch.done_minions, {'foo'})
+ self.assertEqual(self.batch.done_minions, {"foo"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.batch_delay, self.batch.run_next))
+- (self.batch.batch_delay, self.batch.run_next),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.schedule_next,))
++ (self.batch.schedule_next,),
+ )
def test_batch__event_handler_find_job_return(self):
self.batch.event = MagicMock(
-- unpack=MagicMock(return_value=('salt/job/1236/ret/foo', {'id': 'foo'})))
-+ unpack=MagicMock(return_value=('salt/job/1236/ret/foo', {'id': 'foo', 'return': 'deadbeaf'})))
+- unpack=MagicMock(return_value=("salt/job/1236/ret/foo", {"id": "foo"}))
++ unpack=MagicMock(
++ return_value=(
++ "salt/job/1236/ret/foo",
++ {"id": "foo", "return": "deadbeaf"},
++ )
++ )
+ )
self.batch.start()
-+ self.batch.patterns.add(('salt/job/1236/ret/*', 'find_job_return'))
++ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return"))
self.batch._BatchAsync__event_handler(MagicMock())
- self.assertEqual(self.batch.find_job_returned, {'foo'})
+ self.assertEqual(self.batch.find_job_returned, {"foo"})
-@@ -275,10 +275,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -274,14 +279,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
-+ self.batch.minions = set(['foo', 'bar'])
++ self.batch.minions = {"foo", "bar"}
+ self.batch.jid_gen = MagicMock(return_value="1234")
+ tornado.gen.sleep = MagicMock(return_value=future)
- self.batch.find_job({'foo', 'bar'})
+ self.batch.find_job({"foo", "bar"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.opts['gather_job_timeout'], self.batch.check_find_job, {'foo', 'bar'})
+- (
+- self.batch.opts["gather_job_timeout"],
+- self.batch.check_find_job,
+- {"foo", "bar"},
+- ),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.check_find_job, {'foo', 'bar'}, "1234")
++ (self.batch.check_find_job, {"foo", "bar"}, "1234"),
)
@tornado.testing.gen_test
-@@ -288,17 +291,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -291,17 +295,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
-+ self.batch.minions = set(['foo', 'bar'])
++ self.batch.minions = {"foo", "bar"}
+ self.batch.jid_gen = MagicMock(return_value="1234")
+ tornado.gen.sleep = MagicMock(return_value=future)
- self.batch.find_job({'foo', 'bar'})
+ self.batch.find_job({"foo", "bar"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
-- (self.batch.opts['gather_job_timeout'], self.batch.check_find_job, {'foo'})
+- (self.batch.opts["gather_job_timeout"], self.batch.check_find_job, {"foo"}),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
-+ (self.batch.check_find_job, {'foo'}, "1234")
++ (self.batch.check_find_job, {"foo"}, "1234"),
)
def test_batch_check_find_job_did_not_return(self):
self.batch.event = MagicMock()
- self.batch.active = {'foo'}
+ self.batch.active = {"foo"}
self.batch.find_job_returned = set()
-- self.batch.check_find_job({'foo'})
-+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') }
-+ self.batch.check_find_job({'foo'}, jid="1234")
+- self.batch.check_find_job({"foo"})
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ self.batch.check_find_job({"foo"}, jid="1234")
self.assertEqual(self.batch.find_job_returned, set())
self.assertEqual(self.batch.active, set())
self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0)
-@@ -306,9 +313,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -309,9 +317,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
def test_batch_check_find_job_did_return(self):
self.batch.event = MagicMock()
- self.batch.find_job_returned = {'foo'}
-- self.batch.check_find_job({'foo'})
-+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') }
-+ self.batch.check_find_job({'foo'}, jid="1234")
+ self.batch.find_job_returned = {"foo"}
+- self.batch.check_find_job({"foo"})
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ self.batch.check_find_job({"foo"}, jid="1234")
self.assertEqual(
- self.batch.event.io_loop.add_callback.call_args[0],
+ self.batch.event.io_loop.spawn_callback.call_args[0],
- (self.batch.find_job, {'foo'})
+ (self.batch.find_job, {"foo"}),
)
-@@ -329,7 +337,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -332,7 +341,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# both not yet done but only 'foo' responded to find_job
- not_done = {'foo', 'bar'}
+ not_done = {"foo", "bar"}
- self.batch.check_find_job(not_done)
-+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') }
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+ self.batch.check_find_job(not_done, jid="1234")
# assert 'bar' removed from active
- self.assertEqual(self.batch.active, {'foo'})
-@@ -339,7 +348,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.assertEqual(self.batch.active, {"foo"})
+@@ -342,7 +352,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# assert 'find_job' scheduled again only for 'foo'
self.assertEqual(
- self.batch.event.io_loop.add_callback.call_args[0],
+ self.batch.event.io_loop.spawn_callback.call_args[0],
- (self.batch.find_job, {'foo'})
+ (self.batch.find_job, {"foo"}),
)
-@@ -347,4 +356,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+@@ -350,4 +360,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.event = MagicMock()
self.batch.scheduled = True
self.batch.schedule_next()
- self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0)
+ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
--
-2.16.4
+2.29.2
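The assertions above track batch_async's move from IOLoop.call_later(delay,
fn, *args) to IOLoop.spawn_callback(fn, *args). A minimal standalone sketch
of the two scheduling styles, assuming only that tornado is installed; the
callback name is illustrative, not from the patch:

    import tornado.ioloop

    def start_batch():
        print("batch started")
        tornado.ioloop.IOLoop.current().stop()

    loop = tornado.ioloop.IOLoop.current()
    # old style: run after a timeout, returns a timer handle to manage
    # loop.call_later(5.0, start_batch)
    # new style: run on the next loop iteration, fire-and-forget
    loop.spawn_callback(start_batch)
    loop.start()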
diff --git a/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch b/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch
deleted file mode 100644
index 9bde3a4..0000000
--- a/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch
+++ /dev/null
@@ -1,738 +0,0 @@
-From c3d8ef9d1387ac3d69fbbd1f8042bf89ba87821a Mon Sep 17 00:00:00 2001
-From: Victor Zhestkov
-Date: Tue, 13 Oct 2020 09:28:39 +0300
-Subject: [PATCH] Fix for bsc#1102248 - psutil is broken and so Process
- is not working on Python 3 as it is implemented
-
----
- salt/modules/ps.py | 268 ++++++++++++++++++++++++++-------------------
- 1 file changed, 157 insertions(+), 111 deletions(-)
-
-diff --git a/salt/modules/ps.py b/salt/modules/ps.py
-index bb37873f48..9925e29968 100644
---- a/salt/modules/ps.py
-+++ b/salt/modules/ps.py
-@@ -1,31 +1,33 @@
- # -*- coding: utf-8 -*-
--'''
-+"""
- A salt interface to psutil, a system and process library.
- See http://code.google.com/p/psutil.
-
- :depends: - psutil Python module, version 0.3.0 or later
- - python-utmp package (optional)
--'''
-+"""
-
- # Import python libs
--from __future__ import absolute_import, unicode_literals, print_function
--import time
-+from __future__ import absolute_import, print_function, unicode_literals
-+
- import datetime
- import re
-+import time
-
- # Import salt libs
- import salt.utils.data
--from salt.exceptions import SaltInvocationError, CommandExecutionError
-
- # Import third party libs
- import salt.utils.decorators.path
-+from salt.exceptions import CommandExecutionError, SaltInvocationError
- from salt.ext import six
-+
- # pylint: disable=import-error
- try:
- import salt.utils.psutil_compat as psutil
-
- HAS_PSUTIL = True
-- PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0)
-+ PSUTIL2 = getattr(psutil, "version_info", ()) >= (2, 0)
- except ImportError:
- HAS_PSUTIL = False
- # pylint: enable=import-error
-@@ -33,7 +35,10 @@ except ImportError:
-
- def __virtual__():
- if not HAS_PSUTIL:
-- return False, 'The ps module cannot be loaded: python module psutil not installed.'
-+ return (
-+ False,
-+ "The ps module cannot be loaded: python module psutil not installed.",
-+ )
-
- # Functions and attributes used in this execution module seem to have been
- # added as of psutil 0.3.0, from an inspection of the source code. Only
-@@ -44,15 +49,20 @@ def __virtual__():
- # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.).
- if psutil.version_info >= (0, 3, 0):
- return True
-- return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info))
-+ return (
-+ False,
-+ "The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0".format(
-+ psutil.version_info
-+ ),
-+ )
-
-
- def _get_proc_cmdline(proc):
-- '''
-+ """
- Returns the cmdline of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- try:
- return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline)
- except (psutil.NoSuchProcess, psutil.AccessDenied):
-@@ -60,23 +70,25 @@ def _get_proc_cmdline(proc):
-
-
- def _get_proc_create_time(proc):
-- '''
-+ """
- Returns the create_time of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- try:
-- return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time)
-+ return salt.utils.data.decode(
-+ proc.create_time() if PSUTIL2 else proc.create_time
-+ )
- except (psutil.NoSuchProcess, psutil.AccessDenied):
- return None
-
-
- def _get_proc_name(proc):
-- '''
-+ """
- Returns the name of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- try:
- return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name)
- except (psutil.NoSuchProcess, psutil.AccessDenied):
-@@ -84,11 +96,11 @@ def _get_proc_name(proc):
-
-
- def _get_proc_status(proc):
-- '''
-+ """
- Returns the status of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- try:
- return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status)
- except (psutil.NoSuchProcess, psutil.AccessDenied):
-@@ -96,11 +108,11 @@ def _get_proc_status(proc):
-
-
- def _get_proc_username(proc):
-- '''
-+ """
- Returns the username of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- try:
- return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username)
- except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):
-@@ -108,16 +120,16 @@ def _get_proc_username(proc):
-
-
- def _get_proc_pid(proc):
-- '''
-+ """
- Returns the pid of a Process instance.
-
- It's backward compatible with < 2.0 versions of psutil.
-- '''
-+ """
- return proc.pid
-
-
- def top(num_processes=5, interval=3):
-- '''
-+ """
- Return a list of top CPU consuming processes during the interval.
- num_processes = return the top N CPU consuming processes
- interval = the number of seconds to sample CPU usage over
-@@ -129,57 +141,63 @@ def top(num_processes=5, interval=3):
- salt '*' ps.top
-
- salt '*' ps.top 5 10
-- '''
-+ """
- result = []
- start_usage = {}
- for pid in psutil.pids():
- try:
- process = psutil.Process(pid)
-- user, system = process.cpu_times()
-- except ValueError:
-- user, system, _, _ = process.cpu_times()
- except psutil.NoSuchProcess:
- continue
-+ else:
-+ try:
-+ user, system = process.cpu_times()[:2]
-+ except psutil.ZombieProcess:
-+ user = system = 0.0
- start_usage[process] = user + system
- time.sleep(interval)
- usage = set()
- for process, start in six.iteritems(start_usage):
- try:
-- user, system = process.cpu_times()
-- except ValueError:
-- user, system, _, _ = process.cpu_times()
-+ user, system = process.cpu_times()[:2]
- except psutil.NoSuchProcess:
- continue
- now = user + system
- diff = now - start
- usage.add((diff, process))
-
-- for idx, (diff, process) in enumerate(reversed(sorted(usage))):
-- if num_processes and idx >= num_processes:
-- break
-- if len(_get_proc_cmdline(process)) == 0:
-- cmdline = _get_proc_name(process)
-- else:
-- cmdline = _get_proc_cmdline(process)
-- info = {'cmd': cmdline,
-- 'user': _get_proc_username(process),
-- 'status': _get_proc_status(process),
-- 'pid': _get_proc_pid(process),
-- 'create_time': _get_proc_create_time(process),
-- 'cpu': {},
-- 'mem': {},
-+ for diff, process in sorted(usage, key=lambda x: x[0], reverse=True):
-+ info = {
-+ "cmd": _get_proc_cmdline(process) or _get_proc_name(process),
-+ "user": _get_proc_username(process),
-+ "status": _get_proc_status(process),
-+ "pid": _get_proc_pid(process),
-+ "create_time": _get_proc_create_time(process),
-+ "cpu": {},
-+ "mem": {},
- }
-- for key, value in six.iteritems(process.cpu_times()._asdict()):
-- info['cpu'][key] = value
-- for key, value in six.iteritems(process.memory_info()._asdict()):
-- info['mem'][key] = value
-+ try:
-+ for key, value in six.iteritems(process.cpu_times()._asdict()):
-+ info["cpu"][key] = value
-+ for key, value in six.iteritems(process.memory_info()._asdict()):
-+ info["mem"][key] = value
-+ except psutil.NoSuchProcess:
-+ # Process ended since psutil.pids() was run earlier in this
-+ # function. Ignore it and do not include it in the
-+ # return data.
-+ continue
-+
- result.append(info)
-
-+ # Stop gathering process info since we've reached the desired number
-+ if len(result) >= num_processes:
-+ break
-+
- return result
-
-
- def get_pid_list():
-- '''
-+ """
- Return a list of process ids (PIDs) for all running processes.
-
- CLI Example:
-@@ -187,12 +205,12 @@ def get_pid_list():
- .. code-block:: bash
-
- salt '*' ps.get_pid_list
-- '''
-+ """
- return psutil.pids()
-
-
- def proc_info(pid, attrs=None):
-- '''
-+ """
- Return a dictionary of information for a process id (PID).
-
- CLI Example:
-@@ -209,7 +227,7 @@ def proc_info(pid, attrs=None):
- Optional list of desired process attributes. The list of possible
- attributes can be found here:
- http://pythonhosted.org/psutil/#psutil.Process
-- '''
-+ """
- try:
- proc = psutil.Process(pid)
- return proc.as_dict(attrs)
-@@ -218,7 +236,7 @@ def proc_info(pid, attrs=None):
-
-
- def kill_pid(pid, signal=15):
-- '''
-+ """
- Kill a process by PID.
-
- .. code-block:: bash
-@@ -239,7 +257,7 @@ def kill_pid(pid, signal=15):
- .. code-block:: bash
-
- salt 'minion' ps.kill_pid 2000 signal=9
-- '''
-+ """
- try:
- psutil.Process(pid).send_signal(signal)
- return True
-@@ -248,7 +266,7 @@ def kill_pid(pid, signal=15):
-
-
- def pkill(pattern, user=None, signal=15, full=False):
-- '''
-+ """
- Kill processes matching a pattern.
-
- .. code-block:: bash
-@@ -283,12 +301,15 @@ def pkill(pattern, user=None, signal=15, full=False):
- .. code-block:: bash
-
- salt '*' ps.pkill bash signal=9 user=tom
-- '''
-+ """
-
- killed = []
- for proc in psutil.process_iter():
-- name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
-+ name_match = (
-+ pattern in " ".join(_get_proc_cmdline(proc))
-+ if full
- else pattern in _get_proc_name(proc)
-+ )
- user_match = True if user is None else user == _get_proc_username(proc)
- if name_match and user_match:
- try:
-@@ -299,11 +320,11 @@ def pkill(pattern, user=None, signal=15, full=False):
- if not killed:
- return None
- else:
-- return {'killed': killed}
-+ return {"killed": killed}
-
-
--def pgrep(pattern, user=None, full=False):
-- '''
-+def pgrep(pattern, user=None, full=False, pattern_is_regex=False):
-+ """
- Return the pids for processes matching a pattern.
-
- If full is true, the full command line is searched for a match,
-@@ -323,6 +344,12 @@ def pgrep(pattern, user=None, full=False):
- A boolean value indicating whether only the name of the command or
- the full command line should be matched against the pattern.
-
-+ pattern_is_regex
-+ This flag enables ps.pgrep to mirror the regex search functionality
-+ found in the pgrep command line utility.
-+
-+ .. versionadded:: 3001
-+
- **Examples:**
-
- Find all httpd processes on all 'www' minions:
-@@ -336,20 +363,34 @@ def pgrep(pattern, user=None, full=False):
- .. code-block:: bash
-
- salt '*' ps.pgrep bash user=tom
-- '''
-+ """
-
- procs = []
-+
-+ if pattern_is_regex:
-+ pattern = re.compile(str(pattern))
-+
- for proc in psutil.process_iter():
-- name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
-- else pattern in _get_proc_name(proc)
-+ if full:
-+ process_line = " ".join(_get_proc_cmdline(proc))
-+ else:
-+ process_line = _get_proc_name(proc)
-+
-+ if pattern_is_regex:
-+ name_match = re.search(pattern, process_line)
-+ else:
-+ name_match = pattern in process_line
-+
- user_match = True if user is None else user == _get_proc_username(proc)
-+
- if name_match and user_match:
- procs.append(_get_proc_pid(proc))
-+
- return procs or None
-
-
- def cpu_percent(interval=0.1, per_cpu=False):
-- '''
-+ """
- Return the percent of time the CPU is busy.
-
- interval
-@@ -363,7 +404,7 @@ def cpu_percent(interval=0.1, per_cpu=False):
- .. code-block:: bash
-
- salt '*' ps.cpu_percent
-- '''
-+ """
- if per_cpu:
- result = list(psutil.cpu_percent(interval, True))
- else:
-@@ -372,7 +413,7 @@ def cpu_percent(interval=0.1, per_cpu=False):
-
-
- def cpu_times(per_cpu=False):
-- '''
-+ """
- Return the percent of time the CPU spends in each state,
- e.g. user, system, idle, nice, iowait, irq, softirq.
-
-@@ -385,7 +426,7 @@ def cpu_times(per_cpu=False):
- .. code-block:: bash
-
- salt '*' ps.cpu_times
-- '''
-+ """
- if per_cpu:
- result = [dict(times._asdict()) for times in psutil.cpu_times(True)]
- else:
-@@ -394,7 +435,7 @@ def cpu_times(per_cpu=False):
-
-
- def virtual_memory():
-- '''
-+ """
- .. versionadded:: 2014.7.0
-
- Return a dict that describes statistics about system memory usage.
-@@ -408,15 +449,15 @@ def virtual_memory():
- .. code-block:: bash
-
- salt '*' ps.virtual_memory
-- '''
-+ """
- if psutil.version_info < (0, 6, 0):
-- msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
-+ msg = "virtual_memory is only available in psutil 0.6.0 or greater"
- raise CommandExecutionError(msg)
- return dict(psutil.virtual_memory()._asdict())
-
-
- def swap_memory():
-- '''
-+ """
- .. versionadded:: 2014.7.0
-
- Return a dict that describes swap memory statistics.
-@@ -430,15 +471,15 @@ def swap_memory():
- .. code-block:: bash
-
- salt '*' ps.swap_memory
-- '''
-+ """
- if psutil.version_info < (0, 6, 0):
-- msg = 'swap_memory is only available in psutil 0.6.0 or greater'
-+ msg = "swap_memory is only available in psutil 0.6.0 or greater"
- raise CommandExecutionError(msg)
- return dict(psutil.swap_memory()._asdict())
-
-
- def disk_partitions(all=False):
-- '''
-+ """
- Return a list of disk partitions and their device, mount point, and
- filesystem type.
-
-@@ -451,14 +492,13 @@ def disk_partitions(all=False):
- .. code-block:: bash
-
- salt '*' ps.disk_partitions
-- '''
-- result = [dict(partition._asdict()) for partition in
-- psutil.disk_partitions(all)]
-+ """
-+ result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)]
- return result
-
-
- def disk_usage(path):
-- '''
-+ """
- Given a path, return a dict listing the total available space as well as
- the free space, and used space.
-
-@@ -467,12 +507,12 @@ def disk_usage(path):
- .. code-block:: bash
-
- salt '*' ps.disk_usage /home
-- '''
-+ """
- return dict(psutil.disk_usage(path)._asdict())
-
-
- def disk_partition_usage(all=False):
-- '''
-+ """
- Return a list of disk partitions plus the mount point, filesystem and usage
- statistics.
-
-@@ -481,15 +521,15 @@ def disk_partition_usage(all=False):
- .. code-block:: bash
-
- salt '*' ps.disk_partition_usage
-- '''
-+ """
- result = disk_partitions(all)
- for partition in result:
-- partition.update(disk_usage(partition['mountpoint']))
-+ partition.update(disk_usage(partition["mountpoint"]))
- return result
-
-
- def total_physical_memory():
-- '''
-+ """
- Return the total number of bytes of physical memory.
-
- CLI Example:
-@@ -497,9 +537,9 @@ def total_physical_memory():
- .. code-block:: bash
-
- salt '*' ps.total_physical_memory
-- '''
-+ """
- if psutil.version_info < (0, 6, 0):
-- msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
-+ msg = "virtual_memory is only available in psutil 0.6.0 or greater"
- raise CommandExecutionError(msg)
- try:
- return psutil.virtual_memory().total
-@@ -510,7 +550,7 @@ def total_physical_memory():
-
-
- def num_cpus():
-- '''
-+ """
- Return the number of CPUs.
-
- CLI Example:
-@@ -518,7 +558,7 @@ def num_cpus():
- .. code-block:: bash
-
- salt '*' ps.num_cpus
-- '''
-+ """
- try:
- return psutil.cpu_count()
- except AttributeError:
-@@ -528,7 +568,7 @@ def num_cpus():
-
-
- def boot_time(time_format=None):
-- '''
-+ """
- Return the boot time in number of seconds since the epoch began.
-
- CLI Example:
-@@ -545,7 +585,7 @@ def boot_time(time_format=None):
- .. code-block:: bash
-
- salt '*' ps.boot_time
-- '''
-+ """
- try:
- b_time = int(psutil.boot_time())
- except AttributeError:
-@@ -558,12 +598,12 @@ def boot_time(time_format=None):
- try:
- return b_time.strftime(time_format)
- except TypeError as exc:
-- raise SaltInvocationError('Invalid format string: {0}'.format(exc))
-+ raise SaltInvocationError("Invalid format string: {0}".format(exc))
- return b_time
-
-
- def network_io_counters(interface=None):
-- '''
-+ """
- Return network I/O statistics.
-
- CLI Example:
-@@ -573,7 +613,7 @@ def network_io_counters(interface=None):
- salt '*' ps.network_io_counters
-
- salt '*' ps.network_io_counters interface=eth0
-- '''
-+ """
- if not interface:
- return dict(psutil.net_io_counters()._asdict())
- else:
-@@ -585,7 +625,7 @@ def network_io_counters(interface=None):
-
-
- def disk_io_counters(device=None):
-- '''
-+ """
- Return disk I/O statistics.
-
- CLI Example:
-@@ -595,7 +635,7 @@ def disk_io_counters(device=None):
- salt '*' ps.disk_io_counters
-
- salt '*' ps.disk_io_counters device=sda1
-- '''
-+ """
- if not device:
- return dict(psutil.disk_io_counters()._asdict())
- else:
-@@ -607,7 +647,7 @@ def disk_io_counters(device=None):
-
-
- def get_users():
-- '''
-+ """
- Return logged-in users.
-
- CLI Example:
-@@ -615,7 +655,7 @@ def get_users():
- .. code-block:: bash
-
- salt '*' ps.get_users
-- '''
-+ """
- try:
- recs = psutil.users()
- return [dict(x._asdict()) for x in recs]
-@@ -634,14 +674,20 @@ def get_users():
- started = rec[8]
- if isinstance(started, tuple):
- started = started[0]
-- result.append({'name': rec[4], 'terminal': rec[2],
-- 'started': started, 'host': rec[5]})
-+ result.append(
-+ {
-+ "name": rec[4],
-+ "terminal": rec[2],
-+ "started": started,
-+ "host": rec[5],
-+ }
-+ )
- except ImportError:
- return False
-
-
- def lsof(name):
-- '''
-+ """
- Retrieve the lsof information of the given process name.
-
- CLI Example:
-@@ -649,17 +695,17 @@ def lsof(name):
- .. code-block:: bash
-
- salt '*' ps.lsof apache2
-- '''
-+ """
- sanitize_name = six.text_type(name)
-- lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name)
-+ lsof_infos = __salt__["cmd.run"]("lsof -c " + sanitize_name)
- ret = []
- ret.extend([sanitize_name, lsof_infos])
- return ret
-
-
--@salt.utils.decorators.path.which('netstat')
-+@salt.utils.decorators.path.which("netstat")
- def netstat(name):
-- '''
-+ """
- Retrieve the netstat information of the given process name.
-
- CLI Example:
-@@ -667,9 +713,9 @@ def netstat(name):
- .. code-block:: bash
-
- salt '*' ps.netstat apache2
-- '''
-+ """
- sanitize_name = six.text_type(name)
-- netstat_infos = __salt__['cmd.run']("netstat -nap")
-+ netstat_infos = __salt__["cmd.run"]("netstat -nap")
- found_infos = []
- ret = []
- for info in netstat_infos.splitlines():
-@@ -679,9 +725,9 @@ def netstat(name):
- return ret
-
-
--@salt.utils.decorators.path.which('ss')
-+@salt.utils.decorators.path.which("ss")
- def ss(name):
-- '''
-+ """
- Retrieve the ss information of the given process name.
-
- CLI Example:
-@@ -692,9 +738,9 @@ def ss(name):
-
- .. versionadded:: 2016.11.6
-
-- '''
-+ """
- sanitize_name = six.text_type(name)
-- ss_infos = __salt__['cmd.run']("ss -neap")
-+ ss_infos = __salt__["cmd.run"]("ss -neap")
- found_infos = []
- ret = []
- for info in ss_infos.splitlines():
-@@ -705,7 +751,7 @@ def ss(name):
-
-
- def psaux(name):
-- '''
-+ """
- Retrieve information from a "ps aux" listing filtered with the
- given pattern. The pattern can be a plain name or a regular
- expression (matched with re.search from the "re" module).
-@@ -715,11 +761,11 @@ def psaux(name):
- .. code-block:: bash
-
- salt '*' ps.psaux www-data.+apache2
-- '''
-+ """
- sanitize_name = six.text_type(name)
- pattern = re.compile(sanitize_name)
- salt_exception_pattern = re.compile("salt.+ps.psaux.+")
-- ps_aux = __salt__['cmd.run']("ps aux")
-+ ps_aux = __salt__["cmd.run"]("ps aux")
- found_infos = []
- ret = []
- nb_lines = 0
---
-2.29.1
-
-
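The deleted patch above (presumably superseded by the 3002.2 rebase) also
gave ps.pgrep a pattern_is_regex flag. A hedged re-implementation of just
the matching rule, independent of salt and psutil:

    import re

    def name_matches(pattern, process_line, pattern_is_regex=False):
        # pgrep(1)-style: regex search when requested, substring test otherwise
        if pattern_is_regex:
            return re.search(str(pattern), process_line) is not None
        return pattern in process_line

    assert name_matches("bash", "/bin/bash -l")
    assert name_matches(r"www-data.+apache2", "www-data 1234 apache2 -k start",
                        pattern_is_regex=True)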
diff --git a/fix-for-log-checking-in-x509-test.patch b/fix-for-log-checking-in-x509-test.patch
index 567ba7b..53679d8 100644
--- a/fix-for-log-checking-in-x509-test.patch
+++ b/fix-for-log-checking-in-x509-test.patch
@@ -1,4 +1,4 @@
-From e0ca0d0d2a62f18e2712223e130af5faa8e0fe05 Mon Sep 17 00:00:00 2001
+From b4f54187ae7d231250f72244ffd874cc2c846150 Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Thu, 28 Nov 2019 15:23:36 +0100
Subject: [PATCH] Fix for log checking in x509 test
@@ -9,10 +9,10 @@ We are logging in debug and not in trace mode here.
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/unit/modules/test_x509.py b/tests/unit/modules/test_x509.py
-index 624a927bec..976af634c7 100644
+index 40aea12272..e7503395eb 100644
--- a/tests/unit/modules/test_x509.py
+++ b/tests/unit/modules/test_x509.py
-@@ -68,9 +68,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin):
+@@ -127,9 +127,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin):
subj = FakeSubject()
x509._parse_subject(subj)
@@ -23,9 +23,9 @@ index 624a927bec..976af634c7 100644
+ assert x509.log.debug.call_args[0][1] == list(subj.nid.keys())[0]
+ assert isinstance(x509.log.debug.call_args[0][2], TypeError)
- @skipIf(not HAS_M2CRYPTO, 'Skipping, M2Crypto is unavailble')
+ @skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable")
def test_get_pem_entry(self):
--
-2.16.4
+2.29.2
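The updated assertions inspect the mocked logger positionally. A small
self-contained illustration of that call_args pattern (demo logger and
message, not the real test fixture):

    import logging
    from unittest.mock import MagicMock

    log = logging.getLogger("x509-demo")
    log.debug = MagicMock()
    log.debug("Missing attribute '%s'. Error: %s", "CN", TypeError("bad"))

    assert log.debug.call_args[0][1] == "CN"  # second positional argument
    assert isinstance(log.debug.call_args[0][2], TypeError)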
diff --git a/fix-for-return-value-ret-vs-return-in-batch-mode.patch b/fix-for-return-value-ret-vs-return-in-batch-mode.patch
deleted file mode 100644
index 04e79d5..0000000
--- a/fix-for-return-value-ret-vs-return-in-batch-mode.patch
+++ /dev/null
@@ -1,113 +0,0 @@
-From 0c988e1db59a255b2f707c4e626cec21ff06d7a3 Mon Sep 17 00:00:00 2001
-From: Jochen Breuer
-Date: Thu, 9 Apr 2020 17:12:54 +0200
-Subject: [PATCH] Fix for return value ret vs return in batch mode
-
-The least intrusive fix for ret vs return in batch mode.
----
- salt/cli/batch.py | 16 ++++++----
- tests/unit/cli/test_batch.py | 62 ++++++++++++++++++++++++++++++++++++
- 2 files changed, 71 insertions(+), 7 deletions(-)
-
-diff --git a/salt/cli/batch.py b/salt/cli/batch.py
-index 10fc81a5f4..d5b8754ad7 100644
---- a/salt/cli/batch.py
-+++ b/salt/cli/batch.py
-@@ -234,14 +234,16 @@ class Batch(object):
- if not self.quiet:
- salt.utils.stringutils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
- # create a new iterator for this batch of minions
-+ return_value = self.opts.get("return", self.opts.get("ret", ""))
- new_iter = self.local.cmd_iter_no_block(
-- *args,
-- raw=self.opts.get('raw', False),
-- ret=self.opts.get('return', ''),
-- show_jid=show_jid,
-- verbose=show_verbose,
-- gather_job_timeout=self.opts['gather_job_timeout'],
-- **self.eauth)
-+ *args,
-+ raw=self.opts.get("raw", False),
-+ ret=return_value,
-+ show_jid=show_jid,
-+ verbose=show_verbose,
-+ gather_job_timeout=self.opts["gather_job_timeout"],
-+ **self.eauth
-+ )
- # add it to our iterators and to the minion_tracker
- iters.append(new_iter)
- minion_tracker[new_iter] = {}
-diff --git a/tests/unit/cli/test_batch.py b/tests/unit/cli/test_batch.py
-index acabbe51f5..d7411e8039 100644
---- a/tests/unit/cli/test_batch.py
-+++ b/tests/unit/cli/test_batch.py
-@@ -72,3 +72,65 @@ class BatchTestCase(TestCase):
- '''
- ret = Batch.get_bnum(self.batch)
- self.assertEqual(ret, None)
-+
-+ def test_return_value_in_run_for_ret(self):
-+ """
-+ cmd_iter_no_block should have been called with a return no matter if
-+ the return value was in ret or return.
-+ """
-+ self.batch.opts = {
-+ "batch": "100%",
-+ "timeout": 5,
-+ "fun": "test",
-+ "arg": "foo",
-+ "gather_job_timeout": 5,
-+ "ret": "my_return",
-+ }
-+ self.batch.minions = ["foo", "bar", "baz"]
-+ self.batch.local.cmd_iter_no_block = MagicMock(return_value=iter([]))
-+ ret = Batch.run(self.batch)
-+ # We need to fetch at least one object to trigger the relevant code path.
-+ x = next(ret)
-+ self.batch.local.cmd_iter_no_block.assert_called_with(
-+ ["baz", "bar", "foo"],
-+ "test",
-+ "foo",
-+ 5,
-+ "list",
-+ raw=False,
-+ ret="my_return",
-+ show_jid=False,
-+ verbose=False,
-+ gather_job_timeout=5,
-+ )
-+
-+ def test_return_value_in_run_for_return(self):
-+ """
-+ cmd_iter_no_block should have been called with a return no matter if
-+ the return value was in ret or return.
-+ """
-+ self.batch.opts = {
-+ "batch": "100%",
-+ "timeout": 5,
-+ "fun": "test",
-+ "arg": "foo",
-+ "gather_job_timeout": 5,
-+ "return": "my_return",
-+ }
-+ self.batch.minions = ["foo", "bar", "baz"]
-+ self.batch.local.cmd_iter_no_block = MagicMock(return_value=iter([]))
-+ ret = Batch.run(self.batch)
-+ # We need to fetch at least one object to trigger the relevant code path.
-+ x = next(ret)
-+ self.batch.local.cmd_iter_no_block.assert_called_with(
-+ ["baz", "bar", "foo"],
-+ "test",
-+ "foo",
-+ 5,
-+ "list",
-+ raw=False,
-+ ret="my_return",
-+ show_jid=False,
-+ verbose=False,
-+ gather_job_timeout=5,
-+ )
---
-2.26.1
-
-
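The key line of the deleted patch was the opts lookup that accepts either
spelling of the returner option. A minimal sketch of that fallback, as plain
dict logic outside salt:

    def returner_from_opts(opts):
        # prefer "return", fall back to the legacy "ret", default to ""
        return opts.get("return", opts.get("ret", ""))

    assert returner_from_opts({"ret": "my_return"}) == "my_return"
    assert returner_from_opts({"return": "my_return"}) == "my_return"
    assert returner_from_opts({}) == ""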
diff --git a/fix-for-suse-expanded-support-detection.patch b/fix-for-suse-expanded-support-detection.patch
index 8a72ce9..e9349fd 100644
--- a/fix-for-suse-expanded-support-detection.patch
+++ b/fix-for-suse-expanded-support-detection.patch
@@ -1,4 +1,4 @@
-From 16d656744d2e7d915757d6f2ae26b57ad8230b0b Mon Sep 17 00:00:00 2001
+From 369a732537937dd6865152a87f04777539b27fcd Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Thu, 6 Sep 2018 17:15:18 +0200
Subject: [PATCH] Fix for SUSE Expanded Support detection
@@ -14,26 +14,26 @@ This change also adds a check for redhat-release and then marks the
1 file changed, 9 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 9b244def9c..2851809472 100644
+index 436c058eb6..00bd0565bf 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -1892,6 +1892,15 @@ def os_data():
- log.trace('Parsing distrib info from /etc/centos-release')
+@@ -1990,6 +1990,15 @@ def os_data():
+ log.trace("Parsing distrib info from /etc/centos-release")
# CentOS Linux
- grains['lsb_distrib_id'] = 'CentOS'
+ grains["lsb_distrib_id"] = "CentOS"
+ # Maybe CentOS Linux; could also be SUSE Expanded Support.
+ # SUSE ES has both centos-release and redhat-release.
-+ if os.path.isfile('/etc/redhat-release'):
-+ with salt.utils.files.fopen('/etc/redhat-release') as ifile:
++ if os.path.isfile("/etc/redhat-release"):
++ with salt.utils.files.fopen("/etc/redhat-release") as ifile:
+ for line in ifile:
+ if "red hat enterprise linux server" in line.lower():
+ # This is a SUSE Expanded Support RHEL installation
-+ grains['lsb_distrib_id'] = 'RedHat'
++ grains["lsb_distrib_id"] = "RedHat"
+ break
- with salt.utils.files.fopen('/etc/centos-release') as ifile:
+ with salt.utils.files.fopen("/etc/centos-release") as ifile:
for line in ifile:
# Need to pull out the version and codename
--
-2.16.4
+2.29.2
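The rule the patch encodes: a box that looks like CentOS but whose
/etc/redhat-release names Red Hat Enterprise Linux Server is a SUSE Expanded
Support system and should be reported as RedHat. A hedged standalone sketch
of that check (same system files, plain open() instead of salt's fopen):

    import os

    def lsb_distrib_id():
        distrib_id = "CentOS"
        if os.path.isfile("/etc/redhat-release"):
            with open("/etc/redhat-release") as ifile:
                for line in ifile:
                    if "red hat enterprise linux server" in line.lower():
                        distrib_id = "RedHat"  # SUSE ES, not plain CentOS
                        break
        return distrib_id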
diff --git a/fix-for-temp-folder-definition-in-loader-unit-test.patch b/fix-for-temp-folder-definition-in-loader-unit-test.patch
index 86fb7cd..548f3ff 100644
--- a/fix-for-temp-folder-definition-in-loader-unit-test.patch
+++ b/fix-for-temp-folder-definition-in-loader-unit-test.patch
@@ -1,4 +1,4 @@
-From dd01a0fc594f024eee2267bed2f698f5a6c729bf Mon Sep 17 00:00:00 2001
+From 33766e59bd53fac2c75e6ccfa1f363e2f7b1b65f Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Mon, 16 Mar 2020 15:25:42 +0100
Subject: [PATCH] Fix for temp folder definition in loader unit test
@@ -8,13 +8,13 @@ Subject: [PATCH] Fix for temp folder definition in loader unit test
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
-index fe11cd0681..7e369f2c3b 100644
+index 863e2182b9..5b23ad83e3 100644
--- a/tests/unit/test_loader.py
+++ b/tests/unit/test_loader.py
-@@ -152,12 +152,12 @@ class LazyLoaderUtilsTest(TestCase):
+@@ -240,12 +240,12 @@ class LazyLoaderUtilsTest(TestCase):
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
- cls.opts['grains'] = salt.loader.grains(cls.opts)
+ cls.opts["grains"] = salt.loader.grains(cls.opts)
- if not os.path.isdir(TMP):
- os.makedirs(TMP)
+ if not os.path.isdir(RUNTIME_VARS.TMP):
@@ -24,19 +24,19 @@ index fe11cd0681..7e369f2c3b 100644
# Setup the module
- self.module_dir = tempfile.mkdtemp(dir=TMP)
+ self.module_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
- self.module_file = os.path.join(self.module_dir,
- '{}.py'.format(self.module_name))
- with salt.utils.files.fopen(self.module_file, 'w') as fh:
-@@ -165,7 +165,7 @@ class LazyLoaderUtilsTest(TestCase):
+ self.module_file = os.path.join(
+ self.module_dir, "{}.py".format(self.module_name)
+ )
+@@ -254,7 +254,7 @@ class LazyLoaderUtilsTest(TestCase):
fh.flush()
os.fsync(fh.fileno())
- self.utils_dir = tempfile.mkdtemp(dir=TMP)
+ self.utils_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
- self.utils_file = os.path.join(self.utils_dir,
- '{}.py'.format(self.utils_name))
- with salt.utils.files.fopen(self.utils_file, 'w') as fh:
+ self.utils_file = os.path.join(self.utils_dir, "{}.py".format(self.utils_name))
+ with salt.utils.files.fopen(self.utils_file, "w") as fh:
+ fh.write(salt.utils.stringutils.to_str(loader_template_utils))
--
-2.16.4
+2.29.2
diff --git a/fix-git_pillar-merging-across-multiple-__env__-repos.patch b/fix-git_pillar-merging-across-multiple-__env__-repos.patch
index 9dca7fb..046b837 100644
--- a/fix-git_pillar-merging-across-multiple-__env__-repos.patch
+++ b/fix-git_pillar-merging-across-multiple-__env__-repos.patch
@@ -1,4 +1,4 @@
-From 900d63bc5e85496e16373025457561b405f2329f Mon Sep 17 00:00:00 2001
+From f5c9527aeee190a66a908037770c80a75e911d8c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Tue, 6 Nov 2018 16:38:54 +0000
@@ -11,37 +11,42 @@ Test git ext_pillar across multiple repos using __env__
Remove unicode references
---
- tests/integration/pillar/test_git_pillar.py | 45 +++++++++++++++++++++++++++++
- 1 file changed, 45 insertions(+)
+ tests/integration/pillar/test_git_pillar.py | 55 +++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
-index 2e549f3948..d417a7ebc3 100644
+index c0362127f6..979dfebb94 100644
--- a/tests/integration/pillar/test_git_pillar.py
+++ b/tests/integration/pillar/test_git_pillar.py
-@@ -1382,6 +1382,51 @@ class TestPygit2SSH(GitPillarSSHTestBase):
- 'nested_dict': {'master': True}}}
+@@ -1600,6 +1600,61 @@ class TestPygit2SSH(GitPillarSSHTestBase):
+ },
)
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+@skipIf(_windows_or_mac(), 'minion is windows or mac')
++@skipIf(_windows_or_mac(), "minion is windows or mac")
+@skip_if_not_root
-+@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER))
-+@skipIf(not HAS_NGINX, 'nginx not present')
-+@skipIf(not HAS_VIRTUALENV, 'virtualenv not present')
++@skipIf(
++ not HAS_PYGIT2,
++ "pygit2 >= {} and libgit2 >= {} required".format(PYGIT2_MINVER, LIBGIT2_MINVER),
++)
++@skipIf(not HAS_NGINX, "nginx not present")
++@skipIf(not HAS_VIRTUALENV, "virtualenv not present")
+class TestPygit2HTTP(GitPillarHTTPTestBase):
-+ '''
++ """
+ Test git_pillar with pygit2 using HTTP
-+ '''
++ """
++
+ def test_single_source(self):
-+ '''
++ """
+ Test with git_pillar_includes enabled and using "__env__" as the branch
+ name for the configured repositories.
+ The "gitinfo" repository contains top.sls file with a local reference
+ and also referencing external "nowhere.foo" which is provided by "webinfo"
+ repository mounted as "nowhere".
-+ '''
-+ ret = self.get_pillar('''\
++ """
++ ret = self.get_pillar(
++ """\
+ file_ignore_regex: []
+ file_ignore_glob: []
+ git_pillar_provider: pygit2
@@ -56,21 +61,26 @@ index 2e549f3948..d417a7ebc3 100644
+ - __env__ {url}:
+ - name: webinfo
+ - mountpoint: nowhere
-+ ''')
++ """
++ )
+ self.assertEqual(
+ ret,
-+ {'branch': 'master',
-+ 'motd': 'The force will be with you. Always.',
-+ 'mylist': ['master'],
-+ 'mydict': {'master': True,
-+ 'nested_list': ['master'],
-+ 'nested_dict': {'master': True}}}
++ {
++ "branch": "master",
++ "motd": "The force will be with you. Always.",
++ "mylist": ["master"],
++ "mydict": {
++ "master": True,
++ "nested_list": ["master"],
++ "nested_dict": {"master": True},
++ },
++ },
+ )
+
- @requires_system_grains
- def test_root_parameter(self, grains):
- '''
+ @slowTest
+ def test_root_parameter(self):
+ """
--
-2.16.4
+2.29.2
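What the new HTTP test asserts, in miniature: top.sls references
"nowhere.foo", and the repo mounted at "nowhere" is the one that must supply
foo.sls. A toy model of that mountpoint lookup (plain dicts, not the
git_pillar implementation):

    def resolve_sls(ref, repos):
        # repos maps a mountpoint to {sls_name: pillar_data}
        mountpoint, _, name = ref.rpartition(".")
        return repos.get(mountpoint, {}).get(name)

    repos = {"nowhere": {"foo": {"motd": "The force will be with you. Always."}}}
    assert resolve_sls("nowhere.foo", repos)["motd"].startswith("The force")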
diff --git a/fix-grains.test_core-unit-test-277.patch b/fix-grains.test_core-unit-test-277.patch
index a65482a..8e9371b 100644
--- a/fix-grains.test_core-unit-test-277.patch
+++ b/fix-grains.test_core-unit-test-277.patch
@@ -1,4 +1,4 @@
-From 4998996a08db72a1b925b2c3f725c4fba4fe9622 Mon Sep 17 00:00:00 2001
+From e2ff2f339ce7938ecdadf867f285a559bc2431dd Mon Sep 17 00:00:00 2001
From: Dominik Gedon
Date: Tue, 6 Oct 2020 14:00:55 +0200
Subject: [PATCH] Fix grains.test_core unit test (#277)
@@ -6,41 +6,38 @@ Subject: [PATCH] Fix grains.test_core unit test (#277)
This reverts 63b94ae and fixes the grains test_core unit test. The
changes are aligned with upstream.
---
- tests/unit/grains/test_core.py | 13 ++++++++-----
- 1 file changed, 8 insertions(+), 5 deletions(-)
+ tests/unit/grains/test_core.py | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index 36aa49f232..d3b6515d00 100644
+index 918a9155cb..15de4e363e 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -69,10 +69,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- def test_parse_etc_os_release(self, path_isfile_mock):
- path_isfile_mock.side_effect = lambda x: x == "/usr/lib/os-release"
- with salt.utils.files.fopen(os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")) as os_release_file:
+@@ -60,11 +60,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ with salt.utils.files.fopen(
+ os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")
+ ) as os_release_file:
- os_release_content = os_release_file.readlines()
- with patch("salt.utils.files.fopen", mock_open()) as os_release_file:
- os_release_file.return_value.__iter__.return_value = os_release_content
-- os_release = core._parse_os_release(["/etc/os-release", "/usr/lib/os-release"])
+ os_release_content = os_release_file.read()
+ with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)):
-+ os_release = core._parse_os_release(
+ os_release = core._parse_os_release(
+- ["/etc/os-release", "/usr/lib/os-release"]
+ "/etc/os-release", "/usr/lib/os-release"
-+ )
- self.assertEqual(os_release, {
- "NAME": "Ubuntu",
- "VERSION": "17.10 (Artful Aardvark)",
-@@ -134,7 +135,9 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
-
+ )
+ self.assertEqual(
+ os_release,
+@@ -174,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
def test_missing_os_release(self):
- with patch('salt.utils.files.fopen', mock_open(read_data={})):
-- os_release = core._parse_os_release(['/etc/os-release', '/usr/lib/os-release'])
-+ os_release = core._parse_os_release(
+ with patch("salt.utils.files.fopen", mock_open(read_data={})):
+ os_release = core._parse_os_release(
+- ["/etc/os-release", "/usr/lib/os-release"]
+ "/etc/os-release", "/usr/lib/os-release"
-+ )
+ )
self.assertEqual(os_release, {})
- @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows')
--
-2.28.0
+2.29.2
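The test now calls _parse_os_release with the candidate paths as separate
arguments instead of one list. A simplified stand-in with the same calling
convention (first readable file wins, bare KEY=VALUE parsing only):

    def parse_os_release(*os_release_files):
        data = {}
        for path in os_release_files:
            try:
                with open(path) as fh:
                    for line in fh:
                        line = line.strip()
                        if "=" in line and not line.startswith("#"):
                            key, value = line.split("=", 1)
                            data[key] = value.strip('"')
                break
            except OSError:
                continue
        return data

    # called as in the fixed test:
    # parse_os_release("/etc/os-release", "/usr/lib/os-release")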
diff --git a/fix-ipv6-scope-bsc-1108557.patch b/fix-ipv6-scope-bsc-1108557.patch
index b29edfb..634cc49 100644
--- a/fix-ipv6-scope-bsc-1108557.patch
+++ b/fix-ipv6-scope-bsc-1108557.patch
@@ -1,4 +1,4 @@
-From 2cb7515f83e2c358b84724e4eb581daa78012fdf Mon Sep 17 00:00:00 2001
+From 082bb6a25b2b025a5c7c6fdbf7dbcbe64a39da2c Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Fri, 28 Sep 2018 15:22:33 +0200
Subject: [PATCH] Fix IPv6 scope (bsc#1108557)
@@ -69,14 +69,14 @@ Lintfix: W0611
Reverse skipping tests: if no ipaddress
---
- salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
diff --git a/salt/_compat.py b/salt/_compat.py
-index e999605d2c..965bb90da3 100644
+index 011eb8af9e..d9425523cf 100644
--- a/salt/_compat.py
+++ b/salt/_compat.py
-@@ -230,7 +230,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
+@@ -242,7 +242,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
self.hostmask = self.network.hostmask
@@ -159,6 +159,6 @@ index e999605d2c..965bb90da3 100644
+ ipaddress.ip_address = ip_address
+ ipaddress.ip_interface = ip_interface
--
-2.16.4
+2.29.2
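The problem the patch works around, shown with the standard library: before
Python 3.9, ipaddress.ip_address() rejects a zone ID such as "%eth0", which
is why salt/_compat.py ships scoped subclasses. A minimal fallback sketch,
not the salt code:

    import ipaddress

    def parse_maybe_scoped(addr):
        try:
            # plain addresses, and scoped ones on Python >= 3.9
            return ipaddress.ip_address(addr)
        except ValueError:
            host, _, scope = addr.partition("%")  # strip the scope and retry
            return ipaddress.ip_address(host), scope or None

    print(parse_maybe_scoped("fe80::1%eth0"))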
diff --git a/fix-issue-2068-test.patch b/fix-issue-2068-test.patch
index d0a00ee..0c1eee7 100644
--- a/fix-issue-2068-test.patch
+++ b/fix-issue-2068-test.patch
@@ -1,4 +1,4 @@
-From bfdd7f946d56d799e89b33f7e3b72426732b0195 Mon Sep 17 00:00:00 2001
+From db77ad3e24daf3bc014dc3d85a49aa1bb33ae1ae Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Wed, 9 Jan 2019 16:08:19 +0100
Subject: [PATCH] Fix issue #2068 test
@@ -13,19 +13,19 @@ Minor update: more correct is-dict check.
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/salt/state.py b/salt/state.py
-index bc5277554e..2fa5f64ca5 100644
+index b1bce4e0cd..cc6db7e1b2 100644
--- a/salt/state.py
+++ b/salt/state.py
-@@ -25,6 +25,7 @@ import traceback
- import re
- import time
- import random
-+import collections
+@@ -12,6 +12,7 @@ The data sent to the state calls is as follows:
+ """
- # Import salt libs
- import salt.loader
-@@ -2896,16 +2897,18 @@ class State(object):
- '''
+
++import collections
+ import copy
+ import datetime
+ import fnmatch
+@@ -3206,16 +3207,18 @@ class State:
+ """
for chunk in high:
state = high[chunk]
+ if not isinstance(state, collections.Mapping):
@@ -35,18 +35,18 @@ index bc5277554e..2fa5f64ca5 100644
+ if not isinstance(state[state_ref], list):
+ continue
for argset in state[state_ref]:
- if isinstance(argset, six.string_types):
+ if isinstance(argset, str):
needs_default = False
break
if needs_default:
- order = state[state_ref].pop(-1)
-- state[state_ref].append('__call__')
+- state[state_ref].append("__call__")
- state[state_ref].append(order)
-+ state[state_ref].insert(-1, '__call__')
++ state[state_ref].insert(-1, "__call__")
def call_high(self, high, orchestration_jid=None):
- '''
+ """
--
-2.16.4
+2.29.2
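A rough model of the hardened loop above: tolerate malformed high data
(non-mapping chunks, non-list bodies) and, when a state body carries no
function name, splice "__call__" in front of the trailing entry instead of
popping and re-appending it. Standalone toy with the patch's field shapes:

    import collections.abc

    def ensure_default_call(high):
        for chunk in high:
            state = high[chunk]
            if not isinstance(state, collections.abc.Mapping):
                continue
            for state_ref, body in state.items():
                if not isinstance(body, list):
                    continue
                if not any(isinstance(arg, str) for arg in body):
                    body.insert(-1, "__call__")

    high = {"/tmp/f": {"file": [{"name": "/tmp/f"}, {"order": 10000}]}}
    ensure_default_call(high)
    assert high["/tmp/f"]["file"][-2] == "__call__"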
diff --git a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch b/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
index b41b086..4f71f90 100644
--- a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
+++ b/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
@@ -1,4 +1,4 @@
-From 77d53d9567b7aec045a8fffd29afcb76a8405caf Mon Sep 17 00:00:00 2001
+From 00c538383e463febba492e74577ae64be80d4d00 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Mon, 16 Sep 2019 11:27:30 +0200
Subject: [PATCH] Fix memory leak produced by batch async find_jobs
@@ -16,16 +16,16 @@ Multiple fixes:
Co-authored-by: Pablo Suárez Hernández
---
- salt/cli/batch_async.py | 60 ++++++++++++++++++++++++++++++++-----------------
+ salt/cli/batch_async.py | 59 ++++++++++++++++++++++++++++-------------
salt/client/__init__.py | 1 +
- salt/master.py | 1 -
+ salt/master.py | 2 --
3 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 8c8f481e34..8a67331102 100644
+index 7225491228..388b709416 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -72,6 +72,7 @@ class BatchAsync(object):
+@@ -73,6 +73,7 @@ class BatchAsync:
self.done_minions = set()
self.active = set()
self.initialized = False
@@ -33,55 +33,58 @@ index 8c8f481e34..8a67331102 100644
self.ping_jid = jid_gen()
self.batch_jid = jid_gen()
self.find_job_jid = jid_gen()
-@@ -89,14 +90,11 @@ class BatchAsync(object):
+@@ -91,14 +92,11 @@ class BatchAsync:
def __set_event_handler(self):
- ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid)
- batch_return_pattern = 'salt/job/{0}/ret/*'.format(self.batch_jid)
-- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(self.find_job_jid)
- self.event.subscribe(ping_return_pattern, match_type='glob')
- self.event.subscribe(batch_return_pattern, match_type='glob')
-- self.event.subscribe(find_job_return_pattern, match_type='glob')
+ ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
+ batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid)
+- find_job_return_pattern = "salt/job/{}/ret/*".format(self.find_job_jid)
+ self.event.subscribe(ping_return_pattern, match_type="glob")
+ self.event.subscribe(batch_return_pattern, match_type="glob")
+- self.event.subscribe(find_job_return_pattern, match_type="glob")
- self.event.patterns = {
+ self.patterns = {
- (ping_return_pattern, 'ping_return'),
- (batch_return_pattern, 'batch_run'),
-- (find_job_return_pattern, 'find_job_return')
+ (ping_return_pattern, "ping_return"),
+ (batch_return_pattern, "batch_run"),
+- (find_job_return_pattern, "find_job_return"),
}
self.event.set_event_handler(self.__event_handler)
-@@ -104,7 +102,7 @@ class BatchAsync(object):
+@@ -106,7 +104,7 @@ class BatchAsync:
if not self.event:
return
mtag, data = self.event.unpack(raw, self.event.serial)
- for (pattern, op) in self.event.patterns:
+ for (pattern, op) in self.patterns:
if fnmatch.fnmatch(mtag, pattern):
- minion = data['id']
- if op == 'ping_return':
-@@ -112,7 +110,8 @@ class BatchAsync(object):
+ minion = data["id"]
+ if op == "ping_return":
+@@ -114,7 +112,8 @@ class BatchAsync:
if self.targeted_minions == self.minions:
self.event.io_loop.spawn_callback(self.start_batch)
- elif op == 'find_job_return':
+ elif op == "find_job_return":
- self.find_job_returned.add(minion)
+ if data.get("return", None):
+ self.find_job_returned.add(minion)
- elif op == 'batch_run':
+ elif op == "batch_run":
if minion in self.active:
self.active.remove(minion)
-@@ -131,31 +130,46 @@ class BatchAsync(object):
+@@ -134,7 +133,11 @@ class BatchAsync:
return set(list(to_run)[:next_batch_size])
@tornado.gen.coroutine
- def check_find_job(self, batch_minions):
+ def check_find_job(self, batch_minions, jid):
-+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
-+ self.event.unsubscribe(find_job_return_pattern, match_type='glob')
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++ self.event.unsubscribe(find_job_return_pattern, match_type="glob")
+ self.patterns.remove((find_job_return_pattern, "find_job_return"))
+
- timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions)
- self.timedout_minions = self.timedout_minions.union(timedout_minions)
- self.active = self.active.difference(self.timedout_minions)
- running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(
+ self.done_minions
+ )
+@@ -143,27 +146,39 @@ class BatchAsync:
+ running = batch_minions.difference(self.done_minions).difference(
+ self.timedout_minions
+ )
+
if timedout_minions:
self.schedule_next()
@@ -95,56 +98,59 @@ index 8c8f481e34..8a67331102 100644
- not_done = minions.difference(self.done_minions)
- ping_return = yield self.local.run_job_async(
- not_done,
-- 'saltutil.find_job',
+- "saltutil.find_job",
- [self.batch_jid],
-- 'list',
-- gather_job_timeout=self.opts['gather_job_timeout'],
+- "list",
+- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.find_job_jid,
-- **self.eauth)
+- **self.eauth
+- )
- self.event.io_loop.call_later(
-- self.opts['gather_job_timeout'],
-- self.check_find_job,
-- not_done)
-+ not_done = minions.difference(self.done_minions).difference(self.timedout_minions)
-+
+- self.opts["gather_job_timeout"], self.check_find_job, not_done
++ not_done = minions.difference(self.done_minions).difference(
++ self.timedout_minions
+ )
+
+ if not_done:
+ jid = self.jid_gen()
-+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+ self.patterns.add((find_job_return_pattern, "find_job_return"))
-+ self.event.subscribe(find_job_return_pattern, match_type='glob')
++ self.event.subscribe(find_job_return_pattern, match_type="glob")
+
+ ret = yield self.local.run_job_async(
+ not_done,
-+ 'saltutil.find_job',
++ "saltutil.find_job",
+ [self.batch_jid],
-+ 'list',
-+ gather_job_timeout=self.opts['gather_job_timeout'],
++ "list",
++ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=jid,
-+ **self.eauth)
++ **self.eauth
++ )
+ self.event.io_loop.call_later(
-+ self.opts['gather_job_timeout'],
-+ self.check_find_job,
-+ not_done,
-+ jid)
-
++ self.opts["gather_job_timeout"], self.check_find_job, not_done, jid
++ )
++
@tornado.gen.coroutine
def start(self):
-@@ -203,6 +217,9 @@ class BatchAsync(object):
+ self.__set_event_handler()
+@@ -211,6 +226,9 @@ class BatchAsync:
}
- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
self.event.remove_event_handler(self.__event_handler)
+ for (pattern, label) in self.patterns:
+ if label in ["ping_return", "batch_run"]:
-+ self.event.unsubscribe(pattern, match_type='glob')
++ self.event.unsubscribe(pattern, match_type="glob")
def schedule_next(self):
if not self.scheduled:
-@@ -226,9 +243,12 @@ class BatchAsync(object):
- gather_job_timeout=self.opts['gather_job_timeout'],
+@@ -235,11 +253,14 @@ class BatchAsync:
jid=self.batch_jid,
- metadata=self.metadata)
+ metadata=self.metadata,
+ )
+
- self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
+ self.event.io_loop.call_later(
+ self.opts["timeout"], self.find_job, set(next_batch)
+ )
except Exception as ex:
+ log.error("Error in scheduling next batch: %s", ex)
self.active = self.active.difference(next_batch)
@@ -153,30 +159,31 @@ index 8c8f481e34..8a67331102 100644
self.scheduled = False
+ yield
diff --git a/salt/client/__init__.py b/salt/client/__init__.py
-index 3bbc7f9de7..a48d79ef8d 100644
+index 1e9f11df4c..cc8fd4048d 100644
--- a/salt/client/__init__.py
+++ b/salt/client/__init__.py
-@@ -1622,6 +1622,7 @@ class LocalClient(object):
- 'key': self.key,
- 'tgt_type': tgt_type,
- 'ret': ret,
-+ 'timeout': timeout,
- 'jid': jid}
+@@ -1776,6 +1776,7 @@ class LocalClient:
+ "key": self.key,
+ "tgt_type": tgt_type,
+ "ret": ret,
++ "timeout": timeout,
+ "jid": jid,
+ }
- # if kwargs are passed, pack them.
diff --git a/salt/master.py b/salt/master.py
-index 5e2277ba76..3abf7ae60b 100644
+index b9bc1a7a67..7a99af357a 100644
--- a/salt/master.py
+++ b/salt/master.py
-@@ -2044,7 +2044,6 @@ class ClearFuncs(object):
+@@ -2232,8 +2232,6 @@ class ClearFuncs(TransportMethods):
def publish_batch(self, clear_load, minions, missing):
batch_load = {}
batch_load.update(clear_load)
- import salt.cli.batch_async
+-
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
--
-2.16.4
+2.29.2
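The shape of the leak fix, reduced to a toy: each find_job round registers a
fresh per-jid return pattern, and check_find_job() for that jid is what
unregisters it, so patterns no longer accumulate across rounds. The event
bus is replaced by a bare set:

    class PatternBook:
        def __init__(self):
            self.patterns = set()

        def find_job(self, jid):
            # subscribe before asking the minions about the job
            self.patterns.add(("salt/job/{}/ret/*".format(jid), "find_job_return"))

        def check_find_job(self, jid):
            # the timeout callback drops exactly the pattern it created
            self.patterns.remove(("salt/job/{}/ret/*".format(jid), "find_job_return"))

    book = PatternBook()
    book.find_job("1234")
    book.check_find_job("1234")
    assert not book.patterns  # nothing left behind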
diff --git a/fix-novendorchange-option-284.patch b/fix-novendorchange-option-284.patch
index 1733896..5c3178a 100644
--- a/fix-novendorchange-option-284.patch
+++ b/fix-novendorchange-option-284.patch
@@ -1,4 +1,4 @@
-From f69c1178de003866af412e61e0146597974eec0d Mon Sep 17 00:00:00 2001
+From 4123cf7b9428af1442f4aa0a54489e5c0deb4aaa Mon Sep 17 00:00:00 2001
From: Martin Seidl
Date: Tue, 27 Oct 2020 16:12:29 +0100
Subject: [PATCH] Fix novendorchange option (#284)
@@ -7,39 +7,43 @@ Subject: [PATCH] Fix novendorchange option (#284)
* refactor handling of novendorchange and fix tests
---
- salt/modules/zypperpkg.py | 19 ++--
- tests/unit/modules/test_zypperpkg.py | 150 ++++++++++++++++++++++++---
- 2 files changed, 148 insertions(+), 21 deletions(-)
+ salt/modules/zypperpkg.py | 21 +++---
+ tests/unit/modules/test_zypperpkg.py | 100 ++++++++++++++++++++++++++-
+ 2 files changed, 108 insertions(+), 13 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index ad11da4ad1..d84a6af6e0 100644
+index 5369a0342e..d06c265202 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -1617,7 +1617,7 @@ def upgrade(refresh=True,
- dryrun=False,
- dist_upgrade=False,
- fromrepo=None,
-- novendorchange=False,
-+ novendorchange=True,
- skip_verify=False,
- no_recommends=False,
- root=None,
-@@ -1701,13 +1701,18 @@ def upgrade(refresh=True,
- log.info('Targeting repos: %s', fromrepo)
+@@ -1707,7 +1707,7 @@ def upgrade(
+ dryrun=False,
+ dist_upgrade=False,
+ fromrepo=None,
+- novendorchange=False,
++ novendorchange=True,
+ skip_verify=False,
+ no_recommends=False,
+ root=None,
+@@ -1794,19 +1794,18 @@ def upgrade(
+ log.info("Targeting repos: %s", fromrepo)
if dist_upgrade:
- if novendorchange:
- # TODO: Grains validation should be moved to Zypper class
-- if __grains__['osrelease_info'][0] > 11:
-- cmd_update.append('--no-allow-vendor-change')
-- log.info('Disabling vendor changes')
+- if __grains__["osrelease_info"][0] > 11:
+ # TODO: Grains validation should be moved to Zypper class
+ if __grains__["osrelease_info"][0] > 11:
+ if novendorchange:
-+ cmd_update.append("--no-allow-vendor-change")
-+ log.info("Disabling vendor changes")
+ cmd_update.append("--no-allow-vendor-change")
+ log.info("Disabling vendor changes")
else:
-- log.warning('Disabling vendor changes is not supported on this Zypper version')
+- log.warning(
+- "Disabling vendor changes is not supported on this Zypper version"
+- )
+-
+- if no_recommends:
+- cmd_update.append("--no-recommends")
+- log.info("Disabling recommendations")
+ cmd_update.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
@@ -48,121 +52,26 @@ index ad11da4ad1..d84a6af6e0 100644
+ )
if no_recommends:
- cmd_update.append('--no-recommends')
+ cmd_update.append("--no-recommends")
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index a3d20f66d5..8cc84485b5 100644
+index a60b1546c6..eaa4d9a76a 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -480,7 +480,11 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}])):
+@@ -642,7 +642,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
ret = zypper.upgrade(dist_upgrade=True)
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
-- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses')
-+ zypper_mock.assert_any_call(
+ zypper_mock.assert_any_call(
+- "dist-upgrade", "--auto-agree-with-licenses"
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--no-allow-vendor-change",
-+ )
+ )
- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
- ret = zypper.upgrade(dist_upgrade=True, dryrun=True)
-@@ -488,25 +492,138 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses',
- '--dry-run', '--debug-solver')
+ with patch(
+@@ -660,6 +662,76 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ "--debug-solver",
+ )
-- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
-- ret = zypper.upgrade(dist_upgrade=True, dryrun=True,
-- fromrepo=["Dummy", "Dummy2"], novendorchange=True)
-- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run',
-- '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change')
-- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run',
-- '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change',
-- '--debug-solver')
--
- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
- ret = zypper.upgrade(dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False)
- zypper_mock.assert_any_call('update', '--auto-agree-with-licenses', '--repo', "Dummy", '--repo', 'Dummy2')
-
- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}])):
- ret = zypper.upgrade(dist_upgrade=True, fromrepo=["Dummy", "Dummy2"], novendorchange=True)
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--no-allow-vendor-change",
-+ )
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--no-allow-vendor-change",
-+ )
-+
-+ with patch(
-+ "salt.modules.zypperpkg.list_pkgs",
-+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
-+ ):
-+ ret = zypper.upgrade(
-+ dist_upgrade=True,
-+ dryrun=True,
-+ fromrepo=["Dummy", "Dummy2"],
-+ novendorchange=False,
-+ )
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--from",
-+ "Dummy",
-+ "--from",
-+ "Dummy2",
-+ "--allow-vendor-change",
-+ )
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--from",
-+ "Dummy",
-+ "--from",
-+ "Dummy2",
-+ "--allow-vendor-change",
-+ "--debug-solver",
-+ )
-+
-+
-+ with patch(
-+ "salt.modules.zypperpkg.list_pkgs",
-+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
-+ ):
-+ ret = zypper.upgrade(
-+ dist_upgrade=True,
-+ dryrun=True,
-+ fromrepo=["Dummy", "Dummy2"],
-+ novendorchange=True,
-+ )
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--from",
-+ "Dummy",
-+ "--from",
-+ "Dummy2",
-+ "--no-allow-vendor-change",
-+ )
-+ zypper_mock.assert_any_call(
-+ "dist-upgrade",
-+ "--auto-agree-with-licenses",
-+ "--dry-run",
-+ "--from",
-+ "Dummy",
-+ "--from",
-+ "Dummy2",
-+ "--no-allow-vendor-change",
-+ "--debug-solver",
-+ )
-+
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
@@ -188,9 +97,57 @@ index a3d20f66d5..8cc84485b5 100644
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=True,
+ )
- self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--from', "Dummy",
- '--from', 'Dummy2', '--no-allow-vendor-change')
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--no-allow-vendor-change",
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--no-allow-vendor-change",
++ )
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ novendorchange=False,
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--from",
++ "Dummy",
++ "--from",
++ "Dummy2",
++ "--allow-vendor-change",
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--from",
++ "Dummy",
++ "--from",
++ "Dummy2",
++ "--allow-vendor-change",
++ "--debug-solver",
++ )
++
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
+@@ -728,6 +800,26 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ "--no-allow-vendor-change",
+ )
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
@@ -211,30 +168,24 @@ index a3d20f66d5..8cc84485b5 100644
+ "Dummy2",
+ "--allow-vendor-change",
+ )
++
def test_upgrade_kernel(self):
- '''
+ """
Test kernel package upgrade success.
-@@ -558,10 +675,15 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
- with self.assertRaises(CommandExecutionError) as cmd_exc:
- ret = zypper.upgrade(dist_upgrade=True, fromrepo=["DUMMY"])
-- self.assertEqual(cmd_exc.exception.info['changes'], {})
-- self.assertEqual(cmd_exc.exception.info['result']['stdout'], zypper_out)
-- zypper_mock.noraise.call.assert_called_with('dist-upgrade', '--auto-agree-with-licenses',
-- '--from', 'DUMMY')
-+ self.assertEqual(cmd_exc.exception.info["changes"], {})
-+ self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
-+ zypper_mock.noraise.call.assert_called_with(
+@@ -815,7 +907,11 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ self.assertEqual(cmd_exc.exception.info["changes"], {})
+ self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
+ zypper_mock.noraise.call.assert_called_with(
+- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY"
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--from",
+ "DUMMY",
+ "--no-allow-vendor-change",
-+ )
+ )
def test_upgrade_available(self):
- '''
--
-2.28.0
+2.29.2
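
Note: the rewritten hunk in this patch makes zypperpkg's upgrade() emit an explicit vendor-change flag in both directions on SUSE releases newer than 11, instead of only adding --no-allow-vendor-change on request. A minimal standalone sketch of that branch (the helper name _vendor_change_args is hypothetical, the real logic sits inline in upgrade(), and the wording of the pre-12 warning is inferred from the truncated hunk above):

import logging

log = logging.getLogger(__name__)

def _vendor_change_args(osrelease_major, novendorchange):
    # Mirrors the patched branch: releases newer than 11 always get an
    # explicit flag, so behaviour no longer depends on zypper's default;
    # older zypper only gets a warning.
    args = []
    if osrelease_major > 11:
        if novendorchange:
            args.append("--no-allow-vendor-change")
            log.info("Disabling vendor changes")
        else:
            args.append("--allow-vendor-change")
            log.info("Enabling vendor changes")
    else:
        log.warning(
            "Enabling/disabling vendor changes is not supported on this Zypper version"
        )
    return args

This is also why the updated tests assert --no-allow-vendor-change in the default call and --allow-vendor-change when novendorchange=False is passed.
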
diff --git a/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch b/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
new file mode 100644
index 0000000..8bed8a5
--- /dev/null
+++ b/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
@@ -0,0 +1,99 @@
+From 435d9fbee299b06e1c58cdc0574b6a1975841879 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 25 Nov 2020 15:09:41 +0300
+Subject: [PATCH] Fix salt.utils.stringutils.to_str calls to make it
+ work with numeric uid/gid
+
+---
+ salt/modules/file.py | 16 ++++++++++------
+ salt/states/file.py | 11 +++++++++--
+ 2 files changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/salt/modules/file.py b/salt/modules/file.py
+index b830b390d3..b9744393d7 100644
+--- a/salt/modules/file.py
++++ b/salt/modules/file.py
+@@ -4970,6 +4970,12 @@ def check_perms(
+ is_dir = os.path.isdir(name)
+ is_link = os.path.islink(name)
+
++ def __safe_to_str(s):
++ try:
++ return salt.utils.stringutils.to_str(s)
++ except:
++ return salt.utils.stringutils.to_str(str(s))
++
+ # user/group changes if needed, then check if it worked
+ if user:
+ if isinstance(user, int):
+@@ -4979,7 +4985,7 @@ def check_perms(
+ and user_to_uid(user) != user_to_uid(perms["luser"])
+ ) or (
+ not salt.utils.platform.is_windows()
+- and salt.utils.stringutils.to_str(user) != perms["luser"]
++ and __safe_to_str(user) != perms["luser"]
+ ):
+ perms["cuser"] = user
+
+@@ -4991,7 +4997,7 @@ def check_perms(
+ and group_to_gid(group) != group_to_gid(perms["lgroup"])
+ ) or (
+ not salt.utils.platform.is_windows()
+- and salt.utils.stringutils.to_str(group) != perms["lgroup"]
++ and __safe_to_str(group) != perms["lgroup"]
+ ):
+ perms["cgroup"] = group
+
+@@ -5023,8 +5029,7 @@ def check_perms(
+ and user != ""
+ ) or (
+ not salt.utils.platform.is_windows()
+- and salt.utils.stringutils.to_str(user)
+- != get_user(name, follow_symlinks=follow_symlinks)
++ and __safe_to_str(user) != get_user(name, follow_symlinks=follow_symlinks)
+ and user != ""
+ ):
+ if __opts__["test"] is True:
+@@ -5045,8 +5050,7 @@ def check_perms(
+ and group != ""
+ ) or (
+ not salt.utils.platform.is_windows()
+- and salt.utils.stringutils.to_str(group)
+- != get_group(name, follow_symlinks=follow_symlinks)
++ and __safe_to_str(group) != get_group(name, follow_symlinks=follow_symlinks)
+ and group != ""
+ ):
+ if __opts__["test"] is True:
+diff --git a/salt/states/file.py b/salt/states/file.py
+index 89c70eb454..fd8ffde757 100644
+--- a/salt/states/file.py
++++ b/salt/states/file.py
+@@ -989,15 +989,22 @@ def _check_dir_meta(name, user, group, mode, follow_symlinks=False):
+ if not stats:
+ changes["directory"] = "new"
+ return changes
++
++ def __safe_to_str(s):
++ try:
++ return salt.utils.stringutils.to_str(s)
++ except:
++ return salt.utils.stringutils.to_str(str(s))
++
+ if (
+ user is not None
+- and salt.utils.stringutils.to_str(user) != stats["user"]
++ and __safe_to_str(user) != stats["user"]
+ and user != stats.get("uid")
+ ):
+ changes["user"] = user
+ if (
+ group is not None
+- and salt.utils.stringutils.to_str(group) != stats["group"]
++ and __safe_to_str(group) != stats["group"]
+ and group != stats.get("gid")
+ ):
+ changes["group"] = group
+--
+2.29.2
+
+
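
Note: salt.utils.stringutils.to_str raises on non-string input, so a numeric user/group (uid 0, gid 0) used to blow up the ownership comparison. The new __safe_to_str falls back to str() first. A self-contained sketch of the same idea (to_str below is a reduced stand-in for the real helper, which also handles encodings and error modes):

def to_str(s, encoding="utf-8"):
    # Reduced stand-in for salt.utils.stringutils.to_str: accepts only
    # str/bytes and raises TypeError otherwise, the behaviour the patch
    # works around.
    if isinstance(s, str):
        return s
    if isinstance(s, (bytes, bytearray)):
        return bytes(s).decode(encoding)
    raise TypeError("expected str or bytes, got {}".format(type(s).__name__))

def safe_to_str(s):
    # Equivalent of the patch's __safe_to_str: stringify first on failure,
    # so a numeric uid/gid such as 0 compares cleanly against "0".
    try:
        return to_str(s)
    except TypeError:
        return to_str(str(s))

assert safe_to_str(0) == "0"
assert safe_to_str("root") == "root"

The patch itself uses a bare except:; catching TypeError, as above, would be the narrower choice.
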
diff --git a/fix-the-removed-six.itermitems-and-six.-_type-262.patch b/fix-the-removed-six.itermitems-and-six.-_type-262.patch
index 78cf626..9781d37 100644
--- a/fix-the-removed-six.itermitems-and-six.-_type-262.patch
+++ b/fix-the-removed-six.itermitems-and-six.-_type-262.patch
@@ -1,4 +1,4 @@
-From 5b6ac3bb81f24bbb8c39f80c71c490c339cce756 Mon Sep 17 00:00:00 2001
+From 01e2e60a5aba609d219b73f1018f12517a294a64 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Tue, 15 Sep 2020 13:46:06 +0200
Subject: [PATCH] Fix the removed six.itermitems and six.*_type* (#262)
@@ -11,29 +11,81 @@ on python 2.7.
* fixup! Fix the removed six.itermitems and six.*_type*
---
- salt/_compat.py | 1 +
- salt/modules/virt.py | 57 +++++++++++-----------
- salt/states/virt.py | 15 +++---
- salt/utils/data.py | 51 ++++++++++----------
- salt/utils/xmlutil.py | 5 +-
+ salt/_compat.py | 25 ++++++++++++++++---------
+ salt/modules/virt.py | 11 ++++-------
+ salt/states/virt.py | 1 +
+ salt/utils/xmlutil.py | 3 ++-
tests/unit/modules/test_virt.py | 2 +-
- tests/unit/utils/test_data.py | 85 ++++++++++++++++++---------------
- 7 files changed, 115 insertions(+), 101 deletions(-)
+ 5 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/salt/_compat.py b/salt/_compat.py
-index 965bb90da3..22daaa31a0 100644
+index d9425523cf..de100de3fa 100644
--- a/salt/_compat.py
+++ b/salt/_compat.py
-@@ -39,6 +39,7 @@ except Exception: # pylint: disable=broad-except
- # True if we are running on Python 3.
- PY3 = sys.version_info.major == 3
-
+@@ -7,6 +7,7 @@ Salt compatibility code
+ import binascii
+ import logging
+ import sys
+import xml.sax.saxutils as saxutils
- if PY3:
- import builtins
+ from salt.exceptions import SaltException
+ from salt.ext.six import binary_type, integer_types, string_types, text_type
+@@ -261,21 +262,25 @@ def ip_address(address):
+ try:
+ return ipaddress.IPv4Address(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+- log.debug('Error while parsing IPv4 address: %s', address)
++ log.debug("Error while parsing IPv4 address: %s", address)
+ log.debug(err)
+
+ try:
+ return IPv6AddressScoped(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+- log.debug('Error while parsing IPv6 address: %s', address)
++ log.debug("Error while parsing IPv6 address: %s", address)
+ log.debug(err)
+
+ if isinstance(address, bytes):
+- raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
+- 'Did you pass in a bytes (str in Python 2) instead '
+- 'of a unicode object?'.format(repr(address)))
++ raise ipaddress.AddressValueError(
++ "{} does not appear to be an IPv4 or IPv6 address. "
++ "Did you pass in a bytes (str in Python 2) instead "
++ "of a unicode object?".format(repr(address))
++ )
+
+- raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
++ raise ValueError(
++ "{} does not appear to be an IPv4 or IPv6 address".format(repr(address))
++ )
+
+
+ def ip_interface(address):
+@@ -302,16 +307,18 @@ def ip_interface(address):
+ try:
+ return ipaddress.IPv4Interface(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+- log.debug('Error while getting IPv4 interface for address %s', address)
++ log.debug("Error while getting IPv4 interface for address %s", address)
+ log.debug(err)
+
+ try:
+ return ipaddress.IPv6Interface(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+- log.debug('Error while getting IPv6 interface for address %s', address)
++ log.debug("Error while getting IPv6 interface for address %s", address)
+ log.debug(err)
+
+- raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
++ raise ValueError(
++ "{} does not appear to be an IPv4 or IPv6 interface".format(address)
++ )
+
+
+ if ipaddress:
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index cd80fbe608..c07fabb406 100644
+index ec40f08359..c042738370 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -88,8 +88,6 @@ import string # pylint: disable=deprecated-module
@@ -43,87 +95,20 @@ index cd80fbe608..c07fabb406 100644
-from xml.etree import ElementTree
-from xml.sax import saxutils
- # Import third party libs
import jinja2.exceptions
-@@ -104,7 +102,10 @@ import salt.utils.templates
+ import salt.utils.files
+@@ -99,8 +97,9 @@ import salt.utils.stringutils
+ import salt.utils.templates
import salt.utils.xmlutil as xmlutil
import salt.utils.yaml
- from salt._compat import ipaddress
-+from salt._compat import ElementTree
-+from salt._compat import saxutils
+-from salt._compat import ipaddress
++from salt._compat import ElementTree, ipaddress, saxutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
+from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
from salt.utils.virt import check_remote, download_remote
-@@ -657,8 +658,8 @@ def _gen_xml(
- context = {
- "hypervisor": hypervisor,
- "name": name,
-- "cpu": str(cpu),
-- "mem": str(mem),
-+ "cpu": six.text_type(cpu),
-+ "mem": six.text_type(mem),
- }
- if hypervisor in ["qemu", "kvm"]:
- context["controller_model"] = False
-@@ -722,7 +723,7 @@ def _gen_xml(
- "target_dev": _get_disk_target(targets, len(diskp), prefix),
- "disk_bus": disk["model"],
- "format": disk.get("format", "raw"),
-- "index": str(i),
-+ "index": six.text_type(i),
- }
- targets.append(disk_context["target_dev"])
- if disk.get("source_file"):
-@@ -827,8 +828,8 @@ def _gen_vol_xml(
- "name": name,
- "target": {"permissions": permissions, "nocow": nocow},
- "format": format,
-- "size": str(size),
-- "allocation": str(int(allocation) * 1024),
-+ "size": six.text_type(size),
-+ "allocation": six.text_type(int(allocation) * 1024),
- "backingStore": backing_store,
- }
- fn_ = "libvirt_volume.jinja"
-@@ -1253,7 +1254,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name):
- )
-
- # Transform the list to remove one level of dictionary and add the name as a property
-- disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()]
-+ disklist = [dict(d, name=name) for disk in disklist for name, d in six.iteritems(disk)]
-
- # Merge with the user-provided disks definitions
- if disks:
-@@ -1274,7 +1275,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name):
- disk["model"] = "ide"
-
- # Add the missing properties that have defaults
-- for key, val in overlay.items():
-+ for key, val in six.iteritems(overlay):
- if key not in disk:
- disk[key] = val
-
-@@ -1422,7 +1423,7 @@ def _complete_nics(interfaces, hypervisor):
- """
- Apply the default overlay to attributes
- """
-- for key, value in overlays[hypervisor].items():
-+ for key, value in six.iteritems(overlays[hypervisor]):
- if key not in attributes or not attributes[key]:
- attributes[key] = value
-
-@@ -1449,7 +1450,7 @@ def _nic_profile(profile_name, hypervisor):
- """
- Append dictionary profile data to interfaces list
- """
-- for interface_name, attributes in profile_dict.items():
-+ for interface_name, attributes in six.iteritems(profile_dict):
- attributes["name"] = interface_name
- interfaces.append(attributes)
-
-@@ -1520,7 +1521,7 @@ def _handle_remote_boot_params(orig_boot):
+@@ -1516,7 +1515,7 @@ def _handle_remote_boot_params(orig_boot):
"""
saltinst_dir = None
new_boot = orig_boot.copy()
@@ -132,139 +117,22 @@ index cd80fbe608..c07fabb406 100644
cases = [
{"efi"},
{"kernel", "initrd", "efi"},
-@@ -2380,8 +2381,8 @@ def update(
- # Update the cpu
- cpu_node = desc.find("vcpu")
- if cpu and int(cpu_node.text) != cpu:
-- cpu_node.text = str(cpu)
-- cpu_node.set("current", str(cpu))
-+ cpu_node.text = six.text_type(cpu)
-+ cpu_node.set("current", six.text_type(cpu))
- need_update = True
+@@ -2559,9 +2558,7 @@ def update(
- def _set_loader(node, value):
-@@ -2394,7 +2395,7 @@ def update(
- node.set("template", value)
-
- def _set_with_mib_unit(node, value):
-- node.text = str(value)
-+ node.text = six.text_type(value)
- node.set("unit", "MiB")
-
- # Update the kernel boot parameters
-@@ -2426,7 +2427,7 @@ def update(
- },
- ]
-
-- data = {k: v for k, v in locals().items() if bool(v)}
-+ data = {k: v for k, v in six.iteritems(locals()) if bool(v)}
- if boot_dev:
- data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
- need_update = need_update or salt.utils.xmlutil.change_xml(
-@@ -2547,7 +2548,7 @@ def update(
# Attaching device
if source_file:
- ElementTree.SubElement(
+- ElementTree.SubElement(
- updated_disk, "source", attrib={"file": source_file}
-+ updated_disk, "source", file=source_file
- )
+- )
++ ElementTree.SubElement(updated_disk, "source", file=source_file)
changes["disk"]["new"] = new_disks
-@@ -2609,7 +2610,7 @@ def update(
- except libvirt.libvirtError as err:
- if "errors" not in status:
- status["errors"] = []
-- status["errors"].append(str(err))
-+ status["errors"].append(six.text_type(err))
- conn.close()
- return status
-@@ -2823,7 +2824,7 @@ def _node_info(conn):
- info = {
- "cpucores": raw[6],
- "cpumhz": raw[3],
-- "cpumodel": str(raw[0]),
-+ "cpumodel": six.text_type(raw[0]),
- "cpus": raw[2],
- "cputhreads": raw[7],
- "numanodes": raw[4],
-@@ -3628,7 +3629,7 @@ def _define_vol_xml_str(conn, xml, pool=None): # pylint: disable=redefined-oute
- poolname = (
- pool if pool else __salt__["config.get"]("virt:storagepool", default_pool)
- )
-- pool = conn.storagePoolLookupByName(str(poolname))
-+ pool = conn.storagePoolLookupByName(six.text_type(poolname))
- ret = pool.createXML(xml, 0) is not None
- return ret
-
-@@ -3829,7 +3830,7 @@ def seed_non_shared_migrate(disks, force=False):
-
- salt '*' virt.seed_non_shared_migrate
- """
-- for _, data in disks.items():
-+ for _, data in six.iteritems(disks):
- fn_ = data["file"]
- form = data["file format"]
- size = data["virtual size"].split()[1][1:]
-@@ -4852,7 +4853,7 @@ def capabilities(**kwargs):
- try:
- caps = _capabilities(conn)
- except libvirt.libvirtError as err:
-- raise CommandExecutionError(str(err))
-+ raise CommandExecutionError(six.text_type(err))
- finally:
- conn.close()
- return caps
-@@ -5352,7 +5353,7 @@ def network_info(name=None, **kwargs):
- for net in nets
- }
- except libvirt.libvirtError as err:
-- log.debug("Silenced libvirt error: %s", str(err))
-+ log.debug("Silenced libvirt error: %s", six.text_type(err))
- finally:
- conn.close()
- return result
-@@ -6214,7 +6215,7 @@ def pool_info(name=None, **kwargs):
- ]
- result = {pool.name(): _pool_extract_infos(pool) for pool in pools}
- except libvirt.libvirtError as err:
-- log.debug("Silenced libvirt error: %s", str(err))
-+ log.debug("Silenced libvirt error: %s", six.text_type(err))
- finally:
- conn.close()
- return result
-@@ -6591,12 +6592,12 @@ def volume_infos(pool=None, volume=None, **kwargs):
- if vol.path():
- as_backing_store = {
- path
-- for (path, all_paths) in backing_stores.items()
-+ for (path, all_paths) in six.iteritems(backing_stores)
- if vol.path() in all_paths
- }
- used_by = [
- vm_name
-- for (vm_name, vm_disks) in disks.items()
-+ for (vm_name, vm_disks) in six.iteritems(disks)
- if vm_disks & as_backing_store or vol.path() in vm_disks
- ]
-
-@@ -6625,9 +6626,9 @@ def volume_infos(pool=None, volume=None, **kwargs):
- }
- for pool_obj in pools
- }
-- return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
-+ return {pool_name: volumes for (pool_name, volumes) in six.iteritems(vols) if volumes}
- except libvirt.libvirtError as err:
-- log.debug("Silenced libvirt error: %s", str(err))
-+ log.debug("Silenced libvirt error: %s", six.text_type(err))
- finally:
- conn.close()
- return result
diff --git a/salt/states/virt.py b/salt/states/virt.py
-index 3d99fd53c8..1a0c889d58 100644
+index b45cf72ed3..df7ebb63e6 100644
--- a/salt/states/virt.py
+++ b/salt/states/virt.py
-@@ -23,6 +23,7 @@ import salt.utils.files
+@@ -22,6 +22,7 @@ import salt.utils.files
import salt.utils.stringutils
import salt.utils.versions
from salt.exceptions import CommandExecutionError, SaltInvocationError
@@ -272,288 +140,19 @@ index 3d99fd53c8..1a0c889d58 100644
try:
import libvirt # pylint: disable=import-error
-@@ -97,7 +98,7 @@ def keys(name, basepath="/etc/pki", **kwargs):
- # rename them to something hopefully unique to avoid
- # overriding anything existing
- pillar_kwargs = {}
-- for key, value in kwargs.items():
-+ for key, value in six.iteritems(kwargs):
- pillar_kwargs["ext_pillar_virt.{}".format(key)] = value
-
- pillar = __salt__["pillar.ext"]({"libvirt": "_"}, pillar_kwargs)
-@@ -187,7 +188,7 @@ def _virt_call(
- else:
- noaction_domains.append(targeted_domain)
- except libvirt.libvirtError as err:
-- ignored_domains.append({"domain": targeted_domain, "issue": str(err)})
-+ ignored_domains.append({"domain": targeted_domain, "issue": six.text_type(err)})
- if not changed_domains:
- ret["result"] = not ignored_domains and bool(targeted_domains)
- ret["comment"] = "No changes had happened"
-@@ -461,7 +462,7 @@ def defined(
- ret["comment"] = "Domain {} defined".format(name)
- except libvirt.libvirtError as err:
- # Something bad happened when defining / updating the VM, report it
-- ret["comment"] = str(err)
-+ ret["comment"] = six.text_type(err)
- ret["result"] = False
-
- return ret
-@@ -704,7 +705,7 @@ def running(
-
- except libvirt.libvirtError as err:
- # Something bad happened when starting / updating the VM, report it
-- ret["comment"] = str(err)
-+ ret["comment"] = six.text_type(err)
- ret["result"] = False
-
- return ret
-@@ -867,7 +868,7 @@ def reverted(
- }
- except CommandExecutionError as err:
- if len(domains) > 1:
-- ignored_domains.append({"domain": domain, "issue": str(err)})
-+ ignored_domains.append({"domain": domain, "issue": six.text_type(err)})
- if len(domains) > 1:
- if result:
- ret["changes"]["reverted"].append(result)
-@@ -885,9 +886,9 @@ def reverted(
- if not ret["changes"]["reverted"]:
- ret["changes"].pop("reverted")
- except libvirt.libvirtError as err:
-- ret["comment"] = str(err)
-+ ret["comment"] = six.text_type(err)
- except CommandExecutionError as err:
-- ret["comment"] = str(err)
-+ ret["comment"] = six.text_type(err)
-
- return ret
-
-diff --git a/salt/utils/data.py b/salt/utils/data.py
-index 1c4c22efb3..d98b56e06f 100644
---- a/salt/utils/data.py
-+++ b/salt/utils/data.py
-@@ -4,6 +4,7 @@ Functions for manipulating, inspecting, or otherwise working with data types
- and data structures.
- """
-
-+from __future__ import absolute_import, print_function, unicode_literals
-
- # Import Python libs
- import copy
-@@ -71,7 +72,7 @@ class CaseInsensitiveDict(MutableMapping):
- return self._data[to_lowercase(key)][1]
-
- def __iter__(self):
-- return (item[0] for item in self._data.values())
-+ return (item[0] for item in six.itervalues(self._data))
-
- def __eq__(self, rval):
- if not isinstance(rval, Mapping):
-@@ -80,20 +81,20 @@ class CaseInsensitiveDict(MutableMapping):
- return dict(self.items_lower()) == dict(CaseInsensitiveDict(rval).items_lower())
-
- def __repr__(self):
-- return repr(dict(self.items()))
-+ return repr(dict(six.iteritems(self)))
-
- def items_lower(self):
- """
- Returns a generator iterating over keys and values, with the keys all
- being lowercase.
- """
-- return ((key, val[1]) for key, val in self._data.items())
-+ return ((key, val[1]) for key, val in six.iteritems(self._data))
-
- def copy(self):
- """
- Returns a copy of the object
- """
-- return CaseInsensitiveDict(self._data.items())
-+ return CaseInsensitiveDict(six.iteritems(self._data))
-
-
- def __change_case(data, attr, preserve_dict_class=False):
-@@ -115,7 +116,7 @@ def __change_case(data, attr, preserve_dict_class=False):
- __change_case(key, attr, preserve_dict_class),
- __change_case(val, attr, preserve_dict_class),
- )
-- for key, val in data.items()
-+ for key, val in six.iteritems(data)
- )
- if isinstance(data, Sequence):
- return data_type(
-@@ -145,7 +146,7 @@ def compare_dicts(old=None, new=None):
- dict describing the changes that were made.
- """
- ret = {}
-- for key in set(new or {}).union(old or {}):
-+ for key in set((new or {})).union((old or {})):
- if key not in old:
- # New key
- ret[key] = {"old": "", "new": new[key]}
-@@ -205,7 +206,7 @@ def _remove_circular_refs(ob, _seen=None):
- if isinstance(ob, dict):
- res = {
- _remove_circular_refs(k, _seen): _remove_circular_refs(v, _seen)
-- for k, v in ob.items()
-+ for k, v in six.iteritems(ob)
- }
- elif isinstance(ob, (list, tuple, set, frozenset)):
- res = type(ob)(_remove_circular_refs(v, _seen) for v in ob)
-@@ -336,7 +337,7 @@ def decode_dict(
- )
- # Make sure we preserve OrderedDicts
- ret = data.__class__() if preserve_dict_class else {}
-- for key, value in data.items():
-+ for key, value in six.iteritems(data):
- if isinstance(key, tuple):
- key = (
- decode_tuple(
-@@ -592,7 +593,7 @@ def encode_dict(
- # Clean data object before encoding to avoid circular references
- data = _remove_circular_refs(data)
- ret = data.__class__() if preserve_dict_class else {}
-- for key, value in data.items():
-+ for key, value in six.iteritems(data):
- if isinstance(key, tuple):
- key = (
- encode_tuple(key, encoding, errors, keep, preserve_dict_class)
-@@ -734,8 +735,8 @@ def filter_by(lookup_dict, lookup, traverse, merge=None, default="default", base
- # lookup_dict keys
- for each in val if isinstance(val, list) else [val]:
- for key in lookup_dict:
-- test_key = key if isinstance(key, str) else str(key)
-- test_each = each if isinstance(each, str) else str(each)
-+ test_key = key if isinstance(key, six.string_types) else six.text_type(key)
-+ test_each = each if isinstance(each, six.string_types) else six.text_type(each)
- if fnmatch.fnmatchcase(test_each, test_key):
- ret = lookup_dict[key]
- break
-@@ -851,11 +852,11 @@ def subdict_match(
- # begin with is that (by design) to_unicode will raise a TypeError if a
- # non-string/bytestring/bytearray value is passed.
- try:
-- target = str(target).lower()
-+ target = six.text_type(target).lower()
- except UnicodeDecodeError:
- target = salt.utils.stringutils.to_unicode(target).lower()
- try:
-- pattern = str(pattern).lower()
-+ pattern = six.text_type(pattern).lower()
- except UnicodeDecodeError:
- pattern = salt.utils.stringutils.to_unicode(pattern).lower()
-
-@@ -997,7 +998,7 @@ def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None)
- Takes a list of one-element dicts (as found in many SLS schemas) and
- repacks into a single dictionary.
- """
-- if isinstance(data, str):
-+ if isinstance(data, six.string_types):
- try:
- data = salt.utils.yaml.safe_load(data)
- except salt.utils.yaml.parser.ParserError as err:
-@@ -1009,7 +1010,7 @@ def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None)
- if val_cb is None:
- val_cb = lambda x, y: y
-
-- valid_non_dict = ((str,), (int,), float)
-+ valid_non_dict = (six.string_types, six.integer_types, float)
- if isinstance(data, list):
- for element in data:
- if isinstance(element, valid_non_dict):
-@@ -1067,7 +1068,7 @@ def is_list(value):
-
-
- @jinja_filter("is_iter")
--def is_iter(thing, ignore=(str,)):
-+def is_iter(thing, ignore=six.string_types):
- """
- Test if an object is iterable, but not a string type.
-
-@@ -1124,10 +1125,10 @@ def is_true(value=None):
- pass
-
- # Now check for truthiness
-- if isinstance(value, ((int,), float)):
-+ if isinstance(value, (six.integer_types, float)):
- return value > 0
-- if isinstance(value, str):
-- return str(value).lower() == "true"
-+ if isinstance(value, six.string_types):
-+ return six.text_type(value).lower() == "true"
- return bool(value)
-
-
-@@ -1167,7 +1168,7 @@ def simple_types_filter(data):
- if data is None:
- return data
-
-- simpletypes_keys = ((str,), str, (int,), float, bool)
-+ simpletypes_keys = (six.string_types, six.text_type, six.integer_types, float, bool)
- simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple])
-
- if isinstance(data, (list, tuple)):
-@@ -1183,7 +1184,7 @@ def simple_types_filter(data):
-
- if isinstance(data, dict):
- simpledict = {}
-- for key, value in data.items():
-+ for key, value in six.iteritems(data):
- if key is not None and not isinstance(key, simpletypes_keys):
- key = repr(key)
- if value is not None and isinstance(value, (dict, list, tuple)):
-@@ -1205,8 +1206,8 @@ def stringify(data):
- for item in data:
- if six.PY2 and isinstance(item, str):
- item = salt.utils.stringutils.to_unicode(item)
-- elif not isinstance(item, str):
-- item = str(item)
-+ elif not isinstance(item, six.string_types):
-+ item = six.text_type(item)
- ret.append(item)
- return ret
-
-@@ -1282,7 +1283,7 @@ def filter_falsey(data, recurse_depth=None, ignore_types=()):
-
- if isinstance(data, dict):
- processed_elements = [
-- (key, filter_element(value)) for key, value in data.items()
-+ (key, filter_element(value)) for key, value in six.iteritems(data)
- ]
- return type(data)(
- [
-@@ -1472,7 +1473,7 @@ def get_value(obj, path, default=None):
- if obj is None:
- return res
- if isinstance(obj, dict):
-- items = obj.items()
-+ items = six.iteritems(obj)
- elif isinstance(obj, list):
- items = enumerate(obj)
-
diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
-index 2b9c7bf43f..68191bc528 100644
+index b9f047820b..111ca155d4 100644
--- a/salt/utils/xmlutil.py
+++ b/salt/utils/xmlutil.py
-@@ -9,6 +9,7 @@ from xml.etree import ElementTree
+@@ -7,6 +7,7 @@ import string # pylint: disable=deprecated-module
+ from xml.etree import ElementTree
- # Import salt libs
import salt.utils.data
+from salt.ext import six
def _conv_name(x):
-@@ -147,7 +148,7 @@ def set_node_text(node, value):
- :param node: the node to set the text to
- :param value: the value to set
- """
-- node.text = str(value)
-+ node.text = six.text_type(value)
-
-
- def clean_node(parent_map, node, ignored=None):
-@@ -162,7 +163,7 @@ def clean_node(parent_map, node, ignored=None):
+@@ -160,7 +161,7 @@ def clean_node(parent_map, node, ignored=None):
has_text = node.text is not None and node.text.strip()
parent = parent_map.get(node)
if (
@@ -563,10 +162,10 @@ index 2b9c7bf43f..68191bc528 100644
and not has_text
):
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index 5ec8de77e7..27c4b9d1b0 100644
+index 4775fec31f..4a4c0395a7 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -48,7 +48,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
+@@ -45,7 +45,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
"""
def __init__(self, msg):
@@ -575,202 +174,7 @@ index 5ec8de77e7..27c4b9d1b0 100644
self.msg = msg
def get_error_message(self):
-diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py
-index 8a6956d442..fb4a8cc3c2 100644
---- a/tests/unit/utils/test_data.py
-+++ b/tests/unit/utils/test_data.py
-@@ -1,14 +1,17 @@
-+# -*- coding: utf-8 -*-
- """
- Tests for salt.utils.data
- """
-
- # Import Python libs
-+from __future__ import absolute_import, print_function, unicode_literals
-
- import logging
-
- # Import Salt libs
- import salt.utils.data
- import salt.utils.stringutils
-+from salt.ext import six
-
- # Import 3rd party libs
- from salt.ext.six.moves import ( # pylint: disable=import-error,redefined-builtin
-@@ -414,18 +417,19 @@ class DataTestCase(TestCase):
- )
- self.assertEqual(ret, expected)
-
-- # The binary data in the data structure should fail to decode, even
-- # using the fallback, and raise an exception.
-- self.assertRaises(
-- UnicodeDecodeError,
-- salt.utils.data.decode,
-- self.test_data,
-- keep=False,
-- normalize=True,
-- preserve_dict_class=True,
-- preserve_tuples=True,
-- to_str=True,
-- )
-+ if six.PY3:
-+ # The binary data in the data structure should fail to decode, even
-+ # using the fallback, and raise an exception.
-+ self.assertRaises(
-+ UnicodeDecodeError,
-+ salt.utils.data.decode,
-+ self.test_data,
-+ keep=False,
-+ normalize=True,
-+ preserve_dict_class=True,
-+ preserve_tuples=True,
-+ to_str=True,
-+ )
-
- # Now munge the expected data so that we get what we would expect if we
- # disable preservation of dict class and tuples
-@@ -469,9 +473,14 @@ class DataTestCase(TestCase):
-
- # Test binary blob
- self.assertEqual(salt.utils.data.decode(BYTES, keep=True, to_str=True), BYTES)
-- self.assertRaises(
-- UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False, to_str=True,
-- )
-+ if six.PY3:
-+ self.assertRaises(
-+ UnicodeDecodeError,
-+ salt.utils.data.decode,
-+ BYTES,
-+ keep=False,
-+ to_str=True,
-+ )
-
- def test_decode_fallback(self):
- """
-@@ -666,7 +675,7 @@ class DataTestCase(TestCase):
- self.assertRaises(TypeError, salt.utils.data.stringify, 9)
- self.assertEqual(
- salt.utils.data.stringify(
-- ["one", "two", "three", 4, 5]
-+ ["one", "two", str("three"), 4, 5]
- ), # future lint: disable=blacklisted-function
- ["one", "two", "three", "4", "5"],
- )
-@@ -720,7 +729,7 @@ class FilterFalseyTestCase(TestCase):
- # Check returned type equality
- self.assertIs(type(old_list), type(new_list))
- # Test with set
-- old_set = {"foo", "bar"}
-+ old_set = set(["foo", "bar"])
- new_set = salt.utils.data.filter_falsey(old_set)
- self.assertEqual(old_set, new_set)
- # Check returned type equality
-@@ -839,9 +848,9 @@ class FilterFalseyTestCase(TestCase):
- Test filtering a set without recursing.
- Note that a set cannot contain unhashable types, so recursion is not possible.
- """
-- old_set = {"foo", None, 0, ""}
-+ old_set = set(["foo", None, 0, ""])
- new_set = salt.utils.data.filter_falsey(old_set)
-- expect_set = {"foo"}
-+ expect_set = set(["foo"])
- self.assertEqual(expect_set, new_set)
- self.assertIs(type(expect_set), type(new_set))
-
-@@ -1053,13 +1062,13 @@ class FilterRecursiveDiff(TestCase):
- """
- Test cases where equal sets are compared.
- """
-- test_set = {0, 1, 2, 3, "foo"}
-+ test_set = set([0, 1, 2, 3, "foo"])
- self.assertEqual({}, salt.utils.data.recursive_diff(test_set, test_set))
-
- # This is a bit of an oddity, as python seems to sort the sets in memory
- # so both sets end up with the same ordering (0..3).
-- set_one = {0, 1, 2, 3}
-- set_two = {3, 2, 1, 0}
-+ set_one = set([0, 1, 2, 3])
-+ set_two = set([3, 2, 1, 0])
- self.assertEqual({}, salt.utils.data.recursive_diff(set_one, set_two))
-
- def test_tuple_equality(self):
-@@ -1149,13 +1158,13 @@ class FilterRecursiveDiff(TestCase):
- Tricky as the sets are compared zipped, so shuffled sets of equal values
- are considered different.
- """
-- set_one = {0, 1, 2, 4}
-- set_two = {0, 1, 3, 4}
-- expected_result = {"old": {2}, "new": {3}}
-+ set_one = set([0, 1, 2, 4])
-+ set_two = set([0, 1, 3, 4])
-+ expected_result = {"old": set([2]), "new": set([3])}
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(set_one, set_two)
- )
-- expected_result = {"new": {2}, "old": {3}}
-+ expected_result = {"new": set([2]), "old": set([3])}
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(set_two, set_one)
- )
-@@ -1164,8 +1173,8 @@ class FilterRecursiveDiff(TestCase):
- # Python 2.7 seems to sort it (i.e. set_one below becomes {0, 1, 'foo', 'bar'}
- # However Python 3.6.8 stores it differently each run.
- # So just test for "not equal" here.
-- set_one = {0, "foo", 1, "bar"}
-- set_two = {"foo", 1, "bar", 2}
-+ set_one = set([0, "foo", 1, "bar"])
-+ set_two = set(["foo", 1, "bar", 2])
- expected_result = {}
- self.assertNotEqual(
- expected_result, salt.utils.data.recursive_diff(set_one, set_two)
-@@ -1203,18 +1212,18 @@ class FilterRecursiveDiff(TestCase):
- expected_result, salt.utils.data.recursive_diff(list_two, list_one)
- )
-
-- mixed_one = {"foo": {0, 1, 2}, "bar": [0, 1, 2]}
-- mixed_two = {"foo": {1, 2, 3}, "bar": [1, 2, 3]}
-+ mixed_one = {"foo": set([0, 1, 2]), "bar": [0, 1, 2]}
-+ mixed_two = {"foo": set([1, 2, 3]), "bar": [1, 2, 3]}
- expected_result = {
-- "old": {"foo": {0}, "bar": [0, 1, 2]},
-- "new": {"foo": {3}, "bar": [1, 2, 3]},
-+ "old": {"foo": set([0]), "bar": [0, 1, 2]},
-+ "new": {"foo": set([3]), "bar": [1, 2, 3]},
- }
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
- )
- expected_result = {
-- "new": {"foo": {0}, "bar": [0, 1, 2]},
-- "old": {"foo": {3}, "bar": [1, 2, 3]},
-+ "new": {"foo": set([0]), "bar": [0, 1, 2]},
-+ "old": {"foo": set([3]), "bar": [1, 2, 3]},
- }
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)
-@@ -1236,7 +1245,7 @@ class FilterRecursiveDiff(TestCase):
- Test case comparing a list with a set, will be compared unordered.
- """
- mixed_one = [1, 2, 3]
-- mixed_two = {3, 2, 1}
-+ mixed_two = set([3, 2, 1])
- expected_result = {}
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
-@@ -1351,9 +1360,9 @@ class FilterRecursiveDiff(TestCase):
- Test case comparing two sets of unequal length.
- This does not do anything special, as it is unordered.
- """
-- set_one = {1, 2, 3}
-- set_two = {4, 3, 2, 1}
-- expected_result = {"old": set(), "new": {4}}
-+ set_one = set([1, 2, 3])
-+ set_two = set([4, 3, 2, 1])
-+ expected_result = {"old": set([]), "new": set([4])}
- self.assertEqual(
- expected_result, salt.utils.data.recursive_diff(set_one, set_two)
- )
--
-2.28.0
+2.29.2
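
Note: the refreshed patch is much smaller than its 3000.3 predecessor because the 3002 code base has evidently already dropped most six usage; what remains is the salt._compat consolidation (ElementTree, saxutils, ipaddress) plus mechanical reformatting of the ip_address/ip_interface error paths. A sketch of the consolidated-import pattern the patch settles on (the fallback branch exists only so the snippet runs outside a Salt checkout):

try:
    from salt._compat import ElementTree, ipaddress, saxutils
except ImportError:
    # Outside a Salt checkout the stdlib equivalents behave the same
    # for this demonstration.
    import ipaddress
    import xml.sax.saxutils as saxutils
    from xml.etree import ElementTree

print(saxutils.escape("<vm name='test'/>"))
print(ipaddress.ip_address("192.0.2.1"))
print(ElementTree.fromstring("<a><b/></a>").find("b").tag)
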
diff --git a/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch b/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch
deleted file mode 100644
index 083a27e..0000000
--- a/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 5a2c7671be0fcdf03050049ac4a1bbf4929abf1e Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Fri, 27 Mar 2020 15:58:40 +0000
-Subject: [PATCH] Fix typo on msgpack version when sanitizing msgpack
- kwargs (bsc#1167437)
-
----
- salt/utils/msgpack.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/salt/utils/msgpack.py b/salt/utils/msgpack.py
-index 1d02aa96ba8b659eb4038f00563c9cfc31a568e5..4b5a256513a524a33d7d42773644567a0970a46b 100644
---- a/salt/utils/msgpack.py
-+++ b/salt/utils/msgpack.py
-@@ -61,7 +61,7 @@ def _sanitize_msgpack_kwargs(kwargs):
- assert isinstance(kwargs, dict)
- if version < (0, 6, 0) and kwargs.pop('strict_map_key', None) is not None:
- log.info('removing unsupported `strict_map_key` argument from msgpack call')
-- if version < (0, 5, 5) and kwargs.pop('raw', None) is not None:
-+ if version < (0, 5, 2) and kwargs.pop('raw', None) is not None:
- log.info('removing unsupported `raw` argument from msgpack call')
- if version < (0, 4, 0) and kwargs.pop('use_bin_type', None) is not None:
- log.info('removing unsupported `use_bin_type` argument from msgpack call')
---
-2.23.0
-
-
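
Note: this patch is dropped, presumably because its one-line fix (gating the raw keyword on msgpack 0.5.2 rather than the mistyped 0.5.5) is already contained in the 3002.2 code base. For reference, a standalone sketch of the corrected version gating it carried:

import logging

log = logging.getLogger(__name__)

def sanitize_msgpack_kwargs(kwargs, version):
    # Strip keyword arguments that the installed msgpack is too old to
    # understand; ``version`` is a tuple such as (0, 5, 1).
    if version < (0, 6, 0) and kwargs.pop("strict_map_key", None) is not None:
        log.info("removing unsupported `strict_map_key` argument from msgpack call")
    if version < (0, 5, 2) and kwargs.pop("raw", None) is not None:
        log.info("removing unsupported `raw` argument from msgpack call")
    if version < (0, 4, 0) and kwargs.pop("use_bin_type", None) is not None:
        log.info("removing unsupported `use_bin_type` argument from msgpack call")
    return kwargs

print(sanitize_msgpack_kwargs({"raw": False, "use_bin_type": True}, (0, 5, 1)))
# -> {'use_bin_type': True}
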
diff --git a/fix-unit-test-for-grains-core.patch b/fix-unit-test-for-grains-core.patch
index 1d78938..95364fd 100644
--- a/fix-unit-test-for-grains-core.patch
+++ b/fix-unit-test-for-grains-core.patch
@@ -1,42 +1,41 @@
-From 6bb7b6c4a530abb7e831449545a35ee5ede49dcb Mon Sep 17 00:00:00 2001
+From 192bac1ae2f20b098384264c8802034a340cd124 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Thu, 11 Oct 2018 16:20:40 +0200
Subject: [PATCH] Fix unit test for grains core
---
- tests/unit/grains/test_core.py | 11 +++++------
- 1 file changed, 5 insertions(+), 6 deletions(-)
+ tests/unit/grains/test_core.py | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index b31f5dcddd..c40595eb3f 100644
+index 34aaa4f5bc..7dbf34deac 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -68,11 +68,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- def test_parse_etc_os_release(self, path_isfile_mock):
- path_isfile_mock.side_effect = lambda x: x == "/usr/lib/os-release"
- with salt.utils.files.fopen(os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")) as os_release_file:
+@@ -59,10 +59,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ with salt.utils.files.fopen(
+ os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")
+ ) as os_release_file:
- os_release_content = os_release_file.read()
- with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)):
-- os_release = core._parse_os_release(
-- '/etc/os-release',
-- '/usr/lib/os-release')
+ os_release_content = os_release_file.readlines()
+ with patch("salt.utils.files.fopen", mock_open()) as os_release_file:
+ os_release_file.return_value.__iter__.return_value = os_release_content
-+ os_release = core._parse_os_release(["/etc/os-release", "/usr/lib/os-release"])
- self.assertEqual(os_release, {
- "NAME": "Ubuntu",
- "VERSION": "17.10 (Artful Aardvark)",
-@@ -134,7 +133,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
-
+ os_release = core._parse_os_release(
+- "/etc/os-release", "/usr/lib/os-release"
++ ["/etc/os-release", "/usr/lib/os-release"]
+ )
+ self.assertEqual(
+ os_release,
+@@ -172,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
def test_missing_os_release(self):
- with patch('salt.utils.files.fopen', mock_open(read_data={})):
-- os_release = core._parse_os_release('/etc/os-release', '/usr/lib/os-release')
-+ os_release = core._parse_os_release(['/etc/os-release', '/usr/lib/os-release'])
+ with patch("salt.utils.files.fopen", mock_open(read_data={})):
+ os_release = core._parse_os_release(
+- "/etc/os-release", "/usr/lib/os-release"
++ ["/etc/os-release", "/usr/lib/os-release"]
+ )
self.assertEqual(os_release, {})
- @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows')
--
-2.16.4
+2.29.2
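
Note: the test update tracks an upstream signature change, _parse_os_release now takes a single iterable of candidate paths instead of two positional arguments. A simplified sketch of a parser with that calling convention (the real implementation handles quoting and escapes more carefully):

def parse_os_release(paths):
    # First readable file in the candidate list wins, matching the
    # /etc/os-release then /usr/lib/os-release fallback order.
    data = {}
    for path in paths:
        try:
            with open(path) as fhr:
                for line in fhr:
                    line = line.strip()
                    if line and not line.startswith("#") and "=" in line:
                        key, value = line.split("=", 1)
                        data[key] = value.strip('"')
            return data
        except OSError:
            continue
    return data

print(parse_os_release(["/etc/os-release", "/usr/lib/os-release"]))
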
diff --git a/fix-unit-tests-for-batch-async-after-refactor.patch b/fix-unit-tests-for-batch-async-after-refactor.patch
index 1a58e4a..c3d6b36 100644
--- a/fix-unit-tests-for-batch-async-after-refactor.patch
+++ b/fix-unit-tests-for-batch-async-after-refactor.patch
@@ -1,21 +1,21 @@
-From e9f2af1256a52d58a7c8e6dd0122eb6d5cc47dd3 Mon Sep 17 00:00:00 2001
+From 09a871c197be4933475ee4582755d9b0cb5a700e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 4 Mar 2020 10:13:43 +0000
Subject: [PATCH] Fix unit tests for batch async after refactor
---
- tests/unit/cli/test_batch_async.py | 18 +++++++++++++++++-
- 1 file changed, 17 insertions(+), 1 deletion(-)
+ tests/unit/cli/test_batch_async.py | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index f1d36a81fb..e1ce60859b 100644
+index b04965268a..dcee9a87bd 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -126,9 +126,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
- self.batch.timedout_minions = {'bar'}
+@@ -120,9 +120,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ self.batch.timedout_minions = {"bar"}
self.batch.event = MagicMock()
- self.batch.metadata = {'mykey': 'myvalue'}
+ self.batch.metadata = {"mykey": "myvalue"}
+ old_event = self.batch.event
self.batch.end_batch()
self.assertEqual(
@@ -23,8 +23,8 @@ index f1d36a81fb..e1ce60859b 100644
+ old_event.fire_event.call_args[0],
(
{
- 'available_minions': set(['foo', 'bar']),
-@@ -146,6 +147,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
+ "available_minions": {"foo", "bar"},
+@@ -140,6 +141,23 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
event = MagicMock()
batch.event = event
batch.__del__()
@@ -36,17 +36,19 @@ index f1d36a81fb..e1ce60859b 100644
+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+ event = MagicMock()
+ batch.event = event
-+ batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return'), ('salt/job/4321/ret/*', 'find_job_return') }
++ batch.patterns = {
++ ("salt/job/1234/ret/*", "find_job_return"),
++ ("salt/job/4321/ret/*", "find_job_return"),
++ }
+ batch.close_safe()
+ self.assertEqual(batch.local, None)
+ self.assertEqual(batch.event, None)
+ self.assertEqual(batch.ioloop, None)
-+ self.assertEqual(
-+ len(event.unsubscribe.mock_calls), 2)
- self.assertEqual(
- len(event.remove_event_handler.mock_calls), 1)
++ self.assertEqual(len(event.unsubscribe.mock_calls), 2)
+ self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
+ @tornado.testing.gen_test
--
-2.23.0
+2.29.2
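
Note: the added test pins down the contract of the new close_safe(): one unsubscribe per registered return pattern, exactly one remove_event_handler call, and all live references nulled out. A reduced, runnable model of that contract (MiniBatch is a hypothetical stand-in for BatchAsync):

from unittest.mock import MagicMock

class MiniBatch:
    def __init__(self, event, patterns):
        self.event = event
        self.patterns = patterns
        self.local = object()
        self.ioloop = object()

    def close_safe(self):
        # Unsubscribe every pattern before dropping the handler, so a
        # half-closed batch cannot receive stale job returns.
        for pattern, _handler_name in self.patterns:
            self.event.unsubscribe(pattern)
        self.event.remove_event_handler(None)
        self.event = self.local = self.ioloop = None

event = MagicMock()
batch = MiniBatch(
    event,
    {
        ("salt/job/1234/ret/*", "find_job_return"),
        ("salt/job/4321/ret/*", "find_job_return"),
    },
)
batch.close_safe()
assert batch.event is None and batch.local is None and batch.ioloop is None
assert len(event.unsubscribe.mock_calls) == 2
assert len(event.remove_event_handler.mock_calls) == 1
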
diff --git a/fix-virt.update-with-cpu-defined-263.patch b/fix-virt.update-with-cpu-defined-263.patch
index e61f0d8..450d941 100644
--- a/fix-virt.update-with-cpu-defined-263.patch
+++ b/fix-virt.update-with-cpu-defined-263.patch
@@ -1,4 +1,4 @@
-From 37800f008e46a7321bcd4b88b4858d3ea1fabcdf Mon Sep 17 00:00:00 2001
+From c05d571058b9520dbaf4aba3de001b1aefe8e2c2 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Tue, 15 Sep 2020 16:03:30 +0200
Subject: [PATCH] Fix virt.update with cpu defined (#263)
@@ -10,11 +10,11 @@ updated.
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index c07fabb406..4a8a55ced6 100644
+index c042738370..c1a73fcb7f 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -2430,9 +2430,9 @@ def update(
- data = {k: v for k, v in six.iteritems(locals()) if bool(v)}
+@@ -2441,9 +2441,9 @@ def update(
+ data = {k: v for k, v in locals().items() if bool(v)}
if boot_dev:
data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
- need_update = need_update or salt.utils.xmlutil.change_xml(
@@ -26,6 +26,6 @@ index c07fabb406..4a8a55ced6 100644
# Update the XML definition with the new disks and diff changes
devices_node = desc.find("devices")
--
-2.28.0
+2.29.2
diff --git a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
index d29f694..8670aa3 100644
--- a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
+++ b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
@@ -1,70 +1,79 @@
-From a8f0a15e4067ec278c8a2d690e3bf815523286ca Mon Sep 17 00:00:00 2001
+From f3ac041e34952a4b753e4afc9dc4b6adaa1d0ff2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 12 Mar 2020 13:26:51 +0000
-Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test after
- rebase
+Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test
+ after rebase
---
- tests/integration/modules/test_pkg.py | 56 +++------------------------
- 1 file changed, 6 insertions(+), 50 deletions(-)
+ tests/integration/modules/test_pkg.py | 63 ++++-----------------------
+ 1 file changed, 8 insertions(+), 55 deletions(-)
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
-index 6f3767bfbd272848277b877d1fe640caf8f349f6..0f4c5c9d459c56bb485408f943c1dee49c46cd21 100644
+index 3ece73074b..933755a9ec 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
-@@ -134,6 +134,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
- if repo is not None:
- self.run_function('pkg.del_repo', [repo])
+@@ -143,6 +143,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+ self.run_function("pkg.del_repo", [repo])
+ @slowTest
+ @destructiveTest
-+ @requires_salt_modules('pkg.mod_repo', 'pkg.del_repo', 'pkg.get_repo')
++ @requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo")
+ @requires_network()
+ @requires_system_grains
def test_mod_del_repo_multiline_values(self):
- '''
+ """
test modifying and deleting a software repository defined with multiline values
-@@ -141,8 +145,9 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
- os_grain = self.run_function('grains.item', ['os'])['os']
+@@ -150,10 +154,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+ os_grain = self.run_function("grains.item", ["os"])["os"]
repo = None
try:
-- if os_grain in ['CentOS', 'RedHat']:
-+ if os_grain in ['CentOS', 'RedHat', 'SUSE']:
- my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/'
-+ expected_get_repo_baseurl_zypp = 'http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/'
- expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/'
- major_release = int(
- self.run_function(
-@@ -189,55 +194,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+- if os_grain in ["CentOS", "RedHat"]:
++ if os_grain in ["CentOS", "RedHat", "SUSE"]:
+ my_baseurl = (
+ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
+ )
++ expected_get_repo_baseurl_zypp = (
++ "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
++ )
+ expected_get_repo_baseurl = (
+ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
+ )
+@@ -207,60 +214,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
if repo is not None:
- self.run_function('pkg.del_repo', [repo])
+ self.run_function("pkg.del_repo", [repo])
- def test_mod_del_repo_multiline_values(self):
-- '''
+- """
- test modifying and deleting a software repository defined with multiline values
-- '''
-- os_grain = self.run_function('grains.item', ['os'])['os']
+- """
+- os_grain = self.run_function("grains.item", ["os"])["os"]
- repo = None
- try:
-- if os_grain in ['CentOS', 'RedHat', 'SUSE']:
-- my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/'
-- expected_get_repo_baseurl_zypp = 'http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/'
-- expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/'
-- major_release = int(
-- self.run_function(
-- 'grains.item',
-- ['osmajorrelease']
-- )['osmajorrelease']
+- if os_grain in ["CentOS", "RedHat", "SUSE"]:
+- my_baseurl = (
+- "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
- )
-- repo = 'fakerepo'
-- name = 'Fake repo for RHEL/CentOS/SUSE'
+- expected_get_repo_baseurl_zypp = (
+- "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
+- )
+- expected_get_repo_baseurl = (
+- "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
+- )
+- major_release = int(
+- self.run_function("grains.item", ["osmajorrelease"])[
+- "osmajorrelease"
+- ]
+- )
+- repo = "fakerepo"
+- name = "Fake repo for RHEL/CentOS/SUSE"
- baseurl = my_baseurl
-- gpgkey = 'https://my.fake.repo/foo/bar/MY-GPG-KEY.pub'
-- failovermethod = 'priority'
+- gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub"
+- failovermethod = "priority"
- gpgcheck = 1
- enabled = 1
- ret = self.run_function(
-- 'pkg.mod_repo',
+- "pkg.mod_repo",
- [repo],
- name=name,
- baseurl=baseurl,
@@ -78,20 +87,20 @@ index 6f3767bfbd272848277b877d1fe640caf8f349f6..0f4c5c9d459c56bb485408f943c1dee4
- self.assertNotEqual(ret, {})
- repo_info = ret[next(iter(ret))]
- self.assertIn(repo, repo_info)
-- self.assertEqual(repo_info[repo]['baseurl'], my_baseurl)
-- ret = self.run_function('pkg.get_repo', [repo])
-- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
-- self.run_function('pkg.mod_repo', [repo])
-- ret = self.run_function('pkg.get_repo', [repo])
-- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
+- self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
+- ret = self.run_function("pkg.get_repo", [repo])
+- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
+- self.run_function("pkg.mod_repo", [repo])
+- ret = self.run_function("pkg.get_repo", [repo])
+- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
- finally:
- if repo is not None:
-- self.run_function('pkg.del_repo', [repo])
+- self.run_function("pkg.del_repo", [repo])
-
- @requires_salt_modules('pkg.owner')
+ @requires_salt_modules("pkg.owner")
def test_owner(self):
- '''
+ """
--
-2.23.0
+2.29.2
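
Note: the SUSE branch is re-added with a second expectation because zypper persists a multiline baseurl with the embedded newline and continuation space percent-encoded. The encoded form asserted by the test can be reproduced with standard URL quoting (whether zypper applies exactly urllib's rules elsewhere is an assumption; the expectation string itself comes from the test):

from urllib.parse import quote

my_baseurl = "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"

# "\n " becomes "%0A%20" once percent-encoded.
zypp_stored = quote(my_baseurl, safe=":/")
assert zypp_stored == (
    "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
)
print(zypp_stored)
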
diff --git a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch b/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
index ffbbc22..abac036 100644
--- a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
+++ b/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
@@ -1,43 +1,53 @@
-From eb51734ad93b1fa0c6bc8fde861fdabfe3e0d6b0 Mon Sep 17 00:00:00 2001
+From 81f38c8cb16634b2c86b3e1e7c745870f90771d0 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Thu, 13 Jun 2019 17:48:55 +0200
Subject: [PATCH] Fix zypper pkg.list_pkgs expectation and dpkg mocking
---
- tests/unit/modules/test_dpkg_lowpkg.py | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
+ tests/unit/modules/test_dpkg_lowpkg.py | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py
-index a0b3346f9d..bc564f080a 100644
+index 160bbcd5b1..dadbc30dfa 100644
--- a/tests/unit/modules/test_dpkg_lowpkg.py
+++ b/tests/unit/modules/test_dpkg_lowpkg.py
-@@ -125,9 +125,9 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}):
- self.assertEqual(dpkg.file_dict('httpd'), 'Error: error')
+@@ -308,9 +308,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
+ dpkg.bin_pkg_info("package.deb")["name"], "package_name"
+ )
-- @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-- @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-- @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
+- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
+- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
+- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
++ @patch(
++ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail",
++ MagicMock(return_value=dselect_pkg),
++ )
++ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
++ @patch(
++ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3")
++ )
def test_info(self):
- '''
+ """
Test info
-@@ -152,9 +152,9 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
- assert pkg_data['maintainer'] == 'Simpsons Developers '
- assert pkg_data['license'] == 'BSD v3'
+@@ -359,9 +364,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
+ )
+ assert pkg_data["license"] == "BSD v3"
-- @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-- @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-- @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_info', MagicMock(return_value=pkgs_info))
-+ @patch('salt.modules.dpkg_lowpkg._get_pkg_license', MagicMock(return_value='BSD v3'))
+- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
+- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
+- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
++ @patch(
++ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail",
++ MagicMock(return_value=dselect_pkg),
++ )
++ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
++ @patch(
++ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3")
++ )
def test_info_attr(self):
- '''
+ """
Test info with 'attr' parameter
--
-2.16.4
+2.29.2
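
Note: the substantive change here is the patch target, salt.modules.dpkg became salt.modules.dpkg_lowpkg, and mock.patch resolves its target string by import path, so decorators naming the old module fail to resolve. A generic illustration of the targeting rule using a stdlib module:

from unittest.mock import MagicMock, patch

# patch() looks the attribute up on the module named in the string at
# the moment the patch starts; rename the module and the old string no
# longer resolves.
with patch("os.path.exists", MagicMock(return_value=True)):
    import os.path
    assert os.path.exists("/definitely/not/there")
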
diff --git a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch b/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch
index 0cda954..766c8ec 100644
--- a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch
+++ b/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch
@@ -1,4 +1,4 @@
-From 0612549b3acfeb15e0b499b6f469d64062d6ae2d Mon Sep 17 00:00:00 2001
+From b9ba6875945e1ffafdeb862d8b2ac7fccd9cccf5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 25 Jun 2018 13:06:40 +0100
@@ -14,17 +14,17 @@ Fix '_find_remove_targets' after aligning Zypper with pkg state
1 file changed, 21 deletions(-)
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
-index c0fa2f6b69..a13d418400 100644
+index a1b2a122bb..f7327a33e3 100644
--- a/salt/states/pkg.py
+++ b/salt/states/pkg.py
-@@ -450,16 +450,6 @@ def _find_remove_targets(name=None,
+@@ -477,16 +477,6 @@ def _find_remove_targets(
- if __grains__['os'] == 'FreeBSD' and origin:
- cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]
-- elif __grains__['os_family'] == 'Suse':
+ if __grains__["os"] == "FreeBSD" and origin:
+ cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname]
+- elif __grains__["os_family"] == "Suse":
- # On SUSE systems. Zypper returns packages without "arch" in name
- try:
-- namepart, archpart = pkgname.rsplit('.', 1)
+- namepart, archpart = pkgname.rsplit(".", 1)
- except ValueError:
- cver = cur_pkgs.get(pkgname, [])
- else:
@@ -34,14 +34,14 @@ index c0fa2f6b69..a13d418400 100644
else:
cver = cur_pkgs.get(pkgname, [])
-@@ -866,17 +856,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
- cver = new_pkgs.get(pkgname.split('%')[0])
- elif __grains__['os_family'] == 'Debian':
- cver = new_pkgs.get(pkgname.split('=')[0])
-- elif __grains__['os_family'] == 'Suse':
+@@ -930,17 +920,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None):
+ cver = new_pkgs.get(pkgname.split("%")[0])
+ elif __grains__["os_family"] == "Debian":
+ cver = new_pkgs.get(pkgname.split("=")[0])
+- elif __grains__["os_family"] == "Suse":
- # On SUSE systems. Zypper returns packages without "arch" in name
- try:
-- namepart, archpart = pkgname.rsplit('.', 1)
+- namepart, archpart = pkgname.rsplit(".", 1)
- except ValueError:
- cver = new_pkgs.get(pkgname)
- else:
@@ -53,6 +53,6 @@ index c0fa2f6b69..a13d418400 100644
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
--
-2.16.4
+2.29.2
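
Both hunks delete the same SUSE-only fallback. As a reading aid, a paraphrased sketch of what the removed branch did (its else-branch is elided in the hunks, so it is elided here as well): zypper used to report packages without the architecture in the name, so the state code retried lookups after splitting a trailing ".arch" suffix; once zypper's list_pkgs was aligned with the pkg state, this retry became dead code.

def _removed_suse_fallback(pkgname, cur_pkgs):
    # Old behaviour: treat a trailing ".arch" as an architecture
    # qualifier and retry the lookup without it.
    try:
        namepart, archpart = pkgname.rsplit(".", 1)
    except ValueError:
        return cur_pkgs.get(pkgname, [])
    # ... the original else-branch (not visible in the hunk) looked the
    # split name up again; that is the part that is now unnecessary.
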
diff --git a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch b/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
index 27b3e19..08a9c4b 100644
--- a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
+++ b/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
@@ -1,4 +1,4 @@
-From 3df8359421f60140fd335d95c3c06de0bfd6ac4f Mon Sep 17 00:00:00 2001
+From 17ad05e3cbb3718ca12cef20600be81aa5d42d33 Mon Sep 17 00:00:00 2001
From: tyl0re
Date: Wed, 17 Jul 2019 10:13:09 +0200
Subject: [PATCH] Fixed Bug LVM has no Partition Type. the Script Later
@@ -11,23 +11,41 @@ Subject: [PATCH] Fixed Bug LVM has no Partition Type. the Script Later
 So the check for an undefined fs_type is missing
---
- salt/modules/parted_partition.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ salt/modules/parted_partition.py | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py
-index 9441fec49fd1833da590b3f65637e8e92b287d1c..7d08a7b315c990e7a87c9c77fd6550a6174b7160 100644
+index 015d4cbc29..bb34cd58b4 100644
--- a/salt/modules/parted_partition.py
+++ b/salt/modules/parted_partition.py
-@@ -515,7 +515,7 @@ def mkpartfs(device, part_type, fs_type, start, end):
- 'Invalid part_type passed to partition.mkpartfs'
- )
+@@ -552,10 +552,23 @@ def mkpartfs(device, part_type, fs_type=None, start=None, end=None):
-- if not _is_fstype(fs_type):
+ .. code-block:: bash
+
+- salt '*' partition.mkpartfs /dev/sda primary fs_type=fat32 start=0 end=639
+- salt '*' partition.mkpartfs /dev/sda primary start=0 end=639
++ salt '*' partition.mkpartfs /dev/sda logical ext2 440 670
+ """
+- out = mkpart(device, part_type, fs_type, start, end)
++ _validate_device(device)
++
++ if part_type not in {"primary", "logical", "extended"}:
++ raise CommandExecutionError("Invalid part_type passed to partition.mkpartfs")
++
+ if fs_type and not _is_fstype(fs_type):
- raise CommandExecutionError(
- 'Invalid fs_type passed to partition.mkpartfs'
- )
++ raise CommandExecutionError("Invalid fs_type passed to partition.mkpartfs")
++
++ _validate_partition_boundary(start)
++ _validate_partition_boundary(end)
++
++ cmd = "parted -m -s -- {} mkpart {} {} {} {}".format(
++ device, part_type, fs_type, start, end
++ )
++ out = __salt__["cmd.run"](cmd).splitlines()
+ return out
+
+
--
-2.23.0
+2.29.2
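
The rewritten mkpartfs above is essentially validate-then-shell-out, and the actual bug fix is that fs_type is now optional: LVM partitions carry no filesystem type, so the type check must only run when a type was passed. A condensed, self-contained sketch of just the guards (stand-ins are used for Salt's CommandExecutionError and _is_fstype):

def _is_fstype(fs_type):
    # Stand-in for salt.modules.parted_partition._is_fstype.
    return fs_type in {"ext2", "ext3", "ext4", "fat32", "xfs"}


def mkpartfs_guards(part_type, fs_type=None):
    if part_type not in {"primary", "logical", "extended"}:
        raise ValueError("Invalid part_type passed to partition.mkpartfs")
    # Only validate fs_type when one was actually given -- an undefined
    # fs_type (the LVM case) must pass through.
    if fs_type and not _is_fstype(fs_type):
        raise ValueError("Invalid fs_type passed to partition.mkpartfs")


mkpartfs_guards("logical")          # ok: no fs_type, so no type check
mkpartfs_guards("logical", "ext2")  # ok: valid fs_type
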
diff --git a/fixes-cve-2018-15750-cve-2018-15751.patch b/fixes-cve-2018-15750-cve-2018-15751.patch
index 771b633..9c8999a 100644
--- a/fixes-cve-2018-15750-cve-2018-15751.patch
+++ b/fixes-cve-2018-15750-cve-2018-15751.patch
@@ -1,4 +1,4 @@
-From 9ec54e8c1394ab678c6129d98f07c6eafd446399 Mon Sep 17 00:00:00 2001
+From 731a53bd241240e08c455a8cb3a59e4d65a6abb5 Mon Sep 17 00:00:00 2001
From: Erik Johnson
Date: Fri, 24 Aug 2018 10:35:55 -0500
Subject: [PATCH] Fixes: CVE-2018-15750, CVE-2018-15751
@@ -12,43 +12,47 @@ Handle Auth exceptions in run_job
Update tornado test to correct authentication message
---
salt/netapi/rest_cherrypy/app.py | 7 -------
- tests/integration/netapi/rest_tornado/test_app.py | 4 ++--
- 2 files changed, 2 insertions(+), 9 deletions(-)
+ tests/integration/netapi/rest_tornado/test_app.py | 8 ++++++--
+ 2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py
-index fa1b540e5f..f8b500482b 100644
+index e7641ccbc5..5dfbadf759 100644
--- a/salt/netapi/rest_cherrypy/app.py
+++ b/salt/netapi/rest_cherrypy/app.py
-@@ -1176,13 +1176,6 @@ class LowDataAdapter(object):
+@@ -1181,13 +1181,6 @@ class LowDataAdapter:
except (TypeError, ValueError):
- raise cherrypy.HTTPError(401, 'Invalid token')
+ raise cherrypy.HTTPError(401, "Invalid token")
-- if 'token' in chunk:
+- if "token" in chunk:
- # Make sure that auth token is hex
- try:
-- int(chunk['token'], 16)
+- int(chunk["token"], 16)
- except (TypeError, ValueError):
-- raise cherrypy.HTTPError(401, 'Invalid token')
+- raise cherrypy.HTTPError(401, "Invalid token")
-
if client:
- chunk['client'] = client
+ chunk["client"] = client
diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py
-index 10ec29f7fa..4102b5645a 100644
+index e3ad8820d3..4e5e741f1d 100644
--- a/tests/integration/netapi/rest_tornado/test_app.py
+++ b/tests/integration/netapi/rest_tornado/test_app.py
-@@ -282,8 +282,8 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
- self.assertIn('jid', ret[0]) # the first 2 are regular returns
- self.assertIn('jid', ret[1])
- self.assertIn('Failed to authenticate', ret[2]) # bad auth
-- self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
-- self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion']))
-+ self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion', 'localhost']))
-+ self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion', 'localhost']))
+@@ -326,8 +326,12 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
+ self.assertIn("jid", ret[0]) # the first 2 are regular returns
+ self.assertIn("jid", ret[1])
+ self.assertIn("Failed to authenticate", ret[2]) # bad auth
+- self.assertEqual(ret[0]["minions"], sorted(["minion", "sub_minion"]))
+- self.assertEqual(ret[1]["minions"], sorted(["minion", "sub_minion"]))
++ self.assertEqual(
++ ret[0]["minions"], sorted(["minion", "sub_minion", "localhost"])
++ )
++ self.assertEqual(
++ ret[1]["minions"], sorted(["minion", "sub_minion", "localhost"])
++ )
+ @slowTest
def test_simple_local_async_post_no_tgt(self):
- low = [{'client': 'local_async',
--
-2.16.4
+2.29.2
diff --git a/fixing-streamclosed-issue.patch b/fixing-streamclosed-issue.patch
index 510f565..da2e635 100644
--- a/fixing-streamclosed-issue.patch
+++ b/fixing-streamclosed-issue.patch
@@ -1,4 +1,4 @@
-From 9a5f007a5baa4ba1d28b0e6708bac8b134e4891c Mon Sep 17 00:00:00 2001
+From 82d1cadff4fa6248a9d891a3c228fc415207d8d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mihai=20Dinc=C4=83?=
Date: Tue, 26 Nov 2019 18:26:31 +0100
Subject: [PATCH] Fixing StreamClosed issue
@@ -8,18 +8,18 @@ Subject: [PATCH] Fixing StreamClosed issue
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 754c257b36..c4545e3ebc 100644
+index f3d92b88f1..8d2601e636 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -221,7 +221,6 @@ class BatchAsync(object):
- "metadata": self.metadata
+@@ -232,7 +232,6 @@ class BatchAsync:
+ "metadata": self.metadata,
}
- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- self.event.remove_event_handler(self.__event_handler)
for (pattern, label) in self.patterns:
if label in ["ping_return", "batch_run"]:
- self.event.unsubscribe(pattern, match_type='glob')
-@@ -265,6 +264,7 @@ class BatchAsync(object):
+ self.event.unsubscribe(pattern, match_type="glob")
+@@ -277,6 +276,7 @@ class BatchAsync:
def __del__(self):
self.local = None
@@ -28,6 +28,6 @@ index 754c257b36..c4545e3ebc 100644
self.ioloop = None
gc.collect()
--
-2.16.4
+2.29.2
diff --git a/force-zyppnotify-to-prefer-packages.db-than-packages.patch b/force-zyppnotify-to-prefer-packages.db-than-packages.patch
new file mode 100644
index 0000000..6203d03
--- /dev/null
+++ b/force-zyppnotify-to-prefer-packages.db-than-packages.patch
@@ -0,0 +1,29 @@
+From 36b107fb5108fe4e52e9ef522765d6ada588c50d Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 9 Dec 2020 14:58:55 +0300
+Subject: [PATCH] Force zyppnotify to prefer Packages.db over Packages
+ if it exists
+
+---
+ scripts/suse/zypper/plugins/commit/zyppnotify | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/scripts/suse/zypper/plugins/commit/zyppnotify b/scripts/suse/zypper/plugins/commit/zyppnotify
+index 51ac02254e..d6a1bef42b 100755
+--- a/scripts/suse/zypper/plugins/commit/zyppnotify
++++ b/scripts/suse/zypper/plugins/commit/zyppnotify
+@@ -20,7 +20,9 @@ class DriftDetector(Plugin):
+ def __init__(self):
+ Plugin.__init__(self)
+ self.ck_path = "/var/cache/salt/minion/rpmdb.cookie"
+- self.rpm_path = "/var/lib/rpm/Packages"
++ self.rpm_path = "/var/lib/rpm/Packages.db"
++ if not os.path.exists(self.rpm_path):
++ self.rpm_path = "/var/lib/rpm/Packages"
+
+ def _get_mtime(self):
+ """
+--
+2.29.2
+
+
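The new plugin logic above boils down to a preferred-path fallback. A self-contained sketch of the same pattern, with the paths taken from the patch (the surrounding zypp plugin machinery is omitted):

import os


def _rpmdb_path():
    # Prefer the newer rpmdb backend file when the system provides it,
    # otherwise fall back to the classic Packages database.
    path = "/var/lib/rpm/Packages.db"
    if not os.path.exists(path):
        path = "/var/lib/rpm/Packages"
    return path


print(_rpmdb_path())

The DriftDetector cookie logic then operates on whichever file this resolves to.
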
diff --git a/get-os_arch-also-without-rpm-package-installed.patch b/get-os_arch-also-without-rpm-package-installed.patch
index b94bfed..decb600 100644
--- a/get-os_arch-also-without-rpm-package-installed.patch
+++ b/get-os_arch-also-without-rpm-package-installed.patch
@@ -1,4 +1,4 @@
-From 98f3bd70aaa145b88e8bd4b947b578435e2b1e57 Mon Sep 17 00:00:00 2001
+From e987664551debb9affce8ce5a70593ef0750dcd5 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Wed, 14 Nov 2018 17:36:23 +0100
Subject: [PATCH] Get os_arch also without RPM package installed
@@ -17,29 +17,31 @@ Add UT for OS arch detection when no CPU arch or machine can be determined
Remove unsupported testcase
---
- tests/unit/utils/test_pkg.py | 48 ++++++++------------------------------------
- 1 file changed, 8 insertions(+), 40 deletions(-)
+ tests/unit/utils/test_pkg.py | 53 ++++++------------------------------
+ 1 file changed, 8 insertions(+), 45 deletions(-)
diff --git a/tests/unit/utils/test_pkg.py b/tests/unit/utils/test_pkg.py
-index e8b19bef14..361e0bf92f 100644
+index b4a67b8e57..404b01b12b 100644
--- a/tests/unit/utils/test_pkg.py
+++ b/tests/unit/utils/test_pkg.py
-@@ -2,51 +2,19 @@
-
- from __future__ import absolute_import, unicode_literals, print_function
-
--from tests.support.unit import TestCase
--from tests.support.mock import MagicMock, patch
-+from tests.support.unit import TestCase, skipIf
-+from tests.support.mock import Mock, MagicMock, patch, NO_MOCK, NO_MOCK_REASON
+@@ -1,53 +1,16 @@
+-# -*- coding: utf-8 -*-
+-
+-from __future__ import absolute_import, print_function, unicode_literals
+-
import salt.utils.pkg
from salt.utils.pkg import rpm
-
+-from tests.support.mock import MagicMock, patch
+-from tests.support.unit import TestCase
-
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch
++from tests.support.unit import TestCase, skipIf
+
-class PkgUtilsTestCase(TestCase):
-- '''
+- """
- TestCase for salt.utils.pkg module
-- '''
+- """
+-
- test_parameters = [
- ("16.0.0.49153-0+f1", "", "16.0.0.49153-0+f1"),
- ("> 15.0.0", ">", "15.0.0"),
@@ -62,13 +64,13 @@ index e8b19bef14..361e0bf92f 100644
- ("<=>15.0.0", "<=>", "15.0.0"),
- ("<>15.0.0", "<>", "15.0.0"),
- ("=15.0.0", "=", "15.0.0"),
-- ("", "", "")
+- ("", "", ""),
- ]
-
- def test_split_comparison(self):
-- '''
+- """
- Tests salt.utils.pkg.split_comparison
-- '''
+- """
- for test_parameter in self.test_parameters:
- oper, verstr = salt.utils.pkg.split_comparison(test_parameter[0])
- self.assertEqual(test_parameter[1], oper)
@@ -80,11 +82,11 @@ index e8b19bef14..361e0bf92f 100644
+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+@skipIf(pytest is None, 'PyTest is missing')
++@skipIf(pytest is None, "PyTest is missing")
class PkgRPMTestCase(TestCase):
- '''
+ """
Test case for pkg.rpm utils
--
-2.16.4
+2.29.2
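
The bulk of the deletion above is the parametrized split_comparison test. For reference, a short usage sketch built only from inputs that appear in the removed test data:

import salt.utils.pkg

oper, verstr = salt.utils.pkg.split_comparison("> 15.0.0")
assert (oper, verstr) == (">", "15.0.0")

oper, verstr = salt.utils.pkg.split_comparison("16.0.0.49153-0+f1")
assert (oper, verstr) == ("", "16.0.0.49153-0+f1")
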
diff --git a/grains-master-can-read-grains.patch b/grains-master-can-read-grains.patch
index e6cae6b..0f91120 100644
--- a/grains-master-can-read-grains.patch
+++ b/grains-master-can-read-grains.patch
@@ -1,4 +1,4 @@
-From 0b6106815b708bc4cf25b4a02ebc8b7ebf299b39 Mon Sep 17 00:00:00 2001
+From d9618fed8ff241c6f127f08ec59fea9c8b8e12a6 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 27 Oct 2020 13:16:37 +0100
Subject: [PATCH] grains: master can read grains
@@ -8,10 +8,10 @@ Subject: [PATCH] grains: master can read grains
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
-index 6a26aece77..f80061ff4e 100644
+index d25faac3b7..7729a5c0a5 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
-@@ -94,8 +94,14 @@ def __secure_boot():
+@@ -76,8 +76,14 @@ def __secure_boot():
enabled = False
sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data")
if len(sboot) == 1:
@@ -29,6 +29,6 @@ index 6a26aece77..f80061ff4e 100644
--
-2.29.1
+2.29.2
diff --git a/html.tar.bz2 b/html.tar.bz2
index 2103847..d2f8ee6 100644
--- a/html.tar.bz2
+++ b/html.tar.bz2
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b2c1abe2851b8a9055a361fc2409477ac01ec4829f0588f3b58533cb5f1e4e89
-size 8775440
+oid sha256:7480c92d4197b02504c9a130a0268fd028eb0fd45d3c7a7075b8b78da85050ed
+size 9943287
diff --git a/implement-network.fqdns-module-function-bsc-1134860-.patch b/implement-network.fqdns-module-function-bsc-1134860-.patch
index 9feae9e..6492017 100644
--- a/implement-network.fqdns-module-function-bsc-1134860-.patch
+++ b/implement-network.fqdns-module-function-bsc-1134860-.patch
@@ -1,4 +1,4 @@
-From a11587a1209cd198f421fafdb43510b6d651f4b2 Mon Sep 17 00:00:00 2001
+From ac34a8d839f91285f4ced605250422a1ecf5cb55 Mon Sep 17 00:00:00 2001
From: EricS <54029547+ESiebigteroth@users.noreply.github.com>
Date: Tue, 3 Sep 2019 11:22:53 +0200
Subject: [PATCH] Implement network.fqdns module function (bsc#1134860)
@@ -9,70 +9,30 @@ Subject: [PATCH] Implement network.fqdns module function (bsc#1134860)
* Reuse network.fqdns in grains.core.fqdns
* Return empty list when fqdns grains is disabled
-
Co-authored-by: Eric Siebigteroth
---
- salt/grains/core.py | 66 +++++-------------------------------------
- salt/modules/network.py | 60 ++++++++++++++++++++++++++++++++++++++
- salt/utils/network.py | 12 ++++++++
- tests/unit/grains/test_core.py | 63 +++++++++++++++++++++++++++++++---------
- 4 files changed, 130 insertions(+), 71 deletions(-)
+ salt/grains/core.py | 58 +++-------------------------------
+ salt/modules/network.py | 12 +++----
+ salt/utils/network.py | 2 +-
+ tests/unit/grains/test_core.py | 55 ++++++++++++--------------------
+ 4 files changed, 31 insertions(+), 96 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 0f3ccd9b92..77ae99590f 100644
+index 5f18ba4a58..0dc1d97f97 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -26,8 +26,9 @@ from errno import EACCES, EPERM
- import datetime
+@@ -23,7 +23,6 @@ import uuid
import warnings
- import time
-+import salt.modules.network
-
+ import zlib
+ from errno import EACCES, EPERM
-from multiprocessing.pool import ThreadPool
-+from salt.utils.network import _get_interfaces
- # pylint: disable=import-error
- try:
-@@ -84,6 +85,7 @@ __salt__ = {
- 'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
- 'smbios.records': salt.modules.smbios.records,
- 'smbios.get': salt.modules.smbios.get,
-+ 'network.fqdns': salt.modules.network.fqdns,
- }
- log = logging.getLogger(__name__)
-
-@@ -107,7 +109,6 @@ HAS_UNAME = True
- if not hasattr(os, 'uname'):
- HAS_UNAME = False
-
--_INTERFACES = {}
-
- # Possible value for h_errno defined in netdb.h
- HOST_NOT_FOUND = 1
-@@ -1553,17 +1554,6 @@ def _linux_bin_exists(binary):
- return False
-
-
--def _get_interfaces():
-- '''
-- Provide a dict of the connected interfaces and their ip addresses
-- '''
--
-- global _INTERFACES
-- if not _INTERFACES:
-- _INTERFACES = salt.utils.network.interfaces()
-- return _INTERFACES
--
--
- def _parse_lsb_release():
- ret = {}
- try:
-@@ -2271,52 +2261,12 @@ def fqdns():
- '''
- Return all known FQDNs for the system by enumerating all interfaces and
+ import distro
+ import salt.exceptions
+@@ -2406,59 +2405,10 @@ def fqdns():
then trying to reverse resolve them (excluding 'lo' interface).
-+ To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file.
- '''
+ To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file.
+ """
- # Provides:
- # fqdns
-
@@ -82,224 +42,220 @@ index 0f3ccd9b92..77ae99590f 100644
- def _lookup_fqdn(ip):
- try:
- name, aliaslist, addresslist = socket.gethostbyaddr(ip)
-- return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
+- return [socket.getfqdn(name)] + [
+- als for als in aliaslist if salt.utils.network.is_fqdn(als)
+- ]
- except socket.herror as err:
- if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
- # No FQDN for this IP address, so we don't need to know this all the time.
- log.debug("Unable to resolve address %s: %s", ip, err)
- else:
- log.error(err_message, ip, err)
-- except (socket.error, socket.gaierror, socket.timeout) as err:
+- except (OSError, socket.gaierror, socket.timeout) as err:
- log.error(err_message, ip, err)
-
- start = time.time()
-
-- addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
-- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
-- err_message = 'Exception during resolving address: %s'
+- addresses = salt.utils.network.ip_addrs(
+- include_loopback=False, interface_data=_get_interfaces()
+- )
+- addresses.extend(
+- salt.utils.network.ip_addrs6(
+- include_loopback=False, interface_data=_get_interfaces()
+- )
+- )
+- err_message = "Exception during resolving address: %s"
-
- # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
- # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
- # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
-
- try:
-- pool = ThreadPool(8)
-- results = pool.map(_lookup_fqdn, addresses)
-- pool.close()
-- pool.join()
+- pool = ThreadPool(8)
+- results = pool.map(_lookup_fqdn, addresses)
+- pool.close()
+- pool.join()
- except Exception as exc:
-- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
+- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
-
- for item in results:
- if item:
- fqdns.update(item)
-
- elapsed = time.time() - start
-- log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed))
+- log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed))
-
- return {"fqdns": sorted(list(fqdns))}
+ opt = {"fqdns": []}
-+ if __opts__.get('enable_fqdns_grains', True) == True:
-+ opt = __salt__['network.fqdns']()
++ if __opts__.get("enable_fqdns_grains", True) == True:
++ opt = __salt__["network.fqdns"]()
+ return opt
def ip_fqdn():
diff --git a/salt/modules/network.py b/salt/modules/network.py
-index 38e2bc326e..880f4f8d5f 100644
+index 2e1410c288..59ed43bba6 100644
--- a/salt/modules/network.py
+++ b/salt/modules/network.py
-@@ -11,6 +11,10 @@ import logging
- import re
- import os
- import socket
-+import time
-+
-+from multiprocessing.pool import ThreadPool
-+
+@@ -2,7 +2,6 @@
+ Module for gathering and managing network information
+ """
- # Import salt libs
+-# Import python libs
+ import datetime
+ import hashlib
+ import logging
+@@ -12,7 +11,6 @@ import socket
+ import time
+ from multiprocessing.pool import ThreadPool
+
+-# Import salt libs
import salt.utils.decorators.path
-@@ -1887,3 +1891,59 @@ def iphexval(ip):
- a = ip.split('.')
- hexval = ['%02X' % int(x) for x in a] # pylint: disable=E1321
- return ''.join(hexval)
-+
-+
-+def fqdns():
-+ '''
-+ Return all known FQDNs for the system by enumerating all interfaces and
-+ then trying to reverse resolve them (excluding 'lo' interface).
-+ '''
-+ # Provides:
-+ # fqdns
-+
-+ # Possible value for h_errno defined in netdb.h
-+ HOST_NOT_FOUND = 1
-+ NO_DATA = 4
-+
-+ grains = {}
-+ fqdns = set()
-+
-+ def _lookup_fqdn(ip):
-+ try:
+ import salt.utils.functools
+ import salt.utils.network
+@@ -20,8 +18,6 @@ import salt.utils.platform
+ import salt.utils.validate.net
+ from salt._compat import ipaddress
+ from salt.exceptions import CommandExecutionError
+-
+-# Import 3rd-party libs
+ from salt.ext.six.moves import range
+
+ log = logging.getLogger(__name__)
+@@ -2076,7 +2072,10 @@ def fqdns():
+
+ def _lookup_fqdn(ip):
+ try:
+- return [socket.getfqdn(socket.gethostbyaddr(ip)[0])]
+ name, aliaslist, addresslist = socket.gethostbyaddr(ip)
-+ return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
-+ except socket.herror as err:
-+ if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
-+ # No FQDN for this IP address, so we don't need to know this all the time.
-+ log.debug("Unable to resolve address %s: %s", ip, err)
-+ else:
-+ log.error(err_message, err)
-+ except (socket.error, socket.gaierror, socket.timeout) as err:
-+ log.error(err_message, err)
-+
-+ start = time.time()
-+
-+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=salt.utils.network._get_interfaces())
-+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=salt.utils.network._get_interfaces()))
-+ err_message = 'Exception during resolving address: %s'
-+
-+ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
-+ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
-+ # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
-+
-+ try:
-+ pool = ThreadPool(8)
-+ results = pool.map(_lookup_fqdn, addresses)
-+ pool.close()
-+ pool.join()
++ return [socket.getfqdn(name)] + [
++ als for als in aliaslist if salt.utils.network.is_fqdn(als)
++ ]
+ except socket.herror as err:
+ if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
+ # No FQDN for this IP address, so we don't need to know this all the time.
+@@ -2102,13 +2101,12 @@ def fqdns():
+ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
+ # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
+
+- results = []
+ try:
+ pool = ThreadPool(8)
+ results = pool.map(_lookup_fqdn, addresses)
+ pool.close()
+ pool.join()
+- except Exception as exc: # pylint: disable=broad-except
+ except Exception as exc:
-+ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
-+
-+ for item in results:
-+ if item:
-+ fqdns.update(item)
-+
-+ elapsed = time.time() - start
-+ log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed))
-+
-+ return {"fqdns": sorted(list(fqdns))}
-\ No newline at end of file
+ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
+
+ for item in results:
diff --git a/salt/utils/network.py b/salt/utils/network.py
-index 74536cc143..4cc8a05c4a 100644
+index d253ded3ab..25b2d06758 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
-@@ -50,6 +50,18 @@ except (ImportError, OSError, AttributeError, TypeError):
- pass
+@@ -49,7 +49,7 @@ except (ImportError, OSError, AttributeError, TypeError):
+ _INTERFACES = {}
-+_INTERFACES = {}
-+def _get_interfaces(): #! function
-+ '''
-+ Provide a dict of the connected interfaces and their ip addresses
-+ '''
-+
-+ global _INTERFACES
-+ if not _INTERFACES:
-+ _INTERFACES = interfaces()
-+ return _INTERFACES
-+
-+
- def sanitize_host(host):
- '''
- Sanitize host string.
+-def _get_interfaces():
++def _get_interfaces(): #! function
+ """
+ Provide a dict of the connected interfaces and their ip addresses
+ """
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index ac03b57226..60914204b0 100644
+index d760e57a54..a5ceeb8317 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
-@@ -35,6 +35,7 @@ import salt.utils.path
- import salt.modules.cmdmod
- import salt.modules.smbios
- import salt.grains.core as core
-+import salt.modules.network
-
- # Import 3rd-party libs
- from salt.ext import six
-@@ -1029,6 +1030,40 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- with patch.object(salt.utils.dns, 'parse_resolv', MagicMock(return_value=resolv_mock)):
+@@ -18,6 +18,7 @@ import salt.utils.network
+ import salt.utils.path
+ import salt.utils.platform
+ from salt._compat import ipaddress
++from salt.ext import six
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, Mock, mock_open, patch
+ from tests.support.unit import TestCase, skipIf
+@@ -1293,14 +1294,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ ):
assert core.dns() == ret
-+
+- def test_enable_fqdns_false(self):
+ def test_enablefqdnsFalse(self):
-+ '''
-+ tests enable_fqdns_grains is set to False
-+ '''
-+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':False}):
-+ assert core.fqdns() == {"fqdns": []}
-+
-+
+ """
+ tests enable_fqdns_grains is set to False
+ """
+ with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": False}):
+ assert core.fqdns() == {"fqdns": []}
+
+- def test_enable_fqdns_true(self):
+ def test_enablefqdnsTrue(self):
-+ '''
-+ testing that grains uses network.fqdns module
-+ '''
-+ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}):
-+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':True}):
-+ assert core.fqdns() == 'my.fake.domain'
-+
-+
+ """
+ testing that grains uses network.fqdns module
+ """
+@@ -1311,14 +1312,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": True}):
+ assert core.fqdns() == "my.fake.domain"
+
+- def test_enable_fqdns_none(self):
+ def test_enablefqdnsNone(self):
-+ '''
-+ testing default fqdns grains is returned when enable_fqdns_grains is None
-+ '''
-+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':None}):
-+ assert core.fqdns() == {"fqdns": []}
-+
-+
+ """
+ testing default fqdns grains is returned when enable_fqdns_grains is None
+ """
+ with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": None}):
+ assert core.fqdns() == {"fqdns": []}
+
+- def test_enable_fqdns_without_patching(self):
+ def test_enablefqdnswithoutpaching(self):
-+ '''
-+ testing fqdns grains is enabled by default
-+ '''
-+ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}):
-+ assert core.fqdns() == 'my.fake.domain'
-+
-+
- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
- @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8']))
- @patch('salt.utils.network.ip_addrs6',
-@@ -1044,11 +1079,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- ('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']),
- ('bluesniff.foo.bar', [], ['fe80::a8b2:93ff:dead:beef'])]
- ret = {'fqdns': ['bluesniff.foo.bar', 'foo.bar.baz', 'rinzler.evil-corp.com']}
-- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock):
+ """
+ testing fqdns grains is enabled by default
+ """
+@@ -1326,23 +1327,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ "salt.grains.core.__salt__",
+ {"network.fqdns": MagicMock(return_value="my.fake.domain")},
+ ):
+- # fqdns is disabled by default on Windows
+- if salt.utils.platform.is_windows():
+- assert core.fqdns() == {"fqdns": []}
+- else:
+- assert core.fqdns() == "my.fake.domain"
+-
+- def test_enable_fqdns_false_is_proxy(self):
+- """
+- testing fqdns grains is disabled by default for proxy minions
+- """
+- with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True):
+- with patch.dict(
+- "salt.grains.core.__salt__",
+- {"network.fqdns": MagicMock(return_value="my.fake.domain")},
+- ):
+- # fqdns is disabled by default on proxy minions
+- assert core.fqdns() == {"fqdns": []}
++ assert core.fqdns() == "my.fake.domain"
+
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @patch(
+@@ -1367,11 +1352,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ ("bluesniff.foo.bar", [], ["fe80::a8b2:93ff:dead:beef"]),
+ ]
+ ret = {"fqdns": ["bluesniff.foo.bar", "foo.bar.baz", "rinzler.evil-corp.com"]}
+- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock):
- fqdns = core.fqdns()
- assert "fqdns" in fqdns
-- assert len(fqdns['fqdns']) == len(ret['fqdns'])
-- assert set(fqdns['fqdns']) == set(ret['fqdns'])
-+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}):
-+ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock):
+- assert len(fqdns["fqdns"]) == len(ret["fqdns"])
+- assert set(fqdns["fqdns"]) == set(ret["fqdns"])
++ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}):
++ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock):
+ fqdns = core.fqdns()
+ assert "fqdns" in fqdns
-+ assert len(fqdns['fqdns']) == len(ret['fqdns'])
-+ assert set(fqdns['fqdns']) == set(ret['fqdns'])
++ assert len(fqdns["fqdns"]) == len(ret["fqdns"])
++ assert set(fqdns["fqdns"]) == set(ret["fqdns"])
- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
- @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4']))
-@@ -1094,14 +1130,15 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
- ('rinzler.evil-corp.com', ["false-hostname", "badaliass"], ['5.6.7.8']),
- ('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']),
- ('bluesniff.foo.bar', ["alias.bluesniff.foo.bar"], ['fe80::a8b2:93ff:dead:beef'])]
-- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock):
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @patch("salt.utils.network.ip_addrs", MagicMock(return_value=["1.2.3.4"]))
+@@ -1437,14 +1423,15 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+ ["fe80::a8b2:93ff:dead:beef"],
+ ),
+ ]
+- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock):
- fqdns = core.fqdns()
- assert "fqdns" in fqdns
- for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]:
@@ -307,8 +263,8 @@ index ac03b57226..60914204b0 100644
-
- for alias in ["throwmeaway", "false-hostname", "badaliass"]:
- assert alias not in fqdns["fqdns"]
-+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}):
-+ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock):
++ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}):
++ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock):
+ fqdns = core.fqdns()
+ assert "fqdns" in fqdns
+ for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]:
@@ -318,8 +274,8 @@ index ac03b57226..60914204b0 100644
+ assert alias not in fqdns["fqdns"]
def test_core_virtual(self):
- '''
+ """
--
-2.16.4
+2.29.2
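
The net effect of the grain-side hunk is a small delegation-with-opt-out: core.fqdns() now returns an empty default unless enable_fqdns_grains permits calling the network.fqdns execution module. A runnable toy sketch with explicit stand-ins for the __opts__ and __salt__ dunders:

def fqdns(opts, funcs):
    # Default grain value; "== True" mirrors the patch, so an explicit
    # None in the options also keeps the default (see the tests above).
    result = {"fqdns": []}
    if opts.get("enable_fqdns_grains", True) == True:
        result = funcs["network.fqdns"]()
    return result


print(fqdns({"enable_fqdns_grains": False}, {}))  # {'fqdns': []}
print(fqdns({}, {"network.fqdns": lambda: {"fqdns": ["a.example.org"]}}))
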
diff --git a/improve-batch_async-to-release-consumed-memory-bsc-1.patch b/improve-batch_async-to-release-consumed-memory-bsc-1.patch
index c12f42a..34cf8ee 100644
--- a/improve-batch_async-to-release-consumed-memory-bsc-1.patch
+++ b/improve-batch_async-to-release-consumed-memory-bsc-1.patch
@@ -1,4 +1,4 @@
-From 65e33acaf10fdd838c0cdf34ec93df3a2ed1f0d2 Mon Sep 17 00:00:00 2001
+From e53d50ce5fabf67eeb5344f7be9cccbb09d0179b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 26 Sep 2019 10:41:06 +0100
@@ -6,38 +6,39 @@ Subject: [PATCH] Improve batch_async to release consumed memory
(bsc#1140912)
---
- salt/cli/batch_async.py | 73 ++++++++++++++++++++++++++++++-------------------
- 1 file changed, 45 insertions(+), 28 deletions(-)
+ salt/cli/batch_async.py | 89 ++++++++++++++++++++++++-----------------
+ 1 file changed, 52 insertions(+), 37 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 8a67331102..2bb50459c8 100644
+index 388b709416..0a0b8f5f83 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -5,6 +5,7 @@ Execute a job on the targeted minions by using a moving window of fixed size `ba
+@@ -2,7 +2,7 @@
+ Execute a job on the targeted minions by using a moving window of fixed size `batch`.
+ """
- # Import python libs
- from __future__ import absolute_import, print_function, unicode_literals
+-import fnmatch
+import gc
- import tornado
- # Import salt libs
-@@ -77,6 +78,7 @@ class BatchAsync(object):
+ # pylint: enable=import-error,no-name-in-module,redefined-builtin
+ import logging
+@@ -78,6 +78,7 @@ class BatchAsync:
self.batch_jid = jid_gen()
self.find_job_jid = jid_gen()
self.find_job_returned = set()
+ self.ended = False
self.event = salt.utils.event.get_event(
- 'master',
- self.opts['sock_dir'],
-@@ -86,6 +88,7 @@ class BatchAsync(object):
- io_loop=ioloop,
- keep_loop=True)
+ "master",
+ self.opts["sock_dir"],
+@@ -88,6 +89,7 @@ class BatchAsync:
+ keep_loop=True,
+ )
self.scheduled = False
+ self.patterns = {}
def __set_event_handler(self):
- ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid)
-@@ -116,7 +119,7 @@ class BatchAsync(object):
+ ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
+@@ -118,7 +120,7 @@ class BatchAsync:
if minion in self.active:
self.active.remove(minion)
self.done_minions.add(minion)
@@ -45,29 +46,37 @@ index 8a67331102..2bb50459c8 100644
+ self.event.io_loop.spawn_callback(self.schedule_next)
def _get_next(self):
- to_run = self.minions.difference(
-@@ -129,23 +132,23 @@ class BatchAsync(object):
+ to_run = (
+@@ -132,27 +134,27 @@ class BatchAsync:
)
return set(list(to_run)[:next_batch_size])
- @tornado.gen.coroutine
def check_find_job(self, batch_minions, jid):
-- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
-- self.event.unsubscribe(find_job_return_pattern, match_type='glob')
+- find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+- self.event.unsubscribe(find_job_return_pattern, match_type="glob")
- self.patterns.remove((find_job_return_pattern, "find_job_return"))
+ if self.event:
-+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid)
-+ self.event.unsubscribe(find_job_return_pattern, match_type='glob')
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++ self.event.unsubscribe(find_job_return_pattern, match_type="glob")
+ self.patterns.remove((find_job_return_pattern, "find_job_return"))
-- timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions)
+- timedout_minions = batch_minions.difference(self.find_job_returned).difference(
+- self.done_minions
+- )
- self.timedout_minions = self.timedout_minions.union(timedout_minions)
- self.active = self.active.difference(self.timedout_minions)
-- running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
-+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions)
+- running = batch_minions.difference(self.done_minions).difference(
+- self.timedout_minions
+- )
++ timedout_minions = batch_minions.difference(
++ self.find_job_returned
++ ).difference(self.done_minions)
+ self.timedout_minions = self.timedout_minions.union(timedout_minions)
+ self.active = self.active.difference(self.timedout_minions)
-+ running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
++ running = batch_minions.difference(self.done_minions).difference(
++ self.timedout_minions
++ )
- if timedout_minions:
- self.schedule_next()
@@ -83,61 +92,65 @@ index 8a67331102..2bb50459c8 100644
@tornado.gen.coroutine
def find_job(self, minions):
-@@ -165,8 +168,8 @@ class BatchAsync(object):
- gather_job_timeout=self.opts['gather_job_timeout'],
+@@ -175,18 +177,12 @@ class BatchAsync:
jid=jid,
- **self.eauth)
+ **self.eauth
+ )
- self.event.io_loop.call_later(
-- self.opts['gather_job_timeout'],
-+ yield tornado.gen.sleep(self.opts['gather_job_timeout'])
-+ self.event.io_loop.spawn_callback(
- self.check_find_job,
- not_done,
- jid)
-@@ -174,10 +177,6 @@ class BatchAsync(object):
+- self.opts["gather_job_timeout"], self.check_find_job, not_done, jid
+- )
++ yield tornado.gen.sleep(self.opts["gather_job_timeout"])
++ self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid)
+
@tornado.gen.coroutine
def start(self):
self.__set_event_handler()
-- #start batching even if not all minions respond to ping
+- # start batching even if not all minions respond to ping
- self.event.io_loop.call_later(
-- self.batch_presence_ping_timeout or self.opts['gather_job_timeout'],
-- self.start_batch)
+- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"],
+- self.start_batch,
+- )
ping_return = yield self.local.run_job_async(
- self.opts['tgt'],
- 'test.ping',
-@@ -191,6 +190,10 @@ class BatchAsync(object):
- metadata=self.metadata,
- **self.eauth)
- self.targeted_minions = set(ping_return['minions'])
-+ #start batching even if not all minions respond to ping
-+ yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout'])
+ self.opts["tgt"],
+ "test.ping",
+@@ -198,6 +194,11 @@ class BatchAsync:
+ **self.eauth
+ )
+ self.targeted_minions = set(ping_return["minions"])
++ # start batching even if not all minions respond to ping
++ yield tornado.gen.sleep(
++ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
++ )
+ self.event.io_loop.spawn_callback(self.start_batch)
-+
@tornado.gen.coroutine
def start_batch(self):
-@@ -202,12 +205,14 @@ class BatchAsync(object):
+@@ -209,14 +210,18 @@ class BatchAsync:
"down_minions": self.targeted_minions.difference(self.minions),
- "metadata": self.metadata
+ "metadata": self.metadata,
}
-- self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
+- self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
- yield self.run_next()
-+ ret = self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
++ ret = self.event.fire_event(
++ data, "salt/batch/{}/start".format(self.batch_jid)
++ )
+ self.event.io_loop.spawn_callback(self.run_next)
+ @tornado.gen.coroutine
def end_batch(self):
- left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions))
+ left = self.minions.symmetric_difference(
+ self.done_minions.union(self.timedout_minions)
+ )
- if not left:
+ if not left and not self.ended:
+ self.ended = True
data = {
"available_minions": self.minions,
"down_minions": self.targeted_minions.difference(self.minions),
-@@ -220,20 +225,26 @@ class BatchAsync(object):
+@@ -229,20 +234,26 @@ class BatchAsync:
for (pattern, label) in self.patterns:
if label in ["ping_return", "batch_run"]:
- self.event.unsubscribe(pattern, match_type='glob')
+ self.event.unsubscribe(pattern, match_type="glob")
+ del self
+ gc.collect()
+ yield
@@ -161,14 +174,16 @@ index 8a67331102..2bb50459c8 100644
- yield self.local.run_job_async(
+ ret = yield self.local.run_job_async(
next_batch,
- self.opts['fun'],
- self.opts['arg'],
-@@ -244,11 +255,17 @@ class BatchAsync(object):
- jid=self.batch_jid,
- metadata=self.metadata)
+ self.opts["fun"],
+ self.opts["arg"],
+@@ -254,13 +265,17 @@ class BatchAsync:
+ metadata=self.metadata,
+ )
-- self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
-+ yield tornado.gen.sleep(self.opts['timeout'])
+- self.event.io_loop.call_later(
+- self.opts["timeout"], self.find_job, set(next_batch)
+- )
++ yield tornado.gen.sleep(self.opts["timeout"])
+ self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
except Exception as ex:
log.error("Error in scheduling next batch: %s", ex)
@@ -185,6 +200,6 @@ index 8a67331102..2bb50459c8 100644
+ self.ioloop = None
+ gc.collect()
--
-2.16.4
+2.29.2
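
Most hunks above repeat one scheduling change: IOLoop.call_later(delay, cb, ...) is replaced by sleeping inside the coroutine and then handing off with spawn_callback, so the pending work lives in the coroutine instead of sitting as a delayed callback registered on the loop. A hedged sketch of the pattern (tornado required; BatchAsync itself is not reproduced):

import tornado.gen


@tornado.gen.coroutine
def schedule_find_job(io_loop, timeout, check_find_job, minions, jid):
    # Before: io_loop.call_later(timeout, check_find_job, minions, jid)
    # left a delayed callback registered on the loop for the full delay.
    # After: wait inside the coroutine, then spawn the follow-up.
    yield tornado.gen.sleep(timeout)
    io_loop.spawn_callback(check_find_job, minions, jid)
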
diff --git a/include-aliases-in-the-fqdns-grains.patch b/include-aliases-in-the-fqdns-grains.patch
index 7823d00..f030206 100644
--- a/include-aliases-in-the-fqdns-grains.patch
+++ b/include-aliases-in-the-fqdns-grains.patch
@@ -1,4 +1,4 @@
-From 512b189808ea0d7b333587689d7e7eb52d16b189 Mon Sep 17 00:00:00 2001
+From 3c956a1cf1de17c5c49f0856051cabe2ffb4d0f2 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Tue, 29 Jan 2019 11:11:38 +0100
Subject: [PATCH] Include aliases in the fqdns grains
@@ -15,54 +15,116 @@ Add UT for fqdns aliases
Leverage cached interfaces, if any.
---
- salt/grains/core.py | 14 ++++++--------
- salt/utils/network.py | 12 ++++++++++++
- tests/unit/grains/test_core.py | 28 +++++++++++++++++++++++++---
- tests/unit/utils/test_network.py | 24 ++++++++++++++++++++++++
- 4 files changed, 67 insertions(+), 11 deletions(-)
+ salt/grains/core.py | 69 +++++++++++++++++++++-----------
+ salt/utils/network.py | 16 ++++++++
+ tests/unit/grains/test_core.py | 45 ++++++++++++++++++---
+ tests/unit/utils/test_network.py | 37 +++++++++++++++++
+ 4 files changed, 138 insertions(+), 29 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 7b7e328520..309e4c9c4a 100644
+index bc3cf129cd..006878f806 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -2275,14 +2275,13 @@ def fqdns():
- grains = {}
- fqdns = set()
+@@ -1733,29 +1733,31 @@ def _parse_cpe_name(cpe):
-- addresses = salt.utils.network.ip_addrs(include_loopback=False,
-- interface_data=_INTERFACES)
-- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False,
-- interface_data=_INTERFACES))
-- err_message = 'An exception occurred resolving address \'%s\': %s'
-+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
-+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
-+ err_message = 'Exception during resolving address: %s'
- for ip in addresses:
- try:
-- fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0]))
+
+ def _parse_cpe_name(cpe):
+- '''
++ """
+ Parse CPE_NAME data from the os-release
+
+ Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
+
+ :param cpe:
+ :return:
+- '''
++ """
+ part = {
+- 'o': 'operating system',
+- 'h': 'hardware',
+- 'a': 'application',
++ "o": "operating system",
++ "h": "hardware",
++ "a": "application",
+ }
+ ret = {}
+- cpe = (cpe or '').split(':')
+- if len(cpe) > 4 and cpe[0] == 'cpe':
+- if cpe[1].startswith('/'): # WFN to URI
+- ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
+- ret['phase'] = cpe[5] if len(cpe) > 5 else None
+- ret['part'] = part.get(cpe[1][1:])
+- elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
+- ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
+- ret['part'] = part.get(cpe[2])
++ cpe = (cpe or "").split(":")
++ if len(cpe) > 4 and cpe[0] == "cpe":
++ if cpe[1].startswith("/"): # WFN to URI
++ ret["vendor"], ret["product"], ret["version"] = cpe[2:5]
++ ret["phase"] = cpe[5] if len(cpe) > 5 else None
++ ret["part"] = part.get(cpe[1][1:])
++ elif len(cpe) == 13 and cpe[1] == "2.3": # WFN to a string
++ ret["vendor"], ret["product"], ret["version"], ret["phase"] = [
++ x if x != "*" else None for x in cpe[3:7]
++ ]
++ ret["part"] = part.get(cpe[2])
+
+ return ret
+
+@@ -2396,15 +2398,36 @@ def fqdns():
+ """
+ # Provides:
+ # fqdns
+- opt = {"fqdns": []}
+- if __opts__.get(
+- "enable_fqdns_grains",
+- False
+- if salt.utils.platform.is_windows() or salt.utils.platform.is_proxy()
+- else True,
+- ):
+- opt = __salt__["network.fqdns"]()
+- return opt
++
++ grains = {}
++ fqdns = set()
++
++ addresses = salt.utils.network.ip_addrs(
++ include_loopback=False, interface_data=_get_interfaces()
++ )
++ addresses.extend(
++ salt.utils.network.ip_addrs6(
++ include_loopback=False, interface_data=_get_interfaces()
++ )
++ )
++ err_message = "Exception during resolving address: %s"
++ for ip in addresses:
++ try:
+ name, aliaslist, addresslist = socket.gethostbyaddr(ip)
-+ fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)])
- except socket.herror as err:
- if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
- # No FQDN for this IP address, so we don't need to know this all the time.
-@@ -2292,8 +2291,7 @@ def fqdns():
- except (socket.error, socket.gaierror, socket.timeout) as err:
- log.error(err_message, ip, err)
-
-- grains['fqdns'] = sorted(list(fqdns))
-- return grains
++ fqdns.update(
++ [socket.getfqdn(name)]
++ + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
++ )
++ except socket.herror as err:
++ if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
++ # No FQDN for this IP address, so we don't need to know this all the time.
++ log.debug("Unable to resolve address %s: %s", ip, err)
++ else:
++ log.error(err_message, ip, err)
++ except (OSError, socket.gaierror, socket.timeout) as err:
++ log.error(err_message, ip, err)
++
+ return {"fqdns": sorted(list(fqdns))}
def ip_fqdn():
diff --git a/salt/utils/network.py b/salt/utils/network.py
-index 906d1cb3bc..2ae2e213b7 100644
+index b3e8db3886..dd7fceb91a 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
-@@ -1958,3 +1958,15 @@ def parse_host_port(host_port):
- raise ValueError('bad hostname: "{}"'.format(host))
-
- return host, port
+@@ -2208,3 +2208,19 @@ def filter_by_networks(values, networks):
+ raise ValueError("Do not know how to filter a {}".format(type(values)))
+ else:
+ return values
+
+
+def is_fqdn(hostname):
@@ -74,42 +136,63 @@ index 906d1cb3bc..2ae2e213b7 100644
+ """
+
+ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?
Date: Tue, 19 May 2020 10:34:35 +0200
Subject: [PATCH] info_installed works without status attr now
@@ -8,39 +8,39 @@ detect if a package was installed or not. Now info_installed adds the
'status' for the 'lowpkg.info' request again.
---
salt/modules/aptpkg.py | 9 +++++++++
- tests/unit/modules/test_aptpkg.py | 17 +++++++++++++++++
- 2 files changed, 26 insertions(+)
+ tests/unit/modules/test_aptpkg.py | 20 ++++++++++++++++++++
+ 2 files changed, 29 insertions(+)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 2835d32263..765d69aff2 100644
+index db0480b45d..e4a9872aad 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -2867,6 +2867,15 @@ def info_installed(*names, **kwargs):
- failhard = kwargs.pop('failhard', True)
- kwargs.pop('errors', None) # Only for compatibility with RPM
- attr = kwargs.pop('attr', None) # Package attributes to return
+@@ -2923,6 +2923,15 @@ def info_installed(*names, **kwargs):
+ failhard = kwargs.pop("failhard", True)
+ kwargs.pop("errors", None) # Only for compatibility with RPM
+ attr = kwargs.pop("attr", None) # Package attributes to return
+
+ # status is needed to see if a package is installed. So we have to add it,
+ # even if it's excluded via attr parameter. Otherwise all packages are
+ # returned.
+ if attr:
-+ attr_list = set(attr.split(','))
-+ attr_list.add('status')
-+ attr = ','.join(attr_list)
++ attr_list = set(attr.split(","))
++ attr_list.add("status")
++ attr = ",".join(attr_list)
+
- all_versions = kwargs.pop('all_versions', False) # This is for backward compatible structure only
-
- if kwargs:
+ all_versions = kwargs.pop(
+ "all_versions", False
+ ) # This is for backward compatible structure only
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
-index ba1d874e69..b0193aeaf7 100644
+index 3c9744e224..51dfce29eb 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
-@@ -257,6 +257,23 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
- self.assertEqual(aptpkg.info_installed('wget'), installed)
+@@ -297,6 +297,26 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(aptpkg.info_installed("wget"), installed)
self.assertEqual(len(aptpkg.info_installed()), 1)
+ def test_info_installed_attr_without_status(self):
-+ '''
++ """
+ Test info_installed 'attr' for inclusion of 'status' attribute.
+
+ Since info_installed should only return installed packages, we need to
@@ -49,17 +49,20 @@ index ba1d874e69..b0193aeaf7 100644
+ to check if the package is installed and would return everything.
+
+ :return:
-+ '''
-+ with patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) as wget_lowpkg:
-+ ret = aptpkg.info_installed('wget', attr='version')
-+ calls = wget_lowpkg['lowpkg.info'].call_args_list.pop()
-+ self.assertIn('status', calls.kwargs['attr'])
-+ self.assertIn('version', calls.kwargs['attr'])
++ """
++ with patch(
++ "salt.modules.aptpkg.__salt__",
++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)},
++ ) as wget_lowpkg:
++ ret = aptpkg.info_installed("wget", attr="version")
++ calls = wget_lowpkg["lowpkg.info"].call_args_list.pop()
++ self.assertIn("status", calls.kwargs["attr"])
++ self.assertIn("version", calls.kwargs["attr"])
+
- @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)})
- def test_info_installed_attr(self):
- '''
+ @patch(
+ "salt.modules.aptpkg.__salt__",
+ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)},
--
-2.27.0
+2.29.2
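
The aptpkg hunk above is a one-purpose normalization: whatever attribute filter the caller passes, 'status' has to be queried too, because it is the field that marks a package as actually installed. A self-contained sketch of that normalization (the order of the joined attributes is unspecified, since a set is used):

def _ensure_status(attr):
    # Mirror of the added logic: force "status" into a caller-supplied,
    # comma-separated attribute filter; leave an empty filter alone.
    if attr:
        attr_list = set(attr.split(","))
        attr_list.add("status")
        attr = ",".join(attr_list)
    return attr


print(_ensure_status("version"))  # e.g. "status,version"
print(_ensure_status(None))       # None: no filter, nothing to extend
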
diff --git a/integration-of-msi-authentication-with-azurearm-clou.patch b/integration-of-msi-authentication-with-azurearm-clou.patch
index 9792079..2f8a78c 100644
--- a/integration-of-msi-authentication-with-azurearm-clou.patch
+++ b/integration-of-msi-authentication-with-azurearm-clou.patch
@@ -1,18 +1,23 @@
-From c750e854c637e405a788f91d5b9a7bd1a0a6edfd Mon Sep 17 00:00:00 2001
+From bb2070d4f4e8fbb5a963c521d61feb7419abdec1 Mon Sep 17 00:00:00 2001
From: ed lane
Date: Thu, 30 Aug 2018 06:07:08 -0600
Subject: [PATCH] Integration of MSI authentication with azurearm cloud
driver (#105)
---
- salt/cloud/clouds/azurearm.py | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
+ salt/cloud/clouds/azurearm.py | 98 +++++++++++++++--------------------
+ 1 file changed, 43 insertions(+), 55 deletions(-)
diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py
-index 047fdac0a9..2c1fa04ae8 100644
+index 54fc7b497b..8b9254cecb 100644
--- a/salt/cloud/clouds/azurearm.py
+++ b/salt/cloud/clouds/azurearm.py
-@@ -58,6 +58,9 @@ The Azure ARM cloud module is used to control access to Microsoft Azure Resource
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Azure ARM Cloud Module
+ ======================
+@@ -61,6 +60,9 @@ The Azure ARM cloud module is used to control access to Microsoft Azure Resource
virtual machine type will be "Windows". Only set this parameter on profiles which install Windows operating systems.
@@ -22,25 +27,361 @@ index 047fdac0a9..2c1fa04ae8 100644
Example ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/azure.conf`` configuration:
-@@ -258,7 +261,8 @@ def get_configured_provider():
- provider = __is_provider_configured(
- __opts__,
- __active_provider_name__ or __virtualname__,
-- ('subscription_id', 'username', 'password')
-+ required_keys=('subscription_id', 'username', 'password'),
-+ log_message=False
+@@ -91,7 +93,6 @@ Example ``/etc/salt/cloud.providers`` or
+
+
+ # pylint: disable=wrong-import-position,wrong-import-order
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import importlib
+ import logging
+@@ -121,7 +122,6 @@ from salt.exceptions import (
+ # Salt libs
+ from salt.ext import six
+
+-# Import 3rd-party libs
+ HAS_LIBS = False
+ try:
+ import azure.mgmt.compute.models as compute_models
+@@ -179,7 +179,7 @@ def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument
)
- return provider
-@@ -301,6 +305,7 @@ def get_conn(client_type):
+ for resource in provider_query.resource_types:
+- if six.text_type(resource.resource_type) == kwargs["resource_type"]:
++ if str(resource.resource_type) == kwargs["resource_type"]:
+ resource_dict = resource.as_dict()
+ api_versions = resource_dict["api_versions"]
+ except CloudError as exc:
+@@ -263,6 +263,7 @@ def get_conn(client_type):
)
if tenant is not None:
+ # using Service Principle style authentication...
client_id = config.get_cloud_config_value(
- 'client_id',
- get_configured_provider(), __opts__, search_global=False
+ "client_id", get_configured_provider(), __opts__, search_global=False
+ )
+@@ -319,7 +320,7 @@ def avail_locations(call=None):
+ )
+ locations = []
+ for resource in provider_query.resource_types:
+- if six.text_type(resource.resource_type) == "virtualMachines":
++ if str(resource.resource_type) == "virtualMachines":
+ resource_dict = resource.as_dict()
+ locations = resource_dict["locations"]
+ for location in locations:
+@@ -399,7 +400,7 @@ def avail_images(call=None):
+ results = pool.map_async(_get_publisher_images, publishers)
+ results.wait()
+
+- ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
++ ret = {k: v for result in results.get() for k, v in result.items()}
+
+ return ret
+
+@@ -529,7 +530,7 @@ def list_nodes_full(call=None):
+ results = pool.map_async(_get_node_info, nodes)
+ results.wait()
+
+- group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
++ group_ret = {k: v for result in results.get() for k, v in result.items()}
+ ret.update(group_ret)
+
+ return ret
+@@ -707,7 +708,7 @@ def create_network_interface(call=None, kwargs=None):
+ )
+
+ if kwargs.get("iface_name") is None:
+- kwargs["iface_name"] = "{0}-iface0".format(vm_["name"])
++ kwargs["iface_name"] = "{}-iface0".format(vm_["name"])
+
+ try:
+ subnet_obj = netconn.subnets.get(
+@@ -717,7 +718,7 @@ def create_network_interface(call=None, kwargs=None):
+ )
+ except CloudError as exc:
+ raise SaltCloudSystemExit(
+- '{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format(
++ '{} (Resource Group: "{}", VNET: "{}", Subnet: "{}")'.format(
+ exc.message,
+ kwargs["network_resource_group"],
+ kwargs["network"],
+@@ -740,11 +741,11 @@ def create_network_interface(call=None, kwargs=None):
+ )
+ pool_ids.append({"id": lbbep_data.as_dict()["id"]})
+ except CloudError as exc:
+- log.error("There was a cloud error: %s", six.text_type(exc))
++ log.error("There was a cloud error: %s", str(exc))
+ except KeyError as exc:
+ log.error(
+ "There was an error getting the Backend Pool ID: %s",
+- six.text_type(exc),
++ str(exc),
+ )
+ ip_kwargs["load_balancer_backend_address_pools"] = pool_ids
+
+@@ -755,7 +756,7 @@ def create_network_interface(call=None, kwargs=None):
+ ip_kwargs["private_ip_allocation_method"] = IPAllocationMethod.dynamic
+
+ if kwargs.get("allocate_public_ip") is True:
+- pub_ip_name = "{0}-ip".format(kwargs["iface_name"])
++ pub_ip_name = "{}-ip".format(kwargs["iface_name"])
+ poller = netconn.public_ip_addresses.create_or_update(
+ resource_group_name=kwargs["resource_group"],
+ public_ip_address_name=pub_ip_name,
+@@ -773,11 +774,11 @@ def create_network_interface(call=None, kwargs=None):
+ )
+ if pub_ip_data.ip_address: # pylint: disable=no-member
+ ip_kwargs["public_ip_address"] = PublicIPAddress(
+- id=six.text_type(pub_ip_data.id), # pylint: disable=no-member
++ id=str(pub_ip_data.id), # pylint: disable=no-member
+ )
+ ip_configurations = [
+ NetworkInterfaceIPConfiguration(
+- name="{0}-ip".format(kwargs["iface_name"]),
++ name="{}-ip".format(kwargs["iface_name"]),
+ subnet=subnet_obj,
+ **ip_kwargs
+ )
+@@ -790,7 +791,7 @@ def create_network_interface(call=None, kwargs=None):
+ raise ValueError("Timed out waiting for public IP Address.")
+ time.sleep(5)
+ else:
+- priv_ip_name = "{0}-ip".format(kwargs["iface_name"])
++ priv_ip_name = "{}-ip".format(kwargs["iface_name"])
+ ip_configurations = [
+ NetworkInterfaceIPConfiguration(
+ name=priv_ip_name, subnet=subnet_obj, **ip_kwargs
+@@ -900,7 +901,7 @@ def request_instance(vm_):
+ )
+ vm_["iface_id"] = iface_data["id"]
+
+- disk_name = "{0}-vol0".format(vm_["name"])
++ disk_name = "{}-vol0".format(vm_["name"])
+
+ vm_username = config.get_cloud_config_value(
+ "ssh_username",
+@@ -922,8 +923,8 @@ def request_instance(vm_):
+ ssh_publickeyfile_contents = spkc_.read()
+ except Exception as exc: # pylint: disable=broad-except
+ raise SaltCloudConfigError(
+- "Failed to read ssh publickey file '{0}': "
+- "{1}".format(ssh_publickeyfile, exc.args[-1])
++ "Failed to read ssh publickey file '{}': "
++ "{}".format(ssh_publickeyfile, exc.args[-1])
+ )
+
+ disable_password_authentication = config.get_cloud_config_value(
+@@ -941,7 +942,7 @@ def request_instance(vm_):
+ if not win_installer and ssh_publickeyfile_contents is not None:
+ sshpublickey = SshPublicKey(
+ key_data=ssh_publickeyfile_contents,
+- path="/home/{0}/.ssh/authorized_keys".format(vm_username),
++ path="/home/{}/.ssh/authorized_keys".format(vm_username),
+ )
+ sshconfiguration = SshConfiguration(public_keys=[sshpublickey],)
+ linuxconfiguration = LinuxConfiguration(
+@@ -991,9 +992,9 @@ def request_instance(vm_):
+ availability_set = config.get_cloud_config_value(
+ "availability_set", vm_, __opts__, search_global=False, default=None
+ )
+- if availability_set is not None and isinstance(availability_set, six.string_types):
++ if availability_set is not None and isinstance(availability_set, str):
+ availability_set = {
+- "id": "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}".format(
++ "id": "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}".format(
+ subscription_id, vm_["resource_group"], availability_set
+ )
+ }
+@@ -1004,7 +1005,7 @@ def request_instance(vm_):
+
+ storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint
+
+- if isinstance(vm_.get("volumes"), six.string_types):
++ if isinstance(vm_.get("volumes"), str):
+ volumes = salt.utils.yaml.safe_load(vm_["volumes"])
+ else:
+ volumes = vm_.get("volumes")
+@@ -1018,16 +1019,14 @@ def request_instance(vm_):
+ lun = 0
+ luns = []
+ for volume in volumes:
+- if isinstance(volume, six.string_types):
++ if isinstance(volume, str):
+ volume = {"name": volume}
+
+ volume.setdefault(
+ "name",
+ volume.get(
+ "name",
+- volume.get(
+- "name", "{0}-datadisk{1}".format(vm_["name"], six.text_type(lun))
+- ),
++ volume.get("name", "{}-datadisk{}".format(vm_["name"], str(lun))),
+ ),
+ )
+
+@@ -1050,7 +1049,7 @@ def request_instance(vm_):
+ del volume["media_link"]
+ elif volume.get("vhd") == "unmanaged":
+ volume["vhd"] = VirtualHardDisk(
+- uri="https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd".format(
++ uri="https://{}.blob.{}/vhds/{}-datadisk{}.vhd".format(
+ vm_["storage_account"],
+ storage_endpoint_suffix,
+ vm_["name"],
+@@ -1090,7 +1089,7 @@ def request_instance(vm_):
+ create_option=DiskCreateOptionTypes.from_image,
+ name=disk_name,
+ vhd=VirtualHardDisk(
+- uri="https://{0}.blob.{1}/vhds/{2}.vhd".format(
++ uri="https://{}.blob.{}/vhds/{}.vhd".format(
+ vm_["storage_account"], storage_endpoint_suffix, disk_name,
+ ),
+ ),
+@@ -1209,7 +1208,7 @@ def request_instance(vm_):
+ __utils__["cloud.fire_event"](
+ "event",
+ "requesting instance",
+- "salt/cloud/{0}/requesting".format(vm_["name"]),
++ "salt/cloud/{}/requesting".format(vm_["name"]),
+ args=__utils__["cloud.filter_event"](
+ "requesting", vm_, ["name", "profile", "provider", "driver"]
+ ),
+@@ -1260,7 +1259,7 @@ def create(vm_):
+ __utils__["cloud.fire_event"](
+ "event",
+ "starting create",
+- "salt/cloud/{0}/creating".format(vm_["name"]),
++ "salt/cloud/{}/creating".format(vm_["name"]),
+ args=__utils__["cloud.filter_event"](
+ "creating", vm_, ["name", "profile", "provider", "driver"]
+ ),
+@@ -1278,9 +1277,7 @@ def create(vm_):
+ vm_request = request_instance(vm_=vm_)
+
+ if not vm_request or "error" in vm_request:
+- err_message = "Error creating VM {0}! ({1})".format(
+- vm_["name"], six.text_type(vm_request)
+- )
++ err_message = "Error creating VM {}! ({})".format(vm_["name"], str(vm_request))
+ log.error(err_message)
+ raise SaltCloudSystemExit(err_message)
+
+@@ -1322,7 +1319,7 @@ def create(vm_):
+ try:
+ log.warning(exc)
+ finally:
+- raise SaltCloudSystemExit(six.text_type(exc))
++ raise SaltCloudSystemExit(str(exc))
+
+ vm_["ssh_host"] = data
+ if not vm_.get("ssh_username"):
+@@ -1341,7 +1338,7 @@ def create(vm_):
+ __utils__["cloud.fire_event"](
+ "event",
+ "created instance",
+- "salt/cloud/{0}/created".format(vm_["name"]),
++ "salt/cloud/{}/created".format(vm_["name"]),
+ args=__utils__["cloud.filter_event"](
+ "created", vm_, ["name", "profile", "provider", "driver"]
+ ),
+@@ -1548,9 +1545,7 @@ def _get_cloud_environment():
+ cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD")
+ except (AttributeError, ImportError):
+ raise SaltCloudSystemExit(
+- "The azure {0} cloud environment is not available.".format(
+- cloud_environment
+- )
++ "The azure {} cloud environment is not available.".format(cloud_environment)
+ )
+
+ return cloud_env
+@@ -1585,7 +1580,7 @@ def _get_block_blob_service(kwargs=None):
+ resource_group, storage_account
+ )
+ storage_keys = {v.key_name: v.value for v in storage_keys.keys}
+- storage_key = next(six.itervalues(storage_keys))
++ storage_key = next(iter(storage_keys.values()))
+
+ cloud_env = _get_cloud_environment()
+
+@@ -1620,7 +1615,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
+ "server_encrypted": blob.properties.server_encrypted,
+ }
+ except Exception as exc: # pylint: disable=broad-except
+- log.warning(six.text_type(exc))
++ log.warning(str(exc))
+
+ return ret
+
+@@ -1655,9 +1650,7 @@ def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argum
+ compconn.disks.delete(kwargs["resource_group"], kwargs["blob"])
+ except Exception as exc: # pylint: disable=broad-except
+ log.error(
+- "Error deleting managed disk %s - %s",
+- kwargs.get("blob"),
+- six.text_type(exc),
++ "Error deleting managed disk %s - %s", kwargs.get("blob"), str(exc),
+ )
+ return False
+
+@@ -1834,7 +1827,7 @@ def create_or_update_vmextension(
+ except CloudError as exc:
+ __utils__["azurearm.log_cloud_error"](
+ "compute",
+- "Error attempting to create the VM extension: {0}".format(exc.message),
++ "Error attempting to create the VM extension: {}".format(exc.message),
+ )
+ ret = {"error": exc.message}
+
+@@ -1881,11 +1874,9 @@ def stop(name, call=None):
+ ret = {"error": exc.message}
+ if not ret:
+ __utils__["azurearm.log_cloud_error"](
+- "compute", "Unable to find virtual machine with name: {0}".format(name)
++ "compute", "Unable to find virtual machine with name: {}".format(name)
+ )
+- ret = {
+- "error": "Unable to find virtual machine with name: {0}".format(name)
+- }
++ ret = {"error": "Unable to find virtual machine with name: {}".format(name)}
+ else:
+ try:
+ instance = compconn.virtual_machines.deallocate(
+@@ -1896,7 +1887,7 @@ def stop(name, call=None):
+ ret = vm_result.as_dict()
+ except CloudError as exc:
+ __utils__["azurearm.log_cloud_error"](
+- "compute", "Error attempting to stop {0}: {1}".format(name, exc.message)
++ "compute", "Error attempting to stop {}: {}".format(name, exc.message)
+ )
+ ret = {"error": exc.message}
+
+@@ -1945,11 +1936,9 @@ def start(name, call=None):
+ ret = {"error": exc.message}
+ if not ret:
+ __utils__["azurearm.log_cloud_error"](
+- "compute", "Unable to find virtual machine with name: {0}".format(name)
++ "compute", "Unable to find virtual machine with name: {}".format(name)
+ )
+- ret = {
+- "error": "Unable to find virtual machine with name: {0}".format(name)
+- }
++ ret = {"error": "Unable to find virtual machine with name: {}".format(name)}
+ else:
+ try:
+ instance = compconn.virtual_machines.start(
+@@ -1960,8 +1949,7 @@ def start(name, call=None):
+ ret = vm_result.as_dict()
+ except CloudError as exc:
+ __utils__["azurearm.log_cloud_error"](
+- "compute",
+- "Error attempting to start {0}: {1}".format(name, exc.message),
++ "compute", "Error attempting to start {}: {}".format(name, exc.message),
+ )
+ ret = {"error": exc.message}
+
--
-2.16.4
+2.29.2
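The hunks above repeat one mechanical Python 2-to-3 cleanup throughout the Azure driver. A minimal standalone sketch of the six-to-stdlib equivalences being applied (illustrative values, not part of the patch):

    d = {"pool": "id-0"}
    assert dict(d.items()) == d                     # six.iteritems(d) -> d.items()
    assert next(iter(d.values())) == "id-0"         # six.itervalues(d) -> iter(d.values())
    assert str(42) == "42"                          # six.text_type(x) -> str(x)
    assert isinstance("vm", str)                    # six.string_types -> str
    assert "{}-iface0".format("vm") == "vm-iface0"  # "{0}-iface0" -> auto-numbered "{}"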
diff --git a/invalidate-file-list-cache-when-cache-file-modified-.patch b/invalidate-file-list-cache-when-cache-file-modified-.patch
index a4826a3..981febd 100644
--- a/invalidate-file-list-cache-when-cache-file-modified-.patch
+++ b/invalidate-file-list-cache-when-cache-file-modified-.patch
@@ -1,4 +1,4 @@
-From 1ca1bb7c01b1e589147c32b16eda719537ab5b62 Mon Sep 17 00:00:00 2001
+From c9268ec731371cdd7b2fc129ad111d9f73800752 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Tue, 22 Sep 2020 15:15:51 +0100
@@ -8,16 +8,16 @@ Subject: [PATCH] Invalidate file list cache when cache file modified
Add test_future_file_list_cache_file_ignored unit test
---
salt/fileserver/__init__.py | 2 +-
- tests/unit/test_fileserver.py | 47 +++++++++++++++++++++++++++++++++--
- 2 files changed, 46 insertions(+), 3 deletions(-)
+ tests/unit/test_fileserver.py | 53 +++++++++++++++++++++++------------
+ 2 files changed, 36 insertions(+), 19 deletions(-)
diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py
-index 919987e2fc..1b8de51bdc 100644
+index c8c417168f..b9e345d8c3 100644
--- a/salt/fileserver/__init__.py
+++ b/salt/fileserver/__init__.py
-@@ -142,7 +142,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
- 'file=%s mtime=%s current_time=%s',
- list_cache, current_time, file_mtime
+@@ -132,7 +132,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
+ current_time,
+ file_mtime,
)
- age = 0
+ age = -1
@@ -25,46 +25,58 @@ index 919987e2fc..1b8de51bdc 100644
age = current_time - file_mtime
else:
diff --git a/tests/unit/test_fileserver.py b/tests/unit/test_fileserver.py
-index d38e22c8e1..b92b32947b 100644
+index 0bf30ee5cc..a1087bf4b0 100644
--- a/tests/unit/test_fileserver.py
+++ b/tests/unit/test_fileserver.py
-@@ -6,11 +6,17 @@
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
+@@ -1,14 +1,15 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Joao Mesquita
+ """
--# Import Salt Testing libs
--from tests.support.unit import TestCase
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+-from salt import fileserver
+import datetime
+import os
+import time
+-# Import Salt Testing libs
+import salt.utils.files
- from salt import fileserver
-
-+# Import Salt Testing libs
++from salt import fileserver
+from tests.support.helpers import with_tempdir
-+from tests.support.unit import TestCase
-+
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.unit import TestCase
- class MapDiffTestCase(TestCase):
- def test_diff_with_diffent_keys(self):
-@@ -28,3 +34,40 @@ class MapDiffTestCase(TestCase):
- map1 = {'file1': 12345}
- map2 = {'file1': 1234}
+@@ -31,22 +32,38 @@ class MapDiffTestCase(TestCase):
assert fileserver.diff_mtime_map(map1, map2) is True
-+
-+
+
+
+-class VCSBackendWhitelistCase(TestCase, LoaderModuleMockMixin):
+class VCSBackendWhitelistCase(TestCase):
-+ def setup_loader_modules(self):
-+ return {fileserver: {}}
-+
+ def setup_loader_modules(self):
+ return {fileserver: {}}
+
+- def test_whitelist(self):
+ @with_tempdir()
+ def test_future_file_list_cache_file_ignored(self, cachedir):
-+ opts = {
+ opts = {
+- "fileserver_backend": ["roots", "git", "hgfs", "svn"],
+ "fileserver_backend": ["roots"],
+ "cachedir": cachedir,
-+ "extension_modules": "",
-+ }
+ "extension_modules": "",
+ }
+- fs = fileserver.Fileserver(opts)
+- assert fs.servers.whitelist == [
+- "git",
+- "gitfs",
+- "hg",
+- "hgfs",
+- "svn",
+- "svnfs",
+- "roots",
+- ], fs.servers.whitelist
+
+ back_cachedir = os.path.join(cachedir, "file_lists/roots")
+ os.makedirs(os.path.join(back_cachedir))
@@ -90,6 +102,6 @@ index d38e22c8e1..b92b32947b 100644
+ ret[1] is True
+ ), "Cache file list cache file is not refreshed when future modification time"
--
-2.28.0
+2.29.2
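The core of this patch is the one-line change from ``age = 0`` to ``age = -1``: with 0, a cache file whose mtime lies in the future looked zero seconds old and was served as fresh. A simplified sketch of the guard, paraphrasing check_file_list_cache rather than quoting it:

    import time

    current_time = int(time.time())
    file_mtime = current_time + 3600  # cache file stamped in the future
    age = -1                          # was 0 before this patch
    if file_mtime <= current_time:
        age = current_time - file_mtime
    # age stays -1, so the file list cache is rebuilt instead of trusted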
diff --git a/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
index fc103b0..ebbc276 100644
--- a/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
+++ b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
@@ -1,4 +1,4 @@
-From 2b5903d2429607a3f46d648520e24c357a56aea6 Mon Sep 17 00:00:00 2001
+From a6e490d8cede6e66bb5f22f314e1ec4e898dfa3c Mon Sep 17 00:00:00 2001
From: Can Bulut Bayburt <1103552+cbbayburt@users.noreply.github.com>
Date: Wed, 4 Dec 2019 15:59:46 +0100
Subject: [PATCH] Let salt-ssh use 'platform-python' binary in RHEL8
@@ -14,7 +14,7 @@ creating the sh shim.
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
-index 1373274739..d9e91b0f50 100644
+index 287d0b8c4c..ef9eb0c07e 100644
--- a/salt/client/ssh/__init__.py
+++ b/salt/client/ssh/__init__.py
@@ -147,7 +147,7 @@ elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
@@ -27,6 +27,6 @@ index 1373274739..d9e91b0f50 100644
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
--
-2.16.4
+2.29.2
diff --git a/loader-invalidate-the-import-cachefor-extra-modules.patch b/loader-invalidate-the-import-cachefor-extra-modules.patch
deleted file mode 100644
index de72b97..0000000
--- a/loader-invalidate-the-import-cachefor-extra-modules.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 444e00c6601b878444923f573fdb5f000342be9a Mon Sep 17 00:00:00 2001
-From: Alberto Planas
-Date: Thu, 12 Mar 2020 16:39:42 +0100
-Subject: [PATCH] loader: invalidate the import cachefor extra modules
-
-Because we are mangling with importlib, we can find from time to
-time an invalidation issue with sys.path_importer_cache, that
-requires the removal of FileFinder that remain None for the
-extra_module_dirs
-
-(cherry picked from commit 0fb8e707a45d5caf40759e8b4943590d6fce5046)
----
- salt/loader.py | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/salt/loader.py b/salt/loader.py
-index 742b2f8e22..5bd4773645 100644
---- a/salt/loader.py
-+++ b/salt/loader.py
-@@ -1544,9 +1544,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- self._clean_module_dirs.append(directory)
-
- def __clean_sys_path(self):
-+ invalidate_path_importer_cache = False
- for directory in self._clean_module_dirs:
- if directory in sys.path:
- sys.path.remove(directory)
-+ invalidate_path_importer_cache = True
- self._clean_module_dirs = []
-
- # Be sure that sys.path_importer_cache do not contains any
-@@ -1554,6 +1556,16 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- if USE_IMPORTLIB:
- importlib.invalidate_caches()
-
-+ # Because we are mangling with importlib, we can find from
-+ # time to time an invalidation issue with
-+ # sys.path_importer_cache, that requires the removal of
-+ # FileFinder that remain None for the extra_module_dirs
-+ if invalidate_path_importer_cache:
-+ for directory in self.extra_module_dirs:
-+ if directory in sys.path_importer_cache \
-+ and sys.path_importer_cache[directory] is None:
-+ del sys.path_importer_cache[directory]
-+
- def _load_module(self, name):
- mod = None
- fpath, suffix = self.file_mapping[name][:2]
---
-2.16.4
-
-
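For context on the dropped patch: it pruned stale ``sys.path_importer_cache`` entries left behind after removing loader directories from ``sys.path``. A hypothetical standalone illustration (the directory path is illustrative):

    import sys

    extra_module_dirs = ["/var/cache/salt/minion/extmods"]
    for directory in extra_module_dirs:
        # An entry that resolved to None is a stale FileFinder marker;
        # removing it makes importlib re-scan the directory next time.
        if sys.path_importer_cache.get(directory, "unset") is None:
            del sys.path_importer_cache[directory]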
diff --git a/loop-fix-variable-names-for-until_no_eval.patch b/loop-fix-variable-names-for-until_no_eval.patch
index 4a7e1d6..03d3416 100644
--- a/loop-fix-variable-names-for-until_no_eval.patch
+++ b/loop-fix-variable-names-for-until_no_eval.patch
@@ -1,4 +1,4 @@
-From 2670f83fd1309fbf9fdc98f15f9a6e6a3ecc038d Mon Sep 17 00:00:00 2001
+From 239e897776b889105cfd6f54092100c86f52ce21 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 24 Mar 2020 17:46:23 +0100
Subject: [PATCH] loop: fix variable names for until_no_eval
@@ -8,12 +8,12 @@ Subject: [PATCH] loop: fix variable names for until_no_eval
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/salt/states/loop.py b/salt/states/loop.py
-index 726c8c80165803f3b2d98bf7a197013c53f3ebc8..b631e6c8f62416c04b458a595dc31393987eb904 100644
+index de37b7d60c..533166c5dc 100644
--- a/salt/states/loop.py
+++ b/salt/states/loop.py
-@@ -185,10 +185,10 @@ def until_no_eval(
- ''.format(name, expected))
- if ret['comment']:
+@@ -182,10 +182,10 @@ def until_no_eval(
+ )
+ if ret["comment"]:
return ret
- if not m_args:
- m_args = []
@@ -27,6 +27,6 @@ index 726c8c80165803f3b2d98bf7a197013c53f3ebc8..b631e6c8f62416c04b458a595dc31393
if init_wait:
time.sleep(init_wait)
--
-2.23.0
+2.29.2
diff --git a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch b/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch
index 3b1f943..fb57693 100644
--- a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch
+++ b/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch
@@ -1,7 +1,8 @@
-From c9538180f4dd8875ab57dfa3f51ff59608d2481b Mon Sep 17 00:00:00 2001
+From 6381be1a6e6d863f85dd33c82b9b949b552a7e49 Mon Sep 17 00:00:00 2001
From: Joachim Gleissner
Date: Tue, 18 Sep 2018 15:07:13 +0200
-Subject: [PATCH] loosen azure sdk dependencies in azurearm cloud driver
+Subject: [PATCH] loosen azure sdk dependencies in azurearm cloud
+ driver
Remove dependency to azure-cli, which is not used at all.
Use azure-storage-sdk as fallback if multiapi version is not available.
@@ -12,22 +13,22 @@ remove unused import from azurearm driver
1 file changed, 6 insertions(+)
diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py
-index 2c1fa04ae8..d5757c6d28 100644
+index 8b9254cecb..0e92a56156 100644
--- a/salt/cloud/clouds/azurearm.py
+++ b/salt/cloud/clouds/azurearm.py
-@@ -104,6 +104,7 @@ import time
-
- # Salt libs
- from salt.ext import six
+@@ -98,6 +98,7 @@ import importlib
+ import logging
+ import os
+ import os.path
+import pkgutil
- import salt.cache
- import salt.config as config
- import salt.loader
-@@ -126,6 +127,11 @@ try:
- import azure.mgmt.network.models as network_models
+ import pprint
+ import string
+ import time
+@@ -129,6 +130,11 @@ try:
from azure.storage.blob.blockblobservice import BlockBlobService
from msrestazure.azure_exceptions import CloudError
-+ if pkgutil.find_loader('azure.multiapi'):
+
++ if pkgutil.find_loader("azure.multiapi"):
+ # use multiapi version if available
+ from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
+ else:
@@ -36,6 +37,6 @@ index 2c1fa04ae8..d5757c6d28 100644
except ImportError:
pass
--
-2.16.4
+2.29.2
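``pkgutil.find_loader`` returns ``None`` when a module cannot be found, which makes it a cheap probe for the optional multiapi bundle. A hedged sketch of the resulting import fallback; the ``else`` branch is an assumption based on the commit message's azure-storage-sdk fallback:

    import pkgutil

    if pkgutil.find_loader("azure.multiapi"):
        # multiapi bundle present: pin a known storage API version
        from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
    else:
        from azure.storage import CloudStorageAccount  # assumed fallback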
diff --git a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch
index 99c22b9..1e05b8b 100644
--- a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch
+++ b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch
@@ -1,26 +1,34 @@
-From 93f69a227b7f8c3d4625c0699ab3923d4a0b3127 Mon Sep 17 00:00:00 2001
+From 7d507f8f5879a1de3e707fdb5cadd618a150123f Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Fri, 16 Nov 2018 10:54:12 +0100
Subject: [PATCH] Make aptpkg.list_repos compatible on enabled/disabled
output
---
- salt/modules/aptpkg.py | 1 +
- 1 file changed, 1 insertion(+)
+ salt/modules/aptpkg.py | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index b5503f0b10..8f4d95a195 100644
+index 1e2866b47b..70e173806a 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -1641,6 +1641,7 @@ def list_repos():
- repo['file'] = source.file
- repo['comps'] = getattr(source, 'comps', [])
- repo['disabled'] = source.disabled
-+ repo['enabled'] = not repo['disabled'] # This is for compatibility with the other modules
- repo['dist'] = source.dist
- repo['type'] = source.type
- repo['uri'] = source.uri.rstrip('/')
+@@ -1681,10 +1681,13 @@ def list_repos(**kwargs):
+ repo["file"] = source.file
+ repo["comps"] = getattr(source, "comps", [])
+ repo["disabled"] = source.disabled
++ repo["enabled"] = not repo[
++ "disabled"
++ ] # This is for compatibility with the other modules
+ repo["dist"] = source.dist
+ repo["type"] = source.type
+- repo["uri"] = source.uri
+- repo["line"] = source.line.strip()
++ repo["uri"] = source.uri.rstrip("/")
++ repo["line"] = salt.utils.pkg.deb.strip_uri(source.line.strip())
+ repo["architectures"] = getattr(source, "architectures", [])
+ repos.setdefault(source.uri, []).append(repo)
+ return repos
--
-2.16.4
+2.29.2
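The added key only mirrors ``disabled``, but it lets callers treat apt repos like yum/zypper ones. A hypothetical entry as returned after this patch (field values illustrative):

    repo = {
        "file": "/etc/apt/sources.list",
        "comps": ["main"],
        "disabled": False,
        "enabled": True,  # derived as: not repo["disabled"]
        "dist": "focal",
        "type": "deb",
        "uri": "http://archive.ubuntu.com/ubuntu",
        "architectures": [],
    }
    assert repo["enabled"] == (not repo["disabled"])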
diff --git a/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch b/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch
deleted file mode 100644
index 277808a..0000000
--- a/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 6af6a52165c70c3be7c8d339a3dd5e539f3c1772 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Thu, 23 Apr 2020 09:54:53 +0100
-Subject: [PATCH] Make LazyLoader.__init__ call to
- _refresh_file_mapping thread-safe (bsc#1169604)
-
----
- salt/loader.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/salt/loader.py b/salt/loader.py
-index 5bd4773645c77a133701982e19d19739be00a38f..54dadb0b513dbaa4914b0d4b1d343dde709699ad 100644
---- a/salt/loader.py
-+++ b/salt/loader.py
-@@ -1251,7 +1251,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- self.suffix_order.append(suffix)
-
- self._lock = threading.RLock()
-- self._refresh_file_mapping()
-+ with self._lock:
-+ self._refresh_file_mapping()
-
- super(LazyLoader, self).__init__() # late init the lazy loader
- # create all of the import namespaces
---
-2.23.0
-
-
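The dropped patch's idea, as a minimal generic sketch (class and method names illustrative): take the same lock during construction that later refreshes take, so a concurrent refresh cannot observe a half-built mapping.

    import threading

    class Loader:
        def __init__(self):
            self._lock = threading.RLock()
            with self._lock:          # cover the initial scan as well
                self._refresh_file_mapping()

        def _refresh_file_mapping(self):
            self.file_mapping = {}    # placeholder for the real scan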
diff --git a/make-profiles-a-package.patch b/make-profiles-a-package.patch
index 58f3855..e10a211 100644
--- a/make-profiles-a-package.patch
+++ b/make-profiles-a-package.patch
@@ -1,4 +1,4 @@
-From 2aeefa07ff52048e2db5c8c4ebb1cde6efe87cee Mon Sep 17 00:00:00 2001
+From 44dfbc906e4c19eef6c9cfe96c76a99e6077c7ec Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Mon, 8 Oct 2018 17:52:07 +0200
Subject: [PATCH] Make profiles a package.
@@ -22,6 +22,6 @@ index 0000000000..b86aef30b8
+Profiles for salt-support.
+'''
--
-2.16.4
+2.29.2
diff --git a/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch b/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch
deleted file mode 100644
index 7c0f22c..0000000
--- a/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 023d1256106319d042233021c0f200bcdc0cd1f0 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Fri, 13 Mar 2020 13:01:57 +0000
-Subject: [PATCH] Make salt.ext.tornado.gen to use salt.ext.backports_abc
- on Python 2
-
----
- salt/ext/tornado/gen.py | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/salt/ext/tornado/gen.py b/salt/ext/tornado/gen.py
-index 6cb19730bf1ef3893a4626e9e144eac1c6fa9683..72f422ce28fa43132782a7a0d61b31acd32d138b 100644
---- a/salt/ext/tornado/gen.py
-+++ b/salt/ext/tornado/gen.py
-@@ -115,13 +115,13 @@ try:
- # py35+
- from collections.abc import Generator as GeneratorType # type: ignore
- except ImportError:
-- from backports_abc import Generator as GeneratorType # type: ignore
-+ from salt.ext.backports_abc import Generator as GeneratorType # type: ignore
-
- try:
- # py35+
- from inspect import isawaitable # type: ignore
- except ImportError:
-- from backports_abc import isawaitable
-+ from salt.ext.backports_abc import isawaitable
- except ImportError:
- if 'APPENGINE_RUNTIME' not in os.environ:
- raise
---
-2.23.0
-
-
diff --git a/make-setup.py-script-to-not-require-setuptools-9.1.patch b/make-setup.py-script-to-not-require-setuptools-9.1.patch
index 90e47a7..67bf87c 100644
--- a/make-setup.py-script-to-not-require-setuptools-9.1.patch
+++ b/make-setup.py-script-to-not-require-setuptools-9.1.patch
@@ -1,4 +1,4 @@
-From b73d7f73bebcde2936a55245471fbcb383778b6d Mon Sep 17 00:00:00 2001
+From 64c2735b64a074acc1ef05a82f9fcf342426f87e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 25 Mar 2020 13:09:52 +0000
@@ -9,17 +9,17 @@ Subject: [PATCH] Make setup.py script to not require setuptools > 9.1
1 file changed, 8 deletions(-)
diff --git a/setup.py b/setup.py
-index e852080e4b..033ccee8c3 100755
+index 39a66fefba..d9c3d6e303 100755
--- a/setup.py
+++ b/setup.py
-@@ -727,14 +727,6 @@ class Install(install):
+@@ -805,14 +805,6 @@ class Install(install):
install.finalize_options(self)
def run(self):
-- if LooseVersion(setuptools.__version__) < LooseVersion('9.1'):
+- if LooseVersion(setuptools.__version__) < LooseVersion("9.1"):
- sys.stderr.write(
-- '\n\nInstalling Salt requires setuptools >= 9.1\n'
-- 'Available setuptools version is {}\n\n'.format(setuptools.__version__)
+- "\n\nInstalling Salt requires setuptools >= 9.1\n"
+- "Available setuptools version is {}\n\n".format(setuptools.__version__)
- )
- sys.stderr.flush()
- sys.exit(1)
@@ -28,6 +28,6 @@ index e852080e4b..033ccee8c3 100755
# _version.py in the build command
self.distribution.running_salt_install = True
--
-2.16.4
+2.29.2
diff --git a/move-server_id-deprecation-warning-to-reduce-log-spa.patch b/move-server_id-deprecation-warning-to-reduce-log-spa.patch
index 4000a59..1053782 100644
--- a/move-server_id-deprecation-warning-to-reduce-log-spa.patch
+++ b/move-server_id-deprecation-warning-to-reduce-log-spa.patch
@@ -1,57 +1,61 @@
-From c375d1e25e8b5c77b6a8f89855f17df6e49db9f2 Mon Sep 17 00:00:00 2001
+From caffb14059c2d4ab186cb24918f4e53332f697af Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Fri, 14 Jun 2019 15:13:12 +0200
Subject: [PATCH] Move server_id deprecation warning to reduce log
spamming (bsc#1135567) (bsc#1135732)
---
- salt/grains/core.py | 4 ----
- salt/minion.py | 9 +++++++++
- 2 files changed, 9 insertions(+), 4 deletions(-)
+ salt/grains/core.py | 7 -------
+ salt/minion.py | 10 ++++++++++
+ 2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index b58c29dbc3..0f3ccd9b92 100644
+index d7d03c5e70..5f18ba4a58 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -2890,10 +2890,6 @@ def get_server_id():
- if bool(use_crc):
- id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff
+@@ -3066,13 +3066,6 @@ def get_server_id():
+ & 0xFFFFFFFF
+ )
else:
-- salt.utils.versions.warn_until('Sodium', 'This server_id is computed nor by Adler32 neither by CRC32. '
-- 'Please use "server_id_use_crc" option and define algorithm you'
-- 'prefer (default "Adler32"). The server_id will be computed with'
-- 'Adler32 by default.')
+- salt.utils.versions.warn_until(
+- "Sodium",
+- "This server_id is computed nor by Adler32 neither by CRC32. "
+- 'Please use "server_id_use_crc" option and define algorithm you'
+- 'prefer (default "Adler32"). The server_id will be computed with'
+- "Adler32 by default.",
+- )
id_hash = _get_hash_by_shell()
- server_id = {'server_id': id_hash}
+ server_id = {"server_id": id_hash}
diff --git a/salt/minion.py b/salt/minion.py
-index 457f485b0a..4730f68b87 100644
+index 4da665a130..4d271c6d08 100644
--- a/salt/minion.py
+++ b/salt/minion.py
-@@ -97,6 +97,7 @@ from salt.utils.odict import OrderedDict
- from salt.utils.process import (default_signals,
- SignalHandlingProcess,
- ProcessManager)
+@@ -82,6 +82,7 @@ from salt.utils.event import tagify
+ from salt.utils.network import parse_host_port
+ from salt.utils.odict import OrderedDict
+ from salt.utils.process import ProcessManager, SignalHandlingProcess, default_signals
+from salt.utils.versions import warn_until
- from salt.exceptions import (
- CommandExecutionError,
- CommandNotFoundError,
-@@ -1002,6 +1003,14 @@ class MinionManager(MinionBase):
- if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
+ from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
+
+ HAS_PSUTIL = False
+@@ -1096,6 +1097,15 @@ class MinionManager(MinionBase):
+ ):
masters = [masters]
-+ if not self.opts.get('server_id_use_crc'):
++ if not self.opts.get("server_id_use_crc"):
+ warn_until(
-+ 'Sodium',
-+ 'This server_id is computed nor by Adler32 neither by CRC32. '
-+ 'Please use "server_id_use_crc" option and define algorithm you'
-+ 'prefer (default "Adler32"). The server_id will be computed with'
-+ 'Adler32 by default.')
++ "Sodium",
++ "This server_id is computed nor by Adler32 neither by CRC32. "
++ 'Please use "server_id_use_crc" option and define algorithm you'
++ 'prefer (default "Adler32"). The server_id will be computed with'
++ "Adler32 by default.",
++ )
+
beacons_leader = True
for master in masters:
s_opts = copy.deepcopy(self.opts)
--
-2.16.4
+2.29.2
diff --git a/msgpack-support-versions-1.0.0.patch b/msgpack-support-versions-1.0.0.patch
deleted file mode 100644
index 4ed2394..0000000
--- a/msgpack-support-versions-1.0.0.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From ef23c1d53e99e19e5b03658aa62b67cfef9adce5 Mon Sep 17 00:00:00 2001
-From: Alberto Planas
-Date: Thu, 7 May 2020 12:40:55 +0200
-Subject: [PATCH] msgpack: support versions >= 1.0.0
-
-A recent change in msgpack >= 1.0.0, update the default value for the
-parameter `raw` to False. This change breaks Salt for those versions.
-
-This patch add the parameter `raw=True` to all the unpack operations,
-restoring the old default.
-
-Fix bsc#1171257
-
-(cherry picked from commit 1b3939fb01fc3405d8d222f118617220aecee092)
----
- salt/utils/msgpack.py | 20 +++++++++++++++++---
- 1 file changed, 17 insertions(+), 3 deletions(-)
-
-diff --git a/salt/utils/msgpack.py b/salt/utils/msgpack.py
-index 4b5a256513..027fe81a18 100644
---- a/salt/utils/msgpack.py
-+++ b/salt/utils/msgpack.py
-@@ -69,12 +69,26 @@ def _sanitize_msgpack_kwargs(kwargs):
- return kwargs
-
-
-+def _sanitize_msgpack_unpack_kwargs(kwargs):
-+ """
-+ Clean up msgpack keyword arguments for unpack operations, based on
-+ the version
-+ https://github.com/msgpack/msgpack-python/blob/master/ChangeLog.rst
-+ """
-+ assert isinstance(kwargs, dict)
-+ if version >= (1, 0, 0) and kwargs.get("raw", None) is None:
-+ log.info("adding `raw=True` argument to msgpack call")
-+ kwargs["raw"] = True
-+
-+ return _sanitize_msgpack_kwargs(kwargs)
-+
-+
- class Unpacker(msgpack.Unpacker):
- '''
- Wraps the msgpack.Unpacker and removes non-relevant arguments
- '''
- def __init__(self, *args, **kwargs):
-- msgpack.Unpacker.__init__(self, *args, **_sanitize_msgpack_kwargs(kwargs))
-+ msgpack.Unpacker.__init__(self, *args, **_sanitize_msgpack_unpack_kwargs(kwargs))
-
-
- def pack(o, stream, **kwargs):
-@@ -113,7 +127,7 @@ def unpack(stream, **kwargs):
- By default, this function uses the msgpack module and falls back to
- msgpack_pure, if the msgpack is not available.
- '''
-- return msgpack.unpack(stream, **_sanitize_msgpack_kwargs(kwargs))
-+ return msgpack.unpack(stream, **_sanitize_msgpack_unpack_kwargs(kwargs))
-
-
- def unpackb(packed, **kwargs):
-@@ -125,7 +139,7 @@ def unpackb(packed, **kwargs):
- By default, this function uses the msgpack module and falls back to
- msgpack_pure.
- '''
-- return msgpack.unpackb(packed, **_sanitize_msgpack_kwargs(kwargs))
-+ return msgpack.unpackb(packed, **_sanitize_msgpack_unpack_kwargs(kwargs))
-
-
- # alias for compatibility to simplejson/marshal/pickle.
---
-2.26.1
-
-
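The incompatibility the dropped patch papered over is easy to reproduce standalone (requires the third-party msgpack package):

    import msgpack

    packed = msgpack.packb("minion", use_bin_type=False)
    assert msgpack.unpackb(packed, raw=True) == b"minion"   # pre-1.0 default
    assert msgpack.unpackb(packed, raw=False) == "minion"   # default since 1.0.0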
diff --git a/open-suse-3002.2-bigvm-310.patch b/open-suse-3002.2-bigvm-310.patch
new file mode 100644
index 0000000..cf80e59
--- /dev/null
+++ b/open-suse-3002.2-bigvm-310.patch
@@ -0,0 +1,6515 @@
+From 0d606b481752d1112321046ce78d3a7f9d2a6604 Mon Sep 17 00:00:00 2001
+From: Cedric Bosdonnat
+Date: Tue, 12 Jan 2021 10:48:27 +0100
+Subject: [PATCH] Open suse 3002.2 bigvm (#310)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* revert stop_on_reboot commits to help applying upstream patches
+
+* libvirt domain template memory config fixes
+
+Add unit tests for _gen_xml() on the recently added memory parameters.
+Also fixes an issue with an optional attribute.
+
+* virt: support host numa tuning capability
+
+* fixup! precommit failure fix
+
+* virt: support cpu model and topology
+
+* virt: make context preprocessing more reusable in _gen_xml
+
+Introduce mapping structures in order to help reuse the common patterns
+in the virt._gen_xml() context pre-processing.
+
+* xmlutil.change_xml properly handle xpath node number
+
+In XPath the node numbers are counted from 1 rather than 0.
+Thus /foo/bar[0] is invalid and should be /foo/bar[1].
+
+Since in the change_xml function we are getting the index from python
+lists in these cases, we need to offset these.
+
+* virt: support memory_backing
+
+* virt: support cpu tuning and IOThread allocation
+
+* xmlutil.change_xml: properly handle updated return value for removals
+
+When deleting an attribute that doesn't exist in the node we should not
+report a change was made.
+
+* virt.update: properly handle nosharepages and locked elements
+
+When updating we shouldn't set the value as text in those elements.
+Libvirt seems happy with it, but it forces modifying the VM definition
+even if there was no change.
+
+* xmlutil: use a comparison function to update XML
+
+When updating an XML file, we may need to have a more intelligent
+comparison of the current and new values. This typically fits for the
+case of numeric values that may have a negligible delta.
+
+* virt.update: handle tiny difference in memory values
+
+Libvirt may round the memory values when defining or updating a VM. That
+is perfectly fine, but then the value are slightly different from the
+ones passed to the virt.update() function or the virt.running state.
+In those cases the state would be reapplied even though there is no real
+difference with the VM.
+
+In order to handle that case the memory parameters in the virt.update
+mapping now have a comparison function that considers the tiny differences
+as equal.
+
+This commit also factorizes the creation of the memory entries in the
+virt.update() mapping.
+
+* virt.update: factorize the mapping value definition
+
+In the mapping passed to xmlutil.change_xml() in virt.update() there are
+a lot of common patterns. Extract these into helper functions. Some of
+them are common enough to even be defined in the xmlutil module.
+
+* virt: add kvm-hint-dedicated feature handling
+
+* virt: add clock configuration for guests
+
+* virt: add qemu guest agent channel
+
+For libvirt to be able to communicate with the QEMU Guest Agent if
+installed in the guest, a channel named org.qemu.guest_agent.0 is
+needed.
+
+Add this channel by default on all newly created KVM virtual machines.
+
+* virt: allow using IO threads on disks
+
+* Remove unneeded VM XML definition fragments in tests
+
+* virt: canonicalize cpuset before comparing
+
+Multiple libvirt cpuset notations can designate the same thing. We need
+to expand those notations into an actual cpu list in order to be able to
+properly compare.
+
+For instance if the libvirt definition has '0-5,^4', and we have
+'0,1,2,3,5' passed to virt.update(), those should not trigger an update
+of the definition since they are defining the same thing.
+
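+A standalone sketch of that expansion (hypothetical helper, mirroring the
+_expand_cpuset function added further down in this patch):
+
+    def expand(cpuset):  # "0-5,^4" -> {0, 1, 2, 3, 5}
+        keep, drop = set(), set()
+        for part in cpuset.split(","):
+            if "-" in part and not part.startswith("^"):
+                low, high = part.split("-")
+                keep |= set(range(int(low), int(high) + 1))
+            elif part.startswith("^"):
+                drop.add(int(part[1:]))
+            else:
+                keep.add(int(part))
+        return keep - drop
+
+    assert expand("0-5,^4") == expand("0,1,2,3,5") == {0, 1, 2, 3, 5}
+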
+* virt: only live update vcpu max if there is a change
+
+* Add console and serial to update and running status
+
+* virt: cleanup the consoles and serials support
+
+* virt: add stop_on_reboot parameter in guest states and definition
+
+It can be needed to force a VM to stop instead of rebooting. A typical
+example of this is when creating a VM using a install CDROM ISO or when
+using an autoinstallation profile. Forcing a shutdown allows libvirt to
+pick up another XML definition for the new start to remove the
+firstboot-only options.
+
+* virt: expose live parameter in virt.defined state
+
+Allow updating the definition of a VM without touching the live
+instance. This can be helpful since live update may change the device
+names in the guest.
+
+* Ensure virt.update stop_on_reboot is updated with its default value
+
+While all virt.update properties default values should not be used when
+updating the XML definition, the stop_on_reboot default value (False)
+needs to be passed still or the user will never be able to update with
+this value.
+
+Co-authored-by: gqlo
+Co-authored-by: gqlo
+Co-authored-by: marina2209
+---
+ changelog/57880.added | 1 +
+ changelog/58844.added | 1 +
+ salt/modules/virt.py | 1232 ++++++-
+ salt/states/virt.py | 341 +-
+ salt/templates/virt/libvirt_chardevs.jinja | 16 +
+ salt/templates/virt/libvirt_domain.jinja | 268 +-
+ salt/utils/xmlutil.py | 79 +-
+ tests/pytests/unit/modules/virt/conftest.py | 126 +
+ .../pytests/unit/modules/virt/test_domain.py | 335 ++
+ tests/pytests/unit/utils/test_xmlutil.py | 41 +
+ tests/unit/modules/test_virt.py | 2961 +++++++++++++++--
+ tests/unit/states/test_virt.py | 57 +
+ 12 files changed, 4934 insertions(+), 524 deletions(-)
+ create mode 100644 changelog/57880.added
+ create mode 100644 changelog/58844.added
+ create mode 100644 salt/templates/virt/libvirt_chardevs.jinja
+
+diff --git a/changelog/57880.added b/changelog/57880.added
+new file mode 100644
+index 0000000000..6fff4295fa
+--- /dev/null
++++ b/changelog/57880.added
+@@ -0,0 +1 @@
++CPU model, topology and NUMA node tuning
+diff --git a/changelog/58844.added b/changelog/58844.added
+new file mode 100644
+index 0000000000..c8599125d2
+--- /dev/null
++++ b/changelog/58844.added
+@@ -0,0 +1 @@
++Enhance console and serial support in virt module
+diff --git a/salt/modules/virt.py b/salt/modules/virt.py
+index 786bfa1e58..b852f8175d 100644
+--- a/salt/modules/virt.py
++++ b/salt/modules/virt.py
+@@ -788,11 +788,11 @@ def _handle_unit(s, def_unit="m"):
+ return int(value)
+
+
+-def nesthash():
++def nesthash(value=None):
+ """
+ create default dict that allows arbitrary level of nesting
+ """
+- return collections.defaultdict(nesthash)
++ return collections.defaultdict(nesthash, value or {})
+
+
+ def _gen_xml(
+@@ -808,6 +808,11 @@ def _gen_xml(
+ graphics=None,
+ boot=None,
+ boot_dev=None,
++ numatune=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ **kwargs
+ ):
+@@ -817,24 +822,36 @@ def _gen_xml(
+ context = {
+ "hypervisor": hypervisor,
+ "name": name,
+- "cpu": str(cpu),
++ "hypervisor_features": hypervisor_features or {},
++ "clock": clock or {},
+ "on_reboot": "destroy" if stop_on_reboot else "restart",
+ }
+
++ context["to_kib"] = lambda v: int(_handle_unit(v) / 1024)
++ context["yesno"] = lambda v: "yes" if v else "no"
++
+ context["mem"] = nesthash()
+ if isinstance(mem, int):
+- mem = int(mem) * 1024 # MB
+- context["mem"]["boot"] = str(mem)
+- context["mem"]["current"] = str(mem)
++ context["mem"]["boot"] = mem
++ context["mem"]["current"] = mem
+ elif isinstance(mem, dict):
+- for tag, val in mem.items():
+- if val:
+- if tag == "slots":
+- context["mem"]["slots"] = "{}='{}'".format(tag, val)
+- else:
+- context["mem"][tag] = str(int(_handle_unit(val) / 1024))
++ context["mem"] = nesthash(mem)
++
++ context["cpu"] = nesthash()
++ context["cputune"] = nesthash()
++ if isinstance(cpu, int):
++ context["cpu"]["maximum"] = str(cpu)
++ elif isinstance(cpu, dict):
++ context["cpu"] = nesthash(cpu)
++
++ if clock:
++ offset = "utc" if clock.get("utc", True) else "localtime"
++ if "timezone" in clock:
++ offset = "timezone"
++ context["clock"]["offset"] = offset
+
+ if hypervisor in ["qemu", "kvm"]:
++ context["numatune"] = numatune if numatune else {}
+ context["controller_model"] = False
+ elif hypervisor == "vmware":
+ # TODO: make bus and model parameterized, this works for 64-bit Linux
+@@ -873,18 +890,57 @@ def _gen_xml(
+ context["boot"]["kernel"] = "/usr/lib/grub2/x86_64-xen/grub.xen"
+ context["boot_dev"] = []
+
+- if "serial_type" in kwargs:
+- context["serial_type"] = kwargs["serial_type"]
+- if "serial_type" in context and context["serial_type"] == "tcp":
+- if "telnet_port" in kwargs:
+- context["telnet_port"] = kwargs["telnet_port"]
+- else:
+- context["telnet_port"] = 23023 # FIXME: use random unused port
+- if "serial_type" in context:
+- if "console" in kwargs:
+- context["console"] = kwargs["console"]
+- else:
+- context["console"] = True
++ default_port = 23023
++ default_chardev_type = "tcp"
++
++ chardev_types = ["serial", "console"]
++ for chardev_type in chardev_types:
++ context[chardev_type + "s"] = []
++ parameter_value = locals()[chardev_type + "s"]
++ if parameter_value is not None:
++ for chardev in parameter_value:
++ chardev_context = chardev
++ chardev_context["type"] = chardev.get("type", default_chardev_type)
++
++ if chardev_context["type"] == "tcp":
++ chardev_context["port"] = chardev.get("port", default_port)
++ chardev_context["protocol"] = chardev.get("protocol", "telnet")
++ context[chardev_type + "s"].append(chardev_context)
++
++ # processing of deprecated parameters
++ old_port = kwargs.get("telnet_port")
++ if old_port:
++ salt.utils.versions.warn_until(
++ "Phosphorus",
++ "'telnet_port' parameter has been deprecated, use the 'serials' and 'consoles' parameters instead. "
++ "'telnet_port' parameter has been deprecated, use the 'serials' parameter with a value "
++ "like ``{{{{'type': 'tcp', 'protocol': 'telnet', 'port': {}}}}}`` instead and a similar `consoles` parameter. "
++ "It will be removed in {{version}}.".format(old_port),
++ )
++
++ old_serial_type = kwargs.get("serial_type")
++ if old_serial_type:
++ salt.utils.versions.warn_until(
++ "Phosphorus",
++ "'serial_type' parameter has been deprecated, use the 'serials' parameter with a value "
++ "like ``{{{{'type': '{}', 'protocol': 'telnet' }}}}`` instead and a similar `consoles` parameter. "
++ "It will be removed in {{version}}.".format(old_serial_type),
++ )
++ serial_context = {"type": old_serial_type}
++ if serial_context["type"] == "tcp":
++ serial_context["port"] = old_port or default_port
++ serial_context["protocol"] = "telnet"
++ context["serials"].append(serial_context)
++
++ old_console = kwargs.get("console")
++ if old_console:
++ salt.utils.versions.warn_until(
++ "Phosphorus",
++ "'console' parameter has been deprecated, use the 'serials' and 'consoles' parameters instead. "
++ "It will be removed in {version}.",
++ )
++ if old_console is True:
++ context["consoles"].append(serial_context)
+
+ context["disks"] = []
+ disk_bus_map = {"virtio": "vd", "xen": "xvd", "fdc": "fd", "ide": "hd"}
+@@ -897,6 +953,7 @@ def _gen_xml(
+ "disk_bus": disk["model"],
+ "format": disk.get("format", "raw"),
+ "index": str(i),
++ "io": "threads" if disk.get("iothreads", False) else "native",
+ }
+ targets.append(disk_context["target_dev"])
+ if disk.get("source_file"):
+@@ -946,7 +1003,6 @@ def _gen_xml(
+
+ context["os_type"] = os_type
+ context["arch"] = arch
+-
+ fn_ = "libvirt_domain.jinja"
+ try:
+ template = JINJA.get_template(fn_)
+@@ -1751,6 +1807,11 @@ def init(
+ arch=None,
+ boot=None,
+ boot_dev=None,
++ numatune=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ **kwargs
+ ):
+@@ -1758,13 +1819,126 @@ def init(
+ Initialize a new vm
+
+ :param name: name of the virtual machine to create
+- :param cpu: Number of virtual CPUs to assign to the virtual machine
+- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
++ :param cpu:
++ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure
++ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is
++ documented in :ref:`init-cpu-def`.
++
++ .. code-block:: yaml
++
++ cpu:
++ placement: static
++ cpuset: 0-11
++ current: 5
++ maximum: 12
++ vcpus:
++ 0:
++ enabled: True
++ hotpluggable: False
++ order: 1
++ 1:
++ enabled: False
++ hotpluggable: True
++ match: minimum
++ mode: custom
++ check: full
++ vendor: Intel
++ model:
++ name: core2duo
++ fallback: allow
++ vendor_id: GenuineIntel
++ topology:
++ sockets: 1
++ cores: 12
++ threads: 1
++ cache:
++ level: 3
++ mode: emulate
++ features:
++ lahf: optional
++ pcid: require
++ numa:
++ 0:
++ cpus: 0-3
++ memory: 1g
++ discard: True
++ distances:
++ 0: 10 # sibling id : value
++ 1: 21
++ 2: 31
++ 3: 41
++ 1:
++ cpus: 4-6
++ memory: 1g
++ memAccess: shared
++ distances:
++ 0: 21
++ 1: 10
++ 2: 21
++ 3: 31
++ tuning:
++ vcpupin:
++ 0: 1-4,^2 # vcpuid : cpuset
++ 1: 0,1
++ 2: 2,3
++ 3: 0,4
++ emulatorpin: 1-3
++ iothreadpin:
++ 1: 5,6 # iothread id: cpuset
++ 2: 7,8
++ shares: 2048
++ period: 1000000
++ quota: -1
++ global_period: 1000000
++ global_quota: -1
++ emulator_period: 1000000
++ emulator_quota: -1
++ iothread_period: 1000000
++ iothread_quota: -1
++ vcpusched:
++ - scheduler: fifo
++ priority: 1
++ vcpus: 0,3-5
++ - scheduler: rr
++ priority: 3
++ iothreadsched:
++ - scheduler: idle
++ - scheduler: batch
++ iothreads: 2,3
++ emulatorsched:
++ - scheduler: batch
++ cachetune:
++ 0-3: # vcpus set
++ 0: # cache id
++ level: 3
++ type: both
++ size: 4
++ 1:
++ level: 3
++ type: both
++ size: 6
++ monitor:
++ 1: 3
++ 0-3: 3
++ 4-5:
++ monitor:
++ 4: 3 # vcpus: level
++ 5: 3
++ memorytune:
++ 0-3: # vcpus set
++ 0: 60 # node id: bandwidth
++ 4-5:
++ 0: 60
++ iothreads: 4
++
++ .. versionadded:: Aluminium
++
++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
+ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
+- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
+- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
+- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
+- an integer.
++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``,
++ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure
++ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detail unit
++ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer.
+
+ .. code-block:: python
+
+@@ -1773,10 +1947,17 @@ def init(
+ 'current': 1g,
+ 'max': 1g,
+ 'slots': 10,
+- 'hard_limit': '1024'
+- 'soft_limit': '512m'
+- 'swap_hard_limit': '1g'
+- 'min_guarantee': '512mib'
++ 'hard_limit': '1024',
++ 'soft_limit': '512m',
++ 'swap_hard_limit': '1g',
++ 'min_guarantee': '512mib',
++ 'hugepages': [{'nodeset': '0-3,^2', 'size': '1g'}, {'nodeset': '2', 'size': '2m'}],
++ 'nosharepages': True,
++ 'locked': True,
++ 'source': 'file',
++ 'access': 'shared',
++ 'allocation': 'immediate',
++ 'discard': True
+ }
+
+ .. versionchanged:: Magnesium
+@@ -1872,6 +2053,232 @@ def init(
+
+ By default, the value will ``"hd"``.
+
++ :param numatune:
++ The optional numatune element provides details of how to tune the performance of a NUMA host by controlling the NUMA
++ policy for the domain process. The optional ``memory`` element specifies how to allocate memory for the domain process
++ on a NUMA host. ``memnode`` elements can specify memory allocation policies per each guest NUMA node. The definition
++ used in the dictionary can be found at :ref:`init-cpu-def`.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: python
++
++ {
++ 'memory': {'mode': 'strict', 'nodeset': '0-11'},
++ 'memnodes': {0: {'mode': 'strict', 'nodeset': 1}, 1: {'mode': 'preferred', 'nodeset': 2}}
++ }
++
++ :param hypervisor_features:
++ Enable or disable hypervisor-specific features on the virtual machine.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: yaml
++
++ hypervisor_features:
++ kvm-hint-dedicated: True
++
++ :param clock:
++ Configure the guest clock.
++ The value is a dictionary with the following keys:
++
++ adjustment
++ time adjustment in seconds or ``reset``
++
++ utc
++ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``.
++
++ timezone
++ synchronize the guest to the corresponding timezone
++
++ timers
++ a dictionary associating the timer name with its configuration.
++ This configuration is a dictionary with the properties ``track``, ``tickpolicy``,
++ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``.
++ See `libvirt time keeping documentation `_ for the possible values.
++
++ .. versionadded:: Aluminium
++
++ Set the clock to local time using an offset in seconds:
++
++ .. code-block:: yaml
++
++ clock:
++ adjustment: 3600
++ utc: False
++
++ Set the clock to a specific time zone:
++
++ .. code-block:: yaml
++
++ clock:
++ timezone: CEST
++
++ Tweak guest timers:
++
++ .. code-block:: yaml
++
++ clock:
++ timers:
++ tsc:
++ frequency: 3504000000
++ mode: native
++ rtc:
++ track: wall
++ tickpolicy: catchup
++ slew: 4636
++ threshold: 123
++ limit: 2342
++ hpet:
++ present: False
++
++ :param serials:
++ Dictionary providing details on the serial connections to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
++ :param consoles:
++ Dictionary providing details on the console devices to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
++ .. _init-cpu-def:
++
++ .. rubric:: cpu parameters definition
++
++ The cpu parameters dictionary can contain the following properties:
++
++ cpuset
++ a comma-separated list of physical CPU numbers that the domain process and virtual CPUs can be pinned to by default,
++ e.g. ``1-4,^3`` where CPU 3 is excluded from the set.
++
++ current
++ the number of virtual cpus available at startup
++
++ placement
++ indicates the CPU placement mode for the domain process. The value can be either ``static`` or ``auto``
++
++ vcpus
++ specifies the state of individual vcpus. Possible attributes for each individual vcpu include ``id``, ``enabled``,
++ ``hotpluggable`` and ``order``. Valid ``ids`` are from 0 to the maximum vCPU count minus 1. ``enabled`` takes
++ boolean values which control the state of the vcpu. ``hotpluggable`` takes a boolean value which controls whether
++ the given vCPU can be hotplugged and hotunplugged. ``order`` takes an integer value which specifies the order to add
++ the online vCPUs.
++
++ match
++ The cpu ``match`` attribute specifies how strictly the virtual CPU provided to the guest matches the CPU
++ requirements, possible values are ``minimum``, ``exact`` or ``strict``.
++
++ check
++ The optional cpu ``check`` attribute can be used to request a specific way of checking whether the virtual
++ CPU matches the specification, possible values are ``none``, ``partial`` and ``full``.
++
++ mode
++ The optional cpu ``mode`` attribute may be used to make it easier to configure a guest CPU to be as close
++ to the host CPU as possible; possible values are ``custom``, ``host-model`` and ``host-passthrough``.
++
++ model
++ specifies the CPU model requested by the guest. An optional ``fallback`` attribute can be used to forbid libvirt
++ from falling back to the closest model supported by the hypervisor; possible values are ``allow`` or ``forbid``.
++ The ``vendor_id`` attribute can be used to set the vendor id seen by the guest; it must be exactly 12 characters long.
++
++ vendor
++ specifies CPU vendor requested by the guest.
++
++ topology
++ specifies the requested topology of the virtual CPU provided to the guest. Four possible attributes, ``sockets``, ``dies``,
++ ``cores``, and ``threads``, accept non-zero positive integer values. They refer to the number of CPU sockets per
++ NUMA node, number of dies per socket, number of cores per die, and number of threads per core, respectively.
++
++ features
++ A dictionary containing a set of cpu features to fine-tune features provided by the selected CPU model. Use the cpu
++ feature ``name`` as the key and the ``policy`` as the value. The ``policy`` attribute takes ``force``, ``require``,
++ ``optional``, ``disable`` or ``forbid``.
++
++ cache
++ describes the virtual CPU cache. The optional ``level`` attribute takes an integer value which describes the cache
++ level. The ``mode`` attribute supports three possible values: ``emulate``, ``passthrough`` and ``disable``.
++
++ numa
++ specifies the guest NUMA topology. The ``cell`` element specifies a NUMA cell or NUMA node, ``cpus`` specifies the
++ CPU or range of CPUs that are part of the node, and ``memory`` specifies the size of the node memory. All cells
++ should have an ``id`` attribute in case referring to some cell is necessary in the code. The optional
++ ``memAccess`` attribute controls whether the memory is to be mapped as ``shared`` or ``private``, and the
++ ``discard`` attribute fine-tunes the discard feature for the given NUMA node; possible values are ``True`` or
++ ``False``. The ``distances`` element defines the distance between NUMA cells and its ``sibling`` sub-element is
++ used to specify the distance value between sibling NUMA cells.
++
++ vcpupin
++ The optional vcpupin element specifies which of host's physical CPUs the domain vCPU will be pinned to.
++
++ emulatorpin
++ The optional emulatorpin element specifies which of host physical CPUs the "emulator", a subset of a domain not
++ including vCPU or iothreads will be pinned to.
++
++ iothreadpin
++ The optional iothreadpin element specifies which of host physical CPUs the IOThreads will be pinned to.
++
++ shares
++ The optional shares element specifies the proportional weighted share for the domain.
++
++ period
++ The optional period element specifies the enforcement interval (unit: microseconds).
++
++ quota
++ The optional quota element specifies the maximum allowed bandwidth (unit: microseconds).
++
++ global_period
++ The optional global_period element specifies the enforcement CFS scheduler interval (unit: microseconds) for the
++ whole domain in contrast with period which enforces the interval per vCPU.
++
++ global_quota
++ The optional global_quota element specifies the maximum allowed bandwidth (unit: microseconds) within a period
++ for the whole domain.
++
++ emulator_period
++ The optional emulator_period element specifies the enforcement interval (unit: microseconds).
++
++ emulator_quota
++ The optional emulator_quota element specifies the maximum allowed bandwidth (unit: microseconds) for domain's
++ emulator threads (those excluding vCPUs).
++
++ iothread_period
++ The optional iothread_period element specifies the enforcement interval (unit: microseconds) for IOThreads.
++
++ iothread_quota
++ The optional iothread_quota element specifies the maximum allowed bandwidth (unit: microseconds) for IOThreads.
++
++ vcpusched
++ specify the scheduler type for vCPUs.
++ The value is a list of dictionaries with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``)
++ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer and the
++ ``vcpus`` value is a cpu set like ``1-4,^3,6`` or simply the vcpu id.
++
++ iothreadsched
++ specify the scheduler type for IO threads.
++ The value is a list of dictionaries with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``)
++ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer and the
++ ``vcpus`` value is a cpu set like ``1-4,^3,6`` or simply the vcpu id.
++
++ emulatorsched
++ specify the scheduler type (values batch, idle, fifo, rr) for the emulator.
++ The value is a dictionary with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``)
++ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer.
++
++ cachetune
++ Optional cachetune element can control allocations for CPU caches using the resctrl on the host.
++
++ monitor
++ The optional element monitor creates the cache monitor(s) for current cache allocation.
++
++ memorytune
++ Optional memorytune element can control allocations for memory bandwidth using the resctrl on the host.
++
++ iothreads
++ Number of threads for supported disk devices to perform I/O requests. iothread ids will be numbered from 1 to
++ the provided number (Default: None).
++
+ .. _init-boot-def:
+
+ .. rubric:: Boot parameters definition
+@@ -1932,6 +2339,33 @@ def init(
+ min_guarantee
+ the guaranteed minimum memory allocation for the guest
+
++ hugepages
++ memory allocated using ``hugepages`` instead of the normal native page size. It takes a list of
++ dictionaries with ``nodeset`` and ``size`` keys.
++ For example ``"hugepages": [{"nodeset": "1-4,^3", "size": "2m"}, {"nodeset": "3", "size": "1g"}]``.
++
++ nosharepages
++ boolean value to instruct hypervisor to disable shared pages (memory merge, KSM) for this domain
++
++ locked
++ boolean value; when set, memory pages belonging to the domain will be locked in the host's memory and the host will
++ not be allowed to swap them out, which might be required for some workloads such as real-time.
++
++ source
++ possible values are ``file`` which utilizes file memory backing, ``anonymous`` (the default) and ``memfd`` backing.
++ (QEMU/KVM only)
++
++ access
++ specify if the memory is to be ``shared`` or ``private``. This can be overridden per numa node by memAccess.
++
++ allocation
++ specify when to allocate the memory by supplying either ``immediate`` or ``ondemand``.
++
++ discard
++ boolean value to ensure the memory content is discarded just before the guest shuts down (or when a DIMM module is
++ unplugged). Please note that this is just an optimization and is not guaranteed to work in all cases
++ (e.g. when the hypervisor crashes). (QEMU/KVM only)
++
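++ For example, an illustrative sketch combining the memory backing options described above:
++
++ .. code-block:: yaml
++
++ nosharepages: True
++ locked: True
++ source: file
++ access: shared
++ allocation: immediate
++ discard: True
++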
+ .. _init-nic-def:
+
+ .. rubric:: Network Interfaces Definitions
+@@ -2051,6 +2485,10 @@ def init(
+ hostname_property: virt:hostname
+ sparse_volume: True
+
++ iothreads
++ When ``True`` dedicated threads will be used for the I/O of the disk.
++ (Default: ``False``)
++
+ .. _init-graphics-def:
+
+ .. rubric:: Graphics Definition
+@@ -2077,6 +2515,42 @@ def init(
+ By default, not setting the ``listen`` part of the dictionary will default to
+ listen on all addresses.
+
++ .. _init-chardevs-def:
++
++ .. rubric:: Serials and Consoles Definitions
++
++ Serial dictionaries can contain the following properties:
++
++ type
++ Type of the serial connection, like ``'tcp'``, ``'pty'``, ``'file'``, ``'udp'``, ``'dev'``,
++ ``'pipe'``, ``'unix'``.
++
++ path
++ Path to the source device. Can be a log file, a host character device to pass through,
++ a unix socket, a named pipe path.
++
++ host
++ The serial UDP or TCP host name.
++
++ port
++ The serial UDP or TCP port number.
++ (Default: 23023)
++
++ protocol
++ Name of the TCP connection protocol.
++ (Default: telnet)
++
++ tls
++ Boolean value indicating whether to use the hypervisor's TLS certificate environment for TCP devices.
++
++ target_port
++ The guest device port number, starting from 0.
++
++ target_type
++ The guest device type. Common values are ``serial``, ``virtio`` or ``usb-serial``, but more are documented in
++ `the libvirt documentation `_.
++
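++ For example, an illustrative sketch defining a telnet TCP serial device (reusing the documented default port) and
++ a PTY console:
++
++ .. code-block:: yaml
++
++ serials:
++   - type: tcp
++     port: 23023
++     protocol: telnet
++ consoles:
++   - type: pty
++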
+ .. rubric:: CLI Example
+
+ .. code-block:: bash
+@@ -2226,6 +2700,11 @@ def init(
+ graphics,
+ boot,
+ boot_dev,
++ numatune,
++ hypervisor_features,
++ clock,
++ serials,
++ consoles,
+ stop_on_reboot,
+ **kwargs
+ )
+@@ -2249,19 +2728,15 @@ def _disks_equal(disk1, disk2):
+ """
+ target1 = disk1.find("target")
+ target2 = disk2.find("target")
+- source1 = (
+- disk1.find("source")
+- if disk1.find("source") is not None
+- else ElementTree.Element("source")
+- )
+- source2 = (
+- disk2.find("source")
+- if disk2.find("source") is not None
+- else ElementTree.Element("source")
+- )
+
+- source1_dict = xmlutil.to_dict(source1, True)
+- source2_dict = xmlutil.to_dict(source2, True)
++ disk1_dict = xmlutil.to_dict(disk1, True)
++ disk2_dict = xmlutil.to_dict(disk2, True)
++
++ source1_dict = disk1_dict.get("source", {})
++ source2_dict = disk2_dict.get("source", {})
++
++ io1 = disk1_dict.get("driver", {}).get("io", "native")
++ io2 = disk2_dict.get("driver", {}).get("io", "native")
+
+ # Remove the index added by libvirt in the source for backing chain
+ if source1_dict:
+@@ -2276,6 +2751,7 @@ def _disks_equal(disk1, disk2):
+ and target1.get("bus") == target2.get("bus")
+ and disk1.get("device", "disk") == disk2.get("device", "disk")
+ and target1.get("dev") == target2.get("dev")
++ and io1 == io2
+ )
+
+
+@@ -2443,6 +2919,101 @@ def _diff_graphics_lists(old, new):
+ return _diff_lists(old, new, _graphics_equal)
+
+
++def _expand_cpuset(cpuset):
++ """
++ Expand the libvirt cpuset and nodeset values into a list of cpu/node IDs
++ """
++ if cpuset is None:
++ return None
++
++ if isinstance(cpuset, int):
++ return str(cpuset)
++
++ result = set()
++ toremove = set()
++ for part in cpuset.split(","):
++ m = re.match("([0-9]+)-([0-9]+)", part)
++ if m:
++ result |= set(range(int(m.group(1)), int(m.group(2)) + 1))
++ elif part.startswith("^"):
++ toremove.add(int(part[1:]))
++ else:
++ result.add(int(part))
++ cpus = list(result - toremove)
++ cpus.sort()
++ cpus = [str(cpu) for cpu in cpus]
++ return ",".join(cpus)
++
++
++def _normalize_cpusets(desc, data):
++ """
++ Expand the cpusets that can't be expanded by the change_xml() function,
++ namely the ones that are used as keys and in the middle of the XPath expressions.
++ """
++ # Normalize the cpusets keys in the XML
++ xpaths = ["cputune/cachetune", "cputune/cachetune/monitor", "cputune/memorytune"]
++ for xpath in xpaths:
++ nodes = desc.findall(xpath)
++ for node in nodes:
++ node.set("vcpus", _expand_cpuset(node.get("vcpus")))
++
++ # data paths to change:
++ # - cpu:tuning:cachetune:{id}:monitor:{sid}
++ # - cpu:tuning:memorytune:{id}
++ if not isinstance(data.get("cpu"), dict):
++ return
++ tuning = data["cpu"].get("tuning", {})
++ for child in ["cachetune", "memorytune"]:
++ if tuning.get(child):
++ new_item = dict()
++ for cpuset, value in tuning[child].items():
++ if child == "cachetune" and value.get("monitor"):
++ value["monitor"] = {
++ _expand_cpuset(monitor_cpus): monitor
++ for monitor_cpus, monitor in value["monitor"].items()
++ }
++ new_item[_expand_cpuset(cpuset)] = value
++ tuning[child] = new_item
++
++
++ def _serial_or_console_equal(old, new):
++ def _filter_serial_or_console(item):
++ """
++ Filter out elements to ignore when comparing items
++ """
++ return {
++ "type": item.attrib["type"],
++ "port": item.find("source").attrib["service"]
++ if item.find("source") is not None
++ else None,
++ "protocol": item.find("protocol").attrib["type"]
++ if item.find("protocol") is not None
++ else None,
++ }
++
++ return _filter_serial_or_console(old) == _filter_serial_or_console(new)
++
++
++def _diff_serial_list(old, new):
++ """
++ Compare serial definitions to extract the changes
++
++ :param old: list of ElementTree nodes representing the old serials
++ :param new: list of ElementTree nodes representing the new serials
++ """
++ return _diff_lists(old, new, _serial_or_console_equal)
++
++
++def _diff_console_list(old, new):
++ """
++ Compare console definitions to extract the changes
++
++ :param old: list of ElementTree nodes representing the old consoles
++ :param new: list of ElementTree nodes representing the new consoles
++ """
++ return _diff_lists(old, new, _serial_or_console_equal)
++
++
+ def update(
+ name,
+ cpu=0,
+@@ -2454,8 +3025,13 @@ def update(
+ graphics=None,
+ live=True,
+ boot=None,
++ numatune=None,
+ test=False,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ **kwargs
+ ):
+@@ -2463,13 +3039,20 @@ def update(
+ Update the definition of an existing domain.
+
+ :param name: Name of the domain to update
+- :param cpu: Number of virtual CPUs to assign to the virtual machine
+- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
++ :param cpu:
++ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure
++ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is
++ documented in :ref:`init-cpu-def`.
++
++ To update any cpu parameter, specify the new value for the corresponding tag. To remove an element or attribute,
++ pass a ``None`` object. Please note that ``None`` is mapped to ``null`` in YAML; use ``null`` in the sls file
++ instead.
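++
++ For instance, an illustrative sketch that raises the vCPU count and removes the topology element:
++
++ .. code-block:: yaml
++
++ cpu:
++   maximum: 2
++   topology: null
++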
++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
+ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
+- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
+- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
+- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
+- an integer.
++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``,
++ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure
++ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit
++ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer.
+
+ To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None``
+ is mapped to ``null`` in sls file, pass ``null`` in sls file instead.
+@@ -2538,6 +3121,30 @@ def update(
+
+ .. versionadded:: Magnesium
+
++ :param numatune:
++ The optional numatune element provides details on how to tune the performance of a NUMA host by controlling the NUMA
++ policy for the domain process. The optional ``memory`` element specifies how to allocate memory for the domain process
++ on a NUMA host. ``memnode`` elements can specify memory allocation policies for each guest NUMA node. The definition
++ used in the dictionary can be found at :ref:`init-cpu-def`.
++
++ To update any numatune parameters, specify the new value. To remove any ``numatune`` parameters, pass a None object,
++ for instance: 'numatune': ``None``. Please note that ``None`` is mapped to ``null`` in sls file, pass ``null`` in
++ sls file instead.
++
++ .. versionadded:: Aluminium
++
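++ For example, an illustrative sketch (node and cell ids depend on the host and guest topology):
++
++ .. code-block:: yaml
++
++ numatune:
++   memory:
++     mode: strict
++     nodeset: 0-11
++   memnodes:
++     0:
++       mode: strict
++       nodeset: 1
++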
++ :param serials:
++ Dictionary providing details on the serial connections to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
++ :param consoles:
++ Dictionary providing details on the console devices to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
+ :param stop_on_reboot:
+ If set to ``True`` the guest will stop instead of rebooting.
+ This is especially useful when creating a virtual machine with an installation cdrom or
+@@ -2550,6 +3157,69 @@ def update(
+
+ .. versionadded:: sodium
+
++ :param hypervisor_features:
++ Enable or disable hypervisor-specific features on the virtual machine.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: yaml
++
++ hypervisor_features:
++   kvm-hint-dedicated: True
++
++ :param clock:
++ Configure the guest clock.
++ The value is a dictionary with the following keys:
++
++ adjustment
++ time adjustment in seconds or ``reset``
++
++ utc
++ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``.
++
++ timezone
++ synchronize the guest to the corresponding timezone
++
++ timers
++ a dictionary associating the timer name with its configuration.
++ This configuration is a dictionary with the properties ``track``, ``tickpolicy``,
++ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``.
++ See `libvirt time keeping documentation `_ for the possible values.
++
++ .. versionadded:: Aluminium
++
++ Set the clock to local time using an offset in seconds:
++
++ .. code-block:: yaml
++
++ clock:
++   adjustment: 3600
++   utc: False
++
++ Set the clock to a specific time zone:
++
++ .. code-block:: yaml
++
++ clock:
++   timezone: CEST
++
++ Tweak guest timers:
++
++ .. code-block:: yaml
++
++ clock:
++   timers:
++     tsc:
++       frequency: 3504000000
++       mode: native
++     rtc:
++       track: wall
++       tickpolicy: catchup
++       slew: 4636
++       threshold: 123
++       limit: 2342
++     hpet:
++       present: False
++
+ :return:
+
+ Returns a dictionary indicating the status of what has been done. It is structured in
+@@ -2595,12 +3265,11 @@ def update(
+ boot = _handle_remote_boot_params(boot)
+ if boot.get("efi", None) is not None:
+ need_update = _handle_efi_param(boot, desc)
+-
+ new_desc = ElementTree.fromstring(
+ _gen_xml(
+ conn,
+ name,
+- cpu or 0,
++ cpu,
+ mem or 0,
+ all_disks,
+ _get_merged_nics(hypervisor, nic_profile, interfaces),
+@@ -2610,17 +3279,19 @@ def update(
+ graphics,
+ boot,
+ boot_dev,
+- stop_on_reboot,
++ numatune,
++ serial=serials,
++ consoles=consoles,
++ stop_on_reboot=stop_on_reboot,
+ **kwargs
+ )
+ )
+
+- # Update the cpu
+- cpu_node = desc.find("vcpu")
+- if cpu and int(cpu_node.text) != cpu:
+- cpu_node.text = str(cpu)
+- cpu_node.set("current", str(cpu))
+- need_update = True
++ if clock:
++ offset = "utc" if clock.get("utc", True) else "localtime"
++ if "timezone" in clock:
++ offset = "timezone"
++ clock["offset"] = offset
+
+ def _set_loader(node, value):
+ salt.utils.xmlutil.set_node_text(node, value)
+@@ -2631,20 +3302,110 @@ def update(
+ def _set_nvram(node, value):
+ node.set("template", value)
+
+- def _set_with_byte_unit(node, value):
+- node.text = str(value)
+- node.set("unit", "bytes")
++ def _set_with_byte_unit(attr_name=None):
++ def _setter(node, value):
++ if attr_name:
++ node.set(attr_name, str(value))
++ else:
++ node.text = str(value)
++ node.set("unit", "bytes")
++
++ return _setter
+
+ def _get_with_unit(node):
+ unit = node.get("unit", "KiB")
+ # _handle_unit treats bytes as invalid unit for the purpose of consistency
+ unit = unit if unit != "bytes" else "b"
+- value = node.get("memory") or node.text
++ value = node.get("memory") or node.get("size") or node.text
+ return _handle_unit("{}{}".format(value, unit)) if value else None
+
++ def _set_vcpu(node, value):
++ node.text = str(value)
++ node.set("current", str(value))
++
+ old_mem = int(_get_with_unit(desc.find("memory")) / 1024)
++ old_cpu = int(desc.find("./vcpu").text)
++
++ def _almost_equal(current, new):
++ if current is None or new is None:
++ return False
++ return abs(current - new) / current < 1e-03
++
++ def _yesno_attribute(path, xpath, attr_name, ignored=None):
++ return xmlutil.attribute(
++ path, xpath, attr_name, ignored, lambda v: "yes" if v else "no"
++ )
++
++ def _memory_parameter(path, xpath, attr_name=None, ignored=None):
++ entry = {
++ "path": path,
++ "xpath": xpath,
++ "convert": _handle_unit,
++ "get": _get_with_unit,
++ "set": _set_with_byte_unit(attr_name),
++ "equals": _almost_equal,
++ }
++ if attr_name:
++ entry["del"] = salt.utils.xmlutil.del_attribute(attr_name, ignored)
++ return entry
++
++ def _cpuset_parameter(path, xpath, attr_name=None, ignored=None):
++ def _set_cpuset(node, value):
++ if attr_name:
++ node.set(attr_name, value)
++ else:
++ node.text = value
++
++ entry = {
++ "path": path,
++ "xpath": xpath,
++ "convert": _expand_cpuset,
++ "get": lambda n: _expand_cpuset(n.get(attr_name) if attr_name else n.text),
++ "set": _set_cpuset,
++ }
++ if attr_name:
++ entry["del"] = salt.utils.xmlutil.del_attribute(attr_name, ignored)
++ return entry
+
+ # Update the kernel boot parameters
++ data = {k: v for k, v in locals().items() if bool(v)}
++ data["stop_on_reboot"] = stop_on_reboot
++ if boot_dev:
++ data["boot_dev"] = boot_dev.split()
++
++ # Set the missing optional attributes and timers to None in timers to help cleaning up
++ timer_names = [
++ "platform",
++ "hpet",
++ "kvmclock",
++ "pit",
++ "rtc",
++ "tsc",
++ "hypervclock",
++ "armvtimer",
++ ]
++ if data.get("clock", {}).get("timers"):
++ attributes = [
++ "track",
++ "tickpolicy",
++ "frequency",
++ "mode",
++ "present",
++ "slew",
++ "threshold",
++ "limit",
++ ]
++ for timer in data["clock"]["timers"].values():
++ for attribute in attributes:
++ if attribute not in timer:
++ timer[attribute] = None
++
++ for timer_name in timer_names:
++ if timer_name not in data["clock"]["timers"]:
++ data["clock"]["timers"][timer_name] = None
++
++ _normalize_cpusets(desc, data)
++
+ params_mapping = [
+ {
+ "path": "stop_on_reboot",
+@@ -2657,89 +3418,251 @@ def update(
+ {"path": "boot:loader", "xpath": "os/loader", "set": _set_loader},
+ {"path": "boot:nvram", "xpath": "os/nvram", "set": _set_nvram},
+ # Update the memory, note that libvirt outputs all memory sizes in KiB
++ _memory_parameter("mem", "memory"),
++ _memory_parameter("mem", "currentMemory"),
++ _memory_parameter("mem:max", "maxMemory"),
++ _memory_parameter("mem:boot", "memory"),
++ _memory_parameter("mem:current", "currentMemory"),
++ xmlutil.attribute("mem:slots", "maxMemory", "slots", ["unit"]),
++ _memory_parameter("mem:hard_limit", "memtune/hard_limit"),
++ _memory_parameter("mem:soft_limit", "memtune/soft_limit"),
++ _memory_parameter("mem:swap_hard_limit", "memtune/swap_hard_limit"),
++ _memory_parameter("mem:min_guarantee", "memtune/min_guarantee"),
++ xmlutil.attribute("boot_dev:{dev}", "os/boot[$dev]", "dev"),
++ _memory_parameter(
++ "mem:hugepages:{id}:size",
++ "memoryBacking/hugepages/page[$id]",
++ "size",
++ ["unit", "nodeset"],
++ ),
++ _cpuset_parameter(
++ "mem:hugepages:{id}:nodeset", "memoryBacking/hugepages/page[$id]", "nodeset"
++ ),
+ {
+- "path": "mem",
+- "xpath": "memory",
+- "convert": _handle_unit,
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem",
+- "xpath": "currentMemory",
+- "convert": _handle_unit,
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem:max",
+- "convert": _handle_unit,
+- "xpath": "maxMemory",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
++ "path": "mem:nosharepages",
++ "xpath": "memoryBacking/nosharepages",
++ "get": lambda n: n is not None,
++ "set": lambda n, v: None,
+ },
+ {
+- "path": "mem:boot",
+- "convert": _handle_unit,
+- "xpath": "memory",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem:current",
+- "convert": _handle_unit,
+- "xpath": "currentMemory",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
++ "path": "mem:locked",
++ "xpath": "memoryBacking/locked",
++ "get": lambda n: n is not None,
++ "set": lambda n, v: None,
+ },
++ xmlutil.attribute("mem:source", "memoryBacking/source", "type"),
++ xmlutil.attribute("mem:access", "memoryBacking/access", "mode"),
++ xmlutil.attribute("mem:allocation", "memoryBacking/allocation", "mode"),
++ {"path": "mem:discard", "xpath": "memoryBacking/discard"},
+ {
+- "path": "mem:slots",
+- "xpath": "maxMemory",
+- "get": lambda n: n.get("slots"),
+- "set": lambda n, v: n.set("slots", str(v)),
+- "del": salt.utils.xmlutil.del_attribute("slots", ["unit"]),
+- },
+- {
+- "path": "mem:hard_limit",
+- "convert": _handle_unit,
+- "xpath": "memtune/hard_limit",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem:soft_limit",
+- "convert": _handle_unit,
+- "xpath": "memtune/soft_limit",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem:swap_hard_limit",
+- "convert": _handle_unit,
+- "xpath": "memtune/swap_hard_limit",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "mem:min_guarantee",
+- "convert": _handle_unit,
+- "xpath": "memtune/min_guarantee",
+- "get": _get_with_unit,
+- "set": _set_with_byte_unit,
+- },
+- {
+- "path": "boot_dev:{dev}",
+- "xpath": "os/boot[$dev]",
+- "get": lambda n: n.get("dev"),
+- "set": lambda n, v: n.set("dev", v),
+- "del": salt.utils.xmlutil.del_attribute("dev"),
++ "path": "cpu",
++ "xpath": "vcpu",
++ "get": lambda n: int(n.text),
++ "set": _set_vcpu,
+ },
++ {"path": "cpu:maximum", "xpath": "vcpu", "get": lambda n: int(n.text)},
++ xmlutil.attribute("cpu:placement", "vcpu", "placement"),
++ _cpuset_parameter("cpu:cpuset", "vcpu", "cpuset"),
++ xmlutil.attribute("cpu:current", "vcpu", "current"),
++ xmlutil.attribute("cpu:match", "cpu", "match"),
++ xmlutil.attribute("cpu:mode", "cpu", "mode"),
++ xmlutil.attribute("cpu:check", "cpu", "check"),
++ {"path": "cpu:model:name", "xpath": "cpu/model"},
++ xmlutil.attribute("cpu:model:fallback", "cpu/model", "fallback"),
++ xmlutil.attribute("cpu:model:vendor_id", "cpu/model", "vendor_id"),
++ {"path": "cpu:vendor", "xpath": "cpu/vendor"},
++ xmlutil.attribute("cpu:topology:sockets", "cpu/topology", "sockets"),
++ xmlutil.attribute("cpu:topology:cores", "cpu/topology", "cores"),
++ xmlutil.attribute("cpu:topology:threads", "cpu/topology", "threads"),
++ xmlutil.attribute("cpu:cache:level", "cpu/cache", "level"),
++ xmlutil.attribute("cpu:cache:mode", "cpu/cache", "mode"),
++ xmlutil.attribute(
++ "cpu:features:{id}", "cpu/feature[@name='$id']", "policy", ["name"]
++ ),
++ _yesno_attribute(
++ "cpu:vcpus:{id}:enabled", "vcpus/vcpu[@id='$id']", "enabled", ["id"]
++ ),
++ _yesno_attribute(
++ "cpu:vcpus:{id}:hotpluggable",
++ "vcpus/vcpu[@id='$id']",
++ "hotpluggable",
++ ["id"],
++ ),
++ xmlutil.int_attribute(
++ "cpu:vcpus:{id}:order", "vcpus/vcpu[@id='$id']", "order", ["id"]
++ ),
++ _cpuset_parameter(
++ "cpu:numa:{id}:cpus", "cpu/numa/cell[@id='$id']", "cpus", ["id"]
++ ),
++ _memory_parameter(
++ "cpu:numa:{id}:memory", "cpu/numa/cell[@id='$id']", "memory", ["id"]
++ ),
++ _yesno_attribute(
++ "cpu:numa:{id}:discard", "cpu/numa/cell[@id='$id']", "discard", ["id"]
++ ),
++ xmlutil.attribute(
++ "cpu:numa:{id}:memAccess", "cpu/numa/cell[@id='$id']", "memAccess", ["id"]
++ ),
++ xmlutil.attribute(
++ "cpu:numa:{id}:distances:{sid}",
++ "cpu/numa/cell[@id='$id']/distances/sibling[@id='$sid']",
++ "value",
++ ["id"],
++ ),
++ {"path": "cpu:iothreads", "xpath": "iothreads"},
++ {"path": "cpu:tuning:shares", "xpath": "cputune/shares"},
++ {"path": "cpu:tuning:period", "xpath": "cputune/period"},
++ {"path": "cpu:tuning:quota", "xpath": "cputune/quota"},
++ {"path": "cpu:tuning:global_period", "xpath": "cputune/global_period"},
++ {"path": "cpu:tuning:global_quota", "xpath": "cputune/global_quota"},
++ {"path": "cpu:tuning:emulator_period", "xpath": "cputune/emulator_period"},
++ {"path": "cpu:tuning:emulator_quota", "xpath": "cputune/emulator_quota"},
++ {"path": "cpu:tuning:iothread_period", "xpath": "cputune/iothread_period"},
++ {"path": "cpu:tuning:iothread_quota", "xpath": "cputune/iothread_quota"},
++ _cpuset_parameter(
++ "cpu:tuning:vcpupin:{id}",
++ "cputune/vcpupin[@vcpu='$id']",
++ "cpuset",
++ ["vcpu"],
++ ),
++ _cpuset_parameter("cpu:tuning:emulatorpin", "cputune/emulatorpin", "cpuset"),
++ _cpuset_parameter(
++ "cpu:tuning:iothreadpin:{id}",
++ "cputune/iothreadpin[@iothread='$id']",
++ "cpuset",
++ ["iothread"],
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:vcpusched:{id}:scheduler",
++ "cputune/vcpusched[$id]",
++ "scheduler",
++ ["priority", "vcpus"],
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:vcpusched:{id}:priority", "cputune/vcpusched[$id]", "priority"
++ ),
++ _cpuset_parameter(
++ "cpu:tuning:vcpusched:{id}:vcpus", "cputune/vcpusched[$id]", "vcpus"
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:iothreadsched:{id}:scheduler",
++ "cputune/iothreadsched[$id]",
++ "scheduler",
++ ["priority", "iothreads"],
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:iothreadsched:{id}:priority",
++ "cputune/iothreadsched[$id]",
++ "priority",
++ ),
++ _cpuset_parameter(
++ "cpu:tuning:iothreadsched:{id}:iothreads",
++ "cputune/iothreadsched[$id]",
++ "iothreads",
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:emulatorsched:scheduler",
++ "cputune/emulatorsched",
++ "scheduler",
++ ["priority"],
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:emulatorsched:priority", "cputune/emulatorsched", "priority"
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:cachetune:{id}:monitor:{sid}",
++ "cputune/cachetune[@vcpus='$id']/monitor[@vcpus='$sid']",
++ "level",
++ ["vcpus"],
++ ),
++ xmlutil.attribute(
++ "cpu:tuning:memorytune:{id}:{sid}",
++ "cputune/memorytune[@vcpus='$id']/node[@id='$sid']",
++ "bandwidth",
++ ["id", "vcpus"],
++ ),
++ xmlutil.attribute("clock:offset", "clock", "offset"),
++ xmlutil.attribute("clock:adjustment", "clock", "adjustment", convert=str),
++ xmlutil.attribute("clock:timezone", "clock", "timezone"),
+ ]
+
+- data = {k: v for k, v in locals().items() if bool(v)}
+- data["stop_on_reboot"] = stop_on_reboot
+- if boot_dev:
+- data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
++ for timer in timer_names:
++ params_mapping += [
++ xmlutil.attribute(
++ "clock:timers:{}:track".format(timer),
++ "clock/timer[@name='{}']".format(timer),
++ "track",
++ ["name"],
++ ),
++ xmlutil.attribute(
++ "clock:timers:{}:tickpolicy".format(timer),
++ "clock/timer[@name='{}']".format(timer),
++ "tickpolicy",
++ ["name"],
++ ),
++ xmlutil.int_attribute(
++ "clock:timers:{}:frequency".format(timer),
++ "clock/timer[@name='{}']".format(timer),
++ "frequency",
++ ["name"],
++ ),
++ xmlutil.attribute(
++ "clock:timers:{}:mode".format(timer),
++ "clock/timer[@name='{}']".format(timer),
++ "mode",
++ ["name"],
++ ),
++ _yesno_attribute(
++ "clock:timers:{}:present".format(timer),
++ "clock/timer[@name='{}']".format(timer),
++ "present",
++ ["name"],
++ ),
++ ]
++ for attr in ["slew", "threshold", "limit"]:
++ params_mapping.append(
++ xmlutil.int_attribute(
++ "clock:timers:{}:{}".format(timer, attr),
++ "clock/timer[@name='{}']/catchup".format(timer),
++ attr,
++ )
++ )
++
++ for attr in ["level", "type", "size"]:
++ params_mapping.append(
++ xmlutil.attribute(
++ "cpu:tuning:cachetune:{id}:{sid}:" + attr,
++ "cputune/cachetune[@vcpus='$id']/cache[@id='$sid']",
++ attr,
++ ["id", "unit", "vcpus"],
++ )
++ )
++
++ # update NUMA host policy
++ if hypervisor in ["qemu", "kvm"]:
++ params_mapping += [
++ xmlutil.attribute("numatune:memory:mode", "numatune/memory", "mode"),
++ _cpuset_parameter("numatune:memory:nodeset", "numatune/memory", "nodeset"),
++ xmlutil.attribute(
++ "numatune:memnodes:{id}:mode",
++ "numatune/memnode[@cellid='$id']",
++ "mode",
++ ["cellid"],
++ ),
++ _cpuset_parameter(
++ "numatune:memnodes:{id}:nodeset",
++ "numatune/memnode[@cellid='$id']",
++ "nodeset",
++ ["cellid"],
++ ),
++ xmlutil.attribute(
++ "hypervisor_features:kvm-hint-dedicated",
++ "features/kvm/hint-dedicated",
++ "state",
++ convert=lambda v: "on" if v else "off",
++ ),
++ ]
++
+ need_update = (
+ salt.utils.xmlutil.change_xml(desc, data, params_mapping) or need_update
+ )
+@@ -2750,6 +3673,8 @@ def update(
+ "disk": ["disks", "disk_profile"],
+ "interface": ["interfaces", "nic_profile"],
+ "graphics": ["graphics"],
++ "serial": ["serial"],
++ "console": ["console"],
+ }
+ changes = {}
+ for dev_type in parameters:
+@@ -2787,7 +3712,6 @@ def update(
+ _qemu_image_create(all_disks[idx])
+ elif item in changes["disk"]["new"] and not source_file:
+ _disk_volume_create(conn, all_disks[idx])
+-
+ if not test:
+ xml_desc = ElementTree.tostring(desc)
+ log.debug("Update virtual machine definition: %s", xml_desc)
+@@ -2803,14 +3727,18 @@ def update(
+ commands = []
+ removable_changes = []
+ if domain.isActive() and live:
+- if cpu:
+- commands.append(
+- {
+- "device": "cpu",
+- "cmd": "setVcpusFlags",
+- "args": [cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE],
+- }
+- )
++ if cpu and (
++ isinstance(cpu, int) or isinstance(cpu, dict) and cpu.get("maximum")
++ ):
++ new_cpu = cpu.get("maximum") if isinstance(cpu, dict) else cpu
++ if old_cpu != new_cpu and new_cpu is not None:
++ commands.append(
++ {
++ "device": "cpu",
++ "cmd": "setVcpusFlags",
++ "args": [new_cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE],
++ }
++ )
+ if mem:
+ if isinstance(mem, dict):
+ # setMemoryFlags takes memory amount in KiB
+@@ -2822,7 +3750,7 @@ def update(
+ elif isinstance(mem, int):
+ new_mem = int(mem * 1024)
+
+- if old_mem != new_mem and new_mem is not None:
++ if not _almost_equal(old_mem, new_mem) and new_mem is not None:
+ commands.append(
+ {
+ "device": "mem",
+@@ -4402,7 +5330,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
+ directories.add(os.path.dirname(disks[disk]["file"]))
+ else:
+ # We may have a volume to delete here
+- matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"],)
+ matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"])
+ if matcher:
+ pool_name = matcher.group("pool")
+ pool = None
+diff --git a/salt/states/virt.py b/salt/states/virt.py
+index 20ea1c25f1..784cdca73c 100644
+--- a/salt/states/virt.py
++++ b/salt/states/virt.py
+@@ -287,8 +287,13 @@ def defined(
+ os_type=None,
+ arch=None,
+ boot=None,
++ numatune=None,
+ update=True,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ live=True,
+ ):
+@@ -298,26 +303,151 @@ def defined(
+ .. versionadded:: sodium
+
+ :param name: name of the virtual machine to run
+- :param cpu: number of CPUs for the virtual machine to create
+- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
++ :param cpu:
++ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure
++ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is
++ documented in :ref:`init-cpu-def`.
++
++ .. code-block:: yaml
++
++ cpu:
++   placement: static
++   cpuset: 0-11
++   current: 5
++   maximum: 12
++   vcpus:
++     0:
++       enabled: 'yes'
++       hotpluggable: 'no'
++       order: 1
++     1:
++       enabled: 'no'
++       hotpluggable: 'yes'
++   match: minimum
++   mode: custom
++   check: full
++   vendor: Intel
++   model:
++     name: core2duo
++     fallback: allow
++     vendor_id: GenuineIntel
++   topology:
++     sockets: 1
++     cores: 12
++     threads: 1
++   cache:
++     level: 3
++     mode: emulate
++   feature:
++     policy: optional
++     name: lahf_lm
++   numa:
++     0:
++       cpus: 0-3
++       memory: 1g
++       discard: 'yes'
++       distances:
++         0: 10  # sibling id : value
++         1: 21
++         2: 31
++         3: 41
++     1:
++       cpus: 4-6
++       memory: 1g
++       memAccess: shared
++       distances:
++         0: 21
++         1: 10
++         2: 21
++         3: 31
++   tuning:
++     vcpupin:
++       0: 1-4,^2  # vcpuid : cpuset
++       1: 0,1
++       2: 2,3
++       3: 0,4
++     emulatorpin: 1-3
++     iothreadpin:
++       1: 5,6  # iothread id: cpuset
++       2: 7,8
++     shares: 2048
++     period: 1000000
++     quota: -1
++     global_period: 1000000
++     global_quota: -1
++     emulator_period: 1000000
++     emulator_quota: -1
++     iothread_period: 1000000
++     iothread_quota: -1
++     vcpusched:
++       - scheduler: fifo
++         priority: 1
++       - scheduler: fifo
++         priority: 2
++         vcpus: 1-3
++       - scheduler: rr
++         priority: 3
++         vcpus: 4
++     iothreadsched:
++       - scheduler: batch
++         iothreads: 2
++     emulatorsched:
++       scheduler: idle
++     cachetune:
++       0-3:  # vcpus set
++         0:  # cache id
++           level: 3
++           type: both
++           size: 4
++         1:
++           level: 3
++           type: both
++           size: 6
++         monitor:
++           1: 3
++           0-3: 3
++       4-5:
++         monitor:
++           4: 3  # vcpus: level
++           5: 3
++     memorytune:
++       0-3:  # vcpus set
++         0: 60  # node id: bandwidth
++       4-5:
++         0: 60
++   iothreads: 4
++
++ .. versionadded:: Aluminium
++
++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
+ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
+- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
+- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
+- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
+- an integer.
++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``,
++ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure
++ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit
++ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer.
+
+- .. code-block:: python
++ .. code-block:: yaml
+
+- {
+- 'boot': 1g,
+- 'current': 1g,
+- 'max': 1g,
+- 'slots': 10,
+- 'hard_limit': '1024'
+- 'soft_limit': '512m'
+- 'swap_hard_limit': '1g'
+- 'min_guarantee': '512mib'
+- }
++ boot: 1g
++ current: 1g
++ max: 1g
++ slots: 10
++ hard_limit: 1024
++ soft_limit: 512m
++ swap_hard_limit: 1g
++ min_guarantee: 512mib
++ hugepages:
++   - size: 2m
++   - nodeset: 0-2
++     size: 1g
++   - nodeset: 3
++     size: 2g
++ nosharepages: True
++ locked: True
++ source: file
++ access: shared
++ allocation: immediate
++ discard: True
+
+ .. versionchanged:: Magnesium
+
+@@ -380,6 +510,77 @@ def defined(
+
+ .. versionadded:: Magnesium
+
++ :param numatune:
++ The optional numatune element provides details on how to tune the performance of a NUMA host by controlling the NUMA
++ policy for the domain process. The optional ``memory`` element specifies how to allocate memory for the domain process
++ on a NUMA host. ``memnode`` elements can specify memory allocation policies for each guest NUMA node. The definition
++ used in the dictionary can be found at :ref:`init-cpu-def`.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: python
++
++ {
++ 'memory': {'mode': 'strict', 'nodeset': '0-11'},
++ 'memnodes': {0: {'mode': 'strict', 'nodeset': 1}, 1: {'mode': 'preferred', 'nodeset': 2}}
++ }
++
++ :param hypervisor_features:
++ Enable or disable hypervisor-specific features on the virtual machine.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: yaml
++
++ hypervisor_features:
++   kvm-hint-dedicated: True
++
++ :param clock:
++ Configure the guest clock.
++ The value is a dictionary with the following keys:
++
++ adjustment
++ time adjustment in seconds or ``reset``
++
++ utc
++ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``.
++
++ timezone
++ synchronize the guest to the corresponding timezone
++
++ timers
++ a dictionary associating the timer name with its configuration.
++ This configuration is a dictionary with the properties ``track``, ``tickpolicy``,
++ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``.
++ See `libvirt time keeping documentation `_ for the possible values.
++
++ .. versionadded:: Aluminium
++
++ Set the clock to local time using an offset in seconds:
++
++ .. code-block:: yaml
++
++ clock:
++   adjustment: 3600
++   utc: False
++
++ Set the clock to a specific time zone:
++
++ .. code-block:: yaml
++
++ clock:
++   timezone: CEST
++
++ :param serials:
++ Dictionary providing details on the serial connections to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
++ :param consoles:
++ Dictionary providing details on the console devices to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
+ :param stop_on_reboot:
+ If set to ``True`` the guest will stop instead of rebooting.
+ This is especially useful when creating a virtual machine with an installation cdrom or
+@@ -456,8 +657,13 @@ def defined(
+ username=username,
+ password=password,
+ boot=boot,
++ numatune=numatune,
++ serials=serials,
++ consoles=consoles,
+ test=__opts__["test"],
+ boot_dev=boot_dev,
++ hypervisor_features=hypervisor_features,
++ clock=clock,
+ stop_on_reboot=stop_on_reboot,
+ )
+ ret["changes"][name] = status
+@@ -492,8 +698,13 @@ def defined(
+ username=username,
+ password=password,
+ boot=boot,
++ numatune=numatune,
++ serials=serials,
++ consoles=consoles,
+ start=False,
+ boot_dev=boot_dev,
++ hypervisor_features=hypervisor_features,
++ clock=clock,
+ stop_on_reboot=stop_on_reboot,
+ )
+ ret["changes"][name] = {"definition": True}
+@@ -528,6 +739,11 @@ def running(
+ arch=None,
+ boot=None,
+ boot_dev=None,
++ numatune=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ ):
+ """
+@@ -536,13 +752,20 @@ def running(
+ .. versionadded:: 2016.3.0
+
+ :param name: name of the virtual machine to run
+- :param cpu: number of CPUs for the virtual machine to create
+- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
++ :param cpu:
++ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure
++ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is
++ documented in :ref:`init-cpu-def`.
++
++ To update any cpu parameter, specify the new value for the corresponding tag. To remove an element or attribute,
++ pass a ``None`` object. Please note that ``None`` is mapped to ``null`` in YAML; use ``null`` in the sls file
++ instead.
++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
+ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
+- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
+- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
+- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
+- an integer.
++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``,
++ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure
++ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit
++ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer.
+
+ To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None``
+ is mapped to ``null`` in sls file, pass ``null`` in sls file instead.
+@@ -638,6 +861,16 @@ def running(
+ pass a None object, for instance: 'kernel': ``None``.
+
+ .. versionadded:: 3000
++
++ :param serials:
++ Dictionary providing details on the serial connections to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
++
++ :param consoles:
++ Dictionary providing details on the console devices to create. (Default: ``None``)
++ See :ref:`init-chardevs-def` for more details on the possible values.
++
++ .. versionadded:: Aluminium
+
+ :param boot:
+ Specifies kernel for the virtual machine, as well as boot parameters
+@@ -664,6 +897,18 @@ def running(
+
+ .. versionadded:: Magnesium
+
++ :param numatune:
++ The optional numatune element provides details on how to tune the performance of a NUMA host by controlling the NUMA
++ policy for the domain process. The optional ``memory`` element specifies how to allocate memory for the domain process
++ on a NUMA host. ``memnode`` elements can specify memory allocation policies for each guest NUMA node. The definition
++ used in the dictionary can be found at :ref:`init-cpu-def`.
++
++ To update any numatune parameters, specify the new value. To remove any ``numatune`` parameters, pass a None object,
++ for instance: 'numatune': ``None``. Please note that ``None`` is mapped to ``null`` in sls file, pass ``null`` in
++ sls file instead.
++
++ .. versionadded:: Aluminium
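++
++ For example, an illustrative sketch (node ids depend on the host topology):
++
++ .. code-block:: yaml
++
++ numatune:
++   memory:
++     mode: strict
++     nodeset: 0-11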
++
+ :param stop_on_reboot:
+ If set to ``True`` the guest will stop instead of rebooting.
+ This is especially useful when creating a virtual machine with an installation cdrom or
+@@ -672,6 +917,51 @@ def running(
+
+ .. versionadded:: Aluminium
+
++ :param hypervisor_features:
++ Enable or disable hypervisor-specific features on the virtual machine.
++
++ .. versionadded:: Aluminium
++
++ .. code-block:: yaml
++
++ hypervisor_features:
++   kvm-hint-dedicated: True
++
++ :param clock:
++ Configure the guest clock.
++ The value is a dictionary with the following keys:
++
++ adjustment
++ time adjustment in seconds or ``reset``
++
++ utc
++ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``.
++
++ timezone
++ synchronize the guest to the corresponding timezone
++
++ timers
++ a dictionary associating the timer name with its configuration.
++ This configuration is a dictionary with the properties ``track``, ``tickpolicy``,
++ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``.
++ See `libvirt time keeping documentation `_ for the possible values.
++
++ .. versionadded:: Aluminium
++
++ Set the clock to local time using an offset in seconds:
++
++ .. code-block:: yaml
++
++ clock:
++   adjustment: 3600
++   utc: False
++
++ Set the clock to a specific time zone:
++
++ .. code-block:: yaml
++
++ clock:
++   timezone: CEST
++
+ .. rubric:: Example States
+
+ Make sure an already-defined virtual machine called ``domain_name`` is running:
+@@ -740,10 +1030,15 @@ def running(
+ boot=boot,
+ update=update,
+ boot_dev=boot_dev,
++ numatune=numatune,
++ hypervisor_features=hypervisor_features,
++ clock=clock,
+ stop_on_reboot=stop_on_reboot,
+ connection=connection,
+ username=username,
+ password=password,
++ serials=serials,
++ consoles=consoles,
+ )
+
+ result = True if not __opts__["test"] else None
+diff --git a/salt/templates/virt/libvirt_chardevs.jinja b/salt/templates/virt/libvirt_chardevs.jinja
+new file mode 100644
+index 0000000000..1795277180
+--- /dev/null
++++ b/salt/templates/virt/libvirt_chardevs.jinja
+@@ -0,0 +1,16 @@
++{% macro chardev(dev) -%}
++ {% if dev.type == "unix" -%}
++
++ {% elif dev.type in ["udp", "tcp"] -%}
++
++ {% elif dev.type in ["pipe", "dev", "pty", "file"] and dev.path -%}
++
++ {%- endif %}
++ {% if dev.type == "tcp" -%}
++
++ {%- endif %}
++ {% if "target_port" in dev or "target_type" in dev -%}
++
++ {%- endif %}
++{%- endmacro %}
+diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
+index fb4c9f40d0..6ac3e867b9 100644
+--- a/salt/templates/virt/libvirt_domain.jinja
++++ b/salt/templates/virt/libvirt_domain.jinja
+@@ -1,32 +1,220 @@
+ {%- import 'libvirt_disks.jinja' as libvirt_disks -%}
++{%- macro opt_attribute(obj, name, conv=none) %}
++{%- if obj.get(name) is not none %} {{ name }}='{{ obj[name] if conv is none else conv(obj[name]) }}'{% endif -%}
++{%- endmacro %}
++{%- import 'libvirt_chardevs.jinja' as libvirt_chardevs -%}
+
+ {{ name }}
+- {{ cpu }}
++ {%- if cpu %}
++ {{ cpu.get('maximum', '') }}
++ {%- endif %}
++ {%- if cpu.get('vcpus') %}
++
++ {%- for vcpu_id in cpu["vcpus"].keys() %}
++
++ {%- endfor %}
++
++ {%- endif %}
++ {%- if cpu %}
++
++ {%- if cpu.model %}
++ {{ cpu.model.get('name', '') }}
++ {%- endif %}
++ {%- if cpu.vendor %}
++ {{ cpu.get('vendor', '') }}
++ {%- endif %}
++ {%- if cpu.topology %}
++
++ {%- endif %}
++ {%- if cpu.cache %}
++
++ {%- endif %}
++ {%- if cpu.features %}
++ {%- for k, v in cpu.features.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.numa %}
++
++ {%- for numa_id in cpu.numa.keys() %}
++ {%- if cpu.numa.get(numa_id) %}
++
++ {%- if cpu.numa[numa_id].distances %}
++
++ {%- for sibling_id in cpu.numa[numa_id].distances %}
++
++ {%- endfor %}
++
++ {%- endif %}
++ |
++ {%- endif %}
++ {%- endfor %}
++
++ {%- endif %}
++
++ {%- if cpu.iothreads %}
++ {{ cpu.iothreads }}
++ {%- endif %}
++ {%- endif %}
++ {%- if cpu.tuning %}
++
++ {%- if cpu.tuning.vcpupin %}
++ {%- for vcpu_id, cpuset in cpu.tuning.vcpupin.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.emulatorpin %}
++
++ {%- endif %}
++ {%- if cpu.tuning.iothreadpin %}
++ {%- for thread_id, cpuset in cpu.tuning.iothreadpin.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.shares %}
++ {{ cpu.tuning.shares }}
++ {%- endif %}
++ {%- if cpu.tuning.period %}
++ {{ cpu.tuning.period }}
++ {%- endif %}
++ {%- if cpu.tuning.quota %}
++ {{ cpu.tuning.quota }}
++ {%- endif %}
++ {%- if cpu.tuning.global_period %}
++ {{ cpu.tuning.global_period }}
++ {%- endif %}
++ {%- if cpu.tuning.global_quota %}
++ {{ cpu.tuning.global_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.emulator_period %}
++ {{ cpu.tuning.emulator_period }}
++ {%- endif %}
++ {%- if cpu.tuning.emulator_quota %}
++ {{ cpu.tuning.emulator_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.iothread_period %}
++ {{ cpu.tuning.iothread_period }}
++ {%- endif %}
++ {%- if cpu.tuning.iothread_quota %}
++ {{ cpu.tuning.iothread_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.vcpusched %}
++ {%- for sched in cpu.tuning.vcpusched %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.iothreadsched %}
++ {%- for sched in cpu.tuning.iothreadsched %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.emulatorsched %}
++
++ {%- endif %}
++ {%- if cpu.tuning.cachetune %}
++ {%- for k, v in cpu.tuning.cachetune.items() %}
++
++ {%- for e, atrs in v.items() %}
++ {%- if e is number and atrs %}
++
++ {%- elif e is not number %}
++ {%- for atr, val in atrs.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- endfor %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.memorytune %}
++ {%- for vcpus, nodes in cpu.tuning.memorytune.items() %}
++
++ {%- for id, bandwidth in nodes.items() %}
++
++ {%- endfor %}
++
++ {%- endfor %}
++ {%- endif %}
++
++ {%- endif %}
+ {%- if mem.max %}
+- {{ mem.max }}
++ {{ to_kib(mem.max) }}
+ {%- endif %}
+ {%- if mem.boot %}
+- {{ mem.boot }}
++ {{ to_kib(mem.boot) }}
+ {%- endif %}
+ {%- if mem.current %}
+- {{ mem.current }}
++ {{ to_kib(mem.current) }}
+ {%- endif %}
+ {%- if mem %}
+
+ {%- if 'hard_limit' in mem and mem.hard_limit %}
+- {{ mem.hard_limit }}
++ {{ to_kib(mem.hard_limit) }}
+ {%- endif %}
+ {%- if 'soft_limit' in mem and mem.soft_limit %}
+- {{ mem.soft_limit }}
++ {{ to_kib(mem.soft_limit) }}
+ {%- endif %}
+ {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %}
+- {{ mem.swap_hard_limit }}
++ {{ to_kib(mem.swap_hard_limit) }}
+ {%- endif %}
+ {%- if 'min_guarantee' in mem and mem.min_guarantee %}
+- {{ mem.min_guarantee }}
++ {{ to_kib(mem.min_guarantee) }}
+ {%- endif %}
+
+ {%- endif %}
++ {%- if numatune %}
++
++ {%- if 'memory' in numatune and numatune.memory %}
++
++ {%- endif %}
++ {%- if 'memnodes' in numatune and numatune.memnodes %}
++ {%- for cell_id in numatune['memnodes'] %}
++
++ {%- endfor %}
++ {%- endif %}
++
++ {%- endif %}
++ {%- if mem %}
++
++ {%- if mem.hugepages %}
++
++ {%- for page in mem.hugepages %}
++
++ {%- endfor %}
++
++ {%- if mem.nosharepages %}
++
++ {%- endif %}
++ {%- if mem.locked %}
++
++ {%- endif %}
++ {%- if mem.source %}
++
++ {%- endif %}
++ {%- if mem.access %}
++
++ {%- endif %}
++ {%- if mem.allocation %}
++
++ {%- endif %}
++ {%- if mem.discard %}
++
++ {%- endif %}
++ {%- endif %}
++
++ {%- endif %}
+
+ {{ os_type }}
+ {% if boot %}
+@@ -50,6 +238,18 @@
+
+ {% endfor %}
+
++{%- if clock %}
++
++ {%- for timer_name in clock.timers %}
++ {%- set timer = clock.timers[timer_name] %}
++
++ {%- if "threshold" in timer or "slew" in timer or "limit" in timer %}
++
++ {%- endif %}
++
++ {%- endfor %}
++
++{%- endif %}
+ {{ on_reboot }}
+
+ {% for disk in disks %}
+@@ -69,7 +269,7 @@
+
+ {% endif %}
+ {% if disk.driver -%}
+-
++
+ {% endif %}
+
+ {% endfor %}
+@@ -104,35 +304,39 @@
+ address='{{ graphics.listen.address }}'
+ {% endif %}/>
+
+- {% endif %}
+- {% if serial_type == 'pty' %}
+-
+-
+-
+- {% if console %}
+-
+-
+-
+- {% endif %}
++
++ {% if graphics.type == "spice" -%}
++
++
++
++ {%- endif %}
+ {% endif %}
+
+- {% if serial_type == 'tcp' %}
+-
+-
+-
+-
++ {%- for serial in serials %}
++
++ {{ libvirt_chardevs.chardev(serial) }}
+
+- {% if console %}
+-
+-
+-
+-
+-
+- {% endif %}
+- {% endif %}
++ {%- endfor %}
+
++ {%- for console in consoles %}
++
++ {{ libvirt_chardevs.chardev(console) }}
++
++ {% endfor %}
++{%- if hypervisor in ["qemu", "kvm"] %}
++
++
++
++{%- endif %}
+
+
+
++
++
++{%- if hypervisor_features.get("kvm-hint-dedicated") %}
++
++
++
++{%- endif %}
+
+
+diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
+index d25f5c8da5..5c187ca7e5 100644
+--- a/salt/utils/xmlutil.py
++++ b/salt/utils/xmlutil.py
+@@ -157,18 +157,24 @@ def clean_node(parent_map, node, ignored=None):
+ :param parent_map: dictionary mapping each node to its parent
+ :param node: the node to clean
+ :param ignored: a list of ignored attributes.
++ :return: True if anything has been removed, False otherwise
+ """
+ has_text = node.text is not None and node.text.strip()
+ parent = parent_map.get(node)
++ removed = False
+ if (
+ len(set(node.attrib.keys()) - set(ignored or [])) == 0
+ and not list(node)
+ and not has_text
++ and parent
+ ):
+ parent.remove(node)
++ removed = True
+ # Clean parent nodes if needed
+ if parent is not None:
+- clean_node(parent_map, parent, ignored)
++ parent_cleaned = clean_node(parent_map, parent, ignored)
++ removed = removed or parent_cleaned
++ return removed
+
+
+ def del_text(parent_map, node):
+@@ -180,6 +186,7 @@ def del_text(parent_map, node):
+ parent = parent_map[node]
+ parent.remove(node)
+ clean_node(parent, node)
++ return True
+
+
+ def del_attribute(attribute, ignored=None):
+@@ -197,13 +204,54 @@ def del_attribute(attribute, ignored=None):
+
+ def _do_delete(parent_map, node):
+ if attribute not in node.keys():
+- return
++ return False
+ node.attrib.pop(attribute)
+ clean_node(parent_map, node, ignored)
++ return True
+
+ return _do_delete
+
+
++def attribute(path, xpath, attr_name, ignored=None, convert=None):
++ """
++ Helper function creating a change_xml mapping entry for a text XML attribute.
++
++ :param path: the path to the value in the data
++ :param xpath: the xpath to the node holding the attribute
++ :param attr_name: the attribute name
++ :param ignored: the list of attributes to ignore when cleaning up the node
++ :param convert: a function used to convert the value
++ """
++ entry = {
++ "path": path,
++ "xpath": xpath,
++ "get": lambda n: n.get(attr_name),
++ "set": lambda n, v: n.set(attr_name, str(v)),
++ "del": salt.utils.xmlutil.del_attribute(attr_name, ignored),
++ }
++ if convert:
++ entry["convert"] = convert
++ return entry
++
++
++def int_attribute(path, xpath, attr_name, ignored=None):
++ """
++ Helper function creating a change_xml mapping entry for a text XML integer attribute.
++
++ :param path: the path to the value in the data
++ :param xpath: the xpath to the node holding the attribute
++ :param attr_name: the attribute name
++ :param ignored: the list of attributes to ignore when cleaning up the node
++ """
++ return {
++ "path": path,
++ "xpath": xpath,
++ "get": lambda n: int(n.get(attr_name)) if n.get(attr_name) else None,
++ "set": lambda n, v: n.set(attr_name, str(v)),
++ "del": salt.utils.xmlutil.del_attribute(attr_name, ignored),
++ }
++
++
+ def change_xml(doc, data, mapping):
+ """
+ Change an XML ElementTree document according.
+@@ -237,6 +285,7 @@ def change_xml(doc, data, mapping):
+ del
+ function deleting the value in the XML.
+ Takes two parameters for the parent node and the node matched by the XPath.
++ Returns True if anything was removed, False otherwise.
+ Default is to remove the text value.
+ More cleanup may be performed, see the :py:func:`clean_node` function for details.
+
+@@ -281,8 +330,17 @@ def change_xml(doc, data, mapping):
+ continue
+
+ if new_value is not None:
++ # We need to increment ids from arrays since xpath starts at 1
++ converters = {
++ p: (lambda n: n + 1)
++ if "[${}]".format(p) in xpath
++ else (lambda n: n)
++ for p in placeholders
++ }
+ ctx = {
+- placeholder: value_item.get(placeholder, "")
++ placeholder: converters[placeholder](
++ value_item.get(placeholder, "")
++ )
+ for placeholder in placeholders
+ }
+ node_xpath = string.Template(xpath).substitute(ctx)
+@@ -299,7 +357,9 @@ def change_xml(doc, data, mapping):
+ if convert_fn:
+ new_value = convert_fn(new_value)
+
+- if str(current_value) != str(new_value):
++ # Allow custom comparison. Can be useful for almost equal numeric values
++ compare_fn = param.get("equals", lambda o, n: str(o) == str(n))
++ if not compare_fn(current_value, new_value):
+ set_fn(node, new_value)
+ need_update = True
+ else:
+@@ -307,17 +367,16 @@ def change_xml(doc, data, mapping):
+ del_fn = param.get("del", del_text)
+ parent_map = {c: p for p in doc.iter() for c in p}
+ for node in nodes:
+- del_fn(parent_map, node)
+- need_update = True
++ deleted = del_fn(parent_map, node)
++ need_update = need_update or deleted
+
+ # Clean the left over XML elements if there were placeholders
+- if placeholders and values[0].get("value") != []:
++ if placeholders and [v for v in values if v.get("value") != []]:
+ all_nodes = set(doc.findall(all_nodes_xpath))
+ to_remove = all_nodes - kept_nodes
+ del_fn = param.get("del", del_text)
+ parent_map = {c: p for p in doc.iter() for c in p}
+ for node in to_remove:
+- del_fn(parent_map, node)
+- need_update = True
+-
++ deleted = del_fn(parent_map, node)
++ need_update = need_update or deleted
+ return need_update
+diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py
+index 1c32ae12eb..ec56bdff24 100644
+--- a/tests/pytests/unit/modules/virt/conftest.py
++++ b/tests/pytests/unit/modules/virt/conftest.py
+@@ -189,3 +189,129 @@ def make_mock_storage_pool():
+ return mocked_pool
+
+ return _make_mock_storage_pool
++
++
++@pytest.fixture
++def make_capabilities():
++ def _make_capabilities():
++ mocked_conn = virt.libvirt.openAuth.return_value
++ mocked_conn.getCapabilities.return_value = """
++
++
++ 44454c4c-3400-105a-8033-b3c04f4b344a
++
++ x86_64
++ Nehalem
++ Intel
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ tcp
++ rdma
++
++
++
++
++
++ 12367120
++ 3091780
++ 0
++
++
++
++
++
++
++
++
++
++
++
++
++
++ |
++
++
++
++
++
++
++ apparmor
++ 0
++
++
++ dac
++ 0
++ +487:+486
++ +487:+486
++
++
++
++
++ hvm
++
++ 32
++ /usr/bin/qemu-system-i386
++ pc-i440fx-2.6
++ pc
++ pc-0.12
++
++
++ /usr/bin/qemu-kvm
++ pc-i440fx-2.6
++ pc
++ pc-0.12
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ hvm
++
++ 64
++ /usr/bin/qemu-system-x86_64
++ pc-i440fx-2.6
++ pc
++ pc-0.12
++
++
++ /usr/bin/qemu-kvm
++ pc-i440fx-2.6
++ pc
++ pc-0.12
++
++
++
++
++
++
++
++
++
++
++
++"""
++
++ return _make_capabilities
+diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py
+index 5f9b45ec9a..347c3bcd88 100644
+--- a/tests/pytests/unit/modules/virt/test_domain.py
++++ b/tests/pytests/unit/modules/virt/test_domain.py
+@@ -254,3 +254,338 @@ def test_get_disk_convert_volumes(make_mock_vm, make_mock_storage_pool):
+ "virtual size": 214748364800,
+ },
+ } == virt.get_disks("srv01")
++
++
++def test_update_approx_mem(make_mock_vm):
++ """
++ test virt.update with the memory parameter unchanged, though not exactly equal to the current value.
++ This may happen since libvirt sometimes rounds the memory value.
++ """
++ xml_def = """
++
++ my_vm
++ 3177680
++ 3177680
++ 1
++
++ hvm
++
++ restart
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ ret = virt.update("my_vm", mem={"boot": "3253941043B", "current": "3253941043B"})
++ assert not ret["definition"]
++
++
++def test_gen_hypervisor_features():
++ """
++ Test the virt._gen_xml hypervisor_features handling
++ """
++ xml_data = virt._gen_xml(
++ virt.libvirt.openAuth.return_value,
++ "hello",
++ 1,
++ 512,
++ {},
++ {},
++ "kvm",
++ "hvm",
++ "x86_64",
++ hypervisor_features={"kvm-hint-dedicated": True},
++ )
++ root = ET.fromstring(xml_data)
++ assert "on" == root.find("features/kvm/hint-dedicated").attrib["state"]
++
++
++def test_update_hypervisor_features(make_mock_vm):
++ """
++ Test changing the hypervisor features of a guest
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ linux
++ /usr/lib/grub2/x86_64-xen/grub.xen
++
++
++
++
++
++
++ restart
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ # Update with no change to the features
++ ret = virt.update("my_vm", hypervisor_features={"kvm-hint-dedicated": True})
++ assert not ret["definition"]
++
++ # Alter the features
++ ret = virt.update("my_vm", hypervisor_features={"kvm-hint-dedicated": False})
++ assert ret["definition"]
++ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
++ assert "off" == setxml.find("features/kvm/hint-dedicated").get("state")
++
++ # Add the features
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ linux
++ /usr/lib/grub2/x86_64-xen/grub.xen
++
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++ ret = virt.update("my_vm", hypervisor_features={"kvm-hint-dedicated": True})
++ assert ret["definition"]
++ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
++ assert "on" == setxml.find("features/kvm/hint-dedicated").get("state")
++
++
++def test_gen_clock():
++ """
++ Test the virt._gen_xml clock property
++ """
++ # Localtime with adjustment
++ xml_data = virt._gen_xml(
++ virt.libvirt.openAuth.return_value,
++ "hello",
++ 1,
++ 512,
++ {},
++ {},
++ "kvm",
++ "hvm",
++ "x86_64",
++ clock={"adjustment": 3600, "utc": False},
++ )
++ root = ET.fromstring(xml_data)
++ assert "localtime" == root.find("clock").get("offset")
++ assert "3600" == root.find("clock").get("adjustment")
++
++ # Specific timezone
++ xml_data = virt._gen_xml(
++ virt.libvirt.openAuth.return_value,
++ "hello",
++ 1,
++ 512,
++ {},
++ {},
++ "kvm",
++ "hvm",
++ "x86_64",
++ clock={"timezone": "CEST"},
++ )
++ root = ET.fromstring(xml_data)
++ assert "timezone" == root.find("clock").get("offset")
++ assert "CEST" == root.find("clock").get("timezone")
++
++ # UTC
++ xml_data = virt._gen_xml(
++ virt.libvirt.openAuth.return_value,
++ "hello",
++ 1,
++ 512,
++ {},
++ {},
++ "kvm",
++ "hvm",
++ "x86_64",
++ clock={"utc": True},
++ )
++ root = ET.fromstring(xml_data)
++ assert "utc" == root.find("clock").get("offset")
++
++ # Timers
++ xml_data = virt._gen_xml(
++ virt.libvirt.openAuth.return_value,
++ "hello",
++ 1,
++ 512,
++ {},
++ {},
++ "kvm",
++ "hvm",
++ "x86_64",
++ clock={
++ "timers": {
++ "tsc": {"frequency": 3504000000, "mode": "native"},
++ "rtc": {
++ "tickpolicy": "catchup",
++ "slew": 4636,
++ "threshold": 123,
++ "limit": 2342,
++ },
++ "hpet": {"present": False},
++ },
++ },
++ )
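++ # rtc catchup sub-values (slew, threshold, limit) should land on a nested <catchup/>
++ # element, and a boolean present=False maps to present="no"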
++ root = ET.fromstring(xml_data)
++ assert "utc" == root.find("clock").get("offset")
++ assert "3504000000" == root.find("clock/timer[@name='tsc']").get("frequency")
++ assert "native" == root.find("clock/timer[@name='tsc']").get("mode")
++ assert "catchup" == root.find("clock/timer[@name='rtc']").get("tickpolicy")
++ assert {"slew": "4636", "threshold": "123", "limit": "2342"} == root.find(
++ "clock/timer[@name='rtc']/catchup"
++ ).attrib
++ assert "no" == root.find("clock/timer[@name='hpet']").get("present")
++
++
++def test_update_clock(make_mock_vm):
++ """
++ test virt.update with clock parameter
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ linux
++ /usr/lib/grub2/x86_64-xen/grub.xen
++
++
++
++
++
++ restart
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ # Update with no change to the clock
++ ret = virt.update(
++ "my_vm",
++ clock={
++ "utc": False,
++ "adjustment": -3600,
++ "timers": {
++ "tsc": {"frequency": 3504000000, "mode": "native"},
++ "kvmclock": {"present": False},
++ },
++ },
++ )
++ assert not ret["definition"]
++
++ # Update
++ ret = virt.update(
++ "my_vm",
++ clock={
++ "timezone": "CEST",
++ "timers": {
++ "rtc": {
++ "track": "wall",
++ "tickpolicy": "catchup",
++ "slew": 4636,
++ "threshold": 123,
++ "limit": 2342,
++ },
++ "hpet": {"present": True},
++ },
++ },
++ )
++ assert ret["definition"]
++ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
++ assert "timezone" == setxml.find("clock").get("offset")
++ assert "CEST" == setxml.find("clock").get("timezone")
++ assert {"rtc", "hpet"} == {t.get("name") for t in setxml.findall("clock/timer")}
++ assert "catchup" == setxml.find("clock/timer[@name='rtc']").get("tickpolicy")
++ assert "wall" == setxml.find("clock/timer[@name='rtc']").get("track")
++ assert {"slew": "4636", "threshold": "123", "limit": "2342"} == setxml.find(
++ "clock/timer[@name='rtc']/catchup"
++ ).attrib
++ assert "yes" == setxml.find("clock/timer[@name='hpet']").get("present")
++
++ # Revert to UTC
++ ret = virt.update("my_vm", clock={"utc": True, "adjustment": None, "timers": None})
++ assert ret["definition"]
++ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
++ assert {"offset": "utc"} == setxml.find("clock").attrib
++ assert setxml.find("clock/timer") is None
++
++
++def test_update_stop_on_reboot_reset(make_mock_vm):
++ """
++ Test virt.update to remove the on_reboot=destroy flag
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++ destroy
++
++ hvm
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ ret = virt.update("my_vm")
++
++ assert ret["definition"]
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ assert "restart" == setxml.find("./on_reboot").text
++
++
++def test_update_stop_on_reboot(make_mock_vm):
++ """
++ Test virt.update to add the on_reboot=destroy flag
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ hvm
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ ret = virt.update("my_vm", stop_on_reboot=True)
++
++ assert ret["definition"]
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ assert "destroy" == setxml.find("./on_reboot").text
++
++
++def test_init_no_stop_on_reboot(make_capabilities):
++ """
++ Test virt.init to add the on_reboot=restart flag
++ """
++ make_capabilities()
++ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}):
++ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}):
++ virt.init("test_vm", 2, 2048, start=False)
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ assert "restart" == setxml.find("./on_reboot").text
++
++
++def test_init_stop_on_reboot(make_capabilities):
++ """
++ Test virt.init to add the on_reboot=destroy flag
++ """
++ make_capabilities()
++ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}):
++ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}):
++ virt.init("test_vm", 2, 2048, stop_on_reboot=True, start=False)
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ assert "destroy" == setxml.find("./on_reboot").text
+diff --git a/tests/pytests/unit/utils/test_xmlutil.py b/tests/pytests/unit/utils/test_xmlutil.py
+index 081cc64193..2bcaff3a17 100644
+--- a/tests/pytests/unit/utils/test_xmlutil.py
++++ b/tests/pytests/unit/utils/test_xmlutil.py
+@@ -16,6 +16,11 @@ def xml_doc():
+ <vcpus>
+ <vcpu id="1" enabled="yes"/>
+ </vcpus>
++ <memtune>
++ <hugepages>
++ <page size="128"/>
++ </hugepages>
++ </memtune>
+ </domain>
+ """
+ )
+@@ -36,6 +41,22 @@ def test_change_xml_text_nochange(xml_doc):
+ assert not ret
+
+
++def test_change_xml_equals_nochange(xml_doc):
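++ # the custom "equals" comparator treats 1023 and the stored 1024 as equal, so no change is made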
++ ret = xml.change_xml(
++ xml_doc,
++ {"mem": 1023},
++ [
++ {
++ "path": "mem",
++ "xpath": "memory",
++ "get": lambda n: int(n.text),
++ "equals": lambda o, n: abs(o - n) <= 1,
++ }
++ ],
++ )
++ assert not ret
++
++
+ def test_change_xml_text_notdefined(xml_doc):
+ ret = xml.change_xml(xml_doc, {}, [{"path": "name", "xpath": "name"}])
+ assert not ret
+@@ -167,3 +188,23 @@ def test_change_xml_template_remove(xml_doc):
+ )
+ assert ret
+ assert xml_doc.find("vcpus") is None
++
++
++def test_change_xml_template_list(xml_doc):
++ ret = xml.change_xml(
++ xml_doc,
++ {"memtune": {"hugepages": [{"size": "1024"}, {"size": "512"}]}},
++ [
++ {
++ "path": "memtune:hugepages:{id}:size",
++ "xpath": "memtune/hugepages/page[$id]",
++ "get": lambda n: n.get("size"),
++ "set": lambda n, v: n.set("size", v),
++ "del": xml.del_attribute("size"),
++ },
++ ],
++ )
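++ # each list entry is bound to the {id}/$id placeholder, producing one indexed <page> element per item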
++ assert ret
++ assert ["1024", "512"] == [
++ n.get("size") for n in xml_doc.findall("memtune/hugepages/page")
++ ]
+diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
+index 83152eda6e..91dee2098d 100644
+--- a/tests/unit/modules/test_virt.py
++++ b/tests/unit/modules/test_virt.py
+@@ -106,6 +106,10 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ mock_domain.name.return_value = name
+ return mock_domain
+
++ def assertEqualUnit(self, actual, expected, unit="KiB"):
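++ """Assert that an XML element has the expected text value and unit attribute."""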
++ self.assertEqual(actual.get("unit"), unit)
++ self.assertEqual(actual.text, str(expected))
++
+ def test_disk_profile_merge(self):
+ """
+ Test virt._disk_profile() when merging with user-defined disks
+@@ -215,16 +219,14 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="pty",
+- console=True,
++ serials=[{"type": "pty"}],
+ )
+ root = ET.fromstring(xml_data)
+ self.assertEqual(root.find("devices/serial").attrib["type"], "pty")
+- self.assertEqual(root.find("devices/console").attrib["type"], "pty")
+
+- def test_gen_xml_for_serial_console(self):
++ def test_gen_xml_for_telnet_serial(self):
+ """
+- Test virt._gen_xml() serial console
++ Test virt._gen_xml() telnet serial
+ """
+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
+ nicp = virt._nic_profile("default", "kvm")
+@@ -238,11 +240,134 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="pty",
+- console=True,
++ serials=[{"type": "tcp", "port": 22223, "protocol": "telnet"}],
++ )
++ root = ET.fromstring(xml_data)
++ self.assertEqual(root.find("devices/serial").attrib["type"], "tcp")
++ self.assertEqual(root.find("devices/serial/source").attrib["service"], "22223")
++ self.assertEqual(root.find("devices/serial/protocol").attrib["type"], "telnet")
++
++ def test_gen_xml_for_telnet_serial_unspecified_port(self):
++ """
++ Test virt._gen_xml() telnet serial without any specified port
++ """
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ 1,
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ serials=[{"type": "tcp"}],
++ )
++ root = ET.fromstring(xml_data)
++ self.assertEqual(root.find("devices/serial").attrib["type"], "tcp")
++ self.assertEqual(root.find("devices/serial/source").attrib["service"], "23023")
++ self.assertFalse("tls" in root.find("devices/serial/source").keys())
++ self.assertEqual(root.find("devices/serial/protocol").attrib["type"], "telnet")
++
++ def test_gen_xml_for_chardev_types(self):
++ """
++ Test virt._gen_xml() consoles and serials of various types
++ """
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ 1,
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ consoles=[
++ {"type": "pty", "path": "/dev/pts/2", "target_port": 2},
++ {"type": "pty", "target_type": "usb-serial"},
++ {"type": "stdio"},
++ {"type": "file", "path": "/path/to/serial.log"},
++ ],
++ serials=[
++ {"type": "pipe", "path": "/tmp/mypipe"},
++ {"type": "udp", "host": "127.0.0.1", "port": 1234},
++ {"type": "tcp", "port": 22223, "protocol": "raw", "tls": True},
++ {"type": "unix", "path": "/path/to/socket"},
++ ],
++ )
++ root = ET.fromstring(xml_data)
++
++ self.assertEqual(root.find("devices/console[1]").attrib["type"], "pty")
++ self.assertEqual(
++ root.find("devices/console[1]/source").attrib["path"], "/dev/pts/2"
++ )
++ self.assertEqual(root.find("devices/console[1]/target").attrib["port"], "2")
++
++ self.assertEqual(root.find("devices/console[2]").attrib["type"], "pty")
++ self.assertIsNone(root.find("devices/console[2]/source"))
++ self.assertEqual(
++ root.find("devices/console[2]/target").attrib["type"], "usb-serial"
++ )
++
++ self.assertEqual(root.find("devices/console[3]").attrib["type"], "stdio")
++ self.assertIsNone(root.find("devices/console[3]/source"))
++
++ self.assertEqual(root.find("devices/console[4]").attrib["type"], "file")
++ self.assertEqual(
++ root.find("devices/console[4]/source").attrib["path"], "/path/to/serial.log"
++ )
++
++ self.assertEqual(root.find("devices/serial[1]").attrib["type"], "pipe")
++ self.assertEqual(
++ root.find("devices/serial[1]/source").attrib["path"], "/tmp/mypipe"
++ )
++
++ self.assertEqual(root.find("devices/serial[2]").attrib["type"], "udp")
++ self.assertEqual(root.find("devices/serial[2]/source").attrib["mode"], "bind")
++ self.assertEqual(
++ root.find("devices/serial[2]/source").attrib["service"], "1234"
++ )
++ self.assertEqual(
++ root.find("devices/serial[2]/source").attrib["host"], "127.0.0.1"
++ )
++
++ self.assertEqual(root.find("devices/serial[3]").attrib["type"], "tcp")
++ self.assertEqual(root.find("devices/serial[3]/source").attrib["mode"], "bind")
++ self.assertEqual(
++ root.find("devices/serial[3]/source").attrib["service"], "22223"
++ )
++ self.assertEqual(root.find("devices/serial[3]/source").attrib["tls"], "yes")
++ self.assertEqual(root.find("devices/serial[3]/protocol").attrib["type"], "raw")
++
++ self.assertEqual(root.find("devices/serial[4]").attrib["type"], "unix")
++ self.assertEqual(
++ root.find("devices/serial[4]/source").attrib["path"], "/path/to/socket"
++ )
++
++ def test_gen_xml_no_nic_console(self):
++ """
++ Test virt._gen_xml() console
++ """
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ 1,
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ consoles=[{"type": "pty"}],
+ )
+ root = ET.fromstring(xml_data)
+- self.assertEqual(root.find("devices/serial").attrib["type"], "pty")
+ self.assertEqual(root.find("devices/console").attrib["type"], "pty")
+
+ def test_gen_xml_for_telnet_console(self):
+@@ -261,14 +386,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="tcp",
+- console=True,
+- telnet_port=22223,
++ consoles=[{"type": "tcp", "port": 22223, "protocol": "telnet"}],
+ )
+ root = ET.fromstring(xml_data)
+- self.assertEqual(root.find("devices/serial").attrib["type"], "tcp")
+ self.assertEqual(root.find("devices/console").attrib["type"], "tcp")
+ self.assertEqual(root.find("devices/console/source").attrib["service"], "22223")
++ self.assertEqual(root.find("devices/console/protocol").attrib["type"], "telnet")
+
+ def test_gen_xml_for_telnet_console_unspecified_port(self):
+ """
+@@ -286,15 +409,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="tcp",
+- console=True,
++ consoles=[{"type": "tcp"}],
+ )
+ root = ET.fromstring(xml_data)
+- self.assertEqual(root.find("devices/serial").attrib["type"], "tcp")
+ self.assertEqual(root.find("devices/console").attrib["type"], "tcp")
+- self.assertIsInstance(
+- int(root.find("devices/console/source").attrib["service"]), int
+- )
++ self.assertEqual(root.find("devices/console/source").attrib["service"], "23023")
++ self.assertEqual(root.find("devices/console/protocol").attrib["type"], "telnet")
+
+ def test_gen_xml_for_serial_no_console(self):
+ """
+@@ -312,8 +432,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="pty",
+- console=False,
++ serials=[{"type": "pty"}],
++ consoles=[],
+ )
+ root = ET.fromstring(xml_data)
+ self.assertEqual(root.find("devices/serial").attrib["type"], "pty")
+@@ -335,8 +455,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "kvm",
+ "hvm",
+ "x86_64",
+- serial_type="tcp",
+- console=False,
++ serials=[{"type": "tcp", "port": 22223, "protocol": "telnet"}],
++ consoles=[],
+ )
+ root = ET.fromstring(xml_data)
+ self.assertEqual(root.find("devices/serial").attrib["type"], "tcp")
+@@ -459,109 +579,493 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(root.find("devices/graphics/listen").attrib["type"], "none")
+ self.assertFalse("address" in root.find("devices/graphics/listen").attrib)
+
+- def test_default_disk_profile_hypervisor_esxi(self):
++ def test_gen_xml_memory(self):
+ """
+- Test virt._disk_profile() default ESXi profile
++ Test virt._gen_xml() with advanced memory settings
+ """
+- mock = MagicMock(return_value={})
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._disk_profile(
+- self.mock_conn, "nonexistent", "vmware", None, "test-vm"
+- )
+- self.assertTrue(len(ret) == 1)
+- found = [disk for disk in ret if disk["name"] == "system"]
+- self.assertTrue(bool(found))
+- system = found[0]
+- self.assertEqual(system["format"], "vmdk")
+- self.assertEqual(system["model"], "scsi")
+- self.assertTrue(int(system["size"]) >= 1)
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ 1,
++ {
++ "boot": "512m",
++ "current": "256m",
++ "max": "1g",
++ "hard_limit": "1024",
++ "soft_limit": "512m",
++ "swap_hard_limit": "1g",
++ "min_guarantee": "256m",
++ "hugepages": [
++ {"nodeset": "", "size": "128m"},
++ {"nodeset": "0", "size": "256m"},
++ {"nodeset": "1", "size": "512m"},
++ ],
++ "nosharepages": True,
++ "locked": True,
++ "source": "file",
++ "access": "shared",
++ "allocation": "immediate",
++ "discard": True,
++ },
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ )
++ root = ET.fromstring(xml_data)
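++ # sizes are expected in KiB in the generated XML: suffixed values like "512m"
++ # are converted, while bare numbers such as "1024" are treated as MiB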
++ self.assertEqualUnit(root.find("memory"), 512 * 1024)
++ self.assertEqualUnit(root.find("currentMemory"), 256 * 1024)
++ self.assertEqualUnit(root.find("maxMemory"), 1024 * 1024)
++ self.assertFalse("slots" in root.find("maxMemory").keys())
++ self.assertEqualUnit(root.find("memtune/hard_limit"), 1024 * 1024)
++ self.assertEqualUnit(root.find("memtune/soft_limit"), 512 * 1024)
++ self.assertEqualUnit(root.find("memtune/swap_hard_limit"), 1024 ** 2)
++ self.assertEqualUnit(root.find("memtune/min_guarantee"), 256 * 1024)
++ self.assertEqual(
++ [
++ {"nodeset": page.get("nodeset"), "size": page.get("size")}
++ for page in root.findall("memoryBacking/hugepages/page")
++ ],
++ [
++ {"nodeset": None, "size": str(128 * 1024)},
++ {"nodeset": "0", "size": str(256 * 1024)},
++ {"nodeset": "1", "size": str(512 * 1024)},
++ ],
++ )
++ self.assertIsNotNone(root.find("memoryBacking/nosharepages"))
++ self.assertIsNotNone(root.find("memoryBacking/locked"))
++ self.assertIsNotNone(root.find("memoryBacking/discard"))
++ self.assertEqual(root.find("memoryBacking/source").get("type"), "file")
++ self.assertEqual(root.find("memoryBacking/access").get("mode"), "shared")
++ self.assertEqual(root.find("memoryBacking/allocation").get("mode"), "immediate")
+
+- def test_default_disk_profile_hypervisor_kvm(self):
++ def test_gen_xml_cpu(self):
+ """
+- Test virt._disk_profile() default KVM profile
++ Test virt._gen_xml() with CPU advanced properties
+ """
+- mock = MagicMock(side_effect=[{}, "/images/dir"])
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._disk_profile(
+- self.mock_conn, "nonexistent", "kvm", None, "test-vm"
+- )
+- self.assertTrue(len(ret) == 1)
+- found = [disk for disk in ret if disk["name"] == "system"]
+- self.assertTrue(bool(found))
+- system = found[0]
+- self.assertEqual(system["format"], "qcow2")
+- self.assertEqual(system["model"], "virtio")
+- self.assertTrue(int(system["size"]) >= 1)
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ {
++ "maximum": 12,
++ "placement": "static",
++ "cpuset": "0-11",
++ "current": 5,
++ "mode": "custom",
++ "match": "minimum",
++ "check": "full",
++ "vendor": "Intel",
++ "model": {
++ "name": "core2duo",
++ "fallback": "allow",
++ "vendor_id": "GenuineIntel",
++ },
++ "cache": {"level": 3, "mode": "emulate"},
++ "features": {"lahf": "optional", "vmx": "require"},
++ "vcpus": {
++ 0: {"enabled": True, "hotpluggable": True},
++ 1: {"enabled": False},
++ },
++ },
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ )
++ root = ET.fromstring(xml_data)
++ self.assertEqual(root.find("vcpu").get("current"), "5")
++ self.assertEqual(root.find("vcpu").get("placement"), "static")
++ self.assertEqual(root.find("vcpu").get("cpuset"), "0-11")
++ self.assertEqual(root.find("vcpu").text, "12")
++ self.assertEqual(root.find("cpu").get("match"), "minimum")
++ self.assertEqual(root.find("cpu").get("mode"), "custom")
++ self.assertEqual(root.find("cpu").get("check"), "full")
++ self.assertEqual(root.find("cpu/vendor").text, "Intel")
++ self.assertEqual(root.find("cpu/model").text, "core2duo")
++ self.assertEqual(root.find("cpu/model").get("fallback"), "allow")
++ self.assertEqual(root.find("cpu/model").get("vendor_id"), "GenuineIntel")
++ self.assertEqual(root.find("cpu/cache").get("level"), "3")
++ self.assertEqual(root.find("cpu/cache").get("mode"), "emulate")
++ self.assertEqual(
++ {f.get("name"): f.get("policy") for f in root.findall("cpu/feature")},
++ {"lahf": "optional", "vmx": "require"},
++ )
++ self.assertEqual(
++ {
++ v.get("id"): {
++ "enabled": v.get("enabled"),
++ "hotpluggable": v.get("hotpluggable"),
++ }
++ for v in root.findall("vcpus/vcpu")
++ },
++ {
++ "0": {"enabled": "yes", "hotpluggable": "yes"},
++ "1": {"enabled": "no", "hotpluggable": None},
++ },
++ )
+
+- def test_default_disk_profile_hypervisor_xen(self):
++ def test_gen_xml_cpu_topology(self):
+ """
+- Test virt._disk_profile() default XEN profile
++ Test virt._gen_xml() with CPU topology
+ """
+- mock = MagicMock(side_effect=[{}, "/images/dir"])
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._disk_profile(
+- self.mock_conn, "nonexistent", "xen", None, "test-vm"
+- )
+- self.assertTrue(len(ret) == 1)
+- found = [disk for disk in ret if disk["name"] == "system"]
+- self.assertTrue(bool(found))
+- system = found[0]
+- self.assertEqual(system["format"], "qcow2")
+- self.assertEqual(system["model"], "xen")
+- self.assertTrue(int(system["size"]) >= 1)
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ {"maximum": 1, "topology": {"sockets": 4, "cores": 16, "threads": 2}},
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ )
++ root = ET.fromstring(xml_data)
++ self.assertEqual(root.find("cpu/topology").get("sockets"), "4")
++ self.assertEqual(root.find("cpu/topology").get("cores"), "16")
++ self.assertEqual(root.find("cpu/topology").get("threads"), "2")
+
+- def test_default_nic_profile_hypervisor_esxi(self):
++ def test_gen_xml_cpu_numa(self):
+ """
+- Test virt._nic_profile() default ESXi profile
++ Test virt._gen_xml() with CPU numa settings
+ """
+- mock = MagicMock(return_value={})
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._nic_profile("nonexistent", "vmware")
+- self.assertTrue(len(ret) == 1)
+- eth0 = ret[0]
+- self.assertEqual(eth0["name"], "eth0")
+- self.assertEqual(eth0["type"], "bridge")
+- self.assertEqual(eth0["source"], "DEFAULT")
+- self.assertEqual(eth0["model"], "e1000")
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ {
++ "maximum": 1,
++ "numa": {
++ 0: {
++ "cpus": "0-3",
++ "memory": "1g",
++ "discard": True,
++ "distances": {0: 10, 1: 20},
++ },
++ 1: {"cpus": "4-7", "memory": "2g", "distances": {0: 20, 1: 10}},
++ },
++ },
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ )
++ root = ET.fromstring(xml_data)
++ cell0 = root.find("cpu/numa/cell[@id='0']")
++ self.assertEqual(cell0.get("cpus"), "0-3")
++ self.assertIsNone(cell0.get("unit"))
++ self.assertEqual(cell0.get("memory"), str(1024 ** 2))
++ self.assertEqual(cell0.get("discard"), "yes")
++ self.assertEqual(
++ {d.get("id"): d.get("value") for d in cell0.findall("distances/sibling")},
++ {"0": "10", "1": "20"},
++ )
+
+- def test_default_nic_profile_hypervisor_kvm(self):
+- """
+- Test virt._nic_profile() default KVM profile
+- """
+- mock = MagicMock(return_value={})
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._nic_profile("nonexistent", "kvm")
+- self.assertTrue(len(ret) == 1)
+- eth0 = ret[0]
+- self.assertEqual(eth0["name"], "eth0")
+- self.assertEqual(eth0["type"], "bridge")
+- self.assertEqual(eth0["source"], "br0")
+- self.assertEqual(eth0["model"], "virtio")
++ cell1 = root.find("cpu/numa/cell[@id='1']")
++ self.assertEqual(cell1.get("cpus"), "4-7")
++ self.assertIsNone(cell0.get("unit"))
++ self.assertEqual(cell1.get("memory"), str(2 * 1024 ** 2))
++ self.assertFalse("discard" in cell1.keys())
++ self.assertEqual(
++ {d.get("id"): d.get("value") for d in cell1.findall("distances/sibling")},
++ {"0": "20", "1": "10"},
++ )
+
+- def test_default_nic_profile_hypervisor_xen(self):
++ def test_gen_xml_cputune(self):
+ """
+- Test virt._nic_profile() default XEN profile
++ Test virt._gen_xml() with CPU tuning
+ """
+- mock = MagicMock(return_value={})
+- with patch.dict(
+- virt.__salt__, {"config.get": mock} # pylint: disable=no-member
+- ):
+- ret = virt._nic_profile("nonexistent", "xen")
+- self.assertTrue(len(ret) == 1)
+- eth0 = ret[0]
+- self.assertEqual(eth0["name"], "eth0")
+- self.assertEqual(eth0["type"], "bridge")
+- self.assertEqual(eth0["source"], "br0")
++ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
++ nicp = virt._nic_profile("default", "kvm")
++ cputune = {
++ "shares": 2048,
++ "period": 122000,
++ "quota": -1,
++ "global_period": 1000000,
++ "global_quota": -3,
++ "emulator_period": 1200000,
++ "emulator_quota": -10,
++ "iothread_period": 133000,
++ "iothread_quota": -1,
++ "vcpupin": {0: "1-4,^2", 1: "0,1", 2: "2,3", 3: "0,4"},
++ "emulatorpin": "1-3",
++ "iothreadpin": {1: "5-6", 2: "7-8"},
++ "vcpusched": [
++ {"scheduler": "fifo", "priority": 1, "vcpus": "0"},
++ {"scheduler": "fifo", "priority": 2, "vcpus": "1"},
++ {"scheduler": "idle", "priority": 3, "vcpus": "2"},
++ ],
++ "iothreadsched": [
++ {"scheduler": "idle"},
++ {"scheduler": "batch", "iothreads": "5-7", "priority": 1},
++ ],
++ "emulatorsched": {"scheduler": "rr", "priority": 2},
++ "cachetune": {
++ "0-3": {
++ 0: {"level": 3, "type": "both", "size": 3},
++ 1: {"level": 3, "type": "both", "size": 3},
++ "monitor": {1: 3, "0-3": 3},
++ },
++ "4-5": {"monitor": {4: 3, 5: 2}},
++ },
++ "memorytune": {"0-2": {0: 60}, "3-4": {0: 50, 1: 70}},
++ }
++ xml_data = virt._gen_xml(
++ self.mock_conn,
++ "hello",
++ {"maximum": 1, "tuning": cputune, "iothreads": 2},
++ 512,
++ diskp,
++ nicp,
++ "kvm",
++ "hvm",
++ "x86_64",
++ )
++ root = ET.fromstring(xml_data)
++ self.assertEqual(root.find("cputune").find("shares").text, "2048")
++ self.assertEqual(root.find("cputune").find("period").text, "122000")
++ self.assertEqual(root.find("cputune").find("quota").text, "-1")
++ self.assertEqual(root.find("cputune").find("global_period").text, "1000000")
++ self.assertEqual(root.find("cputune").find("global_quota").text, "-3")
++ self.assertEqual(root.find("cputune").find("emulator_period").text, "1200000")
++ self.assertEqual(root.find("cputune").find("emulator_quota").text, "-10")
++ self.assertEqual(root.find("cputune").find("iothread_period").text, "133000")
++ self.assertEqual(root.find("cputune").find("iothread_quota").text, "-1")
++ self.assertEqual(
++ root.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"),
++ "1-4,^2",
++ )
++ self.assertEqual(
++ root.find("cputune").find("vcpupin[@vcpu='1']").attrib.get("cpuset"), "0,1",
++ )
++ self.assertEqual(
++ root.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"), "2,3",
++ )
++ self.assertEqual(
++ root.find("cputune").find("vcpupin[@vcpu='3']").attrib.get("cpuset"), "0,4",
++ )
++ self.assertEqual(
++ root.find("cputune").find("emulatorpin").attrib.get("cpuset"), "1-3"
++ )
++ self.assertEqual(
++ root.find("cputune")
++ .find("iothreadpin[@iothread='1']")
++ .attrib.get("cpuset"),
++ "5-6",
++ )
++ self.assertEqual(
++ root.find("cputune")
++ .find("iothreadpin[@iothread='2']")
++ .attrib.get("cpuset"),
++ "7-8",
++ )
++ self.assertDictEqual(
++ {
++ s.get("vcpus"): {
++ "scheduler": s.get("scheduler"),
++ "priority": s.get("priority"),
++ }
++ for s in root.findall("cputune/vcpusched")
++ },
++ {
++ "0": {"scheduler": "fifo", "priority": "1"},
++ "1": {"scheduler": "fifo", "priority": "2"},
++ "2": {"scheduler": "idle", "priority": "3"},
++ },
++ )
++ self.assertDictEqual(
++ {
++ s.get("iothreads"): {
++ "scheduler": s.get("scheduler"),
++ "priority": s.get("priority"),
++ }
++ for s in root.findall("cputune/iothreadsched")
++ },
++ {
++ None: {"scheduler": "idle", "priority": None},
++ "5-7": {"scheduler": "batch", "priority": "1"},
++ },
++ )
++ self.assertEqual(root.find("cputune/emulatorsched").get("scheduler"), "rr")
++ self.assertEqual(root.find("cputune/emulatorsched").get("priority"), "2")
++ self.assertEqual(
++ root.find("./cputune/cachetune[@vcpus='0-3']").attrib.get("vcpus"), "0-3"
++ )
++ self.assertEqual(
++ root.find("./cputune/cachetune[@vcpus='0-3']/cache[@id='0']").attrib.get(
++ "level"
++ ),
++ "3",
++ )
++ self.assertEqual(
++ root.find("./cputune/cachetune[@vcpus='0-3']/cache[@id='0']").attrib.get(
++ "type"
++ ),
++ "both",
++ )
++ self.assertEqual(
++ root.find(
++ "./cputune/cachetune[@vcpus='0-3']/monitor[@vcpus='1']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertNotEqual(
++ root.find("./cputune/cachetune[@vcpus='0-3']/monitor[@vcpus='1']"), None
++ )
++ self.assertNotEqual(
++ root.find("./cputune/cachetune[@vcpus='4-5']").attrib.get("vcpus"), None
++ )
++ self.assertEqual(
++ root.find("./cputune/cachetune[@vcpus='4-5']/cache[@id='0']"), None
++ )
++ self.assertEqual(
++ root.find(
++ "./cputune/cachetune[@vcpus='4-5']/monitor[@vcpus='4']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertEqual(
++ root.find(
++ "./cputune/cachetune[@vcpus='4-5']/monitor[@vcpus='5']"
++ ).attrib.get("level"),
++ "2",
++ )
++ self.assertNotEqual(root.find("./cputune/memorytune[@vcpus='0-2']"), None)
++ self.assertEqual(
++ root.find("./cputune/memorytune[@vcpus='0-2']/node[@id='0']").attrib.get(
++ "bandwidth"
++ ),
++ "60",
++ )
++ self.assertNotEqual(root.find("./cputune/memorytune[@vcpus='3-4']"), None)
++ self.assertEqual(
++ root.find("./cputune/memorytune[@vcpus='3-4']/node[@id='0']").attrib.get(
++ "bandwidth"
++ ),
++ "50",
++ )
++ self.assertEqual(
++ root.find("./cputune/memorytune[@vcpus='3-4']/node[@id='1']").attrib.get(
++ "bandwidth"
++ ),
++ "70",
++ )
++ self.assertEqual(root.find("iothreads").text, "2")
++
++ def test_default_disk_profile_hypervisor_esxi(self):
++ """
++ Test virt._disk_profile() default ESXi profile
++ """
++ mock = MagicMock(return_value={})
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._disk_profile(
++ self.mock_conn, "nonexistent", "vmware", None, "test-vm"
++ )
++ self.assertTrue(len(ret) == 1)
++ found = [disk for disk in ret if disk["name"] == "system"]
++ self.assertTrue(bool(found))
++ system = found[0]
++ self.assertEqual(system["format"], "vmdk")
++ self.assertEqual(system["model"], "scsi")
++ self.assertTrue(int(system["size"]) >= 1)
++
++ def test_default_disk_profile_hypervisor_kvm(self):
++ """
++ Test virt._disk_profile() default KVM profile
++ """
++ mock = MagicMock(side_effect=[{}, "/images/dir"])
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._disk_profile(
++ self.mock_conn, "nonexistent", "kvm", None, "test-vm"
++ )
++ self.assertTrue(len(ret) == 1)
++ found = [disk for disk in ret if disk["name"] == "system"]
++ self.assertTrue(bool(found))
++ system = found[0]
++ self.assertEqual(system["format"], "qcow2")
++ self.assertEqual(system["model"], "virtio")
++ self.assertTrue(int(system["size"]) >= 1)
++
++ def test_default_disk_profile_hypervisor_xen(self):
++ """
++ Test virt._disk_profile() default XEN profile
++ """
++ mock = MagicMock(side_effect=[{}, "/images/dir"])
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._disk_profile(
++ self.mock_conn, "nonexistent", "xen", None, "test-vm"
++ )
++ self.assertTrue(len(ret) == 1)
++ found = [disk for disk in ret if disk["name"] == "system"]
++ self.assertTrue(bool(found))
++ system = found[0]
++ self.assertEqual(system["format"], "qcow2")
++ self.assertEqual(system["model"], "xen")
++ self.assertTrue(int(system["size"]) >= 1)
++
++ def test_default_nic_profile_hypervisor_esxi(self):
++ """
++ Test virt._nic_profile() default ESXi profile
++ """
++ mock = MagicMock(return_value={})
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._nic_profile("nonexistent", "vmware")
++ self.assertTrue(len(ret) == 1)
++ eth0 = ret[0]
++ self.assertEqual(eth0["name"], "eth0")
++ self.assertEqual(eth0["type"], "bridge")
++ self.assertEqual(eth0["source"], "DEFAULT")
++ self.assertEqual(eth0["model"], "e1000")
++
++ def test_default_nic_profile_hypervisor_kvm(self):
++ """
++ Test virt._nic_profile() default KVM profile
++ """
++ mock = MagicMock(return_value={})
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._nic_profile("nonexistent", "kvm")
++ self.assertTrue(len(ret) == 1)
++ eth0 = ret[0]
++ self.assertEqual(eth0["name"], "eth0")
++ self.assertEqual(eth0["type"], "bridge")
++ self.assertEqual(eth0["source"], "br0")
++ self.assertEqual(eth0["model"], "virtio")
++
++ def test_default_nic_profile_hypervisor_xen(self):
++ """
++ Test virt._nic_profile() default XEN profile
++ """
++ mock = MagicMock(return_value={})
++ with patch.dict(
++ virt.__salt__, {"config.get": mock} # pylint: disable=no-member
++ ):
++ ret = virt._nic_profile("nonexistent", "xen")
++ self.assertTrue(len(ret) == 1)
++ eth0 = ret[0]
++ self.assertEqual(eth0["name"], "eth0")
++ self.assertEqual(eth0["type"], "bridge")
++ self.assertEqual(eth0["source"], "br0")
+ self.assertFalse(eth0["model"])
+
+ def test_gen_vol_xml_esx(self):
+@@ -1836,6 +2340,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ <graphics type='spice' listen='127.0.0.1' autoport='yes'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
++ <serial type='pty'/>
++ <console type='pty'/>
+ </devices>
+ </domain>
+ """.format(
+@@ -1896,10 +2402,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ ),
+ )
+
+- # Update vcpus case
++ # test cpu passed as an integer case
+ setvcpus_mock = MagicMock(return_value=0)
+ domain_mock.setVcpusFlags = setvcpus_mock
+ self.assertEqual(
+@@ -1914,142 +2421,400 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual(setxml.find("vcpu").text, "2")
+ self.assertEqual(setvcpus_mock.call_args[0][0], 2)
++ define_mock.reset_mock()
+
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ # test updating vcpu attribute
++ vcpu = {
++ "placement": "static",
++ "cpuset": "0-11",
++ "current": 5,
++ "maximum": 12,
+ }
+-
+- # Update boot devices case
+- define_mock.reset_mock()
+ self.assertEqual(
+ {
+ "definition": True,
++ "cpu": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", boot_dev="cdrom network hd"),
++ virt.update("my_vm", cpu=vcpu),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("vcpu").text, "12")
++ self.assertEqual(setxml.find("vcpu").attrib["placement"], "static")
+ self.assertEqual(
+- ["cdrom", "network", "hd"],
+- [node.get("dev") for node in setxml.findall("os/boot")],
++ setxml.find("vcpu").attrib["cpuset"], "0,1,2,3,4,5,6,7,8,9,10,11"
+ )
++ self.assertEqual(setxml.find("vcpu").attrib["current"], "5")
+
+- # Update unchanged boot devices case
+- define_mock.reset_mock()
++ # test adding vcpus elements
++ vcpus = {
++ "vcpus": {
++ "0": {"enabled": True, "hotpluggable": False, "order": 1},
++ "1": {"enabled": False, "hotpluggable": True},
++ }
++ }
+ self.assertEqual(
+ {
+- "definition": False,
++ "definition": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", boot_dev="hd"),
++ virt.update("my_vm", cpu=vcpus),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["id"], "0")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["enabled"], "yes")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='0']").attrib["hotpluggable"], "no"
++ )
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["order"], "1")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='1']").attrib["id"], "1")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='1']").attrib["enabled"], "no")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='1']").attrib["hotpluggable"], "yes"
++ )
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='1']").attrib.get("order"), None
+ )
+- define_mock.assert_not_called()
+
+- # Update with boot parameter case
+- define_mock.reset_mock()
++ # test adding cpu attribute
++ cpu_atr = {"mode": "custom", "match": "exact", "check": "full"}
+ self.assertEqual(
+ {
+ "definition": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", boot=boot),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ virt.update("my_vm", cpu=cpu_atr),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- )
+-
+- boot_uefi = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd",
++ self.assertEqual(setxml.find("cpu").attrib["mode"], "custom")
++ self.assertEqual(setxml.find("cpu").attrib["match"], "exact")
++ self.assertEqual(setxml.find("cpu").attrib["check"], "full")
++
++ # test adding cpu model
++ cpu_model = {
++ "model": {
++ "name": "coreduo",
++ "fallback": "allow",
++ "vendor_id": "Genuine20201",
++ }
+ }
+-
+ self.assertEqual(
+ {
+ "definition": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", boot=boot_uefi),
++ virt.update("my_vm", cpu=cpu_model),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual(
+- setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd"
++ setxml.find("cpu").find("model").attrib.get("vendor_id"), "Genuine20201"
+ )
+- self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
+- self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
+ self.assertEqual(
+- setxml.find("os").find("nvram").attrib["template"],
+- "/usr/share/OVMF/OVMF_VARS.ms.fd",
++ setxml.find("cpu").find("model").attrib.get("fallback"), "allow"
+ )
++ self.assertEqual(setxml.find("cpu").find("model").text, "coreduo")
+
++ # test adding cpu vendor
++ cpu_vendor = {"vendor": "Intel"}
+ self.assertEqual(
+ {
+ "definition": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", boot={"efi": True}),
++ virt.update("my_vm", cpu=cpu_vendor),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi")
+-
+- invalid_boot = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "initrd": "/root/f8-i386-initrd",
+- }
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot=invalid_boot)
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot={"efi": "Not a boolean value"})
+-
+- # Update memtune parameter case
+- memtune = {
+- "soft_limit": "0.5g",
+- "hard_limit": "1024",
+- "swap_hard_limit": "2048m",
+- "min_guarantee": "1 g",
+- }
++ self.assertEqual(setxml.find("cpu").find("vendor").text, "Intel")
+
++ # test adding cpu topology
++ cpu_topology = {"topology": {"sockets": 1, "cores": 12, "threads": 1}}
+ self.assertEqual(
+ {
+ "definition": True,
+ "disk": {"attached": [], "detached": [], "updated": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", mem=memtune),
++ virt.update("my_vm", cpu=cpu_topology),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("sockets"), "1")
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("cores"), "12")
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("threads"), "1")
++
++ # test adding cache
++ cpu_cache = {"cache": {"mode": "emulate", "level": 3}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", cpu=cpu_cache),
+ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("level"), "3")
++ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("mode"), "emulate")
+
++ # test adding feature
++ cpu_feature = {"features": {"lahf": "optional", "pcid": "disable"}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", cpu=cpu_feature),
++ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual(
+- setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3))
++ setxml.find("./cpu/feature[@name='pcid']").attrib.get("policy"), "disable"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/feature[@name='lahf']").attrib.get("policy"), "optional"
+ )
+- self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes")
++
++ # test adding numa cell
++ numa_cell = {
++ "numa": {
++ "0": {
++ "cpus": "0-3",
++ "memory": "1g",
++ "discard": True,
++ "distances": {0: 10, 1: 21, 2: 31, 3: 41},
++ },
++ "1": {
++ "cpus": "4-6",
++ "memory": "0.5g",
++ "discard": False,
++ "memAccess": "shared",
++ "distances": {0: 21, 1: 10, 2: 15, 3: 30},
++ },
++ }
++ }
+ self.assertEqual(
+- setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2)
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", cpu=numa_cell),
+ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
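++ # numa cell sizes like "1g" are expected to be converted to bytes with an explicit unit attribute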
+ self.assertEqual(
+- setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2)
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["cpus"], "0,1,2,3"
+ )
+ self.assertEqual(
+- setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["memory"], str(1024 ** 3)
++ )
++ self.assertEqual(setxml.find("./cpu/numa/cell/[@id='0']").get("unit"), "bytes")
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["discard"], "yes"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "10",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "21",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "31",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='3']").attrib[
++ "value"
++ ],
++ "41",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["cpus"], "4,5,6"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memory"],
++ str(int(1024 ** 3 / 2)),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").get("unit"), "bytes",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["discard"], "no"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memAccess"], "shared"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "21",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "10",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "15",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='3']").attrib[
++ "value"
++ ],
++ "30",
++ )
++
++ # Update boot parameter case
++ boot = {
++ "kernel": "/root/f8-i386-vmlinuz",
++ "initrd": "/root/f8-i386-initrd",
++ "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ }
++
++ # Update boot devices case
++ define_mock.reset_mock()
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", boot_dev="cdrom network hd"),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ ["cdrom", "network", "hd"],
++ [node.get("dev") for node in setxml.findall("os/boot")],
++ )
++
++ # Update unchanged boot devices case
++ define_mock.reset_mock()
++ self.assertEqual(
++ {
++ "definition": False,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", boot_dev="hd"),
++ )
++ define_mock.assert_not_called()
++
++ # Update with boot parameter case
++ define_mock.reset_mock()
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", boot=boot),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
++ self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
++ self.assertEqual(
++ setxml.find("os").find("cmdline").text,
++ "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
++ self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
++ self.assertEqual(
++ setxml.find("os").find("cmdline").text,
++ "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ )
++
++ boot_uefi = {
++ "loader": "/usr/share/OVMF/OVMF_CODE.fd",
++ "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd",
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", boot=boot_uefi),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd"
++ )
++ self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
++ self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
++ self.assertEqual(
++ setxml.find("os").find("nvram").attrib["template"],
++ "/usr/share/OVMF/OVMF_VARS.ms.fd",
++ )
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", boot={"efi": True}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi")
++
++ invalid_boot = {
++ "loader": "/usr/share/OVMF/OVMF_CODE.fd",
++ "initrd": "/root/f8-i386-initrd",
++ }
++
++ with self.assertRaises(SaltInvocationError):
++ virt.update("my_vm", boot=invalid_boot)
++
++ with self.assertRaises(SaltInvocationError):
++ virt.update("my_vm", boot={"efi": "Not a boolean value"})
++
++ # Update memtune parameter case
++ memtune = {
++ "soft_limit": "0.5g",
++ "hard_limit": "1024",
++ "swap_hard_limit": "2048m",
++ "min_guarantee": "1 g",
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", mem=memtune),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqualUnit(
++ setxml.find("memtune").find("soft_limit"), int(0.5 * 1024 ** 3), "bytes"
++ )
++ self.assertEqualUnit(
++ setxml.find("memtune").find("hard_limit"), 1024 * 1024 ** 2, "bytes"
++ )
++ self.assertEqualUnit(
++ setxml.find("memtune").find("swap_hard_limit"), 2048 * 1024 ** 2, "bytes"
++ )
++ self.assertEqualUnit(
++ setxml.find("memtune").find("min_guarantee"), 1 * 1024 ** 3, "bytes"
+ )
+
+ invalid_unit = {"soft_limit": "2HB"}
+@@ -2064,6 +2829,50 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ with self.assertRaises(SaltInvocationError):
+ virt.update("my_vm", mem=invalid_number)
+
++ # Update numatune case
++ numatune = {
++ "memory": {"mode": "strict", "nodeset": 1},
++ "memnodes": {
++ 0: {"mode": "strict", "nodeset": 1},
++ 1: {"mode": "preferred", "nodeset": 2},
++ },
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", numatune=numatune),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("mode"), "strict"
++ )
++
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("nodeset"), "1"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("mode"), "strict"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("nodeset"), "1"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='1']").attrib.get("mode"),
++ "preferred",
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='1']").attrib.get("nodeset"), "2"
++ )
++
+ # Update memory case
+ setmem_mock = MagicMock(return_value=0)
+ domain_mock.setMemoryFlags = setmem_mock
+@@ -2115,37 +2924,250 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
+ self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
+
+- # Update disks case
+- devattach_mock = MagicMock(return_value=0)
+- devdetach_mock = MagicMock(return_value=0)
+- domain_mock.attachDevice = devattach_mock
+- domain_mock.detachDevice = devdetach_mock
+- mock_chmod = MagicMock()
+- mock_run = MagicMock()
+- with patch.dict(
+- os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()}
+- ): # pylint: disable=no-member
+- with patch.dict(
+- virt.__salt__, {"cmd.run": mock_run}
+- ): # pylint: disable=no-member
+- ret = virt.update(
+- "my_vm",
+- disk_profile="default",
+- disks=[
+- {
+- "name": "cddrive",
+- "device": "cdrom",
+- "source_file": None,
+- "model": "ide",
+- },
+- {"name": "added", "size": 2048},
+- ],
+- )
+- added_disk_path = os.path.join(
+- virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2"
+- ) # pylint: disable=no-member
+- self.assertEqual(
+- mock_run.call_args[0][0],
++ # update memory backing case
++ mem_back = {
++ "hugepages": [
++ {"nodeset": "1-5,^4", "size": "1g"},
++ {"nodeset": "4", "size": "2g"},
++ ],
++ "nosharepages": True,
++ "locked": True,
++ "source": "file",
++ "access": "shared",
++ "allocation": "immediate",
++ "discard": True,
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", mem=mem_back),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
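++ # the "1-5,^4" nodeset should expand to "1,2,3,5", with page sizes converted to bytes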
++ self.assertDictEqual(
++ {
++ p.get("nodeset"): {"size": p.get("size"), "unit": p.get("unit")}
++ for p in setxml.findall("memoryBacking/hugepages/page")
++ },
++ {
++ "1,2,3,5": {"size": str(1024 ** 3), "unit": "bytes"},
++ "4": {"size": str(2 * 1024 ** 3), "unit": "bytes"},
++ },
++ )
++ self.assertNotEqual(setxml.find("./memoryBacking/nosharepages"), None)
++ self.assertIsNone(setxml.find("./memoryBacking/nosharepages").text)
++ self.assertEqual([], setxml.find("./memoryBacking/nosharepages").keys())
++ self.assertNotEqual(setxml.find("./memoryBacking/locked"), None)
++ self.assertIsNone(setxml.find("./memoryBacking/locked").text)
++ self.assertEqual([], setxml.find("./memoryBacking/locked").keys())
++ self.assertEqual(setxml.find("./memoryBacking/source").attrib["type"], "file")
++ self.assertEqual(setxml.find("./memoryBacking/access").attrib["mode"], "shared")
++ self.assertNotEqual(setxml.find("./memoryBacking/discard"), None)
++
++ # test adding iothreads
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", cpu={"iothreads": 5}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("iothreads").text, "5")
++
++ # test adding cpu tune parameters
++ cputune = {
++ "shares": 2048,
++ "period": 122000,
++ "quota": -1,
++ "global_period": 1000000,
++ "global_quota": -3,
++ "emulator_period": 1200000,
++ "emulator_quota": -10,
++ "iothread_period": 133000,
++ "iothread_quota": -1,
++ "vcpupin": {0: "1-4,^2", 1: "0,1", 2: "2,3", 3: "0,4"},
++ "emulatorpin": "1-3",
++ "iothreadpin": {1: "5-6", 2: "7-8"},
++ "vcpusched": [
++ {"scheduler": "fifo", "priority": 1, "vcpus": "0"},
++ {"scheduler": "fifo", "priotity": 2, "vcpus": "1"},
++ {"scheduler": "idle", "priotity": 3, "vcpus": "2"},
++ ],
++ "iothreadsched": [{"scheduler": "batch", "iothreads": "7"}],
++ "cachetune": {
++ "0-3": {
++ 0: {"level": 3, "type": "both", "size": 3},
++ 1: {"level": 3, "type": "both", "size": 3},
++ "monitor": {1: 3, "0-3": 3},
++ },
++ "4-5": {"monitor": {4: 3, 5: 2}},
++ },
++ "memorytune": {"0-2": {0: 60}, "3-4": {0: 50, 1: 70}},
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", cpu={"tuning": cputune}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
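++ # pinning ranges are expanded on update: "1-4,^2" becomes "1,3,4" and "1-3" becomes "1,2,3"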
++ self.assertEqual(setxml.find("cputune").find("shares").text, "2048")
++ self.assertEqual(setxml.find("cputune").find("period").text, "122000")
++ self.assertEqual(setxml.find("cputune").find("quota").text, "-1")
++ self.assertEqual(setxml.find("cputune").find("global_period").text, "1000000")
++ self.assertEqual(setxml.find("cputune").find("global_quota").text, "-3")
++ self.assertEqual(setxml.find("cputune").find("emulator_period").text, "1200000")
++ self.assertEqual(setxml.find("cputune").find("emulator_quota").text, "-10")
++ self.assertEqual(setxml.find("cputune").find("iothread_period").text, "133000")
++ self.assertEqual(setxml.find("cputune").find("iothread_quota").text, "-1")
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"),
++ "1,3,4",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='1']").attrib.get("cpuset"),
++ "0,1",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"),
++ "2,3",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='3']").attrib.get("cpuset"),
++ "0,4",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("emulatorpin").attrib.get("cpuset"), "1,2,3"
++ )
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("iothreadpin[@iothread='1']")
++ .attrib.get("cpuset"),
++ "5,6",
++ )
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("iothreadpin[@iothread='2']")
++ .attrib.get("cpuset"),
++ "7,8",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpusched[@vcpus='0']").attrib.get("priority"),
++ "1",
++ )
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("vcpusched[@vcpus='0']")
++ .attrib.get("scheduler"),
++ "fifo",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("iothreadsched").attrib.get("iothreads"), "7"
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("iothreadsched").attrib.get("scheduler"),
++ "batch",
++ )
++ self.assertIsNotNone(setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']"))
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("type"),
++ "both",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertNotEqual(
++ setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']"),
++ None,
++ )
++ self.assertNotEqual(
++ setxml.find("./cputune/cachetune[@vcpus='4,5']").attrib.get("vcpus"), None
++ )
++ self.assertEqual(
++ setxml.find("./cputune/cachetune[@vcpus='4,5']/cache[@id='0']"), None
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='4']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='5']"
++ ).attrib.get("level"),
++ "2",
++ )
++ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='0,1,2']"), None)
++ self.assertEqual(
++ setxml.find(
++ "./cputune/memorytune[@vcpus='0,1,2']/node[@id='0']"
++ ).attrib.get("bandwidth"),
++ "60",
++ )
++ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='3,4']"), None)
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='0']").attrib.get(
++ "bandwidth"
++ ),
++ "50",
++ )
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='1']").attrib.get(
++ "bandwidth"
++ ),
++ "70",
++ )
++
++ # Update disks case
++ devattach_mock = MagicMock(return_value=0)
++ devdetach_mock = MagicMock(return_value=0)
++ domain_mock.attachDevice = devattach_mock
++ domain_mock.detachDevice = devdetach_mock
++ mock_chmod = MagicMock()
++ mock_run = MagicMock()
++ with patch.dict(
++ os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()}
++ ): # pylint: disable=no-member
++ with patch.dict(
++ virt.__salt__, {"cmd.run": mock_run}
++ ): # pylint: disable=no-member
++ ret = virt.update(
++ "my_vm",
++ disk_profile="default",
++ disks=[
++ {
++ "name": "cddrive",
++ "device": "cdrom",
++ "source_file": None,
++ "model": "ide",
++ },
++ {"name": "added", "size": 2048, "iothreads": True},
++ ],
++ )
++ added_disk_path = os.path.join(
++ virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2"
++ ) # pylint: disable=no-member
++ self.assertEqual(
++ mock_run.call_args[0][0],
+ 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path),
+ )
+ self.assertEqual(mock_chmod.call_args[0][0], added_disk_path)
+@@ -2170,6 +3192,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(devattach_mock.call_count, 2)
+ self.assertEqual(devdetach_mock.call_count, 2)
+
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ "threads", setxml.find("devices/disk[3]/driver").get("io")
++ )
++
+ # Update nics case
+ yaml_config = """
+ virt:
+@@ -2244,6 +3271,19 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual("vnc", setxml.find("devices/graphics").get("type"))
+
++ # Serial and console test case
++ self.assertEqual(
++ {
++ "definition": False,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("my_vm", serials=[{"type": "tcp"}], consoles=[{"type": "tcp"}]),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("devices/serial").attrib["type"], "pty")
++ self.assertEqual(setxml.find("devices/console").attrib["type"], "pty")
++
+ # Update with no diff case
+ pool_mock = MagicMock()
+ default_pool_desc = ""
+@@ -2644,48 +3684,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ /usr/share/old/OVMF_CODE.fd
+ /usr/share/old/OVMF_VARS.ms.fd
+
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+
+ """
+ domain_mock_boot = self.set_mock_vm("vm_with_boot_param", xml_boot)
+@@ -2722,71 +3720,909 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ )
+
+ self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("vm_with_boot_param", boot=uefi_boot_new),
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_boot_param", boot=uefi_boot_new),
++ )
++
++ setxml = ET.fromstring(define_mock_boot.call_args[0][0])
++ self.assertEqual(
++ setxml.find("os").find("loader").text, "/usr/share/new/OVMF_CODE.fd"
++ )
++ self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
++ self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
++ self.assertEqual(
++ setxml.find("os").find("nvram").attrib["template"],
++ "/usr/share/new/OVMF_VARS.ms.fd",
++ )
++
++ kernel_none = {
++ "kernel": None,
++ "initrd": None,
++ "cmdline": None,
++ }
++
++ uefi_none = {"loader": None, "nvram": None}
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_boot_param", boot=kernel_none),
++ )
++
++ setxml = ET.fromstring(define_mock_boot.call_args[0][0])
++ self.assertEqual(setxml.find("os").find("kernel"), None)
++ self.assertEqual(setxml.find("os").find("initrd"), None)
++ self.assertEqual(setxml.find("os").find("cmdline"), None)
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_boot_param", boot={"efi": False}),
++ )
++ setxml = ET.fromstring(define_mock_boot.call_args[0][0])
++ self.assertEqual(setxml.find("os").find("nvram"), None)
++ self.assertEqual(setxml.find("os").find("loader"), None)
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_boot_param", boot=uefi_none),
++ )
++
++ setxml = ET.fromstring(define_mock_boot.call_args[0][0])
++ self.assertEqual(setxml.find("os").find("loader"), None)
++ self.assertEqual(setxml.find("os").find("nvram"), None)
++
++ def test_update_existing_numatune_params(self):
++ """
++ Test virt.update() with existing numatune parameters.
++ """
++        xml_numatune = """
++            <domain type='kvm'>
++                <name>vm_with_numatune_param</name>
++                <memory unit='KiB'>1048576</memory>
++                <currentMemory unit='KiB'>1048576</currentMemory>
++                <maxMemory slots='12' unit='KiB'>1048576</maxMemory>
++                <vcpu placement='auto'>1</vcpu>
++                <numatune>
++                    <memory mode='strict' nodeset='0-5,6,7-11'/>
++                    <memnode cellid='1' mode='strict' nodeset='3'/>
++                    <memnode cellid='3' mode='preferred' nodeset='7'/>
++                </numatune>
++                <os>
++                    <type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
++                </os>
++                <on_reboot>restart</on_reboot>
++            </domain>
++        """
++ domain_mock = self.set_mock_vm("vm_with_numatune_param", xml_numatune)
++ domain_mock.OSType = MagicMock(return_value="hvm")
++ define_mock = MagicMock(return_value=True)
++ self.mock_conn.defineXML = define_mock
++
++ # test update existing numatune node
++ numatune = {
++ "memory": {"mode": "preferred", "nodeset": "0-5"},
++ "memnodes": {
++ 0: {"mode": "strict", "nodeset": "4"},
++ 3: {"mode": "preferred", "nodeset": "7"},
++ 4: {"mode": "strict", "nodeset": "6"},
++ },
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_numatune_param", numatune=numatune),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("mode"), "preferred"
++ )
++
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("nodeset"),
++ ",".join([str(i) for i in range(0, 6)]),
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("mode"), "strict"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("nodeset"), "4"
++ )
++
++ self.assertEqual(setxml.find("./numatune/memnode/[@cellid='1']"), None)
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='3']").attrib.get("mode"),
++ "preferred",
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='3']").attrib.get("nodeset"), "7"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='4']").attrib.get("mode"), "strict"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='4']").attrib.get("nodeset"), "6"
++ )
++
++ self.assertEqual(setxml.find("./numatune/memnode/[@cellid='2']"), None)
++
++ numatune_mem_none = {
++ "memory": None,
++ "memnodes": {
++ 0: {"mode": "strict", "nodeset": "4"},
++ 3: {"mode": "preferred", "nodeset": "7"},
++ 4: {"mode": "strict", "nodeset": "6"},
++ },
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_numatune_param", numatune=numatune_mem_none),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("numatune").find("memory"), None)
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("mode"), "strict"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("nodeset"), "4"
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='3']").attrib.get("mode"),
++ "preferred",
++ )
++
++ self.assertEqual(
++ setxml.find("./numatune/memnode/[@cellid='3']").attrib.get("nodeset"), "7"
++ )
++
++ self.assertEqual(setxml.find("./numatune/memnode/[@cellid='2']"), None)
++
++ numatune_mnodes_none = {
++ "memory": {"mode": "preferred", "nodeset": "0-5"},
++ "memnodes": None,
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_numatune_param", numatune=numatune_mnodes_none),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("mode"), "preferred"
++ )
++
++ self.assertEqual(
++ setxml.find("numatune").find("memory").attrib.get("nodeset"),
++ ",".join([str(i) for i in range(0, 6)]),
++ )
++
++ self.assertEqual(setxml.find("./numatune/memnode"), None)
++
++ numatune_without_change = {
++ "memory": {"mode": "strict", "nodeset": "0-5,6,7-11"},
++ "memnodes": {
++ 1: {"mode": "strict", "nodeset": "3"},
++ 3: {"mode": "preferred", "nodeset": "7"},
++ },
++ }
++
++ self.assertEqual(
++ {
++ "definition": False,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_numatune_param", numatune=numatune_without_change),
++ )
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update(
++ "vm_with_numatune_param", numatune={"memory": None, "memnodes": None}
++ ),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("numatune"), None)
++
++ def test_update_existing_cpu_params(self):
++ """
++ Test virt.update() with existing cpu-related parameters.
++ """
++ xml_with_existing_params = """
++
++ vm_with_boot_param
++ 1048576
++ 1048576
++ 6
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ core2duo
++ Intel
++
++
++
++
++
++
++
++
++
++
++
++
++ |
++
++
++
++
++
++
++
++ |
++
++
++
++ hvm
++
++
++ """
++ domain_mock = self.set_mock_vm(
++ "vm_with_existing_param", xml_with_existing_params
++ )
++ domain_mock.OSType = MagicMock(return_value="hvm")
++ define_mock = MagicMock(return_value=True)
++ self.mock_conn.defineXML = define_mock
++
++ # test update vcpu with existing attributes case
++ setvcpus_mock = MagicMock(return_value=0)
++ domain_mock.setVcpusFlags = setvcpus_mock
++
++ cpu_attr = {"placement": "static", "cpuset": "0-5", "current": 3, "maximum": 5}
++ self.assertEqual(
++ {
++ "definition": True,
++ "cpu": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_attr),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("vcpu").text, "5")
++ self.assertEqual(setxml.find("vcpu").attrib["placement"], "static")
++ self.assertEqual(
++ setxml.find("vcpu").attrib["cpuset"],
++ ",".join([str(i) for i in range(0, 6)]),
++ )
++ self.assertEqual(setxml.find("vcpu").attrib["current"], "3")
++
++ # test removing vcpu attribute
++ cpu_none = {"placement": "auto", "cpuset": None, "current": 2, "maximum": 5}
++ self.assertEqual(
++ {
++ "definition": True,
++ "cpu": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("vcpu").text, "5")
++ self.assertEqual(setxml.find("vcpu").attrib["placement"], "auto")
++ self.assertEqual(setxml.find("vcpu").attrib.get("cpuset"), None)
++ self.assertEqual(setxml.find("vcpu").attrib.get("current"), "2")
++
++ # test update individual vcpu with existing attributes
++ vcpus = {
++ "vcpus": {
++ "0": {"enabled": False, "hotpluggable": True, "order": 5},
++ "3": {"enabled": True, "hotpluggable": False, "order": 3},
++ "7": {"enabled": True, "hotpluggable": False},
++ }
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=vcpus),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["id"], "0")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["enabled"], "no")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='0']").attrib["hotpluggable"], "yes"
++ )
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["order"], "5")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='3']").attrib["id"], "3")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='3']").attrib["enabled"], "yes")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='3']").attrib["hotpluggable"], "no"
++ )
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='3']").attrib["order"], "3")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='7']").attrib["id"], "7")
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='7']").attrib["enabled"], "yes")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='7']").attrib["hotpluggable"], "no"
++ )
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='7']").attrib.get("order"), None
++ )
++
++ # test removing vcpu element
++ ind_vcpu = {
++ "vcpus": {"3": {"enabled": True, "hotpluggable": False, "order": None}}
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=ind_vcpu),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']"), None)
++ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='3']").attrib["enabled"], "yes")
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='3']").attrib["hotpluggable"], "no"
++ )
++ self.assertEqual(
++ setxml.find("./vcpus/vcpu/[@id='3']").attrib.get("order"), None
++ )
++
++ # test removing vcpus element
++ vcpus_none = {"vcpus": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=vcpus_none),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("vcpus"), None)
++
++ # test removing cpu attributes
++ cpu_atr_none = {"match": None, "mode": None, "check": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_atr_none),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").attrib, {})
++
++ cpu_atr_mn = {"match": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_atr_mn),
++ )
++
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").attrib.get("match"), None)
++ self.assertEqual(setxml.find("cpu").attrib.get("mode"), "custom")
++ self.assertEqual(setxml.find("cpu").attrib.get("check"), "full")
++
++ # test update existing cpu model
++ cpu_model_none = {"model": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_model_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("model"), None)
++
++ cpu_model_atr_none = {
++ "model": {"name": "coresolo", "fallback": "forbid", "vendor_id": None}
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_model_atr_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("model").attrib.get("vendor_id"), None)
++ self.assertEqual(
++ setxml.find("cpu").find("model").attrib.get("fallback"), "forbid"
++ )
++ self.assertEqual(setxml.find("cpu").find("model").text, "coresolo")
++
++ cpu_model_atr = {
++ "model": {
++ "name": "coresolo",
++ "fallback": "forbid",
++ "vendor_id": "AuthenticAMD",
++ }
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_model_atr),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("cpu").find("model").attrib.get("fallback"), "forbid"
++ )
++ self.assertEqual(
++ setxml.find("cpu").find("model").attrib.get("vendor_id"), "AuthenticAMD"
++ )
++ self.assertEqual(setxml.find("cpu").find("model").text, "coresolo")
++
++ # test update existing cpu vendor
++ cpu_vendor = {"vendor": "AMD"}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_vendor),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("vendor").text, "AMD")
++
++ cpu_vendor_none = {"vendor": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_vendor_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("vendor"), None)
++
++ # test update existing cpu topology
++ cpu_topology = {"topology": {"sockets": 1, "cores": 12, "threads": 1}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_topology),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("sockets"), "1")
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("cores"), "12")
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("threads"), "1")
++
++ cpu_topology_atr_none = {
++ "topology": {"sockets": None, "cores": 12, "threads": 1}
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_topology_atr_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("cpu").find("topology").attrib.get("sockets"), None
++ )
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("cores"), "12")
++ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("threads"), "1")
++
++ cpu_topology_none = {"topology": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_topology_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("topology"), None)
++
++ # test update existing cache
++ cpu_cache = {"cache": {"mode": "passthrough", "level": 2}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_cache),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("level"), "2")
++ self.assertEqual(
++ setxml.find("cpu").find("cache").attrib.get("mode"), "passthrough"
++ )
++
++ cpu_cache_atr_none = {"cache": {"mode": "passthrough", "level": None}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_cache_atr_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("level"), None)
++ self.assertEqual(
++ setxml.find("cpu").find("cache").attrib.get("mode"), "passthrough"
++ )
++
++ cpu_cache_none = {"cache": None}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_cache_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cpu").find("cache"), None)
++
++ # test update existing feature
++ cpu_feature = {"features": {"lahf_lm": "require", "pcid": "optional"}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_feature),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("./cpu/feature[@name='pcid']").attrib.get("policy"), "optional"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/feature[@name='lahf_lm']").attrib.get("policy"),
++ "require",
++ )
++
++ cpu_feature_atr_none = {"features": {"pcid": "optional", "lahf_lm": "disable"}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_feature_atr_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("./cpu/feature[@name='lahf_lm']").attrib.get("policy"),
++ "disable",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/feature[@name='pcid']").attrib.get("policy"), "optional"
++ )
++
++ cpu_feature_none = {"features": {"lahf_lm": None, "pcid": None}}
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=cpu_feature_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("./cpu/feature"), None)
++
++ # test update existing numa cell
++ numa_cell = {
++ "numa": {
++ 0: {
++ "cpus": "0-6",
++ "memory": "512m",
++ "discard": True,
++ "distances": {0: 15, 1: 16, 2: 17, 3: 18},
++ },
++ 1: {
++ "cpus": "7-12",
++ "memory": "2g",
++ "discard": True,
++ "memAccess": "shared",
++ "distances": {0: 23, 1: 24, 2: 25, 3: 26},
++ },
++ }
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=numa_cell),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["cpus"],
++ ",".join([str(i) for i in range(0, 7)]),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["memory"],
++ str(512 * 1024 ** 2),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").get("unit"), "bytes",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["discard"], "yes"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "15",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "16",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "17",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='3']").attrib[
++ "value"
++ ],
++ "18",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["cpus"],
++ ",".join([str(i) for i in range(7, 13)]),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memory"],
++ str(int(2 * 1024 ** 3)),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").get("unit"), "bytes",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["discard"], "yes"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memAccess"], "shared"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "23",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "24",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "25",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='3']").attrib[
++ "value"
++ ],
++ "26",
++ )
++
++ numa_cell_atr_none = {
++ "numa": {
++ "0": {
++ "cpus": "0-6",
++ "memory": "512m",
++ "discard": False,
++ "distances": {0: 15, 2: 17, 3: 18},
++ },
++ "1": {
++ "cpus": "7-12",
++ "memory": "2g",
++ "discard": True,
++ "distances": {0: 23, 1: 24, 2: 25},
++ },
++ }
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_existing_param", cpu=numa_cell_atr_none),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["cpus"],
++ ",".join([str(i) for i in range(0, 7)]),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib["memory"],
++ str(512 * 1024 ** 2),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").get("unit"), "bytes",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']").attrib.get("discard"), "no"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "15",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='1']"), None
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "17",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='3']").attrib[
++ "value"
++ ],
++ "18",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["cpus"],
++ ",".join([str(i) for i in range(7, 13)]),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memory"],
++ str(int(2 * 1024 ** 3)),
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["discard"], "yes"
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "23",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "24",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "25",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='3']"), None
++ )
++
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["cpus"],
++ ",".join([str(i) for i in range(7, 13)]),
+ )
+-
+- setxml = ET.fromstring(define_mock_boot.call_args[0][0])
+ self.assertEqual(
+- setxml.find("os").find("loader").text, "/usr/share/new/OVMF_CODE.fd"
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memory"],
++ str(int(1024 ** 3 * 2)),
+ )
+- self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
+- self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
+ self.assertEqual(
+- setxml.find("os").find("nvram").attrib["template"],
+- "/usr/share/new/OVMF_VARS.ms.fd",
++ setxml.find("./cpu/numa/cell/[@id='1']").attrib["discard"], "yes"
+ )
+-
+- kernel_none = {
+- "kernel": None,
+- "initrd": None,
+- "cmdline": None,
+- }
+-
+- uefi_none = {"loader": None, "nvram": None}
+-
+ self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("vm_with_boot_param", boot=kernel_none),
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='0']").attrib[
++ "value"
++ ],
++ "23",
+ )
+-
+- setxml = ET.fromstring(define_mock_boot.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel"), None)
+- self.assertEqual(setxml.find("os").find("initrd"), None)
+- self.assertEqual(setxml.find("os").find("cmdline"), None)
+-
+ self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("vm_with_boot_param", boot={"efi": False}),
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='1']").attrib[
++ "value"
++ ],
++ "24",
+ )
+- setxml = ET.fromstring(define_mock_boot.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("nvram"), None)
+- self.assertEqual(setxml.find("os").find("loader"), None)
+-
+ self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("vm_with_boot_param", boot=uefi_none),
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='2']").attrib[
++ "value"
++ ],
++ "25",
++ )
++ self.assertEqual(
++ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='3']"), None,
+ )
+-
+- setxml = ET.fromstring(define_mock_boot.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("loader"), None)
+- self.assertEqual(setxml.find("os").find("nvram"), None)
+
+ def test_update_memtune_params(self):
+ """
+@@ -2965,6 +4801,517 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(setxml.find("currentMemory").text, str(int(1 * 1024 ** 2)))
+ self.assertEqual(setxml.find("memory").text, str(int(1 * 1024 ** 2)))
+
++ def test_update_exist_memorybacking_params(self):
++ """
++ Test virt.update() with memory backing parameters.
++ """
++ xml_with_memback_params = """
++
++ vm_with_memback_param
++ 1048576
++ 1048576
++ 1048576
++ 1
++
++
++
++
++
++
++
++
++
++
++
++
++
++ hvm
++
++ restart
++
++ """
++ domain_mock = self.set_mock_vm("vm_with_memback_param", xml_with_memback_params)
++ domain_mock.OSType = MagicMock(return_value="hvm")
++ define_mock = MagicMock(return_value=True)
++ self.mock_conn.defineXML = define_mock
++
++ # update memory backing case
++ mem_back_param = {
++ "hugepages": [
++ {"nodeset": "1-4,^3", "size": "1g"},
++ {"nodeset": "3", "size": "2g"},
++ ],
++ "nosharepages": None,
++ "locked": None,
++ "source": "anonymous",
++ "access": "private",
++ "allocation": "ondemand",
++ "discard": None,
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_memback_param", mem=mem_back_param),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertDictEqual(
++ {
++ p.get("nodeset"): {"size": p.get("size"), "unit": p.get("unit")}
++ for p in setxml.findall("memoryBacking/hugepages/page")
++ },
++ {
++ "1,2,4": {"size": str(1024 ** 3), "unit": "bytes"},
++ "3": {"size": str(2 * 1024 ** 3), "unit": "bytes"},
++ },
++ )
++ self.assertEqual(setxml.find("./memoryBacking/nosharepages"), None)
++ self.assertEqual(setxml.find("./memoryBacking/locked"), None)
++ self.assertEqual(
++ setxml.find("./memoryBacking/source").attrib["type"], "anonymous"
++ )
++ self.assertEqual(
++ setxml.find("./memoryBacking/access").attrib["mode"], "private"
++ )
++ self.assertEqual(
++ setxml.find("./memoryBacking/allocation").attrib["mode"], "ondemand"
++ )
++ self.assertEqual(setxml.find("./memoryBacking/discard"), None)
++
++ unchanged_page = {
++ "hugepages": [
++ {"size": "2m"},
++ {"nodeset": "1-4,^3", "size": "3g"},
++ {"nodeset": "3", "size": "1g"},
++ ],
++ }
++
++ self.assertEqual(
++ {
++ "definition": False,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("vm_with_memback_param", mem=unchanged_page),
++ )
++
++ def test_update_iothreads_params(self):
++ """
++ Test virt.update() with iothreads parameters.
++ """
++        xml_with_iothreads_params = """
++            <domain type='kvm'>
++                <name>xml_with_iothreads_params</name>
++                <memory unit='KiB'>1048576</memory>
++                <currentMemory unit='KiB'>1048576</currentMemory>
++                <maxMemory slots='12' unit='KiB'>1048576</maxMemory>
++                <vcpu placement='auto'>1</vcpu>
++                <iothreads>6</iothreads>
++                <os>
++                    <type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
++                </os>
++            </domain>
++        """
++ domain_mock = self.set_mock_vm(
++ "xml_with_iothreads_params", xml_with_iothreads_params
++ )
++ domain_mock.OSType = MagicMock(return_value="hvm")
++ define_mock = MagicMock(return_value=True)
++ self.mock_conn.defineXML = define_mock
++
++ # test updating existing iothreads
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("xml_with_iothreads_params", cpu={"iothreads": 7}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("iothreads").text, "7")
++
++ def test_update_cputune_parameters(self):
++ """
++ Test virt.update() with cputune parameters.
++ """
++ xml_with_cputune_params = """
++
++ xml_with_cputune_params
++ 1048576
++ 1048576
++ 1048576
++ 1
++ 4
++
++ 2048
++ 1000000
++ -1
++ 1000000
++ -1
++ 1000000
++ -1
++ 1000000
++ -1
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ hvm
++
++
++ """
++ domain_mock = self.set_mock_vm(
++ "xml_with_cputune_params", xml_with_cputune_params
++ )
++ domain_mock.OSType = MagicMock(return_value="hvm")
++ define_mock = MagicMock(return_value=True)
++ self.mock_conn.defineXML = define_mock
++
++ # test updating existing cputune parameters
++ cputune = {
++ "shares": 1024,
++ "period": 5000,
++ "quota": -20,
++ "global_period": 4000,
++ "global_quota": -30,
++ "emulator_period": 3000,
++ "emulator_quota": -4,
++ "iothread_period": 7000,
++ "iothread_quota": -5,
++ "vcpupin": {0: "1-4,^2", 1: "0,1", 2: "2,3", 3: "0,4"},
++ "emulatorpin": "1-3",
++ "iothreadpin": {1: "5-6", 2: "7-8"},
++ "vcpusched": [
++ {"scheduler": "fifo", "priority": 1, "vcpus": "0"},
++ {"scheduler": "fifo", "priority": 2, "vcpus": "1"},
++ {"scheduler": "idle", "priority": 3, "vcpus": "2"},
++ ],
++ "iothreadsched": [
++ {"scheduler": "batch", "iothreads": "5-7", "priority": 1}
++ ],
++ "emulatorsched": {"scheduler": "rr", "priority": 2},
++ "cachetune": {
++ "0-3": {
++ 0: {"level": 3, "type": "both", "size": 3},
++ 1: {"level": 3, "type": "both", "size": 3},
++ "monitor": {1: 3, "0-3": 3},
++ },
++ "4-5": {"monitor": {4: 3, 5: 2}},
++ },
++ "memorytune": {"0-2": {0: 60}, "3-4": {0: 50, 1: 70}},
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("xml_with_cputune_params", cpu={"tuning": cputune}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cputune").find("shares").text, "1024")
++ self.assertEqual(setxml.find("cputune").find("period").text, "5000")
++ self.assertEqual(setxml.find("cputune").find("quota").text, "-20")
++ self.assertEqual(setxml.find("cputune").find("global_period").text, "4000")
++ self.assertEqual(setxml.find("cputune").find("global_quota").text, "-30")
++ self.assertEqual(setxml.find("cputune").find("emulator_period").text, "3000")
++ self.assertEqual(setxml.find("cputune").find("emulator_quota").text, "-4")
++ self.assertEqual(setxml.find("cputune").find("iothread_period").text, "7000")
++ self.assertEqual(setxml.find("cputune").find("iothread_quota").text, "-5")
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"),
++ "1,3,4",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='1']").attrib.get("cpuset"),
++ "0,1",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"),
++ "2,3",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='3']").attrib.get("cpuset"),
++ "0,4",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("emulatorpin").attrib.get("cpuset"), "1,2,3"
++ )
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("iothreadpin[@iothread='1']")
++ .attrib.get("cpuset"),
++ "5,6",
++ )
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("iothreadpin[@iothread='2']")
++ .attrib.get("cpuset"),
++ "7,8",
++ )
++ self.assertDictEqual(
++ {
++ s.get("vcpus"): {
++ "scheduler": s.get("scheduler"),
++ "priority": s.get("priority"),
++ }
++ for s in setxml.findall("cputune/vcpusched")
++ },
++ {
++ "0": {"scheduler": "fifo", "priority": "1"},
++ "1": {"scheduler": "fifo", "priority": "2"},
++ "2": {"scheduler": "idle", "priority": "3"},
++ },
++ )
++ self.assertDictEqual(
++ {
++ s.get("iothreads"): {
++ "scheduler": s.get("scheduler"),
++ "priority": s.get("priority"),
++ }
++ for s in setxml.findall("cputune/iothreadsched")
++ },
++ {"5,6,7": {"scheduler": "batch", "priority": "1"}},
++ )
++ self.assertEqual(setxml.find("cputune/emulatorsched").get("scheduler"), "rr")
++ self.assertEqual(setxml.find("cputune/emulatorsched").get("priority"), "2")
++ self.assertIsNotNone(setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']"))
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("type"),
++ "both",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertNotEqual(
++ setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']"),
++ None,
++ )
++ self.assertNotEqual(
++ setxml.find("./cputune/cachetune[@vcpus='4,5']").attrib.get("vcpus"), None
++ )
++ self.assertEqual(
++ setxml.find("./cputune/cachetune[@vcpus='4,5']/cache[@id='0']"), None
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='4']"
++ ).attrib.get("level"),
++ "3",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='5']"
++ ).attrib.get("level"),
++ "2",
++ )
++ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='0,1,2']"), None)
++ self.assertEqual(
++ setxml.find(
++ "./cputune/memorytune[@vcpus='0,1,2']/node[@id='0']"
++ ).attrib.get("bandwidth"),
++ "60",
++ )
++ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='3,4']"), None)
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='0']").attrib.get(
++ "bandwidth"
++ ),
++ "50",
++ )
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='1']").attrib.get(
++ "bandwidth"
++ ),
++ "70",
++ )
++
++ # test removing cputune attributes and sub elements
++ cputune = {
++ "shares": None,
++ "period": 20000,
++ "quota": None,
++ "global_period": 5000,
++ "global_quota": None,
++ "emulator_period": 2000,
++ "emulator_quota": -4,
++ "iothread_period": None,
++ "iothread_quota": -5,
++ "vcpupin": {0: "1-4,^2", 2: "2,4"},
++ "emulatorpin": None,
++ "iothreadpin": {1: "5-6"},
++ "vcpusched": [{"scheduler": "idle", "priority": 5, "vcpus": "1"}],
++ "iothreadsched": None,
++ "cachetune": {
++ "0-3": {
++ 0: {"level": 4, "type": "data", "size": 7},
++ "monitor": {"1-2": 11},
++ },
++ },
++ "memorytune": {"3-4": {0: 37, 1: 73}},
++ }
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("xml_with_cputune_params", cpu={"tuning": cputune}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cputune").find("shares"), None)
++ self.assertEqual(setxml.find("cputune").find("period").text, "20000")
++ self.assertEqual(setxml.find("cputune").find("quota"), None)
++ self.assertEqual(setxml.find("cputune").find("global_period").text, "5000")
++ self.assertEqual(setxml.find("cputune").find("global_quota"), None)
++ self.assertEqual(setxml.find("cputune").find("emulator_period").text, "2000")
++ self.assertEqual(setxml.find("cputune").find("emulator_quota").text, "-4")
++ self.assertEqual(setxml.find("cputune").find("iothread_period"), None)
++ self.assertEqual(setxml.find("cputune").find("iothread_quota").text, "-5")
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"),
++ "1,3,4",
++ )
++ self.assertEqual(setxml.find("cputune").find("vcpupin[@vcpu='1']"), None)
++ self.assertEqual(
++ setxml.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"),
++ "2,4",
++ )
++ self.assertEqual(setxml.find("cputune").find("vcpupin[@vcpu='3']"), None)
++ self.assertEqual(setxml.find("cputune").find("emulatorpin"), None)
++ self.assertEqual(
++ setxml.find("cputune")
++ .find("iothreadpin[@iothread='1']")
++ .attrib.get("cpuset"),
++ "5,6",
++ )
++ self.assertEqual(
++ setxml.find("cputune").find("iothreadpin[@iothread='2']"), None
++ )
++ self.assertDictEqual(
++ {
++ s.get("vcpus"): {
++ "scheduler": s.get("scheduler"),
++ "priority": s.get("priority"),
++ }
++ for s in setxml.findall("cputune/vcpusched")
++ },
++ {"1": {"scheduler": "idle", "priority": "5"}},
++ )
++ self.assertEqual(setxml.find("cputune").find("iothreadsched"), None)
++ self.assertIsNotNone(setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']"))
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("size"),
++ "7",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("level"),
++ "4",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']"
++ ).attrib.get("type"),
++ "data",
++ )
++ self.assertEqual(
++ setxml.find(
++ "./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1,2']"
++ ).attrib.get("level"),
++ "11",
++ )
++ self.assertEqual(
++ setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='3,4']"),
++ None,
++ )
++ self.assertEqual(
++ setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='1']"), None
++ )
++ self.assertEqual(setxml.find("./cputune/cachetune[@vcpus='4,5']"), None)
++ self.assertEqual(setxml.find("./cputune/memorytune[@vcpus='0,1,2']"), None)
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='0']").attrib.get(
++ "bandwidth"
++ ),
++ "37",
++ )
++ self.assertEqual(
++ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='1']").attrib.get(
++ "bandwidth"
++ ),
++ "73",
++ )
++
++ cputune_subelement = {
++ "vcpupin": None,
++ "iothreadpin": None,
++ "vcpusched": None,
++ "iothreadsched": None,
++ "cachetune": None,
++ "memorytune": None,
++ }
++
++ self.assertEqual(
++ {
++ "definition": True,
++ "disk": {"attached": [], "detached": [], "updated": []},
++ "interface": {"attached": [], "detached": []},
++ },
++ virt.update("xml_with_cputune_params", cpu={"tuning": cputune_subelement}),
++ )
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ self.assertEqual(setxml.find("cputune").find("vcpupin"), None)
++ self.assertEqual(setxml.find("cputune").find("iothreadpin"), None)
++ self.assertEqual(setxml.find("cputune").find("vcpusched"), None)
++ self.assertEqual(setxml.find("cputune").find("iothreadsched"), None)
++ self.assertEqual(setxml.find("cputune").find("cachetune"), None)
++ self.assertEqual(setxml.find("cputune").find("memorytune"), None)
++
+ def test_handle_unit(self):
+ """
+ Test regex function for handling units
+diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
+index 1923ae5c0f..dadc6dd08e 100644
+--- a/tests/unit/states/test_virt.py
++++ b/tests/unit/states/test_virt.py
+@@ -327,6 +327,14 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ "type": "spice",
+ "listen": {"type": "address", "address": "192.168.0.1"},
+ }
++ serials = [
++ {"type": "tcp", "port": 22223, "protocol": "telnet"},
++ {"type": "pty"},
++ ]
++ consoles = [
++ {"type": "tcp", "port": 22223, "protocol": "telnet"},
++ {"type": "pty"},
++ ]
+ self.assertDictEqual(
+ virt.defined(
+ "myvm",
+@@ -345,10 +353,14 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ install=False,
+ pub_key="/path/to/key.pub",
+ priv_key="/path/to/key",
++ hypervisor_features={"kvm-hint-dedicated": True},
++ clock={"utc": True},
+ stop_on_reboot=True,
+ connection="someconnection",
+ username="libvirtuser",
+ password="supersecret",
++ serials=serials,
++ consoles=consoles,
+ ),
+ ret,
+ )
+@@ -367,14 +379,19 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ hypervisor="qemu",
+ seed=False,
+ boot=None,
++ numatune=None,
+ install=False,
+ start=False,
+ pub_key="/path/to/key.pub",
+ priv_key="/path/to/key",
++ hypervisor_features={"kvm-hint-dedicated": True},
++ clock={"utc": True},
+ stop_on_reboot=True,
+ connection="someconnection",
+ username="libvirtuser",
+ password="supersecret",
++ serials=serials,
++ consoles=consoles,
+ )
+
+ # Working update case when running
+@@ -484,7 +501,12 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=False,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+
+@@ -597,8 +619,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=True,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+
+@@ -633,8 +660,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=True,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+
+@@ -701,6 +733,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ os_type=None,
+ arch=None,
+ boot=None,
++ numatune=None,
+ disk=None,
+ disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
+ nic=None,
+@@ -713,10 +746,14 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ pub_key=None,
+ priv_key=None,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
+ stop_on_reboot=False,
+ connection=None,
+ username=None,
+ password=None,
++ serials=None,
++ consoles=None,
+ )
+ start_mock.assert_called_with(
+ "myvm", connection=None, username=None, password=None
+@@ -797,15 +834,20 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ hypervisor="qemu",
+ seed=False,
+ boot=None,
++ numatune=None,
+ install=False,
+ start=False,
+ pub_key="/path/to/key.pub",
+ priv_key="/path/to/key",
+ boot_dev="network hd",
++ hypervisor_features=None,
++ clock=None,
+ stop_on_reboot=True,
+ connection="someconnection",
+ username="libvirtuser",
+ password="supersecret",
++ serials=None,
++ consoles=None,
+ )
+ start_mock.assert_called_with(
+ "myvm",
+@@ -946,8 +988,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=False,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+
+@@ -1067,8 +1114,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=True,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+ start_mock.assert_not_called()
+@@ -1105,8 +1157,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ username=None,
+ password=None,
+ boot=None,
++ numatune=None,
+ test=True,
+ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
+ stop_on_reboot=False,
+ )
+
+--
+2.29.2
+
+
diff --git a/open-suse-3002.2-virt-network-311.patch b/open-suse-3002.2-virt-network-311.patch
new file mode 100644
index 0000000..b655bd0
--- /dev/null
+++ b/open-suse-3002.2-virt-network-311.patch
@@ -0,0 +1,8842 @@
+From ff3273ffb5be499d14a0023b8b9f8baed133807b Mon Sep 17 00:00:00 2001
+From: Cedric Bosdonnat
+Date: Tue, 12 Jan 2021 11:28:24 +0100
+Subject: [PATCH] Open suse 3002.2 virt network (#311)
+
+* Bump to `pytest-salt-factories >= 0.120.0`
+
+* Switch to using the pytest-salt-factories loader mock support
+
+* Fix the new iothreads virtual disk parameter
+
+io_uring has recently been added as another IO policy on virtual disks.
+Keep the parameter open for future changes. Also add an optional
+iothread ID in case the user wants to pin a disk to a specific IO
+thread (and thus to a CPU).
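+
+As a rough sketch of the resulting disk parameter shape (mirroring the
+unit tests in this patch rather than documenting a stable API):
+
+    # request an IO thread for the new disk; virt.update() then emits a
+    # <driver io='threads'/> element in the domain XML
+    virt.update(
+        "my_vm",
+        disk_profile="default",
+        disks=[{"name": "added", "size": 2048, "iothreads": True}],
+    )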
+
+* Remove deprecated update parameter in virt.defined and virt.running
+
+* Fix indentation of Jinja instructions in libvirt_domain.jinja
+
+* Use more opt_attribute macro in libvirt_domain.jinja
+
+* Unify XML indentation in libvirt_domain.jinja
+
+* Move virt network generation tests to pytest
+
+* Extract XML space stripping function from virt module
+
+Stripping spaces and indentation from XML could also be useful in other
+places, so move the function to xmlutil to make it reusable.
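+
+A minimal sketch of such a helper, assuming a name like strip_spaces
+(the exact name and location are defined by the patch itself, in
+salt/utils/xmlutil.py):
+
+    def strip_spaces(node):
+        # drop whitespace-only text and tails so that two XML trees can
+        # be compared without caring about indentation
+        if node.text is not None and not node.text.strip():
+            node.text = None
+        if node.tail is not None and not node.tail.strip():
+            node.tail = None
+        for child in node:
+            strip_spaces(child)
+        return node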
+
+* Fix link in virt state documentation
+
+* Extract the XML cleanup code from virt.pool_update
+
+The network_update code will be rather similar to the pool_update one.
+In order to share the XML tree cleanup for easier comparisons, create a
+helper function in the virt module.
+
+* virt: expose more properties in virt.network_define
+
+In order to let users define more types of virtual networks, expose more
+of the libvirt virtual network properties.
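+
+A hypothetical example of the kind of call this enables (mtu is an
+assumed property name here; the authoritative parameter list lives in
+salt/modules/virt.py):
+
+    virt.network_define(
+        "my-net",
+        "br0",
+        "bridge",
+        mtu=9000,  # assumed new property
+        autostart=True,
+    )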
+
+* Remove useless code in virt pytest fixture
+
+* Add virt.network_update function
+
+In order to enhance the virt.network_defined state, a function is needed
+to test whether an update is required and to apply it to the network.
+This is done by the newly added virt.network_update function.
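+
+A hypothetical usage sketch, assuming the signature mirrors
+virt.network_define plus a test flag:
+
+    # returns whether the network definition needed changing; with
+    # test=True nothing is actually applied
+    needs_update = virt.network_update("my-net", "br0", "bridge", test=True)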
+
+* Convert the virt network state unit tests to pytest
+
+Converting these tests helped reducing the number of lines of code
+thanks to the pytest parametrize feature. This is also the occasion to
+split the big tests into smaller ones to report more meaningfull errors
+and make it more readable.
+
+* Let virt.network_defined state change existing networks
+
+Instead of simply reporting existing networks, update them if needed,
+as other states do. Also bubble up the new properties from the
+virt.network_define() function.
+
+* Add virt.node_devices function
+
+For users to be able to pass host devices through, they need to get a
+list of the devices that can be passed.
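+
+A short usage sketch:
+
+    # list the host devices (PCI, USB, ...) that can be passed through
+    devices = virt.node_devices()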
+
+* virt: add PCI and USB host devices support to virt init and update
+
+In quite a few cases it may be useful to pass a PCI or USB device from
+the host to the VM. Add support for this in the virt.init() and
+virt.update() functions.
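+
+A hypothetical sketch of the new parameter (the device names below are
+illustrative):
+
+    virt.init("my_vm", 2, 2048, host_devices=["pci_0000_02_10_0"])
+    virt.update("my_vm", host_devices=["pci_0000_02_10_0", "usb_3_4"])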
+
+* Convert virt domain state unit tests to pytest
+
+While converting the virt domain-related states to pytest I realized
+the __opts__["test"] == False case was not handled in some of them.
+This commit also fixes the return code for the virt.shutdown,
+virt.powered_off, virt.snapshot and virt.rebooted states, and it
+prevents the actual call from being issued in test mode.
+
+* Add host_devices to virt running and defined states
+
+Expose the new host_devices parameter to the virt.running and
+virt.defined states.
+
+* Convert virt _diff_nics() unit test to pytest
+
+* virt: better compare NICs of running VMs
+
+On a running guest, libvirt changes the XML definition of network
+interfaces of type "network" to the type of the underlying network (for
+instance bridge). In such a case the virt.update() function would
+consider the two NICs different even when they are not, so we need to
+try harder when comparing them.
+
+* virt: hostdev network fixes
+
+A network with hostdev forward mode has no bridge and no MAC address, so
+we need to handle this in a few places in the virt module.
+
+* virt: extract the live update code from the update function
+
+In order to help reuse the code computing device changes and to avoid a
+giant virt.update(), move its live update logic into a dedicated
+internal function.
+
+* virt: better handle comparison of hostdev NIC interfaces
+
+When a domain has a NIC of type network pointing to a network with
+hostdev forward, libvirt rewrites its running XML definition into a
+hostdev interface carrying one of the PCI addresses from the network.
+
+Handle this case to avoid useless interface detaching / attaching.
+
+* virt: better compare hostdev networks
+
+Libvirt adds the PCI addresses of the SR-IOV device's virtual functions
+when only the physical function is provided. Those need to be removed in
+order to avoid reporting network changes for no reason in
+virt.network_update().
+
+* Add xmlutil function dumping a node into a string
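+
+A minimal sketch of what such a helper can look like (the actual name
+and implementation in salt/utils/xmlutil.py may differ):
+
+    import xml.etree.ElementTree as ET
+
+    def element_to_str(node):
+        # serialize an XML node and its children into a unicode string
+        return ET.tostring(node).decode("utf-8")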
+
+Co-authored-by: Pedro Algarvio
+---
+ changelog/59143.added | 1 +
+ requirements/pytest.txt | 2 +-
+ requirements/static/ci/py3.5/darwin.txt | 2 +-
+ requirements/static/ci/py3.5/freebsd.txt | 2 +-
+ requirements/static/ci/py3.5/linux.txt | 2 +-
+ requirements/static/ci/py3.5/windows.txt | 2 +-
+ requirements/static/ci/py3.6/darwin.txt | 2 +-
+ requirements/static/ci/py3.6/freebsd.txt | 2 +-
+ requirements/static/ci/py3.6/linux.txt | 2 +-
+ requirements/static/ci/py3.6/windows.txt | 2 +-
+ requirements/static/ci/py3.7/darwin.txt | 2 +-
+ requirements/static/ci/py3.7/freebsd.txt | 2 +-
+ requirements/static/ci/py3.7/linux.txt | 2 +-
+ requirements/static/ci/py3.7/windows.txt | 2 +-
+ requirements/static/ci/py3.8/darwin.txt | 2 +-
+ requirements/static/ci/py3.8/freebsd.txt | 2 +-
+ requirements/static/ci/py3.8/linux.txt | 2 +-
+ requirements/static/ci/py3.9/darwin.txt | 2 +-
+ requirements/static/ci/py3.9/freebsd.txt | 2 +-
+ requirements/static/ci/py3.9/linux.txt | 2 +-
+ salt/modules/virt.py | 1260 +++++++++---
+ salt/states/virt.py | 477 ++++-
+ salt/templates/virt/libvirt_domain.jinja | 646 ++++---
+ salt/templates/virt/libvirt_macros.jinja | 3 +
+ salt/templates/virt/libvirt_network.jinja | 98 +-
+ salt/utils/xmlutil.py | 29 +
+ tests/conftest.py | 2 +-
+ tests/pytests/functional/modules/test_opkg.py | 8 +-
+ tests/pytests/unit/beacons/test_sensehat.py | 8 +-
+ tests/pytests/unit/beacons/test_status.py | 8 +-
+ .../pytests/unit/modules/test_alternatives.py | 8 +-
+ .../pytests/unit/modules/test_ansiblegate.py | 13 +-
+ tests/pytests/unit/modules/test_archive.py | 8 +-
+ .../pytests/unit/modules/test_azurearm_dns.py | 8 +-
+ tests/pytests/unit/modules/test_nilrt_ip.py | 8 +-
+ tests/pytests/unit/modules/test_opkg.py | 8 +-
+ .../pytests/unit/modules/test_restartcheck.py | 8 +-
+ .../unit/modules/test_slackware_service.py | 12 +-
+ tests/pytests/unit/modules/test_swarm.py | 10 +-
+ tests/pytests/unit/modules/test_tls.py | 12 +-
+ tests/pytests/unit/modules/virt/conftest.py | 88 +-
+ .../pytests/unit/modules/virt/test_domain.py | 473 ++++-
+ .../pytests/unit/modules/virt/test_helpers.py | 25 +
+ tests/pytests/unit/modules/virt/test_host.py | 219 +++
+ .../pytests/unit/modules/virt/test_network.py | 450 +++++
+ tests/pytests/unit/output/test_highstate.py | 8 +-
+ .../pytests/unit/states/test_alternatives.py | 8 +-
+ tests/pytests/unit/states/test_ini_manage.py | 24 +-
+ tests/pytests/unit/states/virt/__init__.py | 0
+ tests/pytests/unit/states/virt/conftest.py | 36 +
+ tests/pytests/unit/states/virt/test_domain.py | 840 ++++++++
+ .../pytests/unit/states/virt/test_helpers.py | 99 +
+ .../pytests/unit/states/virt/test_network.py | 476 +++++
+ tests/pytests/unit/utils/test_xmlutil.py | 14 +
+ tests/unit/modules/test_linux_sysctl.py | 173 --
+ tests/unit/modules/test_virt.py | 137 +-
+ tests/unit/states/test_virt.py | 1703 +----------------
+ 57 files changed, 4689 insertions(+), 2757 deletions(-)
+ create mode 100644 changelog/59143.added
+ create mode 100644 salt/templates/virt/libvirt_macros.jinja
+ create mode 100644 tests/pytests/unit/modules/virt/test_host.py
+ create mode 100644 tests/pytests/unit/modules/virt/test_network.py
+ create mode 100644 tests/pytests/unit/states/virt/__init__.py
+ create mode 100644 tests/pytests/unit/states/virt/conftest.py
+ create mode 100644 tests/pytests/unit/states/virt/test_domain.py
+ create mode 100644 tests/pytests/unit/states/virt/test_helpers.py
+ create mode 100644 tests/pytests/unit/states/virt/test_network.py
+ delete mode 100644 tests/unit/modules/test_linux_sysctl.py
+
+diff --git a/changelog/59143.added b/changelog/59143.added
+new file mode 100644
+index 0000000000..802e925a53
+--- /dev/null
++++ b/changelog/59143.added
+@@ -0,0 +1 @@
++Add more network and PCI/USB host devices passthrough support to virt module and states
+diff --git a/requirements/pytest.txt b/requirements/pytest.txt
+index 96faa73c27..77d60767d1 100644
+--- a/requirements/pytest.txt
++++ b/requirements/pytest.txt
+@@ -2,6 +2,6 @@ mock >= 3.0.0
+ # PyTest
+ pytest >= 6.1.0
+ pytest-salt
+-pytest-salt-factories >= 0.93.0
++pytest-salt-factories >= 0.120.0
+ pytest-tempdir >= 2019.10.12
+ pytest-helpers-namespace >= 2019.1.8
+diff --git a/requirements/static/ci/py3.5/darwin.txt b/requirements/static/ci/py3.5/darwin.txt
+index acfb43b542..efaac38353 100644
+--- a/requirements/static/ci/py3.5/darwin.txt
++++ b/requirements/static/ci/py3.5/darwin.txt
+@@ -89,7 +89,7 @@ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.5/freebsd.txt b/requirements/static/ci/py3.5/freebsd.txt
+index 868cea5220..d4faa715c9 100644
+--- a/requirements/static/ci/py3.5/freebsd.txt
++++ b/requirements/static/ci/py3.5/freebsd.txt
+@@ -91,7 +91,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.5/linux.txt b/requirements/static/ci/py3.5/linux.txt
+index c6b57bf491..6b64d844db 100644
+--- a/requirements/static/ci/py3.5/linux.txt
++++ b/requirements/static/ci/py3.5/linux.txt
+@@ -184,7 +184,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.5/windows.txt b/requirements/static/ci/py3.5/windows.txt
+index 8646edac12..3de8e54de0 100644
+--- a/requirements/static/ci/py3.5/windows.txt
++++ b/requirements/static/ci/py3.5/windows.txt
+@@ -82,7 +82,7 @@ pymysql==0.9.3
+ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via packaging
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.6/darwin.txt b/requirements/static/ci/py3.6/darwin.txt
+index 223ae11a0a..cf560de09d 100644
+--- a/requirements/static/ci/py3.6/darwin.txt
++++ b/requirements/static/ci/py3.6/darwin.txt
+@@ -94,7 +94,7 @@ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.6/freebsd.txt b/requirements/static/ci/py3.6/freebsd.txt
+index 6493dd4c8f..13a7678376 100644
+--- a/requirements/static/ci/py3.6/freebsd.txt
++++ b/requirements/static/ci/py3.6/freebsd.txt
+@@ -96,7 +96,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.6/linux.txt b/requirements/static/ci/py3.6/linux.txt
+index 3317837a35..55800bfa25 100644
+--- a/requirements/static/ci/py3.6/linux.txt
++++ b/requirements/static/ci/py3.6/linux.txt
+@@ -188,7 +188,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.6/windows.txt b/requirements/static/ci/py3.6/windows.txt
+index eae87eadb1..325e6ec969 100644
+--- a/requirements/static/ci/py3.6/windows.txt
++++ b/requirements/static/ci/py3.6/windows.txt
+@@ -81,7 +81,7 @@ pymysql==0.9.3
+ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via packaging
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.7/darwin.txt b/requirements/static/ci/py3.7/darwin.txt
+index d7c43ab796..8411522975 100644
+--- a/requirements/static/ci/py3.7/darwin.txt
++++ b/requirements/static/ci/py3.7/darwin.txt
+@@ -92,7 +92,7 @@ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.7/freebsd.txt b/requirements/static/ci/py3.7/freebsd.txt
+index 8c7a7df48b..98c4c85dfe 100644
+--- a/requirements/static/ci/py3.7/freebsd.txt
++++ b/requirements/static/ci/py3.7/freebsd.txt
+@@ -94,7 +94,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.7/linux.txt b/requirements/static/ci/py3.7/linux.txt
+index 9c6a5139b2..c3490e6ba6 100644
+--- a/requirements/static/ci/py3.7/linux.txt
++++ b/requirements/static/ci/py3.7/linux.txt
+@@ -186,7 +186,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.7/windows.txt b/requirements/static/ci/py3.7/windows.txt
+index 7ca5bc9b49..53b5db2734 100644
+--- a/requirements/static/ci/py3.7/windows.txt
++++ b/requirements/static/ci/py3.7/windows.txt
+@@ -79,7 +79,7 @@ pymysql==0.9.3
+ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via packaging
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.8/darwin.txt b/requirements/static/ci/py3.8/darwin.txt
+index f410432e54..541fd4c2d6 100644
+--- a/requirements/static/ci/py3.8/darwin.txt
++++ b/requirements/static/ci/py3.8/darwin.txt
+@@ -91,7 +91,7 @@ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.8/freebsd.txt b/requirements/static/ci/py3.8/freebsd.txt
+index d0c20f466c..6030e259d1 100644
+--- a/requirements/static/ci/py3.8/freebsd.txt
++++ b/requirements/static/ci/py3.8/freebsd.txt
+@@ -93,7 +93,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.8/linux.txt b/requirements/static/ci/py3.8/linux.txt
+index 9ae7e8957e..da66159c3e 100644
+--- a/requirements/static/ci/py3.8/linux.txt
++++ b/requirements/static/ci/py3.8/linux.txt
+@@ -186,7 +186,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.9/darwin.txt b/requirements/static/ci/py3.9/darwin.txt
+index 3e6b92586d..50a3c95995 100644
+--- a/requirements/static/ci/py3.9/darwin.txt
++++ b/requirements/static/ci/py3.9/darwin.txt
+@@ -91,7 +91,7 @@ pyopenssl==19.0.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.9/freebsd.txt b/requirements/static/ci/py3.9/freebsd.txt
+index 48da272966..08e5e3c51e 100644
+--- a/requirements/static/ci/py3.9/freebsd.txt
++++ b/requirements/static/ci/py3.9/freebsd.txt
+@@ -93,7 +93,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/requirements/static/ci/py3.9/linux.txt b/requirements/static/ci/py3.9/linux.txt
+index ae6683ea03..d11c63ce7a 100644
+--- a/requirements/static/ci/py3.9/linux.txt
++++ b/requirements/static/ci/py3.9/linux.txt
+@@ -186,7 +186,7 @@ pyopenssl==19.1.0
+ pyparsing==2.4.5 # via junos-eznc, packaging
+ pyserial==3.4 # via junos-eznc, netmiko
+ pytest-helpers-namespace==2019.1.8
+-pytest-salt-factories==0.93.0
++pytest-salt-factories==0.120.0
+ pytest-salt==2020.1.27
+ pytest-tempdir==2019.10.12
+ pytest==6.1.1
+diff --git a/salt/modules/virt.py b/salt/modules/virt.py
+index b852f8175d..9f61983e8d 100644
+--- a/salt/modules/virt.py
++++ b/salt/modules/virt.py
+@@ -428,7 +428,8 @@ def _get_nics(dom):
+ Get domain network interfaces from a libvirt domain object.
+ """
+ nics = {}
+- doc = ElementTree.fromstring(dom.XMLDesc(0))
++ # Don't expose the active configuration since it may be changed by libvirt
++ doc = ElementTree.fromstring(dom.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
+ for iface_node in doc.findall("devices/interface"):
+ nic = {}
+ nic["type"] = iface_node.get("type")
+@@ -814,6 +815,7 @@ def _gen_xml(
+ serials=None,
+ consoles=None,
+ stop_on_reboot=False,
++ host_devices=None,
+ **kwargs
+ ):
+ """
+@@ -953,7 +955,8 @@ def _gen_xml(
+ "disk_bus": disk["model"],
+ "format": disk.get("format", "raw"),
+ "index": str(i),
+- "io": "threads" if disk.get("iothreads", False) else "native",
++ "io": disk.get("io", "native"),
++ "iothread": disk.get("iothread_id", None),
+ }
+ targets.append(disk_context["target_dev"])
+ if disk.get("source_file"):
+@@ -1001,6 +1004,44 @@ def _gen_xml(
+ context["disks"].append(disk_context)
+ context["nics"] = nicp
+
++ # Process host device passthrough
++ hostdev_context = []
++ try:
++ for hostdev_name in host_devices or []:
++ hostdevice = conn.nodeDeviceLookupByName(hostdev_name)
++ doc = ElementTree.fromstring(hostdevice.XMLDesc())
++ if "pci" in hostdevice.listCaps():
++ hostdev_context.append(
++ {
++ "type": "pci",
++ "domain": "0x{:04x}".format(
++ int(doc.find("./capability[@type='pci']/domain").text)
++ ),
++ "bus": "0x{:02x}".format(
++ int(doc.find("./capability[@type='pci']/bus").text)
++ ),
++ "slot": "0x{:02x}".format(
++ int(doc.find("./capability[@type='pci']/slot").text)
++ ),
++ "function": "0x{}".format(
++ doc.find("./capability[@type='pci']/function").text
++ ),
++ }
++ )
++ elif "usb_device" in hostdevice.listCaps():
++ vendor_id = doc.find(".//vendor").get("id")
++ product_id = doc.find(".//product").get("id")
++ hostdev_context.append(
++ {"type": "usb", "vendor": vendor_id, "product": product_id}
++ )
++ # For now we only handle PCI and USB passthrough
++ except libvirt.libvirtError as err:
++ conn.close()
++ raise CommandExecutionError(
++ "Failed to get host devices: " + err.get_error_message()
++ )
++ context["hostdevs"] = hostdev_context
++
+ context["os_type"] = os_type
+ context["arch"] = arch
+ fn_ = "libvirt_domain.jinja"
+@@ -1044,23 +1085,75 @@ def _gen_vol_xml(
+ return template.render(**context)
+
+
+-def _gen_net_xml(name, bridge, forward, vport, tag=None, ip_configs=None):
++def _gen_net_xml(
++ name,
++ bridge,
++ forward,
++ vport,
++ tag=None,
++ ip_configs=None,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
++):
+ """
+ Generate the XML string to define a libvirt network
+ """
++ if isinstance(vport, str):
++ vport_context = {"type": vport}
++ else:
++ vport_context = vport
++
++ if isinstance(tag, (str, int)):
++ tag_context = {"tags": [{"id": tag}]}
++ else:
++ tag_context = tag
++
++ addresses_context = []
++ if addresses:
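++ # Accept PCI addresses of the dddd:bb:ss.f form, e.g. "0000:04:00.1"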
++ matches = [
++ re.fullmatch(r"([0-9]+):([0-9A-Fa-f]+):([0-9A-Fa-f]+)\.([0-9])", addr)
++ for addr in addresses.lower().split(" ")
++ ]
++ addresses_context = [
++ {
++ "domain": m.group(1),
++ "bus": m.group(2),
++ "slot": m.group(3),
++ "function": m.group(4),
++ }
++ for m in matches
++ if m
++ ]
++
+ context = {
+ "name": name,
+ "bridge": bridge,
++ "mtu": mtu,
++ "domain": domain,
+ "forward": forward,
+- "vport": vport,
+- "tag": tag,
++ "nat": nat,
++ "interfaces": interfaces.split(" ") if interfaces else [],
++ "addresses": addresses_context,
++ "pf": physical_function,
++ "vport": vport_context,
++ "vlan": tag_context,
++ "dns": dns,
+ "ip_configs": [
+ {
+ "address": ipaddress.ip_network(config["cidr"]),
+ "dhcp_ranges": config.get("dhcp_ranges", []),
++ "hosts": config.get("hosts", {}),
++ "bootp": config.get("bootp", {}),
++ "tftp": config.get("tftp"),
+ }
+ for config in ip_configs or []
+ ],
++ "yesno": lambda v: "yes" if v else "no",
+ }
+ fn_ = "libvirt_network.jinja"
+ try:
+@@ -1813,6 +1906,7 @@ def init(
+ serials=None,
+ consoles=None,
+ stop_on_reboot=False,
++ host_devices=None,
+ **kwargs
+ ):
+ """
+@@ -2143,6 +2237,13 @@ def init(
+
+ .. versionadded:: Aluminium
+
++ :param host_devices:
++ List of host devices to passthrough to the guest.
++ The value is a list of device names as provided by the :py:func:`~salt.modules.virt.node_devices` function.
++ (Default: ``None``)
++
++ .. versionadded:: Aluminium
++
+ .. _init-cpu-def:
+
+ .. rubric:: cpu parameters definition
+@@ -2485,9 +2586,17 @@ def init(
+ hostname_property: virt:hostname
+ sparse_volume: True
+
+- iothreads
+- When ``True`` dedicated threads will be used for the I/O of the disk.
+- (Default: ``False``)
++ io
++ I/O control policy. String value amongst ``native``, ``threads`` and ``io_uring``.
++ (Default: ``native``)
++
++ .. versionadded:: Aluminium
++
++ iothread_id
++ I/O thread id to assign the disk to.
++ (Default: none assigned)
++
++ .. versionadded:: Aluminium
+
+ .. _init-graphics-def:
+
+@@ -2706,6 +2815,7 @@ def init(
+ serials,
+ consoles,
+ stop_on_reboot,
++ host_devices,
+ **kwargs
+ )
+ log.debug("New virtual machine definition: %s", vm_xml)
+@@ -2764,10 +2874,20 @@ def _nics_equal(nic1, nic2):
+ """
+ Filter out elements to ignore when comparing nics
+ """
++ source_node = nic.find("source")
++ source_attrib = source_node.attrib if source_node is not None else {}
++ source_type = "network" if "network" in source_attrib else nic.attrib["type"]
++
++ source_getters = {
++ "network": lambda n: n.get("network"),
++ "bridge": lambda n: n.get("bridge"),
++ "direct": lambda n: n.get("dev"),
++ "hostdev": lambda n: _format_pci_address(n.find("address")),
++ }
+ return {
+- "type": nic.attrib["type"],
+- "source": nic.find("source").attrib[nic.attrib["type"]]
+- if nic.find("source") is not None
++ "type": source_type,
++ "source": source_getters[source_type](source_node)
++ if source_node is not None
+ else None,
+ "model": nic.find("model").attrib["type"]
+ if nic.find("model") is not None
+@@ -2819,6 +2939,32 @@ def _graphics_equal(gfx1, gfx2):
+ )
+
+
++def _hostdevs_equal(dev1, dev2):
++ """
++ Test if two hostdev devices should be considered the same device
++ """
++
++ def _filter_hostdevs(dev):
++ """
++ When the domain is running, the hostdev element may contain additional properties.
++ This function will only keep the ones we care about
++ """
++ type_ = dev.get("type")
++ definition = {
++ "type": type_,
++ }
++ if type_ == "pci":
++ address_node = dev.find("./source/address")
++ for attr in ["domain", "bus", "slot", "function"]:
++ definition[attr] = address_node.get(attr)
++ elif type_ == "usb":
++ for attr in ["vendor", "product"]:
++ definition[attr] = dev.find("./source/" + attr).get("id")
++ return definition
++
++ return _filter_hostdevs(dev1) == _filter_hostdevs(dev2)
++
++
+ def _diff_lists(old, new, comparator):
+ """
+ Compare lists to extract the changes
+@@ -2919,6 +3065,16 @@ def _diff_graphics_lists(old, new):
+ return _diff_lists(old, new, _graphics_equal)
+
+
++def _diff_hostdev_lists(old, new):
++ """
++ Compare hostdev devices definitions to extract the changes
++
++ :param old: list of ElementTree nodes representing the old hostdev devices
++ :param new: list of ElementTree nodes representing the new hostdev devices
++ """
++ return _diff_lists(old, new, _hostdevs_equal)
++
++
+ def _expand_cpuset(cpuset):
+ """
+ Expand the libvirt cpuset and nodeset values into a list of cpu/node IDs
+@@ -3014,6 +3170,218 @@ def _diff_console_list(old, new):
+ return _diff_lists(old, new, _serial_or_concole_equal)
+
+
++def _format_pci_address(node):
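++ """
++ Format a PCI address XML node into its dddd:bb:ss.f string form
++ """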
++ return "{}:{}:{}.{}".format(
++ node.get("domain").replace("0x", ""),
++ node.get("bus").replace("0x", ""),
++ node.get("slot").replace("0x", ""),
++ node.get("function").replace("0x", ""),
++ )
++
++
++def _almost_equal(current, new):
++ """
++ Return True if the parameters are numbers that are almost equal (within 0.1%)
++ """
++ if current is None or new is None:
++ return False
++ return abs(current - new) / current < 1e-03
++
++
++def _compute_device_changes(old_xml, new_xml, to_skip):
++ """
++ Compute the device changes between two domain XML definitions.
++ """
++ devices_node = old_xml.find("devices")
++ changes = {}
++ for dev_type in to_skip:
++ changes[dev_type] = {}
++ if not to_skip[dev_type]:
++ old = devices_node.findall(dev_type)
++ new = new_xml.findall("devices/{}".format(dev_type))
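++ # Dispatch to the matching _diff_<dev_type>_lists helper (e.g. _diff_hostdev_lists)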
++ changes[dev_type] = globals()["_diff_{}_lists".format(dev_type)](old, new)
++ return changes
++
++
++def _get_pci_addresses(node):
++ """
++ Get all the pci addresses in the node in 0000:00:00.0 form
++ """
++ return {_format_pci_address(address) for address in node.findall(".//address")}
++
++
++def _correct_networks(conn, desc):
++ """
++ Adjust the interface devices matching existing networks.
++ Returns a dictionary mapping each modified device node to its original XML definition string.
++ """
++ networks = [ElementTree.fromstring(net.XMLDesc()) for net in conn.listAllNetworks()]
++ nics = desc.findall("devices/interface")
++ device_map = {}
++ for nic in nics:
++ if nic.get("type") == "hostdev":
++ # Do we have a network matching this NIC PCI address?
++ addr = _get_pci_addresses(nic.find("source"))
++ matching_nets = [
++ net
++ for net in networks
++ if net.find("forward").get("mode") == "hostdev"
++ and addr & _get_pci_addresses(net)
++ ]
++ if matching_nets:
++ # We need to store the XML before modifying it
++ # since libvirt needs it to detach the device
++ old_xml = ElementTree.tostring(nic)
++ nic.set("type", "network")
++ nic.find("source").set("network", matching_nets[0].find("name").text)
++ device_map[nic] = old_xml
++ return device_map
++
++
++def _update_live(domain, new_desc, mem, cpu, old_mem, old_cpu, to_skip, test):
++ """
++ Perform the live update of a domain.
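++
++ :return: a (status, errors) tuple where status maps the updated device types
++ to their live update result and errors is a list of error messages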
++ """
++ status = {}
++ errors = []
++
++ if not domain.isActive():
++ return status, errors
++
++ # Do the live changes now that we know the definition has been properly set
++ # From that point on, failures are not blocking: try to live-update as much
++ # as possible.
++ commands = []
++ if cpu and (isinstance(cpu, int) or isinstance(cpu, dict) and cpu.get("maximum")):
++ new_cpu = cpu.get("maximum") if isinstance(cpu, dict) else cpu
++ if old_cpu != new_cpu and new_cpu is not None:
++ commands.append(
++ {
++ "device": "cpu",
++ "cmd": "setVcpusFlags",
++ "args": [new_cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE],
++ }
++ )
++ if mem:
++ if isinstance(mem, dict):
++ # setMemoryFlags takes memory amount in KiB
++ new_mem = (
++ int(_handle_unit(mem.get("current")) / 1024)
++ if "current" in mem
++ else None
++ )
++ elif isinstance(mem, int):
++ new_mem = int(mem * 1024)
++
++ if not _almost_equal(old_mem, new_mem) and new_mem is not None:
++ commands.append(
++ {
++ "device": "mem",
++ "cmd": "setMemoryFlags",
++ "args": [new_mem, libvirt.VIR_DOMAIN_AFFECT_LIVE],
++ }
++ )
++
++ # Compute the changes with the live definition
++ old_desc = ElementTree.fromstring(domain.XMLDesc(0))
++ changed_devices = {"interface": _correct_networks(domain.connect(), old_desc)}
++ changes = _compute_device_changes(old_desc, new_desc, to_skip)
++
++ # Look for removable device source changes
++ removable_changes = []
++ new_disks = []
++ for new_disk in changes["disk"].get("new", []):
++ device = new_disk.get("device", "disk")
++ if device not in ["cdrom", "floppy"]:
++ new_disks.append(new_disk)
++ continue
++
++ target_dev = new_disk.find("target").get("dev")
++ matching = [
++ old_disk
++ for old_disk in changes["disk"].get("deleted", [])
++ if old_disk.get("device", "disk") == device
++ and old_disk.find("target").get("dev") == target_dev
++ ]
++ if not matching:
++ new_disks.append(new_disk)
++ else:
++ # libvirt needs to keep the XML exactly as it was before
++ updated_disk = matching[0]
++ changes["disk"]["deleted"].remove(updated_disk)
++ removable_changes.append(updated_disk)
++ source_node = updated_disk.find("source")
++ new_source_node = new_disk.find("source")
++ source_file = (
++ new_source_node.get("file") if new_source_node is not None else None
++ )
++
++ updated_disk.set("type", "file")
++ # Detaching device
++ if source_node is not None:
++ updated_disk.remove(source_node)
++
++ # Attaching device
++ if source_file:
++ ElementTree.SubElement(
++ updated_disk, "source", attrib={"file": source_file}
++ )
++
++ changes["disk"]["new"] = new_disks
++
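++ # Turn the computed device changes into libvirt attach/detach commands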
++ for dev_type in ["disk", "interface", "hostdev"]:
++ for added in changes[dev_type].get("new", []):
++ commands.append(
++ {
++ "device": dev_type,
++ "cmd": "attachDevice",
++ "args": [xmlutil.element_to_str(added)],
++ }
++ )
++
++ for removed in changes[dev_type].get("deleted", []):
++ removed_def = changed_devices.get(dev_type, {}).get(
++ removed, ElementTree.tostring(removed)
++ )
++ commands.append(
++ {
++ "device": dev_type,
++ "cmd": "detachDevice",
++ "args": [salt.utils.stringutils.to_str(removed_def)],
++ }
++ )
++
++ for updated_disk in removable_changes:
++ commands.append(
++ {
++ "device": "disk",
++ "cmd": "updateDeviceFlags",
++ "args": [xmlutil.element_to_str(updated_disk)],
++ }
++ )
++
++ for cmd in commands:
++ try:
++ ret = 0 if test else getattr(domain, cmd["cmd"])(*cmd["args"])
++ device_type = cmd["device"]
++ if device_type in ["cpu", "mem"]:
++ status[device_type] = not ret
++ else:
++ actions = {
++ "attachDevice": "attached",
++ "detachDevice": "detached",
++ "updateDeviceFlags": "updated",
++ }
++ device_status = status.setdefault(device_type, {})
++ cmd_status = device_status.setdefault(actions[cmd["cmd"]], [])
++ cmd_status.append(cmd["args"][0])
++
++ except libvirt.libvirtError as err:
++ errors.append(str(err))
++
++ return status, errors
++
++
+ def update(
+ name,
+ cpu=0,
+@@ -3033,6 +3401,7 @@ def update(
+ serials=None,
+ consoles=None,
+ stop_on_reboot=False,
++ host_devices=None,
+ **kwargs
+ ):
+ """
+@@ -3220,6 +3589,13 @@ def update(
+ hpet:
+ present: False
+
++ :param host_devices:
++ List of host devices to passthrough to the guest.
++ The value is a list of device names as provided by the :py:func:`~salt.modules.virt.node_devices` function.
++ (Default: ``None``)
++
++ .. versionadded:: Aluminium
++
+ :return:
+
+ Returns a dictionary indicating the status of what has been done. It is structured in
+@@ -3254,7 +3630,7 @@ def update(
+ }
+ conn = __get_conn(**kwargs)
+ domain = _get_domain(conn, name)
+- desc = ElementTree.fromstring(domain.XMLDesc(0))
++ desc = ElementTree.fromstring(domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
+ need_update = False
+
+ # Compute the XML to get the disks, interfaces and graphics
+@@ -3283,6 +3659,7 @@ def update(
+ serial=serials,
+ consoles=consoles,
+ stop_on_reboot=stop_on_reboot,
++ host_devices=host_devices,
+ **kwargs
+ )
+ )
+@@ -3326,11 +3703,6 @@ def update(
+ old_mem = int(_get_with_unit(desc.find("memory")) / 1024)
+ old_cpu = int(desc.find("./vcpu").text)
+
+- def _almost_equal(current, new):
+- if current is None or new is None:
+- return False
+- return abs(current - new) / current < 1e-03
+-
+ def _yesno_attribute(path, xpath, attr_name, ignored=None):
+ return xmlutil.attribute(
+ path, xpath, attr_name, ignored, lambda v: "yes" if v else "no"
+@@ -3669,26 +4041,24 @@ def update(
+
+ # Update the XML definition with the new disks and diff changes
+ devices_node = desc.find("devices")
+- parameters = {
+- "disk": ["disks", "disk_profile"],
+- "interface": ["interfaces", "nic_profile"],
+- "graphics": ["graphics"],
+- "serial": ["serial"],
+- "console": ["console"],
++ func_locals = locals()
++
++ def _skip_update(names):
++ return all(func_locals.get(n) is None for n in names)
++
++ to_skip = {
++ "disk": _skip_update(["disks", "disk_profile"]),
++ "interface": _skip_update(["interfaces", "nic_profile"]),
++ "graphics": _skip_update(["graphics"]),
++ "serial": _skip_update(["serial"]),
++ "console": _skip_update(["console"]),
++ "hostdev": _skip_update(["host_devices"]),
+ }
+- changes = {}
+- for dev_type in parameters:
+- changes[dev_type] = {}
+- func_locals = locals()
+- if [
+- param
+- for param in parameters[dev_type]
+- if func_locals.get(param, None) is not None
+- ]:
++ changes = _compute_device_changes(desc, new_desc, to_skip)
++ for dev_type in changes:
++ if not to_skip[dev_type]:
+ old = devices_node.findall(dev_type)
+- new = new_desc.findall("devices/{}".format(dev_type))
+- changes[dev_type] = globals()["_diff_{}_lists".format(dev_type)](old, new)
+- if changes[dev_type]["deleted"] or changes[dev_type]["new"]:
++ if changes[dev_type].get("deleted") or changes[dev_type].get("new"):
+ for item in old:
+ devices_node.remove(item)
+ devices_node.extend(changes[dev_type]["sorted"])
+@@ -3713,151 +4083,22 @@ def update(
+ elif item in changes["disk"]["new"] and not source_file:
+ _disk_volume_create(conn, all_disks[idx])
+ if not test:
+- xml_desc = ElementTree.tostring(desc)
++ xml_desc = xmlutil.element_to_str(desc)
+ log.debug("Update virtual machine definition: %s", xml_desc)
+- conn.defineXML(salt.utils.stringutils.to_str(xml_desc))
++ conn.defineXML(xml_desc)
+ status["definition"] = True
+ except libvirt.libvirtError as err:
+ conn.close()
+ raise err
+
+- # Do the live changes now that we know the definition has been properly set
+- # From that point on, failures are not blocking to try to live update as much
+- # as possible.
+- commands = []
+- removable_changes = []
+- if domain.isActive() and live:
+- if cpu and (
+- isinstance(cpu, int) or isinstance(cpu, dict) and cpu.get("maximum")
+- ):
+- new_cpu = cpu.get("maximum") if isinstance(cpu, dict) else cpu
+- if old_cpu != new_cpu and new_cpu is not None:
+- commands.append(
+- {
+- "device": "cpu",
+- "cmd": "setVcpusFlags",
+- "args": [new_cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE],
+- }
+- )
+- if mem:
+- if isinstance(mem, dict):
+- # setMemoryFlags takes memory amount in KiB
+- new_mem = (
+- int(_handle_unit(mem.get("current")) / 1024)
+- if "current" in mem
+- else None
+- )
+- elif isinstance(mem, int):
+- new_mem = int(mem * 1024)
+-
+- if not _almost_equal(old_mem, new_mem) and new_mem is not None:
+- commands.append(
+- {
+- "device": "mem",
+- "cmd": "setMemoryFlags",
+- "args": [new_mem, libvirt.VIR_DOMAIN_AFFECT_LIVE],
+- }
+- )
+-
+- # Look for removable device source changes
+- new_disks = []
+- for new_disk in changes["disk"].get("new", []):
+- device = new_disk.get("device", "disk")
+- if device not in ["cdrom", "floppy"]:
+- new_disks.append(new_disk)
+- continue
+-
+- target_dev = new_disk.find("target").get("dev")
+- matching = [
+- old_disk
+- for old_disk in changes["disk"].get("deleted", [])
+- if old_disk.get("device", "disk") == device
+- and old_disk.find("target").get("dev") == target_dev
+- ]
+- if not matching:
+- new_disks.append(new_disk)
+- else:
+- # libvirt needs to keep the XML exactly as it was before
+- updated_disk = matching[0]
+- changes["disk"]["deleted"].remove(updated_disk)
+- removable_changes.append(updated_disk)
+- source_node = updated_disk.find("source")
+- new_source_node = new_disk.find("source")
+- source_file = (
+- new_source_node.get("file")
+- if new_source_node is not None
+- else None
+- )
+-
+- updated_disk.set("type", "file")
+- # Detaching device
+- if source_node is not None:
+- updated_disk.remove(source_node)
+-
+- # Attaching device
+- if source_file:
+- ElementTree.SubElement(updated_disk, "source", file=source_file)
+-
+- changes["disk"]["new"] = new_disks
+-
+- for dev_type in ["disk", "interface"]:
+- for added in changes[dev_type].get("new", []):
+- commands.append(
+- {
+- "device": dev_type,
+- "cmd": "attachDevice",
+- "args": [
+- salt.utils.stringutils.to_str(
+- ElementTree.tostring(added)
+- )
+- ],
+- }
+- )
+-
+- for removed in changes[dev_type].get("deleted", []):
+- commands.append(
+- {
+- "device": dev_type,
+- "cmd": "detachDevice",
+- "args": [
+- salt.utils.stringutils.to_str(
+- ElementTree.tostring(removed)
+- )
+- ],
+- }
+- )
+-
+- for updated_disk in removable_changes:
+- commands.append(
+- {
+- "device": "disk",
+- "cmd": "updateDeviceFlags",
+- "args": [
+- salt.utils.stringutils.to_str(
+- ElementTree.tostring(updated_disk)
+- )
+- ],
+- }
+- )
+-
+- for cmd in commands:
+- try:
+- ret = getattr(domain, cmd["cmd"])(*cmd["args"]) if not test else 0
+- device_type = cmd["device"]
+- if device_type in ["cpu", "mem"]:
+- status[device_type] = not bool(ret)
+- else:
+- actions = {
+- "attachDevice": "attached",
+- "detachDevice": "detached",
+- "updateDeviceFlags": "updated",
+- }
+- status[device_type][actions[cmd["cmd"]]].append(cmd["args"][0])
+-
+- except libvirt.libvirtError as err:
+- if "errors" not in status:
+- status["errors"] = []
+- status["errors"].append(str(err))
++ if live:
++ live_status, errors = _update_live(
++ domain, new_desc, mem, cpu, old_mem, old_cpu, to_skip, test
++ )
++ status.update(live_status)
++ if errors:
++ status_errors = status.setdefault("errors", [])
++ status_errors += errors
+
+ conn.close()
+ return status
+@@ -4107,6 +4348,121 @@ def node_info(**kwargs):
+ return info
+
+
++def _node_devices(conn):
++ """
++ List the host available devices, using an established connection.
++
++ :param conn: the libvirt connection handle to use.
++
++ .. versionadded:: Aluminium
++ """
++ devices = conn.listAllDevices()
++
++ devices_infos = []
++ for dev in devices:
++ root = ElementTree.fromstring(dev.XMLDesc())
++
++ # Only list PCI and USB devices that can be passed through as well as NICs
++ if not set(dev.listCaps()) & {"pci", "usb_device", "net"}:
++ continue
++
++ infos = {
++ "caps": " ".join(dev.listCaps()),
++ }
++
++ if "net" in dev.listCaps():
++ parent = root.find(".//parent").text
++ # Don't show lo, dummies and libvirt-created NICs
++ if parent == "computer":
++ continue
++ infos.update(
++ {
++ "name": root.find(".//interface").text,
++ "address": root.find(".//address").text,
++ "device name": parent,
++ "state": root.find(".//link").get("state"),
++ }
++ )
++ devices_infos.append(infos)
++ continue
++
++ vendor_node = root.find(".//vendor")
++ vendor_id = vendor_node.get("id").lower()
++ product_node = root.find(".//product")
++ product_id = product_node.get("id").lower()
++ infos.update(
++ {"name": dev.name(), "vendor_id": vendor_id, "product_id": product_id}
++ )
++
++ # Vendor or product display name may not be set
++ if vendor_node.text:
++ infos["vendor"] = vendor_node.text
++ if product_node.text:
++ infos["product"] = product_node.text
++
++ if "pci" in dev.listCaps():
++ infos["address"] = "{:04x}:{:02x}:{:02x}.{}".format(
++ int(root.find(".//domain").text),
++ int(root.find(".//bus").text),
++ int(root.find(".//slot").text),
++ root.find(".//function").text,
++ )
++ class_node = root.find(".//class")
++ if class_node is not None:
++ infos["PCI class"] = class_node.text
++
++ # Get the list of Virtual Functions if any
++ vf_addresses = [
++ _format_pci_address(vf)
++ for vf in root.findall(
++ "./capability[@type='pci']/capability[@type='virt_functions']/address"
++ )
++ ]
++ if vf_addresses:
++ infos["virtual functions"] = vf_addresses
++
++ # Get the Physical Function if any
++ pf = root.find(
++ "./capability[@type='pci']/capability[@type='phys_function']/address"
++ )
++ if pf is not None:
++ infos["physical function"] = _format_pci_address(pf)
++ elif "usb_device" in dev.listCaps():
++ infos["address"] = "{:03}:{:03}".format(
++ int(root.find(".//bus").text), int(root.find(".//device").text)
++ )
++
++ # Don't list the PCI bridges nor the Linux Foundation USB root hubs
++ linux_usb_host = vendor_id == "0x1d6b" and product_id in [
++ "0x0001",
++ "0x0002",
++ "0x0003",
++ ]
++ if (
++ root.find(".//capability[@type='pci-bridge']") is None
++ and not linux_usb_host
++ ):
++ devices_infos.append(infos)
++
++ return devices_infos
++
++
++def node_devices(**kwargs):
++ """
++ List the host available devices.
++
++ :param connection: libvirt connection URI, overriding defaults
++ :param username: username to connect with, overriding defaults
++ :param password: password to connect with, overriding defaults
++
++ .. versionadded:: Aluminium
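++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' virt.node_devices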
++ """
++ conn = __get_conn(**kwargs)
++ devs = _node_devices(conn)
++ conn.close()
++ return devs
++
++
+ def get_nics(vm_, **kwargs):
+ """
+ Return info about the network interfaces of a named vm
+@@ -5791,9 +6147,7 @@ def snapshot(domain, name=None, suffix=None, **kwargs):
+ n_name.text = name
+
+ conn = __get_conn(**kwargs)
+- _get_domain(conn, domain).snapshotCreateXML(
+- salt.utils.stringutils.to_str(ElementTree.tostring(doc))
+- )
++ _get_domain(conn, domain).snapshotCreateXML(xmlutil.element_to_str(doc))
+ conn.close()
+
+ return {"name": name}
+@@ -6464,10 +6818,8 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
+ conn = __get_conn(**kwargs)
+ caps = ElementTree.fromstring(conn.getCapabilities())
+ cpu = caps.find("host/cpu")
+- log.debug(
+- "Host CPU model definition: %s",
+- salt.utils.stringutils.to_str(ElementTree.tostring(cpu)),
+- )
++ host_cpu_def = xmlutil.element_to_str(cpu)
++ log.debug("Host CPU model definition: %s", host_cpu_def)
+
+ flags = 0
+ if migratable:
+@@ -6482,11 +6834,7 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
+ # This one is only in 1.1.3+
+ flags += libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+
+- cpu = ElementTree.fromstring(
+- conn.baselineCPU(
+- [salt.utils.stringutils.to_str(ElementTree.tostring(cpu))], flags
+- )
+- )
++ cpu = ElementTree.fromstring(conn.baselineCPU([host_cpu_def], flags))
+ conn.close()
+
+ if full and not getattr(libvirt, "VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES", False):
+@@ -6532,18 +6880,70 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
+ return ElementTree.tostring(cpu)
+
+
+-def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **kwargs):
++def network_define(
++ name,
++ bridge,
++ forward,
++ ipv4_config=None,
++ ipv6_config=None,
++ vport=None,
++ tag=None,
++ autostart=True,
++ start=True,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
++ **kwargs
++):
+ """
+ Create libvirt network.
+
+- :param name: Network name
+- :param bridge: Bridge name
+- :param forward: Forward mode(bridge, router, nat)
+- :param vport: Virtualport type
+- :param tag: Vlan tag
+- :param autostart: Network autostart (default True)
+- :param start: Network start (default True)
+- :param ipv4_config: IP v4 configuration
++ :param name: Network name.
++ :param bridge: Bridge name.
++ :param forward: Forward mode (bridge, router, nat).
++
++ .. versionchanged:: Aluminium
++ a ``None`` value creates an isolated network with no forwarding at all
++
++ :param vport: Virtualport type.
++ The value can also be a dictionary with ``type`` and ``parameters`` keys.
++ The ``parameters`` value is a dictionary of virtual port parameters.
++
++ .. code-block:: yaml
++
++ - vport:
++ type: openvswitch
++ parameters:
++ interfaceid: 09b11c53-8b5c-4eeb-8f00-d84eaa0aaa4f
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
++ :param tag: Vlan tag.
++ The value can also be a dictionary with the ``tags`` and optional ``trunk`` keys.
++ ``trunk`` is a boolean value indicating whether to use VLAN trunking.
++ ``tags`` is a list of dictionaries with keys ``id`` and ``nativeMode``.
++ The ``nativeMode`` value can be one of ``tagged`` or ``untagged``.
++
++ .. code-block:: yaml
++
++ - tag:
++ trunk: True
++ tags:
++ - id: 42
++ nativeMode: untagged
++ - id: 47
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
++ :param autostart: Network autostart (default True).
++ :param start: Network start (default True).
++ :param ipv4_config: IP v4 configuration.
+ Dictionary describing the IP v4 setup like IP range and
+ a possible DHCP configuration. The structure is documented
+ in net-define-ip_.
+@@ -6551,7 +6951,7 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ .. versionadded:: 3000
+ :type ipv4_config: dict or None
+
+- :param ipv6_config: IP v6 configuration
++ :param ipv6_config: IP v6 configuration.
+ Dictionary describing the IP v6 setup like IP range and
+ a possible DHCP configuration. The structure is documented
+ in net-define-ip_.
+@@ -6559,13 +6959,108 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ .. versionadded:: 3000
+ :type ipv6_config: dict or None
+
+- :param connection: libvirt connection URI, overriding defaults
+- :param username: username to connect with, overriding defaults
+- :param password: password to connect with, overriding defaults
++ :param connection: libvirt connection URI, overriding defaults.
++ :param username: username to connect with, overriding defaults.
++ :param password: password to connect with, overriding defaults.
++
++ :param mtu: size of the Maximum Transmission Unit (MTU) of the network.
++ (default ``None``)
++
++ .. versionadded:: Aluminium
++
++ :param domain: DNS domain name of the DHCP server.
++ The value is a dictionary with a mandatory ``name`` property and an optional ``localOnly`` boolean one.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - domain:
++ name: lab.acme.org
++ localOnly: True
++
++ .. versionadded:: Aluminium
++
++ :param nat: addresses and ports to route in NAT forward mode.
++ The value is a dictionary with optional keys ``address`` and ``port``.
++ Both values are a dictionary with ``start`` and ``end`` values.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: nat
++ - nat:
++ address:
++ start: 1.2.3.4
++ end: 1.2.3.10
++ port:
++ start: 500
++ end: 1000
++
++ .. versionadded:: Aluminium
++
++ :param interfaces: whitespace-separated list of network interface devices that can be used for this network.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: passthrough
++ - interfaces: "eth10 eth11 eth12"
++
++ .. versionadded:: Aluminium
++
++ :param addresses: whitespace-separated list of addresses of PCI devices that can be used for this network in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - interfaces: "0000:04:00.1 0000:e3:01.2"
++
++ .. versionadded:: Aluminium
++
++ :param physical_function: device name of the physical interface to use in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - physical_function: "eth0"
++
++ .. versionadded:: Aluminium
++
++ :param dns: virtual network DNS configuration.
++ The value is a dictionary described in net-define-dns_.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - dns:
++ forwarders:
++ - domain: example.com
++ addr: 192.168.1.1
++ - addr: 8.8.8.8
++ - domain: www.example.com
++ txt:
++ example.com: "v=spf1 a -all"
++ _http.tcp.example.com: "name=value,paper=A4"
++ hosts:
++ 192.168.1.2:
++ - mirror.acme.lab
++ - test.acme.lab
++ srvs:
++ - name: ldap
++ protocol: tcp
++ domain: ldapserver.example.com
++ target: .
++ port: 389
++ priority: 1
++ weight: 10
++
++ .. versionadded:: Aluminium
+
+ .. _net-define-ip:
+
+- ** IP configuration definition
++ .. rubric:: IP configuration definition
+
+ Both the IPv4 and IPv6 configuration dictionaries can contain the following properties:
+
+@@ -6573,7 +7068,47 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ CIDR notation for the network. For example '192.168.124.0/24'
+
+ dhcp_ranges
+- A list of dictionary with ``'start'`` and ``'end'`` properties.
++ A list of dictionaries with ``'start'`` and ``'end'`` properties.
++
++ hosts
++ A list of dictionaries with ``ip`` property and optional ``name``, ``mac`` and ``id`` properties.
++
++ .. versionadded:: Aluminium
++
++ bootp
++ A dictionary with a ``file`` property and an optional ``server`` one.
++
++ .. versionadded:: Aluminium
++
++ tftp
++ The path to the TFTP root directory to serve.
++
++ .. versionadded:: Aluminium
++
++ .. _net-define-dns:
++
++ .. rubric:: DNS configuration definition
++
++ The DNS configuration dictionary contains the following optional properties:
++
++ forwarders
++ List of alternate DNS forwarders to use.
++ Each item is a dictionary with the optional ``domain`` and ``addr`` keys.
++ If both are provided, the requests to the domain are forwarded to the server at the ``addr``.
++ If only ``domain`` is provided the requests matching this domain will be resolved locally.
++ If only ``addr`` is provided all requests will be forwarded to this DNS server.
++
++ txt
++ Dictionary of TXT fields to set.
++
++ hosts
++ Dictionary of host DNS entries.
++ The key is the IP of the host, and the value is a list of hostnames for it.
++
++ srvs
++ List of SRV DNS entries.
++ Each entry is a dictionary with the mandatory ``name`` and ``protocol`` keys.
++ Entries can also have ``target``, ``port``, ``priority`` and ``weight`` optional properties.
+
+ CLI Example:
+
+@@ -6586,8 +7121,6 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ conn = __get_conn(**kwargs)
+ vport = kwargs.get("vport", None)
+ tag = kwargs.get("tag", None)
+- autostart = kwargs.get("autostart", True)
+- starting = kwargs.get("start", True)
+
+ net_xml = _gen_net_xml(
+ name,
+@@ -6596,6 +7129,13 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ vport,
+ tag=tag,
+ ip_configs=[config for config in [ipv4_config, ipv6_config] if config],
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
+ )
+ try:
+ conn.networkDefineXML(net_xml)
+@@ -6615,12 +7155,12 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ conn.close()
+ return False
+
+- if (starting is True or autostart is True) and network.isActive() != 1:
++ if (start or autostart) and network.isActive() != 1:
+ network.create()
+
+- if autostart is True and network.autostart() != 1:
++ if autostart and network.autostart() != 1:
+ network.setAutostart(int(autostart))
+- elif autostart is False and network.autostart() == 1:
++ elif not autostart and network.autostart() == 1:
+ network.setAutostart(int(autostart))
+
+ conn.close()
+@@ -6628,6 +7168,271 @@ def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **
+ return True
+
+
++def _remove_empty_xml_node(node):
++ """
++ Remove the nodes with no children, no text and no attribute
++ """
++ for child in node:
++ if not child.tail and not child.text and not child.items() and not child:
++ node.remove(child)
++ else:
++ _remove_empty_xml_node(child)
++ return node
++
++
++def network_update(
++ name,
++ bridge,
++ forward,
++ ipv4_config=None,
++ ipv6_config=None,
++ vport=None,
++ tag=None,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
++ test=False,
++ **kwargs
++):
++ """
++ Update a virtual network if needed.
++
++ :param name: Network name.
++ :param bridge: Bridge name.
++ :param forward: Forward mode (bridge, router, nat).
++ A ``None`` value creates an isolated network with no forwarding at all.
++
++ :param vport: Virtualport type.
++ The value can also be a dictionary with ``type`` and ``parameters`` keys.
++ The ``parameters`` value is a dictionary of virtual port parameters.
++
++ .. code-block:: yaml
++
++ - vport:
++ type: openvswitch
++ parameters:
++ interfaceid: 09b11c53-8b5c-4eeb-8f00-d84eaa0aaa4f
++
++ :param tag: Vlan tag.
++ The value can also be a dictionary with the ``tags`` and optional ``trunk`` keys.
++ ``trunk`` is a boolean value indicating whether to use VLAN trunking.
++ ``tags`` is a list of dictionaries with keys ``id`` and ``nativeMode``.
++ The ``nativeMode`` value can be one of ``tagged`` or ``untagged``.
++
++ .. code-block:: yaml
++
++ - tag:
++ trunk: True
++ tags:
++ - id: 42
++ nativeMode: untagged
++ - id: 47
++
++ :param ipv4_config: IP v4 configuration.
++ Dictionary describing the IP v4 setup like IP range and
++ a possible DHCP configuration. The structure is documented
++ in net-define-ip_.
++
++ :type ipv4_config: dict or None
++
++ :param ipv6_config: IP v6 configuration.
++ Dictionary describing the IP v6 setup like IP range and
++ a possible DHCP configuration. The structure is documented
++ in net-define-ip_.
++
++ :type ipv6_config: dict or None
++
++ :param connection: libvirt connection URI, overriding defaults.
++ :param username: username to connect with, overriding defaults.
++ :param password: password to connect with, overriding defaults.
++
++ :param mtu: size of the Maximum Transmission Unit (MTU) of the network.
++ (default ``None``)
++
++ :param domain: DNS domain name of the DHCP server.
++ The value is a dictionary with a mandatory ``name`` property and an optional ``localOnly`` boolean one.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - domain:
++ name: lab.acme.org
++ localOnly: True
++
++ :param nat: addresses and ports to route in NAT forward mode.
++ The value is a dictionary with optional keys ``address`` and ``port``.
++ Both values are a dictionary with ``start`` and ``end`` values.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: nat
++ - nat:
++ address:
++ start: 1.2.3.4
++ end: 1.2.3.10
++ port:
++ start: 500
++ end: 1000
++
++ :param interfaces: whitespace-separated list of network interface devices that can be used for this network.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: passthrough
++ - interfaces: "eth10 eth11 eth12"
++
++ :param addresses: whitespace-separated list of addresses of PCI devices that can be used for this network in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - interfaces: "0000:04:00.1 0000:e3:01.2"
++
++ :param physical_function: device name of the physical interface to use in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - physical_function: "eth0"
++
++ :param dns: virtual network DNS configuration.
++ The value is a dictionary described in net-define-dns_.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - dns:
++ forwarders:
++ - domain: example.com
++ addr: 192.168.1.1
++ - addr: 8.8.8.8
++ - domain: www.example.com
++ txt:
++ example.com: "v=spf1 a -all"
++ _http.tcp.example.com: "name=value,paper=A4"
++ hosts:
++ 192.168.1.2:
++ - mirror.acme.lab
++ - test.acme.lab
++ srvs:
++ - name: ldap
++ protocol: tcp
++ domain: ldapserver.example.com
++ target: .
++ port: 389
++ priority: 1
++ weight: 10
++
++ .. versionadded:: Aluminium
++ """
++ # Get the current definition to compare the two
++ conn = __get_conn(**kwargs)
++ needs_update = False
++ try:
++ net = conn.networkLookupByName(name)
++ old_xml = ElementTree.fromstring(net.XMLDesc())
++
++ # Compute new definition
++ new_xml = ElementTree.fromstring(
++ _gen_net_xml(
++ name,
++ bridge,
++ forward,
++ vport,
++ tag=tag,
++ ip_configs=[config for config in [ipv4_config, ipv6_config] if config],
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
++ )
++ )
++
++ elements_to_copy = ["uuid", "mac"]
++ for to_copy in elements_to_copy:
++ element = old_xml.find(to_copy)
++ # mac may not be present (hostdev network for instance)
++ if element is not None:
++ new_xml.insert(1, element)
++
++ # Libvirt adds a connection attribute on running networks, remove before comparing
++ old_xml.attrib.pop("connections", None)
++
++ # Libvirt adds the addresses of the VF devices on running networks when the pf is passed
++ # Those need to be removed before comparing
++ if old_xml.find("forward/pf") is not None:
++ forward_node = old_xml.find("forward")
++ address_nodes = forward_node.findall("address")
++ for node in address_nodes:
++ forward_node.remove(node)
++
++ # Remove libvirt auto-added bridge attributes to compare
++ default_bridge_attribs = {"stp": "on", "delay": "0"}
++ old_bridge_node = old_xml.find("bridge")
++ if old_bridge_node is not None:
++ for key, value in default_bridge_attribs.items():
++ if old_bridge_node.get(key, None) == value:
++ old_bridge_node.attrib.pop(key, None)
++
++ # Libvirt may also add the whole bridge element since the name can be computed.
++ # If the bridge name starts with virbr in a nat, route, open or isolated network
++ # there is a good chance it has been autogenerated...
++ old_forward = (
++ old_xml.find("forward").get("mode")
++ if old_xml.find("forward") is not None
++ else None
++ )
++ if (
++ old_forward == forward
++ and forward in ["nat", "route", "open", None]
++ and bridge is None
++ and old_bridge_node.get("name", "").startswith("virbr")
++ ):
++ old_bridge_node.attrib.pop("name", None)
++
++ # In the ipv4 address, we need to convert netmask to prefix in the old XML
++ ipv4_nodes = [
++ node
++ for node in old_xml.findall("ip")
++ if node.get("family", "ipv4") == "ipv4"
++ ]
++ for ip_node in ipv4_nodes:
++ netmask = ip_node.attrib.pop("netmask")
++ if netmask:
++ address = ipaddress.ip_network(
++ "{}/{}".format(ip_node.get("address"), netmask), strict=False
++ )
++ ip_node.set("prefix", str(address.prefixlen))
++
++ # Add default ipv4 family if needed
++ for doc in [old_xml, new_xml]:
++ for node in doc.findall("ip"):
++ if "family" not in node.keys():
++ node.set("family", "ipv4")
++
++ # Filter out spaces and empty elements since those would mislead the comparison
++ _remove_empty_xml_node(xmlutil.strip_spaces(old_xml))
++ xmlutil.strip_spaces(new_xml)
++
++ needs_update = xmlutil.to_dict(old_xml, True) != xmlutil.to_dict(new_xml, True)
++ if needs_update and not test:
++ conn.networkDefineXML(xmlutil.element_to_str(new_xml))
++ finally:
++ conn.close()
++ return needs_update
++
++
+ def list_networks(**kwargs):
+ """
+ List all virtual networks.
+@@ -6687,6 +7492,16 @@ def network_info(name=None, **kwargs):
+ lease["type"] = "unknown"
+ return leases
+
++ def _net_get_bridge(net):
++ """
++ Get the bridge of the network or None
++ """
++ try:
++ return net.bridgeName()
++ except libvirt.libvirtError as err:
++ # Some network configurations have no bridge
++ return None
++
+ try:
+ nets = [
+ net for net in conn.listAllNetworks() if name is None or net.name() == name
+@@ -6694,7 +7509,7 @@ def network_info(name=None, **kwargs):
+ result = {
+ net.name(): {
+ "uuid": net.UUIDString(),
+- "bridge": net.bridgeName(),
++ "bridge": _net_get_bridge(net),
+ "autostart": net.autostart(),
+ "active": net.isActive(),
+ "persistent": net.isPersistent(),
+@@ -7453,37 +8268,12 @@ def pool_update(
+ new_xml.insert(1, element)
+
+ # Filter out spaces and empty elements like <text/> since those would mislead the comparison
+- def visit_xml(node, fn):
+- fn(node)
+- for child in node:
+- visit_xml(child, fn)
+-
+- def space_stripper(node):
+- if node.tail is not None:
+- node.tail = node.tail.strip(" \t\n")
+- if node.text is not None:
+- node.text = node.text.strip(" \t\n")
+-
+- visit_xml(old_xml, space_stripper)
+- visit_xml(new_xml, space_stripper)
+-
+- def empty_node_remover(node):
+- for child in node:
+- if (
+- not child.tail
+- and not child.text
+- and not child.items()
+- and not child
+- ):
+- node.remove(child)
+-
+- visit_xml(old_xml, empty_node_remover)
++ _remove_empty_xml_node(xmlutil.strip_spaces(old_xml))
++ xmlutil.strip_spaces(new_xml)
+
+ needs_update = xmlutil.to_dict(old_xml, True) != xmlutil.to_dict(new_xml, True)
+ if needs_update and not test:
+- conn.storagePoolDefineXML(
+- salt.utils.stringutils.to_str(ElementTree.tostring(new_xml))
+- )
++ conn.storagePoolDefineXML(xmlutil.element_to_str(new_xml))
+ finally:
+ conn.close()
+ return needs_update
+diff --git a/salt/states/virt.py b/salt/states/virt.py
+index 784cdca73c..c677c9ad84 100644
+--- a/salt/states/virt.py
++++ b/salt/states/virt.py
+@@ -161,7 +161,8 @@ def _virt_call(
+ :param state: the expected final state of the VM. If None the VM state won't be checked.
+ :return: the salt state return
+ """
+- ret = {"name": domain, "changes": {}, "result": True, "comment": ""}
++ result = True if not __opts__["test"] else None
++ ret = {"name": domain, "changes": {}, "result": result, "comment": ""}
+ targeted_domains = fnmatch.filter(__salt__["virt.list_domains"](), domain)
+ changed_domains = list()
+ ignored_domains = list()
+@@ -174,15 +175,17 @@ def _virt_call(
+ domain_state = __salt__["virt.vm_state"](targeted_domain)
+ action_needed = domain_state.get(targeted_domain) != state
+ if action_needed:
+- response = __salt__["virt.{}".format(function)](
+- targeted_domain,
+- connection=connection,
+- username=username,
+- password=password,
+- **kwargs
+- )
+- if isinstance(response, dict):
+- response = response["name"]
++ response = True
++ if not __opts__["test"]:
++ response = __salt__["virt.{}".format(function)](
++ targeted_domain,
++ connection=connection,
++ username=username,
++ password=password,
++ **kwargs
++ )
++ if isinstance(response, dict):
++ response = response["name"]
+ changed_domains.append({"domain": targeted_domain, function: response})
+ else:
+ noaction_domains.append(targeted_domain)
+@@ -288,7 +291,6 @@ def defined(
+ arch=None,
+ boot=None,
+ numatune=None,
+- update=True,
+ boot_dev=None,
+ hypervisor_features=None,
+ clock=None,
+@@ -296,6 +298,7 @@ def defined(
+ consoles=None,
+ stop_on_reboot=False,
+ live=True,
++ host_devices=None,
+ ):
+ """
+ Starts an existing guest, or defines and starts a new VM with specified arguments.
+@@ -498,10 +501,6 @@ def defined(
+
+ .. versionadded:: 3000
+
+- :param update: set to ``False`` to prevent updating a defined domain. (Default: ``True``)
+-
+- .. deprecated:: sodium
+-
+ :param boot_dev:
+ Space separated list of devices to boot from sorted by decreasing priority.
+ Values can be ``hd``, ``fd``, ``cdrom`` or ``network``.
+@@ -595,6 +594,13 @@ def defined(
+
+ .. versionadded:: Aluminium
+
++ :param host_devices:
++ List of host devices to passthrough to the guest.
++ The value is a list of device names as provided by the :py:func:`~salt.modules.virt.node_devices` function.
++ (Default: ``None``)
++
++ .. versionadded:: Aluminium
++
+ .. rubric:: Example States
+
+ Make sure a virtual machine called ``domain_name`` is defined:
+@@ -641,31 +647,30 @@ def defined(
+ if name in __salt__["virt.list_domains"](
+ connection=connection, username=username, password=password
+ ):
+- status = {}
+- if update:
+- status = __salt__["virt.update"](
+- name,
+- cpu=cpu,
+- mem=mem,
+- disk_profile=disk_profile,
+- disks=disks,
+- nic_profile=nic_profile,
+- interfaces=interfaces,
+- graphics=graphics,
+- live=live,
+- connection=connection,
+- username=username,
+- password=password,
+- boot=boot,
+- numatune=numatune,
+- serials=serials,
+- consoles=consoles,
+- test=__opts__["test"],
+- boot_dev=boot_dev,
+- hypervisor_features=hypervisor_features,
+- clock=clock,
+- stop_on_reboot=stop_on_reboot,
+- )
++ status = __salt__["virt.update"](
++ name,
++ cpu=cpu,
++ mem=mem,
++ disk_profile=disk_profile,
++ disks=disks,
++ nic_profile=nic_profile,
++ interfaces=interfaces,
++ graphics=graphics,
++ live=live,
++ connection=connection,
++ username=username,
++ password=password,
++ boot=boot,
++ numatune=numatune,
++ serials=serials,
++ consoles=consoles,
++ test=__opts__["test"],
++ boot_dev=boot_dev,
++ hypervisor_features=hypervisor_features,
++ clock=clock,
++ stop_on_reboot=stop_on_reboot,
++ host_devices=host_devices,
++ )
+ ret["changes"][name] = status
+ if not status.get("definition"):
+ ret["comment"] = "Domain {} unchanged".format(name)
+@@ -706,6 +711,7 @@ def defined(
+ hypervisor_features=hypervisor_features,
+ clock=clock,
+ stop_on_reboot=stop_on_reboot,
++ host_devices=host_devices,
+ )
+ ret["changes"][name] = {"definition": True}
+ ret["comment"] = "Domain {} defined".format(name)
+@@ -731,7 +737,6 @@ def running(
+ install=True,
+ pub_key=None,
+ priv_key=None,
+- update=False,
+ connection=None,
+ username=None,
+ password=None,
+@@ -745,6 +750,7 @@ def running(
+ serials=None,
+ consoles=None,
+ stop_on_reboot=False,
++ host_devices=None,
+ ):
+ """
+ Starts an existing guest, or defines and starts a new VM with specified arguments.
+@@ -826,10 +832,6 @@ def running(
+ :param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
+
+ .. versionadded:: 2019.2.0
+- :param update: set to ``True`` to update a defined domain. (Default: ``False``)
+-
+- .. versionadded:: 2019.2.0
+- .. deprecated:: sodium
+ :param connection: libvirt connection URI, overriding defaults
+
+ .. versionadded:: 2019.2.0
+@@ -962,6 +964,13 @@ def running(
+ clock:
+ timezone: CEST
+
++ :param host_devices:
++ List of host devices to passthrough to the guest.
++ The value is a list of device names as provided by the :py:func:`~salt.modules.virt.node_devices` function.
++ (Default: ``None``)
++
++ .. versionadded:: Aluminium
++
+ .. rubric:: Example States
+
+ Make sure an already-defined virtual machine called ``domain_name`` is running:
+@@ -1005,12 +1014,6 @@ def running(
+ """
+ merged_disks = disks
+
+- if not update:
+- salt.utils.versions.warn_until(
+- "Aluminium",
+- "'update' parameter has been deprecated. Future behavior will be the one of update=True"
+- "It will be removed in {version}.",
+- )
+ ret = defined(
+ name,
+ cpu=cpu,
+@@ -1028,7 +1031,6 @@ def running(
+ os_type=os_type,
+ arch=arch,
+ boot=boot,
+- update=update,
+ boot_dev=boot_dev,
+ numatune=numatune,
+ hypervisor_features=hypervisor_features,
+@@ -1039,6 +1041,7 @@ def running(
+ password=password,
+ serials=serials,
+ consoles=consoles,
++ host_devices=host_devices,
+ )
+
+ result = True if not __opts__["test"] else None
+@@ -1264,21 +1267,64 @@ def network_defined(
+ connection=None,
+ username=None,
+ password=None,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
+ ):
+ """
+ Defines a new network with specified arguments.
+
++ :param name: Network name
+ :param bridge: Bridge name
+ :param forward: Forward mode (bridge, router, nat)
++
++ .. versionchanged:: Aluminium
++ a ``None`` value creates an isolated network with no forwarding at all
++
+ :param vport: Virtualport type (Default: ``'None'``)
++ The value can also be a dictionary with ``type`` and ``parameters`` keys.
++ The ``parameters`` value is a dictionary of virtual port parameters.
++
++ .. code-block:: yaml
++
++ - vport:
++ type: openvswitch
++ parameters:
++ interfaceid: 09b11c53-8b5c-4eeb-8f00-d84eaa0aaa4f
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
+ :param tag: Vlan tag (Default: ``'None'``)
++ The value can also be a dictionary with the ``tags`` and optional ``trunk`` keys.
++ ``trunk`` is a boolean value indicating whether to use VLAN trunking.
++ ``tags`` is a list of dictionaries with keys ``id`` and ``nativeMode``.
++ The ``nativeMode`` value can be one of ``tagged`` or ``untagged``.
++
++ .. code-block:: yaml
++
++ - tag:
++ trunk: True
++ tags:
++ - id: 42
++ nativeMode: untagged
++ - id: 47
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
+ :param ipv4_config:
+- IPv4 network configuration. See the :py:func`virt.network_define
+- ` function corresponding parameter documentation
++ IPv4 network configuration. See the
++ :py:func:`virt.network_define `
++ function corresponding parameter documentation
+ for more details on this dictionary.
+ (Default: None).
+ :param ipv6_config:
+- IPv6 network configuration. See the :py:func`virt.network_define
++ IPv6 network configuration. See the :py:func:`virt.network_define
+ ` function corresponding parameter documentation
+ for more details on this dictionary.
+ (Default: None).
+@@ -1286,6 +1332,100 @@ def network_defined(
+ :param connection: libvirt connection URI, overriding defaults
+ :param username: username to connect with, overriding defaults
+ :param password: password to connect with, overriding defaults
++ :param mtu: size of the Maximum Transmission Unit (MTU) of the network.
++ (default ``None``)
++
++ .. versionadded:: Aluminium
++
++ :param domain: DNS domain name of the DHCP server.
++ The value is a dictionary with a mandatory ``name`` property and an optional ``localOnly`` boolean one.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - domain:
++ name: lab.acme.org
++ localOnly: True
++
++ .. versionadded:: Aluminium
++
++ :param nat: addresses and ports to route in NAT forward mode.
++ The value is a dictionary with optional keys ``address`` and ``port``.
++ Both values are a dictionary with ``start`` and ``end`` values.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: nat
++ - nat:
++ address:
++ start: 1.2.3.4
++ end: 1.2.3.10
++ port:
++ start: 500
++ end: 1000
++
++ .. versionadded:: Aluminium
++
++ :param interfaces: whitespace-separated list of network interface devices that can be used for this network.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: passthrough
++ - interfaces: "eth10 eth11 eth12"
++
++ .. versionadded:: Aluminium
++
++ :param addresses: whitespace-separated list of addresses of PCI devices that can be used for this network in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - addresses: "0000:04:00.1 0000:e3:01.2"
++
++ .. versionadded:: Aluminium
++
++ :param physical_function: device name of the physical interface to use in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - physical_function: "eth0"
++
++ .. versionadded:: Aluminium
++
++ :param dns: virtual network DNS configuration.
++ The value is a dictionary described in :ref:`net-define-dns`.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - dns:
++ forwarders:
++ - domain: example.com
++ addr: 192.168.1.1
++ - addr: 8.8.8.8
++ - domain: www.example.com
++ txt:
++ example.com: "v=spf1 a -all"
++ _http.tcp.example.com: "name=value,paper=A4"
++ hosts:
++ 192.168.1.2:
++ - mirror.acme.lab
++ - test.acme.lab
++ srvs:
++ - name: ldap
++ protocol: tcp
++ domain: ldapserver.example.com
++ target: .
++ port: 389
++ priority: 1
++ weight: 10
++
++ .. versionadded:: Aluminium
+
+ .. versionadded:: sodium
+
+@@ -1332,9 +1472,62 @@ def network_defined(
+ name, connection=connection, username=username, password=password
+ )
+ if info and info[name]:
+- ret["comment"] = "Network {} exists".format(name)
+- ret["result"] = True
++ needs_autostart = (
++ info[name]["autostart"]
++ and not autostart
++ or not info[name]["autostart"]
++ and autostart
++ )
++ needs_update = __salt__["virt.network_update"](
++ name,
++ bridge,
++ forward,
++ vport=vport,
++ tag=tag,
++ ipv4_config=ipv4_config,
++ ipv6_config=ipv6_config,
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
++ test=True,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ if needs_update:
++ if not __opts__["test"]:
++ __salt__["virt.network_update"](
++ name,
++ bridge,
++ forward,
++ vport=vport,
++ tag=tag,
++ ipv4_config=ipv4_config,
++ ipv6_config=ipv6_config,
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
++ test=False,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ action = ", autostart flag changed" if needs_autostart else ""
++ ret["changes"][name] = "Network updated{}".format(action)
++ ret["comment"] = "Network {} updated{}".format(name, action)
++ else:
++ ret["comment"] = "Network {} unchanged".format(name)
++ ret["result"] = True
+ else:
++ needs_autostart = autostart
+ if not __opts__["test"]:
+ __salt__["virt.network_define"](
+ name,
+@@ -1344,14 +1537,35 @@ def network_defined(
+ tag=tag,
+ ipv4_config=ipv4_config,
+ ipv6_config=ipv6_config,
+- autostart=autostart,
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
++ autostart=False,
+ start=False,
+ connection=connection,
+ username=username,
+ password=password,
+ )
+- ret["changes"][name] = "Network defined"
+- ret["comment"] = "Network {} defined".format(name)
++ if needs_autostart:
++ ret["changes"][name] = "Network defined, marked for autostart"
++ ret["comment"] = "Network {} defined, marked for autostart".format(name)
++ else:
++ ret["changes"][name] = "Network defined"
++ ret["comment"] = "Network {} defined".format(name)
++
++ if needs_autostart:
++ if not __opts__["test"]:
++ __salt__["virt.network_set_autostart"](
++ name,
++ state="on" if autostart else "off",
++ connection=connection,
++ username=username,
++ password=password,
++ )
+ except libvirt.libvirtError as err:
+ ret["result"] = False
+ ret["comment"] = err.get_error_message()
+@@ -1371,14 +1585,56 @@ def network_running(
+ connection=None,
+ username=None,
+ password=None,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
+ ):
+ """
+ Defines and starts a new network with specified arguments.
+
++ :param name: Network name
+ :param bridge: Bridge name
+ :param forward: Forward mode (bridge, router, nat)
++
++ .. versionchanged:: Aluminium
++ a ``None`` value creates an isolated network with no forwarding at all
++
+ :param vport: Virtualport type (Default: ``'None'``)
++ The value can also be a dictionary with ``type`` and ``parameters`` keys.
++ The ``parameters`` value is a dictionary of virtual port parameters.
++
++ .. code-block:: yaml
++
++ - vport:
++ type: openvswitch
++ parameters:
++ interfaceid: 09b11c53-8b5c-4eeb-8f00-d84eaa0aaa4f
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
+ :param tag: Vlan tag (Default: ``'None'``)
++ The value can also be a dictionary with the ``tags`` and optional ``trunk`` keys.
++ ``trunk`` is a boolean value indicating whether to use VLAN trunking.
++ ``tags`` is a list of dictionaries with keys ``id`` and ``nativeMode``.
++ The ``nativeMode`` value can be one of ``tagged`` or ``untagged``.
++
++ .. code-block:: yaml
++
++ - tag:
++ trunk: True
++ tags:
++ - id: 42
++ nativeMode: untagged
++ - id: 47
++
++ .. versionchanged:: Aluminium
++ possible dictionary value
++
+ :param ipv4_config:
+ IPv4 network configuration. See the :py:func`virt.network_define
+ ` function corresponding parameter documentation
+@@ -1403,6 +1659,100 @@ def network_running(
+ :param password: password to connect with, overriding defaults
+
+ .. versionadded:: 2019.2.0
++ :param mtu: size of the Maximum Transmission Unit (MTU) of the network.
++ (default ``None``)
++
++ .. versionadded:: Aluminium
++
++ :param domain: DNS domain name of the DHCP server.
++ The value is a dictionary with a mandatory ``name`` property and an optional ``localOnly`` boolean one.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - domain:
++ name: lab.acme.org
++ localOnly: True
++
++ .. versionadded:: Aluminium
++
++ :param nat: addresses and ports to route in NAT forward mode.
++ The value is a dictionary with optional keys ``address`` and ``port``.
++ Both values are a dictionary with ``start`` and ``end`` values.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: nat
++ - nat:
++ address:
++ start: 1.2.3.4
++ end: 1.2.3.10
++ port:
++ start: 500
++ end: 1000
++
++ .. versionadded:: Aluminium
++
++ :param interfaces: whitespace-separated list of network interface devices that can be used for this network.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: passthrough
++ - interfaces: "eth10 eth11 eth12"
++
++ .. versionadded:: Aluminium
++
++ :param addresses: whitespace-separated list of addresses of PCI devices that can be used for this network in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - addresses: "0000:04:00.1 0000:e3:01.2"
++
++ .. versionadded:: Aluminium
++
++ :param physical_function: device name of the physical interface to use in ``hostdev`` forward mode.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - forward: hostdev
++ - physical_function: "eth0"
++
++ .. versionadded:: Aluminium
++
++ :param dns: virtual network DNS configuration.
++ The value is a dictionary described in :ref:`net-define-dns`.
++ (default ``None``)
++
++ .. code-block:: yaml
++
++ - dns:
++ forwarders:
++ - domain: example.com
++ addr: 192.168.1.1
++ - addr: 8.8.8.8
++ - domain: www.example.com
++ txt:
++ host.widgets.com.: "printer=lpr5"
++ example.com.: "This domain name is reserved for use in documentation"
++ hosts:
++ 192.168.1.2:
++ - mirror.acme.lab
++ - test.acme.lab
++ srvs:
++ - name: ldap
++ protocol: tcp
++ domain: ldapserver.example.com
++ target: .
++ port: 389
++ priority: 1
++ weight: 10
++
++ .. versionadded:: Aluminium
+
+ .. code-block:: yaml
+
+@@ -1443,6 +1793,13 @@ def network_running(
+ tag=tag,
+ ipv4_config=ipv4_config,
+ ipv6_config=ipv6_config,
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
+ autostart=autostart,
+ connection=connection,
+ username=username,
+diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
+index 6ac3e867b9..4603dfd8de 100644
+--- a/salt/templates/virt/libvirt_domain.jinja
++++ b/salt/templates/virt/libvirt_domain.jinja
+@@ -1,342 +1,336 @@
+ {%- import 'libvirt_disks.jinja' as libvirt_disks -%}
++{%- from 'libvirt_macros.jinja' import opt_attribute as opt_attribute -%}
+ {%- macro opt_attribute(obj, name, conv=none) %}
+ {%- if obj.get(name) is not none %} {{ name }}='{{ obj[name] if conv is none else conv(obj[name]) }}'{% endif -%}
+ {%- endmacro %}
+ {%- import 'libvirt_chardevs.jinja' as libvirt_chardevs -%}
+
+- {{ name }}
+- {%- if cpu %}
+- {{ cpu.get('maximum', '') }}
+- {%- endif %}
+- {%- if cpu.get('vcpus') %}
+-
+- {%- for vcpu_id in cpu["vcpus"].keys() %}
+-
+- {%- endfor %}
+-
+- {%- endif %}
+- {%- if cpu %}
+-
+- {%- if cpu.model %}
+- {{ cpu.model.get('name', '') }}
+- {%- endif %}
+- {%- if cpu.vendor %}
+- {{ cpu.get('vendor', '') }}
+- {%- endif %}
+- {%- if cpu.topology %}
+-
+- {%- endif %}
+- {%- if cpu.cache %}
+-
+- {%- endif %}
+- {%- if cpu.features %}
+- {%- for k, v in cpu.features.items() %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.numa %}
+-
+- {%- for numa_id in cpu.numa.keys() %}
+- {%- if cpu.numa.get(numa_id) %}
+-
+- {%- if cpu.numa[numa_id].distances %}
+-
+- {%- for sibling_id in cpu.numa[numa_id].distances %}
+-
+- {%- endfor %}
+-
+- {%- endif %}
+- |
+- {%- endif %}
+- {%- endfor %}
+-
+- {%- endif %}
+-
+- {%- if cpu.iothreads %}
+- {{ cpu.iothreads }}
+- {%- endif %}
+- {%- endif %}
+- {%- if cpu.tuning %}
+-
+- {%- if cpu.tuning.vcpupin %}
+- {%- for vcpu_id, cpuset in cpu.tuning.vcpupin.items() %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.tuning.emulatorpin %}
+-
+- {%- endif %}
+- {%- if cpu.tuning.iothreadpin %}
+- {%- for thread_id, cpuset in cpu.tuning.iothreadpin.items() %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.tuning.shares %}
+- {{ cpu.tuning.shares }}
+- {%- endif %}
+- {%- if cpu.tuning.period %}
+- {{ cpu.tuning.period }}
+- {%- endif %}
+- {%- if cpu.tuning.quota %}
+- {{ cpu.tuning.quota }}
+- {%- endif %}
+- {%- if cpu.tuning.global_period %}
+- {{ cpu.tuning.global_period }}
+- {%- endif %}
+- {%- if cpu.tuning.global_quota %}
+- {{ cpu.tuning.global_quota }}
+- {%- endif %}
+- {%- if cpu.tuning.emulator_period %}
+- {{ cpu.tuning.emulator_period }}
+- {%- endif %}
+- {%- if cpu.tuning.emulator_quota %}
+- {{ cpu.tuning.emulator_quota }}
+- {%- endif %}
+- {%- if cpu.tuning.iothread_period %}
+- {{ cpu.tuning.iothread_period }}
+- {%- endif %}
+- {%- if cpu.tuning.iothread_quota %}
+- {{ cpu.tuning.iothread_quota }}
+- {%- endif %}
+- {%- if cpu.tuning.vcpusched %}
+- {%- for sched in cpu.tuning.vcpusched %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.tuning.iothreadsched %}
+- {%- for sched in cpu.tuning.iothreadsched %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.tuning.emulatorsched %}
+-
+- {%- endif %}
+- {%- if cpu.tuning.cachetune %}
+- {%- for k, v in cpu.tuning.cachetune.items() %}
+-
+- {%- for e, atrs in v.items() %}
+- {%- if e is number and atrs %}
+-
+- {%- elif e is not number %}
+- {%- for atr, val in atrs.items() %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- endfor %}
+-
+- {%- endfor %}
+- {%- endif %}
+- {%- if cpu.tuning.memorytune %}
+- {%- for vcpus, nodes in cpu.tuning.memorytune.items() %}
+-
+- {%- for id, bandwidth in nodes.items() %}
+-
+- {%- endfor %}
+-
+- {%- endfor %}
+- {%- endif %}
+-
+- {%- endif %}
+- {%- if mem.max %}
+- {{ to_kib(mem.max) }}
+- {%- endif %}
+- {%- if mem.boot %}
+- {{ to_kib(mem.boot) }}
+- {%- endif %}
+- {%- if mem.current %}
+- {{ to_kib(mem.current) }}
+- {%- endif %}
+- {%- if mem %}
+-
+- {%- if 'hard_limit' in mem and mem.hard_limit %}
+- {{ to_kib(mem.hard_limit) }}
+- {%- endif %}
+- {%- if 'soft_limit' in mem and mem.soft_limit %}
+- {{ to_kib(mem.soft_limit) }}
+- {%- endif %}
+- {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %}
+- {{ to_kib(mem.swap_hard_limit) }}
+- {%- endif %}
+- {%- if 'min_guarantee' in mem and mem.min_guarantee %}
+- {{ to_kib(mem.min_guarantee) }}
+- {%- endif %}
+-
+- {%- endif %}
+- {%- if numatune %}
+-
+- {%- if 'memory' in numatune and numatune.memory %}
+-
+- {%- endif %}
+- {%- if 'memnodes' in numatune and numatune.memnodes %}
+- {%- for cell_id in numatune['memnodes'] %}
+-
+- {%- endfor %}
+- {%- endif %}
+-
++ {{ name }}
++{%- if cpu %}
++ {{ cpu.get('maximum', '') }}
++{%- endif %}
++{%- if cpu.get('vcpus') %}
++
++ {%- for vcpu_id in cpu["vcpus"].keys() %}
++
++ {%- endfor %}
++
++{%- endif %}
++{%- if cpu %}
++
++ {%- if cpu.model %}
++ {{ cpu.model.get('name', '') }}
++ {%- endif %}
++ {%- if cpu.vendor %}
++ {{ cpu.get('vendor', '') }}
++ {%- endif %}
++ {%- if cpu.topology %}
++
++ {%- endif %}
++ {%- if cpu.cache %}
++
++ {%- endif %}
++ {%- if cpu.features %}
++ {%- for k, v in cpu.features.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.numa %}
++
++ {%- for numa_id in cpu.numa.keys() %}
++ {%- if cpu.numa.get(numa_id) %}
++
++ {%- if cpu.numa[numa_id].distances %}
++
++ {%- for sibling_id in cpu.numa[numa_id].distances %}
++
++ {%- endfor %}
++
+ {%- endif %}
+- {%- if mem %}
+-
+- {%- if mem.hugepages %}
+-
+- {%- for page in mem.hugepages %}
+-
+- {%- endfor %}
+-
+- {%- if mem.nosharepages %}
+-
+- {%- endif %}
+- {%- if mem.locked %}
+-
+- {%- endif %}
+- {%- if mem.source %}
+-
+- {%- endif %}
+- {%- if mem.access %}
+-
+- {%- endif %}
+- {%- if mem.allocation %}
+-
+- {%- endif %}
+- {%- if mem.discard %}
+-
+- {%- endif %}
+- {%- endif %}
+-
++ |
++ {%- endif %}
++ {%- endfor %}
++
++ {%- endif %}
++
++ {%- if cpu.iothreads %}
++ {{ cpu.iothreads }}
++ {%- endif %}
++{%- endif %}
++{%- if cpu.tuning %}
++
++ {%- if cpu.tuning.vcpupin %}
++ {%- for vcpu_id, cpuset in cpu.tuning.vcpupin.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.emulatorpin %}
++
++ {%- endif %}
++ {%- if cpu.tuning.iothreadpin %}
++ {%- for thread_id, cpuset in cpu.tuning.iothreadpin.items() %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.shares %}
++ {{ cpu.tuning.shares }}
++ {%- endif %}
++ {%- if cpu.tuning.period %}
++ {{ cpu.tuning.period }}
++ {%- endif %}
++ {%- if cpu.tuning.quota %}
++ {{ cpu.tuning.quota }}
++ {%- endif %}
++ {%- if cpu.tuning.global_period %}
++ {{ cpu.tuning.global_period }}
++ {%- endif %}
++ {%- if cpu.tuning.global_quota %}
++ {{ cpu.tuning.global_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.emulator_period %}
++ {{ cpu.tuning.emulator_period }}
++ {%- endif %}
++ {%- if cpu.tuning.emulator_quota %}
++ {{ cpu.tuning.emulator_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.iothread_period %}
++ {{ cpu.tuning.iothread_period }}
++ {%- endif %}
++ {%- if cpu.tuning.iothread_quota %}
++ {{ cpu.tuning.iothread_quota }}
++ {%- endif %}
++ {%- if cpu.tuning.vcpusched %}
++ {%- for sched in cpu.tuning.vcpusched %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.iothreadsched %}
++ {%- for sched in cpu.tuning.iothreadsched %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.emulatorsched %}
++
++ {%- endif %}
++ {%- if cpu.tuning.cachetune %}
++ {%- for k, v in cpu.tuning.cachetune.items() %}
++
++ {%- for e, atrs in v.items() %}
++ {%- if e is number and atrs %}
++
++ {%- elif e is not number %}
++ {%- for atr, val in atrs.items() %}
++
++ {%- endfor %}
+ {%- endif %}
+-
+- {{ os_type }}
+- {% if boot %}
+- {% if 'kernel' in boot %}
+- {{ boot.kernel }}
+- {% endif %}
+- {% if 'initrd' in boot %}
+- {{ boot.initrd }}
+- {% endif %}
+- {% if 'cmdline' in boot %}
+- {{ boot.cmdline }}
+- {% endif %}
+- {% if 'loader' in boot %}
+- {{ boot.loader }}
+- {% endif %}
+- {% if 'nvram' in boot %}
+-
+- {% endif %}
+- {% endif %}
+- {% for dev in boot_dev %}
+-
+- {% endfor %}
+-
++ {%- endfor %}
++
++ {%- endfor %}
++ {%- endif %}
++ {%- if cpu.tuning.memorytune %}
++ {%- for vcpus, nodes in cpu.tuning.memorytune.items() %}
++
++ {%- for id, bandwidth in nodes.items() %}
++
++ {%- endfor %}
++
++ {%- endfor %}
++ {%- endif %}
++
++{%- endif %}
++{%- if mem.max %}
++ {{ to_kib(mem.max) }}
++{%- endif %}
++{%- if mem.boot %}
++ {{ to_kib(mem.boot) }}
++{%- endif %}
++{%- if mem.current %}
++ {{ to_kib(mem.current) }}
++{%- endif %}
++{%- if mem %}
++
++ {%- if 'hard_limit' in mem and mem.hard_limit %}
++ {{ to_kib(mem.hard_limit) }}
++ {%- endif %}
++ {%- if 'soft_limit' in mem and mem.soft_limit %}
++ {{ to_kib(mem.soft_limit) }}
++ {%- endif %}
++ {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %}
++ {{ to_kib(mem.swap_hard_limit) }}
++ {%- endif %}
++ {%- if 'min_guarantee' in mem and mem.min_guarantee %}
++ {{ to_kib(mem.min_guarantee) }}
++ {%- endif %}
++
++{%- endif %}
++{%- if numatune %}
++
++ {%- if 'memory' in numatune and numatune.memory %}
++
++ {%- endif %}
++ {%- if 'memnodes' in numatune and numatune.memnodes %}
++ {%- for cell_id in numatune['memnodes'] %}
++
++ {%- endfor %}
++ {%- endif %}
++
++{%- endif %}
++{%- if mem %}
++
++ {%- if mem.hugepages %}
++
++ {%- for page in mem.hugepages %}
++
++ {%- endfor %}
++
++ {%- if mem.nosharepages %}
++
++ {%- endif %}
++ {%- if mem.locked %}
++
++ {%- endif %}
++ {%- if mem.source %}
++
++ {%- endif %}
++ {%- if mem.access %}
++
++ {%- endif %}
++ {%- if mem.allocation %}
++
++ {%- endif %}
++ {%- if mem.discard %}
++
++ {%- endif %}
++ {%- endif %}
++
++{%- endif %}
++
++ {{ os_type }}
++{%- if boot %}
++ {%- if 'kernel' in boot %}
++ {{ boot.kernel }}
++ {%- endif %}
++ {%- if 'initrd' in boot %}
++ {{ boot.initrd }}
++ {%- endif %}
++ {%- if 'cmdline' in boot %}
++ {{ boot.cmdline }}
++ {%- endif %}
++ {%- if 'loader' in boot %}
++ {{ boot.loader }}
++ {%- endif %}
++ {%- if 'nvram' in boot %}
++
++ {%- endif %}
++{%- endif %}
++{%- for dev in boot_dev %}
++
++{%- endfor %}
++
+ {%- if clock %}
+-
+- {%- for timer_name in clock.timers %}
++
++ {%- for timer_name in clock.timers %}
+ {%- set timer = clock.timers[timer_name] %}
+-
+- {%- if "threshold" in timer or "slew" in timer or "limit" in timer %}
+-
+- {%- endif %}
+-
+- {%- endfor %}
+-
++
++ {%- if "threshold" in timer or "slew" in timer or "limit" in timer %}
++
++ {%- endif %}
++
++ {%- endfor %}
++
++{%- endif %}
++ {{ on_reboot }}
++
++{%- for disk in disks %}
++
++ {%- if disk.type == 'file' and 'source_file' in disk -%}
++
++ {%- endif %}
++ {%- if disk.type == 'block' -%}
++
++ {%- endif %}
++ {%- if disk.type == 'volume' and 'pool' in disk -%}
++
++ {%- endif %}
++ {%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
++
++ {%- if disk.address -%}
++
++ {%- endif %}
++ {%- if disk.driver -%}
++
++ {%- endif %}
++
++{%- endfor %}
++{%- if controller_model %}
++
++{%- endif %}
++{%- for nic in nics %}
++
++
++ {%- if nic.get('mac') -%}
++
++ {%- endif %}
++ {%- if nic.model %}{% endif %}
++
++{%- endfor %}
++{%- if graphics %}
++
++
++
++ {%- if graphics.type == "spice" %}
++
++
++
++ {%- endif %}
+ {%- endif %}
+- {{ on_reboot }}
+-
+- {% for disk in disks %}
+-
+- {% if disk.type == 'file' and 'source_file' in disk -%}
+-
+- {% endif %}
+- {% if disk.type == 'block' -%}
+-
+- {% endif %}
+- {% if disk.type == 'volume' and 'pool' in disk -%}
+-
+- {% endif %}
+- {%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
+-
+- {% if disk.address -%}
+-
+- {% endif %}
+- {% if disk.driver -%}
+-
+- {% endif %}
+-
+- {% endfor %}
+-
+- {% if controller_model %}
+-
+- {% endif %}
+-
+- {% for nic in nics %}
+-
+-
+- {% if nic.get('mac') -%}
+-
+- {%- endif %}
+- {% if nic.model %}{% endif %}
+-
+- {% endfor %}
+- {% if graphics %}
+-
+-
+-
+-
+- {% if graphics.type == "spice" -%}
+-
+-
+-
+- {%- endif %}
+- {% endif %}
+-
+- {%- for serial in serials %}
+-
+- {{ libvirt_chardevs.chardev(serial) }}
+-
+- {%- endfor %}
+-
+- {%- for console in consoles %}
+-
+- {{ libvirt_chardevs.chardev(console) }}
+-
+- {% endfor %}
++{%- for serial in serials %}
++
++ {{ libvirt_chardevs.chardev(serial) }}
++
++{%- endfor %}
++{%- for console in consoles %}
++
++ {{ libvirt_chardevs.chardev(console) }}
++
++{%- endfor %}
+ {%- if hypervisor in ["qemu", "kvm"] %}
+-
+-
+-
++
++
++
+ {%- endif %}
+-
+-
+-
+-
+-
++{%- for hostdev in hostdevs %}
++
++
++
++{%- endfor %}
++
++
++
++
++
+ {%- if hypervisor_features.get("kvm-hint-dedicated") %}
+-
+-
+-
++
++
++
+ {%- endif %}
+-
++
+
+diff --git a/salt/templates/virt/libvirt_macros.jinja b/salt/templates/virt/libvirt_macros.jinja
+new file mode 100644
+index 0000000000..d2e2fc213d
+--- /dev/null
++++ b/salt/templates/virt/libvirt_macros.jinja
+@@ -0,0 +1,3 @@
++{%- macro opt_attribute(obj, name, conv=none) %}
++{%- if obj.get(name) is not none %} {{ name }}='{{ obj[name] if conv is none else conv(obj[name]) }}'{% endif -%}
++{%- endmacro %}
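The new ``libvirt_macros.jinja`` centralizes the ``opt_attribute`` helper so several templates can share it: it emits an XML attribute only when the key is present on the object. A quick way to see what it renders, assuming the ``jinja2`` package is installed (the ``<mtu>`` element is just an example):

    import jinja2

    macro = (
        "{%- macro opt_attribute(obj, name, conv=none) %}"
        "{%- if obj.get(name) is not none %} {{ name }}="
        "'{{ obj[name] if conv is none else conv(obj[name]) }}'{% endif -%}"
        "{%- endmacro %}"
    )
    tpl = jinja2.Template(macro + "<mtu{{ opt_attribute({'size': 9000}, 'size') }}/>")
    print(tpl.render())  # <mtu size='9000'/>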
+diff --git a/salt/templates/virt/libvirt_network.jinja b/salt/templates/virt/libvirt_network.jinja
+index 2f11e64559..ab14408712 100644
+--- a/salt/templates/virt/libvirt_network.jinja
++++ b/salt/templates/virt/libvirt_network.jinja
+@@ -1,20 +1,98 @@
++{%- from 'libvirt_macros.jinja' import opt_attribute as opt_attribute -%}
+
+ {{ name }}
++{%- if bridge %}
+
+- {% if vport != None %}
+- {% endif %}{% if tag != None %}
+-
+-
+- {% endif %}
+- {% for ip_config in ip_configs %}
++{%- endif %}
++{%- if mtu %}
++
++{%- endif %}
++{%- if domain %}
++
++{%- endif %}
++{%- if forward %}
++
++{%- endif %}
++{%- if nat %}
++
++ {%- if nat.address %}
++
++ {%- endif %}
++ {%- if nat.port %}
++
++ {%- endif %}
++
++{%- endif %}
++{%- for iface in interfaces %}
++
++{%- endfor %}
++{%- for addr in addresses %}
++
++{%- endfor %}
++{%- if pf %}
++
++{%- endif %}
++{%- if forward %}
++
++{%- endif %}
++{%- if vport %}
++
++ {%- if vport.parameters %}
++
++ {%- endif %}
++
++{%- endif %}
++{%- if vlan %}
++
++ {%- for tag in vlan.tags %}
++
++ {%- endfor %}
++
++{%- endif %}
++{%- if dns %}
++
++ {%- for forwarder in dns.forwarders %}
++
++ {%- endfor %}
++ {%- for key in dns.txt.keys()|sort %}
++
++ {%- endfor %}
++ {%- for ip in dns.hosts.keys()|sort %}
++
++ {%- for hostname in dns.hosts[ip] %}
++ {{ hostname }}
++ {%- endfor %}
++
++ {%- endfor %}
++ {%- for srv in dns.srvs %}
++
++ {%- endfor %}
++
++{%- endif %}
++{%- for ip_config in ip_configs %}
+
+
+- {% for range in ip_config.dhcp_ranges %}
++ {%- for range in ip_config.dhcp_ranges %}
+
+- {% endfor %}
++ {%- endfor %}
++ {%- for ip in ip_config.hosts.keys()|sort %}
++ {%- set host = ip_config.hosts[ip] %}
++
++ {%- endfor %}
++ {%- if ip_config.bootp %}
++
++ {%- endif %}
+
++ {%- if ip_config.tftp %}
++
++ {%- endif %}
+
+- {% endfor %}
++{%- endfor %}
+
+diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
+index 5c187ca7e5..c91c3f6275 100644
+--- a/salt/utils/xmlutil.py
++++ b/salt/utils/xmlutil.py
+@@ -380,3 +380,32 @@ def change_xml(doc, data, mapping):
+ deleted = del_fn(parent_map, node)
+ need_update = need_update or deleted
+ return need_update
++
++
++def strip_spaces(node):
++ """
++ Remove all spaces and line breaks before and after nodes.
++ This helps comparing XML trees.
++
++ :param node: the XML node to remove blanks from
++ :return: the node
++ """
++
++ if node.tail is not None:
++ node.tail = node.tail.strip(" \t\n")
++ if node.text is not None:
++ node.text = node.text.strip(" \t\n")
++ try:
++ for child in node:
++ strip_spaces(child)
++ except RecursionError:
++ raise Exception("Failed to recurse on the node")
++
++ return node
++
++
++def element_to_str(node):
++ """
++ Serialize an XML node into a string
++ """
++ return salt.utils.stringutils.to_str(ElementTree.tostring(node))
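``element_to_str`` is mostly a bytes-to-str shim: on Python 3, ``ElementTree.tostring`` returns ``bytes``, while libvirt's ``define*XML`` entry points expect ``str``. An illustration with only the standard library:

    import xml.etree.ElementTree as ET

    node = ET.fromstring("<network><name>default</name></network>")
    raw = ET.tostring(node)
    assert isinstance(raw, bytes)
    print(raw.decode("utf-8"))  # roughly what to_str produces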
+diff --git a/tests/conftest.py b/tests/conftest.py
+index 6922f626f8..27df1f272d 100644
+--- a/tests/conftest.py
++++ b/tests/conftest.py
+@@ -27,10 +27,10 @@ import _pytest.logging
+ import _pytest.skipping
+ import psutil
+ import pytest
++import salt._logging.impl
+ import salt.config
+ import salt.loader
+ import salt.log.mixins
+-import salt.log.setup
+ import salt.utils.files
+ import salt.utils.path
+ import salt.utils.platform
+diff --git a/tests/pytests/functional/modules/test_opkg.py b/tests/pytests/functional/modules/test_opkg.py
+index 4e1d2f9c20..8b5a690de8 100644
+--- a/tests/pytests/functional/modules/test_opkg.py
++++ b/tests/pytests/functional/modules/test_opkg.py
+@@ -8,14 +8,12 @@ from tests.support.mock import patch
+ pytestmark = pytest.mark.skip_if_binaries_missing("stat", "md5sum", "uname")
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {
++@pytest.fixture
++def configure_loader_modules():
++ return {
+ opkg: {"__salt__": {"cmd.shell": cmd.shell, "cmd.run_stdout": cmd.run_stdout}},
+ cmd: {},
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+
+
+ def test_conf_d_path_does_not_exist_not_created_by_restart_check(tmp_path):
+diff --git a/tests/pytests/unit/beacons/test_sensehat.py b/tests/pytests/unit/beacons/test_sensehat.py
+index 501cb1c69b..b4b964b443 100644
+--- a/tests/pytests/unit/beacons/test_sensehat.py
++++ b/tests/pytests/unit/beacons/test_sensehat.py
+@@ -3,9 +3,9 @@ import salt.beacons.sensehat as sensehat
+ from tests.support.mock import MagicMock
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {
++@pytest.fixture
++def configure_loader_modules():
++ return {
+ sensehat: {
+ "__salt__": {
+ "sensehat.get_humidity": MagicMock(return_value=80),
+@@ -14,8 +14,6 @@ def setup_loader():
+ },
+ }
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+
+
+ def test_non_list_config():
+diff --git a/tests/pytests/unit/beacons/test_status.py b/tests/pytests/unit/beacons/test_status.py
+index bb32253c3e..6c010ddd80 100644
+--- a/tests/pytests/unit/beacons/test_status.py
++++ b/tests/pytests/unit/beacons/test_status.py
+@@ -10,16 +10,14 @@ import salt.modules.status as status_module
+ from salt.beacons import status
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {
++@pytest.fixture
++def configure_loader_modules():
++ return {
+ status: {
+ "__salt__": pytest.helpers.salt_loader_module_functions(status_module)
+ },
+ status_module: {"__grains__": {"kernel": "Linux"}, "__salt__": {}},
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+
+
+ def test_empty_config():
+diff --git a/tests/pytests/unit/modules/test_alternatives.py b/tests/pytests/unit/modules/test_alternatives.py
+index 49c6c5e415..aa05c3f0f4 100644
+--- a/tests/pytests/unit/modules/test_alternatives.py
++++ b/tests/pytests/unit/modules/test_alternatives.py
+@@ -4,11 +4,9 @@ from tests.support.helpers import TstSuiteLoggingHandler
+ from tests.support.mock import MagicMock, patch
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {alternatives: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {alternatives: {}}
+
+
+ def test_display():
+diff --git a/tests/pytests/unit/modules/test_ansiblegate.py b/tests/pytests/unit/modules/test_ansiblegate.py
+index ca5a6ab1ef..42c0968a6e 100644
+--- a/tests/pytests/unit/modules/test_ansiblegate.py
++++ b/tests/pytests/unit/modules/test_ansiblegate.py
+@@ -1,4 +1,3 @@
+-#
+ # Author: Bo Maryniuk
+
+
+@@ -17,6 +16,11 @@ pytestmark = pytest.mark.skipif(
+ )
+
+
++@pytest.fixture
++def configure_loader_modules():
++ return {ansible: {}}
++
++
+ @pytest.fixture
+ def resolver():
+ _resolver = ansible.AnsibleModuleResolver({})
+@@ -28,13 +32,6 @@ def resolver():
+ return _resolver
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {ansible: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+-
+-
+ def test_ansible_module_help(resolver):
+ """
+ Test help extraction from the module
+diff --git a/tests/pytests/unit/modules/test_archive.py b/tests/pytests/unit/modules/test_archive.py
+index c2a7f24d1d..a4dfca8c84 100644
+--- a/tests/pytests/unit/modules/test_archive.py
++++ b/tests/pytests/unit/modules/test_archive.py
+@@ -18,11 +18,9 @@ class ZipFileMock(MagicMock):
+ return self._files
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {archive: {"__grains__": {"id": 0}}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {archive: {"__grains__": {"id": 0}}}
+
+
+ def test_tar():
+diff --git a/tests/pytests/unit/modules/test_azurearm_dns.py b/tests/pytests/unit/modules/test_azurearm_dns.py
+index de096915a1..d1f42a60d7 100644
+--- a/tests/pytests/unit/modules/test_azurearm_dns.py
++++ b/tests/pytests/unit/modules/test_azurearm_dns.py
+@@ -109,8 +109,8 @@ def credentials():
+ }
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
++@pytest.fixture
++def configure_loader_modules():
+ """
+ setup loader modules and override the azurearm.get_client utility
+ """
+@@ -120,11 +120,9 @@ def setup_loader():
+ minion_config, utils=utils, whitelist=["azurearm_dns", "config"]
+ )
+ utils["azurearm.get_client"] = AzureClientMock()
+- setup_loader_modules = {
++ return {
+ azurearm_dns: {"__utils__": utils, "__salt__": funcs},
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+
+
+ def test_record_set_create_or_update(credentials):
+diff --git a/tests/pytests/unit/modules/test_nilrt_ip.py b/tests/pytests/unit/modules/test_nilrt_ip.py
+index adf08531dd..3e4bd414e9 100644
+--- a/tests/pytests/unit/modules/test_nilrt_ip.py
++++ b/tests/pytests/unit/modules/test_nilrt_ip.py
+@@ -5,11 +5,9 @@ import salt.modules.nilrt_ip as nilrt_ip
+ from tests.support.mock import patch
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {nilrt_ip: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {nilrt_ip: {}}
+
+
+ @pytest.fixture
+diff --git a/tests/pytests/unit/modules/test_opkg.py b/tests/pytests/unit/modules/test_opkg.py
+index e5817eef38..7fd12015e5 100644
+--- a/tests/pytests/unit/modules/test_opkg.py
++++ b/tests/pytests/unit/modules/test_opkg.py
+@@ -3,11 +3,9 @@ import salt.modules.opkg as opkg
+ from tests.support.mock import patch
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {opkg: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {opkg: {}}
+
+
+ def test_when_os_is_NILinuxRT_and_creation_of_RESTART_CHECK_STATE_PATH_fails_virtual_should_be_False():
+diff --git a/tests/pytests/unit/modules/test_restartcheck.py b/tests/pytests/unit/modules/test_restartcheck.py
+index b0c55dd0fe..8b4dc01bca 100644
+--- a/tests/pytests/unit/modules/test_restartcheck.py
++++ b/tests/pytests/unit/modules/test_restartcheck.py
+@@ -8,11 +8,9 @@ import salt.modules.systemd_service as service
+ from tests.support.mock import create_autospec, patch
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {restartcheck: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {restartcheck: {}}
+
+
+ def test_when_timestamp_file_does_not_exist_then_file_changed_nilrt_should_be_True():
+diff --git a/tests/pytests/unit/modules/test_slackware_service.py b/tests/pytests/unit/modules/test_slackware_service.py
+index 047582e668..2fe38c5232 100644
+--- a/tests/pytests/unit/modules/test_slackware_service.py
++++ b/tests/pytests/unit/modules/test_slackware_service.py
+@@ -8,6 +8,11 @@ import salt.modules.slackware_service as slackware_service
+ from tests.support.mock import MagicMock, patch
+
+
++@pytest.fixture
++def configure_loader_modules():
++ return {slackware_service: {}}
++
++
+ @pytest.fixture
+ def mocked_rcd():
+ glob_output = [
+@@ -39,13 +44,6 @@ def mocked_rcd():
+ return glob_mock, os_path_exists_mock, os_access_mock
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {slackware_service: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+-
+-
+ def test_get_all_rc_services_minus_system_and_config_files(mocked_rcd):
+ """
+ In Slackware, the services are started, stopped, enabled or disabled
+diff --git a/tests/pytests/unit/modules/test_swarm.py b/tests/pytests/unit/modules/test_swarm.py
+index e474f89f62..6259d0bd17 100644
+--- a/tests/pytests/unit/modules/test_swarm.py
++++ b/tests/pytests/unit/modules/test_swarm.py
+@@ -16,15 +16,13 @@ pytestmark = pytest.mark.skipif(
+ )
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {swarm: {"__context__": {}}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {swarm: {"__context__": {}}}
+
+
+ @pytest.fixture
+-def fake_context_client():
++def fake_context_client(setup_loader_mock):
+ fake_swarm_client = MagicMock()
+ patch_context = patch.dict(
+ swarm.__context__, {"client": fake_swarm_client, "server_name": "test swarm"}
+diff --git a/tests/pytests/unit/modules/test_tls.py b/tests/pytests/unit/modules/test_tls.py
+index d7e79d91ad..a1db1930ee 100644
+--- a/tests/pytests/unit/modules/test_tls.py
++++ b/tests/pytests/unit/modules/test_tls.py
+@@ -5,6 +5,11 @@ import salt.modules.tls as tls
+ from tests.support.mock import MagicMock, patch
+
+
++@pytest.fixture
++def configure_loader_modules():
++ return {tls: {}}
++
++
+ @pytest.fixture(scope="module")
+ def tls_test_data():
+ return {
+@@ -23,13 +28,6 @@ def tls_test_data():
+ }
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {tls: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
+-
+-
+ def test_create_ca_permissions_on_cert_and_key(tmpdir, tls_test_data):
+ ca_name = "test_ca"
+ certp = tmpdir.join(ca_name).join("{}_ca_cert.crt".format(ca_name)).strpath
+diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py
+index ec56bdff24..3bacd734a7 100644
+--- a/tests/pytests/unit/modules/virt/conftest.py
++++ b/tests/pytests/unit/modules/virt/conftest.py
+@@ -43,32 +43,29 @@ class MappedResultMock(MagicMock):
+
+ super().__init__(side_effect=mapped_results)
+
+- def add(self, name):
+- self._instances[name] = MagicMock()
++ def add(self, name, value=None):
++ self._instances[name] = value or MagicMock()
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader(request):
++def loader_modules_config():
+ # Create libvirt mock and connection mock
+ mock_libvirt = LibvirtMock()
+ mock_conn = MagicMock()
+ mock_conn.getStoragePoolCapabilities.return_value = ""
+
+ mock_libvirt.openAuth.return_value = mock_conn
+- setup_loader_modules = {
++ return {
+ virt: {
+ "libvirt": mock_libvirt,
+ "__salt__": {"config.get": config.get, "config.option": config.option},
+ },
+ config: {},
+ }
+- with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
+- yield loader_mock
+
+
+ @pytest.fixture
+ def make_mock_vm():
+- def _make_mock_vm(xml_def):
++ def _make_mock_vm(xml_def, running=False, inactive_def=None):
+ mocked_conn = virt.libvirt.openAuth.return_value
+
+ doc = ET.fromstring(xml_def)
+@@ -81,17 +78,21 @@ def make_mock_vm():
+ mocked_conn.listDefinedDomains.return_value = [name]
+
+ # Configure the mocked domain
+- domain_mock = virt.libvirt.virDomain()
+ if not isinstance(mocked_conn.lookupByName, MappedResultMock):
+ mocked_conn.lookupByName = MappedResultMock()
+ mocked_conn.lookupByName.add(name)
+ domain_mock = mocked_conn.lookupByName(name)
+- domain_mock.XMLDesc.return_value = xml_def
++
++ domain_mock.XMLDesc = MappedResultMock()
++ domain_mock.XMLDesc.add(0, xml_def)
++ domain_mock.XMLDesc.add(
++ virt.libvirt.VIR_DOMAIN_XML_INACTIVE, inactive_def or xml_def
++ )
+ domain_mock.OSType.return_value = os_type
+
+ # Return state as shutdown
+ domain_mock.info.return_value = [
+- 4,
++ 0 if running else 4,
+ 2048 * 1024,
+ 1024 * 1024,
+ 2,
+@@ -103,6 +104,8 @@ def make_mock_vm():
+ domain_mock.attachDevice.return_value = 0
+ domain_mock.detachDevice.return_value = 0
+
++ domain_mock.connect.return_value = mocked_conn
++
+ return domain_mock
+
+ return _make_mock_vm
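``make_mock_vm`` is a fixture returning a factory, a common pytest pattern: each test builds a domain mock with its own XML and run state while reusing the shared wiring. A stripped-down sketch of the pattern (the real fixture above also maps ``XMLDesc`` results per libvirt flag):

    import pytest
    from unittest.mock import MagicMock

    @pytest.fixture
    def make_mock_vm():
        def _make(name, running=False):
            vm = MagicMock()
            vm.name.return_value = name
            # mirrors the conftest above: state 4 unless running
            vm.info.return_value = [0 if running else 4, 2048 * 1024, 1024 * 1024, 2, 1234]
            return vm

        return _make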
+@@ -315,3 +318,66 @@ def make_capabilities():
+ """
+
+ return _make_capabilities
++
++
++@pytest.fixture
++def make_mock_network():
++ def _make_mock_net(xml_def):
++ mocked_conn = virt.libvirt.openAuth.return_value
++
++ doc = ET.fromstring(xml_def)
++ name = doc.find("name").text
++
++ if not isinstance(mocked_conn.networkLookupByName, MappedResultMock):
++ mocked_conn.networkLookupByName = MappedResultMock()
++ mocked_conn.networkLookupByName.add(name)
++ net_mock = mocked_conn.networkLookupByName(name)
++ net_mock.XMLDesc.return_value = xml_def
++
++ # libvirt defaults the autostart to unset
++ net_mock.autostart.return_value = 0
++
++ # Append the network to listAllNetworks return value
++ all_nets = mocked_conn.listAllNetworks.return_value
++ if not isinstance(all_nets, list):
++ all_nets = []
++ all_nets.append(net_mock)
++ mocked_conn.listAllNetworks.return_value = all_nets
++
++ return net_mock
++
++ return _make_mock_net
++
++
++@pytest.fixture
++def make_mock_device():
++ """
++ Create a mock host device
++ """
++
++ def _make_mock_device(xml_def):
++ mocked_conn = virt.libvirt.openAuth.return_value
++ if not isinstance(mocked_conn.nodeDeviceLookupByName, MappedResultMock):
++ mocked_conn.nodeDeviceLookupByName = MappedResultMock()
++
++ doc = ET.fromstring(xml_def)
++ name = doc.find("./name").text
++
++ mocked_conn.nodeDeviceLookupByName.add(name)
++ mocked_device = mocked_conn.nodeDeviceLookupByName(name)
++ mocked_device.name.return_value = name
++ mocked_device.XMLDesc.return_value = xml_def
++ mocked_device.listCaps.return_value = [
++ cap.get("type") for cap in doc.findall("./capability")
++ ]
++ return mocked_device
++
++ return _make_mock_device
++
++
++@pytest.fixture(params=[True, False], ids=["test", "notest"])
++def test(request):
++ """
++ Run the test with both True and False test values
++ """
++ return request.param
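Two helpers in this conftest are worth calling out: ``MappedResultMock`` returns a stable mock per lookup key, so repeated ``lookupByName(name)`` calls yield the same object, and the parametrized ``test`` fixture runs every test body under both ``test=True`` and ``test=False``. A simplified, dependency-free sketch of the mapped-mock idea:

    from unittest.mock import MagicMock

    instances = {}
    lookup = MagicMock(side_effect=lambda name: instances[name])

    def add(name, value=None):
        instances[name] = value or MagicMock()

    add("vm01")
    assert lookup("vm01") is lookup("vm01")  # same mock for the same key
    assert lookup.call_count == 2            # calls are still recorded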
+diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py
+index 347c3bcd88..0bde881403 100644
+--- a/tests/pytests/unit/modules/virt/test_domain.py
++++ b/tests/pytests/unit/modules/virt/test_domain.py
+@@ -1,8 +1,16 @@
++import pytest
+ import salt.modules.virt as virt
++import salt.utils.xmlutil as xmlutil
+ from salt._compat import ElementTree as ET
+ from tests.support.mock import MagicMock, patch
+
+-from .test_helpers import append_to_XMLDesc
++from .conftest import loader_modules_config
++from .test_helpers import append_to_XMLDesc, assert_called, strip_xml
++
++
++@pytest.fixture
++def configure_loader_modules():
++ return loader_modules_config()
+
+
+ def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool):
+@@ -589,3 +597,466 @@ def test_init_stop_on_reboot(make_capabilities):
+ define_mock = virt.libvirt.openAuth().defineXML
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ assert "destroy" == setxml.find("./on_reboot").text
++
++
++def test_init_hostdev_usb(make_capabilities, make_mock_device):
++ """
++ Test virt.init with USB host device passed through
++ """
++ make_capabilities()
++ make_mock_device(
++ """
++
++ usb_3_1_3
++ /sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3
++ /dev/bus/usb/003/004
++ usb_3_1
++
++ usb
++
++
++ 3
++ 4
++ AUKEY PC-LM1E Camera
++ KYE Systems Corp. (Mouse Systems)
++
++
++ """
++ )
++ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}):
++ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}):
++ virt.init("test_vm", 2, 2048, host_devices=["usb_3_1_3"], start=False)
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ expected_xml = strip_xml(
++ """
++
++
++
++ """
++ )
++ assert expected_xml == strip_xml(
++ ET.tostring(setxml.find("./devices/hostdev"))
++ )
++
++
++def test_init_hostdev_pci(make_capabilities, make_mock_device):
++ """
++ Test virt.init with PCI host device passed through
++ """
++ make_capabilities()
++ make_mock_device(
++ """
++
++ pci_1002_71c4
++ pci_8086_27a1
++
++ 0xffffff
++ 0
++ 1
++ 0
++ 0
++ M56GL [Mobility FireGL V5200]
++ ATI Technologies Inc
++
++
++
++ """
++ )
++ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}):
++ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}):
++ virt.init("test_vm", 2, 2048, host_devices=["pci_1002_71c4"], start=False)
++ define_mock = virt.libvirt.openAuth().defineXML
++ setxml = ET.fromstring(define_mock.call_args[0][0])
++ expected_xml = strip_xml(
++ """
++
++
++
++ """
++ )
++ assert expected_xml == strip_xml(
++ ET.tostring(setxml.find("./devices/hostdev"))
++ )
++
++
++def test_update_hostdev_nochange(make_mock_device, make_mock_vm):
++ """
++ Test the virt.update function with no host device changes
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ hvm
++
++ restart
++
++
++
++
++
++
++
++
++
++
++
++ """
++ domain_mock = make_mock_vm(xml_def)
++
++ make_mock_device(
++ """
++
++ usb_3_1_3
++ /sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3
++ /dev/bus/usb/003/004
++ usb_3_1
++
++ usb
++
++
++ 3
++ 4
++ AUKEY PC-LM1E Camera
++ KYE Systems Corp. (Mouse Systems)
++
++
++ """
++ )
++ make_mock_device(
++ """
++
++ pci_1002_71c4
++ pci_8086_27a1
++
++ 0xffffff
++ 0
++ 1
++ 0
++ 0
++ M56GL [Mobility FireGL V5200]
++ ATI Technologies Inc
++
++
++
++ """
++ )
++
++ ret = virt.update("my_vm", host_devices=["pci_1002_71c4", "usb_3_1_3"])
++
++ assert not ret["definition"]
++ define_mock = virt.libvirt.openAuth().defineXML
++ define_mock.assert_not_called()
++
++
++@pytest.mark.parametrize(
++ "running,live",
++ [(False, False), (True, False), (True, True)],
++ ids=["stopped, no live", "running, no live", "running, live"],
++)
++def test_update_hostdev_changes(running, live, make_mock_device, make_mock_vm, test):
++ """
++ Test the virt.update function with host device changes
++ """
++ xml_def = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ hvm
++
++ restart
++
++
++
++
++
++
++ """
++ domain_mock = make_mock_vm(xml_def, running)
++
++ make_mock_device(
++ """
++
++ usb_3_1_3
++ /sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3
++ /dev/bus/usb/003/004
++ usb_3_1
++
++ usb
++
++
++ 3
++ 4
++ AUKEY PC-LM1E Camera
++ KYE Systems Corp. (Mouse Systems)
++
++
++ """
++ )
++
++ make_mock_device(
++ """
++
++ pci_1002_71c4
++ pci_8086_27a1
++
++ 0xffffff
++ 0
++ 1
++ 0
++ 0
++ M56GL [Mobility FireGL V5200]
++ ATI Technologies Inc
++
++
++
++ """
++ )
++
++ ret = virt.update("my_vm", host_devices=["usb_3_1_3"], test=test, live=live)
++ define_mock = virt.libvirt.openAuth().defineXML
++ assert_called(define_mock, not test)
++
++ # Test that the XML is updated with the proper devices
++ usb_device_xml = strip_xml(
++ """
++
++
++
++ """
++ )
++ if not test:
++ set_xml = ET.fromstring(define_mock.call_args[0][0])
++ actual_hostdevs = [
++ ET.tostring(xmlutil.strip_spaces(node))
++ for node in set_xml.findall("./devices/hostdev")
++ ]
++ assert [usb_device_xml] == actual_hostdevs
++
++ if not test and live:
++ attach_xml = strip_xml(domain_mock.attachDevice.call_args[0][0])
++ assert usb_device_xml == attach_xml
++
++ pci_device_xml = strip_xml(
++ """
++
++
++
++
++ """
++ )
++ detach_xml = strip_xml(domain_mock.detachDevice.call_args[0][0])
++ assert pci_device_xml == detach_xml
++ else:
++ domain_mock.attachDevice.assert_not_called()
++ domain_mock.detachDevice.assert_not_called()
++
++
++def test_diff_nics():
++ """
++ Test virt._diff_nics()
++ """
++ old_nics = ET.fromstring(
++ """
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ """
++ ).findall("interface")
++
++ new_nics = ET.fromstring(
++ """
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ """
++ ).findall("interface")
++ ret = virt._diff_interface_lists(old_nics, new_nics)
++ assert ["52:54:00:39:02:b1"] == [
++ nic.find("mac").get("address") for nic in ret["unchanged"]
++ ]
++ assert ["52:54:00:39:02:b2", "52:54:00:39:02:b4"] == [
++ nic.find("mac").get("address") for nic in ret["new"]
++ ]
++ assert ["52:54:00:39:02:b2", "52:54:00:39:02:b3"] == [
++ nic.find("mac").get("address") for nic in ret["deleted"]
++ ]
++
++
++def test_diff_nics_live_nochange():
++ """
++ Libvirt alters the NICs of network type when running the guest, test the virt._diff_nics()
++ function with no change in such a case.
++ """
++ old_nics = ET.fromstring(
++ """
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ """
++ ).findall("interface")
++
++ new_nics = ET.fromstring(
++ """
++
++
++
++
++
++
++
++
++
++
++ """
++ )
++ ret = virt._diff_interface_lists(old_nics, new_nics)
++ assert ["52:54:00:03:02:15", "52:54:00:ea:2e:89"] == [
++ nic.find("mac").get("address") for nic in ret["unchanged"]
++ ]
++
++
++def test_update_nic_hostdev_nochange(make_mock_network, make_mock_vm, test):
++ """
++ Test the virt.update function with a running host with hostdev nic
++ """
++ xml_def_template = """
++
++ my_vm
++ 524288
++ 524288
++ 1
++
++ hvm
++
++ restart
++
++ {}
++
++
++ """
++ inactive_nic = """
++
++
++
++
++
++
++
++ """
++ running_nic = """
++
++
++
++
++
++
++
++
++ """
++ domain_mock = make_mock_vm(
++ xml_def_template.format(running_nic),
++ running="running",
++ inactive_def=xml_def_template.format(inactive_nic),
++ )
++
++ make_mock_network(
++ """
++
++ test-hostdev
++ 51d0aaa5-7530-4c60-8498-5bc3ab8c655b
++
++
++
++
++
++
++ """
++ )
++
++ ret = virt.update(
++ "my_vm",
++ interfaces=[{"name": "eth0", "type": "network", "source": "test-hostdev"}],
++ test=test,
++ live=True,
++ )
++ assert not ret.get("definition")
++ assert not ret.get("interface").get("attached")
++ assert not ret.get("interface").get("detached")
++ define_mock = virt.libvirt.openAuth().defineXML
++ define_mock.assert_not_called()
++ domain_mock.attachDevice.assert_not_called()
++ domain_mock.detachDevice.assert_not_called()
+diff --git a/tests/pytests/unit/modules/virt/test_helpers.py b/tests/pytests/unit/modules/virt/test_helpers.py
+index f64aee2821..5410f45603 100644
+--- a/tests/pytests/unit/modules/virt/test_helpers.py
++++ b/tests/pytests/unit/modules/virt/test_helpers.py
+@@ -1,3 +1,4 @@
++import salt.utils.xmlutil as xmlutil
+ from salt._compat import ElementTree as ET
+
+
+@@ -9,3 +10,27 @@ def append_to_XMLDesc(mocked, fragment):
+ xml_fragment = ET.fromstring(fragment)
+ xml_doc.append(xml_fragment)
+ mocked.XMLDesc.return_value = ET.tostring(xml_doc)
++
++
++def assert_xml_equals(expected, actual):
++ """
++ Assert that two ElementTree nodes are equal
++ """
++ assert xmlutil.to_dict(xmlutil.strip_spaces(expected), True) == xmlutil.to_dict(
++ xmlutil.strip_spaces(actual), True
++ )
++
++
++def strip_xml(xml_str):
++ """
++ Remove all spaces and formatting from an XML string
++ """
++ return ET.tostring(xmlutil.strip_spaces(ET.fromstring(xml_str)))
++
++
++def assert_called(mock, condition):
++ """
++ Assert that the mock has been called when ``condition`` is true, and not called otherwise.
++ It's a simple equivalence check, but it makes the tests easier to read.
++ """
++ assert not condition and not mock.called or condition and mock.called
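Since the assertion above is just an equivalence between ``condition`` and ``mock.called``, a shorter hypothetical spelling (not part of the patch) would be:

    def assert_called(mock, condition):
        assert mock.called == bool(condition)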
+diff --git a/tests/pytests/unit/modules/virt/test_host.py b/tests/pytests/unit/modules/virt/test_host.py
+new file mode 100644
+index 0000000000..555deb23bb
+--- /dev/null
++++ b/tests/pytests/unit/modules/virt/test_host.py
+@@ -0,0 +1,219 @@
++import pytest
++import salt.modules.virt as virt
++
++from .conftest import loader_modules_config
++
++
++@pytest.fixture
++def configure_loader_modules():
++ return loader_modules_config()
++
++
++def test_node_devices(make_mock_device):
++ """
++ Test the virt.node_devices() function
++ """
++ mock_devs = [
++ make_mock_device(
++ """
++
++ pci_1002_71c4
++ pci_8086_27a1
++
++ 0xffffff
++ 0
++ 1
++ 0
++ 0
++ M56GL [Mobility FireGL V5200]
++ ATI Technologies Inc
++
++
++
++ """
++ ),
++ # Linux USB hub to be ignored
++ make_mock_device(
++ """
++
++ usb_device_1d6b_1_0000_00_1d_0
++ pci_8086_27c8
++
++ 2
++ 1
++ 1.1 root hub
++ Linux Foundation
++
++
++ """
++ ),
++ # SR-IOV PCI device with multiple capabilities
++ make_mock_device(
++ """
++
++ pci_0000_02_10_7
++ pci_0000_00_04_0
++
++ 0
++ 2
++ 16
++ 7
++ 82576 Virtual Function
++ Intel Corporation
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ """
++ ),
++ # PCI bridge to be ignored
++ make_mock_device(
++ """
++
++ pci_0000_00_1c_0
++ computer
++
++ 0xffffff
++ 0
++ 0
++ 28
++ 0
++ 8 Series/C220 Series Chipset Family PCI Express Root Port #1
++ Intel Corporation
++
++
++
++
++
++
++
++
++
++
++ """
++ ),
++ # Other device to be ignored
++ make_mock_device(
++ """
++
++ mdev_3627463d_b7f0_4fea_b468_f1da537d301b
++ computer
++
++
++
++
++
++ """
++ ),
++ # USB device to be listed
++ make_mock_device(
++ """
++
++ usb_3_1_3
++ /sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3
++ /dev/bus/usb/003/004
++ usb_3_1
++
++ usb
++
++
++ 3
++ 4
++ AUKEY PC-LM1E Camera
++ KYE Systems Corp. (Mouse Systems)
++
++
++ """
++ ),
++ # Network device to be listed
++ make_mock_device(
++ """
++
++ net_eth8_e6_86_48_46_c5_29
++ /sys/devices/pci0000:3a/0000:3a:00.0/0000:3b:00.0/0000:3c:03.0/0000:3d:02.2/net/eth8
++ pci_0000_02_10_7
++
++ eth8
++ e6:86:48:46:c5:29
++
++
++
++ """
++ ),
++ # Network device to be ignored
++ make_mock_device(
++ """
++
++ net_lo_00_00_00_00_00_00
++ /sys/devices/virtual/net/lo
++ computer
++
++ lo
++ 00:00:00:00:00:00
++
++
++
++ """
++ ),
++ ]
++ virt.libvirt.openAuth().listAllDevices.return_value = mock_devs
++
++ assert [
++ {
++ "name": "pci_1002_71c4",
++ "caps": "pci",
++ "vendor_id": "0x1002",
++ "vendor": "ATI Technologies Inc",
++ "product_id": "0x71c4",
++ "product": "M56GL [Mobility FireGL V5200]",
++ "address": "0000:01:00.0",
++ "PCI class": "0xffffff",
++ },
++ {
++ "name": "pci_0000_02_10_7",
++ "caps": "pci",
++ "vendor_id": "0x8086",
++ "vendor": "Intel Corporation",
++ "product_id": "0x10ca",
++ "product": "82576 Virtual Function",
++ "address": "0000:02:10.7",
++ "physical function": "0000:02:00.1",
++ "virtual functions": [
++ "0000:02:00.2",
++ "0000:02:00.3",
++ "0000:02:00.4",
++ "0000:02:00.5",
++ ],
++ },
++ {
++ "name": "usb_3_1_3",
++ "caps": "usb_device",
++ "vendor": "KYE Systems Corp. (Mouse Systems)",
++ "vendor_id": "0x0458",
++ "product": "AUKEY PC-LM1E Camera",
++ "product_id": "0x6006",
++ "address": "003:004",
++ },
++ {
++ "name": "eth8",
++ "caps": "net",
++ "address": "e6:86:48:46:c5:29",
++ "state": "down",
++ "device name": "pci_0000_02_10_7",
++ },
++ ] == virt.node_devices()
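The make_mock_device fixture used above lives in this package's conftest.py and is not part of this hunk. A plausible sketch of it, assuming the mocked libvirt nodeDevice only needs name() and XMLDesc() (the real fixture may also register the device on the mocked connection):

```python
from unittest.mock import MagicMock

import pytest
from salt._compat import ElementTree as ET


@pytest.fixture
def make_mock_device():
    def _make_mock_device(xml_def):
        # Parse the definition once to expose the device name, and hand the
        # raw XML back through XMLDesc() like the real libvirt binding does.
        mocked_device = MagicMock()
        doc = ET.fromstring(xml_def)
        mocked_device.name.return_value = doc.find("name").text
        mocked_device.XMLDesc.return_value = xml_def
        return mocked_device

    return _make_mock_device
```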
+diff --git a/tests/pytests/unit/modules/virt/test_network.py b/tests/pytests/unit/modules/virt/test_network.py
+new file mode 100644
+index 0000000000..e7e544c580
+--- /dev/null
++++ b/tests/pytests/unit/modules/virt/test_network.py
+@@ -0,0 +1,450 @@
++import pytest
++import salt.modules.virt as virt
++import salt.utils.xmlutil as xmlutil
++from salt._compat import ElementTree as ET
++
++from .conftest import loader_modules_config
++from .test_helpers import assert_called, assert_xml_equals, strip_xml
++
++
++@pytest.fixture
++def configure_loader_modules():
++ return loader_modules_config()
++
++
++def test_gen_xml():
++ """
++    Test virt._gen_net_xml()
++ """
++ xml_data = virt._gen_net_xml("network", "main", "bridge", "openvswitch")
++ root = ET.fromstring(xml_data)
++ assert "network" == root.find("name").text
++ assert "main" == root.find("bridge").attrib["name"]
++ assert "bridge" == root.find("forward").attrib["mode"]
++ assert "openvswitch" == root.find("virtualport").attrib["type"]
++
++
++def test_gen_xml_nat():
++ """
++    Test virt._gen_net_xml() in a NAT setup
++ """
++ xml_data = virt._gen_net_xml(
++ "network",
++ "main",
++ "nat",
++ None,
++ ip_configs=[
++ {
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ "hosts": {
++ "192.168.2.10": {
++ "mac": "00:16:3e:77:e2:ed",
++ "name": "foo.example.com",
++ },
++ },
++ "bootp": {"file": "pxeboot.img", "server": "192.168.2.1"},
++ "tftp": "/path/to/tftp",
++ },
++ {
++ "cidr": "2001:db8:ca2:2::/64",
++ "hosts": {
++ "2001:db8:ca2:2:3::1": {"name": "paul"},
++ "2001:db8:ca2:2:3::2": {
++ "id": "0:3:0:1:0:16:3e:11:22:33",
++ "name": "ralph",
++ },
++ },
++ },
++ ],
++ nat={
++ "address": {"start": "1.2.3.4", "end": "1.2.3.10"},
++ "port": {"start": 500, "end": 1000},
++ },
++ domain={"name": "acme.lab", "localOnly": True},
++ mtu=9000,
++ )
++ root = ET.fromstring(xml_data)
++ assert "network" == root.find("name").text
++ assert "main" == root.find("bridge").attrib["name"]
++ assert "nat" == root.find("forward").attrib["mode"]
++ expected_ipv4 = ET.fromstring(
++ """
++
++
++
++
++
++
++
++
++
++ """
++ )
++ assert_xml_equals(expected_ipv4, root.find("./ip[@address='192.168.2.1']"))
++
++ expected_ipv6 = ET.fromstring(
++ """
++
++
++
++
++
++
++ """
++ )
++ assert_xml_equals(expected_ipv6, root.find("./ip[@address='2001:db8:ca2:2::1']"))
++
++ actual_nat = ET.tostring(xmlutil.strip_spaces(root.find("./forward/nat")))
++ expected_nat = strip_xml(
++ """
++
++
++
++
++ """
++ )
++ assert expected_nat == actual_nat
++
++ assert {"name": "acme.lab", "localOnly": "yes"} == root.find("./domain").attrib
++ assert "9000" == root.find("mtu").get("size")
++
++
++def test_gen_xml_dns():
++ """
++    Test virt._gen_net_xml() with DNS configuration
++ """
++ xml_data = virt._gen_net_xml(
++ "network",
++ "main",
++ "nat",
++ None,
++ ip_configs=[
++ {
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [{"start": "192.168.2.10", "end": "192.168.2.25"}],
++ }
++ ],
++ dns={
++ "forwarders": [
++ {"domain": "example.com", "addr": "192.168.1.1"},
++ {"addr": "8.8.8.8"},
++ {"domain": "www.example.com"},
++ ],
++ "txt": {
++ "host.widgets.com.": "printer=lpr5",
++ "example.com.": "reserved for doc",
++ },
++ "hosts": {"192.168.1.2": ["mirror.acme.lab", "test.acme.lab"]},
++ "srvs": [
++ {
++ "name": "srv1",
++ "protocol": "tcp",
++ "domain": "test-domain-name",
++ "target": ".",
++ "port": 1024,
++ "priority": 10,
++ "weight": 10,
++ },
++ {"name": "srv2", "protocol": "udp"},
++ ],
++ },
++ )
++ root = ET.fromstring(xml_data)
++ expected_xml = ET.fromstring(
++ """
++
++
++
++
++
++
++
++ mirror.acme.lab
++ test.acme.lab
++
++
++
++
++ """
++ )
++ assert_xml_equals(expected_xml, root.find("./dns"))
++
++
++def test_gen_xml_isolated():
++ """
++ Test the virt._gen_net_xml() function for an isolated network
++ """
++ xml_data = virt._gen_net_xml("network", "main", None, None)
++ assert ET.fromstring(xml_data).find("forward") is None
++
++
++def test_gen_xml_passthrough_interfaces():
++ """
++ Test the virt._gen_net_xml() function for a passthrough forward mode
++ """
++ xml_data = virt._gen_net_xml(
++ "network", "virbr0", "passthrough", None, interfaces="eth10 eth11 eth12",
++ )
++ root = ET.fromstring(xml_data)
++ assert "passthrough" == root.find("forward").get("mode")
++ assert ["eth10", "eth11", "eth12"] == [
++ n.get("dev") for n in root.findall("forward/interface")
++ ]
++
++
++def test_gen_xml_hostdev_addresses():
++ """
++ Test the virt._gen_net_xml() function for a hostdev forward mode with PCI addresses
++ """
++ xml_data = virt._gen_net_xml(
++ "network", "virbr0", "hostdev", None, addresses="0000:04:00.1 0000:e3:01.2",
++ )
++ root = ET.fromstring(xml_data)
++ expected_forward = ET.fromstring(
++ """
++
++
++
++
++ """
++ )
++ assert_xml_equals(expected_forward, root.find("./forward"))
++
++
++def test_gen_xml_hostdev_pf():
++ """
++ Test the virt._gen_net_xml() function for a hostdev forward mode with physical function
++ """
++ xml_data = virt._gen_net_xml(
++ "network", "virbr0", "hostdev", None, physical_function="eth0"
++ )
++ root = ET.fromstring(xml_data)
++ expected_forward = strip_xml(
++ """
++
++
++
++ """
++ )
++ actual_forward = ET.tostring(xmlutil.strip_spaces(root.find("./forward")))
++ assert expected_forward == actual_forward
++
++
++def test_gen_xml_openvswitch():
++ """
++ Test the virt._gen_net_xml() function for an openvswitch setup with virtualport and vlan
++ """
++ xml_data = virt._gen_net_xml(
++ "network",
++ "ovsbr0",
++ "bridge",
++ {
++ "type": "openvswitch",
++ "parameters": {"interfaceid": "09b11c53-8b5c-4eeb-8f00-d84eaa0aaa4f"},
++ },
++ tag={
++ "trunk": True,
++ "tags": [{"id": 42, "nativeMode": "untagged"}, {"id": 47}],
++ },
++ )
++ expected_xml = ET.fromstring(
++ """
++
++ network
++
++
++
++
++
++
++
++
++
++
++ """
++ )
++ assert_xml_equals(expected_xml, ET.fromstring(xml_data))
++
++
++@pytest.mark.parametrize(
++ "autostart, start", [(True, True), (False, True), (False, False)],
++)
++def test_define(make_mock_network, autostart, start):
++ """
++    Test the virt.network_define function
++    """
++    # Create a network mock to fake the autostart flag at creation time and
++    # to let us check that everything went fine. It does not interfere with
++    # the network definition itself.
++ mock_network = make_mock_network("default")
++ assert virt.network_define(
++ "default",
++ "test-br0",
++ "nat",
++ ipv4_config={
++ "cidr": "192.168.124.0/24",
++ "dhcp_ranges": [{"start": "192.168.124.2", "end": "192.168.124.254"}],
++ },
++ autostart=autostart,
++ start=start,
++ )
++
++ expected_xml = strip_xml(
++ """
++
++ default
++
++
++
++
++
++
++
++
++ """
++ )
++ define_mock = virt.libvirt.openAuth().networkDefineXML
++ assert expected_xml == strip_xml(define_mock.call_args[0][0])
++
++ if autostart:
++ mock_network.setAutostart.assert_called_with(1)
++ else:
++ mock_network.setAutostart.assert_not_called()
++
++ assert_called(mock_network.create, autostart or start)
++
++
++def test_update_nat_nochange(make_mock_network):
++ """
++ Test updating a NAT network without changes
++ """
++ net_mock = make_mock_network(
++ """
++
++ default
++ d6c95a31-16a2-473a-b8cd-7ad2fe2dd855
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ """
++ )
++ assert not virt.network_update(
++ "default",
++ None,
++ "nat",
++ ipv4_config={
++ "cidr": "192.168.122.0/24",
++ "dhcp_ranges": [{"start": "192.168.122.2", "end": "192.168.122.254"}],
++ "hosts": {
++ "192.168.122.136": {"mac": "52:54:00:46:4d:9e", "name": "mirror"},
++ },
++ "bootp": {"file": "pxelinux.0", "server": "192.168.122.110"},
++ },
++ domain={"name": "my.lab", "localOnly": True},
++ nat={"port": {"start": 1024, "end": "65535"}},
++ )
++ define_mock = virt.libvirt.openAuth().networkDefineXML
++ define_mock.assert_not_called()
++
++
++@pytest.mark.parametrize("test", [True, False])
++def test_update_nat_change(make_mock_network, test):
++ """
++ Test updating a NAT network with changes
++ """
++ net_mock = make_mock_network(
++ """
++
++ default
++ d6c95a31-16a2-473a-b8cd-7ad2fe2dd855
++
++
++
++
++
++
++
++
++
++
++ """
++ )
++ assert virt.network_update(
++ "default",
++ "test-br0",
++ "nat",
++ ipv4_config={
++ "cidr": "192.168.124.0/24",
++ "dhcp_ranges": [{"start": "192.168.124.2", "end": "192.168.124.254"}],
++ },
++ test=test,
++ )
++ define_mock = virt.libvirt.openAuth().networkDefineXML
++ assert_called(define_mock, not test)
++
++ if not test:
++ # Test the passed new XML
++ expected_xml = strip_xml(
++ """
++
++ default
++
++ d6c95a31-16a2-473a-b8cd-7ad2fe2dd855
++
++
++
++
++
++
++
++
++ """
++ )
++ assert expected_xml == strip_xml(define_mock.call_args[0][0])
++
++
++@pytest.mark.parametrize("change", [True, False], ids=["changed", "unchanged"])
++def test_update_hostdev_pf(make_mock_network, change):
++ """
++    Test updating a hostdev network, with and without changes
++ """
++ net_mock = make_mock_network(
++ """
++
++ test-hostdev
++ 51d0aaa5-7530-4c60-8498-5bc3ab8c655b
++
++
++
++
++
++
++ """
++ )
++ assert change == virt.network_update(
++ "test-hostdev",
++ None,
++ "hostdev",
++ physical_function="eth0" if not change else "eth1",
++ )
++ define_mock = virt.libvirt.openAuth().networkDefineXML
++ if change:
++ define_mock.assert_called()
++ else:
++ define_mock.assert_not_called()
+diff --git a/tests/pytests/unit/output/test_highstate.py b/tests/pytests/unit/output/test_highstate.py
+index 8336208bae..53eaf6fde7 100644
+--- a/tests/pytests/unit/output/test_highstate.py
++++ b/tests/pytests/unit/output/test_highstate.py
+@@ -2,11 +2,9 @@ import pytest
+ import salt.output.highstate as highstate
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {highstate: {"__opts__": {"strip_colors": True}}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {highstate: {"__opts__": {"strip_colors": True}}}
+
+
+ @pytest.mark.parametrize("data", [None, {"return": None}, {"return": {"data": None}}])
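This hunk is part of a broader migration: instead of an autouse setup_loader fixture wrapping pytest.helpers.loader_mock, each test module now returns its mapping from a configure_loader_modules fixture and lets the pytest salt plugin apply it. A self-contained approximation of what the plugin does with that mapping (not the plugin's actual code):

```python
from contextlib import ExitStack, contextmanager
from unittest.mock import patch


@contextmanager
def apply_loader_modules(mapping):
    # For each module, patch the loader-injected dunder dicts (__opts__,
    # __salt__, ...) with the provided values for the duration of a test,
    # creating the attribute first if the loader never ran.
    with ExitStack() as stack:
        for module, dunders in mapping.items():
            for name, values in dunders.items():
                if not hasattr(module, name):
                    setattr(module, name, {})
                stack.enter_context(patch.dict(getattr(module, name), values))
        yield
```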
+diff --git a/tests/pytests/unit/states/test_alternatives.py b/tests/pytests/unit/states/test_alternatives.py
+index 7bdcdb97cb..de0bc509b9 100644
+--- a/tests/pytests/unit/states/test_alternatives.py
++++ b/tests/pytests/unit/states/test_alternatives.py
+@@ -7,11 +7,9 @@ import salt.states.alternatives as alternatives
+ from tests.support.mock import MagicMock, patch
+
+
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {alternatives: {}}
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++@pytest.fixture
++def configure_loader_modules():
++ return {alternatives: {}}
+
+
+ # 'install' function tests: 1
+diff --git a/tests/pytests/unit/states/test_ini_manage.py b/tests/pytests/unit/states/test_ini_manage.py
+index b0030793da..2b44e2ffd6 100644
+--- a/tests/pytests/unit/states/test_ini_manage.py
++++ b/tests/pytests/unit/states/test_ini_manage.py
+@@ -9,17 +9,8 @@ from tests.support.mock import patch
+
+
+ @pytest.fixture
+-def sections():
+- sections = OrderedDict()
+- sections["general"] = OrderedDict()
+- sections["general"]["hostname"] = "myserver.com"
+- sections["general"]["port"] = "1234"
+- return sections
+-
+-
+-@pytest.fixture(autouse=True)
+-def setup_loader():
+- setup_loader_modules = {
++def configure_loader_modules():
++ return {
+ ini_manage: {
+ "__salt__": {
+ "ini.get_ini": mod_ini_manage.get_ini,
+@@ -29,8 +20,15 @@ def setup_loader():
+ },
+ mod_ini_manage: {"__opts__": {"test": False}},
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+- yield loader_mock
++
++
++@pytest.fixture
++def sections():
++ sections = OrderedDict()
++ sections["general"] = OrderedDict()
++ sections["general"]["hostname"] = "myserver.com"
++ sections["general"]["port"] = "1234"
++ return sections
+
+
+ def test_options_present(tmpdir, sections):
+diff --git a/tests/pytests/unit/states/virt/__init__.py b/tests/pytests/unit/states/virt/__init__.py
+new file mode 100644
+index 0000000000..e69de29bb2
+diff --git a/tests/pytests/unit/states/virt/conftest.py b/tests/pytests/unit/states/virt/conftest.py
+new file mode 100644
+index 0000000000..cc975fddbf
+--- /dev/null
++++ b/tests/pytests/unit/states/virt/conftest.py
+@@ -0,0 +1,36 @@
++import pytest
++import salt.states.virt as virt
++from tests.support.mock import MagicMock
++
++
++class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
++ """
++ Libvirt library mock
++ """
++
++ class libvirtError(Exception):
++ """
++ libvirtError mock
++ """
++
++ def __init__(self, msg):
++ super().__init__(msg)
++ self.msg = msg
++
++ def get_error_message(self):
++ return self.msg
++
++
++@pytest.fixture(autouse=True)
++def setup_loader():
++ setup_loader_modules = {virt: {"libvirt": LibvirtMock()}}
++ with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
++ yield loader_mock
++
++
++@pytest.fixture(params=[True, False], ids=["test", "notest"])
++def test(request):
++ """
++ Run the test with both True and False test values
++ """
++ return request.param
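Because the fixture above is parametrized, any test in this package that accepts a test argument is collected twice, once per mode. A minimal standalone illustration:

```python
import pytest


@pytest.fixture(params=[True, False], ids=["test", "notest"])
def test(request):
    return request.param


def test_runs_in_both_modes(test):
    # Collected as test_runs_in_both_modes[test] and
    # test_runs_in_both_modes[notest], one run per parameter.
    assert test in (True, False)
```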
+diff --git a/tests/pytests/unit/states/virt/test_domain.py b/tests/pytests/unit/states/virt/test_domain.py
+new file mode 100644
+index 0000000000..a4ae8c0694
+--- /dev/null
++++ b/tests/pytests/unit/states/virt/test_domain.py
+@@ -0,0 +1,840 @@
++import pytest
++import salt.states.virt as virt
++from salt.exceptions import CommandExecutionError
++from tests.support.mock import MagicMock, patch
++
++from .test_helpers import domain_update_call
++
++
++def test_defined_no_change(test):
++ """
++ defined state test, no change required case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value={"definition": False})
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ "virt.update": update_mock,
++ "virt.init": init_mock,
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {"myvm": {"definition": False}},
++ "result": True,
++ "comment": "Domain myvm unchanged",
++ } == virt.defined("myvm")
++ init_mock.assert_not_called()
++ assert [domain_update_call("myvm", test=test)] == update_mock.call_args_list
++
++
++def test_defined_new_with_connection(test):
++ """
++ defined state test, new guest with connection details passed case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(side_effect=CommandExecutionError("not found"))
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=[]),
++ "virt.init": init_mock,
++ "virt.update": update_mock,
++ },
++ ):
++ disks = [
++ {
++ "name": "system",
++ "size": 8192,
++ "overlay_image": True,
++ "pool": "default",
++ "image": "/path/to/image.qcow2",
++ },
++ {"name": "data", "size": 16834},
++ ]
++ ifaces = [
++ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
++ {"name": "eth1", "type": "network", "source": "admin"},
++ ]
++ graphics = {
++ "type": "spice",
++ "listen": {"type": "address", "address": "192.168.0.1"},
++ }
++ serials = [
++ {"type": "tcp", "port": 22223, "protocol": "telnet"},
++ {"type": "pty"},
++ ]
++ consoles = [
++ {"type": "tcp", "port": 22223, "protocol": "telnet"},
++ {"type": "pty"},
++ ]
++ assert {
++ "name": "myvm",
++ "result": True if not test else None,
++ "changes": {"myvm": {"definition": True}},
++ "comment": "Domain myvm defined",
++ } == virt.defined(
++ "myvm",
++ cpu=2,
++ mem=2048,
++ boot_dev="cdrom hd",
++ os_type="linux",
++ arch="i686",
++ vm_type="qemu",
++ disk_profile="prod",
++ disks=disks,
++ nic_profile="prod",
++ interfaces=ifaces,
++ graphics=graphics,
++ seed=False,
++ install=False,
++ pub_key="/path/to/key.pub",
++ priv_key="/path/to/key",
++ hypervisor_features={"kvm-hint-dedicated": True},
++ clock={"utc": True},
++ stop_on_reboot=True,
++ connection="someconnection",
++ username="libvirtuser",
++ password="supersecret",
++ serials=serials,
++ consoles=consoles,
++ host_devices=["pci_0000_00_17_0"],
++ )
++ if not test:
++ init_mock.assert_called_with(
++ "myvm",
++ cpu=2,
++ mem=2048,
++ boot_dev="cdrom hd",
++ os_type="linux",
++ arch="i686",
++ disk="prod",
++ disks=disks,
++ nic="prod",
++ interfaces=ifaces,
++ graphics=graphics,
++ hypervisor="qemu",
++ seed=False,
++ boot=None,
++ numatune=None,
++ install=False,
++ start=False,
++ pub_key="/path/to/key.pub",
++ priv_key="/path/to/key",
++ hypervisor_features={"kvm-hint-dedicated": True},
++ clock={"utc": True},
++ stop_on_reboot=True,
++ connection="someconnection",
++ username="libvirtuser",
++ password="supersecret",
++ serials=serials,
++ consoles=consoles,
++ host_devices=["pci_0000_00_17_0"],
++ )
++ else:
++ init_mock.assert_not_called()
++ update_mock.assert_not_called()
++
++
++def test_defined_update(test):
++ """
++ defined state test, with change required case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value={"definition": True, "cpu": True})
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ "virt.update": update_mock,
++ "virt.init": init_mock,
++ },
++ ):
++ boot = {
++ "kernel": "/root/f8-i386-vmlinuz",
++ "initrd": "/root/f8-i386-initrd",
++ "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
++ }
++ assert {
++ "name": "myvm",
++ "changes": {"myvm": {"definition": True, "cpu": True}},
++ "result": True if not test else None,
++ "comment": "Domain myvm updated",
++ } == virt.defined("myvm", cpu=2, boot=boot,)
++ init_mock.assert_not_called()
++ assert [
++ domain_update_call("myvm", cpu=2, test=test, boot=boot)
++ ] == update_mock.call_args_list
++
++
++def test_defined_update_error(test):
++ """
++ defined state test, with error during the update.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(
++ return_value={"definition": True, "cpu": False, "errors": ["some error"]}
++ )
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ "virt.update": update_mock,
++ "virt.init": init_mock,
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {
++ "myvm": {
++ "definition": True,
++ "cpu": False,
++ "errors": ["some error"],
++ }
++ },
++ "result": True if not test else None,
++ "comment": "Domain myvm updated with live update(s) failures",
++ } == virt.defined("myvm", cpu=2, boot_dev="cdrom hd")
++ init_mock.assert_not_called()
++ update_mock.assert_called_with(
++ "myvm",
++ cpu=2,
++ boot_dev="cdrom hd",
++ mem=None,
++ disk_profile=None,
++ disks=None,
++ nic_profile=None,
++ interfaces=None,
++ graphics=None,
++ live=True,
++ connection=None,
++ username=None,
++ password=None,
++ boot=None,
++ numatune=None,
++ test=test,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
++ stop_on_reboot=False,
++ host_devices=None,
++ )
++
++
++def test_defined_update_definition_error(test):
++ """
++ defined state test, with definition update failure
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(
++ side_effect=[virt.libvirt.libvirtError("error message")]
++ )
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ "virt.update": update_mock,
++ "virt.init": init_mock,
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "result": False,
++ "comment": "error message",
++ } == virt.defined("myvm", cpu=2)
++ init_mock.assert_not_called()
++ assert [
++ domain_update_call("myvm", cpu=2, test=test)
++ ] == update_mock.call_args_list
++
++
++@pytest.mark.parametrize("running", ["running", "shutdown"])
++def test_running_no_change(test, running):
++ """
++ running state test, no change required case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ update_mock = MagicMock(return_value={"definition": False})
++ start_mock = MagicMock(return_value=0)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": running}),
++ "virt.start": start_mock,
++ "virt.update": MagicMock(return_value={"definition": False}),
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ },
++ ):
++ changes = {"definition": False}
++ comment = "Domain myvm exists and is running"
++ if running == "shutdown":
++ changes["started"] = True
++ comment = "Domain myvm started"
++ assert {
++ "name": "myvm",
++ "result": True,
++ "changes": {"myvm": changes},
++ "comment": comment,
++ } == virt.running("myvm")
++ if running == "shutdown" and not test:
++ start_mock.assert_called()
++ else:
++ start_mock.assert_not_called()
++
++
++def test_running_define(test):
++ """
++    running state test, defining and starting a guest the old way
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ init_mock = MagicMock(return_value=True)
++ start_mock = MagicMock(return_value=0)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
++ "virt.init": init_mock,
++ "virt.start": start_mock,
++ "virt.list_domains": MagicMock(return_value=[]),
++ },
++ ):
++ disks = [
++ {
++ "name": "system",
++ "size": 8192,
++ "overlay_image": True,
++ "pool": "default",
++ "image": "/path/to/image.qcow2",
++ },
++ {"name": "data", "size": 16834},
++ ]
++ ifaces = [
++ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
++ {"name": "eth1", "type": "network", "source": "admin"},
++ ]
++ graphics = {
++ "type": "spice",
++ "listen": {"type": "address", "address": "192.168.0.1"},
++ }
++
++ assert {
++ "name": "myvm",
++ "result": True if not test else None,
++ "changes": {"myvm": {"definition": True, "started": True}},
++ "comment": "Domain myvm defined and started",
++ } == virt.running(
++ "myvm",
++ cpu=2,
++ mem=2048,
++ os_type="linux",
++ arch="i686",
++ vm_type="qemu",
++ disk_profile="prod",
++ disks=disks,
++ nic_profile="prod",
++ interfaces=ifaces,
++ graphics=graphics,
++ seed=False,
++ install=False,
++ pub_key="/path/to/key.pub",
++ priv_key="/path/to/key",
++ boot_dev="network hd",
++ stop_on_reboot=True,
++ host_devices=["pci_0000_00_17_0"],
++ connection="someconnection",
++ username="libvirtuser",
++ password="supersecret",
++ )
++ if not test:
++ init_mock.assert_called_with(
++ "myvm",
++ cpu=2,
++ mem=2048,
++ os_type="linux",
++ arch="i686",
++ disk="prod",
++ disks=disks,
++ nic="prod",
++ interfaces=ifaces,
++ graphics=graphics,
++ hypervisor="qemu",
++ seed=False,
++ boot=None,
++ numatune=None,
++ install=False,
++ start=False,
++ pub_key="/path/to/key.pub",
++ priv_key="/path/to/key",
++ boot_dev="network hd",
++ hypervisor_features=None,
++ clock=None,
++ stop_on_reboot=True,
++ connection="someconnection",
++ username="libvirtuser",
++ password="supersecret",
++ serials=None,
++ consoles=None,
++ host_devices=["pci_0000_00_17_0"],
++ )
++ start_mock.assert_called_with(
++ "myvm",
++ connection="someconnection",
++ username="libvirtuser",
++ password="supersecret",
++ )
++ else:
++ init_mock.assert_not_called()
++ start_mock.assert_not_called()
++
++
++def test_running_start_error():
++ """
++ running state test, start an existing guest raising an error
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
++ "virt.update": MagicMock(return_value={"definition": False}),
++ "virt.start": MagicMock(
++ side_effect=[virt.libvirt.libvirtError("libvirt error msg")]
++ ),
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {"myvm": {"definition": False}},
++ "result": False,
++ "comment": "libvirt error msg",
++ } == virt.running("myvm")
++
++
++@pytest.mark.parametrize("running", ["running", "shutdown"])
++def test_running_update(test, running):
++ """
++ running state test, update an existing guest
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ start_mock = MagicMock(return_value=0)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": running}),
++ "virt.update": MagicMock(
++ return_value={"definition": True, "cpu": True}
++ ),
++ "virt.start": start_mock,
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ },
++ ):
++ changes = {"myvm": {"definition": True, "cpu": True}}
++ if running == "shutdown":
++ changes["myvm"]["started"] = True
++ assert {
++ "name": "myvm",
++ "changes": changes,
++ "result": True if not test else None,
++ "comment": "Domain myvm updated"
++ if running == "running"
++ else "Domain myvm updated and started",
++ } == virt.running("myvm", cpu=2)
++ if running == "shutdown" and not test:
++ start_mock.assert_called()
++ else:
++ start_mock.assert_not_called()
++
++
++def test_running_definition_error():
++ """
++ running state test, update an existing guest raising an error when setting the XML
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
++ "virt.update": MagicMock(
++ side_effect=[virt.libvirt.libvirtError("error message")]
++ ),
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "result": False,
++ "comment": "error message",
++ } == virt.running("myvm", cpu=3)
++
++
++def test_running_update_error():
++ """
++ running state test, update an existing guest raising an error
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ update_mock = MagicMock(
++ return_value={"definition": True, "cpu": False, "errors": ["some error"]}
++ )
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
++ "virt.update": update_mock,
++ "virt.list_domains": MagicMock(return_value=["myvm"]),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {
++ "myvm": {
++ "definition": True,
++ "cpu": False,
++ "errors": ["some error"],
++ }
++ },
++ "result": True,
++ "comment": "Domain myvm updated with live update(s) failures",
++ } == virt.running("myvm", cpu=2)
++ update_mock.assert_called_with(
++ "myvm",
++ cpu=2,
++ mem=None,
++ disk_profile=None,
++ disks=None,
++ nic_profile=None,
++ interfaces=None,
++ graphics=None,
++ live=True,
++ connection=None,
++ username=None,
++ password=None,
++ boot=None,
++ numatune=None,
++ test=False,
++ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
++ stop_on_reboot=False,
++ host_devices=None,
++ )
++
++
++@pytest.mark.parametrize("running", ["running", "shutdown"])
++def test_stopped(test, running):
++ """
++    stopped state test, running and already shut-down guest cases
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ shutdown_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.vm_state": MagicMock(return_value={"myvm": running}),
++ "virt.shutdown": shutdown_mock,
++ },
++ ):
++ changes = {}
++ comment = "No changes had happened"
++ if running == "running":
++ changes = {"stopped": [{"domain": "myvm", "shutdown": True}]}
++ comment = "Machine has been shut down"
++ assert {
++ "name": "myvm",
++ "changes": changes,
++ "comment": comment,
++ "result": True if not test or running == "shutdown" else None,
++ } == virt.stopped(
++ "myvm", connection="myconnection", username="user", password="secret",
++ )
++ if not test and running == "running":
++ shutdown_mock.assert_called_with(
++ "myvm",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ shutdown_mock.assert_not_called()
++
++
++def test_stopped_error():
++ """
++ stopped state test, error while stopping guest
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
++ "virt.shutdown": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ ),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
++ "result": False,
++ "comment": "No changes had happened",
++ } == virt.stopped("myvm")
++
++
++def test_stopped_not_existing(test):
++ """
++    stopped state test, non-existing guest
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ with patch.dict(
++ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])},
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "comment": "No changes had happened",
++ "result": False,
++ } == virt.stopped("myvm")
++
++
++@pytest.mark.parametrize("running", ["running", "shutdown"])
++def test_powered_off(test, running):
++ """
++ powered_off state test
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ stop_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.vm_state": MagicMock(return_value={"myvm": running}),
++ "virt.stop": stop_mock,
++ },
++ ):
++ changes = {}
++ comment = "No changes had happened"
++ if running == "running":
++ changes = {"unpowered": [{"domain": "myvm", "stop": True}]}
++ comment = "Machine has been powered off"
++ assert {
++ "name": "myvm",
++ "result": True if not test or running == "shutdown" else None,
++ "changes": changes,
++ "comment": comment,
++ } == virt.powered_off(
++ "myvm", connection="myconnection", username="user", password="secret",
++ )
++ if not test and running == "running":
++ stop_mock.assert_called_with(
++ "myvm",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ stop_mock.assert_not_called()
++
++
++def test_powered_off_error():
++ """
++ powered_off state test, error case
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ stop_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
++ "virt.stop": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ ),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "result": False,
++ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
++ "comment": "No changes had happened",
++ } == virt.powered_off("myvm")
++
++
++def test_powered_off_not_existing():
++ """
++    powered_off state test, non-existing guest case.
++    """
++ with patch.dict(virt.__opts__, {"test": False}):
++ with patch.dict(
++ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
++ ): # pylint: disable=no-member
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "result": False,
++ "comment": "No changes had happened",
++ } == virt.powered_off("myvm")
++
++
++def test_snapshot(test):
++ """
++ snapshot state test
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ snapshot_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.snapshot": snapshot_mock,
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "result": True if not test else None,
++ "changes": {"saved": [{"domain": "myvm", "snapshot": True}]},
++ "comment": "Snapshot has been taken",
++ } == virt.snapshot(
++ "myvm",
++ suffix="snap",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ if not test:
++ snapshot_mock.assert_called_with(
++ "myvm",
++ suffix="snap",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ snapshot_mock.assert_not_called()
++
++
++def test_snapshot_error():
++ """
++ snapshot state test, error case
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.snapshot": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ ),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "result": False,
++ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
++ "comment": "No changes had happened",
++ } == virt.snapshot("myvm")
++
++
++def test_snapshot_not_existing(test):
++ """
++ snapshot state test, guest not existing.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ with patch.dict(
++ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "result": False,
++ "comment": "No changes had happened",
++ } == virt.snapshot("myvm")
++
++
++def test_rebooted(test):
++ """
++ rebooted state test
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ reboot_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.reboot": reboot_mock,
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "result": True if not test else None,
++ "changes": {"rebooted": [{"domain": "myvm", "reboot": True}]},
++ "comment": "Machine has been rebooted",
++ } == virt.rebooted(
++ "myvm", connection="myconnection", username="user", password="secret",
++ )
++ if not test:
++ reboot_mock.assert_called_with(
++ "myvm",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ reboot_mock.assert_not_called()
++
++
++def test_rebooted_error():
++ """
++ rebooted state test, error case.
++ """
++ with patch.dict(virt.__opts__, {"test": False}):
++ reboot_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
++ "virt.reboot": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ ),
++ },
++ ):
++ assert {
++ "name": "myvm",
++ "result": False,
++ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
++ "comment": "No changes had happened",
++ } == virt.rebooted("myvm")
++
++
++def test_rebooted_not_existing(test):
++ """
++    rebooted state test, non-existing guest case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ with patch.dict(
++ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
++ ):
++ assert {
++ "name": "myvm",
++ "changes": {},
++ "result": False,
++ "comment": "No changes had happened",
++ } == virt.rebooted("myvm")
+diff --git a/tests/pytests/unit/states/virt/test_helpers.py b/tests/pytests/unit/states/virt/test_helpers.py
+new file mode 100644
+index 0000000000..b8e2cb06e2
+--- /dev/null
++++ b/tests/pytests/unit/states/virt/test_helpers.py
+@@ -0,0 +1,99 @@
++from tests.support.mock import call
++
++
++def network_update_call(
++ name,
++ bridge,
++ forward,
++ vport=None,
++ tag=None,
++ ipv4_config=None,
++ ipv6_config=None,
++ connection=None,
++ username=None,
++ password=None,
++ mtu=None,
++ domain=None,
++ nat=None,
++ interfaces=None,
++ addresses=None,
++ physical_function=None,
++ dns=None,
++ test=False,
++):
++ """
++    Create a call object for virt.network_update(), filling in defaults for any parameter not provided
++ """
++ return call(
++ name,
++ bridge,
++ forward,
++ vport=vport,
++ tag=tag,
++ ipv4_config=ipv4_config,
++ ipv6_config=ipv6_config,
++ mtu=mtu,
++ domain=domain,
++ nat=nat,
++ interfaces=interfaces,
++ addresses=addresses,
++ physical_function=physical_function,
++ dns=dns,
++ test=test,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++
++
++def domain_update_call(
++ name,
++ cpu=None,
++ mem=None,
++ disk_profile=None,
++ disks=None,
++ nic_profile=None,
++ interfaces=None,
++ graphics=None,
++ connection=None,
++ username=None,
++ password=None,
++ boot=None,
++ numatune=None,
++ boot_dev=None,
++ hypervisor_features=None,
++ clock=None,
++ serials=None,
++ consoles=None,
++ stop_on_reboot=False,
++ live=True,
++ host_devices=None,
++ test=False,
++):
++ """
++    Create a call object for virt.update(), filling in defaults for any parameter not provided
++ """
++ return call(
++ name,
++ cpu=cpu,
++ mem=mem,
++ disk_profile=disk_profile,
++ disks=disks,
++ nic_profile=nic_profile,
++ interfaces=interfaces,
++ graphics=graphics,
++ live=live,
++ connection=connection,
++ username=username,
++ password=password,
++ boot=boot,
++ numatune=numatune,
++ serials=serials,
++ consoles=consoles,
++ test=test,
++ boot_dev=boot_dev,
++ hypervisor_features=hypervisor_features,
++ clock=clock,
++ stop_on_reboot=stop_on_reboot,
++ host_devices=host_devices,
++ )
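A short usage sketch of domain_update_call() (assuming it is imported from this module): comparing the helper against call_args_list pins down every keyword the state passes, including the implicit defaults, without spelling them all out in each test.

```python
from unittest.mock import MagicMock

update_mock = MagicMock(return_value={"definition": False})
# A state would normally issue this call with all keywords filled in.
update_mock(
    "myvm", cpu=2, mem=None, disk_profile=None, disks=None, nic_profile=None,
    interfaces=None, graphics=None, live=True, connection=None, username=None,
    password=None, boot=None, numatune=None, serials=None, consoles=None,
    test=True, boot_dev=None, hypervisor_features=None, clock=None,
    stop_on_reboot=False, host_devices=None,
)
assert [domain_update_call("myvm", cpu=2, test=True)] == update_mock.call_args_list
```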
+diff --git a/tests/pytests/unit/states/virt/test_network.py b/tests/pytests/unit/states/virt/test_network.py
+new file mode 100644
+index 0000000000..668eee0c64
+--- /dev/null
++++ b/tests/pytests/unit/states/virt/test_network.py
+@@ -0,0 +1,476 @@
++import salt.states.virt as virt
++from tests.support.mock import MagicMock, patch
++
++from .test_helpers import network_update_call
++
++
++def test_network_defined_not_existing(test):
++ """
++ network_defined state tests if the network doesn't exist yet.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ side_effect=[{}, {"mynet": {"active": False}}]
++ ),
++ "virt.network_define": define_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {"mynet": "Network defined"},
++ "result": None if test else True,
++ "comment": "Network mynet defined",
++ } == virt.network_defined(
++ "mynet",
++ "br2",
++ "bridge",
++ vport="openvswitch",
++ tag=180,
++ ipv4_config={
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ ipv6_config={
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ mtu=9000,
++ domain={"name": "acme.lab"},
++ nat={"ports": {"start": 1024, "end": 2048}},
++ interfaces="eth0 eth1",
++ addresses="0000:01:02.4 0000:01:02.5",
++ physical_function="eth4",
++ dns={
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ autostart=False,
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ if not test:
++ define_mock.assert_called_with(
++ "mynet",
++ "br2",
++ "bridge",
++ vport="openvswitch",
++ tag=180,
++ autostart=False,
++ start=False,
++ ipv4_config={
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ ipv6_config={
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ mtu=9000,
++ domain={"name": "acme.lab"},
++ nat={"ports": {"start": 1024, "end": 2048}},
++ interfaces="eth0 eth1",
++ addresses="0000:01:02.4 0000:01:02.5",
++ physical_function="eth4",
++ dns={
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ define_mock.assert_not_called()
++
++
++def test_network_defined_no_change(test):
++ """
++    network_defined state tests when the network needs no update.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value=False)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ return_value={"mynet": {"active": True, "autostart": True}}
++ ),
++ "virt.network_define": define_mock,
++ "virt.network_update": update_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {},
++ "result": True,
++ "comment": "Network mynet unchanged",
++ } == virt.network_defined("mynet", "br2", "bridge")
++ define_mock.assert_not_called()
++ assert [
++ network_update_call("mynet", "br2", "bridge", test=True)
++ ] == update_mock.call_args_list
++
++
++def test_network_defined_change(test):
++ """
++    network_defined state tests when the network needs an update.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value=True)
++ autostart_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ return_value={"mynet": {"active": True, "autostart": True}}
++ ),
++ "virt.network_define": define_mock,
++ "virt.network_update": update_mock,
++ "virt.network_set_autostart": autostart_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {"mynet": "Network updated, autostart flag changed"},
++ "result": None if test else True,
++ "comment": "Network mynet updated, autostart flag changed",
++ } == virt.network_defined(
++ "mynet",
++ "br2",
++ "bridge",
++ vport="openvswitch",
++ tag=180,
++ ipv4_config={
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ ipv6_config={
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ mtu=9000,
++ domain={"name": "acme.lab"},
++ nat={"ports": {"start": 1024, "end": 2048}},
++ interfaces="eth0 eth1",
++ addresses="0000:01:02.4 0000:01:02.5",
++ physical_function="eth4",
++ dns={
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ autostart=False,
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ define_mock.assert_not_called()
++ expected_update_kwargs = {
++ "vport": "openvswitch",
++ "tag": 180,
++ "ipv4_config": {
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ "ipv6_config": {
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ "mtu": 9000,
++ "domain": {"name": "acme.lab"},
++ "nat": {"ports": {"start": 1024, "end": 2048}},
++ "interfaces": "eth0 eth1",
++ "addresses": "0000:01:02.4 0000:01:02.5",
++ "physical_function": "eth4",
++ "dns": {
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ "connection": "myconnection",
++ "username": "user",
++ "password": "secret",
++ }
++ calls = [
++ network_update_call(
++ "mynet", "br2", "bridge", **expected_update_kwargs, test=True
++ )
++ ]
++ if test:
++ assert calls == update_mock.call_args_list
++ autostart_mock.assert_not_called()
++ else:
++ calls.append(
++ network_update_call(
++ "mynet", "br2", "bridge", **expected_update_kwargs, test=False
++ )
++ )
++ assert calls == update_mock.call_args_list
++ autostart_mock.assert_called_with(
++ "mynet",
++ state="off",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++
++
++def test_network_defined_error(test):
++ """
++ network_defined state tests if an error is triggered by libvirt.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ )
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {},
++ "result": False,
++ "comment": "Some error",
++ } == virt.network_defined("mynet", "br2", "bridge")
++ define_mock.assert_not_called()
++
++
++def test_network_running_not_existing(test):
++ """
++ network_running state test cases, non-existing network case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ start_mock = MagicMock(return_value=True)
++ # Non-existing network case
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ side_effect=[{}, {"mynet": {"active": False}}]
++ ),
++ "virt.network_define": define_mock,
++ "virt.network_start": start_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {"mynet": "Network defined and started"},
++ "comment": "Network mynet defined and started",
++ "result": None if test else True,
++ } == virt.network_running(
++ "mynet",
++ "br2",
++ "bridge",
++ vport="openvswitch",
++ tag=180,
++ ipv4_config={
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ ipv6_config={
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ mtu=9000,
++ domain={"name": "acme.lab"},
++ nat={"ports": {"start": 1024, "end": 2048}},
++ interfaces="eth0 eth1",
++ addresses="0000:01:02.4 0000:01:02.5",
++ physical_function="eth4",
++ dns={
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ autostart=False,
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ if not test:
++ define_mock.assert_called_with(
++ "mynet",
++ "br2",
++ "bridge",
++ vport="openvswitch",
++ tag=180,
++ autostart=False,
++ start=False,
++ ipv4_config={
++ "cidr": "192.168.2.0/24",
++ "dhcp_ranges": [
++ {"start": "192.168.2.10", "end": "192.168.2.25"},
++ {"start": "192.168.2.110", "end": "192.168.2.125"},
++ ],
++ },
++ ipv6_config={
++ "cidr": "2001:db8:ca2:2::1/64",
++ "dhcp_ranges": [
++ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
++ ],
++ },
++ mtu=9000,
++ domain={"name": "acme.lab"},
++ nat={"ports": {"start": 1024, "end": 2048}},
++ interfaces="eth0 eth1",
++ addresses="0000:01:02.4 0000:01:02.5",
++ physical_function="eth4",
++ dns={
++ "hosts": {
++ "192.168.2.10": {"name": "web", "mac": "de:ad:be:ef:00:00"}
++ }
++ },
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ start_mock.assert_called_with(
++ "mynet",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ define_mock.assert_not_called()
++ start_mock.assert_not_called()
++
++
++def test_network_running_nochange(test):
++ """
++    network_running state test cases, no change case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value=False)
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ return_value={"mynet": {"active": True, "autostart": True}}
++ ),
++ "virt.network_define": define_mock,
++ "virt.network_update": update_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {},
++ "comment": "Network mynet unchanged and is running",
++ "result": None if test else True,
++ } == virt.network_running("mynet", "br2", "bridge")
++ assert [
++ network_update_call("mynet", "br2", "bridge", test=True)
++ ] == update_mock.call_args_list
++
++
++def test_network_running_stopped(test):
++ """
++ network_running state test cases, network stopped case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ define_mock = MagicMock(return_value=True)
++ start_mock = MagicMock(return_value=True)
++ update_mock = MagicMock(return_value=False)
++ with patch.dict(
++ virt.__salt__,
++ { # pylint: disable=no-member
++ "virt.network_info": MagicMock(
++ return_value={"mynet": {"active": False, "autostart": True}}
++ ),
++ "virt.network_start": start_mock,
++ "virt.network_define": define_mock,
++ "virt.network_update": update_mock,
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {"mynet": "Network started"},
++ "comment": "Network mynet unchanged and started",
++ "result": None if test else True,
++ } == virt.network_running(
++ "mynet",
++ "br2",
++ "bridge",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ assert [
++ network_update_call(
++ "mynet",
++ "br2",
++ "bridge",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ test=True,
++ )
++ ] == update_mock.call_args_list
++ if not test:
++ start_mock.assert_called_with(
++ "mynet",
++ connection="myconnection",
++ username="user",
++ password="secret",
++ )
++ else:
++ start_mock.assert_not_called()
++
++
++def test_network_running_error(test):
++ """
++ network_running state test cases, libvirt error case.
++ """
++ with patch.dict(virt.__opts__, {"test": test}):
++ with patch.dict(
++ virt.__salt__,
++ {
++ "virt.network_info": MagicMock(
++ side_effect=virt.libvirt.libvirtError("Some error")
++ ),
++ },
++ ):
++ assert {
++ "name": "mynet",
++ "changes": {},
++ "comment": "Some error",
++ "result": False,
++ } == virt.network_running("mynet", "br2", "bridge")
+diff --git a/tests/pytests/unit/utils/test_xmlutil.py b/tests/pytests/unit/utils/test_xmlutil.py
+index 2bcaff3a17..aed3e42e06 100644
+--- a/tests/pytests/unit/utils/test_xmlutil.py
++++ b/tests/pytests/unit/utils/test_xmlutil.py
+@@ -208,3 +208,17 @@ def test_change_xml_template_list(xml_doc):
+ assert ["1024", "512"] == [
+ n.get("size") for n in xml_doc.findall("memtune/hugepages/page")
+ ]
++
++
++def test_strip_spaces():
++ xml_str = """
++ test01
++ 1024
++
++ """
++ expected_str = (
++ b'test011024'
++ )
++
++ node = ET.fromstring(xml_str)
++ assert expected_str == ET.tostring(xml.strip_spaces(node))
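For reference, the xmlutil.strip_spaces() behavior exercised by this test can be approximated as a recursive scrub of whitespace-only text and tail values. A sketch under that assumption, not salt's exact implementation:

```python
from xml.etree import ElementTree as ET


def strip_spaces(node):
    # Recurse first, then drop whitespace-only text/tail so that
    # ET.tostring() yields a compact, canonical form.
    for child in node:
        strip_spaces(child)
    if node.text is not None and not node.text.strip():
        node.text = None
    if node.tail is not None and not node.tail.strip():
        node.tail = None
    return node


doc = ET.fromstring("<domain>\n  <name>test01</name>\n</domain>")
assert ET.tostring(strip_spaces(doc)) == b"<domain><name>test01</name></domain>"
```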
+diff --git a/tests/unit/modules/test_linux_sysctl.py b/tests/unit/modules/test_linux_sysctl.py
+deleted file mode 100644
+index 7f463bb7ab..0000000000
+--- a/tests/unit/modules/test_linux_sysctl.py
++++ /dev/null
+@@ -1,173 +0,0 @@
+-"""
+- :codeauthor: jmoney
+-"""
+-
+-
+-import salt.modules.linux_sysctl as linux_sysctl
+-import salt.modules.systemd_service as systemd
+-from salt.exceptions import CommandExecutionError
+-from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, mock_open, patch
+-from tests.support.unit import TestCase
+-
+-
+-class LinuxSysctlTestCase(TestCase, LoaderModuleMockMixin):
+- """
+- TestCase for salt.modules.linux_sysctl module
+- """
+-
+- def setup_loader_modules(self):
+- return {linux_sysctl: {}, systemd: {}}
+-
+- def test_get(self):
+- """
+- Tests the return of get function
+- """
+- mock_cmd = MagicMock(return_value=1)
+- with patch.dict(linux_sysctl.__salt__, {"cmd.run": mock_cmd}):
+- self.assertEqual(linux_sysctl.get("net.ipv4.ip_forward"), 1)
+-
+- def test_assign_proc_sys_failed(self):
+- """
+- Tests if /proc/sys/ exists or not
+- """
+- with patch("os.path.exists", MagicMock(return_value=False)):
+- cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": "",
+- "stdout": "net.ipv4.ip_forward = 1",
+- }
+- mock_cmd = MagicMock(return_value=cmd)
+- with patch.dict(linux_sysctl.__salt__, {"cmd.run_all": mock_cmd}):
+- self.assertRaises(
+- CommandExecutionError, linux_sysctl.assign, "net.ipv4.ip_forward", 1
+- )
+-
+- def test_assign_cmd_failed(self):
+- """
+- Tests if the assignment was successful or not
+- """
+- with patch("os.path.exists", MagicMock(return_value=True)):
+- cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": 'sysctl: setting key "net.ipv4.ip_forward": Invalid argument',
+- "stdout": "net.ipv4.ip_forward = backward",
+- }
+- mock_cmd = MagicMock(return_value=cmd)
+- with patch.dict(linux_sysctl.__salt__, {"cmd.run_all": mock_cmd}):
+- self.assertRaises(
+- CommandExecutionError,
+- linux_sysctl.assign,
+- "net.ipv4.ip_forward",
+- "backward",
+- )
+-
+- def test_assign_success(self):
+- """
+- Tests the return of successful assign function
+- """
+- with patch("os.path.exists", MagicMock(return_value=True)):
+- cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": "",
+- "stdout": "net.ipv4.ip_forward = 1",
+- }
+- ret = {"net.ipv4.ip_forward": "1"}
+- mock_cmd = MagicMock(return_value=cmd)
+- with patch.dict(linux_sysctl.__salt__, {"cmd.run_all": mock_cmd}):
+- self.assertEqual(linux_sysctl.assign("net.ipv4.ip_forward", 1), ret)
+-
+- def test_persist_no_conf_failure(self):
+- """
+- Tests adding of config file failure
+- """
+- asn_cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": "sysctl: permission denied",
+- "stdout": "",
+- }
+- mock_asn_cmd = MagicMock(return_value=asn_cmd)
+- cmd = "sysctl -w net.ipv4.ip_forward=1"
+- mock_cmd = MagicMock(return_value=cmd)
+- with patch.dict(
+- linux_sysctl.__salt__,
+- {"cmd.run_stdout": mock_cmd, "cmd.run_all": mock_asn_cmd},
+- ):
+- with patch("salt.utils.files.fopen", mock_open()) as m_open:
+- self.assertRaises(
+- CommandExecutionError,
+- linux_sysctl.persist,
+- "net.ipv4.ip_forward",
+- 1,
+- config=None,
+- )
+-
+- def test_persist_no_conf_success(self):
+- """
+- Tests successful add of config file when previously not one
+- """
+- config = "/etc/sysctl.conf"
+- with patch("os.path.isfile", MagicMock(return_value=False)), patch(
+- "os.path.exists", MagicMock(return_value=True)
+- ):
+- asn_cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": "",
+- "stdout": "net.ipv4.ip_forward = 1",
+- }
+- mock_asn_cmd = MagicMock(return_value=asn_cmd)
+-
+- sys_cmd = "systemd 208\n+PAM +LIBWRAP"
+- mock_sys_cmd = MagicMock(return_value=sys_cmd)
+-
+- with patch("salt.utils.files.fopen", mock_open()) as m_open, patch.dict(
+- linux_sysctl.__context__, {"salt.utils.systemd.version": 232}
+- ), patch.dict(
+- linux_sysctl.__salt__,
+- {"cmd.run_stdout": mock_sys_cmd, "cmd.run_all": mock_asn_cmd},
+- ), patch.dict(
+- systemd.__context__,
+- {"salt.utils.systemd.booted": True, "salt.utils.systemd.version": 232},
+- ):
+- linux_sysctl.persist("net.ipv4.ip_forward", 1, config=config)
+- writes = m_open.write_calls()
+- assert writes == ["#\n# Kernel sysctl configuration\n#\n"], writes
+-
+- def test_persist_read_conf_success(self):
+- """
+- Tests sysctl.conf read success
+- """
+- with patch("os.path.isfile", MagicMock(return_value=True)), patch(
+- "os.path.exists", MagicMock(return_value=True)
+- ):
+- asn_cmd = {
+- "pid": 1337,
+- "retcode": 0,
+- "stderr": "",
+- "stdout": "net.ipv4.ip_forward = 1",
+- }
+- mock_asn_cmd = MagicMock(return_value=asn_cmd)
+-
+- sys_cmd = "systemd 208\n+PAM +LIBWRAP"
+- mock_sys_cmd = MagicMock(return_value=sys_cmd)
+-
+- with patch("salt.utils.files.fopen", mock_open()):
+- with patch.dict(
+- linux_sysctl.__context__, {"salt.utils.systemd.version": 232}
+- ):
+- with patch.dict(
+- linux_sysctl.__salt__,
+- {"cmd.run_stdout": mock_sys_cmd, "cmd.run_all": mock_asn_cmd},
+- ):
+- with patch.dict(
+- systemd.__context__, {"salt.utils.systemd.booted": True}
+- ):
+- self.assertEqual(
+- linux_sysctl.persist("net.ipv4.ip_forward", 1),
+- "Updated",
+- )
+diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
+index 91dee2098d..f717513944 100644
+--- a/tests/unit/modules/test_virt.py
++++ b/tests/unit/modules/test_virt.py
+@@ -598,7 +598,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "swap_hard_limit": "1g",
+ "min_guarantee": "256m",
+ "hugepages": [
+- {"nodeset": "", "size": "128m"},
++ {"size": "128m"},
+ {"nodeset": "0", "size": "256m"},
+ {"nodeset": "1", "size": "512m"},
+ ],
+@@ -1881,70 +1881,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ ],
+ )
+
+- def test_diff_nics(self):
+- """
+- Test virt._diff_nics()
+- """
+- old_nics = ET.fromstring(
+- """
+- <!-- interface XML stripped during extraction: three interface
+- definitions with MAC addresses 52:54:00:39:02:b1, :b2 and :b3 -->
+- """
+- ).findall("interface")
+-
+- new_nics = ET.fromstring(
+- """
+- <!-- interface XML stripped during extraction: interfaces with MAC
+- addresses 52:54:00:39:02:b1 (unchanged), :b2 (changed) and :b4 (new) -->
+- """
+- ).findall("interface")
+- ret = virt._diff_interface_lists(old_nics, new_nics)
+- self.assertEqual(
+- [nic.find("mac").get("address") for nic in ret["unchanged"]],
+- ["52:54:00:39:02:b1"],
+- )
+- self.assertEqual(
+- [nic.find("mac").get("address") for nic in ret["new"]],
+- ["52:54:00:39:02:b2", "52:54:00:39:02:b4"],
+- )
+- self.assertEqual(
+- [nic.find("mac").get("address") for nic in ret["deleted"]],
+- ["52:54:00:39:02:b2", "52:54:00:39:02:b3"],
+- )
+-
+ def test_init(self):
+ """
+ Test init() function
+@@ -3160,7 +3096,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "source_file": None,
+ "model": "ide",
+ },
+- {"name": "added", "size": 2048, "iothreads": True},
++ {
++ "name": "added",
++ "size": 2048,
++ "io": "threads",
++ "iothread_id": 2,
++ },
+ ],
+ )
+ added_disk_path = os.path.join(
+@@ -3196,6 +3137,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(
+ "threads", setxml.find("devices/disk[3]/driver").get("io")
+ )
++ self.assertEqual(
++ "2", setxml.find("devices/disk[3]/driver").get("iothread")
++ )
+
+ # Update nics case
+ yaml_config = """
+@@ -3245,7 +3189,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ devattach_mock.reset_mock()
+ devdetach_mock.reset_mock()
+ ret = virt.update("my_vm", nic_profile=None, interfaces=[])
+- self.assertEqual([], ret["interface"]["attached"])
++ self.assertFalse(ret["interface"].get("attached"))
+ self.assertEqual(2, len(ret["interface"]["detached"]))
+ devattach_mock.assert_not_called()
+ devdetach_mock.assert_called()
+@@ -3254,7 +3198,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ devattach_mock.reset_mock()
+ devdetach_mock.reset_mock()
+ ret = virt.update("my_vm", disk_profile=None, disks=[])
+- self.assertEqual([], ret["disk"]["attached"])
++ self.assertFalse(ret["disk"].get("attached"))
+ self.assertEqual(3, len(ret["disk"]["detached"]))
+ devattach_mock.assert_not_called()
+ devdetach_mock.assert_called()
+@@ -3540,8 +3484,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ )
+
+ self.assertTrue(ret["definition"])
+- self.assertFalse(ret["disk"]["attached"])
+- self.assertFalse(ret["disk"]["detached"])
++ self.assertFalse(ret["disk"].get("attached"))
++ self.assertFalse(ret["disk"].get("detached"))
+ self.assertEqual(
+ [
+ {
+@@ -6119,59 +6063,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ }
+ self.assertEqual(expected, caps)
+
+- def test_network(self):
+- """
+- Test virt._get_net_xml()
+- """
+- xml_data = virt._gen_net_xml("network", "main", "bridge", "openvswitch")
+- root = ET.fromstring(xml_data)
+- self.assertEqual(root.find("name").text, "network")
+- self.assertEqual(root.find("bridge").attrib["name"], "main")
+- self.assertEqual(root.find("forward").attrib["mode"], "bridge")
+- self.assertEqual(root.find("virtualport").attrib["type"], "openvswitch")
+-
+- def test_network_nat(self):
+- """
+- Test virt._get_net_xml() in a nat setup
+- """
+- xml_data = virt._gen_net_xml(
+- "network",
+- "main",
+- "nat",
+- None,
+- ip_configs=[
+- {
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- }
+- ],
+- )
+- root = ET.fromstring(xml_data)
+- self.assertEqual(root.find("name").text, "network")
+- self.assertEqual(root.find("bridge").attrib["name"], "main")
+- self.assertEqual(root.find("forward").attrib["mode"], "nat")
+- self.assertEqual(
+- root.find("./ip[@address='192.168.2.0']").attrib["prefix"], "24"
+- )
+- self.assertEqual(
+- root.find("./ip[@address='192.168.2.0']").attrib["family"], "ipv4"
+- )
+- self.assertEqual(
+- root.find(
+- "./ip[@address='192.168.2.0']/dhcp/range[@start='192.168.2.10']"
+- ).attrib["end"],
+- "192.168.2.25",
+- )
+- self.assertEqual(
+- root.find(
+- "./ip[@address='192.168.2.0']/dhcp/range[@start='192.168.2.110']"
+- ).attrib["end"],
+- "192.168.2.125",
+- )
+-
+ def test_domain_capabilities(self):
+ """
+ Test the virt.domain_capabilities parsing
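
The two `_gen_net_xml` tests removed above pin down the generated libvirt network XML entirely through their assertions. Reconstructed from those assertions alone (element order and quoting are illustrative), the NAT case corresponds to a document like the following, and the same XPath checks still pass on it:

```python
import xml.etree.ElementTree as ET

# Every element below is required by an assertion in the deleted test;
# attribute order and quoting are illustrative.
xml_data = """
<network>
  <name>network</name>
  <bridge name='main'/>
  <forward mode='nat'/>
  <ip address='192.168.2.0' prefix='24' family='ipv4'>
    <dhcp>
      <range start='192.168.2.10' end='192.168.2.25'/>
      <range start='192.168.2.110' end='192.168.2.125'/>
    </dhcp>
  </ip>
</network>
"""

root = ET.fromstring(xml_data)
assert root.find("name").text == "network"
assert root.find("forward").attrib["mode"] == "nat"
assert root.find("./ip[@address='192.168.2.0']").attrib["prefix"] == "24"
assert (
    root.find("./ip[@address='192.168.2.0']/dhcp/range[@start='192.168.2.10']").attrib[
        "end"
    ]
    == "192.168.2.25"
)
```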
+diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
+index dadc6dd08e..2ab73f8af4 100644
+--- a/tests/unit/states/test_virt.py
++++ b/tests/unit/states/test_virt.py
+@@ -7,7 +7,7 @@ import tempfile
+
+ import salt.states.virt as virt
+ import salt.utils.files
+-from salt.exceptions import CommandExecutionError, SaltInvocationError
++from salt.exceptions import SaltInvocationError
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, mock_open, patch
+ from tests.support.runtests import RUNTIME_VARS
+@@ -263,1707 +263,6 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ ret,
+ )
+
+- def test_defined(self):
+- """
+- defined state test cases.
+- """
+- ret = {
+- "name": "myvm",
+- "changes": {},
+- "result": True,
+- "comment": "myvm is running",
+- }
+- with patch.dict(virt.__opts__, {"test": False}):
+- # no change test
+- init_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(return_value={"definition": False}),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": False}},
+- "comment": "Domain myvm unchanged",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm"), ret)
+-
+- # Test defining a guest with connection details
+- init_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=[]),
+- "virt.init": init_mock,
+- "virt.update": MagicMock(
+- side_effect=CommandExecutionError("not found")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "comment": "Domain myvm defined",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- serials = [
+- {"type": "tcp", "port": 22223, "protocol": "telnet"},
+- {"type": "pty"},
+- ]
+- consoles = [
+- {"type": "tcp", "port": 22223, "protocol": "telnet"},
+- {"type": "pty"},
+- ]
+- self.assertDictEqual(
+- virt.defined(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- boot_dev="cdrom hd",
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- hypervisor_features={"kvm-hint-dedicated": True},
+- clock={"utc": True},
+- stop_on_reboot=True,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- serials=serials,
+- consoles=consoles,
+- ),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- boot_dev="cdrom hd",
+- os_type="linux",
+- arch="i686",
+- disk="prod",
+- disks=disks,
+- nic="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- hypervisor="qemu",
+- seed=False,
+- boot=None,
+- numatune=None,
+- install=False,
+- start=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- hypervisor_features={"kvm-hint-dedicated": True},
+- clock={"utc": True},
+- stop_on_reboot=True,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- serials=serials,
+- consoles=consoles,
+- )
+-
+- # Working update case when running
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
+-
+- # Working update case when running with boot params
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- }
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", boot=boot), ret)
+-
+- # Working update case when stopped
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(return_value={"definition": True}),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
+-
+- # Failed live update case
+- update_mock = MagicMock(
+- return_value={
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- )
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {
+- "myvm": {
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- },
+- "result": True,
+- "comment": "Domain myvm updated with live update(s) failures",
+- }
+- )
+- self.assertDictEqual(
+- virt.defined("myvm", cpu=2, boot_dev="cdrom hd"), ret
+- )
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- boot_dev="cdrom hd",
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=False,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+-
+- # Failed definition update case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- side_effect=[self.mock_libvirt.libvirtError("error message")]
+- ),
+- },
+- ):
+- ret.update({"changes": {}, "result": False, "comment": "error message"})
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
+-
+- # Test dry-run mode
+- with patch.dict(virt.__opts__, {"test": True}):
+- # Guest defined case
+- init_mock = MagicMock(return_value=True)
+- update_mock = MagicMock(side_effect=CommandExecutionError("not found"))
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=[]),
+- "virt.init": init_mock,
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": None,
+- "comment": "Domain myvm defined",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- self.assertDictEqual(
+- virt.defined(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- stop_on_reboot=False,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- ),
+- ret,
+- )
+- init_mock.assert_not_called()
+- update_mock.assert_not_called()
+-
+- # Guest update case
+- update_mock = MagicMock(return_value={"definition": True})
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": None,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=True,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+-
+- # No changes case
+- update_mock = MagicMock(return_value={"definition": False})
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": False}},
+- "result": True,
+- "comment": "Domain myvm unchanged",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm"), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=None,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=True,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+-
+- def test_running(self):
+- """
+- running state test cases.
+- """
+- ret = {
+- "name": "myvm",
+- "changes": {},
+- "result": True,
+- "comment": "myvm is running",
+- }
+- with patch.dict(virt.__opts__, {"test": False}):
+- # Test starting an existing guest without changing it
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.start": MagicMock(return_value=0),
+- "virt.update": MagicMock(return_value={"definition": False}),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"started": True}},
+- "comment": "Domain myvm started",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm"), ret)
+-
+- # Test defining and starting a guest the old way
+- init_mock = MagicMock(return_value=True)
+- start_mock = MagicMock(return_value=0)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.init": init_mock,
+- "virt.start": start_mock,
+- "virt.list_domains": MagicMock(return_value=[]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "comment": "Domain myvm defined and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.running(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
+- ),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type=None,
+- arch=None,
+- boot=None,
+- numatune=None,
+- disk=None,
+- disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
+- nic=None,
+- interfaces=None,
+- graphics=None,
+- hypervisor=None,
+- start=False,
+- seed=True,
+- install=True,
+- pub_key=None,
+- priv_key=None,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- stop_on_reboot=False,
+- connection=None,
+- username=None,
+- password=None,
+- serials=None,
+- consoles=None,
+- )
+- start_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+- )
+-
+- # Test defining and starting a guest the new way with connection details
+- init_mock.reset_mock()
+- start_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.init": init_mock,
+- "virt.start": start_mock,
+- "virt.list_domains": MagicMock(return_value=[]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "comment": "Domain myvm defined and started",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- self.assertDictEqual(
+- virt.running(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- boot_dev="network hd",
+- stop_on_reboot=True,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- ),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type="linux",
+- arch="i686",
+- disk="prod",
+- disks=disks,
+- nic="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- hypervisor="qemu",
+- seed=False,
+- boot=None,
+- numatune=None,
+- install=False,
+- start=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- boot_dev="network hd",
+- hypervisor_features=None,
+- clock=None,
+- stop_on_reboot=True,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- serials=None,
+- consoles=None,
+- )
+- start_mock.assert_called_with(
+- "myvm",
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- )
+-
+- # Test with existing guest, but start raising an error
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.update": MagicMock(return_value={"definition": False}),
+- "virt.start": MagicMock(
+- side_effect=[
+- self.mock_libvirt.libvirtError("libvirt error msg")
+- ]
+- ),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {}},
+- "result": False,
+- "comment": "libvirt error msg",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm"), ret)
+-
+- # Working update case when running
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
+-
+- # Working update case when running with boot params
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- }
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", boot=boot, update=True), ret)
+-
+- # Working update case when stopped
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.start": MagicMock(return_value=0),
+- "virt.update": MagicMock(return_value={"definition": True}),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "result": True,
+- "comment": "Domain myvm updated and started",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
+-
+- # Failed live update case
+- update_mock = MagicMock(
+- return_value={
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- )
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.update": update_mock,
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {
+- "myvm": {
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- },
+- "result": True,
+- "comment": "Domain myvm updated with live update(s) failures",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=False,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+-
+- # Failed definition update case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.update": MagicMock(
+- side_effect=[self.mock_libvirt.libvirtError("error message")]
+- ),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update({"changes": {}, "result": False, "comment": "error message"})
+- self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
+-
+- # Test dry-run mode
+- with patch.dict(virt.__opts__, {"test": True}):
+- # Guest defined case
+- init_mock = MagicMock(return_value=True)
+- start_mock = MagicMock(return_value=0)
+- list_mock = MagicMock(return_value=[])
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.init": init_mock,
+- "virt.start": start_mock,
+- "virt.list_domains": list_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "result": None,
+- "comment": "Domain myvm defined and started",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- self.assertDictEqual(
+- virt.running(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- stop_on_reboot=True,
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- ),
+- ret,
+- )
+- init_mock.assert_not_called()
+- start_mock.assert_not_called()
+-
+- # Guest update case
+- update_mock = MagicMock(return_value={"definition": True})
+- start_mock = MagicMock(return_value=0)
+- list_mock = MagicMock(return_value=["myvm"])
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.start": start_mock,
+- "virt.update": update_mock,
+- "virt.list_domains": list_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "result": None,
+- "comment": "Domain myvm updated and started",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=True,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+- start_mock.assert_not_called()
+-
+- # No changes case
+- update_mock = MagicMock(return_value={"definition": False})
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.update": update_mock,
+- "virt.list_domains": list_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": False}},
+- "result": True,
+- "comment": "Domain myvm exists and is running",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm", update=True), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=None,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- numatune=None,
+- test=True,
+- boot_dev=None,
+- hypervisor_features=None,
+- clock=None,
+- serials=None,
+- consoles=None,
+- stop_on_reboot=False,
+- )
+-
+- def test_stopped(self):
+- """
+- stopped state test cases.
+- """
+- ret = {"name": "myvm", "changes": {}, "result": True}
+-
+- shutdown_mock = MagicMock(return_value=True)
+-
+- # Normal case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.shutdown": shutdown_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"stopped": [{"domain": "myvm", "shutdown": True}]},
+- "comment": "Machine has been shut down",
+- }
+- )
+- self.assertDictEqual(virt.stopped("myvm"), ret)
+- shutdown_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+- )
+-
+- # Normal case with user-provided connection parameters
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.shutdown": shutdown_mock,
+- },
+- ):
+- self.assertDictEqual(
+- virt.stopped(
+- "myvm",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- shutdown_mock.assert_called_with(
+- "myvm", connection="myconnection", username="user", password="secret"
+- )
+-
+- # Case where an error occurred during the shutdown
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.shutdown": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
+- "result": False,
+- "comment": "No changes had happened",
+- }
+- )
+- self.assertDictEqual(virt.stopped("myvm"), ret)
+-
+- # Case where the domain doesn't exist
+- with patch.dict(
+- virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
+- ): # pylint: disable=no-member
+- ret.update(
+- {"changes": {}, "result": False, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.stopped("myvm"), ret)
+-
+- # Case where the domain is already stopped
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "shutdown"}),
+- },
+- ):
+- ret.update(
+- {"changes": {}, "result": True, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.stopped("myvm"), ret)
+-
+- def test_powered_off(self):
+- """
+- powered_off state test cases.
+- """
+- ret = {"name": "myvm", "changes": {}, "result": True}
+-
+- stop_mock = MagicMock(return_value=True)
+-
+- # Normal case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.stop": stop_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"unpowered": [{"domain": "myvm", "stop": True}]},
+- "comment": "Machine has been powered off",
+- }
+- )
+- self.assertDictEqual(virt.powered_off("myvm"), ret)
+- stop_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+- )
+-
+- # Normal case with user-provided connection parameters
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.stop": stop_mock,
+- },
+- ):
+- self.assertDictEqual(
+- virt.powered_off(
+- "myvm",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- stop_mock.assert_called_with(
+- "myvm", connection="myconnection", username="user", password="secret"
+- )
+-
+- # Case where an error occurred during the poweroff
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
+- "virt.stop": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
+- "result": False,
+- "comment": "No changes had happened",
+- }
+- )
+- self.assertDictEqual(virt.powered_off("myvm"), ret)
+-
+- # Case where the domain doesn't exist
+- with patch.dict(
+- virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
+- ): # pylint: disable=no-member
+- ret.update(
+- {"changes": {}, "result": False, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.powered_off("myvm"), ret)
+-
+- # Case where the domain is already stopped
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.vm_state": MagicMock(return_value={"myvm": "shutdown"}),
+- },
+- ):
+- ret.update(
+- {"changes": {}, "result": True, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.powered_off("myvm"), ret)
+-
+- def test_snapshot(self):
+- """
+- snapshot state test cases.
+- """
+- ret = {"name": "myvm", "changes": {}, "result": True}
+-
+- snapshot_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.snapshot": snapshot_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"saved": [{"domain": "myvm", "snapshot": True}]},
+- "comment": "Snapshot has been taken",
+- }
+- )
+- self.assertDictEqual(virt.snapshot("myvm"), ret)
+- snapshot_mock.assert_called_with(
+- "myvm", suffix=None, connection=None, username=None, password=None
+- )
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.snapshot": snapshot_mock,
+- },
+- ):
+- self.assertDictEqual(
+- virt.snapshot(
+- "myvm",
+- suffix="snap",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- snapshot_mock.assert_called_with(
+- "myvm",
+- suffix="snap",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.snapshot": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
+- "result": False,
+- "comment": "No changes had happened",
+- }
+- )
+- self.assertDictEqual(virt.snapshot("myvm"), ret)
+-
+- with patch.dict(
+- virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
+- ): # pylint: disable=no-member
+- ret.update(
+- {"changes": {}, "result": False, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.snapshot("myvm"), ret)
+-
+- def test_rebooted(self):
+- """
+- rebooted state test cases.
+- """
+- ret = {"name": "myvm", "changes": {}, "result": True}
+-
+- reboot_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.reboot": reboot_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"rebooted": [{"domain": "myvm", "reboot": True}]},
+- "comment": "Machine has been rebooted",
+- }
+- )
+- self.assertDictEqual(virt.rebooted("myvm"), ret)
+- reboot_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+- )
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.reboot": reboot_mock,
+- },
+- ):
+- self.assertDictEqual(
+- virt.rebooted(
+- "myvm",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- reboot_mock.assert_called_with(
+- "myvm", connection="myconnection", username="user", password="secret"
+- )
+-
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
+- "virt.reboot": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
+- "result": False,
+- "comment": "No changes had happened",
+- }
+- )
+- self.assertDictEqual(virt.rebooted("myvm"), ret)
+-
+- with patch.dict(
+- virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
+- ): # pylint: disable=no-member
+- ret.update(
+- {"changes": {}, "result": False, "comment": "No changes had happened"}
+- )
+- self.assertDictEqual(virt.rebooted("myvm"), ret)
+-
+- def test_network_defined(self):
+- """
+- network_defined state test cases.
+- """
+- ret = {"name": "mynet", "changes": {}, "result": True, "comment": ""}
+- with patch.dict(virt.__opts__, {"test": False}):
+- define_mock = MagicMock(return_value=True)
+- # Non-existing network case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=[{}, {"mynet": {"active": False}}]
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined"},
+- "comment": "Network mynet defined",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_defined(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
+- },
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_called_with(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- autostart=False,
+- start=False,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
+- ],
+- },
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+-
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Network mynet exists"})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- # Test cases with __opts__['test'] set to True
+- with patch.dict(virt.__opts__, {"test": True}):
+- ret.update({"result": None})
+-
+- # Non-existing network case
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined"},
+- "comment": "Network mynet defined",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_defined(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
+- },
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_not_called()
+-
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {"changes": {}, "comment": "Network mynet exists", "result": True}
+- )
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- )
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- def test_network_running(self):
+- """
+- network_running state test cases.
+- """
+- ret = {"name": "mynet", "changes": {}, "result": True, "comment": ""}
+- with patch.dict(virt.__opts__, {"test": False}):
+- define_mock = MagicMock(return_value=True)
+- start_mock = MagicMock(return_value=True)
+- # Non-existing network case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=[{}, {"mynet": {"active": False}}]
+- ),
+- "virt.network_define": define_mock,
+- "virt.network_start": start_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined and started"},
+- "comment": "Network mynet defined and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_running(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
+- },
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_called_with(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- autostart=False,
+- start=False,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
+- ],
+- },
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+- start_mock.assert_called_with(
+- "mynet",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+-
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {"changes": {}, "comment": "Network mynet exists and is running"}
+- )
+- self.assertDictEqual(
+- virt.network_running("mynet", "br2", "bridge"), ret
+- )
+-
+- # Network existing and stopped case
+- start_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": False}}
+- ),
+- "virt.network_start": start_mock,
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network started"},
+- "comment": "Network mynet exists and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_running(
+- "mynet",
+- "br2",
+- "bridge",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- start_mock.assert_called_with(
+- "mynet",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+-
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_running("mynet", "br2", "bridge"), ret
+- )
+-
+- # Test cases with __opts__['test'] set to True
+- with patch.dict(virt.__opts__, {"test": True}):
+- ret.update({"result": None})
+-
+- # Non-existing network case
+- define_mock.reset_mock()
+- start_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": define_mock,
+- "virt.network_start": start_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined and started"},
+- "comment": "Network mynet defined and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_running(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
+- },
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_not_called()
+- start_mock.assert_not_called()
+-
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {"changes": {}, "comment": "Network mynet exists and is running"}
+- )
+- self.assertDictEqual(
+- virt.network_running("mynet", "br2", "bridge"), ret
+- )
+-
+- # Network existing and stopped case
+- start_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": False}}
+- ),
+- "virt.network_start": start_mock,
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network started"},
+- "comment": "Network mynet exists and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_running(
+- "mynet",
+- "br2",
+- "bridge",
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- start_mock.assert_not_called()
+-
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- )
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_running("mynet", "br2", "bridge"), ret
+- )
+-
+ def test_pool_defined(self):
+ """
+ pool_defined state test cases.
+--
+2.29.2
+
+
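Several of the virt test hunks above relax assertions from `assertEqual([], ret[...]["attached"])` to `assertFalse(ret[...].get("attached"))`, presumably because `virt.update` no longer guarantees an `attached`/`detached` key for device classes it did not touch. A minimal illustration of why `.get()` covers both shapes (the return values here are invented for the example):

```python
# Return values invented for the example; only key-absence behaviour matters.
ret_old = {"disk": {"attached": [], "detached": []}}  # keys always present
ret_new = {"disk": {}}  # keys omitted when no device changed

# The old assertion raises KeyError on the new shape:
#   ret_new["disk"]["attached"]  ->  KeyError: 'attached'

# .get() treats "absent" and "empty" alike, so one assertion covers both:
for ret in (ret_old, ret_new):
    assert not ret["disk"].get("attached")
```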
diff --git a/opensuse-3000-libvirt-engine-fixes-251.patch b/opensuse-3000-libvirt-engine-fixes-251.patch
index e5ee199..f16cadc 100644
--- a/opensuse-3000-libvirt-engine-fixes-251.patch
+++ b/opensuse-3000-libvirt-engine-fixes-251.patch
@@ -1,4 +1,4 @@
-From 1f753894b1a5a3f17d1452a572a9a126fa526998 Mon Sep 17 00:00:00 2001
+From 78f2a450ea51a7e72eb0e712131e23fe2777a4ac Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 3 Jul 2020 14:43:53 +0200
Subject: [PATCH] Opensuse 3000 libvirt engine fixes (#251)
@@ -21,10 +21,10 @@ at the engine start.
Co-authored-by: Cédric Bosdonnat
---
- changelog/57746.fixed | 1 +
- salt/engines/libvirt_events.py | 685 ++++++++++++----------
- tests/unit/engines/test_libvirt_events.py | 187 +++---
- 3 files changed, 495 insertions(+), 378 deletions(-)
+ changelog/57746.fixed | 1 +
+ salt/engines/libvirt_events.py | 2 --
+ tests/unit/engines/test_libvirt_events.py | 4 ----
+ 3 files changed, 1 insertion(+), 6 deletions(-)
create mode 100644 changelog/57746.fixed
diff --git a/changelog/57746.fixed b/changelog/57746.fixed
@@ -35,1306 +35,38 @@ index 0000000000..5102bb04e9
@@ -0,0 +1 @@
+Fix the registration of libvirt pool and nodedev events
diff --git a/salt/engines/libvirt_events.py b/salt/engines/libvirt_events.py
-index cdb5d1dfe8..96ba9efc91 100644
+index 45983fe9a0..c090b64a10 100644
--- a/salt/engines/libvirt_events.py
+++ b/salt/engines/libvirt_events.py
-@@ -1,6 +1,6 @@
- # -*- coding: utf-8 -*-
-
--'''
-+"""
- An engine that listens for libvirt events and resends them to the salt event bus.
-
- The minimal configuration is the following and will listen to all events on the
-@@ -35,7 +35,7 @@ CALLBACK_DEFS constant. If the filters list contains ``all``, all
- events will be relayed.
-
- Be aware that the list of events increases with libvirt versions, for example
--network events have been added in libvirt 1.2.1.
-+network events have been added in libvirt 1.2.1 and storage events in 2.0.0.
-
- Running the engine on non-root
- ------------------------------
-@@ -63,7 +63,7 @@ A polkit rule like the following one will allow `salt` user to connect to libvir
- :depends: libvirt 1.0.0+ python binding
-
+@@ -63,10 +63,8 @@ A polkit rule like the following one will allow `salt` user to connect to libvir
.. versionadded:: 2019.2.0
--'''
-+"""
+ """
- from __future__ import absolute_import, unicode_literals, print_function
+-
import logging
-@@ -73,6 +73,7 @@ import salt.utils.event
+
+-# Import salt libs
+ import salt.utils.event
# pylint: disable=no-name-in-module,import-error
- from salt.ext.six.moves.urllib.parse import urlparse
-+
- # pylint: enable=no-name-in-module,import-error
-
- log = logging.getLogger(__name__)
-@@ -85,112 +86,125 @@ except ImportError:
-
-
- def __virtual__():
-- '''
-+ """
- Only load if libvirt python binding is present
-- '''
-+ """
- if libvirt is None:
-- msg = 'libvirt module not found'
-+ msg = "libvirt module not found"
- elif libvirt.getVersion() < 1000000:
-- msg = 'libvirt >= 1.0.0 required'
-+ msg = "libvirt >= 1.0.0 required"
- else:
-- msg = ''
-+ msg = ""
- return not bool(msg), msg
-
-
- REGISTER_FUNCTIONS = {
-- 'domain': 'domainEventRegisterAny',
-- 'network': 'networkEventRegisterAny',
-- 'pool': 'storagePoolEventRegisterAny',
-- 'nodedev': 'nodeDeviceEventRegisterAny',
-- 'secret': 'secretEventRegisterAny'
-+ "domain": "domainEventRegisterAny",
-+ "network": "networkEventRegisterAny",
-+ "pool": "storagePoolEventRegisterAny",
-+ "nodedev": "nodeDeviceEventRegisterAny",
-+ "secret": "secretEventRegisterAny",
- }
-
- # Handle either BLOCK_JOB or BLOCK_JOB_2, but prefer the latter
--if hasattr(libvirt, 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2'):
-- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2'
-+if hasattr(libvirt, "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2"):
-+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2"
- else:
-- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB'
-+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB"
-
- CALLBACK_DEFS = {
-- 'domain': (('lifecycle', None),
-- ('reboot', None),
-- ('rtc_change', None),
-- ('watchdog', None),
-- ('graphics', None),
-- ('io_error', 'VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON'),
-- ('control_error', None),
-- ('disk_change', None),
-- ('tray_change', None),
-- ('pmwakeup', None),
-- ('pmsuspend', None),
-- ('balloon_change', None),
-- ('pmsuspend_disk', None),
-- ('device_removed', None),
-- ('block_job', BLOCK_JOB_ID),
-- ('tunable', None),
-- ('agent_lifecycle', None),
-- ('device_added', None),
-- ('migration_iteration', None),
-- ('job_completed', None),
-- ('device_removal_failed', None),
-- ('metadata_change', None),
-- ('block_threshold', None)),
-- 'network': (('lifecycle', None),),
-- 'pool': (('lifecycle', None),
-- ('refresh', None)),
-- 'nodedev': (('lifecycle', None),
-- ('update', None)),
-- 'secret': (('lifecycle', None),
-- ('value_changed', None))
-+ "domain": (
-+ ("lifecycle", None),
-+ ("reboot", None),
-+ ("rtc_change", None),
-+ ("watchdog", None),
-+ ("graphics", None),
-+ ("io_error", "VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON"),
-+ ("control_error", None),
-+ ("disk_change", None),
-+ ("tray_change", None),
-+ ("pmwakeup", None),
-+ ("pmsuspend", None),
-+ ("balloon_change", None),
-+ ("pmsuspend_disk", None),
-+ ("device_removed", None),
-+ ("block_job", BLOCK_JOB_ID),
-+ ("tunable", None),
-+ ("agent_lifecycle", None),
-+ ("device_added", None),
-+ ("migration_iteration", None),
-+ ("job_completed", None),
-+ ("device_removal_failed", None),
-+ ("metadata_change", None),
-+ ("block_threshold", None),
-+ ),
-+ "network": (("lifecycle", None),),
-+ "pool": (
-+ ("lifecycle", "VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE"),
-+ ("refresh", "VIR_STORAGE_POOL_EVENT_ID_REFRESH"),
-+ ),
-+ "nodedev": (
-+ ("lifecycle", "VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE"),
-+ ("update", "VIR_NODE_DEVICE_EVENT_ID_UPDATE"),
-+ ),
-+ "secret": (("lifecycle", None), ("value_changed", None)),
- }
-
-
- def _compute_subprefix(attr):
-- '''
-+ """
- Get the part before the first '_' or the end of attr including
- the potential '_'
-- '''
-- return ''.join((attr.split('_')[0], '_' if len(attr.split('_')) > 1 else ''))
-+ """
-+ return "".join((attr.split("_")[0], "_" if len(attr.split("_")) > 1 else ""))
-
-
- def _get_libvirt_enum_string(prefix, value):
-- '''
-+ """
- Convert the libvirt enum integer value into a human readable string.
-
- :param prefix: start of the libvirt attribute to look for.
- :param value: integer to convert to string
-- '''
-- attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)]
-+ """
-+ attributes = [
-+ attr[len(prefix) :] for attr in libvirt.__dict__ if attr.startswith(prefix)
-+ ]
-
- # Filter out the values starting with a common base as they match another enum
- prefixes = [_compute_subprefix(p) for p in attributes]
- counts = {p: prefixes.count(p) for p in prefixes}
-- sub_prefixes = [p for p, count in counts.items() if count > 1 or (p.endswith('_') and p[:-1] in prefixes)]
-- filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes]
-+ sub_prefixes = [
-+ p
-+ for p, count in counts.items()
-+ if count > 1 or (p.endswith("_") and p[:-1] in prefixes)
-+ ]
-+ filtered = [
-+ attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes
-+ ]
-
- for candidate in filtered:
-- if value == getattr(libvirt, ''.join((prefix, candidate))):
-- name = candidate.lower().replace('_', ' ')
-+ if value == getattr(libvirt, "".join((prefix, candidate))):
-+ name = candidate.lower().replace("_", " ")
- return name
-- return 'unknown'
-+ return "unknown"
-
-
- def _get_domain_event_detail(event, detail):
-- '''
-+ """
- Convert event and detail numeric values into a tuple of human readable strings
-- '''
-- event_name = _get_libvirt_enum_string('VIR_DOMAIN_EVENT_', event)
-- if event_name == 'unknown':
-- return event_name, 'unknown'
-+ """
-+ event_name = _get_libvirt_enum_string("VIR_DOMAIN_EVENT_", event)
-+ if event_name == "unknown":
-+ return event_name, "unknown"
-
-- prefix = 'VIR_DOMAIN_EVENT_{0}_'.format(event_name.upper())
-+ prefix = "VIR_DOMAIN_EVENT_{0}_".format(event_name.upper())
- detail_name = _get_libvirt_enum_string(prefix, detail)
-
- return event_name, detail_name
-
-
- def _salt_send_event(opaque, conn, data):
-- '''
-+ """
- Convenience function adding common data to the event and sending it
- on the salt event bus.
-
-@@ -198,10 +212,10 @@ def _salt_send_event(opaque, conn, data):
- This is a dict with 'prefix', 'object' and 'event' keys.
- :param conn: libvirt connection
- :param data: additional event data dict to send
-- '''
-- tag_prefix = opaque['prefix']
-- object_type = opaque['object']
-- event_type = opaque['event']
-+ """
-+ tag_prefix = opaque["prefix"]
-+ object_type = opaque["object"]
-+ event_type = opaque["event"]
-
- # Prepare the connection URI to fit in the tag
- # qemu+ssh://user@host:1234/system -> qemu+ssh/user@host:1234/system
-@@ -209,30 +223,28 @@ def _salt_send_event(opaque, conn, data):
- uri_tag = [uri.scheme]
- if uri.netloc:
- uri_tag.append(uri.netloc)
-- path = uri.path.strip('/')
-+ path = uri.path.strip("/")
- if path:
- uri_tag.append(path)
- uri_str = "/".join(uri_tag)
-
- # Append some common data
-- all_data = {
-- 'uri': conn.getURI()
-- }
-+ all_data = {"uri": conn.getURI()}
- all_data.update(data)
-
-- tag = '/'.join((tag_prefix, uri_str, object_type, event_type))
-+ tag = "/".join((tag_prefix, uri_str, object_type, event_type))
-
- # Actually send the event in salt
-- if __opts__.get('__role') == 'master':
-- salt.utils.event.get_master_event(
-- __opts__,
-- __opts__['sock_dir']).fire_event(all_data, tag)
-+ if __opts__.get("__role") == "master":
-+ salt.utils.event.get_master_event(__opts__, __opts__["sock_dir"]).fire_event(
-+ all_data, tag
-+ )
- else:
-- __salt__['event.send'](tag, all_data)
-+ __salt__["event.send"](tag, all_data)
-
-
- def _salt_send_domain_event(opaque, conn, domain, event, event_data):
-- '''
-+ """
- Helper function send a salt event for a libvirt domain.
-
- :param opaque: the opaque data that is passed to the callback.
-@@ -241,375 +253,428 @@ def _salt_send_domain_event(opaque, conn, domain, event, event_data):
- :param domain: name of the domain related to the event
- :param event: name of the event
- :param event_data: additional event data dict to send
-- '''
-+ """
- data = {
-- 'domain': {
-- 'name': domain.name(),
-- 'id': domain.ID(),
-- 'uuid': domain.UUIDString()
-+ "domain": {
-+ "name": domain.name(),
-+ "id": domain.ID(),
-+ "uuid": domain.UUIDString(),
- },
-- 'event': event
-+ "event": event,
- }
- data.update(event_data)
- _salt_send_event(opaque, conn, data)
-
-
- def _domain_event_lifecycle_cb(conn, domain, event, detail, opaque):
-- '''
-+ """
- Domain lifecycle events handler
-- '''
-+ """
- event_str, detail_str = _get_domain_event_detail(event, detail)
-
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'event': event_str,
-- 'detail': detail_str
-- })
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {"event": event_str, "detail": detail_str},
-+ )
-
-
- def _domain_event_reboot_cb(conn, domain, opaque):
-- '''
-+ """
- Domain reboot events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {})
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {})
-
-
- def _domain_event_rtc_change_cb(conn, domain, utcoffset, opaque):
-- '''
-+ """
- Domain RTC change events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'utcoffset': utcoffset
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque, conn, domain, opaque["event"], {"utcoffset": utcoffset}
-+ )
-
-
- def _domain_event_watchdog_cb(conn, domain, action, opaque):
-- '''
-+ """
- Domain watchdog events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action)
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {"action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_WATCHDOG_", action)},
-+ )
-
-
- def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque):
-- '''
-+ """
- Domain I/O Error events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'srcPath': srcpath,
-- 'dev': devalias,
-- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action),
-- 'reason': reason
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "srcPath": srcpath,
-+ "dev": devalias,
-+ "action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_IO_ERROR_", action),
-+ "reason": reason,
-+ },
-+ )
-
-
--def _domain_event_graphics_cb(conn, domain, phase, local, remote, auth, subject, opaque):
-- '''
-+def _domain_event_graphics_cb(
-+ conn, domain, phase, local, remote, auth, subject, opaque
-+):
-+ """
- Domain graphics events handler
-- '''
-- prefix = 'VIR_DOMAIN_EVENT_GRAPHICS_'
-+ """
-+ prefix = "VIR_DOMAIN_EVENT_GRAPHICS_"
-
- def get_address(addr):
-- '''
-+ """
- transform address structure into event data piece
-- '''
-- return {'family': _get_libvirt_enum_string('{0}_ADDRESS_'.format(prefix), addr['family']),
-- 'node': addr['node'],
-- 'service': addr['service']}
-+ """
-+ return {
-+ "family": _get_libvirt_enum_string(
-+ "{0}_ADDRESS_".format(prefix), addr["family"]
-+ ),
-+ "node": addr["node"],
-+ "service": addr["service"],
-+ }
-
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'phase': _get_libvirt_enum_string(prefix, phase),
-- 'local': get_address(local),
-- 'remote': get_address(remote),
-- 'authScheme': auth,
-- 'subject': [{'type': item[0], 'name': item[1]} for item in subject]
-- })
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "phase": _get_libvirt_enum_string(prefix, phase),
-+ "local": get_address(local),
-+ "remote": get_address(remote),
-+ "authScheme": auth,
-+ "subject": [{"type": item[0], "name": item[1]} for item in subject],
-+ },
-+ )
-
-
- def _domain_event_control_error_cb(conn, domain, opaque):
-- '''
-+ """
- Domain control error events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {})
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {})
-
-
- def _domain_event_disk_change_cb(conn, domain, old_src, new_src, dev, reason, opaque):
-- '''
-+ """
- Domain disk change events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'oldSrcPath': old_src,
-- 'newSrcPath': new_src,
-- 'dev': dev,
-- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_DISK_', reason)
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "oldSrcPath": old_src,
-+ "newSrcPath": new_src,
-+ "dev": dev,
-+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_DISK_", reason),
-+ },
-+ )
-
-
- def _domain_event_tray_change_cb(conn, domain, dev, reason, opaque):
-- '''
-+ """
- Domain tray change events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'dev': dev,
-- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_TRAY_CHANGE_', reason)
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "dev": dev,
-+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_TRAY_CHANGE_", reason),
-+ },
-+ )
-
-
- def _domain_event_pmwakeup_cb(conn, domain, reason, opaque):
-- '''
-+ """
- Domain wakeup events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'reason': 'unknown' # currently unused
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
-+ )
-
-
- def _domain_event_pmsuspend_cb(conn, domain, reason, opaque):
-- '''
-+ """
- Domain suspend events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'reason': 'unknown' # currently unused
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
-+ )
-
-
- def _domain_event_balloon_change_cb(conn, domain, actual, opaque):
-- '''
-+ """
- Domain balloon change events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'actual': actual
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"actual": actual})
-
-
- def _domain_event_pmsuspend_disk_cb(conn, domain, reason, opaque):
-- '''
-+ """
- Domain disk suspend events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'reason': 'unknown' # currently unused
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
-+ )
-
-
- def _domain_event_block_job_cb(conn, domain, disk, job_type, status, opaque):
-- '''
-+ """
- Domain block job events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'disk': disk,
-- 'type': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_TYPE_', job_type),
-- 'status': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_', status)
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "disk": disk,
-+ "type": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_TYPE_", job_type),
-+ "status": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_", status),
-+ },
-+ )
-
-
- def _domain_event_device_removed_cb(conn, domain, dev, opaque):
-- '''
-+ """
- Domain device removal events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'dev': dev
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
-
-
- def _domain_event_tunable_cb(conn, domain, params, opaque):
-- '''
-+ """
- Domain tunable events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'params': params
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params})
-
-
- # pylint: disable=invalid-name
- def _domain_event_agent_lifecycle_cb(conn, domain, state, reason, opaque):
-- '''
-+ """
- Domain agent lifecycle events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'state': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_', state),
-- 'reason': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_', reason)
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "state": _get_libvirt_enum_string(
-+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_", state
-+ ),
-+ "reason": _get_libvirt_enum_string(
-+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_", reason
-+ ),
-+ },
-+ )
-
-
- def _domain_event_device_added_cb(conn, domain, dev, opaque):
-- '''
-+ """
- Domain device addition events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'dev': dev
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
-
-
- # pylint: disable=invalid-name
- def _domain_event_migration_iteration_cb(conn, domain, iteration, opaque):
-- '''
-+ """
- Domain migration iteration events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'iteration': iteration
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque, conn, domain, opaque["event"], {"iteration": iteration}
-+ )
-
-
- def _domain_event_job_completed_cb(conn, domain, params, opaque):
-- '''
-+ """
- Domain job completion events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'params': params
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params})
-
-
- def _domain_event_device_removal_failed_cb(conn, domain, dev, opaque):
-- '''
-+ """
- Domain device removal failure events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'dev': dev
-- })
-+ """
-+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
-
-
- def _domain_event_metadata_change_cb(conn, domain, mtype, nsuri, opaque):
-- '''
-+ """
- Domain metadata change events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'type': _get_libvirt_enum_string('VIR_DOMAIN_METADATA_', mtype),
-- 'nsuri': nsuri
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {
-+ "type": _get_libvirt_enum_string("VIR_DOMAIN_METADATA_", mtype),
-+ "nsuri": nsuri,
-+ },
-+ )
-
-
--def _domain_event_block_threshold_cb(conn, domain, dev, path, threshold, excess, opaque):
-- '''
-+def _domain_event_block_threshold_cb(
-+ conn, domain, dev, path, threshold, excess, opaque
-+):
-+ """
- Domain block threshold events handler
-- '''
-- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
-- 'dev': dev,
-- 'path': path,
-- 'threshold': threshold,
-- 'excess': excess
-- })
-+ """
-+ _salt_send_domain_event(
-+ opaque,
-+ conn,
-+ domain,
-+ opaque["event"],
-+ {"dev": dev, "path": path, "threshold": threshold, "excess": excess},
-+ )
-
-
- def _network_event_lifecycle_cb(conn, net, event, detail, opaque):
-- '''
-+ """
- Network lifecycle events handler
-- '''
--
-- _salt_send_event(opaque, conn, {
-- 'network': {
-- 'name': net.name(),
-- 'uuid': net.UUIDString()
-+ """
-+
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {
-+ "network": {"name": net.name(), "uuid": net.UUIDString()},
-+ "event": _get_libvirt_enum_string("VIR_NETWORK_EVENT_", event),
-+ "detail": "unknown", # currently unused
- },
-- 'event': _get_libvirt_enum_string('VIR_NETWORK_EVENT_', event),
-- 'detail': 'unknown' # currently unused
-- })
-+ )
-
-
- def _pool_event_lifecycle_cb(conn, pool, event, detail, opaque):
-- '''
-+ """
- Storage pool lifecycle events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'pool': {
-- 'name': pool.name(),
-- 'uuid': pool.UUIDString()
-+ """
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {
-+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()},
-+ "event": _get_libvirt_enum_string("VIR_STORAGE_POOL_EVENT_", event),
-+ "detail": "unknown", # currently unused
- },
-- 'event': _get_libvirt_enum_string('VIR_STORAGE_POOL_EVENT_', event),
-- 'detail': 'unknown' # currently unused
-- })
-+ )
-
-
- def _pool_event_refresh_cb(conn, pool, opaque):
-- '''
-+ """
- Storage pool refresh events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'pool': {
-- 'name': pool.name(),
-- 'uuid': pool.UUIDString()
-+ """
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {
-+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()},
-+ "event": opaque["event"],
- },
-- 'event': opaque['event']
-- })
-+ )
-
-
- def _nodedev_event_lifecycle_cb(conn, dev, event, detail, opaque):
-- '''
-+ """
- Node device lifecycle events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'nodedev': {
-- 'name': dev.name()
-+ """
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {
-+ "nodedev": {"name": dev.name()},
-+ "event": _get_libvirt_enum_string("VIR_NODE_DEVICE_EVENT_", event),
-+ "detail": "unknown", # currently unused
- },
-- 'event': _get_libvirt_enum_string('VIR_NODE_DEVICE_EVENT_', event),
-- 'detail': 'unknown' # currently unused
-- })
-+ )
-
-
- def _nodedev_event_update_cb(conn, dev, opaque):
-- '''
-+ """
- Node device update events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'nodedev': {
-- 'name': dev.name()
-- },
-- 'event': opaque['event']
-- })
-+ """
-+ _salt_send_event(
-+ opaque, conn, {"nodedev": {"name": dev.name()}, "event": opaque["event"]}
-+ )
-
-
- def _secret_event_lifecycle_cb(conn, secret, event, detail, opaque):
-- '''
-+ """
- Secret lifecycle events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'secret': {
-- 'uuid': secret.UUIDString()
-+ """
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {
-+ "secret": {"uuid": secret.UUIDString()},
-+ "event": _get_libvirt_enum_string("VIR_SECRET_EVENT_", event),
-+ "detail": "unknown", # currently unused
- },
-- 'event': _get_libvirt_enum_string('VIR_SECRET_EVENT_', event),
-- 'detail': 'unknown' # currently unused
-- })
-+ )
-
-
- def _secret_event_value_changed_cb(conn, secret, opaque):
-- '''
-+ """
- Secret value change events handler
-- '''
-- _salt_send_event(opaque, conn, {
-- 'secret': {
-- 'uuid': secret.UUIDString()
-- },
-- 'event': opaque['event']
-- })
-+ """
-+ _salt_send_event(
-+ opaque,
-+ conn,
-+ {"secret": {"uuid": secret.UUIDString()}, "event": opaque["event"]},
-+ )
-
-
- def _cleanup(cnx):
-- '''
-+ """
- Close the libvirt connection
-
- :param cnx: libvirt connection
-- '''
-- log.debug('Closing libvirt connection: %s', cnx.getURI())
-+ """
-+ log.debug("Closing libvirt connection: %s", cnx.getURI())
- cnx.close()
-
-
- def _callbacks_cleanup(cnx, callback_ids):
-- '''
-+ """
- Unregister all the registered callbacks
-
- :param cnx: libvirt connection
- :param callback_ids: dictionary mapping a libvirt object type to an ID list
- of callbacks to deregister
-- '''
-+ """
- for obj, ids in callback_ids.items():
- register_name = REGISTER_FUNCTIONS[obj]
-- deregister_name = register_name.replace('Reg', 'Dereg')
-+ deregister_name = register_name.replace("Reg", "Dereg")
- deregister = getattr(cnx, deregister_name)
- for callback_id in ids:
- deregister(callback_id)
-
-
- def _register_callback(cnx, tag_prefix, obj, event, real_id):
-- '''
-+ """
- Helper function registering a callback
-
- :param cnx: libvirt connection
-@@ -620,10 +685,10 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
- :param real_id: the libvirt name of an alternative event id to use or None
-
- :rtype integer value needed to deregister the callback
-- '''
-+ """
- libvirt_name = real_id
- if real_id is None:
-- libvirt_name = 'VIR_{0}_EVENT_ID_{1}'.format(obj, event).upper()
-+ libvirt_name = "VIR_{0}_EVENT_ID_{1}".format(obj, event).upper()
-
- if not hasattr(libvirt, libvirt_name):
- log.warning('Skipping "%s/%s" events: libvirt too old', obj, event)
-@@ -633,34 +698,34 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
- callback_name = "_{0}_event_{1}_cb".format(obj, event)
- callback = globals().get(callback_name, None)
- if callback is None:
-- log.error('Missing function %s in engine', callback_name)
-+ log.error("Missing function %s in engine", callback_name)
- return None
-
- register = getattr(cnx, REGISTER_FUNCTIONS[obj])
-- return register(None, libvirt_id, callback,
-- {'prefix': tag_prefix,
-- 'object': obj,
-- 'event': event})
-+ return register(
-+ None,
-+ libvirt_id,
-+ callback,
-+ {"prefix": tag_prefix, "object": obj, "event": event},
-+ )
-
-
- def _append_callback_id(ids, obj, callback_id):
-- '''
-+ """
- Helper function adding a callback ID to the IDs dict.
- The callback ids dict maps an object to event callback ids.
-
- :param ids: dict of callback IDs to update
- :param obj: one of the keys of REGISTER_FUNCTIONS
- :param callback_id: the result of _register_callback
-- '''
-+ """
- if obj not in ids:
- ids[obj] = []
- ids[obj].append(callback_id)
-
-
--def start(uri=None,
-- tag_prefix='salt/engines/libvirt_events',
-- filters=None):
-- '''
-+def start(uri=None, tag_prefix="salt/engines/libvirt_events", filters=None):
-+ """
- Listen to libvirt events and forward them to salt.
-
- :param uri: libvirt URI to listen on.
-@@ -668,14 +733,14 @@ def start(uri=None,
- :param tag_prefix: the beginning of the salt event tag to use.
- Defaults to 'salt/engines/libvirt_events'
- :param filters: the list of events to listen on. Defaults to 'all'
-- '''
-+ """
- if filters is None:
-- filters = ['all']
-+ filters = ["all"]
- try:
- libvirt.virEventRegisterDefaultImpl()
-
- cnx = libvirt.openReadOnly(uri)
-- log.debug('Opened libvirt uri: %s', cnx.getURI())
-+ log.debug("Opened libvirt uri: %s", cnx.getURI())
-
- callback_ids = {}
- all_filters = "all" in filters
-@@ -683,17 +748,19 @@ def start(uri=None,
- for obj, event_defs in CALLBACK_DEFS.items():
- for event, real_id in event_defs:
- event_filter = "/".join((obj, event))
-- if event_filter not in filters and obj not in filters and not all_filters:
-+ if (
-+ event_filter not in filters
-+ and obj not in filters
-+ and not all_filters
-+ ):
- continue
-- registered_id = _register_callback(cnx, tag_prefix,
-- obj, event, real_id)
-+ registered_id = _register_callback(cnx, tag_prefix, obj, event, real_id)
- if registered_id:
- _append_callback_id(callback_ids, obj, registered_id)
-
- exit_loop = False
- while not exit_loop:
- exit_loop = libvirt.virEventRunDefaultImpl() < 0
-- log.debug('=== in the loop exit_loop %s ===', exit_loop)
-
- except Exception as err: # pylint: disable=broad-except
- log.exception(err)
diff --git a/tests/unit/engines/test_libvirt_events.py b/tests/unit/engines/test_libvirt_events.py
-index d9143a320b..5f1488e422 100644
+index 63623c4d79..5cf80f0bc7 100644
--- a/tests/unit/engines/test_libvirt_events.py
+++ b/tests/unit/engines/test_libvirt_events.py
-@@ -1,16 +1,14 @@
- # -*- coding: utf-8 -*-
--'''
-+"""
+@@ -1,12 +1,8 @@
+ """
unit tests for the libvirt_events engine
--'''
-+"""
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
+ """
+-# Import Python libs
- # Import Salt Testing Libs
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import TestCase
--from tests.support.mock import (
-- MagicMock,
-- patch)
-+from tests.support.mock import MagicMock, patch
-
- # Import Salt Libs
+-# Import Salt Libs
import salt.engines.libvirt_events as libvirt_events
-@@ -20,68 +18,78 @@ import salt.engines.libvirt_events as libvirt_events
-
-
- class EngineLibvirtEventTestCase(TestCase, LoaderModuleMockMixin):
-- '''
-+ """
- Test cases for salt.engine.libvirt_events
-- '''
-+ """
-
- def setup_loader_modules(self):
-- patcher = patch('salt.engines.libvirt_events.libvirt')
-+ patcher = patch("salt.engines.libvirt_events.libvirt")
- self.mock_libvirt = patcher.start()
- self.mock_libvirt.getVersion.return_value = 2000000
- self.mock_libvirt.virEventRunDefaultImpl.return_value = -1 # Don't loop forever
-+ self.mock_libvirt.virEventRunDefaultImpl.return_value = (
-+ -1
-+ ) # Don't loop forever
- self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
- self.mock_libvirt.VIR_DOMAIN_EVENT_ID_REBOOT = 1
-+ self.mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE = 0
-+ self.mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_REFRESH = 1
-+ self.mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE = 0
-+ self.mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_UPDATE = 1
- self.addCleanup(patcher.stop)
-- self.addCleanup(delattr, self, 'mock_libvirt')
-+ self.addCleanup(delattr, self, "mock_libvirt")
- return {libvirt_events: {}}
-
-- @patch('salt.engines.libvirt_events.libvirt',
-- VIR_PREFIX_NONE=0,
-- VIR_PREFIX_ONE=1,
-- VIR_PREFIX_TWO=2,
-- VIR_PREFIX_SUB_FOO=0,
-- VIR_PREFIX_SUB_BAR=1,
-- VIR_PREFIX_SUB_FOOBAR=2)
-+ @patch(
-+ "salt.engines.libvirt_events.libvirt",
-+ VIR_PREFIX_NONE=0,
-+ VIR_PREFIX_ONE=1,
-+ VIR_PREFIX_TWO=2,
-+ VIR_PREFIX_SUB_FOO=0,
-+ VIR_PREFIX_SUB_BAR=1,
-+ VIR_PREFIX_SUB_FOOBAR=2,
-+ )
- def test_get_libvirt_enum_string_subprefix(self, libvirt_mock):
-- '''
-+ """
- Make sure the libvirt enum-value-to-string conversion works reliably with
- elements with a sub prefix, e.g. VIR_PREFIX_SUB_* in this case.
-- '''
-+ """
- # Test case with a sub prefix
-
-- assert libvirt_events._get_libvirt_enum_string('VIR_PREFIX_', 2) == 'two'
-+ assert libvirt_events._get_libvirt_enum_string("VIR_PREFIX_", 2) == "two"
-
-- @patch('salt.engines.libvirt_events.libvirt',
-- VIR_PREFIX_FOO=0,
-- VIR_PREFIX_BAR_FOO=1)
-+ @patch(
-+ "salt.engines.libvirt_events.libvirt", VIR_PREFIX_FOO=0, VIR_PREFIX_BAR_FOO=1
-+ )
- def test_get_libvirt_enum_string_underscores(self, libvirt_mock):
-- '''
-+ """
- Make sure the libvirt enum-value-to-string conversion works reliably and items
- with an underscore aren't confused with sub prefixes.
-- '''
-- assert libvirt_events._get_libvirt_enum_string('VIR_PREFIX_', 1) == 'bar foo'
-
-- @patch('salt.engines.libvirt_events.libvirt',
-- VIR_DOMAIN_EVENT_CRASHED_PANICKED=0,
-- VIR_DOMAIN_EVENT_DEFINED=0,
-- VIR_DOMAIN_EVENT_UNDEFINED=1,
-- VIR_DOMAIN_EVENT_CRASHED=2,
-- VIR_DOMAIN_EVENT_DEFINED_ADDED=0,
-- VIR_DOMAIN_EVENT_DEFINED_UPDATED=1)
-+ """
-+ assert libvirt_events._get_libvirt_enum_string("VIR_PREFIX_", 1) == "bar foo"
-+
-+ @patch(
-+ "salt.engines.libvirt_events.libvirt",
-+ VIR_DOMAIN_EVENT_CRASHED_PANICKED=0,
-+ VIR_DOMAIN_EVENT_DEFINED=0,
-+ VIR_DOMAIN_EVENT_UNDEFINED=1,
-+ VIR_DOMAIN_EVENT_CRASHED=2,
-+ VIR_DOMAIN_EVENT_DEFINED_ADDED=0,
-+ VIR_DOMAIN_EVENT_DEFINED_UPDATED=1,
-+ )
- def test_get_domain_event_detail(self, mock_libvirt):
-- '''
-+ """
- Test get_domain_event_detail function
-- '''
-- assert libvirt_events._get_domain_event_detail(1, 2) == ('undefined', 'unknown')
-- assert libvirt_events._get_domain_event_detail(0, 1) == ('defined', 'updated')
-- assert libvirt_events._get_domain_event_detail(4, 2) == ('unknown', 'unknown')
-+ """
-+ assert libvirt_events._get_domain_event_detail(1, 2) == ("undefined", "unknown")
-+ assert libvirt_events._get_domain_event_detail(0, 1) == ("defined", "updated")
-+ assert libvirt_events._get_domain_event_detail(4, 2) == ("unknown", "unknown")
-
-- @patch('salt.engines.libvirt_events.libvirt', VIR_NETWORK_EVENT_ID_LIFECYCLE=1000)
-+ @patch("salt.engines.libvirt_events.libvirt", VIR_NETWORK_EVENT_ID_LIFECYCLE=1000)
- def test_event_register(self, mock_libvirt):
-- '''
-+ """
- Test that the libvirt_events engine actually registers event callbacks, catches
- events, and cleans up before exiting.
-- '''
-+ """
- mock_cnx = MagicMock()
- mock_libvirt.openReadOnly.return_value = mock_cnx
-
-@@ -90,71 +98,112 @@ class EngineLibvirtEventTestCase(TestCase, LoaderModuleMockMixin):
-
- mock_cnx.networkEventRegisterAny.return_value = 10000
-
-- libvirt_events.start('test:///', 'test/prefix')
-+ libvirt_events.start("test:///", "test/prefix")
-
- # Check that the connection has been opened
-- mock_libvirt.openReadOnly.assert_called_once_with('test:///')
-+ mock_libvirt.openReadOnly.assert_called_once_with("test:///")
-
- # Check that the connection has been closed
- mock_cnx.close.assert_called_once()
-
- # Check events registration and deregistration
- mock_cnx.domainEventRegisterAny.assert_any_call(
-- None, mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
-- libvirt_events._domain_event_lifecycle_cb,
-- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'})
-+ None,
-+ mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
-+ libvirt_events._domain_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"},
-+ )
- mock_cnx.networkEventRegisterAny.assert_any_call(
-- None, mock_libvirt.VIR_NETWORK_EVENT_ID_LIFECYCLE,
-- libvirt_events._network_event_lifecycle_cb,
-- {'prefix': 'test/prefix', 'object': 'network', 'event': 'lifecycle'})
-+ None,
-+ mock_libvirt.VIR_NETWORK_EVENT_ID_LIFECYCLE,
-+ libvirt_events._network_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "network", "event": "lifecycle"},
-+ )
-+ mock_cnx.storagePoolEventRegisterAny.assert_any_call(
-+ None,
-+ mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE,
-+ libvirt_events._pool_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "pool", "event": "lifecycle"},
-+ )
-+ mock_cnx.storagePoolEventRegisterAny.assert_any_call(
-+ None,
-+ mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_REFRESH,
-+ libvirt_events._pool_event_refresh_cb,
-+ {"prefix": "test/prefix", "object": "pool", "event": "refresh"},
-+ )
-+ mock_cnx.nodeDeviceEventRegisterAny.assert_any_call(
-+ None,
-+ mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE,
-+ libvirt_events._nodedev_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "nodedev", "event": "lifecycle"},
-+ )
-+ mock_cnx.nodeDeviceEventRegisterAny.assert_any_call(
-+ None,
-+ mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_UPDATE,
-+ libvirt_events._nodedev_event_update_cb,
-+ {"prefix": "test/prefix", "object": "nodedev", "event": "update"},
-+ )
-
- # Check that the deregister events are called with the result of register
- mock_cnx.networkEventDeregisterAny.assert_called_with(
-- mock_cnx.networkEventRegisterAny.return_value)
-+ mock_cnx.networkEventRegisterAny.return_value
-+ )
-
- # Check that the default 'all' filter actually worked
-- counts = {obj: len(callback_def) for obj, callback_def in libvirt_events.CALLBACK_DEFS.items()}
-+ counts = {
-+ obj: len(callback_def)
-+ for obj, callback_def in libvirt_events.CALLBACK_DEFS.items()
-+ }
- for obj, count in counts.items():
- register = libvirt_events.REGISTER_FUNCTIONS[obj]
- assert getattr(mock_cnx, register).call_count == count
-
- def test_event_skipped(self):
-- '''
-+ """
- Test that events are skipped if their ID isn't defined in the libvirt
- module (older libvirt)
-- '''
-- self.mock_libvirt.mock_add_spec([
-- 'openReadOnly',
-- 'virEventRegisterDefaultImpl',
-- 'virEventRunDefaultImpl',
-- 'VIR_DOMAIN_EVENT_ID_LIFECYCLE'], spec_set=True)
--
-- libvirt_events.start('test:///', 'test/prefix')
-+ """
-+ self.mock_libvirt.mock_add_spec(
-+ [
-+ "openReadOnly",
-+ "virEventRegisterDefaultImpl",
-+ "virEventRunDefaultImpl",
-+ "VIR_DOMAIN_EVENT_ID_LIFECYCLE",
-+ ],
-+ spec_set=True,
-+ )
-+
-+ libvirt_events.start("test:///", "test/prefix")
-
- # Check events registration and deregistration
- mock_cnx = self.mock_libvirt.openReadOnly.return_value
-
- mock_cnx.domainEventRegisterAny.assert_any_call(
-- None, self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
-- libvirt_events._domain_event_lifecycle_cb,
-- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'})
-+ None,
-+ self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
-+ libvirt_events._domain_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"},
-+ )
-
- # Network events should have been skipped
- mock_cnx.networkEventRegisterAny.assert_not_called()
-
- def test_event_filtered(self):
-- '''
-+ """
- Test that events outside the requested filters are not registered
- (only domain/lifecycle is listened to here)
-- '''
-- libvirt_events.start('test', 'test/prefix', 'domain/lifecycle')
-+ """
-+ libvirt_events.start("test", "test/prefix", "domain/lifecycle")
-
- # Check events registration and deregistration
- mock_cnx = self.mock_libvirt.openReadOnly.return_value
-
- mock_cnx.domainEventRegisterAny.assert_any_call(
-- None, 0, libvirt_events._domain_event_lifecycle_cb,
-- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'})
-+ None,
-+ 0,
-+ libvirt_events._domain_event_lifecycle_cb,
-+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"},
-+ )
-
- # Network events should have been filtered out
- mock_cnx.networkEventRegisterAny.assert_not_called()
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
--
-2.27.0
+2.29.2
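The tests above pin down how the engine's _get_libvirt_enum_string helper turns numeric libvirt constants back into readable strings: with VIR_PREFIX_TWO == 2 the result is 'two', and VIR_PREFIX_BAR_FOO == 1 yields 'bar foo'. A minimal sketch of that lookup, assuming a plain attribute scan (the real helper additionally filters sub-prefixed constants such as VIR_PREFIX_SUB_* so they cannot shadow the parent enum):

.. code-block:: python

    import libvirt

    def enum_to_string(prefix, value):
        # Scan the libvirt module for PREFIX_* constants and return the
        # suffix of the first one whose value matches, lowercased and with
        # underscores turned into spaces.
        for attr in dir(libvirt):
            if attr.startswith(prefix) and getattr(libvirt, attr) == value:
                return attr[len(prefix):].lower().replace("_", " ")
        return "unknown"

    # With the constants patched in the first test above, the value 2 matches
    # both VIR_PREFIX_TWO and VIR_PREFIX_SUB_FOOBAR, so this naive scan may
    # return "sub foobar"; the real helper filters the SUB_ group first and
    # returns "two".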
diff --git a/opensuse-3000-virt-defined-states-222.patch b/opensuse-3000-virt-defined-states-222.patch
index 7721acf..afaa867 100644
--- a/opensuse-3000-virt-defined-states-222.patch
+++ b/opensuse-3000-virt-defined-states-222.patch
@@ -1,4 +1,4 @@
-From e5d42c6313ba051f22f83cbde3da9410fd7fc3b9 Mon Sep 17 00:00:00 2001
+From 8deed909147041f8befad8fee9d27bb81595ed23 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Fri, 13 Mar 2020 16:38:08 +0100
Subject: [PATCH] openSUSE-3000 virt-defined-states (#222)
@@ -44,756 +44,369 @@ virt.running state now may call virt.update with None mem and cpu
parameters. This was not handled in _gen_xml(). Also add some more test
cases matching this for virt.update.
---
- salt/modules/virt.py | 16 +-
- salt/states/virt.py | 673 +++++++++++++++-----
- tests/unit/modules/test_virt.py | 26 +
- tests/unit/states/test_virt.py | 1346 ++++++++++++++++++++++++++++++++-------
- 4 files changed, 1665 insertions(+), 396 deletions(-)
+ salt/modules/virt.py | 44 +-
+ salt/states/virt.py | 268 +++++++---
+ tests/unit/modules/test_virt.py | 845 +-----------------------------
+ tests/unit/states/test_virt.py | 893 +++++++++++++++++++++++++++-----
+ 4 files changed, 971 insertions(+), 1079 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index 3889238ecd..f0820e8825 100644
+index 362c2a68b5..7314bf1d6e 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -1783,6 +1783,7 @@ def update(name,
- graphics=None,
- live=True,
- boot=None,
-+ test=False,
- **kwargs):
- '''
- Update the definition of an existing domain.
-@@ -1835,6 +1836,10 @@ def update(name,
+@@ -2579,7 +2579,6 @@ def update(
+ live=True,
+ boot=None,
+ test=False,
+- boot_dev=None,
+ **kwargs
+ ):
+ """
+@@ -2653,17 +2652,9 @@ def update(
.. versionadded:: 3000
-+ :param test: run in dry-run mode if set to True
-+
+- :param boot_dev:
+- Space separated list of devices to boot from sorted by decreasing priority.
+- Values can be ``hd``, ``fd``, ``cdrom`` or ``network``.
+-
+- By default, the value will ``"hd"``.
+-
+- .. versionadded:: 3002
+-
+ :param test: run in dry-run mode if set to True
+
+- .. versionadded:: 3001
+ .. versionadded:: sodium
-+
+
:return:
- Returns a dictionary indicating the status of what has been done. It is structured in
-@@ -1880,8 +1885,8 @@ def update(name,
- boot = _handle_remote_boot_params(boot)
+@@ -2713,7 +2704,6 @@ def update(
- new_desc = ElementTree.fromstring(_gen_xml(name,
-- cpu,
-- mem,
-+ cpu or 0,
-+ mem or 0,
- all_disks,
- _get_merged_nics(hypervisor, nic_profile, interfaces),
- hypervisor,
-@@ -1973,11 +1978,12 @@ def update(name,
- if changes['disk']:
- for idx, item in enumerate(changes['disk']['sorted']):
- source_file = all_disks[idx]['source_file']
-- if item in changes['disk']['new'] and source_file and not os.path.isfile(source_file):
-+ if item in changes['disk']['new'] and source_file and not os.path.isfile(source_file) and not test:
- _qemu_image_create(all_disks[idx])
+ new_desc = ElementTree.fromstring(
+ _gen_xml(
+- conn,
+ name,
+ cpu or 0,
+ mem or 0,
+@@ -2879,26 +2869,22 @@ def update(
+ # Set the new definition
+ if need_update:
+ # Create missing disks if needed
+- try:
+- if changes["disk"]:
+- for idx, item in enumerate(changes["disk"]["sorted"]):
+- source_file = all_disks[idx].get("source_file")
+- # We don't want to create image disks for cdrom devices
+- if all_disks[idx].get("device", "disk") == "cdrom":
+- continue
+- if (
+- item in changes["disk"]["new"]
+- and source_file
+- and not os.path.isfile(source_file)
+- ):
+- _qemu_image_create(all_disks[idx])
+- elif item in changes["disk"]["new"] and not source_file:
+- _disk_volume_create(conn, all_disks[idx])
++ if changes["disk"]:
++ for idx, item in enumerate(changes["disk"]["sorted"]):
++ source_file = all_disks[idx]["source_file"]
++ if (
++ item in changes["disk"]["new"]
++ and source_file
++ and not os.path.isfile(source_file)
++ and not test
++ ):
++ _qemu_image_create(all_disks[idx])
- try:
-- conn.defineXML(salt.utils.stringutils.to_str(ElementTree.tostring(desc)))
-+ if not test:
-+ conn.defineXML(salt.utils.stringutils.to_str(ElementTree.tostring(desc)))
- status['definition'] = True
++ try:
+ if not test:
+- xml_desc = ElementTree.tostring(desc)
+- log.debug("Update virtual machine definition: %s", xml_desc)
+- conn.defineXML(salt.utils.stringutils.to_str(xml_desc))
++ conn.defineXML(
++ salt.utils.stringutils.to_str(ElementTree.tostring(desc))
++ )
+ status["definition"] = True
except libvirt.libvirtError as err:
conn.close()
-@@ -2010,7 +2016,7 @@ def update(name,
-
- for cmd in commands:
- try:
-- ret = getattr(domain, cmd['cmd'])(*cmd['args'])
-+ ret = getattr(domain, cmd['cmd'])(*cmd['args']) if not test else 0
- device_type = cmd['device']
- if device_type in ['cpu', 'mem']:
- status[device_type] = not bool(ret)
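The hunks above thread a test flag through virt.update: in dry-run mode the new XML is never passed to conn.defineXML, missing disk images are not created, and the live device commands are skipped while the returned status still reports what would change. A hedged usage sketch; the Caller wiring and domain name are illustrative, the keyword arguments are the ones this patch adds:

.. code-block:: python

    import salt.client

    caller = salt.client.Caller()  # runs execution modules on the local minion

    # Preview an update without touching the domain definition or devices.
    status = caller.cmd("virt.update", "vm01", cpu=4, mem=2048, test=True)

    # status["definition"] is True when the stored XML would change; device
    # keys such as "cpu" and "mem" report the outcome of the (skipped) live
    # commands.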
diff --git a/salt/states/virt.py b/salt/states/virt.py
-index 55a9ad2616..819776d707 100644
+index 200c79d35c..2394d0745e 100644
--- a/salt/states/virt.py
+++ b/salt/states/virt.py
-@@ -14,6 +14,7 @@ for the generation and signing of certificates for systems running libvirt:
+@@ -12,6 +12,7 @@ for the generation and signing of certificates for systems running libvirt:
+ """
+
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
+import copy
import fnmatch
+ import logging
import os
+@@ -285,37 +286,15 @@ def defined(
+ arch=None,
+ boot=None,
+ update=True,
+- boot_dev=None,
+ ):
+ """
+ Starts an existing guest, or defines and starts a new VM with specified arguments.
-@@ -245,6 +246,187 @@ def powered_off(name, connection=None, username=None, password=None):
- connection=connection, username=username, password=password)
-
-
-+def defined(name,
-+ cpu=None,
-+ mem=None,
-+ vm_type=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ seed=True,
-+ install=True,
-+ pub_key=None,
-+ priv_key=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ os_type=None,
-+ arch=None,
-+ boot=None,
-+ update=True):
-+ '''
-+ Starts an existing guest, or defines and starts a new VM with specified arguments.
-+
+- .. versionadded:: 3001
+ .. versionadded:: sodium
-+
-+ :param name: name of the virtual machine to run
-+ :param cpu: number of CPUs for the virtual machine to create
+
+ :param name: name of the virtual machine to run
+ :param cpu: number of CPUs for the virtual machine to create
+- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
+- contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
+- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
+- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
+- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
+- an integer.
+-
+- .. code-block:: python
+-
+- {
+- 'boot': 1g,
+- 'current': 1g,
+- 'max': 1g,
+- 'slots': 10,
+- 'hard_limit': '1024'
+- 'soft_limit': '512m'
+- 'swap_hard_limit': '1g'
+- 'min_guarantee': '512mib'
+- }
+-
+- .. versionchanged:: 3002
+-
+ :param mem: amount of memory in MiB for the new virtual machine
-+ :param vm_type: force virtual machine type for the new VM. The default value is taken from
-+ the host capabilities. This could be useful for example to use ``'qemu'`` type instead
-+ of the ``'kvm'`` one.
-+ :param disk_profile:
-+ Name of the disk profile to use for the new virtual machine
-+ :param disks:
-+ List of disk to create for the new virtual machine.
-+ See :ref:`init-disk-def` for more details on the items on this list.
-+ :param nic_profile:
-+ Name of the network interfaces profile to use for the new virtual machine
-+ :param interfaces:
-+ List of network interfaces to create for the new virtual machine.
-+ See :ref:`init-nic-def` for more details on the items on this list.
-+ :param graphics:
-+ Graphics device to create for the new virtual machine.
-+ See :ref:`init-graphics-def` for more details on this dictionary
-+ :param saltenv:
-+ Fileserver environment (Default: ``'base'``).
-+ See :mod:`cp module for more details <salt.modules.cp>`
-+ :param seed: ``True`` to seed the disk image. Only used when the ``image`` parameter is provided.
-+ (Default: ``True``)
-+ :param install: install salt minion if absent (Default: ``True``)
-+ :param pub_key: public key to seed with (Default: ``None``)
-+ :param priv_key: public key to seed with (Default: ``None``)
-+ :param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
-+ :param connection: libvirt connection URI, overriding defaults
-+ :param username: username to connect with, overriding defaults
-+ :param password: password to connect with, overriding defaults
-+ :param os_type:
-+ type of virtualization as found in the ``//os/type`` element of the libvirt definition.
-+ The default value is taken from the host capabilities, with a preference for ``hvm``.
-+ Only used when creating a new virtual machine.
-+ :param arch:
-+ architecture of the virtual machine. The default value is taken from the host capabilities,
-+ but ``x86_64`` is preferred over ``i686``. Only used when creating a new virtual machine.
-+
-+ :param boot:
+ :param vm_type: force virtual machine type for the new VM. The default value is taken from
+ the host capabilities. This could be useful for example to use ``'qemu'`` type instead
+ of the ``'kvm'`` one.
+@@ -353,27 +332,23 @@ def defined(
+ but ``x86_64`` is preferred over ``i686``. Only used when creating a new virtual machine.
+
+ :param boot:
+- Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine.
+- This is an optional parameter, all of the keys are optional within the dictionary.
+-
+- Refer to :ref:`init-boot-def` for the complete boot parameters description.
+ Specifies kernel for the virtual machine, as well as boot parameters
+ for the virtual machine. This is an optional parameter, and all of the
+ keys are optional within the dictionary. If a remote path is provided
+ to kernel or initrd, salt will handle the downloading of the specified
+ remote file, and will modify the XML accordingly.
-+
+
+- To update any boot parameters, specify the new path for each. To remove any boot parameters,
+- pass a None object, for instance: 'kernel': ``None``.
+ .. code-block:: python
-+
+
+- .. versionadded:: 3000
+ {
+ 'kernel': '/root/f8-i386-vmlinuz',
+ 'initrd': '/root/f8-i386-initrd',
+ 'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/'
+ }
-+
-+ :param update: set to ``False`` to prevent updating a defined domain. (Default: ``True``)
-+
+
+ :param update: set to ``False`` to prevent updating a defined domain. (Default: ``True``)
+
+- .. deprecated:: 3001
+-
+- :param boot_dev:
+- Space separated list of devices to boot from sorted by decreasing priority.
+- Values can be ``hd``, ``fd``, ``cdrom`` or ``network``.
+-
+- By default, the value will ``"hd"``.
+-
+- .. versionadded:: 3002
+ .. deprecated:: sodium
-+
-+ .. rubric:: Example States
-+
-+ Make sure a virtual machine called ``domain_name`` is defined:
-+
-+ .. code-block:: yaml
-+
-+ domain_name:
-+ virt.defined:
-+ - cpu: 2
-+ - mem: 2048
-+ - disk_profile: prod
-+ - disks:
-+ - name: system
-+ size: 8192
-+ overlay_image: True
-+ pool: default
-+ image: /path/to/image.qcow2
-+ - name: data
-+ size: 16834
-+ - nic_profile: prod
-+ - interfaces:
-+ - name: eth0
-+ mac: 01:23:45:67:89:AB
-+ - name: eth1
-+ type: network
-+ source: admin
-+ - graphics:
-+ type: spice
-+ listen:
-+ type: address
-+ address: 192.168.0.125
-+
-+ '''
-+
-+ ret = {'name': name,
-+ 'changes': {},
-+ 'result': True if not __opts__['test'] else None,
-+ 'comment': ''
-+ }
-+
-+ try:
-+ if name in __salt__['virt.list_domains'](connection=connection, username=username, password=password):
-+ status = {}
-+ if update:
-+ status = __salt__['virt.update'](name,
-+ cpu=cpu,
-+ mem=mem,
-+ disk_profile=disk_profile,
-+ disks=disks,
-+ nic_profile=nic_profile,
-+ interfaces=interfaces,
-+ graphics=graphics,
-+ live=True,
-+ connection=connection,
-+ username=username,
-+ password=password,
-+ boot=boot,
-+ test=__opts__['test'])
-+ ret['changes'][name] = status
-+ if not status.get('definition'):
-+ ret['comment'] = 'Domain {0} unchanged'.format(name)
-+ ret['result'] = True
-+ elif status.get('errors'):
-+ ret['comment'] = 'Domain {0} updated with live update(s) failures'.format(name)
-+ else:
-+ ret['comment'] = 'Domain {0} updated'.format(name)
-+ else:
-+ if not __opts__['test']:
-+ __salt__['virt.init'](name,
-+ cpu=cpu,
-+ mem=mem,
-+ os_type=os_type,
-+ arch=arch,
-+ hypervisor=vm_type,
-+ disk=disk_profile,
-+ disks=disks,
-+ nic=nic_profile,
-+ interfaces=interfaces,
-+ graphics=graphics,
-+ seed=seed,
-+ install=install,
-+ pub_key=pub_key,
-+ priv_key=priv_key,
-+ connection=connection,
-+ username=username,
-+ password=password,
-+ boot=boot,
-+ start=False)
-+ ret['changes'][name] = {'definition': True}
-+ ret['comment'] = 'Domain {0} defined'.format(name)
-+ except libvirt.libvirtError as err:
-+ # Something bad happened when defining / updating the VM, report it
-+ ret['comment'] = six.text_type(err)
-+ ret['result'] = False
-+
-+ return ret
-+
-+
- def running(name,
- cpu=None,
- mem=None,
-@@ -326,9 +508,10 @@ def running(name,
- :param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
-
- .. versionadded:: 2019.2.0
-- :param update: set to ``True`` to update a defined module. (Default: ``False``)
-+ :param update: set to ``True`` to update a defined domain. (Default: ``False``)
+
+ .. rubric:: Example States
+
+@@ -385,7 +360,6 @@ def defined(
+ virt.defined:
+ - cpu: 2
+ - mem: 2048
+- - boot_dev: network hd
+ - disk_profile: prod
+ - disks:
+ - name: system
+@@ -438,7 +412,6 @@ def defined(
+ password=password,
+ boot=boot,
+ test=__opts__["test"],
+- boot_dev=boot_dev,
+ )
+ ret["changes"][name] = status
+ if not status.get("definition"):
+@@ -473,7 +446,6 @@ def defined(
+ password=password,
+ boot=boot,
+ start=False,
+- boot_dev=boot_dev,
+ )
+ ret["changes"][name] = {"definition": True}
+ ret["comment"] = "Domain {} defined".format(name)
+@@ -489,6 +461,7 @@ def running(
+ name,
+ cpu=None,
+ mem=None,
++ image=None,
+ vm_type=None,
+ disk_profile=None,
+ disks=None,
+@@ -506,7 +479,6 @@ def running(
+ os_type=None,
+ arch=None,
+ boot=None,
+- boot_dev=None,
+ ):
+ """
+ Starts an existing guest, or defines and starts a new VM with specified arguments.
+@@ -584,7 +556,7 @@ def running(
+ :param update: set to ``True`` to update a defined domain. (Default: ``False``)
.. versionadded:: 2019.2.0
+- .. deprecated:: 3001
+ .. deprecated:: sodium
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
-@@ -424,93 +607,74 @@ def running(name,
- address: 192.168.0.125
+@@ -676,10 +648,32 @@ def running(
- '''
--
-- ret = {'name': name,
-- 'changes': {},
-- 'result': True,
-- 'comment': '{0} is running'.format(name)
-- }
--
-- try:
-+ merged_disks = disks
+ """
+ merged_disks = disks
+ if image:
-+ default_disks = [{'system': {}}]
-+ disknames = ['system']
++ default_disks = [{"system": {}}]
++ disknames = ["system"]
+ if disk_profile:
+ disklist = copy.deepcopy(
-+ __salt__['config.get']('virt:disk', {}).get(disk_profile, default_disks))
++ __salt__["config.get"]("virt:disk", {}).get(disk_profile, default_disks)
++ )
+ disknames = disklist.keys()
-+ disk = {'name': disknames[0], 'image': image}
++ disk = {"name": disknames[0], "image": image}
+ if merged_disks:
-+ first_disk = [d for d in merged_disks if d.get('name') == disknames[0]]
-+ if first_disk and 'image' not in first_disk[0]:
-+ first_disk[0]['image'] = image
++ first_disk = [d for d in merged_disks if d.get("name") == disknames[0]]
++ if first_disk and "image" not in first_disk[0]:
++ first_disk[0]["image"] = image
+ else:
+ merged_disks.append(disk)
+ else:
+ merged_disks = [disk]
+ salt.utils.versions.warn_until(
-+ 'Sodium',
-+ '\'image\' parameter has been deprecated. Rather use the \'disks\' parameter '
-+ 'to override or define the image. \'image\' will be removed in {version}.'
++ "Sodium",
++ "'image' parameter has been deprecated. Rather use the 'disks' parameter "
++ "to override or define the image. 'image' will be removed in {version}.",
+ )
-+
-+ if not update:
-+ salt.utils.versions.warn_until('Magnesium',
-+ '\'update\' parameter has been deprecated. Future behavior will be the one of update=True. '
-+ 'It will be removed in {version}.')
-+ ret = defined(name,
-+ cpu=cpu,
-+ mem=mem,
-+ vm_type=vm_type,
-+ disk_profile=disk_profile,
-+ disks=merged_disks,
-+ nic_profile=nic_profile,
-+ interfaces=interfaces,
-+ graphics=graphics,
-+ seed=seed,
-+ install=install,
-+ pub_key=pub_key,
-+ priv_key=priv_key,
-+ os_type=os_type,
-+ arch=arch,
-+ boot=boot,
-+ update=update,
-+ connection=connection,
-+ username=username,
-+ password=password)
-+
-+ result = True if not __opts__['test'] else None
-+ if ret['result'] is None or ret['result']:
-+ changed = ret['changes'][name].get('definition', False)
- try:
- domain_state = __salt__['virt.vm_state'](name)
- if domain_state.get(name) != 'running':
-- action_msg = 'started'
-- if update:
-- status = __salt__['virt.update'](name,
-- cpu=cpu,
-- mem=mem,
-- disk_profile=disk_profile,
-- disks=disks,
-- nic_profile=nic_profile,
-- interfaces=interfaces,
-- graphics=graphics,
-- live=False,
-- connection=connection,
-- username=username,
-- password=password,
-- boot=boot)
-- if status['definition']:
-- action_msg = 'updated and started'
-- __salt__['virt.start'](name)
-- ret['changes'][name] = 'Domain {0}'.format(action_msg)
-- ret['comment'] = 'Domain {0} {1}'.format(name, action_msg)
-- else:
-- if update:
-- status = __salt__['virt.update'](name,
-- cpu=cpu,
-- mem=mem,
-- disk_profile=disk_profile,
-- disks=disks,
-- nic_profile=nic_profile,
-- interfaces=interfaces,
-- graphics=graphics,
-- connection=connection,
-- username=username,
-- password=password,
-- boot=boot)
-- ret['changes'][name] = status
-- if status.get('errors', None):
-- ret['comment'] = 'Domain {0} updated, but some live update(s) failed'.format(name)
-- elif not status['definition']:
-- ret['comment'] = 'Domain {0} exists and is running'.format(name)
-- else:
-- ret['comment'] = 'Domain {0} updated, restart to fully apply the changes'.format(name)
-- else:
-- ret['comment'] = 'Domain {0} exists and is running'.format(name)
-- except CommandExecutionError:
-- if image:
-- salt.utils.versions.warn_until(
-- 'Sodium',
-- '\'image\' parameter has been deprecated. Rather use the \'disks\' parameter '
-- 'to override or define the image. \'image\' will be removed in {version}.'
-- )
-- __salt__['virt.init'](name,
-- cpu=cpu,
-- mem=mem,
-- os_type=os_type,
-- arch=arch,
-- image=image,
-- hypervisor=vm_type,
-- disk=disk_profile,
-- disks=disks,
-- nic=nic_profile,
-- interfaces=interfaces,
-- graphics=graphics,
-- seed=seed,
-- install=install,
-- pub_key=pub_key,
-- priv_key=priv_key,
-- connection=connection,
-- username=username,
-- password=password,
-- boot=boot)
-- ret['changes'][name] = 'Domain defined and started'
-- ret['comment'] = 'Domain {0} defined and started'.format(name)
-- except libvirt.libvirtError as err:
-- # Something bad happened when starting / updating the VM, report it
-- ret['comment'] = six.text_type(err)
-- ret['result'] = False
-+ if not __opts__['test']:
-+ __salt__['virt.start'](name, connection=connection, username=username, password=password)
-+ comment = 'Domain {} started'.format(name)
-+ if not ret['comment'].endswith('unchanged'):
-+ comment = '{} and started'.format(ret['comment'])
-+ ret['comment'] = comment
-+ ret['changes'][name]['started'] = True
-+ elif not changed:
-+ ret['comment'] = 'Domain {0} exists and is running'.format(name)
-+
-+ except libvirt.libvirtError as err:
-+ # Something bad happened when starting / updating the VM, report it
-+ ret['comment'] = six.text_type(err)
-+ ret['result'] = False
- return ret
+ if not update:
+ salt.utils.versions.warn_until(
+- "Aluminium",
++ "Magnesium",
+ "'update' parameter has been deprecated. Future behavior will be the one of update=True"
+ "It will be removed in {version}.",
+ )
+@@ -701,7 +695,6 @@ def running(
+ arch=arch,
+ boot=boot,
+ update=update,
+- boot_dev=boot_dev,
+ connection=connection,
+ username=username,
+ password=password,
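After this rework virt.running is a thin layer over virt.defined: it reuses the defined/updated result and only adds the start step when the domain is not running. A sketch of driving the state ad hoc through state.single; the Caller wiring is illustrative, the state and parameter names are the ones shown above:

.. code-block:: python

    import salt.client

    caller = salt.client.Caller()
    ret = caller.cmd("state.single", "virt.running", "vm01", cpu=2, mem=2048)

    # Following the new flow, the state comment reads roughly one of:
    #   "Domain vm01 defined and started"    (domain was missing)
    #   "Domain vm01 updated and started"    (defined, changed, was stopped)
    #   "Domain vm01 exists and is running"  (nothing to do)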
+@@ -953,7 +946,7 @@ def network_defined(
+ :param username: username to connect with, overriding defaults
+ :param password: password to connect with, overriding defaults
-@@ -670,6 +834,106 @@ def reverted(name, snapshot=None, cleanup=False): # pylint: disable=redefined-o
- return ret
-
-
-+def network_defined(name,
-+ bridge,
-+ forward,
-+ vport=None,
-+ tag=None,
-+ ipv4_config=None,
-+ ipv6_config=None,
-+ autostart=True,
-+ connection=None,
-+ username=None,
-+ password=None):
-+ '''
-+ Defines a new network with specified arguments.
-+
-+ :param bridge: Bridge name
-+ :param forward: Forward mode (bridge, router, nat)
-+ :param vport: Virtualport type (Default: ``'None'``)
-+ :param tag: Vlan tag (Default: ``'None'``)
-+ :param ipv4_config:
-+ IPv4 network configuration. See the :py:func:`virt.network_define
-+ <salt.modules.virt.network_define>` function corresponding parameter documentation
-+ for more details on this dictionary.
-+ (Default: None).
-+ :param ipv6_config:
-+ IPv6 network configuration. See the :py:func:`virt.network_define
-+ <salt.modules.virt.network_define>` function corresponding parameter documentation
-+ for more details on this dictionary.
-+ (Default: None).
-+ :param autostart: Network autostart (default ``'True'``)
-+ :param connection: libvirt connection URI, overriding defaults
-+ :param username: username to connect with, overriding defaults
-+ :param password: password to connect with, overriding defaults
-+
+- .. versionadded:: 3001
+ .. versionadded:: sodium
-+
-+ .. code-block:: yaml
-+
-+ network_name:
-+ virt.network_defined
-+
-+ .. code-block:: yaml
-+
-+ network_name:
-+ virt.network_defined:
-+ - bridge: main
-+ - forward: bridge
-+ - vport: openvswitch
-+ - tag: 180
-+ - autostart: True
-+
-+ .. code-block:: yaml
-+
-+ network_name:
-+ virt.network_defined:
-+ - bridge: natted
-+ - forward: nat
-+ - ipv4_config:
-+ cidr: 192.168.42.0/24
-+ dhcp_ranges:
-+ - start: 192.168.42.10
-+ end: 192.168.42.25
-+ - start: 192.168.42.100
-+ end: 192.168.42.150
-+ - autostart: True
-+
-+ '''
-+ ret = {'name': name,
-+ 'changes': {},
-+ 'result': True if not __opts__['test'] else None,
-+ 'comment': ''
-+ }
-+
-+ try:
-+ info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password)
-+ if info and info[name]:
-+ ret['comment'] = 'Network {0} exists'.format(name)
-+ ret['result'] = True
-+ else:
-+ if not __opts__['test']:
-+ __salt__['virt.network_define'](name,
-+ bridge,
-+ forward,
-+ vport=vport,
-+ tag=tag,
-+ ipv4_config=ipv4_config,
-+ ipv6_config=ipv6_config,
-+ autostart=autostart,
-+ start=False,
-+ connection=connection,
-+ username=username,
-+ password=password)
-+ ret['changes'][name] = 'Network defined'
-+ ret['comment'] = 'Network {0} defined'.format(name)
-+ except libvirt.libvirtError as err:
-+ ret['result'] = False
-+ ret['comment'] = err.get_error_message()
-+
-+ return ret
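network_defined is idempotent: an existing network only yields the 'Network {0} exists' comment, and new networks are defined with start=False so that definition stays decoupled from starting (network_running adds the start on top). A hedged sketch of the execution-module call it wraps, with arguments mirroring the YAML examples above; the network name and Caller wiring are illustrative:

.. code-block:: python

    import salt.client

    caller = salt.client.Caller()
    caller.cmd(
        "virt.network_define",
        "net0",              # network name (hypothetical)
        "main",              # bridge
        "bridge",            # forward mode
        vport="openvswitch",
        tag=180,
        autostart=True,
        start=False,         # defined but not started
    )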
-+
-+
- def network_running(name,
- bridge,
- forward,
-@@ -715,13 +979,13 @@ def network_running(name,
.. code-block:: yaml
-- domain_name:
-- virt.network_define
-+ network_name:
-+ virt.network_running
+@@ -1170,7 +1163,7 @@ def pool_defined(
+ """
+ Defines a new pool with specified arguments.
- .. code-block:: yaml
-
- network_name:
-- virt.network_define:
-+ virt.network_running:
- - bridge: main
- - forward: bridge
- - vport: openvswitch
-@@ -731,7 +995,7 @@ def network_running(name,
- .. code-block:: yaml
-
- network_name:
-- virt.network_define:
-+ virt.network_running:
- - bridge: natted
- - forward: nat
- - ipv4_config:
-@@ -744,44 +1008,46 @@ def network_running(name,
- - autostart: True
-
- '''
-- ret = {'name': name,
-- 'changes': {},
-- 'result': True,
-- 'comment': ''
-- }
--
-- try:
-- info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password)
-- if info:
-- if info[name]['active']:
-- ret['comment'] = 'Network {0} exists and is running'.format(name)
-+ ret = network_defined(name,
-+ bridge,
-+ forward,
-+ vport=vport,
-+ tag=tag,
-+ ipv4_config=ipv4_config,
-+ ipv6_config=ipv6_config,
-+ autostart=autostart,
-+ connection=connection,
-+ username=username,
-+ password=password)
-+
-+ defined = name in ret['changes'] and ret['changes'][name].startswith('Network defined')
-+
-+ result = True if not __opts__['test'] else None
-+ if ret['result'] is None or ret['result']:
-+ try:
-+ info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password)
-+ # In the corner case where test=True and the network wasn't defined
-+ # we may not get the network in the info dict and that is normal.
-+ if info.get(name, {}).get('active', False):
-+ ret['comment'] = '{} and is running'.format(ret['comment'])
- else:
-- __salt__['virt.network_start'](name, connection=connection, username=username, password=password)
-- ret['changes'][name] = 'Network started'
-- ret['comment'] = 'Network {0} started'.format(name)
-- else:
-- __salt__['virt.network_define'](name,
-- bridge,
-- forward,
-- vport=vport,
-- tag=tag,
-- ipv4_config=ipv4_config,
-- ipv6_config=ipv6_config,
-- autostart=autostart,
-- start=True,
-- connection=connection,
-- username=username,
-- password=password)
-- ret['changes'][name] = 'Network defined and started'
-- ret['comment'] = 'Network {0} defined and started'.format(name)
-- except libvirt.libvirtError as err:
-- ret['result'] = False
-- ret['comment'] = err.get_error_message()
-+ if not __opts__['test']:
-+ __salt__['virt.network_start'](name, connection=connection, username=username, password=password)
-+ change = 'Network started'
-+ if name in ret['changes']:
-+ change = '{} and started'.format(ret['changes'][name])
-+ ret['changes'][name] = change
-+ ret['comment'] = '{} and started'.format(ret['comment'])
-+ ret['result'] = result
-+
-+ except libvirt.libvirtError as err:
-+ ret['result'] = False
-+ ret['comment'] = err.get_error_message()
-
- return ret
-
-
--def pool_running(name,
-+def pool_defined(name,
- ptype=None,
- target=None,
- permissions=None,
-@@ -792,9 +1058,9 @@ def pool_running(name,
- username=None,
- password=None):
- '''
-- Defines and starts a new pool with specified arguments.
-+ Defines a new pool with specified arguments.
-
-- .. versionadded:: 2019.2.0
+- .. versionadded:: 3001
+ .. versionadded:: sodium
:param ptype: libvirt pool type
:param target: full path to the target device or folder. (Default: ``None``)
-@@ -816,12 +1082,7 @@ def pool_running(name,
- .. code-block:: yaml
+@@ -1269,24 +1262,14 @@ def pool_defined(
- pool_name:
-- virt.pool_define
--
-- .. code-block:: yaml
--
-- pool_name:
-- virt.pool_define:
-+ virt.pool_defined:
- - ptype: netfs
- - target: /mnt/cifs
- - permissions:
-@@ -884,29 +1145,19 @@ def pool_running(name,
- username=username,
- password=password)
+ action = ""
+ if info[name]["state"] != "running":
+- if ptype in BUILDABLE_POOL_TYPES:
+- if not __opts__["test"]:
+- # Storage pools build like disk or logical will fail if the disk or LV group
+- # was already existing. Since we can't easily figure that out, just log the
+- # possible libvirt error.
+- try:
+- __salt__["virt.pool_build"](
+- name,
+- connection=connection,
+- username=username,
+- password=password,
+- )
+- except libvirt.libvirtError as err:
+- log.warning(
+- "Failed to build libvirt storage pool: %s",
+- err.get_error_message(),
+- )
+- action = ", built"
++ if not __opts__["test"]:
++ __salt__["virt.pool_build"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ action = ", built"
-- action = "started"
-- if info[name]['state'] == 'running':
-- action = "restarted"
-+ action = ''
-+ if info[name]['state'] != 'running':
- if not __opts__['test']:
-- __salt__['virt.pool_stop'](name, connection=connection, username=username, password=password)
--
-- if not __opts__['test']:
-- __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
-- __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
-+ __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
-+ action = ', built'
+ action = (
+ "{}, autostart flag changed".format(action)
+@@ -1322,22 +1305,9 @@ def pool_defined(
+ password=password,
+ )
-- autostart_str = ', autostart flag changed' if needs_autostart else ''
-- ret['changes'][name] = 'Pool updated, built{0} and {1}'.format(autostart_str, action)
-- ret['comment'] = 'Pool {0} updated, built{1} and {2}'.format(name, autostart_str, action)
-+ action = '{}, autostart flag changed'.format(action) if needs_autostart else action
-+ ret['changes'][name] = 'Pool updated{0}'.format(action)
-+ ret['comment'] = 'Pool {0} updated{1}'.format(name, action)
-
- else:
-- if info[name]['state'] == 'running':
-- ret['comment'] = 'Pool {0} unchanged and is running'.format(name)
-- ret['result'] = True
-- else:
-- ret['changes'][name] = 'Pool started'
-- ret['comment'] = 'Pool {0} started'.format(name)
-- if not __opts__['test']:
-- __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
-+ ret['comment'] = 'Pool {0} unchanged'.format(name)
-+ ret['result'] = True
- else:
- needs_autostart = autostart
- if not __opts__['test']:
-@@ -932,17 +1183,12 @@ def pool_running(name,
- connection=connection,
- username=username,
- password=password)
--
-- __salt__['virt.pool_start'](name,
-- connection=connection,
-- username=username,
-- password=password)
+- if ptype in BUILDABLE_POOL_TYPES:
+- # Storage pools build like disk or logical will fail if the disk or LV group
+- # was already existing. Since we can't easily figure that out, just log the
+- # possible libvirt error.
+- try:
+- __salt__["virt.pool_build"](
+- name,
+- connection=connection,
+- username=username,
+- password=password,
+- )
+- except libvirt.libvirtError as err:
+- log.warning(
+- "Failed to build libvirt storage pool: %s",
+- err.get_error_message(),
+- )
++ __salt__["virt.pool_build"](
++ name, connection=connection, username=username, password=password
++ )
if needs_autostart:
-- ret['changes'][name] = 'Pool defined, started and marked for autostart'
-- ret['comment'] = 'Pool {0} defined, started and marked for autostart'.format(name)
-+ ret['changes'][name] = 'Pool defined, marked for autostart'
-+ ret['comment'] = 'Pool {0} defined, marked for autostart'.format(name)
- else:
-- ret['changes'][name] = 'Pool defined and started'
-- ret['comment'] = 'Pool {0} defined and started'.format(name)
-+ ret['changes'][name] = 'Pool defined'
-+ ret['comment'] = 'Pool {0} defined'.format(name)
-
- if needs_autostart:
- if not __opts__['test']:
-@@ -958,6 +1204,117 @@ def pool_running(name,
+ ret["changes"][name] = "Pool defined, marked for autostart"
+ ret["comment"] = "Pool {} defined, marked for autostart".format(name)
+@@ -1494,6 +1464,138 @@ def pool_running(
return ret
-+def pool_running(name,
-+ ptype=None,
-+ target=None,
-+ permissions=None,
-+ source=None,
-+ transient=False,
-+ autostart=True,
-+ connection=None,
-+ username=None,
-+ password=None):
-+ '''
++def pool_running(
++ name,
++ ptype=None,
++ target=None,
++ permissions=None,
++ source=None,
++ transient=False,
++ autostart=True,
++ connection=None,
++ username=None,
++ password=None,
++):
++ """
+ Defines and starts a new pool with specified arguments.
+
+ .. versionadded:: 2019.2.0
@@ -838,1540 +451,2026 @@ index 55a9ad2616..819776d707 100644
+ format: cifs
+ - autostart: True
+
-+ '''
-+ ret = pool_defined(name,
-+ ptype=ptype,
-+ target=target,
-+ permissions=permissions,
-+ source=source,
-+ transient=transient,
-+ autostart=autostart,
-+ connection=connection,
-+ username=username,
-+ password=password)
-+ defined = name in ret['changes'] and ret['changes'][name].startswith('Pool defined')
-+ updated = name in ret['changes'] and ret['changes'][name].startswith('Pool updated')
++ """
++ ret = pool_defined(
++ name,
++ ptype=ptype,
++ target=target,
++ permissions=permissions,
++ source=source,
++ transient=transient,
++ autostart=autostart,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ defined = name in ret["changes"] and ret["changes"][name].startswith("Pool defined")
++ updated = name in ret["changes"] and ret["changes"][name].startswith("Pool updated")
+
-+ result = True if not __opts__['test'] else None
-+ if ret['result'] is None or ret['result']:
++ result = True if not __opts__["test"] else None
++ if ret["result"] is None or ret["result"]:
+ try:
-+ info = __salt__['virt.pool_info'](name, connection=connection, username=username, password=password)
-+ action = 'started'
++ info = __salt__["virt.pool_info"](
++ name, connection=connection, username=username, password=password
++ )
++ action = "started"
+ # In the corner case where test=True and the pool wasn't defined
+ # we may not get our pool in the info dict and that is normal.
-+ is_running = info.get(name, {}).get('state', 'stopped') == 'running'
++ is_running = info.get(name, {}).get("state", "stopped") == "running"
+ if is_running:
+ if updated:
-+ action = 'built, restarted'
-+ if not __opts__['test']:
-+ __salt__['virt.pool_stop'](name, connection=connection, username=username, password=password)
-+ if not __opts__['test']:
-+ __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
++ action = "built, restarted"
++ if not __opts__["test"]:
++ __salt__["virt.pool_stop"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ if not __opts__["test"]:
++ __salt__["virt.pool_build"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
+ else:
-+ action = 'already running'
++ action = "already running"
+ result = True
+
+ if not is_running or updated or defined:
-+ if not __opts__['test']:
-+ __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
++ if not __opts__["test"]:
++ __salt__["virt.pool_start"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
+
-+ comment = 'Pool {0}'.format(name)
-+ change = 'Pool'
-+ if name in ret['changes']:
-+ comment = '{0},'.format(ret['comment'])
-+ change = '{0},'.format(ret['changes'][name])
++ comment = "Pool {}".format(name)
++ change = "Pool"
++ if name in ret["changes"]:
++ comment = "{},".format(ret["comment"])
++ change = "{},".format(ret["changes"][name])
+
-+ if action != 'already running':
-+ ret['changes'][name] = '{0} {1}'.format(change, action)
++ if action != "already running":
++ ret["changes"][name] = "{} {}".format(change, action)
+
-+ ret['comment'] = '{0} {1}'.format(comment, action)
-+ ret['result'] = result
++ ret["comment"] = "{} {}".format(comment, action)
++ ret["result"] = result
+
+ except libvirt.libvirtError as err:
-+ ret['comment'] = err.get_error_message()
-+ ret['result'] = False
++ ret["comment"] = err.get_error_message()
++ ret["result"] = False
+
+ return ret
+
+
- def pool_deleted(name,
- purge=False,
- connection=None,
+ def pool_deleted(name, purge=False, connection=None, username=None, password=None):
+ """
+ Deletes a virtual storage pool.
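With pool_defined factored out, the re-added pool_running above reduces to "define, then make sure it runs". A condensed sketch of that control flow, with the virt module calls injected as plain callables; the real state additionally honours __opts__["test"] and threads connection, username and password through every call:

.. code-block:: python

    def run_pool(pool_defined, pool_info, stop, build, start, name, **kwargs):
        ret = pool_defined(name, **kwargs)
        change = ret["changes"].get(name, "")
        defined = change.startswith("Pool defined")
        updated = change.startswith("Pool updated")
        state = pool_info(name).get(name, {}).get("state", "stopped")
        running = state == "running"
        if running and updated:
            # A running pool whose definition changed is stopped and rebuilt
            # before being started again.
            stop(name)
            build(name)
        if not running or updated or defined:
            start(name)
        return ret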
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index d762dcc479..8690154662 100644
+index e9e73d7b5d..db6ba007b7 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -1272,6 +1272,32 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- define_mock = MagicMock(return_value=True)
- self.mock_conn.defineXML = define_mock
+@@ -1849,40 +1849,21 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(
+ {
+ "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm"),
++ virt.update("my vm"),
+ )
-+ # No parameter passed case
-+ self.assertEqual({
-+ 'definition': False,
-+ 'disk': {'attached': [], 'detached': []},
-+ 'interface': {'attached': [], 'detached': []}
-+ }, virt.update('my vm'))
-+
-+ # Same parameters passed than in default virt.defined state case
-+ self.assertEqual({
-+ 'definition': False,
-+ 'disk': {'attached': [], 'detached': []},
-+ 'interface': {'attached': [], 'detached': []}
-+ }, virt.update('my vm',
-+ cpu=None,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None))
-+
- # Update vcpus case
- setvcpus_mock = MagicMock(return_value=0)
- domain_mock.setVcpusFlags = setvcpus_mock
+- # mem + cpu case
+- define_mock.reset_mock()
+- domain_mock.setMemoryFlags.return_value = 0
+- domain_mock.setVcpusFlags.return_value = 0
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- "mem": True,
+- "cpu": True,
+- },
+- virt.update("my_vm", mem=2048, cpu=2),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual("2", setxml.find("vcpu").text)
+- self.assertEqual("2147483648", setxml.find("memory").text)
+- self.assertEqual(2048 * 1024, domain_mock.setMemoryFlags.call_args[0][0])
+-
+ # Same parameters passed as in the default virt.defined state case
+ self.assertEqual(
+ {
+ "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+ virt.update(
+- "my_vm",
++ "my vm",
+ cpu=None,
+ mem=None,
+ disk_profile=None,
+@@ -1905,829 +1886,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ {
+ "definition": True,
+ "cpu": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", cpu=2),
++ virt.update("my vm", cpu=2),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual(setxml.find("vcpu").text, "2")
+ self.assertEqual(setvcpus_mock.call_args[0][0], 2)
+
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- }
+-
+- # Update boot devices case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot_dev="cdrom network hd"),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- ["cdrom", "network", "hd"],
+- [node.get("dev") for node in setxml.findall("os/boot")],
+- )
+-
+- # Update unchanged boot devices case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot_dev="hd"),
+- )
+- define_mock.assert_not_called()
+-
+- # Update with boot parameter case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot=boot),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- )
+-
+- boot_uefi = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd",
+- }
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot=boot_uefi),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd"
+- )
+- self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
+- self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
+- self.assertEqual(
+- setxml.find("os").find("nvram").attrib["template"],
+- "/usr/share/OVMF/OVMF_VARS.ms.fd",
+- )
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot={"efi": True}),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi")
+-
+- invalid_boot = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "initrd": "/root/f8-i386-initrd",
+- }
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot=invalid_boot)
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot={"efi": "Not a boolean value"})
+-
+- # Update memtune parameter case
+- memtune = {
+- "soft_limit": "0.5g",
+- "hard_limit": "1024",
+- "swap_hard_limit": "2048m",
+- "min_guarantee": "1 g",
+- }
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=memtune),
+- )
+-
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3))
+- )
+- self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes")
+- self.assertEqual(
+- setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2)
+- )
+- self.assertEqual(
+- setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2)
+- )
+- self.assertEqual(
+- setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
+- )
+-
+- invalid_unit = {"soft_limit": "2HB"}
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", mem=invalid_unit)
+-
+- invalid_number = {
+- "soft_limit": "3.4.MB",
+- }
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", mem=invalid_number)
+-
+- # Update memory case
+- setmem_mock = MagicMock(return_value=0)
+- domain_mock.setMemoryFlags = setmem_mock
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "mem": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=2048),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("memory").text, str(2048 * 1024 ** 2))
+- self.assertEqual(setxml.find("memory").get("unit"), "bytes")
+- self.assertEqual(setmem_mock.call_args[0][0], 2048 * 1024)
+-
+- mem_dict = {"boot": "0.5g", "current": "2g", "max": "1g", "slots": 12}
+- self.assertEqual(
+- {
+- "definition": True,
+- "mem": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=mem_dict),
+- )
+-
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("memory").get("unit"), "bytes")
+- self.assertEqual(setxml.find("memory").text, str(int(0.5 * 1024 ** 3)))
+- self.assertEqual(setxml.find("maxMemory").text, str(1 * 1024 ** 3))
+- self.assertEqual(setxml.find("currentMemory").text, str(2 * 1024 ** 3))
+-
+- max_slot_reverse = {
+- "slots": "10",
+- "max": "3096m",
+- }
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=max_slot_reverse),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
+- self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
+-
+- # Update disks case
+- devattach_mock = MagicMock(return_value=0)
+- devdetach_mock = MagicMock(return_value=0)
+- domain_mock.attachDevice = devattach_mock
+- domain_mock.detachDevice = devdetach_mock
+- mock_chmod = MagicMock()
+- mock_run = MagicMock()
+- with patch.dict(
+- os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()}
+- ): # pylint: disable=no-member
+- with patch.dict(
+- virt.__salt__, {"cmd.run": mock_run}
+- ): # pylint: disable=no-member
+- ret = virt.update(
+- "my_vm",
+- disk_profile="default",
+- disks=[
+- {
+- "name": "cddrive",
+- "device": "cdrom",
+- "source_file": None,
+- "model": "ide",
+- },
+- {"name": "added", "size": 2048},
+- ],
+- )
+- added_disk_path = os.path.join(
+- virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2"
+- ) # pylint: disable=no-member
+- self.assertEqual(
+- mock_run.call_args[0][0],
+- 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path),
+- )
+- self.assertEqual(mock_chmod.call_args[0][0], added_disk_path)
+- self.assertListEqual(
+- [None, os.path.join(root_dir, "my_vm_added.qcow2")],
+- [
+- ET.fromstring(disk).find("source").get("file")
+- if str(disk).find("
--'''
-+"""
- self.mock_conn.listAllStoragePools.return_value = [pool_mock]
- # pylint: enable=no-member
-
-- pool = virt.pool_info('ceph')
-- self.assertEqual({'ceph': {
-- 'uuid': 'some-uuid',
-- 'state': 'inactive',
-- 'capacity': 0,
-- 'allocation': 0,
-- 'free': 0,
-- 'autostart': True,
-- 'persistent': True,
-- 'type': 'rbd',
-- 'target_path': None}}, pool)
-+ pool = virt.pool_info("ceph")
-+ self.assertEqual(
-+ {
-+ "ceph": {
-+ "uuid": "some-uuid",
-+ "state": "inactive",
-+ "capacity": 0,
-+ "allocation": 0,
-+ "free": 0,
-+ "autostart": True,
-+ "persistent": True,
-+ "type": "rbd",
-+ "target_path": None,
-+ }
-+ },
-+ pool,
-+ )
-
- def test_pool_info_notfound(self):
-- '''
-+ """
- Test virt.pool_info() when the pool can't be found
-- '''
-+ """
- # pylint: disable=no-member
- self.mock_conn.listAllStoragePools.return_value = []
- # pylint: enable=no-member
-- pool = virt.pool_info('foo')
-+ pool = virt.pool_info("foo")
- self.assertEqual({}, pool)
-
- def test_pool_info_all(self):
-- '''
-+ """
- Test virt.pool_info()
-- '''
-+ """
- # pylint: disable=no-member
- pool_mocks = []
- for i in range(2):
- pool_mock = MagicMock()
-- pool_mock.name.return_value = 'pool{0}'.format(i)
-- pool_mock.UUIDString.return_value = 'some-uuid-{0}'.format(i)
-+ pool_mock.name.return_value = "pool{0}".format(i)
-+ pool_mock.UUIDString.return_value = "some-uuid-{0}".format(i)
- pool_mock.info.return_value = [0, 1234, 5678, 123]
- pool_mock.autostart.return_value = True
- pool_mock.isPersistent.return_value = True
-- pool_mock.XMLDesc.return_value = '''
-+ pool_mock.XMLDesc.return_value = """
- default
- d92682d0-33cf-4e10-9837-a216c463e158
- 854374301696
-@@ -2811,95 +4177,143 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- 0
-
-
--'''
-+"""
- pool_mocks.append(pool_mock)
- self.mock_conn.listAllStoragePools.return_value = pool_mocks
- # pylint: enable=no-member
-
- pool = virt.pool_info()
-- self.assertEqual({
-- 'pool0':
-+ self.assertEqual(
- {
-- 'uuid': 'some-uuid-0',
-- 'state': 'inactive',
-- 'capacity': 1234,
-- 'allocation': 5678,
-- 'free': 123,
-- 'autostart': True,
-- 'persistent': True,
-- 'type': 'dir',
-- 'target_path': '/srv/vms'
-- }, 'pool1': {
-- 'uuid': 'some-uuid-1',
-- 'state': 'inactive',
-- 'capacity': 1234,
-- 'allocation': 5678,
-- 'free': 123,
-- 'autostart': True,
-- 'persistent': True,
-- 'type': 'dir',
-- 'target_path': '/srv/vms'
-- }
-- }, pool)
-+ "pool0": {
-+ "uuid": "some-uuid-0",
-+ "state": "inactive",
-+ "capacity": 1234,
-+ "allocation": 5678,
-+ "free": 123,
-+ "autostart": True,
-+ "persistent": True,
-+ "type": "dir",
-+ "target_path": "/srv/vms",
-+ },
-+ "pool1": {
-+ "uuid": "some-uuid-1",
-+ "state": "inactive",
-+ "capacity": 1234,
-+ "allocation": 5678,
-+ "free": 123,
-+ "autostart": True,
-+ "persistent": True,
-+ "type": "dir",
-+ "target_path": "/srv/vms",
-+ },
-+ },
-+ pool,
-+ )
-
- def test_pool_get_xml(self):
-- '''
-+ """
- Test virt.pool_get_xml
-- '''
-+ """
- pool_mock = MagicMock()
-- pool_mock.XMLDesc.return_value = 'Raw XML'
-+ pool_mock.XMLDesc.return_value = "Raw XML"
- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
-
-- self.assertEqual('Raw XML', virt.pool_get_xml('default'))
-+ self.assertEqual("Raw XML", virt.pool_get_xml("default"))
-
- def test_pool_list_volumes(self):
-- '''
-+ """
- Test virt.pool_list_volumes
-- '''
-- names = ['volume1', 'volume2']
-+ """
-+ names = ["volume1", "volume2"]
- mock_pool = MagicMock()
- # pylint: disable=no-member
- mock_pool.listVolumes.return_value = names
- self.mock_conn.storagePoolLookupByName.return_value = mock_pool
- # pylint: enable=no-member
-- self.assertEqual(names, virt.pool_list_volumes('default'))
-+ self.assertEqual(names, virt.pool_list_volumes("default"))
-
-- @patch('salt.modules.virt._is_kvm_hyper', return_value=True)
-- @patch('salt.modules.virt._is_xen_hyper', return_value=False)
-- def test_get_hypervisor(self, isxen_mock, iskvm_mock):
-- '''
-+ @patch("salt.modules.virt._is_bhyve_hyper", return_value=False)
-+ @patch("salt.modules.virt._is_kvm_hyper", return_value=True)
-+ @patch("salt.modules.virt._is_xen_hyper", return_value=False)
-+ def test_get_hypervisor(self, isxen_mock, iskvm_mock, is_bhyve_mock):
-+ """
- test the virt.get_hypervisor() function
-- '''
-- self.assertEqual('kvm', virt.get_hypervisor())
-+ """
-+ self.assertEqual("kvm", virt.get_hypervisor())
-
- iskvm_mock.return_value = False
- self.assertIsNone(virt.get_hypervisor())
-
-+ is_bhyve_mock.return_value = False
-+ self.assertIsNone(virt.get_hypervisor())
-+
- isxen_mock.return_value = True
-- self.assertEqual('xen', virt.get_hypervisor())
-+ self.assertEqual("xen", virt.get_hypervisor())
-
- def test_pool_delete(self):
-- '''
-+ """
- Test virt.pool_delete function
-- '''
-+ """
+@@ -3894,6 +4248,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ """
mock_pool = MagicMock()
mock_pool.delete = MagicMock(return_value=0)
+ mock_pool.XMLDesc.return_value = ""
self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool)
-- res = virt.pool_delete('test-pool')
-+ res = virt.pool_delete("test-pool")
+ res = virt.pool_delete("test-pool")
+@@ -3907,12 +4262,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL
+ )
+
+- def test_pool_undefine_secret(self):
++ def test_pool_delete_secret(self):
+ """
+- Test virt.pool_undefine function where the pool has a secret
++ Test virt.pool_delete function where the pool has a secret
+ """
+ mock_pool = MagicMock()
+- mock_pool.undefine = MagicMock(return_value=0)
++ mock_pool.delete = MagicMock(return_value=0)
+ mock_pool.XMLDesc.return_value = """
+
+ test-ses
+@@ -3929,11 +4284,16 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ mock_undefine = MagicMock(return_value=0)
+ self.mock_conn.secretLookupByUsage.return_value.undefine = mock_undefine
+
+- res = virt.pool_undefine("test-ses")
++ res = virt.pool_delete("test-ses")
self.assertTrue(res)
-- self.mock_conn.storagePoolLookupByName.assert_called_once_with('test-pool')
-+ self.mock_conn.storagePoolLookupByName.assert_called_once_with("test-pool")
-
- # Shouldn't be called with another parameter so far since those are not implemented
- # and thus throwing exceptions.
-- mock_pool.delete.assert_called_once_with(self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL)
-+ mock_pool.delete.assert_called_once_with(
-+ self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL
-+ )
-+
-+ def test_pool_delete_secret(self):
-+ """
-+ Test virt.pool_delete function where the pool has a secret
-+ """
-+ mock_pool = MagicMock()
-+ mock_pool.delete = MagicMock(return_value=0)
-+ mock_pool.XMLDesc.return_value = """
-+
-+ test-ses
-+
-+
-+ libvirt-pool
-+
-+
-+
-+
-+
-+ """
-+ self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool)
-+ mock_undefine = MagicMock(return_value=0)
-+ self.mock_conn.secretLookupByUsage.return_value.undefine = mock_undefine
-+
-+ res = virt.pool_delete("test-ses")
-+ self.assertTrue(res)
-+
-+ self.mock_conn.storagePoolLookupByName.assert_called_once_with("test-ses")
+ self.mock_conn.storagePoolLookupByName.assert_called_once_with("test-ses")
+- mock_pool.undefine.assert_called_once_with()
+
+ # Shouldn't be called with other parameters so far, since those are not implemented
+ # and would thus throw exceptions.
+ mock_pool.delete.assert_called_once_with(
+ self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL
+ )
-+
-+ self.mock_conn.secretLookupByUsage.assert_called_once_with(
-+ self.mock_libvirt.VIR_SECRET_USAGE_TYPE_CEPH, "pool_test-ses"
-+ )
-+ mock_undefine.assert_called_once()
- def test_full_info(self):
-- '''
-+ """
- Test virt.full_info
-- '''
-- xml = '''
-+ """
-+ xml = """
- 28deee33-4859-4f23-891c-ee239cffec94
- test-vm
- destroy
-@@ -2928,10 +4342,10 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
-
-
-- '''
-+ """
- self.set_mock_vm("test-vm", xml)
-
-- qemu_infos = '''[{
-+ qemu_infos = """[{
- "virtual-size": 25769803776,
- "filename": "/disks/test.qcow2",
- "cluster-size": 65536,
-@@ -2966,69 +4380,79 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- }
- },
- "dirty-flag": false
-- }]'''
-+ }]"""
-
-- self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member
-+ self.mock_popen.communicate.return_value = [
-+ qemu_infos
-+ ] # pylint: disable=no-member
-
-- self.mock_conn.getInfo = MagicMock(return_value=['x86_64', 4096, 8, 2712, 1, 2, 4, 2])
-+ self.mock_conn.getInfo = MagicMock(
-+ return_value=["x86_64", 4096, 8, 2712, 1, 2, 4, 2]
-+ )
-
- actual = virt.full_info()
-
- # Check that qemu-img was called with the proper parameters
-- qemu_img_call = [call for call in self.mock_subprocess.Popen.call_args_list if 'qemu-img' in call[0][0]][0]
-- self.assertIn('info', qemu_img_call[0][0])
-- self.assertIn('-U', qemu_img_call[0][0])
-+ qemu_img_call = [
-+ call
-+ for call in self.mock_subprocess.Popen.call_args_list
-+ if "qemu-img" in call[0][0]
-+ ][0]
-+ self.assertIn("info", qemu_img_call[0][0])
-+ self.assertIn("-U", qemu_img_call[0][0])
-
- # Test the hypervisor infos
-- self.assertEqual(2816, actual['freemem'])
-- self.assertEqual(6, actual['freecpu'])
-- self.assertEqual(4, actual['node_info']['cpucores'])
-- self.assertEqual(2712, actual['node_info']['cpumhz'])
-- self.assertEqual('x86_64', actual['node_info']['cpumodel'])
-- self.assertEqual(8, actual['node_info']['cpus'])
-- self.assertEqual(2, actual['node_info']['cputhreads'])
-- self.assertEqual(1, actual['node_info']['numanodes'])
-- self.assertEqual(4096, actual['node_info']['phymemory'])
-- self.assertEqual(2, actual['node_info']['sockets'])
-+ self.assertEqual(2816, actual["freemem"])
-+ self.assertEqual(6, actual["freecpu"])
-+ self.assertEqual(4, actual["node_info"]["cpucores"])
-+ self.assertEqual(2712, actual["node_info"]["cpumhz"])
-+ self.assertEqual("x86_64", actual["node_info"]["cpumodel"])
-+ self.assertEqual(8, actual["node_info"]["cpus"])
-+ self.assertEqual(2, actual["node_info"]["cputhreads"])
-+ self.assertEqual(1, actual["node_info"]["numanodes"])
-+ self.assertEqual(4096, actual["node_info"]["phymemory"])
-+ self.assertEqual(2, actual["node_info"]["sockets"])
-
- # Test the vm_info output:
-- self.assertEqual(2, actual['vm_info']['test-vm']['cpu'])
-- self.assertEqual(1234, actual['vm_info']['test-vm']['cputime'])
-- self.assertEqual(1024 * 1024, actual['vm_info']['test-vm']['mem'])
-- self.assertEqual(2048 * 1024, actual['vm_info']['test-vm']['maxMem'])
-- self.assertEqual('shutdown', actual['vm_info']['test-vm']['state'])
-- self.assertEqual('28deee33-4859-4f23-891c-ee239cffec94', actual['vm_info']['test-vm']['uuid'])
-- self.assertEqual('destroy', actual['vm_info']['test-vm']['on_crash'])
-- self.assertEqual('restart', actual['vm_info']['test-vm']['on_reboot'])
-- self.assertEqual('destroy', actual['vm_info']['test-vm']['on_poweroff'])
-+ self.assertEqual(2, actual["vm_info"]["test-vm"]["cpu"])
-+ self.assertEqual(1234, actual["vm_info"]["test-vm"]["cputime"])
-+ self.assertEqual(1024 * 1024, actual["vm_info"]["test-vm"]["mem"])
-+ self.assertEqual(2048 * 1024, actual["vm_info"]["test-vm"]["maxMem"])
-+ self.assertEqual("shutdown", actual["vm_info"]["test-vm"]["state"])
-+ self.assertEqual(
-+ "28deee33-4859-4f23-891c-ee239cffec94", actual["vm_info"]["test-vm"]["uuid"]
-+ )
-+ self.assertEqual("destroy", actual["vm_info"]["test-vm"]["on_crash"])
-+ self.assertEqual("restart", actual["vm_info"]["test-vm"]["on_reboot"])
-+ self.assertEqual("destroy", actual["vm_info"]["test-vm"]["on_poweroff"])
-
- # Test the nics
-- nic = actual['vm_info']['test-vm']['nics']['ac:de:48:b6:8b:59']
-- self.assertEqual('bridge', nic['type'])
-- self.assertEqual('ac:de:48:b6:8b:59', nic['mac'])
-+ nic = actual["vm_info"]["test-vm"]["nics"]["ac:de:48:b6:8b:59"]
-+ self.assertEqual("bridge", nic["type"])
-+ self.assertEqual("ac:de:48:b6:8b:59", nic["mac"])
-
- # Test the disks
-- disks = actual['vm_info']['test-vm']['disks']
-- disk = disks.get('vda')
-- self.assertEqual('/disks/test.qcow2', disk['file'])
-- self.assertEqual('disk', disk['type'])
-- self.assertEqual('/disks/mybacking.qcow2', disk['backing file']['file'])
-- cdrom = disks.get('hda')
-- self.assertEqual('/disks/test-cdrom.iso', cdrom['file'])
-- self.assertEqual('cdrom', cdrom['type'])
-- self.assertFalse('backing file' in cdrom.keys())
-+ disks = actual["vm_info"]["test-vm"]["disks"]
-+ disk = disks.get("vda")
-+ self.assertEqual("/disks/test.qcow2", disk["file"])
-+ self.assertEqual("disk", disk["type"])
-+ self.assertEqual("/disks/mybacking.qcow2", disk["backing file"]["file"])
-+ cdrom = disks.get("hda")
-+ self.assertEqual("/disks/test-cdrom.iso", cdrom["file"])
-+ self.assertEqual("cdrom", cdrom["type"])
-+ self.assertFalse("backing file" in cdrom.keys())
-
- # Test the graphics
-- graphics = actual['vm_info']['test-vm']['graphics']
-- self.assertEqual('vnc', graphics['type'])
-- self.assertEqual('5900', graphics['port'])
-- self.assertEqual('0.0.0.0', graphics['listen'])
-+ graphics = actual["vm_info"]["test-vm"]["graphics"]
-+ self.assertEqual("vnc", graphics["type"])
-+ self.assertEqual("5900", graphics["port"])
-+ self.assertEqual("0.0.0.0", graphics["listen"])
-
- def test_pool_update(self):
-- '''
-+ """
- Test the pool_update function
-- '''
-- current_xml = '''
-+ """
-+ current_xml = """
- default
- 20fbe05c-ab40-418a-9afa-136d512f0ede
- 1999421108224
-@@ -3044,29 +4468,31 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- 100
-
-
-- '''
--
-- expected_xml = '' \
-- 'default' \
-- '20fbe05c-ab40-418a-9afa-136d512f0ede' \
-- '1999421108224' \
-- '713207042048' \
-- '1286214066176' \
-- '' \
-- '/mnt/cifs' \
-- '' \
-- '0774' \
-- '1234' \
-- '123' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- ''
-+ """
-+
-+ expected_xml = (
-+ ''
-+ "default"
-+ "20fbe05c-ab40-418a-9afa-136d512f0ede"
-+ '1999421108224'
-+ '713207042048'
-+ '1286214066176'
-+ ""
-+ "/mnt/cifs"
-+ ""
-+ "0774"
-+ "1234"
-+ "123"
-+ ""
-+ ""
-+ ""
-+ ''
-+ ''
-+ ''
-+ ''
-+ ""
-+ ""
-+ )
-
- mocked_pool = MagicMock()
- mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
-@@ -3074,21 +4500,24 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.mock_conn.storagePoolDefineXML = MagicMock()
-
- self.assertTrue(
-- virt.pool_update('default',
-- 'netfs',
-- target='/mnt/cifs',
-- permissions={'mode': '0774', 'owner': '1234', 'group': '123'},
-- source_format='cifs',
-- source_dir='samba_share',
-- source_hosts=['one.example.com', 'two.example.com']))
-+ virt.pool_update(
-+ "default",
-+ "netfs",
-+ target="/mnt/cifs",
-+ permissions={"mode": "0774", "owner": "1234", "group": "123"},
-+ source_format="cifs",
-+ source_dir="samba_share",
-+ source_hosts=["one.example.com", "two.example.com"],
-+ )
-+ )
- self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
-
- def test_pool_update_nochange(self):
-- '''
-+ """
- Test the pool_update function when no change is needed
-- '''
-+ """
-
-- current_xml = '''
-+ current_xml = """
- default
- 20fbe05c-ab40-418a-9afa-136d512f0ede
- 1999421108224
-@@ -3104,7 +4533,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- 100
-
-
-- '''
-+ """
-
- mocked_pool = MagicMock()
- mocked_pool.XMLDesc = MagicMock(return_value=current_xml)
-@@ -3112,18 +4541,21 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.mock_conn.storagePoolDefineXML = MagicMock()
-
- self.assertFalse(
-- virt.pool_update('default',
-- 'dir',
-- target='/path/to/pool',
-- permissions={'mode': '0775', 'owner': '0', 'group': '100'},
-- test=True))
-+ virt.pool_update(
-+ "default",
-+ "dir",
-+ target="/path/to/pool",
-+ permissions={"mode": "0775", "owner": "0", "group": "100"},
-+ test=True,
-+ )
-+ )
- self.mock_conn.storagePoolDefineXML.assert_not_called()
-
- def test_pool_update_password(self):
-- '''
-+ """
- Test the pool_update function, where the password only is changed
-- '''
-- current_xml = '''
-+ """
-+ current_xml = """
- default
- 20fbe05c-ab40-418a-9afa-136d512f0ede
- 1999421108224
-@@ -3137,23 +4569,25 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
-
+ self.mock_conn.secretLookupByUsage.assert_called_once_with(
+ self.mock_libvirt.VIR_SECRET_USAGE_TYPE_CEPH, "pool_test-ses"
+@@ -4202,6 +4562,24 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-- '''
--
-- expected_xml = '' \
-- 'default' \
-- '20fbe05c-ab40-418a-9afa-136d512f0ede' \
-- '1999421108224' \
-- '713207042048' \
-- '1286214066176' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- 'iscsi-images' \
-- '' \
-- ''
-+ """
-+
+ """
+
+ expected_xml = (
+ ''
+ "default"
@@ -14977,959 +3313,38 @@ index 3e9bd5ef49..d3988464f6 100644
+ ""
+ ""
+ )
-
++
mock_secret = MagicMock()
self.mock_conn.secretLookupByUUIDString = MagicMock(return_value=mock_secret)
-@@ -3163,21 +4597,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mocked_pool)
- self.mock_conn.storagePoolDefineXML = MagicMock()
-- self.assertTrue(
-- virt.pool_update('default',
-- 'rbd',
-- source_name='iscsi-images',
-- source_hosts=['ses4.tf.local', 'ses5.tf.local'],
-- source_auth={'username': 'libvirt',
-- 'password': 'c2VjcmV0'}))
-- self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
-- mock_secret.setValue.assert_called_once_with(b'secret')
-+ self.assertFalse(
-+ virt.pool_update(
-+ "default",
-+ "rbd",
-+ source_name="iscsi-images",
-+ source_hosts=["ses4.tf.local", "ses5.tf.local"],
-+ source_auth={"username": "libvirt", "password": "c2VjcmV0"},
-+ )
-+ )
-+ self.mock_conn.storagePoolDefineXML.assert_not_called()
-+ mock_secret.setValue.assert_called_once_with(b"secret")
+@@ -4222,23 +4600,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.mock_conn.storagePoolDefineXML.assert_not_called()
+ mock_secret.setValue.assert_called_once_with(b"secret")
+- # Case where the secret can't be found
+- self.mock_conn.secretLookupByUUIDString = MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("secret not found")
+- )
+- self.assertFalse(
+- virt.pool_update(
+- "default",
+- "rbd",
+- source_name="iscsi-images",
+- source_hosts=["ses4.tf.local", "ses5.tf.local"],
+- source_auth={"username": "libvirt", "password": "c2VjcmV0"},
+- )
+- )
+- self.mock_conn.storagePoolDefineXML.assert_not_called()
+- self.mock_conn.secretDefineXML.assert_called_once()
+- mock_secret.setValue.assert_called_once_with(b"secret")
+-
def test_pool_update_password_create(self):
-- '''
-+ """
+ """
Test the pool_update function, where the password only is changed
-- '''
-- current_xml = '''
-+ """
-+ current_xml = """
- default
- 20fbe05c-ab40-418a-9afa-136d512f0ede
- 1999421108224
-@@ -3188,23 +4624,25 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
-
-
-- '''
--
-- expected_xml = '' \
-- 'default' \
-- '20fbe05c-ab40-418a-9afa-136d512f0ede' \
-- '1999421108224' \
-- '713207042048' \
-- '1286214066176' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- '' \
-- 'iscsi-images' \
-- '' \
-- ''
-+ """
-+
-+ expected_xml = (
-+ ''
-+ "default"
-+ "20fbe05c-ab40-418a-9afa-136d512f0ede"
-+ '1999421108224'
-+ '713207042048'
-+ '1286214066176'
-+ ""
-+ ''
-+ ''
-+ ''
-+ ''
-+ ""
-+ "iscsi-images"
-+ ""
-+ ""
-+ )
-
- mock_secret = MagicMock()
- self.mock_conn.secretDefineXML = MagicMock(return_value=mock_secret)
-@@ -3215,316 +4653,399 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- self.mock_conn.storagePoolDefineXML = MagicMock()
-
- self.assertTrue(
-- virt.pool_update('default',
-- 'rbd',
-- source_name='iscsi-images',
-- source_hosts=['ses4.tf.local', 'ses5.tf.local'],
-- source_auth={'username': 'libvirt',
-- 'password': 'c2VjcmV0'}))
-+ virt.pool_update(
-+ "default",
-+ "rbd",
-+ source_name="iscsi-images",
-+ source_hosts=["ses4.tf.local", "ses5.tf.local"],
-+ source_auth={"username": "libvirt", "password": "c2VjcmV0"},
-+ )
-+ )
- self.mock_conn.storagePoolDefineXML.assert_called_once_with(expected_xml)
-- mock_secret.setValue.assert_called_once_with(b'secret')
-+ mock_secret.setValue.assert_called_once_with(b"secret")
-
- def test_volume_infos(self):
-- '''
-+ """
- Test virt.volume_infos
-- '''
-+ """
- vms_disks = [
-- '''
-+ """
-
-
-
-
-
-- ''',
-- '''
-+ """,
-+ """
-
-
-
-
-
-- ''',
-- '''
-+ """,
-+ """
-
-
-
-
-
-- '''
-+ """,
- ]
- mock_vms = []
- for idx, disk in enumerate(vms_disks):
- vm = MagicMock()
- # pylint: disable=no-member
-- vm.name.return_value = 'vm{0}'.format(idx)
-- vm.XMLDesc.return_value = '''
-+ vm.name.return_value = "vm{0}".format(idx)
-+ vm.XMLDesc.return_value = """
-
- vm{0}
- {1}
-
-- '''.format(idx, disk)
-+ """.format(
-+ idx, disk
-+ )
- # pylint: enable=no-member
- mock_vms.append(vm)
-
- mock_pool_data = [
- {
-- 'name': 'pool0',
-- 'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
-- 'volumes': [
-+ "name": "pool0",
-+ "state": self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
-+ "volumes": [
- {
-- 'key': '/key/of/vol0',
-- 'name': 'vol0',
-- 'path': '/path/to/vol0.qcow2',
-- 'info': [0, 123456789, 123456],
-- 'backingStore': None
-+ "key": "/key/of/vol0",
-+ "name": "vol0",
-+ "path": "/path/to/vol0.qcow2",
-+ "info": [0, 123456789, 123456],
-+ "backingStore": None,
- }
-- ]
-+ ],
- },
- {
-- 'name': 'pool1',
-- 'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
-- 'volumes': [
-+ "name": "pool1",
-+ "state": self.mock_libvirt.VIR_STORAGE_POOL_RUNNING,
-+ "volumes": [
- {
-- 'key': '/key/of/vol0bad',
-- 'name': 'vol0bad',
-- 'path': '/path/to/vol0bad.qcow2',
-- 'info': None,
-- 'backingStore': None
-+ "key": "/key/of/vol0bad",
-+ "name": "vol0bad",
-+ "path": "/path/to/vol0bad.qcow2",
-+ "info": None,
-+ "backingStore": None,
- },
- {
-- 'key': '/key/of/vol1',
-- 'name': 'vol1',
-- 'path': '/path/to/vol1.qcow2',
-- 'info': [0, 12345, 1234],
-- 'backingStore': None
-+ "key": "/key/of/vol1",
-+ "name": "vol1",
-+ "path": "/path/to/vol1.qcow2",
-+ "info": [0, 12345, 1234],
-+ "backingStore": None,
- },
- {
-- 'key': '/key/of/vol2',
-- 'name': 'vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'info': [0, 12345, 1234],
-- 'backingStore': '/path/to/vol0.qcow2'
-+ "key": "/key/of/vol2",
-+ "name": "vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "info": [0, 12345, 1234],
-+ "backingStore": "/path/to/vol0.qcow2",
- },
- ],
-- }
-+ },
- ]
- mock_pools = []
- for pool_data in mock_pool_data:
- mock_pool = MagicMock()
-- mock_pool.name.return_value = pool_data['name'] # pylint: disable=no-member
-- mock_pool.info.return_value = [pool_data['state']]
-+ mock_pool.name.return_value = pool_data["name"] # pylint: disable=no-member
-+ mock_pool.info.return_value = [pool_data["state"]]
- mock_volumes = []
-- for vol_data in pool_data['volumes']:
-+ for vol_data in pool_data["volumes"]:
- mock_volume = MagicMock()
- # pylint: disable=no-member
-- mock_volume.name.return_value = vol_data['name']
-- mock_volume.key.return_value = vol_data['key']
-- mock_volume.path.return_value = '/path/to/{0}.qcow2'.format(vol_data['name'])
-- if vol_data['info']:
-- mock_volume.info.return_value = vol_data['info']
-- backing_store = '''
-+ mock_volume.name.return_value = vol_data["name"]
-+ mock_volume.key.return_value = vol_data["key"]
-+ mock_volume.path.return_value = "/path/to/{0}.qcow2".format(
-+ vol_data["name"]
-+ )
-+ if vol_data["info"]:
-+ mock_volume.info.return_value = vol_data["info"]
-+ backing_store = (
-+ """
-
-- qcow2
-+
- {0}
-
-- '''.format(vol_data['backingStore']) if vol_data['backingStore'] else ''
-- mock_volume.XMLDesc.return_value = '''
-+ """.format(
-+ vol_data["backingStore"]
-+ )
-+ if vol_data["backingStore"]
-+ else ""
-+ )
-+ mock_volume.XMLDesc.return_value = """
-
- {0}
-
-- qcow2
-+
- /path/to/{0}.qcow2
-
- {1}
-
-- '''.format(vol_data['name'], backing_store)
-+ """.format(
-+ vol_data["name"], backing_store
-+ )
- else:
-- mock_volume.info.side_effect = self.mock_libvirt.libvirtError('No such volume')
-- mock_volume.XMLDesc.side_effect = self.mock_libvirt.libvirtError('No such volume')
-+ mock_volume.info.side_effect = self.mock_libvirt.libvirtError(
-+ "No such volume"
-+ )
-+ mock_volume.XMLDesc.side_effect = self.mock_libvirt.libvirtError(
-+ "No such volume"
-+ )
- mock_volumes.append(mock_volume)
- # pylint: enable=no-member
-- mock_pool.listAllVolumes.return_value = mock_volumes # pylint: disable=no-member
-+ mock_pool.listAllVolumes.return_value = (
-+ mock_volumes # pylint: disable=no-member
-+ )
- mock_pools.append(mock_pool)
-
- inactive_pool = MagicMock()
-- inactive_pool.name.return_value = 'pool2'
-+ inactive_pool.name.return_value = "pool2"
- inactive_pool.info.return_value = [self.mock_libvirt.VIR_STORAGE_POOL_INACTIVE]
-- inactive_pool.listAllVolumes.side_effect = self.mock_libvirt.libvirtError('pool is inactive')
-+ inactive_pool.listAllVolumes.side_effect = self.mock_libvirt.libvirtError(
-+ "pool is inactive"
-+ )
- mock_pools.append(inactive_pool)
-
-- self.mock_conn.listAllStoragePools.return_value = mock_pools # pylint: disable=no-member
-+ self.mock_conn.listAllStoragePools.return_value = (
-+ mock_pools # pylint: disable=no-member
-+ )
-
-- with patch('salt.modules.virt._get_domain', MagicMock(return_value=mock_vms)):
-- actual = virt.volume_infos('pool0', 'vol0')
-+ with patch("salt.modules.virt._get_domain", MagicMock(return_value=mock_vms)):
-+ actual = virt.volume_infos("pool0", "vol0")
- self.assertEqual(1, len(actual.keys()))
-- self.assertEqual(1, len(actual['pool0'].keys()))
-- self.assertEqual(['vm0', 'vm2'], sorted(actual['pool0']['vol0']['used_by']))
-- self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
-- self.assertEqual('file', actual['pool0']['vol0']['type'])
-- self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
-- self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
-- self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
--
-- self.assertEqual(virt.volume_infos('pool1', None), {
-- 'pool1': {
-- 'vol1': {
-- 'type': 'file',
-- 'key': '/key/of/vol1',
-- 'path': '/path/to/vol1.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-- },
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': ['vm2'],
-+ self.assertEqual(1, len(actual["pool0"].keys()))
-+ self.assertEqual(["vm0", "vm2"], sorted(actual["pool0"]["vol0"]["used_by"]))
-+ self.assertEqual("/path/to/vol0.qcow2", actual["pool0"]["vol0"]["path"])
-+ self.assertEqual("file", actual["pool0"]["vol0"]["type"])
-+ self.assertEqual("/key/of/vol0", actual["pool0"]["vol0"]["key"])
-+ self.assertEqual(123456789, actual["pool0"]["vol0"]["capacity"])
-+ self.assertEqual(123456, actual["pool0"]["vol0"]["allocation"])
-+
-+ self.assertEqual(
-+ virt.volume_infos("pool1", None),
-+ {
-+ "pool1": {
-+ "vol1": {
-+ "type": "file",
-+ "key": "/key/of/vol1",
-+ "path": "/path/to/vol1.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": None,
-+ "format": "qcow2",
-+ },
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": ["vm2"],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ },
- }
-- }
-- })
--
-- self.assertEqual(virt.volume_infos(None, 'vol2'), {
-- 'pool1': {
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': ['vm2'],
-+ },
-+ )
-+
-+ self.assertEqual(
-+ virt.volume_infos(None, "vol2"),
-+ {
-+ "pool1": {
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": ["vm2"],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ }
- }
-- }
-- })
-+ },
-+ )
-
- # Single VM test
-- with patch('salt.modules.virt._get_domain', MagicMock(return_value=mock_vms[0])):
-- actual = virt.volume_infos('pool0', 'vol0')
-+ with patch(
-+ "salt.modules.virt._get_domain", MagicMock(return_value=mock_vms[0])
-+ ):
-+ actual = virt.volume_infos("pool0", "vol0")
- self.assertEqual(1, len(actual.keys()))
-- self.assertEqual(1, len(actual['pool0'].keys()))
-- self.assertEqual(['vm0'], sorted(actual['pool0']['vol0']['used_by']))
-- self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
-- self.assertEqual('file', actual['pool0']['vol0']['type'])
-- self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
-- self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
-- self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
--
-- self.assertEqual(virt.volume_infos('pool1', None), {
-- 'pool1': {
-- 'vol1': {
-- 'type': 'file',
-- 'key': '/key/of/vol1',
-- 'path': '/path/to/vol1.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-- },
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-+ self.assertEqual(1, len(actual["pool0"].keys()))
-+ self.assertEqual(["vm0"], sorted(actual["pool0"]["vol0"]["used_by"]))
-+ self.assertEqual("/path/to/vol0.qcow2", actual["pool0"]["vol0"]["path"])
-+ self.assertEqual("file", actual["pool0"]["vol0"]["type"])
-+ self.assertEqual("/key/of/vol0", actual["pool0"]["vol0"]["key"])
-+ self.assertEqual(123456789, actual["pool0"]["vol0"]["capacity"])
-+ self.assertEqual(123456, actual["pool0"]["vol0"]["allocation"])
-+
-+ self.assertEqual(
-+ virt.volume_infos("pool1", None),
-+ {
-+ "pool1": {
-+ "vol1": {
-+ "type": "file",
-+ "key": "/key/of/vol1",
-+ "path": "/path/to/vol1.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": None,
-+ "format": "qcow2",
-+ },
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ },
- }
-- }
-- })
--
-- self.assertEqual(virt.volume_infos(None, 'vol2'), {
-- 'pool1': {
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-+ },
-+ )
-+
-+ self.assertEqual(
-+ virt.volume_infos(None, "vol2"),
-+ {
-+ "pool1": {
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ }
- }
-- }
-- })
-+ },
-+ )
-
- # No VM test
-- with patch('salt.modules.virt._get_domain', MagicMock(side_effect=CommandExecutionError('no VM'))):
-- actual = virt.volume_infos('pool0', 'vol0')
-+ with patch(
-+ "salt.modules.virt._get_domain",
-+ MagicMock(side_effect=CommandExecutionError("no VM")),
-+ ):
-+ actual = virt.volume_infos("pool0", "vol0")
- self.assertEqual(1, len(actual.keys()))
-- self.assertEqual(1, len(actual['pool0'].keys()))
-- self.assertEqual([], sorted(actual['pool0']['vol0']['used_by']))
-- self.assertEqual('/path/to/vol0.qcow2', actual['pool0']['vol0']['path'])
-- self.assertEqual('file', actual['pool0']['vol0']['type'])
-- self.assertEqual('/key/of/vol0', actual['pool0']['vol0']['key'])
-- self.assertEqual(123456789, actual['pool0']['vol0']['capacity'])
-- self.assertEqual(123456, actual['pool0']['vol0']['allocation'])
--
-- self.assertEqual(virt.volume_infos('pool1', None), {
-- 'pool1': {
-- 'vol1': {
-- 'type': 'file',
-- 'key': '/key/of/vol1',
-- 'path': '/path/to/vol1.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-- },
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-+ self.assertEqual(1, len(actual["pool0"].keys()))
-+ self.assertEqual([], sorted(actual["pool0"]["vol0"]["used_by"]))
-+ self.assertEqual("/path/to/vol0.qcow2", actual["pool0"]["vol0"]["path"])
-+ self.assertEqual("file", actual["pool0"]["vol0"]["type"])
-+ self.assertEqual("/key/of/vol0", actual["pool0"]["vol0"]["key"])
-+ self.assertEqual(123456789, actual["pool0"]["vol0"]["capacity"])
-+ self.assertEqual(123456, actual["pool0"]["vol0"]["allocation"])
-+
-+ self.assertEqual(
-+ virt.volume_infos("pool1", None),
-+ {
-+ "pool1": {
-+ "vol1": {
-+ "type": "file",
-+ "key": "/key/of/vol1",
-+ "path": "/path/to/vol1.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": None,
-+ "format": "qcow2",
-+ },
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ },
- }
-- }
-- })
--
-- self.assertEqual(virt.volume_infos(None, 'vol2'), {
-- 'pool1': {
-- 'vol2': {
-- 'type': 'file',
-- 'key': '/key/of/vol2',
-- 'path': '/path/to/vol2.qcow2',
-- 'capacity': 12345,
-- 'allocation': 1234,
-- 'used_by': [],
-+ },
-+ )
-+
-+ self.assertEqual(
-+ virt.volume_infos(None, "vol2"),
-+ {
-+ "pool1": {
-+ "vol2": {
-+ "type": "file",
-+ "key": "/key/of/vol2",
-+ "path": "/path/to/vol2.qcow2",
-+ "capacity": 12345,
-+ "allocation": 1234,
-+ "used_by": [],
-+ "backing_store": {
-+ "path": "/path/to/vol0.qcow2",
-+ "format": "qcow2",
-+ },
-+ "format": "qcow2",
-+ }
- }
-- }
-- })
-+ },
-+ )
-
- def test_volume_delete(self):
-- '''
-+ """
- Test virt.volume_delete
-- '''
-+ """
- mock_delete = MagicMock(side_effect=[0, 1])
- mock_volume = MagicMock()
- mock_volume.delete = mock_delete # pylint: disable=no-member
- mock_pool = MagicMock()
- # pylint: disable=no-member
- mock_pool.storageVolLookupByName.side_effect = [
-- mock_volume,
-- mock_volume,
-- self.mock_libvirt.libvirtError("Missing volume"),
-- mock_volume,
-+ mock_volume,
-+ mock_volume,
-+ self.mock_libvirt.libvirtError("Missing volume"),
-+ mock_volume,
- ]
- self.mock_conn.storagePoolLookupByName.side_effect = [
-- mock_pool,
-- mock_pool,
-- mock_pool,
-- self.mock_libvirt.libvirtError("Missing pool"),
-+ mock_pool,
-+ mock_pool,
-+ mock_pool,
-+ self.mock_libvirt.libvirtError("Missing pool"),
- ]
-
- # pylint: enable=no-member
-- self.assertTrue(virt.volume_delete('default', 'test_volume'))
-- self.assertFalse(virt.volume_delete('default', 'test_volume'))
-+ self.assertTrue(virt.volume_delete("default", "test_volume"))
-+ self.assertFalse(virt.volume_delete("default", "test_volume"))
- with self.assertRaises(self.mock_libvirt.libvirtError):
-- virt.volume_delete('default', 'missing')
-- virt.volume_delete('missing', 'test_volume')
-+ virt.volume_delete("default", "missing")
-+ virt.volume_delete("missing", "test_volume")
- self.assertEqual(mock_delete.call_count, 2)
-
- def test_pool_capabilities(self):
-- '''
-+ """
- Test virt.pool_capabilities where libvirt has the pool-capabilities feature
-- '''
-- xml_caps = '''
-+ """
-+ xml_caps = """
-
-
-
-@@ -3555,113 +5076,170 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
-
-
-- '''
-+ """
- self.mock_conn.getStoragePoolCapabilities = MagicMock(return_value=xml_caps)
-
- actual = virt.pool_capabilities()
-- self.assertEqual({
-- 'computed': False,
-- 'pool_types': [{
-- 'name': 'disk',
-- 'supported': True,
-- 'options': {
-- 'pool': {
-- 'default_format': 'unknown',
-- 'sourceFormatType': ['unknown', 'dos', 'dvh']
-- },
-- 'volume': {
-- 'default_format': 'none',
-- 'targetFormatType': ['none', 'linux']
-- }
-- }
-- },
-- {
-- 'name': 'iscsi',
-- 'supported': True,
-- },
-- {
-- 'name': 'rbd',
-- 'supported': True,
-- 'options': {
-- 'volume': {
-- 'default_format': 'raw',
-- 'targetFormatType': []
-- }
-- }
-- },
-+ self.assertEqual(
- {
-- 'name': 'sheepdog',
-- 'supported': False,
-+ "computed": False,
-+ "pool_types": [
-+ {
-+ "name": "disk",
-+ "supported": True,
-+ "options": {
-+ "pool": {
-+ "default_format": "unknown",
-+ "sourceFormatType": ["unknown", "dos", "dvh"],
-+ },
-+ "volume": {
-+ "default_format": "none",
-+ "targetFormatType": ["none", "linux"],
-+ },
-+ },
-+ },
-+ {"name": "iscsi", "supported": True},
-+ {
-+ "name": "rbd",
-+ "supported": True,
-+ "options": {
-+ "volume": {"default_format": "raw", "targetFormatType": []}
-+ },
-+ },
-+ {"name": "sheepdog", "supported": False},
-+ ],
- },
-- ]}, actual)
-+ actual,
-+ )
-
-- @patch('salt.modules.virt.get_hypervisor', return_value='kvm')
-+ @patch("salt.modules.virt.get_hypervisor", return_value="kvm")
- def test_pool_capabilities_computed(self, mock_get_hypervisor):
-- '''
-+ """
- Test virt.pool_capabilities where libvirt doesn't have the pool-capabilities feature
-- '''
-+ """
- self.mock_conn.getLibVersion = MagicMock(return_value=4006000)
- del self.mock_conn.getStoragePoolCapabilities
-
- actual = virt.pool_capabilities()
-
-- self.assertTrue(actual['computed'])
-- backends = actual['pool_types']
-+ self.assertTrue(actual["computed"])
-+ backends = actual["pool_types"]
-
- # libvirt version matching check
-- self.assertFalse([backend for backend in backends if backend['name'] == 'iscsi-direct'][0]['supported'])
-- self.assertTrue([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
-- self.assertFalse([backend for backend in backends if backend['name'] == 'zfs'][0]['supported'])
-+ self.assertFalse(
-+ [backend for backend in backends if backend["name"] == "iscsi-direct"][0][
-+ "supported"
-+ ]
-+ )
-+ self.assertTrue(
-+ [backend for backend in backends if backend["name"] == "gluster"][0][
-+ "supported"
-+ ]
-+ )
-+ self.assertFalse(
-+ [backend for backend in backends if backend["name"] == "zfs"][0][
-+ "supported"
-+ ]
-+ )
-
- # test case matching other hypervisors
-- mock_get_hypervisor.return_value = 'xen'
-- backends = virt.pool_capabilities()['pool_types']
-- self.assertFalse([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
-+ mock_get_hypervisor.return_value = "xen"
-+ backends = virt.pool_capabilities()["pool_types"]
-+ self.assertFalse(
-+ [backend for backend in backends if backend["name"] == "gluster"][0][
-+ "supported"
-+ ]
-+ )
-
-- mock_get_hypervisor.return_value = 'bhyve'
-- backends = virt.pool_capabilities()['pool_types']
-- self.assertFalse([backend for backend in backends if backend['name'] == 'gluster'][0]['supported'])
-- self.assertTrue([backend for backend in backends if backend['name'] == 'zfs'][0]['supported'])
-+ mock_get_hypervisor.return_value = "bhyve"
-+ backends = virt.pool_capabilities()["pool_types"]
-+ self.assertFalse(
-+ [backend for backend in backends if backend["name"] == "gluster"][0][
-+ "supported"
-+ ]
-+ )
-+ self.assertTrue(
-+ [backend for backend in backends if backend["name"] == "zfs"][0][
-+ "supported"
-+ ]
-+ )
-
- # Test options output
-- self.assertNotIn('options', [backend for backend in backends if backend['name'] == 'iscsi'][0])
-- self.assertNotIn('pool', [backend for backend in backends if backend['name'] == 'dir'][0]['options'])
-- self.assertNotIn('volume', [backend for backend in backends if backend['name'] == 'logical'][0]['options'])
-- self.assertEqual({
-- 'pool': {
-- 'default_format': 'auto',
-- 'sourceFormatType': ['auto', 'nfs', 'glusterfs', 'cifs']
-+ self.assertNotIn(
-+ "options",
-+ [backend for backend in backends if backend["name"] == "iscsi"][0],
-+ )
-+ self.assertNotIn(
-+ "pool",
-+ [backend for backend in backends if backend["name"] == "dir"][0]["options"],
-+ )
-+ self.assertNotIn(
-+ "volume",
-+ [backend for backend in backends if backend["name"] == "logical"][0][
-+ "options"
-+ ],
-+ )
-+ self.assertEqual(
-+ {
-+ "pool": {
-+ "default_format": "auto",
-+ "sourceFormatType": ["auto", "nfs", "glusterfs", "cifs"],
-+ },
-+ "volume": {
-+ "default_format": "raw",
-+ "targetFormatType": [
-+ "none",
-+ "raw",
-+ "dir",
-+ "bochs",
-+ "cloop",
-+ "dmg",
-+ "iso",
-+ "vpc",
-+ "vdi",
-+ "fat",
-+ "vhd",
-+ "ploop",
-+ "cow",
-+ "qcow",
-+ "qcow2",
-+ "qed",
-+ "vmdk",
-+ ],
- },
-- 'volume': {
-- 'default_format': 'raw',
-- 'targetFormatType': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc', 'vdi',
-- 'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed', 'vmdk']
-- }
- },
-- [backend for backend in backends if backend['name'] == 'netfs'][0]['options'])
-+ [backend for backend in backends if backend["name"] == "netfs"][0][
-+ "options"
-+ ],
-+ )
-
- def test_get_domain(self):
-- '''
-+ """
- Test the virt._get_domain function
-- '''
-+ """
- # Tests with no VM
- self.mock_conn.listDomainsID.return_value = []
- self.mock_conn.listDefinedDomains.return_value = []
- self.assertEqual([], virt._get_domain(self.mock_conn))
-- self.assertRaisesRegex(CommandExecutionError, 'No virtual machines found.',
-- virt._get_domain, self.mock_conn, 'vm2')
-+ self.assertRaisesRegex(
-+ CommandExecutionError,
-+ "No virtual machines found.",
-+ virt._get_domain,
-+ self.mock_conn,
-+ "vm2",
-+ )
-
- # Test with active and inactive VMs
- self.mock_conn.listDomainsID.return_value = [1]
-
- def create_mock_vm(idx):
- mock_vm = MagicMock()
-- mock_vm.name.return_value = 'vm{0}'.format(idx)
-+ mock_vm.name.return_value = "vm{0}".format(idx)
- return mock_vm
-
- mock_vms = [create_mock_vm(idx) for idx in range(3)]
- self.mock_conn.lookupByID.return_value = mock_vms[0]
-- self.mock_conn.listDefinedDomains.return_value = ['vm1', 'vm2']
-+ self.mock_conn.listDefinedDomains.return_value = ["vm1", "vm2"]
-
- self.mock_conn.lookupByName.side_effect = mock_vms
- self.assertEqual(mock_vms, virt._get_domain(self.mock_conn))
-@@ -3672,11 +5250,297 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-
- self.mock_conn.lookupByName.return_value = None
- self.mock_conn.lookupByName.side_effect = [mock_vms[1], mock_vms[2]]
-- self.assertEqual([mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, active=False))
-+ self.assertEqual(
-+ [mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, active=False)
-+ )
-
- self.mock_conn.reset_mock()
- self.mock_conn.lookupByName.return_value = None
- self.mock_conn.lookupByName.side_effect = [mock_vms[1], mock_vms[2]]
-- self.assertEqual([mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, 'vm1', 'vm2'))
-- self.assertRaisesRegex(CommandExecutionError, 'The VM "vm2" is not present',
-- virt._get_domain, self.mock_conn, 'vm2', inactive=False)
-+ self.assertEqual(
-+ [mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, "vm1", "vm2")
-+ )
-+ self.assertRaisesRegex(
-+ CommandExecutionError,
-+ 'The VM "vm2" is not present',
-+ virt._get_domain,
-+ self.mock_conn,
-+ "vm2",
-+ inactive=False,
-+ )
+@@ -4898,3 +5259,279 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "vm2",
+ inactive=False,
+ )
+
+ def test_volume_define(self):
+ """
@@ -16207,1043 +3622,514 @@ index 3e9bd5ef49..d3988464f6 100644
+ },
+ )
diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
-index 6727704494..c76f8a5fc0 100644
+index 0a47562074..6d38829870 100644
--- a/tests/unit/states/test_virt.py
+++ b/tests/unit/states/test_virt.py
-@@ -1,55 +1,56 @@
- # -*- coding: utf-8 -*-
--'''
-+"""
- :codeauthor: Jayesh Kariya
--'''
-+"""
- # Import Python libs
- from __future__ import absolute_import, print_function, unicode_literals
--import tempfile
--import shutil
-
--# Import Salt Testing Libs
--from tests.support.runtests import RUNTIME_VARS
--from tests.support.mixins import LoaderModuleMockMixin
--from tests.support.unit import TestCase
--from tests.support.mock import (
-- MagicMock,
-- mock_open,
-- patch)
-+import shutil
-+import tempfile
-
- # Import Salt Libs
+@@ -8,6 +8,7 @@ import tempfile
import salt.states.virt as virt
import salt.utils.files
--from salt.exceptions import CommandExecutionError
-+from salt.exceptions import CommandExecutionError, SaltInvocationError
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
++from salt.ext import six
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, mock_open, patch
+ from tests.support.runtests import RUNTIME_VARS
+@@ -263,375 +264,6 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ ret,
+ )
- # Import 3rd-party libs
- from salt.ext import six
-+from tests.support.mixins import LoaderModuleMockMixin
-+from tests.support.mock import MagicMock, mock_open, patch
-+
-+# Import Salt Testing Libs
-+from tests.support.runtests import RUNTIME_VARS
-+from tests.support.unit import TestCase
-
-
- class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
-- '''
-+ """
- libvirt library mockup
-- '''
-+ """
-+
- class libvirtError(Exception): # pylint: disable=invalid-name
-- '''
-+ """
- libvirt error mockup
-- '''
-+ """
-+
- def get_error_message(self):
-- '''
-+ """
- Fake function return error message
-- '''
-+ """
- return six.text_type(self)
-
-
- class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-- '''
-+ """
- Test cases for salt.states.libvirt
-- '''
-+ """
-+
- def setup_loader_modules(self):
-- self.mock_libvirt = LibvirtMock() # pylint: disable=attribute-defined-outside-init
-- self.addCleanup(delattr, self, 'mock_libvirt')
-- loader_globals = {
-- 'libvirt': self.mock_libvirt
+- def test_defined(self):
+- """
+- defined state test cases.
+- """
+- ret = {
+- "name": "myvm",
+- "changes": {},
+- "result": True,
+- "comment": "myvm is running",
- }
-+ self.mock_libvirt = (
-+ LibvirtMock()
-+ ) # pylint: disable=attribute-defined-outside-init
-+ self.addCleanup(delattr, self, "mock_libvirt")
-+ loader_globals = {"libvirt": self.mock_libvirt}
- return {virt: loader_globals}
-
- @classmethod
-@@ -64,1831 +65,3252 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
- # 'keys' function tests: 1
-
- def test_keys(self):
-- '''
-+ """
- Test to manage libvirt keys.
-- '''
-- with patch('os.path.isfile', MagicMock(return_value=False)):
-- name = 'sunrise'
+- with patch.dict(virt.__opts__, {"test": False}):
+- # no change test
+- init_mock = MagicMock(return_value=True)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(return_value={"definition": False}),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": False}},
+- "comment": "Domain myvm unchanged",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm"), ret)
-
-- ret = {'name': name,
-- 'result': True,
-- 'comment': '',
-- 'changes': {}}
+- # Test defining a guest with connection details
+- init_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=[]),
+- "virt.init": init_mock,
+- "virt.update": MagicMock(
+- side_effect=CommandExecutionError("not found")
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "comment": "Domain myvm defined",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- self.assertDictEqual(
+- virt.defined(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- boot_dev="cdrom hd",
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- ),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- boot_dev="cdrom hd",
+- os_type="linux",
+- arch="i686",
+- disk="prod",
+- disks=disks,
+- nic="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- hypervisor="qemu",
+- seed=False,
+- boot=None,
+- install=False,
+- start=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- )
-
-- mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
-- {'libvirt.servercert.pem': 'A'}])
-- with patch.dict(virt.__salt__, {'pillar.ext': mock}):
-- comt = ('All keys are correct')
-- ret.update({'comment': comt})
-+ """
-+ with patch("os.path.isfile", MagicMock(return_value=False)):
-+ name = "sunrise"
-+
-+ ret = {"name": name, "result": True, "comment": "", "changes": {}}
-+
-+ mock = MagicMock(
-+ side_effect=[
-+ [],
-+ ["libvirt.servercert.pem"],
-+ {"libvirt.servercert.pem": "A"},
-+ ]
-+ )
-+ with patch.dict(virt.__salt__, {"pillar.ext": mock}):
-+ comt = "All keys are correct"
-+ ret.update({"comment": comt})
- self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- comt = ('Libvirt keys are set to be updated')
-- ret.update({'comment': comt, 'result': None})
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ comt = "Libvirt keys are set to be updated"
-+ ret.update({"comment": comt, "result": None})
- self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
-- comt = ('Updated libvirt certs and keys')
-- ret.update({'comment': comt, 'result': True,
-- 'changes': {'servercert': 'new'}})
-- self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.object(
-+ salt.utils.files, "fopen", MagicMock(mock_open())
-+ ):
-+ comt = "Updated libvirt certs and keys"
-+ ret.update(
-+ {
-+ "comment": comt,
-+ "result": True,
-+ "changes": {"servercert": "new"},
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir), ret
-+ )
-
- def test_keys_with_expiration_days(self):
-- '''
-+ """
- Test to manage libvirt keys.
-- '''
-- with patch('os.path.isfile', MagicMock(return_value=False)):
-- name = 'sunrise'
+- # Working update case when running
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
-- ret = {'name': name,
-- 'result': True,
-- 'comment': '',
-- 'changes': {}}
+- # Working update case when running with boot params
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- }
-
-- mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
-- {'libvirt.servercert.pem': 'A'}])
-- with patch.dict(virt.__salt__, {'pillar.ext': mock}):
-- comt = ('All keys are correct')
-- ret.update({'comment': comt})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- expiration_days=700), ret)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- return_value={"definition": True, "cpu": True}
+- ),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "cpu": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", boot=boot), ret)
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- comt = ('Libvirt keys are set to be updated')
-- ret.update({'comment': comt, 'result': None})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- expiration_days=700), ret)
+- # Working update case when stopped
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(return_value={"definition": True}),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": True,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
-- comt = ('Updated libvirt certs and keys')
-- ret.update({'comment': comt, 'result': True,
-- 'changes': {'servercert': 'new'}})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- expiration_days=700), ret)
-+ """
-+ with patch("os.path.isfile", MagicMock(return_value=False)):
-+ name = "sunrise"
-+
-+ ret = {"name": name, "result": True, "comment": "", "changes": {}}
-+
-+ mock = MagicMock(
-+ side_effect=[
-+ [],
-+ ["libvirt.servercert.pem"],
-+ {"libvirt.servercert.pem": "A"},
-+ ]
-+ )
-+ with patch.dict(virt.__salt__, {"pillar.ext": mock}):
-+ comt = "All keys are correct"
-+ ret.update({"comment": comt})
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, expiration_days=700), ret
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ comt = "Libvirt keys are set to be updated"
-+ ret.update({"comment": comt, "result": None})
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, expiration_days=700), ret
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.object(
-+ salt.utils.files, "fopen", MagicMock(mock_open())
-+ ):
-+ comt = "Updated libvirt certs and keys"
-+ ret.update(
-+ {
-+ "comment": comt,
-+ "result": True,
-+ "changes": {"servercert": "new"},
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, expiration_days=700),
-+ ret,
-+ )
-
- def test_keys_with_state(self):
-- '''
-+ """
- Test to manage libvirt keys.
-- '''
-- with patch('os.path.isfile', MagicMock(return_value=False)):
-- name = 'sunrise'
+- # Failed live update case
+- update_mock = MagicMock(
+- return_value={
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- )
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {
+- "myvm": {
+- "definition": True,
+- "cpu": False,
+- "errors": ["some error"],
+- }
+- },
+- "result": True,
+- "comment": "Domain myvm updated with live update(s) failures",
+- }
+- )
+- self.assertDictEqual(
+- virt.defined("myvm", cpu=2, boot_dev="cdrom hd"), ret
+- )
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- boot_dev="cdrom hd",
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- test=False,
+- )
-
-- ret = {'name': name,
-- 'result': True,
-- 'comment': '',
-- 'changes': {}}
+- # Failed definition update case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": MagicMock(
+- side_effect=[self.mock_libvirt.libvirtError("error message")]
+- ),
+- },
+- ):
+- ret.update({"changes": {}, "result": False, "comment": "error message"})
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
-- mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
-- {'libvirt.servercert.pem': 'A'}])
-- with patch.dict(virt.__salt__, {'pillar.ext': mock}):
-- comt = ('All keys are correct')
-- ret.update({'comment': comt})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- st='California'), ret)
+- # Test dry-run mode
+- with patch.dict(virt.__opts__, {"test": True}):
+- # Guest defined case
+- init_mock = MagicMock(return_value=True)
+- update_mock = MagicMock(side_effect=CommandExecutionError("not found"))
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=[]),
+- "virt.init": init_mock,
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": None,
+- "comment": "Domain myvm defined",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- ifaces = [
+- {"name": "eth0", "mac": "01:23:45:67:89:AB"},
+- {"name": "eth1", "type": "network", "source": "admin"},
+- ]
+- graphics = {
+- "type": "spice",
+- "listen": {"type": "address", "address": "192.168.0.1"},
+- }
+- self.assertDictEqual(
+- virt.defined(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type="linux",
+- arch="i686",
+- vm_type="qemu",
+- disk_profile="prod",
+- disks=disks,
+- nic_profile="prod",
+- interfaces=ifaces,
+- graphics=graphics,
+- seed=False,
+- install=False,
+- pub_key="/path/to/key.pub",
+- priv_key="/path/to/key",
+- connection="someconnection",
+- username="libvirtuser",
+- password="supersecret",
+- ),
+- ret,
+- )
+- init_mock.assert_not_called()
+- update_mock.assert_not_called()
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- comt = ('Libvirt keys are set to be updated')
-- ret.update({'comment': comt, 'result': None})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- st='California'), ret)
+- # Guest update case
+- update_mock = MagicMock(return_value={"definition": True})
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True}},
+- "result": None,
+- "comment": "Domain myvm updated",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- test=True,
+- boot_dev=None,
+- )
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
-- comt = ('Updated libvirt certs and keys')
-- ret.update({'comment': comt, 'result': True,
-- 'changes': {'servercert': 'new'}})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- st='California'), ret)
-+ """
-+ with patch("os.path.isfile", MagicMock(return_value=False)):
-+ name = "sunrise"
-+
-+ ret = {"name": name, "result": True, "comment": "", "changes": {}}
-+
-+ mock = MagicMock(
-+ side_effect=[
-+ [],
-+ ["libvirt.servercert.pem"],
-+ {"libvirt.servercert.pem": "A"},
-+ ]
-+ )
-+ with patch.dict(virt.__salt__, {"pillar.ext": mock}):
-+ comt = "All keys are correct"
-+ ret.update({"comment": comt})
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, st="California"), ret
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ comt = "Libvirt keys are set to be updated"
-+ ret.update({"comment": comt, "result": None})
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, st="California"), ret
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.object(
-+ salt.utils.files, "fopen", MagicMock(mock_open())
-+ ):
-+ comt = "Updated libvirt certs and keys"
-+ ret.update(
-+ {
-+ "comment": comt,
-+ "result": True,
-+ "changes": {"servercert": "new"},
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.keys(name, basepath=self.pki_dir, st="California"), ret
-+ )
-
- def test_keys_with_all_options(self):
-- '''
-+ """
- Test to manage libvirt keys.
-- '''
-- with patch('os.path.isfile', MagicMock(return_value=False)):
-- name = 'sunrise'
+- # No changes case
+- update_mock = MagicMock(return_value={"definition": False})
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- "virt.update": update_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": False}},
+- "result": True,
+- "comment": "Domain myvm unchanged",
+- }
+- )
+- self.assertDictEqual(virt.defined("myvm"), ret)
+- update_mock.assert_called_with(
+- "myvm",
+- cpu=None,
+- mem=None,
+- disk_profile=None,
+- disks=None,
+- nic_profile=None,
+- interfaces=None,
+- graphics=None,
+- live=True,
+- connection=None,
+- username=None,
+- password=None,
+- boot=None,
+- test=True,
+- boot_dev=None,
+- )
-
-- ret = {'name': name,
-- 'result': True,
-- 'comment': '',
-- 'changes': {}}
--
-- mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
-- {'libvirt.servercert.pem': 'A'}])
-- with patch.dict(virt.__salt__, {'pillar.ext': mock}):
-- comt = ('All keys are correct')
-- ret.update({'comment': comt})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- country='USA',
-- st='California',
-- locality='Los_Angeles',
-- organization='SaltStack',
-- expiration_days=700), ret)
--
-- with patch.dict(virt.__opts__, {'test': True}):
-- comt = ('Libvirt keys are set to be updated')
-- ret.update({'comment': comt, 'result': None})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- country='USA',
-- st='California',
-- locality='Los_Angeles',
-- organization='SaltStack',
-- expiration_days=700), ret)
--
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
-- comt = ('Updated libvirt certs and keys')
-- ret.update({'comment': comt, 'result': True,
-- 'changes': {'servercert': 'new'}})
-- self.assertDictEqual(virt.keys(name,
-- basepath=self.pki_dir,
-- country='USA',
-- st='California',
-- locality='Los_Angeles',
-- organization='SaltStack',
-- expiration_days=700), ret)
-+ """
-+ with patch("os.path.isfile", MagicMock(return_value=False)):
-+ name = "sunrise"
-+
-+ ret = {"name": name, "result": True, "comment": "", "changes": {}}
-+
-+ mock = MagicMock(
-+ side_effect=[
-+ [],
-+ ["libvirt.servercert.pem"],
-+ {"libvirt.servercert.pem": "A"},
-+ ]
-+ )
-+ with patch.dict(virt.__salt__, {"pillar.ext": mock}):
-+ comt = "All keys are correct"
-+ ret.update({"comment": comt})
-+ self.assertDictEqual(
-+ virt.keys(
-+ name,
-+ basepath=self.pki_dir,
-+ country="USA",
-+ st="California",
-+ locality="Los_Angeles",
-+ organization="SaltStack",
-+ expiration_days=700,
-+ ),
-+ ret,
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ comt = "Libvirt keys are set to be updated"
-+ ret.update({"comment": comt, "result": None})
-+ self.assertDictEqual(
-+ virt.keys(
-+ name,
-+ basepath=self.pki_dir,
-+ country="USA",
-+ st="California",
-+ locality="Los_Angeles",
-+ organization="SaltStack",
-+ expiration_days=700,
-+ ),
-+ ret,
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.object(
-+ salt.utils.files, "fopen", MagicMock(mock_open())
-+ ):
-+ comt = "Updated libvirt certs and keys"
-+ ret.update(
-+ {
-+ "comment": comt,
-+ "result": True,
-+ "changes": {"servercert": "new"},
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.keys(
-+ name,
-+ basepath=self.pki_dir,
-+ country="USA",
-+ st="California",
-+ locality="Los_Angeles",
-+ organization="SaltStack",
-+ expiration_days=700,
-+ ),
-+ ret,
-+ )
-
def test_defined(self):
-- '''
-+ """
+ """
defined state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True,
-- 'comment': 'myvm is running'}
-- with patch.dict(virt.__opts__, {'test': False}):
-+ """
-+ ret = {
-+ "name": "myvm",
-+ "changes": {},
-+ "result": True,
-+ "comment": "myvm is running",
-+ }
-+ with patch.dict(virt.__opts__, {"test": False}):
- # no change test
- init_mock = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': MagicMock(return_value={'definition': False}),
-- }):
-- ret.update({'changes': {'myvm': {'definition': False}},
-- 'comment': 'Domain myvm unchanged'})
-- self.assertDictEqual(virt.defined('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": MagicMock(return_value={"definition": False}),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": False}},
-+ "comment": "Domain myvm unchanged",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm"), ret)
-
- # Test defining a guest with connection details
- init_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=[]),
-- 'virt.init': init_mock,
-- 'virt.update': MagicMock(side_effect=CommandExecutionError('not found')),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True}},
-- 'comment': 'Domain myvm defined'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/image.qcow2'
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- ifaces = [{
-- 'name': 'eth0',
-- 'mac': '01:23:45:67:89:AB'
-- },
-- {
-- 'name': 'eth1',
-- 'type': 'network',
-- 'source': 'admin'
-- }]
-- graphics = {'type': 'spice', 'listen': {'type': 'address', 'address': '192.168.0.1'}}
-- self.assertDictEqual(virt.defined('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- vm_type='qemu',
-- disk_profile='prod',
-- disks=disks,
-- nic_profile='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- seed=False,
-- install=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret'), ret)
-- init_mock.assert_called_with('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- disk='prod',
-- disks=disks,
-- nic='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- hypervisor='qemu',
-- seed=False,
-- boot=None,
-- install=False,
-- start=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=[]),
-+ "virt.init": init_mock,
-+ "virt.update": MagicMock(
-+ side_effect=CommandExecutionError("not found")
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True}},
-+ "comment": "Domain myvm defined",
-+ }
-+ )
-+ disks = [
-+ {
-+ "name": "system",
-+ "size": 8192,
-+ "overlay_image": True,
-+ "pool": "default",
-+ "image": "/path/to/image.qcow2",
-+ },
-+ {"name": "data", "size": 16834},
-+ ]
-+ ifaces = [
-+ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
-+ {"name": "eth1", "type": "network", "source": "admin"},
-+ ]
-+ graphics = {
-+ "type": "spice",
-+ "listen": {"type": "address", "address": "192.168.0.1"},
-+ }
-+ self.assertDictEqual(
-+ virt.defined(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ vm_type="qemu",
-+ disk_profile="prod",
-+ disks=disks,
-+ nic_profile="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ seed=False,
-+ install=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ ),
-+ ret,
-+ )
-+ init_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ disk="prod",
-+ disks=disks,
-+ nic="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ hypervisor="qemu",
-+ seed=False,
-+ boot=None,
-+ install=False,
-+ start=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ )
-
- # Working update case when running
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True})
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.defined('myvm', cpu=2), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": MagicMock(
-+ return_value={"definition": True, "cpu": True}
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "cpu": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
- # Working update case when running with boot params
- boot = {
-- 'kernel': '/root/f8-i386-vmlinuz',
-- 'initrd': '/root/f8-i386-initrd',
-- 'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/'
-+ "kernel": "/root/f8-i386-vmlinuz",
-+ "initrd": "/root/f8-i386-initrd",
-+ "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
- }
-
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True})
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.defined('myvm', boot=boot), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": MagicMock(
-+ return_value={"definition": True, "cpu": True}
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "cpu": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm", boot=boot), ret)
-
- # Working update case when stopped
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': MagicMock(return_value={'definition': True})
-- }):
-- ret.update({'changes': {'myvm': {'definition': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.defined('myvm', cpu=2), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": MagicMock(return_value={"definition": True}),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
- # Failed live update case
-- update_mock = MagicMock(return_value={'definition': True, 'cpu': False, 'errors': ['some error']})
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': update_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': False, 'errors': ['some error']}},
-- 'result': True,
-- 'comment': 'Domain myvm updated with live update(s) failures'})
-- self.assertDictEqual(virt.defined('myvm', cpu=2), ret)
-- update_mock.assert_called_with('myvm', cpu=2, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=False)
-+ update_mock = MagicMock(
-+ return_value={
-+ "definition": True,
-+ "cpu": False,
-+ "errors": ["some error"],
-+ }
-+ )
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": update_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "myvm": {
-+ "definition": True,
-+ "cpu": False,
-+ "errors": ["some error"],
-+ }
-+ },
-+ "result": True,
-+ "comment": "Domain myvm updated with live update(s) failures",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=False,
-+ )
-
- # Failed definition update case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': MagicMock(side_effect=[self.mock_libvirt.libvirtError('error message')])
-- }):
-- ret.update({'changes': {},
-- 'result': False,
-- 'comment': 'error message'})
-- self.assertDictEqual(virt.defined('myvm', cpu=2), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": MagicMock(
-+ side_effect=[self.mock_libvirt.libvirtError("error message")]
-+ ),
-+ },
-+ ):
-+ ret.update({"changes": {}, "result": False, "comment": "error message"})
-+ self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-
- # Test dry-run mode
-- with patch.dict(virt.__opts__, {'test': True}):
-+ with patch.dict(virt.__opts__, {"test": True}):
- # Guest defined case
- init_mock = MagicMock(return_value=True)
-- update_mock = MagicMock(side_effect=CommandExecutionError('not found'))
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=[]),
-- 'virt.init': init_mock,
-- 'virt.update': update_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': True}},
-- 'result': None,
-- 'comment': 'Domain myvm defined'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/image.qcow2'
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- ifaces = [{
-- 'name': 'eth0',
-- 'mac': '01:23:45:67:89:AB'
-- },
-- {
-- 'name': 'eth1',
-- 'type': 'network',
-- 'source': 'admin'
-- }]
-- graphics = {'type': 'spice', 'listen': {'type': 'address', 'address': '192.168.0.1'}}
-- self.assertDictEqual(virt.defined('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- vm_type='qemu',
-- disk_profile='prod',
-- disks=disks,
-- nic_profile='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- seed=False,
-- install=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret'), ret)
-+ update_mock = MagicMock(side_effect=CommandExecutionError("not found"))
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=[]),
-+ "virt.init": init_mock,
-+ "virt.update": update_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True}},
-+ "result": None,
-+ "comment": "Domain myvm defined",
-+ }
-+ )
-+ disks = [
-+ {
-+ "name": "system",
-+ "size": 8192,
-+ "overlay_image": True,
-+ "pool": "default",
-+ "image": "/path/to/image.qcow2",
-+ },
-+ {"name": "data", "size": 16834},
-+ ]
-+ ifaces = [
-+ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
-+ {"name": "eth1", "type": "network", "source": "admin"},
-+ ]
-+ graphics = {
-+ "type": "spice",
-+ "listen": {"type": "address", "address": "192.168.0.1"},
-+ }
-+ self.assertDictEqual(
-+ virt.defined(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ vm_type="qemu",
-+ disk_profile="prod",
-+ disks=disks,
-+ nic_profile="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ seed=False,
-+ install=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ ),
-+ ret,
-+ )
- init_mock.assert_not_called()
- update_mock.assert_not_called()
-
- # Guest update case
-- update_mock = MagicMock(return_value={'definition': True})
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': update_mock
-- }):
-- ret.update({'changes': {'myvm': {'definition': True}},
-- 'result': None,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.defined('myvm', cpu=2), ret)
-- update_mock.assert_called_with('myvm', cpu=2, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=True)
-+ update_mock = MagicMock(return_value={"definition": True})
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": update_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True}},
-+ "result": None,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm", cpu=2), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=True,
-+ )
-
- # No changes case
-- update_mock = MagicMock(return_value={'definition': False})
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- 'virt.update': update_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': False}},
-- 'result': True,
-- 'comment': 'Domain myvm unchanged'})
-- self.assertDictEqual(virt.defined('myvm'), ret)
-- update_mock.assert_called_with('myvm', cpu=None, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=True)
-+ update_mock = MagicMock(return_value={"definition": False})
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ "virt.update": update_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": False}},
-+ "result": True,
-+ "comment": "Domain myvm unchanged",
-+ }
-+ )
-+ self.assertDictEqual(virt.defined("myvm"), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=None,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=True,
-+ )
-
- def test_running(self):
-- '''
-+ """
- running state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True,
-- 'comment': 'myvm is running'}
-- with patch.dict(virt.__opts__, {'test': False}):
-+ """
-+ ret = {
-+ "name": "myvm",
-+ "changes": {},
-+ "result": True,
-+ "comment": "myvm is running",
-+ }
-+ with patch.dict(virt.__opts__, {"test": False}):
- # Test starting an existing guest without changing it
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.start': MagicMock(return_value=0),
-- 'virt.update': MagicMock(return_value={'definition': False}),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {'started': True}},
-- 'comment': 'Domain myvm started'})
-- self.assertDictEqual(virt.running('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.start": MagicMock(return_value=0),
-+ "virt.update": MagicMock(return_value={"definition": False}),
+@@ -1012,126 +644,18 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+ "virt.start": MagicMock(return_value=0),
+ "virt.update": MagicMock(return_value={"definition": False}),
+- "virt.list_domains": MagicMock(return_value=["myvm"]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"started": True}},
+- "comment": "Domain myvm started",
+- }
+- )
+- self.assertDictEqual(virt.running("myvm"), ret)
+-
+- # Test defining and starting a guest the old way
+- init_mock = MagicMock(return_value=True)
+- start_mock = MagicMock(return_value=0)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.init": init_mock,
+- "virt.start": start_mock,
+- "virt.list_domains": MagicMock(return_value=[]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "comment": "Domain myvm defined and started",
+- }
+- )
+- self.assertDictEqual(
+- virt.running("myvm", cpu=2, mem=2048, image="/path/to/img.qcow2"),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type=None,
+- arch=None,
+- boot=None,
+- disk=None,
+- disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
+- nic=None,
+- interfaces=None,
+- graphics=None,
+- hypervisor=None,
+- start=False,
+- seed=True,
+- install=True,
+- pub_key=None,
+- priv_key=None,
+- connection=None,
+- username=None,
+- password=None,
+- )
+- start_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+- )
+-
+- # Test image parameter with disks with defined image
+- init_mock = MagicMock(return_value=True)
+- start_mock = MagicMock(return_value=0)
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
+- "virt.init": init_mock,
+- "virt.start": start_mock,
+- "virt.list_domains": MagicMock(return_value=[]),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"myvm": {"definition": True, "started": True}},
+- "comment": "Domain myvm defined and started",
+- }
+- )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/image.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ]
+- self.assertDictEqual(
+- virt.running(
+- "myvm", cpu=2, mem=2048, disks=disks, image="/path/to/img.qcow2"
+- ),
+- ret,
+- )
+- init_mock.assert_called_with(
+- "myvm",
+- cpu=2,
+- mem=2048,
+- os_type=None,
+- arch=None,
+- boot=None,
+- disk=None,
+- disks=disks,
+- nic=None,
+- interfaces=None,
+- graphics=None,
+- hypervisor=None,
+- start=False,
+- seed=True,
+- install=True,
+- pub_key=None,
+- priv_key=None,
+- connection=None,
+- username=None,
+- password=None,
+- )
+- start_mock.assert_called_with(
+- "myvm", connection=None, username=None, password=None
+ "virt.list_domains": MagicMock(return_value=["myvm"]),
+ },
+ ):
@@ -17252,3883 +4138,302 @@ index 6727704494..c76f8a5fc0 100644
+ "changes": {"myvm": {"started": True}},
+ "comment": "Domain myvm started",
+ }
-+ )
+ )
+ self.assertDictEqual(virt.running("myvm"), ret)
- # Test defining and starting a guest the old way
+- # Test image parameter with disks without defined image
++ # Test defining and starting a guest the old way
init_mock = MagicMock(return_value=True)
start_mock = MagicMock(return_value=0)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.init': init_mock,
-- 'virt.start': start_mock,
-- 'virt.list_domains': MagicMock(return_value=[]),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'comment': 'Domain myvm defined and started'})
-- self.assertDictEqual(virt.running('myvm',
-- cpu=2,
-- mem=2048,
-- image='/path/to/img.qcow2'), ret)
-- init_mock.assert_called_with('myvm', cpu=2, mem=2048,
-- os_type=None, arch=None, boot=None,
-- disk=None, disks=[{'name': 'system', 'image': '/path/to/img.qcow2'}], nic=None, interfaces=None,
-- graphics=None, hypervisor=None, start=False,
-- seed=True, install=True, pub_key=None, priv_key=None,
-- connection=None, username=None, password=None,)
-- start_mock.assert_called_with('myvm', connection=None, username=None, password=None)
--
-- # Test image parameter with disks with defined image
-- init_mock = MagicMock(return_value=True)
-- start_mock = MagicMock(return_value=0)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.init': init_mock,
-- 'virt.start': start_mock,
-- 'virt.list_domains': MagicMock(return_value=[]),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'comment': 'Domain myvm defined and started'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/image.qcow2'
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- self.assertDictEqual(virt.running('myvm',
-- cpu=2,
-- mem=2048,
-- disks=disks,
-- image='/path/to/img.qcow2'), ret)
-- init_mock.assert_called_with('myvm', cpu=2, mem=2048,
-- os_type=None, arch=None, boot=None,
-- disk=None, disks=disks, nic=None, interfaces=None,
-- graphics=None, hypervisor=None, start=False,
-- seed=True, install=True, pub_key=None, priv_key=None,
-- connection=None, username=None, password=None,)
-- start_mock.assert_called_with('myvm', connection=None, username=None, password=None)
--
-- # Test image parameter with disks without defined image
-- init_mock = MagicMock(return_value=True)
-- start_mock = MagicMock(return_value=0)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.init': init_mock,
-- 'virt.start': start_mock,
-- 'virt.list_domains': MagicMock(return_value=[]),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'comment': 'Domain myvm defined and started'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- self.assertDictEqual(virt.running('myvm',
-- cpu=2,
-- mem=2048,
-- disks=disks,
-- image='/path/to/img.qcow2'), ret)
-- init_mock.assert_called_with('myvm', cpu=2, mem=2048,
-- os_type=None, arch=None, boot=None,
-- disk=None,
-- disks=[{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/img.qcow2',
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }],
-- nic=None, interfaces=None,
-- graphics=None, hypervisor=None, start=False,
-- seed=True, install=True, pub_key=None, priv_key=None,
-- connection=None, username=None, password=None,)
-- start_mock.assert_called_with('myvm', connection=None, username=None, password=None)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.init": init_mock,
-+ "virt.start": start_mock,
-+ "virt.list_domains": MagicMock(return_value=[]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "started": True}},
-+ "comment": "Domain myvm defined and started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.running(
+ with patch.dict(
+@@ -1149,18 +673,12 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ "comment": "Domain myvm defined and started",
+ }
+ )
+- disks = [
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- },
+- {"name": "data", "size": 16834},
+- ]
+ self.assertDictEqual(
+ virt.running(
+- "myvm", cpu=2, mem=2048, disks=disks, image="/path/to/img.qcow2"
+ "myvm",
+ cpu=2,
+ mem=2048,
+ disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
-+ ),
-+ ret,
-+ )
-+ init_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type=None,
-+ arch=None,
-+ boot=None,
-+ disk=None,
+ ),
+ ret,
+ )
+@@ -1172,16 +690,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ arch=None,
+ boot=None,
+ disk=None,
+- disks=[
+- {
+- "name": "system",
+- "size": 8192,
+- "overlay_image": True,
+- "pool": "default",
+- "image": "/path/to/img.qcow2",
+- },
+- {"name": "data", "size": 16834},
+- ],
+ disks=[{"name": "system", "image": "/path/to/img.qcow2"}],
-+ nic=None,
-+ interfaces=None,
-+ graphics=None,
-+ hypervisor=None,
-+ start=False,
-+ seed=True,
-+ install=True,
-+ pub_key=None,
-+ priv_key=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ )
-+ start_mock.assert_called_with(
-+ "myvm", connection=None, username=None, password=None
-+ )
+ nic=None,
+ interfaces=None,
+ graphics=None,
+@@ -2100,199 +1609,6 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ virt.network_defined("mynet", "br2", "bridge"), ret
+ )
- # Test defining and starting a guest the new way with connection details
- init_mock.reset_mock()
- start_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.init': init_mock,
-- 'virt.start': start_mock,
-- 'virt.list_domains': MagicMock(return_value=[]),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'comment': 'Domain myvm defined and started'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/image.qcow2'
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- ifaces = [{
-- 'name': 'eth0',
-- 'mac': '01:23:45:67:89:AB'
-- },
-- {
-- 'name': 'eth1',
-- 'type': 'network',
-- 'source': 'admin'
-- }]
-- graphics = {'type': 'spice', 'listen': {'type': 'address', 'address': '192.168.0.1'}}
-- self.assertDictEqual(virt.running('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- vm_type='qemu',
-- disk_profile='prod',
-- disks=disks,
-- nic_profile='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- seed=False,
-- install=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret'), ret)
-- init_mock.assert_called_with('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- disk='prod',
-- disks=disks,
-- nic='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- hypervisor='qemu',
-- seed=False,
-- boot=None,
-- install=False,
-- start=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret')
-- start_mock.assert_called_with('myvm',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.init": init_mock,
-+ "virt.start": start_mock,
-+ "virt.list_domains": MagicMock(return_value=[]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "started": True}},
-+ "comment": "Domain myvm defined and started",
-+ }
-+ )
-+ disks = [
-+ {
-+ "name": "system",
-+ "size": 8192,
-+ "overlay_image": True,
-+ "pool": "default",
-+ "image": "/path/to/image.qcow2",
-+ },
-+ {"name": "data", "size": 16834},
-+ ]
-+ ifaces = [
-+ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
-+ {"name": "eth1", "type": "network", "source": "admin"},
-+ ]
-+ graphics = {
-+ "type": "spice",
-+ "listen": {"type": "address", "address": "192.168.0.1"},
-+ }
-+ self.assertDictEqual(
-+ virt.running(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ vm_type="qemu",
-+ disk_profile="prod",
-+ disks=disks,
-+ nic_profile="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ seed=False,
-+ install=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ ),
-+ ret,
-+ )
-+ init_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ disk="prod",
-+ disks=disks,
-+ nic="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ hypervisor="qemu",
-+ seed=False,
-+ boot=None,
-+ install=False,
-+ start=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ )
-+ start_mock.assert_called_with(
-+ "myvm",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ )
-
- # Test with existing guest, but start raising an error
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.update': MagicMock(return_value={'definition': False}),
-- 'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')]),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {}}, 'result': False, 'comment': 'libvirt error msg'})
-- self.assertDictEqual(virt.running('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.update": MagicMock(return_value={"definition": False}),
-+ "virt.start": MagicMock(
-+ side_effect=[
-+ self.mock_libvirt.libvirtError("libvirt error msg")
-+ ]
-+ ),
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {}},
-+ "result": False,
-+ "comment": "libvirt error msg",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm"), ret)
-
- # Working update case when running
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True}),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.running('myvm', cpu=2, update=True), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.update": MagicMock(
-+ return_value={"definition": True, "cpu": True}
-+ ),
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "cpu": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
-
- # Working update case when running with boot params
- boot = {
-- 'kernel': '/root/f8-i386-vmlinuz',
-- 'initrd': '/root/f8-i386-initrd',
-- 'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/'
-+ "kernel": "/root/f8-i386-vmlinuz",
-+ "initrd": "/root/f8-i386-initrd",
-+ "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
- }
-
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True}),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated'})
-- self.assertDictEqual(virt.running('myvm', boot=boot, update=True), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.update": MagicMock(
-+ return_value={"definition": True, "cpu": True}
-+ ),
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "cpu": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", boot=boot, update=True), ret)
-
- # Working update case when stopped
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.start': MagicMock(return_value=0),
-- 'virt.update': MagicMock(return_value={'definition': True}),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'result': True,
-- 'comment': 'Domain myvm updated and started'})
-- self.assertDictEqual(virt.running('myvm', cpu=2, update=True), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.start": MagicMock(return_value=0),
-+ "virt.update": MagicMock(return_value={"definition": True}),
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "started": True}},
-+ "result": True,
-+ "comment": "Domain myvm updated and started",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
-
- # Failed live update case
-- update_mock = MagicMock(return_value={'definition': True, 'cpu': False, 'errors': ['some error']})
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.update': update_mock,
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'cpu': False, 'errors': ['some error']}},
-- 'result': True,
-- 'comment': 'Domain myvm updated with live update(s) failures'})
-- self.assertDictEqual(virt.running('myvm', cpu=2, update=True), ret)
-- update_mock.assert_called_with('myvm', cpu=2, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=False)
-+ update_mock = MagicMock(
-+ return_value={
-+ "definition": True,
-+ "cpu": False,
-+ "errors": ["some error"],
-+ }
-+ )
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.update": update_mock,
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "myvm": {
-+ "definition": True,
-+ "cpu": False,
-+ "errors": ["some error"],
-+ }
-+ },
-+ "result": True,
-+ "comment": "Domain myvm updated with live update(s) failures",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=False,
-+ )
-
- # Failed definition update case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.update': MagicMock(side_effect=[self.mock_libvirt.libvirtError('error message')]),
-- 'virt.list_domains': MagicMock(return_value=['myvm']),
-- }):
-- ret.update({'changes': {},
-- 'result': False,
-- 'comment': 'error message'})
-- self.assertDictEqual(virt.running('myvm', cpu=2, update=True), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.update": MagicMock(
-+ side_effect=[self.mock_libvirt.libvirtError("error message")]
-+ ),
-+ "virt.list_domains": MagicMock(return_value=["myvm"]),
-+ },
-+ ):
-+ ret.update({"changes": {}, "result": False, "comment": "error message"})
-+ self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
-
- # Test dry-run mode
-- with patch.dict(virt.__opts__, {'test': True}):
-+ with patch.dict(virt.__opts__, {"test": True}):
- # Guest defined case
- init_mock = MagicMock(return_value=True)
- start_mock = MagicMock(return_value=0)
- list_mock = MagicMock(return_value=[])
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.init': init_mock,
-- 'virt.start': start_mock,
-- 'virt.list_domains': list_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'result': None,
-- 'comment': 'Domain myvm defined and started'})
-- disks = [{
-- 'name': 'system',
-- 'size': 8192,
-- 'overlay_image': True,
-- 'pool': 'default',
-- 'image': '/path/to/image.qcow2'
-- },
-- {
-- 'name': 'data',
-- 'size': 16834
-- }]
-- ifaces = [{
-- 'name': 'eth0',
-- 'mac': '01:23:45:67:89:AB'
-- },
-- {
-- 'name': 'eth1',
-- 'type': 'network',
-- 'source': 'admin'
-- }]
-- graphics = {'type': 'spice', 'listen': {'type': 'address', 'address': '192.168.0.1'}}
-- self.assertDictEqual(virt.running('myvm',
-- cpu=2,
-- mem=2048,
-- os_type='linux',
-- arch='i686',
-- vm_type='qemu',
-- disk_profile='prod',
-- disks=disks,
-- nic_profile='prod',
-- interfaces=ifaces,
-- graphics=graphics,
-- seed=False,
-- install=False,
-- pub_key='/path/to/key.pub',
-- priv_key='/path/to/key',
-- connection='someconnection',
-- username='libvirtuser',
-- password='supersecret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.init": init_mock,
-+ "virt.start": start_mock,
-+ "virt.list_domains": list_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "started": True}},
-+ "result": None,
-+ "comment": "Domain myvm defined and started",
-+ }
-+ )
-+ disks = [
-+ {
-+ "name": "system",
-+ "size": 8192,
-+ "overlay_image": True,
-+ "pool": "default",
-+ "image": "/path/to/image.qcow2",
-+ },
-+ {"name": "data", "size": 16834},
-+ ]
-+ ifaces = [
-+ {"name": "eth0", "mac": "01:23:45:67:89:AB"},
-+ {"name": "eth1", "type": "network", "source": "admin"},
-+ ]
-+ graphics = {
-+ "type": "spice",
-+ "listen": {"type": "address", "address": "192.168.0.1"},
-+ }
-+ self.assertDictEqual(
-+ virt.running(
-+ "myvm",
-+ cpu=2,
-+ mem=2048,
-+ os_type="linux",
-+ arch="i686",
-+ vm_type="qemu",
-+ disk_profile="prod",
-+ disks=disks,
-+ nic_profile="prod",
-+ interfaces=ifaces,
-+ graphics=graphics,
-+ seed=False,
-+ install=False,
-+ pub_key="/path/to/key.pub",
-+ priv_key="/path/to/key",
-+ connection="someconnection",
-+ username="libvirtuser",
-+ password="supersecret",
-+ ),
-+ ret,
-+ )
- init_mock.assert_not_called()
- start_mock.assert_not_called()
-
- # Guest update case
-- update_mock = MagicMock(return_value={'definition': True})
-+ update_mock = MagicMock(return_value={"definition": True})
- start_mock = MagicMock(return_value=0)
-- list_mock = MagicMock(return_value=['myvm'])
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}),
-- 'virt.start': start_mock,
-- 'virt.update': update_mock,
-- 'virt.list_domains': list_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': True, 'started': True}},
-- 'result': None,
-- 'comment': 'Domain myvm updated and started'})
-- self.assertDictEqual(virt.running('myvm', cpu=2, update=True), ret)
-- update_mock.assert_called_with('myvm', cpu=2, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=True)
-+ list_mock = MagicMock(return_value=["myvm"])
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "stopped"}),
-+ "virt.start": start_mock,
-+ "virt.update": update_mock,
-+ "virt.list_domains": list_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": True, "started": True}},
-+ "result": None,
-+ "comment": "Domain myvm updated and started",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", cpu=2, update=True), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=2,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=True,
-+ )
- start_mock.assert_not_called()
-
- # No changes case
-- update_mock = MagicMock(return_value={'definition': False})
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.update': update_mock,
-- 'virt.list_domains': list_mock,
-- }):
-- ret.update({'changes': {'myvm': {'definition': False}},
-- 'result': True,
-- 'comment': 'Domain myvm exists and is running'})
-- self.assertDictEqual(virt.running('myvm', update=True), ret)
-- update_mock.assert_called_with('myvm', cpu=None, mem=None,
-- disk_profile=None, disks=None, nic_profile=None, interfaces=None,
-- graphics=None, live=True,
-- connection=None, username=None, password=None,
-- boot=None, test=True)
-+ update_mock = MagicMock(return_value={"definition": False})
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.update": update_mock,
-+ "virt.list_domains": list_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"myvm": {"definition": False}},
-+ "result": True,
-+ "comment": "Domain myvm exists and is running",
-+ }
-+ )
-+ self.assertDictEqual(virt.running("myvm", update=True), ret)
-+ update_mock.assert_called_with(
-+ "myvm",
-+ cpu=None,
-+ mem=None,
-+ disk_profile=None,
-+ disks=None,
-+ nic_profile=None,
-+ interfaces=None,
-+ graphics=None,
-+ live=True,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ boot=None,
-+ test=True,
-+ )
-
- def test_stopped(self):
-- '''
-+ """
- stopped state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True}
-+ """
-+ ret = {"name": "myvm", "changes": {}, "result": True}
-
- shutdown_mock = MagicMock(return_value=True)
-
- # Normal case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.shutdown': shutdown_mock
-- }):
-- ret.update({'changes': {
-- 'stopped': [{'domain': 'myvm', 'shutdown': True}]
+- def test_network_defined(self):
+- """
+- network_defined state test cases.
+- """
+- ret = {"name": "mynet", "changes": {}, "result": True, "comment": ""}
+- with patch.dict(virt.__opts__, {"test": False}):
+- define_mock = MagicMock(return_value=True)
+- # Non-existing network case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=[{}, {"mynet": {"active": False}}]
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined"},
+- "comment": "Network mynet defined",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_defined(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
- },
-- 'comment': 'Machine has been shut down'})
-- self.assertDictEqual(virt.stopped('myvm'), ret)
-- shutdown_mock.assert_called_with('myvm', connection=None, username=None, password=None)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.shutdown": shutdown_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"stopped": [{"domain": "myvm", "shutdown": True}]},
-+ "comment": "Machine has been shut down",
-+ }
-+ )
-+ self.assertDictEqual(virt.stopped("myvm"), ret)
-+ shutdown_mock.assert_called_with(
-+ "myvm", connection=None, username=None, password=None
-+ )
-
- # Normal case with user-provided connection parameters
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.shutdown': shutdown_mock,
-- }):
-- self.assertDictEqual(virt.stopped('myvm',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- shutdown_mock.assert_called_with('myvm', connection='myconnection', username='user', password='secret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.shutdown": shutdown_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.stopped(
-+ "myvm",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ shutdown_mock.assert_called_with(
-+ "myvm", connection="myconnection", username="user", password="secret"
-+ )
-
- # Case where an error occurred during the shutdown
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.shutdown': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]},
-- 'result': False,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.stopped('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.shutdown": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
-+ "result": False,
-+ "comment": "No changes had happened",
-+ }
-+ )
-+ self.assertDictEqual(virt.stopped("myvm"), ret)
-
- # Case where the domain doesn't exist
-- with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member
-- ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.stopped('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
-+ ): # pylint: disable=no-member
-+ ret.update(
-+ {"changes": {}, "result": False, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.stopped("myvm"), ret)
-
- # Case where the domain is already stopped
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'shutdown'})
-- }):
-- ret.update({'changes': {},
-- 'result': True,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.stopped('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "shutdown"}),
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "result": True, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.stopped("myvm"), ret)
-
- def test_powered_off(self):
-- '''
-+ """
- powered_off state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True}
-+ """
-+ ret = {"name": "myvm", "changes": {}, "result": True}
-
- stop_mock = MagicMock(return_value=True)
-
- # Normal case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.stop': stop_mock
-- }):
-- ret.update({'changes': {
-- 'unpowered': [{'domain': 'myvm', 'stop': True}]
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
- },
-- 'comment': 'Machine has been powered off'})
-- self.assertDictEqual(virt.powered_off('myvm'), ret)
-- stop_mock.assert_called_with('myvm', connection=None, username=None, password=None)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.stop": stop_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"unpowered": [{"domain": "myvm", "stop": True}]},
-+ "comment": "Machine has been powered off",
-+ }
-+ )
-+ self.assertDictEqual(virt.powered_off("myvm"), ret)
-+ stop_mock.assert_called_with(
-+ "myvm", connection=None, username=None, password=None
-+ )
-
- # Normal case with user-provided connection parameters
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.stop': stop_mock,
-- }):
-- self.assertDictEqual(virt.powered_off('myvm',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- stop_mock.assert_called_with('myvm', connection='myconnection', username='user', password='secret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.stop": stop_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.powered_off(
-+ "myvm",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ stop_mock.assert_called_with(
-+ "myvm", connection="myconnection", username="user", password="secret"
-+ )
-
- # Case where an error occurred during the poweroff
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}),
-- 'virt.stop': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]},
-- 'result': False,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.powered_off('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "running"}),
-+ "virt.stop": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
-+ "result": False,
-+ "comment": "No changes had happened",
-+ }
-+ )
-+ self.assertDictEqual(virt.powered_off("myvm"), ret)
-
- # Case where the domain doesn't exist
-- with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member
-- ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.powered_off('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
-+ ): # pylint: disable=no-member
-+ ret.update(
-+ {"changes": {}, "result": False, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.powered_off("myvm"), ret)
-
- # Case where the domain is already stopped
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.vm_state': MagicMock(return_value={'myvm': 'shutdown'})
-- }):
-- ret.update({'changes': {},
-- 'result': True,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.powered_off('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.vm_state": MagicMock(return_value={"myvm": "shutdown"}),
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "result": True, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.powered_off("myvm"), ret)
-
- def test_snapshot(self):
-- '''
-+ """
- snapshot state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True}
-+ """
-+ ret = {"name": "myvm", "changes": {}, "result": True}
-
- snapshot_mock = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.snapshot': snapshot_mock
-- }):
-- ret.update({'changes': {
-- 'saved': [{'domain': 'myvm', 'snapshot': True}]
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_called_with(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- autostart=False,
+- start=False,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
+- },
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
+- ],
+- },
+- connection="myconnection",
+- username="user",
+- password="secret",
+- )
+-
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Network mynet exists"})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- ),
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
+-
+- # Test cases with __opts__['test'] set to True
+- with patch.dict(virt.__opts__, {"test": True}):
+- ret.update({"result": None})
+-
+- # Non-existing network case
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(return_value={}),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {
+- "changes": {"mynet": "Network defined"},
+- "comment": "Network mynet defined",
+- }
+- )
+- self.assertDictEqual(
+- virt.network_defined(
+- "mynet",
+- "br2",
+- "bridge",
+- vport="openvswitch",
+- tag=180,
+- ipv4_config={
+- "cidr": "192.168.2.0/24",
+- "dhcp_ranges": [
+- {"start": "192.168.2.10", "end": "192.168.2.25"},
+- {"start": "192.168.2.110", "end": "192.168.2.125"},
+- ],
- },
-- 'comment': 'Snapshot has been taken'})
-- self.assertDictEqual(virt.snapshot('myvm'), ret)
-- snapshot_mock.assert_called_with('myvm', suffix=None, connection=None, username=None, password=None)
--
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.snapshot': snapshot_mock,
-- }):
-- self.assertDictEqual(virt.snapshot('myvm',
-- suffix='snap',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- snapshot_mock.assert_called_with('myvm',
-- suffix='snap',
-- connection='myconnection',
-- username='user',
-- password='secret')
--
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.snapshot': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]},
-- 'result': False,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.snapshot('myvm'), ret)
--
-- with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member
-- ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.snapshot('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.snapshot": snapshot_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"saved": [{"domain": "myvm", "snapshot": True}]},
-+ "comment": "Snapshot has been taken",
-+ }
-+ )
-+ self.assertDictEqual(virt.snapshot("myvm"), ret)
-+ snapshot_mock.assert_called_with(
-+ "myvm", suffix=None, connection=None, username=None, password=None
-+ )
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.snapshot": snapshot_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.snapshot(
-+ "myvm",
-+ suffix="snap",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ snapshot_mock.assert_called_with(
-+ "myvm",
-+ suffix="snap",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.snapshot": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
-+ "result": False,
-+ "comment": "No changes had happened",
-+ }
-+ )
-+ self.assertDictEqual(virt.snapshot("myvm"), ret)
-+
-+ with patch.dict(
-+ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
-+ ): # pylint: disable=no-member
-+ ret.update(
-+ {"changes": {}, "result": False, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.snapshot("myvm"), ret)
-
- def test_rebooted(self):
-- '''
-+ """
- rebooted state test cases.
-- '''
-- ret = {'name': 'myvm',
-- 'changes': {},
-- 'result': True}
-+ """
-+ ret = {"name": "myvm", "changes": {}, "result": True}
-
- reboot_mock = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.reboot': reboot_mock
-- }):
-- ret.update({'changes': {
-- 'rebooted': [{'domain': 'myvm', 'reboot': True}]
+- ipv6_config={
+- "cidr": "2001:db8:ca2:2::1/64",
+- "dhcp_ranges": [
+- {
+- "start": "2001:db8:ca2:1::10",
+- "end": "2001:db8:ca2::1f",
+- },
+- ],
- },
-- 'comment': 'Machine has been rebooted'})
-- self.assertDictEqual(virt.rebooted('myvm'), ret)
-- reboot_mock.assert_called_with('myvm', connection=None, username=None, password=None)
+- autostart=False,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
+- define_mock.assert_not_called()
-
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.reboot': reboot_mock,
-- }):
-- self.assertDictEqual(virt.rebooted('myvm',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- reboot_mock.assert_called_with('myvm',
-- connection='myconnection',
-- username='user',
-- password='secret')
+- # Case where there is nothing to be done
+- define_mock.reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- return_value={"mynet": {"active": True}}
+- ),
+- "virt.network_define": define_mock,
+- },
+- ):
+- ret.update(
+- {"changes": {}, "comment": "Network mynet exists", "result": True}
+- )
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
-
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']),
-- 'virt.reboot': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]},
-- 'result': False,
-- 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.rebooted('myvm'), ret)
+- # Error case
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.network_info": MagicMock(
+- side_effect=self.mock_libvirt.libvirtError("Some error")
+- )
+- },
+- ):
+- ret.update({"changes": {}, "comment": "Some error", "result": False})
+- self.assertDictEqual(
+- virt.network_defined("mynet", "br2", "bridge"), ret
+- )
-
-- with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member
-- ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'})
-- self.assertDictEqual(virt.rebooted('myvm'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.reboot": reboot_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"rebooted": [{"domain": "myvm", "reboot": True}]},
-+ "comment": "Machine has been rebooted",
-+ }
-+ )
-+ self.assertDictEqual(virt.rebooted("myvm"), ret)
-+ reboot_mock.assert_called_with(
-+ "myvm", connection=None, username=None, password=None
-+ )
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.reboot": reboot_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.rebooted(
-+ "myvm",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ reboot_mock.assert_called_with(
-+ "myvm", connection="myconnection", username="user", password="secret"
-+ )
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.list_domains": MagicMock(return_value=["myvm", "vm1"]),
-+ "virt.reboot": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"ignored": [{"domain": "myvm", "issue": "Some error"}]},
-+ "result": False,
-+ "comment": "No changes had happened",
-+ }
-+ )
-+ self.assertDictEqual(virt.rebooted("myvm"), ret)
-+
-+ with patch.dict(
-+ virt.__salt__, {"virt.list_domains": MagicMock(return_value=[])}
-+ ): # pylint: disable=no-member
-+ ret.update(
-+ {"changes": {}, "result": False, "comment": "No changes had happened"}
-+ )
-+ self.assertDictEqual(virt.rebooted("myvm"), ret)
-
- def test_network_defined(self):
-- '''
-+ """
- network_defined state test cases.
-- '''
-- ret = {'name': 'mynet', 'changes': {}, 'result': True, 'comment': ''}
-- with patch.dict(virt.__opts__, {'test': False}):
-+ """
-+ ret = {"name": "mynet", "changes": {}, "result": True, "comment": ""}
-+ with patch.dict(virt.__opts__, {"test": False}):
- define_mock = MagicMock(return_value=True)
- # Non-existing network case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(side_effect=[{}, {'mynet': {'active': False}}]),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network defined'},
-- 'comment': 'Network mynet defined'})
-- self.assertDictEqual(virt.network_defined('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- autostart=False,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- define_mock.assert_called_with('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- autostart=False,
-- start=False,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- connection='myconnection',
-- username='user',
-- password='secret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ side_effect=[{}, {"mynet": {"active": False}}]
-+ ),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network defined"},
-+ "comment": "Network mynet defined",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_defined(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {
-+ "start": "2001:db8:ca2:1::10",
-+ "end": "2001:db8:ca2::1f",
-+ },
-+ ],
-+ },
-+ autostart=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ define_mock.assert_called_with(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ autostart=False,
-+ start=False,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
-+ ],
-+ },
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-
- # Case where there is nothing to be done
- define_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': True}}),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {}, 'comment': 'Network mynet exists'})
-- self.assertDictEqual(virt.network_defined('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": True}}
-+ ),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Network mynet exists"})
-+ self.assertDictEqual(
-+ virt.network_defined("mynet", "br2", "bridge"), ret
-+ )
-
- # Error case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={}),
-- 'virt.network_define': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.network_defined('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(return_value={}),
-+ "virt.network_define": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.network_defined("mynet", "br2", "bridge"), ret
-+ )
-
- # Test cases with __opts__['test'] set to True
-- with patch.dict(virt.__opts__, {'test': True}):
-- ret.update({'result': None})
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ ret.update({"result": None})
-
- # Non-existing network case
- define_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={}),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network defined'},
-- 'comment': 'Network mynet defined'})
-- self.assertDictEqual(virt.network_defined('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- autostart=False,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(return_value={}),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network defined"},
-+ "comment": "Network mynet defined",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_defined(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {
-+ "start": "2001:db8:ca2:1::10",
-+ "end": "2001:db8:ca2::1f",
-+ },
-+ ],
-+ },
-+ autostart=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
- define_mock.assert_not_called()
-
- # Case where there is nothing to be done
- define_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': True}}),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {}, 'comment': 'Network mynet exists', 'result': True})
-- self.assertDictEqual(virt.network_defined('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": True}}
-+ ),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "comment": "Network mynet exists", "result": True}
-+ )
-+ self.assertDictEqual(
-+ virt.network_defined("mynet", "br2", "bridge"), ret
-+ )
-
- # Error case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.network_defined('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ )
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.network_defined("mynet", "br2", "bridge"), ret
-+ )
-
def test_network_running(self):
-- '''
-+ """
+ """
network_running state test cases.
-- '''
-- ret = {'name': 'mynet', 'changes': {}, 'result': True, 'comment': ''}
-- with patch.dict(virt.__opts__, {'test': False}):
-+ """
-+ ret = {"name": "mynet", "changes": {}, "result": True, "comment": ""}
-+ with patch.dict(virt.__opts__, {"test": False}):
- define_mock = MagicMock(return_value=True)
- start_mock = MagicMock(return_value=True)
- # Non-existing network case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(side_effect=[{}, {'mynet': {'active': False}}]),
-- 'virt.network_define': define_mock,
-- 'virt.network_start': start_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network defined and started'},
-- 'comment': 'Network mynet defined and started'})
-- self.assertDictEqual(virt.network_running('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- autostart=False,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- define_mock.assert_called_with('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- autostart=False,
-- start=False,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- connection='myconnection',
-- username='user',
-- password='secret')
-- start_mock.assert_called_with('mynet',
-- connection='myconnection',
-- username='user',
-- password='secret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ side_effect=[{}, {"mynet": {"active": False}}]
-+ ),
-+ "virt.network_define": define_mock,
-+ "virt.network_start": start_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network defined and started"},
-+ "comment": "Network mynet defined and started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_running(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {
-+ "start": "2001:db8:ca2:1::10",
-+ "end": "2001:db8:ca2::1f",
-+ },
-+ ],
-+ },
-+ autostart=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ define_mock.assert_called_with(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ autostart=False,
-+ start=False,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {"start": "2001:db8:ca2:1::10", "end": "2001:db8:ca2::1f"},
-+ ],
-+ },
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ start_mock.assert_called_with(
-+ "mynet",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
+@@ -3303,45 +2619,6 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
+ ret,
+ )
- # Case where there is nothing to be done
- define_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': True}}),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {}, 'comment': 'Network mynet exists and is running'})
-- self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": True}}
-+ ),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "comment": "Network mynet exists and is running"}
-+ )
-+ self.assertDictEqual(
-+ virt.network_running("mynet", "br2", "bridge"), ret
-+ )
-
- # Network existing and stopped case
- start_mock = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': False}}),
-- 'virt.network_start': start_mock,
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network started'}, 'comment': 'Network mynet exists and started'})
-- self.assertDictEqual(virt.network_running('mynet',
-- 'br2',
-- 'bridge',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- start_mock.assert_called_with('mynet', connection='myconnection', username='user', password='secret')
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": False}}
-+ ),
-+ "virt.network_start": start_mock,
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network started"},
-+ "comment": "Network mynet exists and started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_running(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ start_mock.assert_called_with(
-+ "mynet",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-
- # Error case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={}),
-- 'virt.network_define': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(return_value={}),
-+ "virt.network_define": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.network_running("mynet", "br2", "bridge"), ret
-+ )
-
- # Test cases with __opts__['test'] set to True
-- with patch.dict(virt.__opts__, {'test': True}):
-- ret.update({'result': None})
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ ret.update({"result": None})
-
- # Non-existing network case
- define_mock.reset_mock()
- start_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={}),
-- 'virt.network_define': define_mock,
-- 'virt.network_start': start_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network defined and started'},
-- 'comment': 'Network mynet defined and started'})
-- self.assertDictEqual(virt.network_running('mynet',
-- 'br2',
-- 'bridge',
-- vport='openvswitch',
-- tag=180,
-- ipv4_config={
-- 'cidr': '192.168.2.0/24',
-- 'dhcp_ranges': [
-- {'start': '192.168.2.10', 'end': '192.168.2.25'},
-- {'start': '192.168.2.110', 'end': '192.168.2.125'},
-- ]
-- },
-- ipv6_config={
-- 'cidr': '2001:db8:ca2:2::1/64',
-- 'dhcp_ranges': [
-- {'start': '2001:db8:ca2:1::10', 'end': '2001:db8:ca2::1f'},
-- ]
-- },
-- autostart=False,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(return_value={}),
-+ "virt.network_define": define_mock,
-+ "virt.network_start": start_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network defined and started"},
-+ "comment": "Network mynet defined and started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_running(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ vport="openvswitch",
-+ tag=180,
-+ ipv4_config={
-+ "cidr": "192.168.2.0/24",
-+ "dhcp_ranges": [
-+ {"start": "192.168.2.10", "end": "192.168.2.25"},
-+ {"start": "192.168.2.110", "end": "192.168.2.125"},
-+ ],
-+ },
-+ ipv6_config={
-+ "cidr": "2001:db8:ca2:2::1/64",
-+ "dhcp_ranges": [
-+ {
-+ "start": "2001:db8:ca2:1::10",
-+ "end": "2001:db8:ca2::1f",
-+ },
-+ ],
-+ },
-+ autostart=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
- define_mock.assert_not_called()
- start_mock.assert_not_called()
-
- # Case where there is nothing to be done
- define_mock.reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': True}}),
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {}, 'comment': 'Network mynet exists and is running'})
-- self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": True}}
-+ ),
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "comment": "Network mynet exists and is running"}
-+ )
-+ self.assertDictEqual(
-+ virt.network_running("mynet", "br2", "bridge"), ret
-+ )
-
- # Network existing and stopped case
- start_mock = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(return_value={'mynet': {'active': False}}),
-- 'virt.network_start': start_mock,
-- 'virt.network_define': define_mock,
-- }):
-- ret.update({'changes': {'mynet': 'Network started'}, 'comment': 'Network mynet exists and started'})
-- self.assertDictEqual(virt.network_running('mynet',
-- 'br2',
-- 'bridge',
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ return_value={"mynet": {"active": False}}
-+ ),
-+ "virt.network_start": start_mock,
-+ "virt.network_define": define_mock,
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mynet": "Network started"},
-+ "comment": "Network mynet exists and started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.network_running(
-+ "mynet",
-+ "br2",
-+ "bridge",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
- start_mock.assert_not_called()
-
- # Error case
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.network_info': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.network_info": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ )
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.network_running("mynet", "br2", "bridge"), ret
-+ )
-
- def test_pool_defined(self):
-- '''
-+ """
- pool_defined state test cases.
-- '''
-- ret = {'name': 'mypool', 'changes': {}, 'result': True, 'comment': ''}
-- mocks = {mock: MagicMock(return_value=True) for mock in ['define', 'autostart', 'build']}
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(side_effect=[{}, {'mypool': {'state': 'stopped', 'autostart': True}}]),
-- 'virt.pool_define': mocks['define'],
-- 'virt.pool_build': mocks['build'],
-- 'virt.pool_set_autostart': mocks['autostart']
-- }):
-- ret.update({'changes': {'mypool': 'Pool defined, marked for autostart'},
-- 'comment': 'Pool mypool defined, marked for autostart'})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]},
-- transient=True,
-- autostart=True,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- mocks['define'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- transient=True,
-- start=False,
-- connection='myconnection',
-- username='user',
-- password='secret')
-- mocks['autostart'].assert_called_with('mypool',
-- state='on',
-- connection='myconnection',
-- username='user',
-- password='secret')
-- mocks['build'].assert_called_with('mypool',
-- connection='myconnection',
-- username='user',
-- password='secret')
+- # test case with test=True and pool to be defined
+- for mock in mocks:
+- mocks[mock].reset_mock()
+- with patch.dict(
+- virt.__salt__,
+- { # pylint: disable=no-member
+- "virt.pool_info": MagicMock(return_value={}),
+- },
+- ):
+- ret.update(
+- {
+- "changes": {
+- "mypool": "Pool defined, marked for autostart, started"
+- },
+- "comment": "Pool mypool defined, marked for autostart, started",
+- "result": None,
+- }
+- )
+- self.assertDictEqual(
+- virt.pool_running(
+- "mypool",
+- ptype="logical",
+- target="/dev/base",
+- permissions={
+- "mode": "0770",
+- "owner": 1000,
+- "group": 100,
+- "label": "seclabel",
+- },
+- source={"devices": [{"path": "/dev/sda"}]},
+- transient=True,
+- autostart=True,
+- connection="myconnection",
+- username="user",
+- password="secret",
+- ),
+- ret,
+- )
-
-- mocks['update'] = MagicMock(return_value=False)
-+ """
-+ ret = {"name": "mypool", "changes": {}, "result": True, "comment": ""}
-+ mocks = {
-+ mock: MagicMock(return_value=True)
-+ for mock in ["define", "autostart", "build"]
-+ }
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ side_effect=[
-+ {},
-+ {"mypool": {"state": "stopped", "autostart": True}},
-+ ]
-+ ),
-+ "virt.pool_define": mocks["define"],
-+ "virt.pool_build": mocks["build"],
-+ "virt.pool_set_autostart": mocks["autostart"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool defined, marked for autostart"},
-+ "comment": "Pool mypool defined, marked for autostart",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ transient=True,
-+ autostart=True,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ mocks["define"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ transient=True,
-+ start=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ mocks["autostart"].assert_called_with(
-+ "mypool",
-+ state="on",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ mocks["build"].assert_called_with(
-+ "mypool",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+
-+ mocks["update"] = MagicMock(return_value=False)
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped', 'autostart': True}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_build': mocks['build'],
-- }):
-- ret.update({'changes': {}, 'comment': 'Pool mypool unchanged'})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['build'].assert_not_called()
--
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={}),
-- 'virt.pool_define': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "stopped", "autostart": True}}
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_build": mocks["build"],
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Pool mypool unchanged"})
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["build"].assert_not_called()
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(return_value={}),
-+ "virt.pool_define": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- # Test case with update and autostart change on stopped pool
- for mock in mocks:
- mocks[mock].reset_mock()
-- mocks['update'] = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped', 'autostart': True}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_set_autostart': mocks['autostart'],
-- 'virt.pool_build': mocks['build'],
-- }):
-- ret.update({'changes': {'mypool': 'Pool updated, built, autostart flag changed'},
-- 'comment': 'Pool mypool updated, built, autostart flag changed',
-- 'result': True})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- autostart=False,
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['build'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['autostart'].assert_called_with('mypool', state='off',
-- connection=None, username=None, password=None)
-- mocks['update'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- connection=None,
-- username=None,
-- password=None)
-+ mocks["update"] = MagicMock(return_value=True)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "stopped", "autostart": True}}
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_set_autostart": mocks["autostart"],
-+ "virt.pool_build": mocks["build"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "mypool": "Pool updated, built, autostart flag changed"
-+ },
-+ "comment": "Pool mypool updated, built, autostart flag changed",
-+ "result": True,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ autostart=False,
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["build"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["autostart"].assert_called_with(
-+ "mypool", state="off", connection=None, username=None, password=None
-+ )
-+ mocks["update"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ )
-
- # test case with update and no autostart change on running pool
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running', 'autostart': False}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_build': mocks['build'],
-- }):
-- ret.update({'changes': {'mypool': 'Pool updated'},
-- 'comment': 'Pool mypool updated',
-- 'result': True})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- autostart=False,
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['update'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- connection=None,
-- username=None,
-- password=None)
--
-- with patch.dict(virt.__opts__, {'test': True}):
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={
-+ "mypool": {"state": "running", "autostart": False}
-+ }
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_build": mocks["build"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool updated"},
-+ "comment": "Pool mypool updated",
-+ "result": True,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ autostart=False,
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["update"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
- # test case with test=True and no change
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running', 'autostart': True}}),
-- 'virt.pool_update': MagicMock(return_value=False),
-- }):
-- ret.update({'changes': {}, 'comment': 'Pool mypool unchanged',
-- 'result': True})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "running", "autostart": True}}
-+ ),
-+ "virt.pool_update": MagicMock(return_value=False),
-+ },
-+ ):
-+ ret.update(
-+ {"changes": {}, "comment": "Pool mypool unchanged", "result": True}
-+ )
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- # test case with test=True and pool to be defined
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={}),
-- }):
-- ret.update({'changes': {'mypool': 'Pool defined, marked for autostart'},
-- 'comment': 'Pool mypool defined, marked for autostart',
-- 'result': None})
-- self.assertDictEqual(virt.pool_defined('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]},
-- transient=True,
-- autostart=True,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(return_value={}),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool defined, marked for autostart"},
-+ "comment": "Pool mypool defined, marked for autostart",
-+ "result": None,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_defined(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ transient=True,
-+ autostart=True,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-
- def test_pool_running(self):
-- '''
-+ """
- pool_running state test cases.
-- '''
-- ret = {'name': 'mypool', 'changes': {}, 'result': True, 'comment': ''}
-- mocks = {mock: MagicMock(return_value=True) for mock in ['define', 'autostart', 'build', 'start', 'stop']}
-- with patch.dict(virt.__opts__, {'test': False}):
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(side_effect=[{}, {'mypool': {'state': 'stopped', 'autostart': True}}]),
-- 'virt.pool_define': mocks['define'],
-- 'virt.pool_build': mocks['build'],
-- 'virt.pool_start': mocks['start'],
-- 'virt.pool_set_autostart': mocks['autostart']
-- }):
-- ret.update({'changes': {'mypool': 'Pool defined, marked for autostart, started'},
-- 'comment': 'Pool mypool defined, marked for autostart, started'})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]},
-- transient=True,
-- autostart=True,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-- mocks['define'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- transient=True,
-- start=False,
-- connection='myconnection',
-- username='user',
-- password='secret')
-- mocks['autostart'].assert_called_with('mypool',
-- state='on',
-- connection='myconnection',
-- username='user',
-- password='secret')
-- mocks['build'].assert_called_with('mypool',
-- connection='myconnection',
-- username='user',
-- password='secret')
-- mocks['start'].assert_called_with('mypool',
-- connection='myconnection',
-- username='user',
-- password='secret')
--
-- mocks['update'] = MagicMock(return_value=False)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running', 'autostart': True}}),
-- 'virt.pool_update': MagicMock(return_value=False),
-- }):
-- ret.update({'changes': {}, 'comment': 'Pool mypool already running'})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ """
-+ ret = {"name": "mypool", "changes": {}, "result": True, "comment": ""}
-+ mocks = {
-+ mock: MagicMock(return_value=True)
-+ for mock in ["define", "autostart", "build", "start", "stop"]
-+ }
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ side_effect=[
-+ {},
-+ {"mypool": {"state": "stopped", "autostart": True}},
-+ ]
-+ ),
-+ "virt.pool_define": mocks["define"],
-+ "virt.pool_build": mocks["build"],
-+ "virt.pool_start": mocks["start"],
-+ "virt.pool_set_autostart": mocks["autostart"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "mypool": "Pool defined, marked for autostart, started"
-+ },
-+ "comment": "Pool mypool defined, marked for autostart, started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ transient=True,
-+ autostart=True,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-+ mocks["define"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ transient=True,
-+ start=False,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ mocks["autostart"].assert_called_with(
-+ "mypool",
-+ state="on",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ mocks["build"].assert_called_with(
-+ "mypool",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+ mocks["start"].assert_called_with(
-+ "mypool",
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ )
-+
-+ mocks["update"] = MagicMock(return_value=False)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "running", "autostart": True}}
-+ ),
-+ "virt.pool_update": MagicMock(return_value=False),
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Pool mypool already running"})
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped', 'autostart': True}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_build': mocks['build'],
-- 'virt.pool_start': mocks['start']
-- }):
-- ret.update({'changes': {'mypool': 'Pool started'}, 'comment': 'Pool mypool started'})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['start'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['build'].assert_not_called()
--
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={}),
-- 'virt.pool_define': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error'))
-- }):
-- ret.update({'changes': {}, 'comment': 'Some error', 'result': False})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "stopped", "autostart": True}}
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_build": mocks["build"],
-+ "virt.pool_start": mocks["start"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool started"},
-+ "comment": "Pool mypool started",
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["start"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["build"].assert_not_called()
-+
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(return_value={}),
-+ "virt.pool_define": MagicMock(
-+ side_effect=self.mock_libvirt.libvirtError("Some error")
-+ ),
-+ },
-+ ):
-+ ret.update({"changes": {}, "comment": "Some error", "result": False})
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- # Test case with update and autostart change on stopped pool
- for mock in mocks:
- mocks[mock].reset_mock()
-- mocks['update'] = MagicMock(return_value=True)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped', 'autostart': True}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_set_autostart': mocks['autostart'],
-- 'virt.pool_build': mocks['build'],
-- 'virt.pool_start': mocks['start']
-- }):
-- ret.update({'changes': {'mypool': 'Pool updated, built, autostart flag changed, started'},
-- 'comment': 'Pool mypool updated, built, autostart flag changed, started',
-- 'result': True})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- autostart=False,
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['start'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['build'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['autostart'].assert_called_with('mypool', state='off',
-- connection=None, username=None, password=None)
-- mocks['update'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- connection=None,
-- username=None,
-- password=None)
-+ mocks["update"] = MagicMock(return_value=True)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "stopped", "autostart": True}}
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_set_autostart": mocks["autostart"],
-+ "virt.pool_build": mocks["build"],
-+ "virt.pool_start": mocks["start"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "mypool": "Pool updated, built, autostart flag changed, started"
-+ },
-+ "comment": "Pool mypool updated, built, autostart flag changed, started",
-+ "result": True,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ autostart=False,
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["start"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["build"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["autostart"].assert_called_with(
-+ "mypool", state="off", connection=None, username=None, password=None
-+ )
-+ mocks["update"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ )
-
- # test case with update and no autostart change on running pool
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running', 'autostart': False}}),
-- 'virt.pool_update': mocks['update'],
-- 'virt.pool_build': mocks['build'],
-- 'virt.pool_start': mocks['start'],
-- 'virt.pool_stop': mocks['stop']
-- }):
-- ret.update({'changes': {'mypool': 'Pool updated, built, restarted'},
-- 'comment': 'Pool mypool updated, built, restarted',
-- 'result': True})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- autostart=False,
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-- mocks['stop'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['start'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['build'].assert_called_with('mypool', connection=None, username=None, password=None)
-- mocks['update'].assert_called_with('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source_devices=[{'path': '/dev/sda'}],
-- source_dir=None,
-- source_adapter=None,
-- source_hosts=None,
-- source_auth=None,
-- source_name=None,
-- source_format=None,
-- source_initiator=None,
-- connection=None,
-- username=None,
-- password=None)
--
-- with patch.dict(virt.__opts__, {'test': True}):
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={
-+ "mypool": {"state": "running", "autostart": False}
-+ }
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ "virt.pool_build": mocks["build"],
-+ "virt.pool_start": mocks["start"],
-+ "virt.pool_stop": mocks["stop"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool updated, built, restarted"},
-+ "comment": "Pool mypool updated, built, restarted",
-+ "result": True,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ autostart=False,
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-+ mocks["stop"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["start"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["build"].assert_called_with(
-+ "mypool", connection=None, username=None, password=None
-+ )
-+ mocks["update"].assert_called_with(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source_devices=[{"path": "/dev/sda"}],
-+ source_dir=None,
-+ source_adapter=None,
-+ source_hosts=None,
-+ source_auth=None,
-+ source_name=None,
-+ source_format=None,
-+ source_initiator=None,
-+ connection=None,
-+ username=None,
-+ password=None,
-+ )
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
- # test case with test=True and no change
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running', 'autostart': True}}),
-- 'virt.pool_update': MagicMock(return_value=False),
-- }):
-- ret.update({'changes': {}, 'comment': 'Pool mypool already running',
-- 'result': True})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "running", "autostart": True}}
-+ ),
-+ "virt.pool_update": MagicMock(return_value=False),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {},
-+ "comment": "Pool mypool already running",
-+ "result": True,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- # test case with test=True and started
- for mock in mocks:
- mocks[mock].reset_mock()
-- mocks['update'] = MagicMock(return_value=False)
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped', 'autostart': True}}),
-- 'virt.pool_update': mocks['update']
-- }):
-- ret.update({'changes': {'mypool': 'Pool started'},
-- 'comment': 'Pool mypool started',
-- 'result': None})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- source={'devices': [{'path': '/dev/sda'}]}), ret)
-+ mocks["update"] = MagicMock(return_value=False)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(
-+ return_value={"mypool": {"state": "stopped", "autostart": True}}
-+ ),
-+ "virt.pool_update": mocks["update"],
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {"mypool": "Pool started"},
-+ "comment": "Pool mypool started",
-+ "result": None,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ ),
-+ ret,
-+ )
-
- # test case with test=True and pool to be defined
- for mock in mocks:
- mocks[mock].reset_mock()
-- with patch.dict(virt.__salt__, { # pylint: disable=no-member
-- 'virt.pool_info': MagicMock(return_value={}),
-- }):
-- ret.update({'changes': {'mypool': 'Pool defined, marked for autostart, started'},
-- 'comment': 'Pool mypool defined, marked for autostart, started',
-- 'result': None})
-- self.assertDictEqual(virt.pool_running('mypool',
-- ptype='logical',
-- target='/dev/base',
-- permissions={'mode': '0770',
-- 'owner': 1000,
-- 'group': 100,
-- 'label': 'seclabel'},
-- source={'devices': [{'path': '/dev/sda'}]},
-- transient=True,
-- autostart=True,
-- connection='myconnection',
-- username='user',
-- password='secret'), ret)
-+ with patch.dict(
-+ virt.__salt__,
-+ { # pylint: disable=no-member
-+ "virt.pool_info": MagicMock(return_value={}),
-+ },
-+ ):
-+ ret.update(
-+ {
-+ "changes": {
-+ "mypool": "Pool defined, marked for autostart, started"
-+ },
-+ "comment": "Pool mypool defined, marked for autostart, started",
-+ "result": None,
-+ }
-+ )
-+ self.assertDictEqual(
-+ virt.pool_running(
-+ "mypool",
-+ ptype="logical",
-+ target="/dev/base",
-+ permissions={
-+ "mode": "0770",
-+ "owner": 1000,
-+ "group": 100,
-+ "label": "seclabel",
-+ },
-+ source={"devices": [{"path": "/dev/sda"}]},
-+ transient=True,
-+ autostart=True,
-+ connection="myconnection",
-+ username="user",
-+ password="secret",
-+ ),
-+ ret,
-+ )
-
def test_pool_deleted(self):
-- '''
-+ """
+ """
Test the pool_deleted state
-- '''
-+ """
- # purge=False test case, stopped pool
-- with patch.dict(virt.__salt__, {
-- 'virt.pool_info': MagicMock(return_value={'test01': {'state': 'stopped', 'type': 'dir'}}),
-- 'virt.pool_undefine': MagicMock(return_value=True)
-- }):
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.pool_info": MagicMock(
-+ return_value={"test01": {"state": "stopped", "type": "dir"}}
-+ ),
-+ "virt.pool_undefine": MagicMock(return_value=True),
-+ },
-+ ):
- expected = {
-- 'name': 'test01',
-- 'changes': {
-- 'stopped': False,
-- 'deleted_volumes': [],
-- 'deleted': False,
-- 'undefined': True,
-- },
-- 'result': True,
-- 'comment': '',
-+ "name": "test01",
-+ "changes": {
-+ "stopped": False,
-+ "deleted_volumes": [],
-+ "deleted": False,
-+ "undefined": True,
-+ },
-+ "result": True,
-+ "comment": "",
- }
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- self.assertDictEqual(expected, virt.pool_deleted('test01'))
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ self.assertDictEqual(expected, virt.pool_deleted("test01"))
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- expected['result'] = None
-- self.assertDictEqual(expected, virt.pool_deleted('test01'))
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ expected["result"] = None
-+ self.assertDictEqual(expected, virt.pool_deleted("test01"))
-
- # purge=False test case
-- with patch.dict(virt.__salt__, {
-- 'virt.pool_info': MagicMock(return_value={'test01': {'state': 'running', 'type': 'dir'}}),
-- 'virt.pool_undefine': MagicMock(return_value=True),
-- 'virt.pool_stop': MagicMock(return_value=True)
-- }):
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.pool_info": MagicMock(
-+ return_value={"test01": {"state": "running", "type": "dir"}}
-+ ),
-+ "virt.pool_undefine": MagicMock(return_value=True),
-+ "virt.pool_stop": MagicMock(return_value=True),
-+ },
-+ ):
- expected = {
-- 'name': 'test01',
-- 'changes': {
-- 'stopped': True,
-- 'deleted_volumes': [],
-- 'deleted': False,
-- 'undefined': True,
-- },
-- 'result': True,
-- 'comment': '',
-+ "name": "test01",
-+ "changes": {
-+ "stopped": True,
-+ "deleted_volumes": [],
-+ "deleted": False,
-+ "undefined": True,
-+ },
-+ "result": True,
-+ "comment": "",
- }
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- self.assertDictEqual(expected, virt.pool_deleted('test01'))
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ self.assertDictEqual(expected, virt.pool_deleted("test01"))
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- expected['result'] = None
-- self.assertDictEqual(expected, virt.pool_deleted('test01'))
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ expected["result"] = None
-+ self.assertDictEqual(expected, virt.pool_deleted("test01"))
-
- # purge=True test case
-
-- with patch.dict(virt.__salt__, {
-- 'virt.pool_info': MagicMock(return_value={'test01': {'state': 'running', 'type': 'dir'}}),
-- 'virt.pool_list_volumes': MagicMock(return_value=['vm01.qcow2', 'vm02.qcow2']),
-- 'virt.pool_refresh': MagicMock(return_value=True),
-- 'virt.volume_delete': MagicMock(return_value=True),
-- 'virt.pool_stop': MagicMock(return_value=True),
-- 'virt.pool_delete': MagicMock(return_value=True),
-- 'virt.pool_undefine': MagicMock(return_value=True)
-- }):
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.pool_info": MagicMock(
-+ return_value={"test01": {"state": "running", "type": "dir"}}
-+ ),
-+ "virt.pool_list_volumes": MagicMock(
-+ return_value=["vm01.qcow2", "vm02.qcow2"]
-+ ),
-+ "virt.pool_refresh": MagicMock(return_value=True),
-+ "virt.volume_delete": MagicMock(return_value=True),
-+ "virt.pool_stop": MagicMock(return_value=True),
-+ "virt.pool_delete": MagicMock(return_value=True),
-+ "virt.pool_undefine": MagicMock(return_value=True),
-+ },
-+ ):
- expected = {
-- 'name': 'test01',
-- 'changes': {
-- 'stopped': True,
-- 'deleted_volumes': ['vm01.qcow2', 'vm02.qcow2'],
-- 'deleted': True,
-- 'undefined': True,
-- },
-- 'result': True,
-- 'comment': '',
-+ "name": "test01",
-+ "changes": {
-+ "stopped": True,
-+ "deleted_volumes": ["vm01.qcow2", "vm02.qcow2"],
-+ "deleted": True,
-+ "undefined": True,
-+ },
-+ "result": True,
-+ "comment": "",
- }
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- self.assertDictEqual(expected, virt.pool_deleted('test01', purge=True))
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ self.assertDictEqual(expected, virt.pool_deleted("test01", purge=True))
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- expected['result'] = None
-- self.assertDictEqual(expected, virt.pool_deleted('test01', purge=True))
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ expected["result"] = None
-+ self.assertDictEqual(expected, virt.pool_deleted("test01", purge=True))
-
- # Case of backend not supporting delete operations
-- with patch.dict(virt.__salt__, {
-- 'virt.pool_info': MagicMock(return_value={'test01': {'state': 'running', 'type': 'iscsi'}}),
-- 'virt.pool_stop': MagicMock(return_value=True),
-- 'virt.pool_undefine': MagicMock(return_value=True)
-- }):
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.pool_info": MagicMock(
-+ return_value={"test01": {"state": "running", "type": "iscsi"}}
-+ ),
-+ "virt.pool_stop": MagicMock(return_value=True),
-+ "virt.pool_undefine": MagicMock(return_value=True),
-+ },
-+ ):
- expected = {
-- 'name': 'test01',
-- 'changes': {
-- 'stopped': True,
-- 'deleted_volumes': [],
-- 'deleted': False,
-- 'undefined': True,
-- },
-- 'result': True,
-- 'comment': 'Unsupported actions for pool of type "iscsi": deleting volume, deleting pool',
-+ "name": "test01",
-+ "changes": {
-+ "stopped": True,
-+ "deleted_volumes": [],
-+ "deleted": False,
-+ "undefined": True,
-+ },
-+ "result": True,
-+ "comment": 'Unsupported actions for pool of type "iscsi": deleting volume, deleting pool',
- }
-
-- with patch.dict(virt.__opts__, {'test': False}):
-- self.assertDictEqual(expected, virt.pool_deleted('test01', purge=True))
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ self.assertDictEqual(expected, virt.pool_deleted("test01", purge=True))
-+
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ expected["result"] = None
-+ self.assertDictEqual(expected, virt.pool_deleted("test01", purge=True))
-+
-+ def test_volume_defined(self):
-+ """
-+ test the virt.volume_defined state
-+ """
-+ with patch.dict(virt.__opts__, {"test": False}):
-+ # test case: creating a volume
-+ define_mock = MagicMock()
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(return_value={"mypool": {}}),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {"mypool/myvol": {"old": "", "new": "defined"}},
-+ "result": True,
-+ "comment": "Volume myvol defined in pool mypool",
-+ },
-+ )
-+ define_mock.assert_called_once_with(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ )
-+
-+ # test case: with existing volume
-+ define_mock.reset_mock()
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "1293942784",
-+ "backing_store": {
-+ "path": "/path/to/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": True,
-+ "comment": "volume is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different sizes
-+ define_mock.reset_mock()
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "12345",
-+ "backing_store": {
-+ "path": "/path/to/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": True,
-+ "comment": "The capacity of the volume is different, but no resize performed",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different backing store
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/other/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different format
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "raw",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-
-- with patch.dict(virt.__opts__, {'test': True}):
-- expected['result'] = None
-- self.assertDictEqual(expected, virt.pool_deleted('test01', purge=True))
-+ # test case: with existing volume, different backing store
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/other/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: no pool
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=[]),
-+ "virt.volume_infos": MagicMock(return_value={}),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertRaisesRegex(
-+ SaltInvocationError,
-+ "Storage pool mypool not existing",
-+ virt.volume_defined,
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ )
-+
-+ # Test mode cases
-+ with patch.dict(virt.__opts__, {"test": True}):
-+ # test case: creating a volume
-+ define_mock.reset_mock()
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(return_value={"mypool": {}}),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {"mypool/myvol": {"old": "", "new": "defined"}},
-+ "result": None,
-+ "comment": "Volume myvol would be defined in pool mypool",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different sizes
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "12345",
-+ "backing_store": {
-+ "path": "/path/to/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": True,
-+ "comment": "The capacity of the volume is different, but no resize performed",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different backing store
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/other/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different format
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "raw",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: with existing volume, different backing store
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=["mypool"]),
-+ "virt.volume_infos": MagicMock(
-+ return_value={
-+ "mypool": {
-+ "myvol": {
-+ "format": "qcow2",
-+ "capacity": "1234",
-+ "backing_store": {
-+ "path": "/path/to/other/image",
-+ "format": "raw",
-+ },
-+ }
-+ }
-+ }
-+ ),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertDictEqual(
-+ virt.volume_defined(
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ ),
-+ {
-+ "name": "myvol",
-+ "changes": {},
-+ "result": False,
-+ "comment": "A volume with the same name but different backing store or format is existing",
-+ },
-+ )
-+ define_mock.assert_not_called()
-+
-+ # test case: no pool
-+ with patch.dict(
-+ virt.__salt__,
-+ {
-+ "virt.list_pools": MagicMock(return_value=[]),
-+ "virt.volume_infos": MagicMock(return_value={}),
-+ "virt.volume_define": define_mock,
-+ },
-+ ):
-+ self.assertRaisesRegex(
-+ SaltInvocationError,
-+ "Storage pool mypool not existing",
-+ virt.volume_defined,
-+ "mypool",
-+ "myvol",
-+ "1234",
-+ allocation="12345",
-+ format="qcow2",
-+ type="file",
-+ permissions={"mode": "0755", "owner": "123", "group": "456"},
-+ backing_store={"path": "/path/to/image", "format": "raw"},
-+ nocow=True,
-+ connection="test:///",
-+ username="jdoe",
-+ password="supersecret",
-+ )
--
-2.28.0
+2.29.2
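
The test_volume_defined hunks dropped above encode the full outcome matrix of the virt.volume_defined state. A minimal sketch of that decision tree as the assertions imply it; the helper name and flattened signature are illustrative, not Salt's actual implementation, and note that the requested size is taken as MiB while the reported capacity is in bytes (1234 MiB == 1293942784 bytes in the tests above):

    # Sketch of the outcomes asserted above; pools/infos stand in for the
    # virt.list_pools and virt.volume_infos results the state consults.
    from salt.exceptions import SaltInvocationError

    def volume_defined_outcome(pools, infos, pool, name, size, fmt, backing_store):
        if pool not in pools:
            raise SaltInvocationError("Storage pool {} not existing".format(pool))
        existing = infos.get(pool, {}).get(name)
        if existing is None:
            return True, "Volume {} defined in pool {}".format(name, pool)
        if existing["format"] != fmt or existing.get("backing_store") != backing_store:
            return False, "A volume with the same name but different backing store or format is existing"
        # size is in MiB, capacity in bytes; a mismatch is reported, not resized
        if int(existing["capacity"]) != int(size) * 1024 * 1024:
            return True, "The capacity of the volume is different, but no resize performed"
        return True, "volume is existing"
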
diff --git a/opensuse-3000.3-spacewalk-runner-parse-command-250.patch b/opensuse-3000.3-spacewalk-runner-parse-command-250.patch
index 4d12ea6..8b6fe4a 100644
--- a/opensuse-3000.3-spacewalk-runner-parse-command-250.patch
+++ b/opensuse-3000.3-spacewalk-runner-parse-command-250.patch
@@ -1,4 +1,4 @@
-From a7e1630d638a7e605a2372e923c0942c655480cd Mon Sep 17 00:00:00 2001
+From 140c237f7ffefe61258e9ab3c26d04bd1e8df78a Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 3 Jul 2020 14:08:03 +0200
Subject: [PATCH] openSUSE-3000.3 spacewalk runner parse command (#250)
@@ -20,9 +20,8 @@ Co-authored-by: Alexander Graul
Co-authored-by: Wayne Werner
---
changelog/57442.fixed | 1 +
- salt/runners/spacewalk.py | 6 +++-
tests/unit/runners/test_spacewalk.py | 50 ++++++++++++++++++++++++++++
- 3 files changed, 56 insertions(+), 1 deletion(-)
+ 2 files changed, 51 insertions(+)
create mode 100644 changelog/57442.fixed
create mode 100644 tests/unit/runners/test_spacewalk.py
@@ -33,23 +32,6 @@ index 0000000000..81f394880f
+++ b/changelog/57442.fixed
@@ -0,0 +1 @@
+Accept nested namespaces in spacewalk.api runner function.
-diff --git a/salt/runners/spacewalk.py b/salt/runners/spacewalk.py
-index 07ca9bd711..df4e568a28 100644
---- a/salt/runners/spacewalk.py
-+++ b/salt/runners/spacewalk.py
-@@ -172,7 +172,11 @@ def api(server, command, *args, **kwargs):
- log.error(err_msg)
- return {call: err_msg}
-
-- namespace, method = command.split('.')
-+ namespace, _, method = command.rpartition(".")
-+ if not namespace:
-+ return {
-+ call: "Error: command must use the following format: 'namespace.method'"
-+ }
- endpoint = getattr(getattr(client, namespace), method)
-
- try:
diff --git a/tests/unit/runners/test_spacewalk.py b/tests/unit/runners/test_spacewalk.py
new file mode 100644
index 0000000000..5b64069cc9
@@ -107,6 +89,6 @@ index 0000000000..5b64069cc9
+ ]
+ )
--
-2.27.0
+2.29.2
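
The salt/runners/spacewalk.py hunk dropped above replaced str.split with str.rpartition so that dotted namespaces parse. A quick illustration of the difference, using a made-up command value:

    command = "system.config.listValues"  # hypothetical nested namespace
    # The old "namespace, method = command.split('.')" raised ValueError here,
    # since split yields three parts for two assignment targets.
    namespace, _, method = command.rpartition(".")
    assert (namespace, method) == ("system.config", "listValues")
    # Without any dot, rpartition leaves namespace empty, which the patched
    # runner reports as a "namespace.method" format error.
    assert "badcommand".rpartition(".")[0] == ""
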
diff --git a/option-to-en-disable-force-refresh-in-zypper-215.patch b/option-to-en-disable-force-refresh-in-zypper-215.patch
index db2e3f2..be079b4 100644
--- a/option-to-en-disable-force-refresh-in-zypper-215.patch
+++ b/option-to-en-disable-force-refresh-in-zypper-215.patch
@@ -1,4 +1,4 @@
-From bb870d08a0268cb2be5309ee1a1b8facd2c885df Mon Sep 17 00:00:00 2001
+From c8c567a1540ae45012831718e945a1d54b328219 Mon Sep 17 00:00:00 2001
From: darix
Date: Tue, 12 May 2020 13:58:15 +0200
Subject: [PATCH] Option to en-/disable force refresh in zypper (#215)
@@ -22,103 +22,33 @@ The cmdline option will override the pillar as well.
Co-authored-by: Alexander Graul
---
- salt/modules/zypperpkg.py | 32 ++++++++++++++++++++--------
- tests/unit/modules/test_zypperpkg.py | 24 +++++++++++++++++++--
- 2 files changed, 45 insertions(+), 11 deletions(-)
+ salt/modules/zypperpkg.py | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index e3f802a911..ed8420f398 100644
+index c3342ab6d1..b75572f4ff 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -1279,25 +1279,39 @@ def mod_repo(repo, **kwargs):
+@@ -1348,7 +1348,7 @@ def mod_repo(repo, **kwargs):
return repo
--def refresh_db(root=None):
-- '''
-- Force a repository refresh by calling ``zypper refresh --force``, return a dict::
+-def refresh_db(force=None, root=None):
+def refresh_db(root=None, force=None):
-+ """
-+ Trigger a repository refresh by calling ``zypper refresh``. Refresh will run
-+ with ``--force`` if the "force=True" flag is passed on the CLI or
-+ ``refreshdb_force`` is set to ``true`` in the pillar. The CLI option
-+ overrides the pillar setting.
+ """
+ Trigger a repository refresh by calling ``zypper refresh``. Refresh will run
+ with ``--force`` if the "force=True" flag is passed on the CLI or
+@@ -1359,9 +1359,6 @@ def refresh_db(force=None, root=None):
-- {'': Bool}
-+ It will return a dict::
+ {'': Bool}
- root
- operate on a different root directory.
-+ {'': Bool}
-
+-
CLI Example:
.. code-block:: bash
-
-- salt '*' pkg.refresh_db
-- '''
-+ salt '*' pkg.refresh_db [force=true|false]
-+
-+ Pillar Example:
-+
-+ .. code-block:: yaml
-+
-+ zypper:
-+ refreshdb_force: false
-+ """
- # Remove rtag file to keep multiple refreshes from happening in pkg states
- salt.utils.pkg.clear_rtag(__opts__)
- ret = {}
-- out = __zypper__(root=root).refreshable.call('refresh', '--force')
-+ refresh_opts = ['refresh']
-+ if force is None:
-+ force = __pillar__.get('zypper', {}).get('refreshdb_force', True)
-+ if force:
-+ refresh_opts.append('--force')
-+ out = __zypper__(root=root).refreshable.call(*refresh_opts)
-
- for line in out.splitlines():
- if not line:
-diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 2a8e753b9d..9a5c59a857 100644
---- a/tests/unit/modules/test_zypperpkg.py
-+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -278,12 +278,32 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- 'stderr': '', 'stdout': '\n'.join(ref_out), 'retcode': 0
- }
-
-- with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=run_out)}):
-- with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
-+ zypper_mock = MagicMock(return_value=run_out)
-+ call_kwargs = {
-+ "output_loglevel": "trace",
-+ "python_shell": False,
-+ "env": {}
-+ }
-+ with patch.dict(zypper.__salt__, {"cmd.run_all": zypper_mock}):
-+ with patch.object(salt.utils.pkg, "clear_rtag", Mock()):
- result = zypper.refresh_db()
- self.assertEqual(result.get("openSUSE-Leap-42.1-LATEST"), False)
- self.assertEqual(result.get("openSUSE-Leap-42.1-Update"), False)
- self.assertEqual(result.get("openSUSE-Leap-42.1-Update-Non-Oss"), True)
-+ zypper_mock.assert_called_with(
-+ ["zypper", "--non-interactive", "refresh", "--force"],
-+ **call_kwargs
-+ )
-+ zypper.refresh_db(force=False)
-+ zypper_mock.assert_called_with(
-+ ["zypper", "--non-interactive", "refresh"],
-+ **call_kwargs
-+ )
-+ zypper.refresh_db(force=True)
-+ zypper_mock.assert_called_with(
-+ ["zypper", "--non-interactive", "refresh", "--force"],
-+ **call_kwargs
-+ )
-
- def test_info_installed(self):
- '''
--
-2.26.2
+2.29.2
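
The dropped hunk spells out refresh_db's precedence rule: an explicit force= argument wins, otherwise the zypper:refreshdb_force pillar decides, defaulting to True (the old unconditional --force behaviour). A standalone sketch of just that argument-building step, with the pillar passed in explicitly instead of read from __pillar__:

    def refresh_args(force, pillar):
        # Mirrors the removed hunk: CLI flag first, then pillar, default True.
        refresh_opts = ["refresh"]
        if force is None:
            force = pillar.get("zypper", {}).get("refreshdb_force", True)
        if force:
            refresh_opts.append("--force")
        return refresh_opts

    assert refresh_args(None, {}) == ["refresh", "--force"]
    assert refresh_args(None, {"zypper": {"refreshdb_force": False}}) == ["refresh"]
    assert refresh_args(True, {"zypper": {"refreshdb_force": False}}) == ["refresh", "--force"]
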
diff --git a/path-replace-functools.wraps-with-six.wraps-bsc-1177.patch b/path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
index 43d0476..5c6e4bc 100644
--- a/path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
+++ b/path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
@@ -1,4 +1,4 @@
-From 9707eab7452a94e64f77ece707c31c37e43e47f2 Mon Sep 17 00:00:00 2001
+From 8959cee470dc2080fab6b9559ca71b9c24c57f0d Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 20 Oct 2020 13:13:24 +0200
Subject: [PATCH] path: replace functools.wraps with six.wraps
@@ -14,11 +14,11 @@ This patch uses six.wraps, which adds this extra attribute.
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/salt/utils/decorators/path.py b/salt/utils/decorators/path.py
-index 8ee7fb1d11..a763c92d22 100644
+index 3682d0fb23..6763560122 100644
--- a/salt/utils/decorators/path.py
+++ b/salt/utils/decorators/path.py
@@ -4,11 +4,10 @@ Decorators for salt.utils.path
- '''
+ """
from __future__ import absolute_import, print_function, unicode_literals
-import functools
@@ -30,18 +30,18 @@ index 8ee7fb1d11..a763c92d22 100644
def which(exe):
-@@ -16,7 +15,7 @@ def which(exe):
- Decorator wrapper for salt.utils.path.which
- '''
+@@ -17,7 +16,7 @@ def which(exe):
+ """
+
def wrapper(function):
- @functools.wraps(function)
+ @salt.ext.six.wraps(function)
def wrapped(*args, **kwargs):
if salt.utils.path.which(exe) is None:
raise CommandNotFoundError(
-@@ -34,7 +33,7 @@ def which_bin(exes):
- Decorator wrapper for salt.utils.path.which_bin
- '''
+@@ -36,7 +35,7 @@ def which_bin(exes):
+ """
+
def wrapper(function):
- @functools.wraps(function)
+ @salt.ext.six.wraps(function)
@@ -49,6 +49,6 @@ index 8ee7fb1d11..a763c92d22 100644
if salt.utils.path.which_bin(exes) is None:
raise CommandNotFoundError(
--
-2.28.0
+2.29.2
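
The motivation for the patch above: on Python 2, functools.wraps does not set __wrapped__ on the wrapper, while six.wraps does (Python 3's functools.wraps sets it by itself). A minimal sketch of the decorator pattern, with the salt.utils.path.which lookup stubbed out:

    import six  # Salt vendors this as salt.ext.six

    def which(exe):
        # Simplified from salt/utils/decorators/path.py; the real decorator
        # raises CommandNotFoundError when salt.utils.path.which(exe) is None.
        def wrapper(function):
            @six.wraps(function)
            def wrapped(*args, **kwargs):
                return function(*args, **kwargs)
            return wrapped
        return wrapper

    @which("ls")
    def list_files():
        return "listing"

    # __wrapped__ reaches the undecorated function on both Python 2 and 3.
    assert list_files.__wrapped__() == "listing"
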
diff --git a/pkgrepo-support-python-2.7-function-call-295.patch b/pkgrepo-support-python-2.7-function-call-295.patch
index e63b2a1..2a3a174 100644
--- a/pkgrepo-support-python-2.7-function-call-295.patch
+++ b/pkgrepo-support-python-2.7-function-call-295.patch
@@ -1,4 +1,4 @@
-From a487f19e6a0ed6b4b7e987e5b6d90852050eb2d9 Mon Sep 17 00:00:00 2001
+From 4a733724bd1af4f91ac575bfd0bc6fa0ac8e7831 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Mon, 16 Nov 2020 10:37:29 +0100
Subject: [PATCH] pkgrepo: support Python 2.7 function call (#295)
@@ -8,10 +8,10 @@ Subject: [PATCH] pkgrepo: support Python 2.7 function call (#295)
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
-index 6c42d17d32..504f1425c7 100644
+index d734bb9de9..852fc015b6 100644
--- a/salt/states/pkgrepo.py
+++ b/salt/states/pkgrepo.py
-@@ -818,7 +818,7 @@ def migrated(name, keys=True, drop=False, method=None, **kwargs):
+@@ -853,7 +853,7 @@ def migrated(name, keys=True, drop=False, method=None, **kwargs):
if method == "copy":
_copy_repository_to(name)
else:
@@ -21,6 +21,6 @@ index 6c42d17d32..504f1425c7 100644
__salt__["pkg.del_repo"](repo, root=name)
--
-2.28.0
+2.29.2
diff --git a/prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch b/prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
index d620afc..e59c434 100644
--- a/prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
+++ b/prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
@@ -1,32 +1,55 @@
-From 73afbe5fe00c47427a032f8d94c113e1375e32ea Mon Sep 17 00:00:00 2001
+From bdeb632c49a0002d2b492816bcca112e371cc147 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 8 Jul 2019 14:46:10 +0100
Subject: [PATCH] Prevent ansiblegate unit tests to fail on Ubuntu
---
- tests/unit/modules/test_ansiblegate.py | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
+ tests/unit/modules/test_ansiblegate.py | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py
-index b7b43efda4..05dff4a4fa 100644
+index 61aad44b5c..6724d37c40 100644
--- a/tests/unit/modules/test_ansiblegate.py
+++ b/tests/unit/modules/test_ansiblegate.py
-@@ -169,9 +169,11 @@ description:
- with patch('salt.utils.timed_subprocess.TimedProc', proc):
- ret = _ansible_module_caller.call("one.two.three", "arg_1", kwarg1="foobar")
- if six.PY3:
-- proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}'], stdout=-1, timeout=1200)
- proc.assert_any_call(['python3', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200)
- else:
-- proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}'], stdout=-1, timeout=1200)
- proc.assert_any_call(['python', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200)
+@@ -184,18 +184,28 @@ description:
+ ret = _ansible_module_caller.call(
+ "one.two.three", "arg_1", kwarg1="foobar"
+ )
+- proc.assert_any_call(
+- [
+- "echo",
+- '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}',
+- ],
+- stdout=-1,
+- timeout=1200,
+- )
+ proc.assert_any_call(
+ ["python3", "foofile"],
+ stdin=ANSIBLE_MODULE_ARGS,
+ stdout=-1,
+ timeout=1200,
+ )
+ try:
-+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}'], stdout=-1, timeout=1200)
++ proc.assert_any_call(
++ [
++ "echo",
++ '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}',
++ ],
++ stdout=-1,
++ timeout=1200,
++ )
+ except AssertionError:
-+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}'], stdout=-1, timeout=1200)
++ proc.assert_any_call(
++ [
++ "echo",
++ '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}',
++ ],
++ stdout=-1,
++ timeout=1200,
++ )
assert ret == {"completed": True, "timeout": 1200}
--
-2.16.4
+2.29.2
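
The try/except AssertionError added above is needed because the JSON handed to echo is serialized from a kwargs dict, and on Python 2 dict ordering is not guaranteed, so the two keys may come out in either order. Comparing parsed JSON rather than raw strings would make the check order-insensitive; a sketch:

    import json

    expected = {"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}
    # Both serializations seen in the patch decode to the same mapping.
    for candidate in (
        '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}',
        '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}',
    ):
        assert json.loads(candidate) == expected
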
diff --git a/prevent-import-errors-when-running-test_btrfs-unit-t.patch b/prevent-import-errors-when-running-test_btrfs-unit-t.patch
index 399bdf2..a0e376e 100644
--- a/prevent-import-errors-when-running-test_btrfs-unit-t.patch
+++ b/prevent-import-errors-when-running-test_btrfs-unit-t.patch
@@ -1,4 +1,4 @@
-From 116141f87df4179962dcd643d914260121447712 Mon Sep 17 00:00:00 2001
+From a3869f8e70bc8b03c49602f2ac58b0551f462cf5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 16 Sep 2020 12:40:47 +0100
@@ -6,22 +6,26 @@ Subject: [PATCH] Prevent import errors when running test_btrfs unit
tests
---
- tests/unit/states/test_btrfs.py | 1 +
- 1 file changed, 1 insertion(+)
+ tests/unit/states/test_btrfs.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
-index c722630aef..c54579e681 100644
+index dceb971aa1..7bf91598dd 100644
--- a/tests/unit/states/test_btrfs.py
+++ b/tests/unit/states/test_btrfs.py
-@@ -37,6 +37,7 @@ from tests.support.mock import (
-
- from salt.exceptions import CommandExecutionError
+@@ -26,9 +26,10 @@
+ """
+ import pytest
import salt.states.btrfs as btrfs
+import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
- import pytest
--
-2.28.0
+2.29.2
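The added import matters because decorator arguments are evaluated while a test module is being imported, so a missing module fails the whole file at collection time, before any test runs. A standalone sketch of the mechanism, with unittest standing in for the Salt test classes and a hypothetical skip reason:

.. code-block:: python

    import sys
    import unittest

    # The skipIf argument runs at import time; if it referenced a module that
    # was never imported, merely collecting this file would raise NameError.
    @unittest.skipIf(sys.platform.startswith("win"), "btrfs states are Linux-only")
    class DemoTest(unittest.TestCase):
        def test_noop(self):
            self.assertTrue(True)

    if __name__ == "__main__":
        unittest.main()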
diff --git a/prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch b/prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
index 3fd06d9..663162d 100644
--- a/prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
+++ b/prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
@@ -1,4 +1,4 @@
-From 217c5ba75b5de813ddf769e7eeebe4027c1c9a70 Mon Sep 17 00:00:00 2001
+From 4e6cc0c401bd6c9e47b324fc2df99e3bd679da33 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 22 Jan 2020 08:19:55 +0000
@@ -6,63 +6,126 @@ Subject: [PATCH] Prevent logging deadlock on salt-api subprocesses
(bsc#1159284)
---
- salt/_logging/impl.py | 60 ++++++++++++++++++++---------------
- salt/client/ssh/__init__.py | 16 +++++++---
- salt/client/ssh/client.py | 9 +++++-
- salt/client/ssh/wrapper/cp.py | 2 +-
- salt/loader.py | 2 +-
- salt/utils/lazy.py | 5 ++-
- 6 files changed, 61 insertions(+), 33 deletions(-)
+ salt/_logging/impl.py | 127 +++++++++++++++-------------------
+ salt/client/ssh/__init__.py | 12 +++-
+ salt/client/ssh/client.py | 21 +++---
+ salt/client/ssh/wrapper/cp.py | 10 +--
+ salt/loader.py | 2 +-
+ salt/utils/lazy.py | 10 +--
+ 6 files changed, 85 insertions(+), 97 deletions(-)
diff --git a/salt/_logging/impl.py b/salt/_logging/impl.py
-index 347259bcf506705e2ea1a24da030a7132eb8a527..fdfabf6d3b16619350107bd01f2c3a606fb93262 100644
+index 5a6292b445..2f15bf0025 100644
--- a/salt/_logging/impl.py
+++ b/salt/_logging/impl.py
-@@ -19,6 +19,7 @@ PROFILE = logging.PROFILE = 15
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ salt._logging.impl
+ ~~~~~~~~~~~~~~~~~~
+@@ -6,15 +5,12 @@
+ Salt's logging implementation classes/functionality
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import re
+ import sys
+ import types
+
+-# Import 3rd-party libs
+ import salt.ext.six as six
+
+ # Let's define these custom logging levels before importing the salt._logging.mixins
+@@ -23,8 +19,8 @@ PROFILE = logging.PROFILE = 15
TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000
+DEBUG = logging.DEBUG = 10
- # Import Salt libs
- from salt._logging.handlers import StreamHandler
-@@ -187,11 +188,11 @@ class SaltLoggingClass(six.with_metaclass(LoggingMixinMeta, LOGGING_LOGGER_CLASS
- '''
- instance = super(SaltLoggingClass, cls).__new__(cls)
+-# Import Salt libs
+ from salt._logging.handlers import StreamHandler # isort:skip
+
+ # from salt._logging.handlers import SysLogHandler # isort:skip
+@@ -52,7 +48,7 @@ LOG_LEVELS = {
+ "warning": logging.WARNING,
+ }
+
+-LOG_VALUES_TO_LEVELS = dict((v, k) for (k, v) in LOG_LEVELS.items())
++LOG_VALUES_TO_LEVELS = {v: k for (k, v) in LOG_LEVELS.items()}
+
+ LOG_COLORS = {
+ "levels": {
+@@ -96,9 +92,7 @@ LOG_COLORS = {
+ }
+
+ # Make a list of log level names sorted by log level
+-SORTED_LEVEL_NAMES = [
+- l[0] for l in sorted(six.iteritems(LOG_LEVELS), key=lambda x: x[1])
+-]
++SORTED_LEVEL_NAMES = [l[0] for l in sorted(LOG_LEVELS.items(), key=lambda x: x[1])]
+
+ MODNAME_PATTERN = re.compile(r"(?P<name>%%\(name\)(?:\-(?P<digits>[\d]+))?s)")
+
+@@ -168,8 +162,7 @@ def set_log_record_factory(factory):
+ Set the logging log record factory
+ """
+ get_log_record_factory.__factory__ = factory
+- if not six.PY2:
+- logging.setLogRecordFactory(factory)
++ logging.setLogRecordFactory(factory)
+
+
+ set_log_record_factory(SaltLogRecord)
+@@ -180,7 +173,7 @@ LOGGING_LOGGER_CLASS = logging.getLoggerClass()
+
+
+ class SaltLoggingClass(
+- six.with_metaclass(LoggingMixinMeta, LOGGING_LOGGER_CLASS, NewStyleClassMixin)
++ LOGGING_LOGGER_CLASS, NewStyleClassMixin, metaclass=LoggingMixinMeta
+ ):
+ def __new__(cls, *args):
+ """
+@@ -194,13 +187,11 @@ class SaltLoggingClass(
+ logging.getLogger(__name__)
+
+ """
+- instance = super(SaltLoggingClass, cls).__new__(cls)
++ instance = super().__new__(cls)
- try:
-- max_logger_length = len(max(
-- list(logging.Logger.manager.loggerDict), key=len
-- ))
+- max_logger_length = len(
+- max(list(logging.Logger.manager.loggerDict), key=len)
+- )
- for handler in logging.root.handlers:
-+ max_logger_length = len(max(
-+ list(logging.Logger.manager.loggerDict), key=len
-+ ))
++ max_logger_length = len(max(list(logging.Logger.manager.loggerDict), key=len))
+ for handler in logging.root.handlers:
+ try:
- if handler in (LOGGING_NULL_HANDLER,
- LOGGING_STORE_HANDLER,
- LOGGING_TEMP_HANDLER):
-@@ -210,18 +211,15 @@ class SaltLoggingClass(six.with_metaclass(LoggingMixinMeta, LOGGING_LOGGER_CLASS
+ if handler in (
+ LOGGING_NULL_HANDLER,
+ LOGGING_STORE_HANDLER,
+@@ -221,18 +212,15 @@ class SaltLoggingClass(
match = MODNAME_PATTERN.search(fmt)
if not match:
# Not matched. Release handler and return.
- handler.release()
return instance
- if 'digits' not in match.groupdict():
+ if "digits" not in match.groupdict():
# No digits group. Release handler and return.
- handler.release()
return instance
- digits = match.group('digits')
+ digits = match.group("digits")
if not digits or not (digits and digits.isdigit()):
# No valid digits. Release handler and return.
- handler.release()
return instance
if int(digits) < max_logger_length:
-@@ -233,9 +231,14 @@ class SaltLoggingClass(six.with_metaclass(LoggingMixinMeta, LOGGING_LOGGER_CLASS
+@@ -243,9 +231,14 @@ class SaltLoggingClass(
)
handler.setFormatter(formatter)
handler.release()
@@ -79,10 +142,19 @@ index 347259bcf506705e2ea1a24da030a7132eb8a527..fdfabf6d3b16619350107bd01f2c3a60
+ pass
return instance
- def _log(self, level, msg, args, exc_info=None,
-@@ -278,20 +281,26 @@ class SaltLoggingClass(six.with_metaclass(LoggingMixinMeta, LOGGING_LOGGER_CLASS
+ def _log(
+@@ -279,7 +272,7 @@ class SaltLoggingClass(
+ "Only one of 'exc_info' and 'exc_info_on_loglevel' is " "permitted"
+ )
+ if exc_info_on_loglevel is not None:
+- if isinstance(exc_info_on_loglevel, six.string_types):
++ if isinstance(exc_info_on_loglevel, str):
+ exc_info_on_loglevel = LOG_LEVELS.get(
+ exc_info_on_loglevel, logging.ERROR
+ )
+@@ -295,31 +288,37 @@ class SaltLoggingClass(
else:
- extra['exc_info_on_loglevel'] = exc_info_on_loglevel
+ extra["exc_info_on_loglevel"] = exc_info_on_loglevel
- if sys.version_info < (3,):
- LOGGING_LOGGER_CLASS._log(
@@ -90,13 +162,24 @@ index 347259bcf506705e2ea1a24da030a7132eb8a527..fdfabf6d3b16619350107bd01f2c3a60
- )
- elif sys.version_info < (3, 8):
- LOGGING_LOGGER_CLASS._log(
-- self, level, msg, args, exc_info=exc_info, extra=extra,
-- stack_info=stack_info
+- self,
+- level,
+- msg,
+- args,
+- exc_info=exc_info,
+- extra=extra,
+- stack_info=stack_info,
- )
- else:
- LOGGING_LOGGER_CLASS._log(
-- self, level, msg, args, exc_info=exc_info, extra=extra,
-- stack_info=stack_info, stacklevel=stacklevel
+- self,
+- level,
+- msg,
+- args,
+- exc_info=exc_info,
+- extra=extra,
+- stack_info=stack_info,
+- stacklevel=stacklevel,
- )
+ try:
+ logging._acquireLock()
@@ -106,89 +189,156 @@ index 347259bcf506705e2ea1a24da030a7132eb8a527..fdfabf6d3b16619350107bd01f2c3a60
+ )
+ elif sys.version_info < (3, 8):
+ LOGGING_LOGGER_CLASS._log(
-+ self, level, msg, args, exc_info=exc_info, extra=extra,
-+ stack_info=stack_info
++ self,
++ level,
++ msg,
++ args,
++ exc_info=exc_info,
++ extra=extra,
++ stack_info=stack_info,
+ )
+ else:
+ LOGGING_LOGGER_CLASS._log(
-+ self, level, msg, args, exc_info=exc_info, extra=extra,
-+ stack_info=stack_info, stacklevel=stacklevel
++ self,
++ level,
++ msg,
++ args,
++ exc_info=exc_info,
++ extra=extra,
++ stack_info=stack_info,
++ stacklevel=stacklevel,
+ )
+ except:
+ pass
+ finally:
+ logging._releaseLock()
- def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
- func=None, extra=None, sinfo=None):
-@@ -393,6 +402,7 @@ if logging.getLoggerClass() is not SaltLoggingClass:
- logging.addLevelName(PROFILE, 'PROFILE')
- logging.addLevelName(TRACE, 'TRACE')
- logging.addLevelName(GARBAGE, 'GARBAGE')
-+ logging.addLevelName(DEBUG, 'DEBUG')
+ def makeRecord(
+ self,
+@@ -357,7 +356,7 @@ class SaltLoggingClass(
+ except NameError:
+ salt_system_encoding = "utf-8"
+
+- if isinstance(msg, six.string_types) and not isinstance(msg, six.text_type):
++ if isinstance(msg, str) and not isinstance(msg, str):
+ try:
+ _msg = msg.decode(salt_system_encoding, "replace")
+ except UnicodeDecodeError:
+@@ -367,9 +366,7 @@ class SaltLoggingClass(
+
+ _args = []
+ for item in args:
+- if isinstance(item, six.string_types) and not isinstance(
+- item, six.text_type
+- ):
++ if isinstance(item, str) and not isinstance(item, str):
+ try:
+ _args.append(item.decode(salt_system_encoding, "replace"))
+ except UnicodeDecodeError:
+@@ -378,24 +375,9 @@ class SaltLoggingClass(
+ _args.append(item)
+ _args = tuple(_args)
+
+- if six.PY2:
+- # Recreate what's done for Py >= 3.5
+- _log_record_factory = get_log_record_factory()
+- logrecord = _log_record_factory(
+- name, level, fn, lno, _msg, _args, exc_info, func
+- )
+-
+- if extra is not None:
+- for key in extra:
+- if (key in ["message", "asctime"]) or (key in logrecord.__dict__):
+- raise KeyError(
+- "Attempt to overwrite '{}' in LogRecord".format(key)
+- )
+- logrecord.__dict__[key] = extra[key]
+- else:
+- logrecord = LOGGING_LOGGER_CLASS.makeRecord(
+- self, name, level, fn, lno, _msg, _args, exc_info, func, sinfo
+- )
++ logrecord = LOGGING_LOGGER_CLASS.makeRecord(
++ self, name, level, fn, lno, _msg, _args, exc_info, func, sinfo
++ )
+
+ if exc_info_on_loglevel is not None:
+ # Let's add some custom attributes to the LogRecord class in order
+@@ -419,6 +401,7 @@ if logging.getLoggerClass() is not SaltLoggingClass:
+ logging.addLevelName(PROFILE, "PROFILE")
+ logging.addLevelName(TRACE, "TRACE")
+ logging.addLevelName(GARBAGE, "GARBAGE")
++ logging.addLevelName(DEBUG, "DEBUG")
# ----- REMOVE ON REFACTORING COMPLETE -------------------------------------------------------------------------->
if not logging.root.handlers:
diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
-index d9e91b0f50bfaa76d519fcaa4bdc868bce80f554..e8aad093e0f6df32faa16a838f1db2c6746e1b8e 100644
+index ef9eb0c07e..6570fba5b1 100644
--- a/salt/client/ssh/__init__.py
+++ b/salt/client/ssh/__init__.py
-@@ -520,7 +520,9 @@ class SSH(object):
- mine=mine,
- **target)
- ret = {'id': single.id}
+@@ -532,7 +532,9 @@ class SSH:
+ **target
+ )
+ ret = {"id": single.id}
+ logging._acquireLock()
stdout, stderr, retcode = single.run()
+ logging._releaseLock()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
-@@ -586,10 +588,16 @@ class SSH(object):
- self.targets[host],
- mine,
- )
-- routine = Process(
-- target=self.handle_routine,
-- args=args)
+@@ -600,8 +602,14 @@ class SSH:
+ self.targets[host],
+ mine,
+ )
+- routine = Process(target=self.handle_routine, args=args)
- routine.start()
+ try:
+ logging._acquireLock()
-+ routine = Process(
-+ target=self.handle_routine,
-+ args=args)
++ routine = Process(target=self.handle_routine, args=args)
+ routine.start()
+ except:
+ pass
+ finally:
+ logging._releaseLock()
- running[host] = {'thread': routine}
+ running[host] = {"thread": routine}
continue
ret = {}
diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py
-index e8e634ca12d85f1e1a9e047f43eac8c041cc5666..d4a89cf4fbbde5282597dc6b82c66dde4288edf1 100644
+index 55436366cf..d2dbdeb00e 100644
--- a/salt/client/ssh/client.py
+++ b/salt/client/ssh/client.py
-@@ -6,6 +6,8 @@ import os
+@@ -1,14 +1,10 @@
+-# -*- coding: utf-8 -*-
+-
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+-
import copy
import logging
++import multiprocessing
+ import os
import random
+import time
-+import multiprocessing
- # Import Salt libs
+-# Import Salt libs
import salt.config
-@@ -15,6 +17,7 @@ from salt.exceptions import SaltClientError # Temporary
+ import salt.syspaths as syspaths
+ import salt.utils.args
+@@ -16,8 +12,10 @@ from salt.exceptions import SaltClientError # Temporary
log = logging.getLogger(__name__)
+_LOCK = multiprocessing.Lock()
- class SSHClient(object):
- '''
-@@ -61,7 +64,11 @@ class SSHClient(object):
- opts['selected_target_option'] = tgt_type
- opts['tgt'] = tgt
- opts['arg'] = arg
+-class SSHClient(object):
++
++class SSHClient:
+ """
+ Create a client object for executing routines via the salt-ssh backend
+
+@@ -60,7 +58,11 @@ class SSHClient(object):
+ opts["selected_target_option"] = tgt_type
+ opts["tgt"] = tgt
+ opts["arg"] = arg
- return salt.client.ssh.SSH(opts)
+ _LOCK.acquire()
+ ret = salt.client.ssh.SSH(opts)
@@ -197,60 +347,122 @@ index e8e634ca12d85f1e1a9e047f43eac8c041cc5666..d4a89cf4fbbde5282597dc6b82c66dde
+ return ret
def cmd_iter(
- self,
+ self,
+@@ -80,8 +82,7 @@ class SSHClient(object):
+ .. versionadded:: 2015.5.0
+ """
+ ssh = self._prep_ssh(tgt, fun, arg, timeout, tgt_type, kwarg, **kwargs)
+- for ret in ssh.run_iter(jid=kwargs.get("jid", None)):
+- yield ret
++ yield from ssh.run_iter(jid=kwargs.get("jid", None))
+
+ def cmd(
+ self, tgt, fun, arg=(), timeout=None, tgt_type="glob", kwarg=None, **kwargs
diff --git a/salt/client/ssh/wrapper/cp.py b/salt/client/ssh/wrapper/cp.py
-index 894e62f94c87ae5b68c1f82fc3e80ec8f25ac118..9bf0c150a071b4bfa780fe0293e3d8e93ab8e6ef 100644
+index e369d8475f..eb37ae971b 100644
--- a/salt/client/ssh/wrapper/cp.py
+++ b/salt/client/ssh/wrapper/cp.py
-@@ -4,7 +4,7 @@ Wrap the cp module allowing for managed ssh file transfers
- '''
- # Import Python libs
- from __future__ import absolute_import, print_function
+@@ -1,15 +1,11 @@
+-# -*- coding: utf-8 -*-
+ """
+ Wrap the cp module allowing for managed ssh file transfers
+ """
+-# Import Python libs
+-from __future__ import absolute_import, print_function
+
-import logging
-+import salt.log.setup as logging
import os
- # Import salt libs
+-# Import salt libs
+ import salt.client.ssh
++import salt.log.setup as logging
+ import salt.utils.files
+ import salt.utils.stringutils
+ import salt.utils.templates
+@@ -107,7 +103,7 @@ def _render_filenames(path, dest, saltenv, template):
+ if template not in salt.utils.templates.TEMPLATE_REGISTRY:
+ raise CommandExecutionError(
+ "Attempted to render file paths with unavailable engine "
+- "{0}".format(template)
++ "{}".format(template)
+ )
+
+ kwargs = {}
+@@ -133,7 +129,7 @@ def _render_filenames(path, dest, saltenv, template):
+ if not data["result"]:
+ # Failed to render the template
+ raise CommandExecutionError(
+- "Failed to render file path with error: {0}".format(data["data"])
++ "Failed to render file path with error: {}".format(data["data"])
+ )
+ else:
+ return data["data"]
diff --git a/salt/loader.py b/salt/loader.py
-index 54dadb0b513dbaa4914b0d4b1d343dde709699ad..b824a70a0cc40128f3271f70f676f1551194236c 100644
+index 9a9c11b401..cfcd18f6f3 100644
--- a/salt/loader.py
+++ b/salt/loader.py
-@@ -11,7 +11,7 @@ import os
+@@ -8,7 +8,6 @@ import functools
+ import importlib.machinery # pylint: disable=no-name-in-module,import-error
+ import importlib.util # pylint: disable=no-name-in-module,import-error
+ import inspect
+-import logging
+ import os
import re
import sys
- import time
--import logging
+@@ -23,6 +22,7 @@ from zipimport import zipimporter
+ import salt.config
+ import salt.defaults.events
+ import salt.defaults.exitcodes
+import salt.log.setup as logging
- import inspect
- import tempfile
- import functools
+ import salt.syspaths
+ import salt.utils.args
+ import salt.utils.context
diff --git a/salt/utils/lazy.py b/salt/utils/lazy.py
-index 3cd6489d2d8c50ec4e6eb70c50407f1084db377b..bb4b38e1a3cfa05945cd438fc9d30e7c47c3391b 100644
+index 8fc538164a..c828dd2c32 100644
--- a/salt/utils/lazy.py
+++ b/salt/utils/lazy.py
-@@ -5,7 +5,8 @@ Lazily-evaluated data structures, primarily used by Salt's loader
+@@ -1,15 +1,13 @@
+-# -*- coding: utf-8 -*-
+ """
+ Lazily-evaluated data structures, primarily used by Salt's loader
+ """
+
+-# Import Python Libs
+-from __future__ import absolute_import, unicode_literals
- # Import Python Libs
- from __future__ import absolute_import, unicode_literals
-import logging
-+import salt.log.setup as logging
+import time
- import salt.exceptions
+ from collections.abc import MutableMapping
- try:
-@@ -102,9 +103,11 @@ class LazyDict(MutableMapping):
+ import salt.exceptions
++import salt.log.setup as logging
+
+ log = logging.getLogger(__name__)
+
+@@ -81,7 +79,7 @@ class LazyDict(MutableMapping):
+
+        Override this to return a more meaningful error message if possible
+ """
+- return "'{0}' is not available.".format(function_name)
++ return "'{}' is not available.".format(function_name)
+
+ def __setitem__(self, key, val):
+ self._dict[key] = val
+@@ -100,11 +98,13 @@ class LazyDict(MutableMapping):
# load the item
if self._load(key):
- log.debug('LazyLoaded %s', key)
+ log.debug("LazyLoaded %s", key)
+ time.sleep(0.0001)
return self._dict[key]
else:
- log.debug('Could not LazyLoad %s: %s', key, self.missing_fun_string(key))
+ log.debug(
+ "Could not LazyLoad %s: %s", key, self.missing_fun_string(key)
+ )
+ time.sleep(0.0001)
raise KeyError(key)
else:
return self._dict[key]
--
-2.23.0
+2.29.2
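The recurring pattern in this patch is to hold the logging module's internal lock while forking, so a salt-api subprocess is never created while another thread is mid-write and the child cannot inherit a permanently held lock. A minimal sketch of that guard, using the same private CPython helpers the patch calls (present up to Python 3.12):

.. code-block:: python

    import logging
    import multiprocessing

    def worker():
        logging.getLogger("child").warning("forked while no logging lock was held")

    if __name__ == "__main__":
        logging.basicConfig()
        logging._acquireLock()  # private CPython helper, mirrors the patch
        try:
            proc = multiprocessing.Process(target=worker)
            proc.start()
        finally:
            logging._releaseLock()
        proc.join()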
diff --git a/prevent-systemd-run-description-issue-when-running-a.patch b/prevent-systemd-run-description-issue-when-running-a.patch
index 89ba6f8..59e0125 100644
--- a/prevent-systemd-run-description-issue-when-running-a.patch
+++ b/prevent-systemd-run-description-issue-when-running-a.patch
@@ -1,4 +1,4 @@
-From 29316e1e73972d7c30a7b125a27198fefc6b2fd7 Mon Sep 17 00:00:00 2001
+From c363b97dcddc9cb16102e5bfb9f3a9f40892704a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 30 Sep 2019 12:06:08 +0100
@@ -7,36 +7,37 @@ Subject: [PATCH] Prevent systemd-run description issue when running
---
salt/modules/aptpkg.py | 2 +-
- tests/unit/modules/test_aptpkg.py | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
+ tests/unit/modules/test_aptpkg.py | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index bafad40efe..2835d32263 100644
+index 658a16da4c..db0480b45d 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -168,7 +168,7 @@ def _call_apt(args, scope=True, **kwargs):
- '''
- cmd = []
- if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
-- cmd.extend(['systemd-run', '--scope', '--description "{0}"'.format(__name__)])
-+ cmd.extend(['systemd-run', '--scope', '--description', '"{0}"'.format(__name__)])
+@@ -160,7 +160,7 @@ def _call_apt(args, scope=True, **kwargs):
+ and salt.utils.systemd.has_scope(__context__)
+ and __salt__["config.get"]("systemd.scope", True)
+ ):
+- cmd.extend(["systemd-run", "--scope", '--description "{}"'.format(__name__)])
++ cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
cmd.extend(args)
- params = {'output_loglevel': 'trace',
+ params = {
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
-index 88eed062c4..2224aba9a1 100644
+index c3769a7df1..77d8b84896 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
-@@ -645,7 +645,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(aptpkg.__salt__, {'cmd.run_all': MagicMock(), 'config.get': MagicMock(return_value=True)}):
- aptpkg._call_apt(['apt-get', 'purge', 'vim']) # pylint: disable=W0106
- aptpkg.__salt__['cmd.run_all'].assert_called_once_with(
-- ['systemd-run', '--scope', '--description "salt.modules.aptpkg"', 'apt-get', 'purge', 'vim'], env={},
-+ ['systemd-run', '--scope', '--description', '"salt.modules.aptpkg"', 'apt-get', 'purge', 'vim'], env={},
- output_loglevel='trace', python_shell=False)
-
- def test_call_apt_with_kwargs(self):
+@@ -896,7 +896,8 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
+ [
+ "systemd-run",
+ "--scope",
+- '--description "salt.modules.aptpkg"',
++ "--description",
++ '"salt.modules.aptpkg"',
+ "apt-get",
+ "purge",
+ "vim",
--
-2.16.4
+2.29.2
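The split works because with python_shell=False every list element becomes exactly one argv entry, so the old '--description "salt.modules.aptpkg"' reached systemd-run as a single malformed token. A standalone sketch that makes the difference visible by echoing argv back through the interpreter:

.. code-block:: python

    import subprocess
    import sys

    code = "import sys; print(sys.argv[1:])"
    # One list element -> one argv token, embedded quotes and all:
    subprocess.run([sys.executable, "-c", code, '--description "x"'])
    # Flag and value as separate elements -> two clean argv tokens:
    subprocess.run([sys.executable, "-c", code, "--description", "x"])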
diff --git a/prevent-test_mod_del_repo_multiline_values-to-fail.patch b/prevent-test_mod_del_repo_multiline_values-to-fail.patch
index af6aae0..9850070 100644
--- a/prevent-test_mod_del_repo_multiline_values-to-fail.patch
+++ b/prevent-test_mod_del_repo_multiline_values-to-fail.patch
@@ -1,18 +1,18 @@
-From c820b9e652474b4866fe099a709b52fe3b715ce9 Mon Sep 17 00:00:00 2001
+From 9cb1618a66e691d2cc53edcf64f475e829e701a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 27 Nov 2019 15:41:57 +0000
Subject: [PATCH] Prevent test_mod_del_repo_multiline_values from failing
---
- tests/integration/modules/test_pkg.py | 20 ++++++++++++++------
- 1 file changed, 14 insertions(+), 6 deletions(-)
+ tests/integration/modules/test_pkg.py | 28 +++++++++++++++++++++------
+ 1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
-index 61748f9477..6f3767bfbd 100644
+index e32013800d..3ece73074b 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
-@@ -167,17 +167,24 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+@@ -179,17 +179,30 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
enabled=enabled,
failovermethod=failovermethod,
)
@@ -22,36 +22,44 @@ index 61748f9477..6f3767bfbd 100644
- repo_info = ret[next(iter(ret))]
+ repo_info = {repo: ret}
self.assertIn(repo, repo_info)
-- self.assertEqual(repo_info[repo]['baseurl'], my_baseurl)
-+ if os_grain == 'SUSE':
-+ self.assertEqual(repo_info[repo]['baseurl'], expected_get_repo_baseurl_zypp)
+- self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
++ if os_grain == "SUSE":
++ self.assertEqual(
++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
++ )
+ else:
-+ self.assertEqual(repo_info[repo]['baseurl'], my_baseurl)
- ret = self.run_function('pkg.get_repo', [repo])
-- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
-+ if os_grain == 'SUSE':
-+ self.assertEqual(repo_info[repo]['baseurl'], expected_get_repo_baseurl_zypp)
++ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
+ ret = self.run_function("pkg.get_repo", [repo])
+- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
++ if os_grain == "SUSE":
++ self.assertEqual(
++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
++ )
+ else:
-+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
- self.run_function('pkg.mod_repo', [repo])
- ret = self.run_function('pkg.get_repo', [repo])
-- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
-+ if os_grain == 'SUSE':
-+ self.assertEqual(repo_info[repo]['baseurl'], expected_get_repo_baseurl_zypp)
++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
+ self.run_function("pkg.mod_repo", [repo])
+ ret = self.run_function("pkg.get_repo", [repo])
+- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
++ if os_grain == "SUSE":
++ self.assertEqual(
++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
++ )
+ else:
-+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl)
++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
finally:
if repo is not None:
- self.run_function('pkg.del_repo', [repo])
-@@ -191,6 +198,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
- try:
- if os_grain in ['CentOS', 'RedHat', 'SUSE']:
- my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/'
-+ expected_get_repo_baseurl_zypp = 'http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/'
- expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/'
- major_release = int(
- self.run_function(
+ self.run_function("pkg.del_repo", [repo])
+@@ -205,6 +218,9 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
+ my_baseurl = (
+ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
+ )
++ expected_get_repo_baseurl_zypp = (
++ "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
++ )
+ expected_get_repo_baseurl = (
+ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
+ )
--
-2.16.4
+2.29.2
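The new expected_get_repo_baseurl_zypp value is simply the percent-encoded form of the newline-plus-space separator that zypper hands back on SUSE; the stdlib reproduces the encoding exactly:

.. code-block:: python

    from urllib.parse import quote

    # The "\n " separator comes back URL-quoted, hence "%0A%20".
    assert quote("\n ", safe="") == "%0A%20"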
diff --git a/provide-the-missing-features-required-for-yomi-yet-o.patch b/provide-the-missing-features-required-for-yomi-yet-o.patch
index 5111adf..93764ba 100644
--- a/provide-the-missing-features-required-for-yomi-yet-o.patch
+++ b/provide-the-missing-features-required-for-yomi-yet-o.patch
@@ -1,523 +1,59 @@
-From d8e0602b36fcfc8b6a446ef56726eae08726e5ae Mon Sep 17 00:00:00 2001
+From 2a682f5ea32f6e37e778040032aff9332aac1a0e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 20 Jun 2019 12:52:45 +0100
-Subject: [PATCH] Provide the missing features required for Yomi (Yet one
- more installer)
+Subject: [PATCH] Provide the missing features required for Yomi (Yet
+ one more installer)
---
- doc/ref/modules/all/index.rst | 1 +
- doc/ref/modules/all/salt.modules.kubeadm.rst | 5 +
- salt/grains/core.py | 4 +
- salt/loader.py | 44 +-
- salt/modules/aixpkg.py | 2 +-
- salt/modules/apkpkg.py | 12 +-
- salt/modules/aptpkg.py | 14 +-
- salt/modules/dpkg_lowpkg.py | 6 +-
- salt/modules/ebuildpkg.py | 8 +-
- salt/modules/freebsdpkg.py | 6 +-
- salt/modules/kubeadm.py | 1265 ++++++++++++++++++++++++++
- salt/modules/mac_brew_pkg.py | 8 +-
- salt/modules/mac_portspkg.py | 6 +-
- salt/modules/openbsdpkg.py | 2 +-
- salt/modules/pacmanpkg.py | 10 +-
- salt/modules/pkgin.py | 8 +-
- salt/modules/pkgng.py | 4 +-
- salt/modules/rpm_lowpkg.py | 101 +-
- salt/modules/solarisipspkg.py | 4 +-
- salt/modules/solarispkg.py | 2 +-
- salt/modules/systemd_service.py | 55 ++
- salt/modules/xbpspkg.py | 12 +-
- salt/modules/yumpkg.py | 34 +-
- salt/modules/zypperpkg.py | 501 +++++++---
- salt/states/btrfs.py | 385 ++++++++
- salt/states/file.py | 6 +-
- salt/states/loop.py | 4 +
- salt/states/pkg.py | 26 +-
- salt/states/pkgrepo.py | 14 +-
- salt/utils/oset.py | 7 +-
- tests/unit/modules/test_kubeadm.py | 1144 +++++++++++++++++++++++
- tests/unit/modules/test_rpm_lowpkg.py | 87 +-
- tests/unit/modules/test_systemd_service.py | 53 ++
- tests/unit/modules/test_zypperpkg.py | 100 +-
- tests/unit/states/test_btrfs.py | 782 ++++++++++++++++
- tests/unit/states/test_pkg.py | 7 +-
- tests/unit/test_loader.py | 96 +-
- 37 files changed, 4550 insertions(+), 275 deletions(-)
- create mode 100644 doc/ref/modules/all/salt.modules.kubeadm.rst
- create mode 100644 salt/modules/kubeadm.py
- create mode 100644 salt/states/btrfs.py
- create mode 100644 tests/unit/modules/test_kubeadm.py
- create mode 100644 tests/unit/states/test_btrfs.py
+ salt/grains/core.py | 6 +-
+ salt/modules/kubeadm.py | 91 +++++++++------
+ salt/modules/rpm_lowpkg.py | 42 +++----
+ salt/modules/systemd_service.py | 24 ++--
+ salt/modules/zypperpkg.py | 87 ++++++++------
+ salt/states/btrfs.py | 44 +++++--
+ salt/states/file.py | 7 +-
+ salt/states/loop.py | 15 +--
+ salt/states/pkgrepo.py | 5 -
+ salt/utils/oset.py | 8 +-
+ tests/unit/modules/test_kubeadm.py | 43 ++++---
+ tests/unit/modules/test_rpm_lowpkg.py | 15 ++-
+ tests/unit/modules/test_systemd_service.py | 13 +--
+ tests/unit/modules/test_zypperpkg.py | 60 ++--------
+ tests/unit/states/test_btrfs.py | 130 ++++++---------------
+ tests/unit/states/test_pkg.py | 39 ++-----
+ tests/unit/test_loader.py | 97 ++++++++++++++-
+ 17 files changed, 373 insertions(+), 353 deletions(-)
-diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
-index 359af7e1e0..8e1bf2ecf1 100644
---- a/doc/ref/modules/all/index.rst
-+++ b/doc/ref/modules/all/index.rst
-@@ -214,6 +214,7 @@ execution modules
- keystoneng
- keystore
- kmod
-+ kubeadm
- kubernetesmod
- launchctl_service
- layman
-diff --git a/doc/ref/modules/all/salt.modules.kubeadm.rst b/doc/ref/modules/all/salt.modules.kubeadm.rst
-new file mode 100644
-index 0000000000..137c779da2
---- /dev/null
-+++ b/doc/ref/modules/all/salt.modules.kubeadm.rst
-@@ -0,0 +1,5 @@
-+salt.modules.kubeadm module
-+===========================
-+
-+.. automodule:: salt.modules.kubeadm
-+ :members:
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index f1e3ebe9d2..b58c29dbc3 100644
+index bebb4581bc..d7d03c5e70 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -2611,6 +2611,10 @@ def _hw_data(osdata):
- grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
- if key == 'uuid':
- grains['uuid'] = grains['uuid'].lower()
-+ except UnicodeDecodeError:
-+ # Some firmwares provide non-valid 'product_name'
-+ # files, ignore them
+@@ -2759,7 +2759,7 @@ def _hw_data(osdata):
+ contents_file = os.path.join("/sys/class/dmi/id", fw_file)
+ if os.path.exists(contents_file):
+ try:
+- with salt.utils.files.fopen(contents_file, "rb") as ifile:
++ with salt.utils.files.fopen(contents_file, "r") as ifile:
+ grains[key] = salt.utils.stringutils.to_unicode(
+ ifile.read().strip(), errors="replace"
+ )
+@@ -2768,9 +2768,7 @@ def _hw_data(osdata):
+ except UnicodeDecodeError:
+ # Some firmwares provide non-valid 'product_name'
+ # files, ignore them
+- log.debug(
+- "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
+- )
+ pass
- except (IOError, OSError) as err:
+ except OSError as err:
# PermissionError is new to Python 3, but corresponds to the EACESS and
# EPERM error numbers. Use those instead here for PY2 compatibility.
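This hunk switches the DMI read to text mode while keeping errors="replace", so firmware that ships undecodable bytes in product_name degrades gracefully instead of raising. A standalone sketch of the tolerant read (the path exists only on Linux systems exposing DMI):

.. code-block:: python

    try:
        with open("/sys/class/dmi/id/product_name", errors="replace") as ifile:
            print(ifile.read().strip())
    except OSError:
        pass  # no DMI data on this platform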
-diff --git a/salt/loader.py b/salt/loader.py
-index 860162b791..c68562988d 100644
---- a/salt/loader.py
-+++ b/salt/loader.py
-@@ -254,6 +254,7 @@ def minion_mods(
- whitelist=whitelist,
- loaded_base_name=loaded_base_name,
- static_modules=static_modules,
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
-
- ret.pack['__salt__'] = ret
-@@ -347,6 +348,7 @@ def engines(opts, functions, runners, utils, proxy=None):
- opts,
- tag='engines',
- pack=pack,
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
-
-
-@@ -359,6 +361,7 @@ def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
- opts,
- tag='proxy',
- pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
-
- ret.pack['__proxy__'] = ret
-@@ -396,12 +399,14 @@ def pillars(opts, functions, context=None):
- '''
- Returns the pillars modules
- '''
-+ _utils = utils(opts)
- ret = LazyLoader(_module_dirs(opts, 'pillar'),
- opts,
- tag='pillar',
- pack={'__salt__': functions,
- '__context__': context,
-- '__utils__': utils(opts)})
-+ '__utils__': _utils},
-+ extra_module_dirs=_utils.module_dirs)
- ret.pack['__ext_pillar__'] = ret
- return FilterDictWrapper(ret, '.ext_pillar')
-
-@@ -501,11 +506,13 @@ def fileserver(opts, backends):
- '''
- Returns the file server modules
- '''
-+ _utils = utils(opts)
- return LazyLoader(_module_dirs(opts, 'fileserver'),
- opts,
- tag='fileserver',
- whitelist=backends,
-- pack={'__utils__': utils(opts)})
-+ pack={'__utils__': _utils},
-+ extra_module_dirs=_utils.module_dirs)
-
-
- def roster(opts, runner=None, utils=None, whitelist=None):
-@@ -521,6 +528,7 @@ def roster(opts, runner=None, utils=None, whitelist=None):
- '__runner__': runner,
- '__utils__': utils,
- },
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
-
-
-@@ -562,6 +570,7 @@ def states(opts, functions, utils, serializers, whitelist=None, proxy=None, cont
- tag='states',
- pack={'__salt__': functions, '__proxy__': proxy or {}},
- whitelist=whitelist,
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
- ret.pack['__states__'] = ret
- ret.pack['__utils__'] = utils
-@@ -683,6 +692,7 @@ def grain_funcs(opts, proxy=None):
- ),
- opts,
- tag='grains',
-+ extra_module_dirs=_utils.module_dirs,
- )
- ret.pack['__utils__'] = utils(opts, proxy=proxy)
- return ret
-@@ -947,6 +957,7 @@ def runner(opts, utils=None, context=None, whitelist=None):
- tag='runners',
- pack={'__utils__': utils, '__context__': context},
- whitelist=whitelist,
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
- # TODO: change from __salt__ to something else, we overload __salt__ too much
- ret.pack['__salt__'] = ret
-@@ -982,6 +993,7 @@ def sdb(opts, functions=None, whitelist=None, utils=None):
- '__salt__': minion_mods(opts, utils=utils),
- },
- whitelist=whitelist,
-+ extra_module_dirs=utils.module_dirs if utils else None,
- )
-
-
-@@ -1023,6 +1035,7 @@ def clouds(opts):
- '''
- Return the cloud functions
- '''
-+ _utils = salt.loader.utils(opts)
- # Let's bring __active_provider_name__, defaulting to None, to all cloud
- # drivers. This will get temporarily updated/overridden with a context
- # manager when needed.
-@@ -1034,8 +1047,9 @@ def clouds(opts):
- int_type='clouds'),
- opts,
- tag='clouds',
-- pack={'__utils__': salt.loader.utils(opts),
-+ pack={'__utils__': _utils,
- '__active_provider_name__': None},
-+ extra_module_dirs=_utils.module_dirs,
- )
- for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
- log.trace(
-@@ -1149,6 +1163,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- :param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
- :param str virtual_funcs: The name of additional functions in the module to call to verify its functionality.
- If not true, the module will not load.
-+ :param list extra_module_dirs: A list of directories that will be able to import from
- :returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
- are function references themselves which are loaded on-demand.
- # TODO:
-@@ -1170,6 +1185,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- static_modules=None,
- proxy=None,
- virtual_funcs=None,
-+ extra_module_dirs=None,
- ): # pylint: disable=W0231
- '''
- In pack, if any of the values are None they will be replaced with an
-@@ -1211,6 +1227,9 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- virtual_funcs = []
- self.virtual_funcs = virtual_funcs
-
-+ self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
-+ self._clean_module_dirs = []
-+
- self.disabled = set(
- self.opts.get(
- 'disable_{0}{1}'.format(
-@@ -1517,12 +1536,30 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- reload_module(submodule)
- self._reload_submodules(submodule)
-
-+ def __populate_sys_path(self):
-+ for directory in self.extra_module_dirs:
-+ if directory not in sys.path:
-+ sys.path.append(directory)
-+ self._clean_module_dirs.append(directory)
-+
-+ def __clean_sys_path(self):
-+ for directory in self._clean_module_dirs:
-+ if directory in sys.path:
-+ sys.path.remove(directory)
-+ self._clean_module_dirs = []
-+
-+ # Be sure that sys.path_importer_cache do not contains any
-+ # invalid FileFinder references
-+ if USE_IMPORTLIB:
-+ importlib.invalidate_caches()
-+
- def _load_module(self, name):
- mod = None
- fpath, suffix = self.file_mapping[name][:2]
- self.loaded_files.add(name)
- fpath_dirname = os.path.dirname(fpath)
- try:
-+ self.__populate_sys_path()
- sys.path.append(fpath_dirname)
- if suffix == '.pyx':
- mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
-@@ -1645,6 +1682,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
- return False
- finally:
- sys.path.remove(fpath_dirname)
-+ self.__clean_sys_path()
-
- if hasattr(mod, '__opts__'):
- mod.__opts__.update(self.opts)
-diff --git a/salt/modules/aixpkg.py b/salt/modules/aixpkg.py
-index 4f9852b504..d35946f397 100644
---- a/salt/modules/aixpkg.py
-+++ b/salt/modules/aixpkg.py
-@@ -400,7 +400,7 @@ def latest_version(*names, **kwargs):
- available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-diff --git a/salt/modules/apkpkg.py b/salt/modules/apkpkg.py
-index 2e9a2a952e..4f84642e02 100644
---- a/salt/modules/apkpkg.py
-+++ b/salt/modules/apkpkg.py
-@@ -83,7 +83,7 @@ def version(*names, **kwargs):
- return __salt__['pkg_resource.version'](*names, **kwargs)
-
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- Updates the package list
-
-@@ -425,7 +425,7 @@ def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unus
- return ret
-
-
--def upgrade(name=None, pkgs=None, refresh=True):
-+def upgrade(name=None, pkgs=None, refresh=True, **kwargs):
- '''
- Upgrades all packages via ``apk upgrade`` or a specific package if name or
- pkgs is specified. Name is ignored if pkgs is specified
-@@ -485,7 +485,7 @@ def upgrade(name=None, pkgs=None, refresh=True):
- return ret
-
-
--def list_upgrades(refresh=True):
-+def list_upgrades(refresh=True, **kwargs):
- '''
- List all available package upgrades.
-
-@@ -524,7 +524,7 @@ def list_upgrades(refresh=True):
- return ret
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's package database (not
-@@ -541,7 +541,7 @@ def file_list(*packages):
- return file_dict(*packages)
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of _every_ file on the system's
-@@ -580,7 +580,7 @@ def file_dict(*packages):
- return {'errors': errors, 'packages': ret}
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- Return the name of the package that owns the file. Multiple file paths can
- be passed. Like :mod:`pkg.version pkg2. Return None if there was a problem
-@@ -1641,7 +1641,7 @@ def _skip_source(source):
- return False
-
-
--def list_repos():
-+def list_repos(**kwargs):
- '''
- Lists all repos in the sources.list (and sources.lists.d) files
-
-@@ -2411,7 +2411,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
- }
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's package database (not
-@@ -2428,7 +2428,7 @@ def file_list(*packages):
- return __salt__['lowpkg.file_list'](*packages)
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of _every_ file on the system's
-@@ -2712,7 +2712,7 @@ def _resolve_deps(name, pkgs, **kwargs):
- return
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- .. versionadded:: 2014.7.0
-
-diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
-index b78e844830..a64e6d57da 100644
---- a/salt/modules/dpkg_lowpkg.py
-+++ b/salt/modules/dpkg_lowpkg.py
-@@ -135,7 +135,7 @@ def unpurge(*packages):
- return salt.utils.data.compare_dicts(old, new)
-
-
--def list_pkgs(*packages):
-+def list_pkgs(*packages, **kwargs):
- '''
- List the packages currently installed in a dict::
-
-@@ -169,7 +169,7 @@ def list_pkgs(*packages):
- return pkgs
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's package database (not
-@@ -211,7 +211,7 @@ def file_list(*packages):
- return {'errors': errors, 'files': list(ret)}
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of _every_ file on the system's
-diff --git a/salt/modules/ebuildpkg.py b/salt/modules/ebuildpkg.py
-index cb77ff7852..205318f579 100644
---- a/salt/modules/ebuildpkg.py
-+++ b/salt/modules/ebuildpkg.py
-@@ -358,7 +358,7 @@ def list_upgrades(refresh=True, backtrack=3, **kwargs): # pylint: disable=W0613
- return _get_upgradable(backtrack)
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-@@ -440,7 +440,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
- return ret
-
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- Update the portage tree using the first available method from the following
- list:
-@@ -765,7 +765,7 @@ def install(name=None,
- return changes
-
-
--def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
-+def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
- On minions running systemd>=205, `systemd-run(1)`_ is now used to
-@@ -858,7 +858,7 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
- return ret
-
-
--def upgrade(refresh=True, binhost=None, backtrack=3):
-+def upgrade(refresh=True, binhost=None, backtrack=3, **kwargs):
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
- On minions running systemd>=205, `systemd-run(1)`_ is now used to
-diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py
-index 43f127ef35..0bae7a3bab 100644
---- a/salt/modules/freebsdpkg.py
-+++ b/salt/modules/freebsdpkg.py
-@@ -238,7 +238,7 @@ def version(*names, **kwargs):
- ])
-
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- ``pkg_add(1)`` does not use a local database of available packages, so this
- function simply returns ``True``. it exists merely for API compatibility.
-@@ -503,7 +503,7 @@ def _rehash():
- __salt__['cmd.shell']('rehash', output_loglevel='trace')
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's package database (not
-@@ -525,7 +525,7 @@ def file_list(*packages):
- return ret
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of _every_ file on the
diff --git a/salt/modules/kubeadm.py b/salt/modules/kubeadm.py
-new file mode 100644
-index 0000000000..2b1e7906a1
---- /dev/null
+index 8baf5f85fd..966e9e848f 100644
+--- a/salt/modules/kubeadm.py
+++ b/salt/modules/kubeadm.py
-@@ -0,0 +1,1265 @@
-+# -*- coding: utf-8 -*-
+@@ -1,3 +1,25 @@
+#
+# Author: Alberto Planas
+#
@@ -540,2818 +76,580 @@ index 0000000000..2b1e7906a1
+# specific language governing permissions and limitations
+# under the License.
+
-+'''
-+:maintainer: Alberto Planas
-+:maturity: new
-+:depends: None
-+:platform: Linux
-+'''
-+from __future__ import absolute_import, print_function, unicode_literals
-+import json
-+import logging
-+import re
-+
-+from salt.exceptions import CommandExecutionError
+ """
+ :maintainer: Alberto Planas
+ :maturity: new
+@@ -11,6 +33,7 @@ import re
+
+ import salt.utils.files
+ from salt.exceptions import CommandExecutionError
+from salt.ext.six.moves import zip
-+import salt.utils.files
-+
-+ADMIN_CFG = '/etc/kubernetes/admin.conf'
-+
-+log = logging.getLogger(__name__)
-+
-+__virtualname__ = 'kubeadm'
-+
-+# Define not exported variables from Salt, so this can be imported as
-+# a normal module
-+try:
-+ __salt__
-+except NameError:
-+ __salt__ = {}
-+
-+
-+def _api_server_endpoint(config=None):
-+ '''
-+ Return the API server endpoint
-+ '''
-+ config = config if config else ADMIN_CFG
-+ endpoint = None
-+ try:
-+ with salt.utils.files.fopen(config, 'r') as fp_:
-+ endpoint = re.search(r'^\s*server: https?://(.*)$',
-+ fp_.read(),
-+ re.MULTILINE).group(1)
-+ except Exception:
-+ # Any error or exception is mapped to None
-+ pass
-+ return endpoint
-+
-+
+
+ ADMIN_CFG = "/etc/kubernetes/admin.conf"
+
+@@ -37,23 +60,22 @@ def _api_server_endpoint(config=None):
+ endpoint = re.search(
+ r"^\s*server: https?://(.*)$", fp_.read(), re.MULTILINE
+ ).group(1)
+- # pylint:disable=broad-except
+ except Exception:
+ # Any error or exception is mapped to None
+ pass
+ return endpoint
+
+
+-def _token(create_if_needed=False):
+def _token(create_if_needed=True):
-+ '''
-+ Return a valid bootstrap token
-+ '''
-+ tokens = token_list()
+ """
+ Return a valid bootstrap token
+ """
+ tokens = token_list()
+- if not tokens and create_if_needed:
+ if not tokens:
-+ token_create(description='Token created by kubeadm salt module')
-+ tokens = token_list()
+ token_create(description="Token created by kubeadm salt module")
+ tokens = token_list()
+- # We expect that the token is valid for authentication and signing
+- return tokens[0]["token"] if tokens else None
+    # We expect that the token is valid for authentication and signing
-+ return tokens[0]['token']
-+
-+
-+def _discovery_token_ca_cert_hash():
-+ cmd = ['openssl', 'x509', '-pubkey', '-in', '/etc/kubernetes/pki/ca.crt',
-+ '|', 'openssl', 'rsa', '-pubin', '-outform', 'der', '2>/dev/null',
-+ '|', 'openssl', 'dgst', '-sha256', '-hex',
-+ '|', 'sed', "'s/^.* //'"]
-+ result = __salt__['cmd.run_all'](' '.join(cmd), python_shell=True)
-+ if result['retcode']:
-+ raise CommandExecutionError(result['stderr'])
-+
-+ return 'sha256:{}'.format(result['stdout'])
-+
-+
-+def join_params(create_if_needed=False):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Return the parameters required for joining into the cluster
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.join_params
-+ salt '*' kubeadm.join_params create_if_needed=True
-+
-+ '''
-+
-+ params = {
-+ 'api-server-endpoint': _api_server_endpoint(),
-+ 'token': _token(create_if_needed),
-+ 'discovery-token-ca-cert-hash': _discovery_token_ca_cert_hash(),
-+ }
-+ return params
-+
-+
-+def version(kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Return the version of kubeadm
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.version
-+
-+ '''
-+ cmd = ['kubeadm', 'version']
-+
-+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ cmd.extend(['--output', 'json'])
-+
-+ return json.loads(__salt__['cmd.run_stdout'](cmd))
-+
-+
-+def _cmd(cmd):
-+ '''Utility function to run commands.'''
-+ result = __salt__['cmd.run_all'](cmd)
-+ if result['retcode']:
-+ raise CommandExecutionError(result['stderr'])
-+ return result['stdout']
-+
-+
-+def token_create(token=None, config=None, description=None,
-+ groups=None, ttl=None, usages=None, kubeconfig=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Create bootstrap tokens on the server
-+
-+ token
++ return tokens[0]["token"]
+
+
+ def _discovery_token_ca_cert_hash():
+@@ -92,10 +114,6 @@ def join_params(create_if_needed=False):
+
+ Return the parameters required for joining into the cluster
+
+- create_if_needed
+- If the token bucket is empty and this parameter is True, a new
+- token will be created.
+-
+ CLI Example:
+
+ .. code-block:: bash
+@@ -169,7 +187,7 @@ def token_create(
+ Create bootstrap tokens on the server
+
+ token
+- Token to write, if None one will be generated. The token must
+        Token to write, if None one will be generated. The token must
-+ match a regular expression, that by default is
-+ [a-z0-9]{6}.[a-z0-9]{16}
-+
-+ config
-+ Path to kubeadm configuration file
-+
-+ description
-+ A human friendly description of how this token is used
-+
-+ groups
+ match a regular expression, that by default is
+ [a-z0-9]{6}.[a-z0-9]{16}
+
+@@ -180,7 +198,7 @@ def token_create(
+ A human friendly description of how this token is used
+
+ groups
+- List of extra groups that this token will authenticate, default
+ List of extra groups that this token will authenticate, defaut
-+ to ['system:bootstrappers:kubeadm:default-node-token']
-+
-+ ttl
-+ The duration defore the token is automatically deleted (1s, 2m,
-+ 3h). If set to '0' the token will never expire. Default value
-+ is 24h0m0s
-+
-+ usages
+ to ['system:bootstrappers:kubeadm:default-node-token']
+
+ ttl
+@@ -189,7 +207,7 @@ def token_create(
+ is 24h0m0s
+
+ usages
+- Describes the ways in which this token can be used. The default
+        Describes the ways in which this token can be used. The default
-+ value is ['signing', 'authentication']
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.token_create
-+ salt '*' kubeadm.token_create a1b2c.0123456789abcdef
-+ salt '*' kubeadm.token_create ttl='6h'
-+ salt '*' kubeadm.token_create usages="['signing']"
-+
-+ '''
-+ cmd = ['kubeadm', 'token', 'create']
-+ if token:
-+ cmd.append(token)
-+
-+ parameters = [('config', config), ('description', description),
-+ ('groups', groups), ('ttl', ttl), ('usages', usages),
-+ ('kubeconfig', kubeconfig), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ if parameter in ('groups', 'usages'):
-+ cmd.extend(['--{}'.format(parameter), json.dumps(value)])
-+ else:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def token_delete(token, kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Delete bootstrap tokens on the server
-+
-+ token
+ value is ['signing', 'authentication']
+
+ kubeconfig
+@@ -239,7 +257,7 @@ def token_delete(token, kubeconfig=None, rootfs=None):
+ Delete bootstrap tokens on the server
+
+ token
+- Token to write, if None one will be generated. The token must
+        Token to write, if None one will be generated. The token must
-+ match a regular expression, that by default is
-+ [a-z0-9]{6}.[a-z0-9]{16}
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.token_delete a1b2c
-+ salt '*' kubeadm.token_create a1b2c.0123456789abcdef
-+
-+ '''
-+ cmd = ['kubeadm', 'token', 'delete', token]
-+
-+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return bool(_cmd(cmd))
-+
-+
-+def token_generate(kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Generate and return a bootstrap token, but do not create it on the
-+ server
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.token_generate
-+
-+ '''
-+ cmd = ['kubeadm', 'token', 'generate']
-+
-+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def token_list(kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ List bootstrap tokens on the server
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.token_list
-+
-+ '''
-+ cmd = ['kubeadm', 'token', 'list']
-+
-+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ lines = _cmd(cmd).splitlines()
-+
+ match a regular expression, that by default is
+ [a-z0-9]{6}.[a-z0-9]{16}
+
+@@ -328,21 +346,20 @@ def token_list(kubeconfig=None, rootfs=None):
+
+ lines = _cmd(cmd).splitlines()
+
+ # Find the header and parse it. We do not need to validate the
+ # content, as the regex will take care of future changes.
+ header = lines.pop(0)
-+ header = [i.lower() for i in re.findall(r'(\w+(?:\s\w+)*)', header)]
++ header = [i.lower() for i in re.findall(r"(\w+(?:\s\w+)*)", header)]
+
-+ tokens = []
+ tokens = []
+- if lines:
+- # Find the header and parse it. We do not need to validate
+- # the content, as the regex will take care of future changes.
+- header = lines.pop(0)
+- header = [i.lower() for i in re.findall(r"(\w+(?:\s\w+)*)", header)]
+-
+- for line in lines:
+- # TODO(aplanas): descriptions with multiple spaces can
+- # break the parser.
+- values = re.findall(r"(\S+(?:\s\S+)*)", line)
+- if len(header) != len(values):
+- log.error("Error parsing line: {}".format(line))
+- continue
+- tokens.append({key: value for key, value in zip(header, values)})
+ for line in lines:
+ # TODO(aplanas): descriptions with multiple spaces can break
+ # the parser.
-+ values = re.findall(r'(\S+(?:\s\S+)*)', line)
++ values = re.findall(r"(\S+(?:\s\S+)*)", line)
+ if len(header) != len(values):
-+ log.error('Error parsing line: {}'.format(line))
++ log.error("Error parsing line: {}".format(line))
+ continue
+ tokens.append({key: value for key, value in zip(header, values)})
-+ return tokens
-+
-+
-+def alpha_certs_renew(rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Renews certificates for a Kubernetes cluster
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.alpha_certs_renew
-+
-+ '''
-+ cmd = ['kubeadm', 'alpha', 'certs', 'renew']
-+
-+ parameters = [('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def alpha_kubeconfig_user(client_name,
-+ apiserver_advertise_address=None,
-+ apiserver_bind_port=None, cert_dir=None,
-+ org=None, token=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Outputs a kubeconfig file for an additional user
-+
-+ client_name
-+ The name of the user. It will be used as the CN if client
-+ certificates are created
-+
-+ apiserver_advertise_address
-+ The IP address the API server is accessible on
-+
-+ apiserver_bind_port
-+ The port the API server is accessible on (default 6443)
-+
-+ cert_dir
-+ The path where certificates are stored (default
-+ "/etc/kubernetes/pki")
-+
-+ org
-+ The organization of the client certificate
-+
-+ token
-+ The token that show be used as the authentication mechanism for
-+ this kubeconfig, instead of client certificates
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.alpha_kubeconfig_user client_name=user
-+
-+ '''
-+ cmd = ['kubeadm', 'alpha', 'kubeconfig', 'user', '--client-name',
-+ client_name]
-+
-+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
-+ ('apiserver-bind-port', apiserver_bind_port),
-+ ('cert-dir', cert_dir), ('org', org),
-+ ('token', token), ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def alpha_kubelet_config_download(kubeconfig=None, kubelet_version=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Downloads the kubelet configuration from the cluster ConfigMap
-+ kubelet-config-1.X
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ kubelet_version
-+ The desired version for the kubelet
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.alpha_kubelet_config_download
-+ salt '*' kubeadm.alpha_kubelet_config_download kubelet_version='1.14.0'
-+
-+ '''
-+ cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
-+
-+ parameters = [('kubeconfig', kubeconfig),
-+ ('kubelet-version', kubelet_version),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def alpha_kubelet_config_enable_dynamic(node_name, kubeconfig=None,
-+ kubelet_version=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Enables or updates dynamic kubelet configuration for a node
-+
-+ node_name
-+ Name of the node that should enable the dynamic kubelet
-+ configuration
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default values in /etc/kubernetes/admin.conf
-+
-+ kubelet_version
-+ The desired version for the kubelet
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.alpha_kubelet_config_enable_dynamic node-1
-+
-+ '''
-+ cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
-+ '--node-name', node_name]
-+
-+ parameters = [('kubeconfig', kubeconfig),
-+ ('kubelet-version', kubelet_version),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def alpha_selfhosting_pivot(cert_dir=None, config=None,
-+ kubeconfig=None,
-+ store_certs_in_secrets=False, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Converts a static Pod-hosted control plane into a self-hosted one
-+
-+ cert_dir
-+ The path where certificates are stored (default
-+ "/etc/kubernetes/pki")
-+
-+ config
-+ Path to kubeadm configuration file
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ store_certs_in_secrets
-+ Enable storing certs in secrets
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.alpha_selfhosting_pivot
-+
-+ '''
-+ cmd = ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
-+
-+ if store_certs_in_secrets:
-+ cmd.append('--store-certs-in-secrets')
-+
-+ parameters = [('cert-dir', cert_dir),
-+ ('config', config),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
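Note the two option kinds here: booleans become bare flags, valued options become flag/value pairs. A hedged sketch of the argv this builds when only store_certs_in_secrets is set (nothing is executed):

.. code-block:: python

    # Argv alpha_selfhosting_pivot would hand to _cmd with
    # store_certs_in_secrets=True and everything else unset.
    store_certs_in_secrets = True
    cmd = ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
    if store_certs_in_secrets:
        cmd.append('--store-certs-in-secrets')
    assert cmd == ['kubeadm', 'alpha', 'selfhosting', 'pivot',
                   '--force', '--store-certs-in-secrets']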
-+
-+
-+def config_images_list(config=None, feature_gates=None,
-+ kubernetes_version=None, kubeconfig=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Print a list of images kubeadm will use
-+
-+ config
-+ Path to kubeadm configuration file
-+
-+ feature_gates
-+ A set of key=value pairs that describe feature gates for
-+ various features
-+
-+ kubernetes_version
-+ Choose a specific Kubernetes version for the control plane
-+ (default "stable-1")
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_images_list
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'images', 'list']
-+
-+ parameters = [('config', config),
-+ ('feature-gates', feature_gates),
-+ ('kubernetes-version', kubernetes_version),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd).splitlines()
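Since `_cmd` returns raw stdout, the result is split into one image reference per line. A sketch against canned output (the image names are illustrative):

.. code-block:: python

    # Post-processing done by config_images_list, applied to canned
    # output instead of a live 'kubeadm config images list' run.
    output = ('k8s.gcr.io/kube-apiserver:v1.14.0\n'
              'k8s.gcr.io/kube-controller-manager:v1.14.0\n'
              'k8s.gcr.io/etcd:3.3.10')
    images = output.splitlines()
    assert images[-1] == 'k8s.gcr.io/etcd:3.3.10'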
-+
-+
-+def config_images_pull(config=None, cri_socket=None,
-+ feature_gates=None, kubernetes_version=None,
-+ kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Pull images used by kubeadm
-+
-+ config
-+ Path to kubeadm configuration file
-+
-+ cri_socket
-+ Path to the CRI socket to connect
-+
-+ feature_gates
-+ A set of key=value pairs that describe feature gates for
-+ various features
-+
-+ kubernetes_version
-+ Choose a specific Kubernetes version for the control plane
-+ (default "stable-1")
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_images_pull
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'images', 'pull']
-+
-+ parameters = [('config', config),
-+ ('cri-socket', cri_socket),
-+ ('feature-gates', feature_gates),
-+ ('kubernetes-version', kubernetes_version),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ prefix = '[config/images] Pulled '
-+ return [(line.replace(prefix, '')) for line in _cmd(cmd).splitlines()]
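Here every stdout line carries a fixed '[config/images] Pulled ' prefix that is stripped before the list is returned. A sketch on canned lines:

.. code-block:: python

    # Prefix stripping done by config_images_pull, on canned output.
    prefix = '[config/images] Pulled '
    output = ('[config/images] Pulled k8s.gcr.io/pause:3.1\n'
              '[config/images] Pulled k8s.gcr.io/coredns:1.3.1')
    pulled = [line.replace(prefix, '') for line in output.splitlines()]
    assert pulled == ['k8s.gcr.io/pause:3.1', 'k8s.gcr.io/coredns:1.3.1']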
-+
-+
-+def config_migrate(old_config, new_config=None, kubeconfig=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Read an older version of the kubeadm configuration API types from
-+ a file, and output the similar config object for the newer version
-+
-+ old_config
-+ Path to the kubeadm config file that is using the old API
-+ version and should be converted
-+
-+ new_config
-+ Path to the resulting equivalent kubeadm config file using the
-+ new API version. If not specified the output will be returned
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_migrate /oldconfig.cfg
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'migrate', '--old-config', old_config]
-+
-+ parameters = [('new-config', new_config),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def config_print_init_defaults(component_configs=None,
-+ kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Return default init configuration that can be used for 'kubeadm
-+ init'
-+
-+ component_configs
-+ A comma-separated list of component config API objects to print
-+ the default values for (valid values: KubeProxyConfiguration,
-+ KubeletConfiguration)
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_print_init_defaults
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'print', 'init-defaults']
-+
-+ parameters = [('component-configs', component_configs),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def config_print_join_defaults(component_configs=None,
-+ kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Return default join configuration that can be used for 'kubeadm
-+ join'
-+
-+ component_configs
-+ A comma-separated list of component config API objects to print
-+ the default values for (valid values: KubeProxyConfiguration,
-+ KubeletConfiguration)
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_print_join_defaults
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'print', 'join-defaults']
-+
-+ parameters = [('component-configs', component_configs),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def config_upload_from_file(config, kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Upload a configuration file to the in-cluster ConfigMap for
-+ kubeadm configuration
-+
-+ config
-+ Path to a kubeadm configuration file
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_upload_from_file /config.cfg
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'upload', 'from-file', '--config', config]
-+
-+ parameters = [('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def config_upload_from_flags(apiserver_advertise_address=None,
-+ apiserver_bind_port=None,
-+ apiserver_cert_extra_sans=None,
-+ cert_dir=None, cri_socket=None,
-+ feature_gates=None,
-+ kubernetes_version=None, node_name=None,
-+ pod_network_cidr=None, service_cidr=None,
-+ service_dns_domain=None, kubeconfig=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Create the in-cluster configuration file for the first time using
-+ flags
-+
-+ apiserver_advertise_address
+ return tokens
+
+
+@@ -869,7 +886,7 @@ def config_upload_from_flags(
+ flags
+
+ apiserver_advertise_address
+- The IP address the API server will advertise it's listening on
++ The IP address the API server will adversite it's listening on
-+
-+ apiserver_bind_port
-+ The port the API server is accessible on (default 6443)
-+
-+ apiserver_cert_extra_sans
-+ Optional extra Subject Alternative Names (SANs) to use for the
-+ API Server serving certificate
-+
-+ cert_dir
-+ The path where to save and store the certificates (default
-+ "/etc/kubernetes/pki")
-+
-+ cri_socket
-+ Path to the CRI socket to connect
-+
-+ feature_gates
-+ A set of key=value pairs that describe feature gates for
-+ various features
-+
-+ kubernetes_version
-+ Choose a specific Kubernetes version for the control plane
-+ (default "stable-1")
-+
-+ node_name
-+ Specify the node name
-+
-+ pod_network_cidr
-+ Specify range of IP addresses for the pod network
-+
-+ service_cidr
+
+ apiserver_bind_port
+ The port the API server is accessible on (default 6443)
+@@ -900,11 +917,11 @@ def config_upload_from_flags(
+ Specify range of IP addresses for the pod network
+
+ service_cidr
+- Use alternative range of IP address for service VIPs (default
++ Use alternative range of IP address dor service VIPs (default
-+ "10.96.0.0/12")
-+
-+ service_dns_domain
+ "10.96.0.0/12")
+
+ service_dns_domain
+- Use alternative domain for services (default "cluster.local")
++ Use alternative domain for serivces (default "cluster.local")
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_upload_from_flags
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'upload', 'from-flags']
-+
-+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
-+ ('apiserver-bind-port', apiserver_bind_port),
-+ ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
-+ ('cert-dir', cert_dir),
-+ ('cri-socket', cri_socket),
-+ ('feature-gates', feature_gates),
-+ ('kubernetes-version', kubernetes_version),
-+ ('node-name', node_name),
-+ ('pod-network-cidr', pod_network_cidr),
-+ ('service-cidr', service_cidr),
-+ ('service-dns-domain', service_dns_domain),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def config_view(kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ View the kubeadm configuration stored inside the cluster
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.config_view
-+
-+ '''
-+ cmd = ['kubeadm', 'config', 'view']
-+
-+ parameters = [('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+def init(apiserver_advertise_address=None, apiserver_bind_port=None,
-+ apiserver_cert_extra_sans=None, cert_dir=None,
-+ certificate_key=None, config=None, cri_socket=None,
-+ experimental_upload_certs=False, feature_gates=None,
-+ ignore_preflight_errors=None, image_repository=None,
-+ kubernetes_version=None, node_name=None,
-+ pod_network_cidr=None, service_cidr=None,
-+ service_dns_domain=None, skip_certificate_key_print=False,
-+ skip_phases=None, skip_token_print=False, token=None,
-+ token_ttl=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Command to set up the Kubernetes control plane
-+
-+ apiserver_advertise_address
+
+ kubeconfig
+ The kubeconfig file to use when talking to the cluster. The
+@@ -1004,7 +1021,7 @@ def init(
+ Command to set up the Kubernetes control plane
+
+ apiserver_advertise_address
+- The IP address the API server will advertise it's listening on
++ The IP address the API server will adversite it's listening on
-+
-+ apiserver_bind_port
-+ The port the API server is accessible on (default 6443)
-+
-+ apiserver_cert_extra_sans
-+ Optional extra Subject Alternative Names (SANs) to use for the
-+ API Server serving certificate
-+
-+ cert_dir
-+ The path where to save and store the certificates (default
-+ "/etc/kubernetes/pki")
-+
-+ certificate_key
-+ Key used to encrypt the control-plane certificates in the
-+ kubeadm-certs Secret
-+
-+ config
-+ Path to a kubeadm configuration file
-+
-+ cri_socket
-+ Path to the CRI socket to connect
-+
-+ experimental_upload_certs
-+ Upload control-plane certificate to the kubeadm-certs Secret
-+
-+ feature_gates
-+ A set of key=value pairs that describe feature gates for
-+ various features
-+
-+ ignore_preflight_errors
+
+ apiserver_bind_port
+ The port the API server is accessible on (default 6443)
+@@ -1035,10 +1052,10 @@ def init(
+ various features
+
+ ignore_preflight_errors
+- A list of checks whose errors will be shown as warnings
++ A list of checkt whose errors will be shown as warnings
-+
-+ image_repository
+
+ image_repository
+- Choose a container registry to pull control plane images from
++ Choose a container registry to pull controll plane images from
-+
-+ kubernetes_version
-+ Choose a specific Kubernetes version for the control plane
-+ (default "stable-1")
-+
-+ node_name
-+ Specify the node name
-+
-+ pod_network_cidr
-+ Specify range of IP addresses for the pod network
-+
-+ service_cidr
+
+ kubernetes_version
+ Choose a specifig Kubernetes version for the control plane
+@@ -1051,11 +1068,11 @@ def init(
+ Specify range of IP addresses for the pod network
+
+ service_cidr
+- Use alternative range of IP address for service VIPs (default
++ Use alternative range of IP address dor service VIPs (default
-+ "10.96.0.0/12")
-+
-+ service_dns_domain
+ "10.96.0.0/12")
+
+ service_dns_domain
+- Use alternative domain for services (default "cluster.local")
++ Use alternative domain for serivces (default "cluster.local")
-+
-+ skip_certificate_key_print
-+ Don't print the key used to encrypt the control-plane
-+ certificates
-+
-+ skip_phases
-+ List of phases to be skipped
-+
-+ skip_token_print
-+ Skip printing of the default bootstrap token generated by
-+ 'kubeadm init'
-+
-+ token
-+ The token to use for establishing bidirectional trust between
-+ nodes and control-plane nodes. The token must match a regular
-+ expression, which by default is [a-z0-9]{6}.[a-z0-9]{16}
-+
-+ token_ttl
-+ The duration before the token is automatically deleted (1s, 2m,
-+ 3h). If set to '0' the token will never expire. Default value
-+ is 24h0m0s
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.init pod_network_cidr='10.244.0.0/16'
-+
-+ '''
-+ cmd = ['kubeadm', 'init']
-+
-+ if experimental_upload_certs:
-+ cmd.append('--experimental-upload-certs')
-+ if skip_certificate_key_print:
-+ cmd.append('--skip-certificate-key-print')
-+ if skip_token_print:
-+ cmd.append('--skip-token-print')
-+
-+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
-+ ('apiserver-bind-port', apiserver_bind_port),
-+ ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
-+ ('cert-dir', cert_dir),
-+ ('certificate-key', certificate_key),
-+ ('config', config),
-+ ('cri-socket', cri_socket),
-+ ('feature-gates', feature_gates),
-+ ('ignore-preflight-errors', ignore_preflight_errors),
-+ ('image-repository', image_repository),
-+ ('kubernetes-version', kubernetes_version),
-+ ('node-name', node_name),
-+ ('pod-network-cidr', pod_network_cidr),
-+ ('service-cidr', service_cidr),
-+ ('service-dns-domain', service_dns_domain),
-+ ('skip-phases', skip_phases),
-+ ('token', token),
-+ ('token-ttl', token_ttl),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
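The keyword-to-flag mapping can be checked without a cluster by rebuilding the argv the way init does. A sketch for the CLI example above (no command is run):

.. code-block:: python

    # Argv kubeadm.init would pass to _cmd for the CLI example above;
    # unset options never appear in the command line.
    cmd = ['kubeadm', 'init']
    parameters = [('pod-network-cidr', '10.244.0.0/16'),
                  ('service-dns-domain', None)]
    for parameter, value in parameters:
        if value:
            cmd.extend(['--{}'.format(parameter), str(value)])
    assert cmd == ['kubeadm', 'init',
                   '--pod-network-cidr', '10.244.0.0/16']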
-+
-+# TODO(aplanas):
-+# * init_phase_addon_all
-+# * init_phase_addon_coredns
-+# * init_phase_addon_kube_proxy
-+# * init_phase_bootstrap_token
-+# * init_phase_certs_all
-+# * init_phase_certs_apiserver
-+# * init_phase_certs_apiserver_etcd_client
-+# * init_phase_certs_apiserver_kubelet_client
-+# * init_phase_certs_ca
-+# * init_phase_certs_etcd_ca
-+# * init_phase_certs_etcd_healthcheck_client
-+# * init_phase_certs_etcd_peer
-+# * init_phase_certs_etcd_server
-+# * init_phase_certs_front_proxy_ca
-+# * init_phase_certs_front_proxy_client
-+# * init_phase_certs_sa
-+# * init_phase_control_plane_all
-+# * init_phase_control_plane_apiserver
-+# * init_phase_control_plane_controller_manager
-+# * init_phase_control_plane_scheduler
-+# * init_phase_etcd_local
-+# * init_phase_kubeconfig_admin
-+# * init_phase_kubeconfig_all
-+# * init_phase_kubeconfig_controller_manager
-+# * init_phase_kubeconfig_kubelet
-+# * init_phase_kubeconfig_scheduler
-+# * init_phase_kubelet_start
-+# * init_phase_mark_control_plane
-+# * init_phase_preflight
-+# * init_phase_upload_certs
-+# * init_phase_upload_config_all
-+# * init_phase_upload_config_kubeadm
-+# * init_phase_upload_config_kubelet
-+
-+
-+def join(api_server_endpoint=None,
-+ apiserver_advertise_address=None, apiserver_bind_port=None,
-+ certificate_key=None, config=None, cri_socket=None,
-+ discovery_file=None, discovery_token=None,
-+ discovery_token_ca_cert_hash=None,
-+ discovery_token_unsafe_skip_ca_verification=False,
-+ experimental_control_plane=False,
-+ ignore_preflight_errors=None, node_name=None,
-+ skip_phases=None, tls_bootstrap_token=None, token=None,
-+ rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Command to join to an existing cluster
-+
-+ api_server_endpoint
-+ IP address or domain name and port of the API Server
-+
-+ apiserver_advertise_address
-+ If the node should host a new control plane instance, the IP
+
+ skip_certificate_key_print
+ Don't print the key used to encrypt the control-plane
+@@ -1190,10 +1207,10 @@ def join(
+
+ apiserver_advertise_address
+ If the node should host a new control plane instance, the IP
+- address the API Server will advertise it's listening on
++ address the API Server will adversise it's listening on
-+
-+ apiserver_bind_port
+
+ apiserver_bind_port
+- If the node should host a new control plane instance, the port
++ If the node shoult host a new control plane instance, the port
-+ the API Server to bind to (default 6443)
-+
-+ certificate_key
-+ Use this key to decrypt the certificate secrets uploaded by
-+ init
-+
-+ config
-+ Path to a kubeadm configuration file
-+
-+ cri_socket
-+ Path to the CRI socket to connect
-+
-+ discovery_file
-+ For file-based discovery, a file or URL from which to load
-+ cluster information
-+
-+ discovery_token
-+ For token-based discovery, the token used to validate cluster
-+ information fetched from the API Server
-+
-+ discovery_token_ca_cert_hash
-+ For token-based discovery, validate that the root CA public key
-+ matches this hash (format: "<type>:<value>")
-+
-+ discovery_token_unsafe_skip_ca_verification
-+ For token-based discovery, allow joining without
-+ 'discovery-token-ca-cert-hash' pinning
-+
-+ experimental_control_plane
-+ Create a new control plane instance on this node
-+
-+ ignore_preflight_errors
-+ A list of checks whose errors will be shown as warnings
-+
-+ node_name
-+ Specify the node name
-+
-+ skip_phases
-+ List of phases to be skipped
-+
-+ tls_bootstrap_token
-+ Specify the token used to temporarily authenticate with the
-+ Kubernetes Control Plane while joining the node
-+
-+ token
-+ Use this token for both discovery-token and tls-bootstrap-token
-+ when those values are not provided
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.join 10.160.65.165:6443 token='token'
-+
-+ '''
-+ cmd = ['kubeadm', 'join']
-+
-+ if api_server_endpoint:
-+ cmd.append(api_server_endpoint)
-+ if discovery_token_unsafe_skip_ca_verification:
-+ cmd.append('--discovery-token-unsafe-skip-ca-verification')
-+ if experimental_control_plane:
-+ cmd.append('--experimental-control-plane')
-+
-+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
-+ ('apiserver-bind-port', apiserver_bind_port),
-+ ('certificate-key', certificate_key),
-+ ('config', config),
-+ ('cri-socket', cri_socket),
-+ ('discovery-file', discovery_file),
-+ ('discovery-token', discovery_token),
-+ ('discovery-token-ca-cert-hash',
-+ discovery_token_ca_cert_hash),
-+ ('ignore-preflight-errors', ignore_preflight_errors),
-+ ('node-name', node_name),
-+ ('skip-phases', skip_phases),
-+ ('tls-bootstrap-token', tls_bootstrap_token),
-+ ('token', token),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
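join treats the endpoint as a positional argument and everything else as flags. A sketch of the argv for the CLI example above (the token is a placeholder value):

.. code-block:: python

    # Argv kubeadm.join would build for a token-based join; the
    # endpoint is positional and the token value is made up.
    api_server_endpoint = '10.160.65.165:6443'
    token = 'abcdef.0123456789abcdef'
    cmd = ['kubeadm', 'join']
    if api_server_endpoint:
        cmd.append(api_server_endpoint)
    for parameter, value in [('token', token)]:
        if value:
            cmd.extend(['--{}'.format(parameter), str(value)])
    assert cmd == ['kubeadm', 'join', '10.160.65.165:6443',
                   '--token', 'abcdef.0123456789abcdef']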
-+
-+
-+# TODO(aplanas):
-+# * join_phase_control_plane_join_all
-+# * join_phase_control_plane_join_etcd
-+# * join_phase_control_plane_join_mark_control_plane
-+# * join_phase_control_plane_join_update_status
-+# * join_phase_control_plane_prepare_all
-+# * join_phase_control_plane_prepare_certs
-+# * join_phase_control_plane_prepare_control_plane
-+# * join_phase_control_plane_prepare_download_certs
-+# * join_phase_control_plane_prepare_kubeconfig
-+# * join_phase_kubelet_start
-+# * join_phase_preflight
-+
-+
-+def reset(cert_dir=None, cri_socket=None,
-+ ignore_preflight_errors=None, kubeconfig=None, rootfs=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Revert any changes made to this host by 'kubeadm init' or 'kubeadm
-+ join'
-+
-+ cert_dir
-+ The path to the directory where the certificates are stored
-+ (default "/etc/kubernetes/pki")
-+
-+ cri_socket
-+ Path to the CRI socket to connect
-+
-+ ignore_preflight_errors
-+ A list of checks whose errors will be shown as warnings
-+
-+ kubeconfig
-+ The kubeconfig file to use when talking to the cluster. The
-+ default value is /etc/kubernetes/admin.conf
-+
-+ rootfs
-+ The path to the real host root filesystem
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' kubeadm.reset
-+
-+ '''
-+ cmd = ['kubeadm', 'reset', '--force']
-+
-+ parameters = [('cert-dir', cert_dir),
-+ ('cri-socket', cri_socket),
-+ ('ignore-preflight-errors', ignore_preflight_errors),
-+ ('kubeconfig', kubeconfig),
-+ ('rootfs', rootfs)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ return _cmd(cmd)
-+
-+
-+# TODO(aplanas):
-+# * upgrade_apply
-+# * upgrade_diff
-+# * upgrade_node
-+# * upgrade_plan
-diff --git a/salt/modules/mac_brew_pkg.py b/salt/modules/mac_brew_pkg.py
-index 5484955edc..ee13fc2102 100644
---- a/salt/modules/mac_brew_pkg.py
-+++ b/salt/modules/mac_brew_pkg.py
-@@ -290,7 +290,7 @@ def remove(name=None, pkgs=None, **kwargs):
- return ret
+ the API Server to bind to (default 6443)
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- Update the homebrew package repository.
-
-@@ -474,7 +474,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
- return ret
-
-
--def upgrade_available(pkg):
-+def upgrade_available(pkg, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-@@ -487,7 +487,7 @@ def upgrade_available(pkg):
- return pkg in list_upgrades()
-
-
--def upgrade(refresh=True):
-+def upgrade(refresh=True, **kwargs):
- '''
- Upgrade outdated, unpinned brews.
-
-@@ -532,7 +532,7 @@ def upgrade(refresh=True):
- return ret
-
-
--def info_installed(*names):
-+def info_installed(*names, **kwargs):
- '''
- Return the information of the named package(s) installed on the system.
-
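The signature changes in this and the following pkg modules all add a tolerant `**kwargs`: Salt forwards common keyword arguments (for example `saltenv`) to provider functions, and a strict signature raises TypeError. A self-contained sketch of the failure mode being fixed:

.. code-block:: python

    # Why these pkg-module functions gain **kwargs: callers forward
    # extra keyword arguments that a strict signature rejects.
    def refresh_db_strict():
        return True

    def refresh_db_tolerant(**kwargs):
        return True

    try:
        refresh_db_strict(saltenv='base')
    except TypeError as exc:
        print('strict signature fails:', exc)
    assert refresh_db_tolerant(saltenv='base')  # extra kwargs ignored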
-diff --git a/salt/modules/mac_portspkg.py b/salt/modules/mac_portspkg.py
-index 78a38d54a9..d403d0e29b 100644
---- a/salt/modules/mac_portspkg.py
-+++ b/salt/modules/mac_portspkg.py
-@@ -376,7 +376,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
- return _list('outdated')
-
-
--def upgrade_available(pkg, refresh=True):
-+def upgrade_available(pkg, refresh=True, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-@@ -389,7 +389,7 @@ def upgrade_available(pkg, refresh=True):
- return pkg in list_upgrades(refresh=refresh)
-
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- Update ports with ``port selfupdate``
-
-@@ -405,7 +405,7 @@ def refresh_db():
- return salt.utils.mac_utils.execute_return_success(cmd)
-
-
--def upgrade(refresh=True): # pylint: disable=W0613
-+def upgrade(refresh=True, **kwargs): # pylint: disable=W0613
- '''
- Run a full upgrade using MacPorts 'port upgrade outdated'
-
-diff --git a/salt/modules/openbsdpkg.py b/salt/modules/openbsdpkg.py
-index b3b6bab912..819a24afb1 100644
---- a/salt/modules/openbsdpkg.py
-+++ b/salt/modules/openbsdpkg.py
-@@ -344,7 +344,7 @@ def purge(name=None, pkgs=None, **kwargs):
- return remove(name=name, pkgs=pkgs, purge=True)
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-diff --git a/salt/modules/pacmanpkg.py b/salt/modules/pacmanpkg.py
-index e30296e8c8..35007e27f5 100644
---- a/salt/modules/pacmanpkg.py
-+++ b/salt/modules/pacmanpkg.py
-@@ -111,7 +111,7 @@ def latest_version(*names, **kwargs):
- available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-@@ -393,7 +393,7 @@ def group_diff(name):
- return ret
-
-
--def refresh_db(root=None):
-+def refresh_db(root=None, **kwargs):
- '''
- Just run a ``pacman -Sy``, return a dict::
-
-@@ -843,7 +843,7 @@ def purge(name=None, pkgs=None, **kwargs):
- return _uninstall(action='purge', name=name, pkgs=pkgs)
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's package database (not
-@@ -877,7 +877,7 @@ def file_list(*packages):
- return {'errors': errors, 'files': ret}
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of _every_ file on the system's
-@@ -913,7 +913,7 @@ def file_dict(*packages):
- return {'errors': errors, 'packages': ret}
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- .. versionadded:: 2014.7.0
-
-diff --git a/salt/modules/pkgin.py b/salt/modules/pkgin.py
-index 240f79ca26..dd5257c80d 100644
---- a/salt/modules/pkgin.py
-+++ b/salt/modules/pkgin.py
-@@ -112,7 +112,7 @@ def _splitpkg(name):
- return name.split(';', 1)[0].rsplit('-', 1)
-
-
--def search(pkg_name):
-+def search(pkg_name, **kwargs):
- '''
- Searches for an exact match using pkgin ^package$
-
-@@ -225,7 +225,7 @@ def version(*names, **kwargs):
- return __salt__['pkg_resource.version'](*names, **kwargs)
-
-
--def refresh_db(force=False):
-+def refresh_db(force=False, **kwargs):
- '''
- Use pkg update to get latest pkg_summary
-
-@@ -637,7 +637,7 @@ def _rehash():
- __salt__['cmd.run']('rehash', output_loglevel='trace')
-
-
--def file_list(package):
-+def file_list(package, **kwargs):
- '''
- List the files that belong to a package.
-
-@@ -655,7 +655,7 @@ def file_list(package):
- return ret
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- .. versionchanged: 2016.3.0
-
-diff --git a/salt/modules/pkgng.py b/salt/modules/pkgng.py
-index 4a908084ea..7435628112 100644
---- a/salt/modules/pkgng.py
-+++ b/salt/modules/pkgng.py
-@@ -224,7 +224,7 @@ def version(*names, **kwargs):
- info = salt.utils.functools.alias_function(version, 'info')
-
-
--def refresh_db(jail=None, chroot=None, root=None, force=False):
-+def refresh_db(jail=None, chroot=None, root=None, force=False, **kwargs):
- '''
- Refresh PACKAGESITE contents
-
-@@ -2441,7 +2441,7 @@ def _parse_upgrade(stdout):
- return result
-
-
--def version_cmp(pkg1, pkg2, ignore_epoch=False):
-+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
- '''
- Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
- pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
+ certificate_key
diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py
-index 439404ae90..c8a87276b2 100644
+index 54b7014440..393b0f453a 100644
--- a/salt/modules/rpm_lowpkg.py
+++ b/salt/modules/rpm_lowpkg.py
-@@ -76,7 +76,7 @@ def bin_pkg_info(path, saltenv='base'):
- minion so that it can be examined.
+@@ -1,17 +1,13 @@
+-# -*- coding: utf-8 -*-
+ """
+ Support for rpm
+ """
- saltenv : base
-- Salt fileserver envrionment from which to retrieve the package. Ignored
-+ Salt fileserver environment from which to retrieve the package. Ignored
- if ``path`` is a local file path on the minion.
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
- CLI Example:
-@@ -128,12 +128,15 @@ def bin_pkg_info(path, saltenv='base'):
- return ret
+ import datetime
+ import logging
+ import os
+ import re
-
--def list_pkgs(*packages):
-+def list_pkgs(*packages, **kwargs):
- '''
- List the packages currently installed in a dict::
-
- {'': ''}
-
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -141,8 +144,11 @@ def list_pkgs(*packages):
- salt '*' lowpkg.list_pkgs
- '''
- pkgs = {}
-- cmd = ['rpm', '-q' if packages else '-qa',
-- '--queryformat', r'%{NAME} %{VERSION}\n']
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ cmd.extend(['-q' if packages else '-qa',
-+ '--queryformat', r'%{NAME} %{VERSION}\n'])
- if packages:
- cmd.extend(packages)
- out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
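The same `--root` injection recurs in every rpm_lowpkg function touched below. A standalone sketch of the shared logic (`rpm_cmd` is an illustrative name, not part of the module):

.. code-block:: python

    # The --root handling threaded through rpm_lowpkg: the chroot flag
    # is inserted ahead of the query arguments only when requested.
    def rpm_cmd(*args, root=None):
        cmd = ['rpm']
        if root:
            cmd.extend(['--root', root])
        cmd.extend(args)
        return cmd

    assert rpm_cmd('-qa', root='/mnt') == ['rpm', '--root', '/mnt', '-qa']
    assert rpm_cmd('-qf', '/etc/os-release') == [
        'rpm', '-qf', '/etc/os-release']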
-@@ -158,6 +164,9 @@ def verify(*packages, **kwargs):
- '''
- Runs an rpm -Va on a system, and returns the results in a dict
-
-+ root
-+ use root as top level directory (default: "/")
-+
- Files with an attribute of config, doc, ghost, license or readme in the
- package header can be ignored using the ``ignore_types`` keyword argument
-
-@@ -199,6 +208,8 @@ def verify(*packages, **kwargs):
- verify_options = [x.strip() for x in six.text_type(verify_options).split(',')]
-
- cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
- cmd.extend(['--' + x for x in verify_options])
- if packages:
- cmd.append('-V')
-@@ -258,6 +269,9 @@ def modified(*packages, **flags):
-
- .. versionadded:: 2015.5.0
-
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI examples:
-
- .. code-block:: bash
-@@ -266,10 +280,12 @@ def modified(*packages, **flags):
- salt '*' lowpkg.modified httpd postfix
- salt '*' lowpkg.modified
- '''
-- ret = __salt__['cmd.run_all'](
-- ['rpm', '-Va'] + list(packages),
-- output_loglevel='trace',
-- python_shell=False)
-+ cmd = ['rpm']
-+ if flags.get('root'):
-+ cmd.extend(['--root', flags.pop('root')])
-+ cmd.append('-Va')
-+ cmd.extend(packages)
-+ ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
-
- data = {}
-
-@@ -324,12 +340,15 @@ def modified(*packages, **flags):
- return filtered_data
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of _every_ file on the system's rpm database (not generally
- recommended).
-
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -338,12 +357,15 @@ def file_list(*packages):
- salt '*' lowpkg.file_list httpd postfix
- salt '*' lowpkg.file_list
- '''
-- if not packages:
-- cmd = ['rpm', '-qla']
-- else:
-- cmd = ['rpm', '-ql']
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+
-+ cmd.append('-ql' if packages else '-qla')
-+ if packages:
- # Can't concatenate a tuple, must do a list.extend()
- cmd.extend(packages)
-+
- ret = __salt__['cmd.run'](
- cmd,
- output_loglevel='trace',
-@@ -351,12 +373,15 @@ def file_list(*packages):
- return {'errors': [], 'files': ret}
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, sorted by group. Not specifying
- any packages will return a list of _every_ file on the system's rpm
- database (not generally recommended).
-
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -368,8 +393,11 @@ def file_dict(*packages):
- errors = []
- ret = {}
- pkgs = {}
-- cmd = ['rpm', '-q' if packages else '-qa',
-- '--queryformat', r'%{NAME} %{VERSION}\n']
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ cmd.extend(['-q' if packages else '-qa',
-+ '--queryformat', r'%{NAME} %{VERSION}\n'])
- if packages:
- cmd.extend(packages)
- out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
-@@ -380,8 +408,10 @@ def file_dict(*packages):
- comps = line.split()
- pkgs[comps[0]] = {'version': comps[1]}
- for pkg in pkgs:
-- files = []
-- cmd = ['rpm', '-ql', pkg]
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ cmd.extend(['-ql', pkg])
- out = __salt__['cmd.run'](
- ['rpm', '-ql', pkg],
- output_loglevel='trace',
-@@ -390,7 +420,7 @@ def file_dict(*packages):
- return {'errors': errors, 'packages': ret}
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- Return the name of the package that owns the file. Multiple file paths can
- be passed. If a single path is passed, a string will be returned,
-@@ -400,6 +430,9 @@ def owner(*paths):
- If the file is not owned by a package, or is not present on the minion,
- then an empty string will be returned for that path.
-
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -411,7 +444,10 @@ def owner(*paths):
- return ''
- ret = {}
- for path in paths:
-- cmd = ['rpm', '-qf', '--queryformat', '%{name}', path]
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ cmd.extend(['-qf', '--queryformat', '%{name}', path])
- ret[path] = __salt__['cmd.run_stdout'](cmd,
- output_loglevel='trace',
- python_shell=False)
-@@ -471,6 +507,9 @@ def info(*packages, **kwargs):
- :param all_versions:
- Return information for all installed versions of the packages
-
-+ :param root:
-+ use root as top level directory (default: "/")
-+
- :return:
-
- CLI example:
-@@ -493,7 +532,14 @@ def info(*packages, **kwargs):
+-# Import Salt libs
+ import salt.utils.decorators.path
+ import salt.utils.itertools
+ import salt.utils.path
+@@ -105,14 +101,14 @@ def bin_pkg_info(path, saltenv="base"):
+ newpath = __salt__["cp.cache_file"](path, saltenv)
+ if not newpath:
+ raise CommandExecutionError(
+- "Unable to retrieve {0} from saltenv '{1}'".format(path, saltenv)
++ "Unable to retrieve {} from saltenv '{}'".format(path, saltenv)
+ )
+ path = newpath
else:
- size_tag = '%{SIZE}'
+ if not os.path.exists(path):
+- raise CommandExecutionError("{0} does not exist on minion".format(path))
++ raise CommandExecutionError("{} does not exist on minion".format(path))
+ elif not os.path.isabs(path):
+- raise SaltInvocationError("{0} does not exist on minion".format(path))
++ raise SaltInvocationError("{} does not exist on minion".format(path))
-- cmd = packages and "rpm -q {0}".format(' '.join(packages)) or "rpm -qa"
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ if packages:
-+ cmd.append('-q')
-+ cmd.extend(packages)
-+ else:
-+ cmd.append('-qa')
+ # REPOID is not a valid tag for the rpm command. Remove it and replace it
+ # with 'none'
+@@ -187,28 +183,26 @@ def verify(*packages, **kwargs):
+ ftypes = {"c": "config", "d": "doc", "g": "ghost", "l": "license", "r": "readme"}
+ ret = {}
+ ignore_types = kwargs.get("ignore_types", [])
+- if not isinstance(ignore_types, (list, six.string_types)):
++ if not isinstance(ignore_types, (list, (str,))):
+ raise SaltInvocationError(
+ "ignore_types must be a list or a comma-separated string"
+ )
+- if isinstance(ignore_types, six.string_types):
++ if isinstance(ignore_types, str):
+ try:
+ ignore_types = [x.strip() for x in ignore_types.split(",")]
+ except AttributeError:
+- ignore_types = [x.strip() for x in six.text_type(ignore_types).split(",")]
++ ignore_types = [x.strip() for x in str(ignore_types).split(",")]
- # Construct query format
- attr_map = {
-@@ -544,6 +590,7 @@ def info(*packages, **kwargs):
- query.append(attr_map['description'])
- query.append("-----\\n")
+ verify_options = kwargs.get("verify_options", [])
+- if not isinstance(verify_options, (list, six.string_types)):
++ if not isinstance(verify_options, (list, (str,))):
+ raise SaltInvocationError(
+ "verify_options must be a list or a comma-separated string"
+ )
+- if isinstance(verify_options, six.string_types):
++ if isinstance(verify_options, str):
+ try:
+ verify_options = [x.strip() for x in verify_options.split(",")]
+ except AttributeError:
+- verify_options = [
+- x.strip() for x in six.text_type(verify_options).split(",")
+- ]
++ verify_options = [x.strip() for x in str(verify_options).split(",")]
-+ cmd = ' '.join(cmd)
- call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))),
- output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True)
- if call['retcode'] != 0:
-@@ -744,10 +791,13 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
- return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False)
+ cmd = ["rpm"]
+ if kwargs.get("root"):
+@@ -229,7 +223,7 @@ def verify(*packages, **kwargs):
+ # succeeded, but if the retcode is nonzero, then the command failed.
+ msg = "Failed to verify package(s)"
+ if out["stderr"]:
+- msg += ": {0}".format(out["stderr"])
++ msg += ": {}".format(out["stderr"])
+ raise CommandExecutionError(msg)
+ for line in salt.utils.itertools.split(out["stdout"], "\n"):
+@@ -492,7 +486,7 @@ def diff(package_path, path):
+ )
+ res = __salt__["cmd.shell"](cmd.format(package_path, path), output_loglevel="trace")
+ if res and res.startswith("Binary file"):
+- return "File '{0}' is binary and its content has been " "modified.".format(path)
++ return "File '{}' is binary and its content has been " "modified.".format(path)
--def checksum(*paths):
-+def checksum(*paths, **kwargs):
- '''
- Return if the signature of a RPM file is valid.
+ return res
-+ root
-+ use root as top level directory (default: "/")
-+
- CLI Example:
+@@ -590,7 +584,7 @@ def info(*packages, **kwargs):
+ attr.append("edition")
+ query.append(attr_map["edition"])
+ else:
+- for attr_k, attr_v in six.iteritems(attr_map):
++ for attr_k, attr_v in attr_map.items():
+ if attr_k != "description":
+ query.append(attr_v)
+ if attr and "description" in attr or not attr:
+@@ -599,7 +593,7 @@ def info(*packages, **kwargs):
- .. code-block:: bash
-@@ -760,9 +810,14 @@ def checksum(*paths):
- if not paths:
- raise CommandExecutionError("No package files has been specified.")
+ cmd = " ".join(cmd)
+ call = __salt__["cmd.run_all"](
+- cmd + (" --queryformat '{0}'".format("".join(query))),
++ cmd + (" --queryformat '{}'".format("".join(query))),
+ output_loglevel="trace",
+ env={"TZ": "UTC"},
+ clean_env=True,
+@@ -706,11 +700,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
-+ cmd = ['rpm']
-+ if kwargs.get('root'):
-+ cmd.extend(['--root', kwargs['root']])
-+ cmd.extend(['-K', '--quiet'])
- for package_file in paths:
-+ cmd_ = cmd + [package_file]
- ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and
-- not __salt__['cmd.retcode'](["rpm", "-K", "--quiet", package_file],
-+ not __salt__['cmd.retcode'](cmd_,
- ignore_retcode=True,
- output_loglevel='trace',
- python_shell=False))
-diff --git a/salt/modules/solarisipspkg.py b/salt/modules/solarisipspkg.py
-index 3da1dbe5a2..43fd213726 100644
---- a/salt/modules/solarisipspkg.py
-+++ b/salt/modules/solarisipspkg.py
-@@ -105,7 +105,7 @@ def _ips_get_pkgversion(line):
- return line.split()[0].split('@')[1].strip()
+ salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
+ """
+- normalize = (
+- lambda x: six.text_type(x).split(":", 1)[-1]
+- if ignore_epoch
+- else six.text_type(x)
+- )
++ normalize = lambda x: str(x).split(":", 1)[-1] if ignore_epoch else str(x)
+ ver1 = normalize(ver1)
+ ver2 = normalize(ver2)
+@@ -747,7 +737,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
+ # rpmdev-vercmp always uses epochs, even when zero
+ def _ensure_epoch(ver):
+ def _prepend(ver):
+- return "0:{0}".format(ver)
++ return "0:{}".format(ver)
--def refresh_db(full=False):
-+def refresh_db(full=False, **kwargs):
- '''
- Updates the remote repos database.
-
-@@ -129,7 +129,7 @@ def refresh_db(full=False):
- return __salt__['cmd.retcode']('/bin/pkg refresh') == 0
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check if there is an upgrade available for a certain package
- Accepts full or partial FMRI. Returns all matches found.
-diff --git a/salt/modules/solarispkg.py b/salt/modules/solarispkg.py
-index 2a828f6e9c..b28349a7d8 100644
---- a/salt/modules/solarispkg.py
-+++ b/salt/modules/solarispkg.py
-@@ -169,7 +169,7 @@ def latest_version(*names, **kwargs):
- available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
-
-
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
+ try:
+ if ":" not in ver:
+@@ -798,7 +788,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
+ cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r))
+ if cmp_result not in (-1, 0, 1):
+ raise CommandExecutionError(
+- "Comparison result '{0}' is invalid".format(cmp_result)
++ "Comparison result '{}' is invalid".format(cmp_result)
+ )
+ return cmp_result
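version_cmp normalizes both versions so the rpm-style comparison always sees an explicit epoch. That step in isolation:

.. code-block:: python

    # Epoch handling from version_cmp: versions without an epoch get
    # an explicit '0:' prefix before comparison.
    def ensure_epoch(ver):
        if ':' not in ver:
            return '0:{}'.format(ver)
        return ver

    assert ensure_epoch('0.2-001') == '0:0.2-001'
    assert ensure_epoch('1:0.2-001') == '1:0.2-001'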
diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
-index 743758bf9c..e39962f9ac 100644
+index 176e1dabaa..03e7268cd4 100644
--- a/salt/modules/systemd_service.py
+++ b/salt/modules/systemd_service.py
-@@ -1364,3 +1364,58 @@ def execs(root=None):
- continue
- ret[service] = data['ExecStart']['path']
- return ret
-+
-+
-+def firstboot(locale=None, locale_message=None, keymap=None,
-+ timezone=None, hostname=None, machine_id=None,
-+ root=None):
-+ '''
-+ .. versionadded:: TBD
-+
-+ Call systemd-firstboot to configure basic settings of the system
-+
-+ locale
-+ Set primary locale (LANG=)
-+
-+ locale_message
-+ Set message locale (LC_MESSAGES=)
-+
-+ keymap
-+ Set keymap
-+
-+ timezone
-+ Set timezone
-+
-+ hostname
-+ Set host name
-+
-+ machine_id
-+ Set machine ID
-+
-+ root
-+ Operate on an alternative filesystem root
-+
-+ CLI Example:
-+
-+ .. code-block:: bash
-+
-+ salt '*' service.firstboot keymap=jp locale=en_US.UTF-8
-+
-+ '''
-+ cmd = ['systemd-firstboot']
-+ parameters = [('locale', locale),
-+ ('locale-message', locale_message),
-+ ('keymap', keymap),
-+ ('timezone', timezone),
-+ ('hostname', hostname),
-+ ('machine-ID', machine_id),
-+ ('root', root)]
-+ for parameter, value in parameters:
-+ if value:
-+ cmd.extend(['--{}'.format(parameter), str(value)])
-+
-+ out = __salt__['cmd.run_all'](cmd)
-+
-+ if out['retcode'] != 0:
-+ raise CommandExecutionError(
-+ 'systemd-firstboot error: {}'.format(out['stderr']))
-+
-+ return True
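firstboot turns a nonzero exit code into a CommandExecutionError. A self-contained sketch with a canned cmd.run_all result (a local class stands in for salt.exceptions.CommandExecutionError):

.. code-block:: python

    # Error-handling pattern from service.firstboot; the result dict
    # and exception class are local stand-ins.
    class CommandExecutionError(Exception):
        pass

    out = {'retcode': 1, 'stderr': 'Failed to set locale'}  # canned
    try:
        if out['retcode'] != 0:
            raise CommandExecutionError(
                'systemd-firstboot error: {}'.format(out['stderr']))
    except CommandExecutionError as exc:
        print(exc)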
-diff --git a/salt/modules/xbpspkg.py b/salt/modules/xbpspkg.py
-index e493f8c80f..b5d7d8a477 100644
---- a/salt/modules/xbpspkg.py
-+++ b/salt/modules/xbpspkg.py
-@@ -121,7 +121,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
- return ret
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Provides the service module for systemd
+
+@@ -15,8 +14,6 @@ Provides the service module for systemd
+ call it under the name 'service' and NOT 'systemd'. You can see that also
+ in the examples below.
+ """
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import errno
+ import fnmatch
+@@ -26,15 +23,12 @@ import os
+ import re
+ import shlex
+
+-# Import Salt libs
+ import salt.utils.files
+ import salt.utils.itertools
+ import salt.utils.path
+ import salt.utils.stringutils
+ import salt.utils.systemd
+ from salt.exceptions import CommandExecutionError
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+
+ log = logging.getLogger(__name__)
+@@ -94,8 +88,8 @@ def _canonical_unit_name(name):
+ Build a canonical unit name treating unit names without one
+ of the valid suffixes as a service.
+ """
+- if not isinstance(name, six.string_types):
+- name = six.text_type(name)
++ if not isinstance(name, str):
++ name = str(name)
+ if any(name.endswith(suffix) for suffix in VALID_UNIT_TYPES):
+ return name
+ return "%s.service" % name
+@@ -137,7 +131,7 @@ def _check_for_unit_changes(name):
+ Check for modified/updated unit files, and run a daemon-reload if any are
+ found.
+ """
+- contextkey = "systemd._check_for_unit_changes.{0}".format(name)
++ contextkey = "systemd._check_for_unit_changes.{}".format(name)
+ if contextkey not in __context__:
+ if _untracked_custom_unit_found(name) or _unit_file_changed(name):
+ systemctl_reload()
+@@ -199,9 +193,7 @@ def _default_runlevel():
+
+ # The default runlevel can also be set via the kernel command-line.
+ try:
+- valid_strings = set(
+- ("0", "1", "2", "3", "4", "5", "6", "s", "S", "-s", "single")
+- )
++ valid_strings = {"0", "1", "2", "3", "4", "5", "6", "s", "S", "-s", "single"}
+ with salt.utils.files.fopen("/proc/cmdline") as fp_:
+ for line in fp_:
+ line = salt.utils.stringutils.to_unicode(line)
+@@ -291,7 +283,7 @@ def _get_service_exec():
+ break
+ else:
+ raise CommandExecutionError(
+- "Unable to find sysv service manager (tried {0})".format(
++ "Unable to find sysv service manager (tried {})".format(
+ ", ".join(executables)
+ )
+ )
+@@ -345,7 +337,7 @@ def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False, root=
+ ret.append("--no-block")
+ if root:
+ ret.extend(["--root", root])
+- if isinstance(action, six.string_types):
++ if isinstance(action, str):
+ action = shlex.split(action)
+ ret.extend(action)
+ if name is not None:
+@@ -507,7 +499,7 @@ def get_enabled(root=None):
+ ret.add(unit_name if unit_type == "service" else fullname)
+
+ # Add in any sysvinit services that are enabled
+- ret.update(set([x for x in _get_sysv_services(root) if _sysv_enabled(x, root)]))
++ ret.update({x for x in _get_sysv_services(root) if _sysv_enabled(x, root)})
+ return sorted(ret)
--def list_upgrades(refresh=True):
-+def list_upgrades(refresh=True, **kwargs):
- '''
- Check whether or not an upgrade is available for all packages
+@@ -549,7 +541,7 @@ def get_disabled(root=None):
+ ret.add(unit_name if unit_type == "service" else fullname)
-@@ -247,7 +247,7 @@ def latest_version(*names, **kwargs):
- available_version = latest_version
+ # Add in any sysvinit services that are disabled
+- ret.update(set([x for x in _get_sysv_services(root) if not _sysv_enabled(x, root)]))
++ ret.update({x for x in _get_sysv_services(root) if not _sysv_enabled(x, root)})
+ return sorted(ret)
--def upgrade_available(name):
-+def upgrade_available(name, **kwargs):
- '''
- Check whether or not an upgrade is available for a given package
-
-@@ -260,7 +260,7 @@ def upgrade_available(name):
- return latest_version(name) != ''
-
-
--def refresh_db():
-+def refresh_db(**kwargs):
- '''
- Update list of available packages from installed repos
-
-@@ -300,7 +300,7 @@ def version(*names, **kwargs):
- return __salt__['pkg_resource.version'](*names, **kwargs)
-
-
--def upgrade(refresh=True):
-+def upgrade(refresh=True, **kwargs):
- '''
- Run a full system upgrade
-
-@@ -484,7 +484,7 @@ def remove(name=None, pkgs=None, recursive=True, **kwargs):
- return salt.utils.data.compare_dicts(old, new)
-
-
--def list_repos():
-+def list_repos(**kwargs):
- '''
- List all repos known by XBPS
-
-@@ -607,7 +607,7 @@ def add_repo(repo, conffile='/usr/share/xbps.d/15-saltstack.conf'):
- return True
-
-
--def del_repo(repo):
-+def del_repo(repo, **kwargs):
- '''
- Remove an XBPS repository from the system.
-
-diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index 3ddf989511..88d74020b3 100644
---- a/salt/modules/yumpkg.py
-+++ b/salt/modules/yumpkg.py
-@@ -619,7 +619,7 @@ def version(*names, **kwargs):
- return __salt__['pkg_resource.version'](*names, **kwargs)
-
-
--def version_cmp(pkg1, pkg2, ignore_epoch=False):
-+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
- '''
- .. versionadded:: 2015.5.4
-
-@@ -1012,7 +1012,7 @@ def list_upgrades(refresh=True, **kwargs):
- list_updates = salt.utils.functools.alias_function(list_upgrades, 'list_updates')
-
-
--def list_downloaded():
-+def list_downloaded(**kwargs):
- '''
- .. versionadded:: 2017.7.0
-
-@@ -1948,13 +1948,13 @@ def upgrade(name=None,
-
-
- def update(name=None,
-- pkgs=None,
-- refresh=True,
-- skip_verify=False,
-- normalize=True,
-- minimal=False,
-- obsoletes=False,
-- **kwargs):
-+ pkgs=None,
-+ refresh=True,
-+ skip_verify=False,
-+ normalize=True,
-+ minimal=False,
-+ obsoletes=False,
-+ **kwargs):
- '''
- .. versionadded:: 2019.2.0
-
-@@ -2647,7 +2647,7 @@ def group_install(name,
- groupinstall = salt.utils.functools.alias_function(group_install, 'groupinstall')
-
-
--def list_repos(basedir=None):
-+def list_repos(basedir=None, **kwargs):
- '''
- Lists all repos in (default: all dirs in `reposdir` yum option).
-
-@@ -2969,7 +2969,7 @@ def _parse_repo_file(filename):
- return (headers, salt.utils.data.decode(config))
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- .. versionadded:: 2014.1.0
-
-@@ -2988,7 +2988,7 @@ def file_list(*packages):
- return __salt__['lowpkg.file_list'](*packages)
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- .. versionadded:: 2014.1.0
-
-@@ -3007,7 +3007,7 @@ def file_dict(*packages):
- return __salt__['lowpkg.file_dict'](*packages)
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- .. versionadded:: 2014.7.0
-
-@@ -3095,7 +3095,7 @@ def modified(*packages, **flags):
-
-
- @salt.utils.decorators.path.which('yumdownloader')
--def download(*packages):
-+def download(*packages, **kwargs):
- '''
- .. versionadded:: 2015.5.0
-
-@@ -3168,7 +3168,7 @@ def download(*packages):
- return ret
-
-
--def diff(*paths):
-+def diff(*paths, **kwargs):
- '''
- Return a formatted diff between current files and original in a package.
- NOTE: this function includes all files (configuration and not), but does
-@@ -3239,7 +3239,7 @@ def _get_patches(installed_only=False):
- return patches
-
-
--def list_patches(refresh=False):
-+def list_patches(refresh=False, **kwargs):
- '''
- .. versionadded:: 2017.7.0
-
-@@ -3262,7 +3262,7 @@ def list_patches(refresh=False):
- return _get_patches()
-
-
--def list_installed_patches():
-+def list_installed_patches(**kwargs):
- '''
- .. versionadded:: 2017.7.0
-
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 37428cf67c..582caffb59 100644
+index dfaaf420a1..75cb5ce4a8 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -99,6 +99,7 @@ class _Zypper(object):
-
- LOCK_EXIT_CODE = 7
- XML_DIRECTIVES = ['-x', '--xmlout']
-+ # ZYPPER_LOCK is not affected by --root
- ZYPPER_LOCK = '/var/run/zypp.pid'
- TAG_RELEASED = 'zypper/released'
- TAG_BLOCKED = 'zypper/blocked'
-@@ -107,7 +108,6 @@ class _Zypper(object):
- '''
- Constructor
- '''
-- self.__called = False
- self._reset()
-
- def _reset(self):
-@@ -129,6 +129,10 @@ class _Zypper(object):
- self.__refresh = False
- self.__ignore_repo_failure = False
- self.__systemd_scope = False
-+ self.__root = None
-+
-+ # Call status
-+ self.__called = False
-
- def __call__(self, *args, **kwargs):
- '''
-@@ -136,11 +140,17 @@ class _Zypper(object):
- :param kwargs:
- :return:
- '''
-+ # Reset after the call
-+ if self.__called:
-+ self._reset()
-+
- # Ignore exit code for 106 (repo is not available)
- if 'no_repo_failure' in kwargs:
- self.__ignore_repo_failure = kwargs['no_repo_failure']
- if 'systemd_scope' in kwargs:
- self.__systemd_scope = kwargs['systemd_scope']
-+ if 'root' in kwargs:
-+ self.__root = kwargs['root']
- return self
-
- def __getattr__(self, item):
-@@ -153,7 +163,6 @@ class _Zypper(object):
- # Reset after the call
- if self.__called:
- self._reset()
-- self.__called = False
-
- if item == 'xml':
- self.__xml = True
-@@ -284,6 +293,8 @@ class _Zypper(object):
- self.__cmd.append('--xmlout')
- if not self.__refresh and '--no-refresh' not in args:
- self.__cmd.append('--no-refresh')
-+ if self.__root:
-+ self.__cmd.extend(['--root', self.__root])
-
- self.__cmd.extend(args)
- kwargs['output_loglevel'] = 'trace'
-@@ -442,7 +453,7 @@ def _clean_cache():
- __context__.pop(cache_name, None)
-
-
--def list_upgrades(refresh=True, **kwargs):
-+def list_upgrades(refresh=True, root=None, **kwargs):
- '''
- List all available package upgrades on this system
-
-@@ -451,6 +462,9 @@ def list_upgrades(refresh=True, **kwargs):
- If set to False it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -458,7 +472,7 @@ def list_upgrades(refresh=True, **kwargs):
- salt '*' pkg.list_upgrades
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- ret = dict()
- cmd = ['list-updates']
-@@ -506,6 +520,9 @@ def info_installed(*names, **kwargs):
- :param all_versions:
- Include information for all versions of the packages installed on the minion.
-
-+ :param root:
-+ Operate on a different root directory.
-+
- CLI example:
-
- .. code-block:: bash
-@@ -546,6 +563,9 @@ def info_available(*names, **kwargs):
- If set to False it depends on zypper if a refresh is
- executed or not.
-
-+ root
-+ operate on a different root directory.
-+
- CLI example:
-
- .. code-block:: bash
-@@ -560,9 +580,11 @@ def info_available(*names, **kwargs):
- else:
- names = sorted(list(set(names)))
-
-+ root = kwargs.get('root', None)
-+
- # Refresh db before extracting the latest package
- if kwargs.get('refresh', True):
-- refresh_db()
-+ refresh_db(root)
-
- pkg_info = []
- batch = names[:]
-@@ -571,7 +593,8 @@ def info_available(*names, **kwargs):
- # Run in batches
- while batch:
- pkg_info.extend(re.split(r"Information for package*",
-- __zypper__.nolock.call('info', '-t', 'package', *batch[:batch_size])))
-+ __zypper__(root=root).nolock.call('info', '-t', 'package',
-+ *batch[:batch_size])))
- batch = batch[batch_size:]
-
- for pkg_data in pkg_info:
-@@ -631,6 +654,9 @@ def latest_version(*names, **kwargs):
- If set to False it depends on zypper if a refresh is
- executed or not.
-
-+ root
-+ operate on a different root directory.
-+
- CLI example:
-
- .. code-block:: bash
-@@ -673,6 +699,9 @@ def upgrade_available(name, **kwargs):
- If set to False it depends on zypper if a refresh is
- executed or not.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -689,6 +718,9 @@ def version(*names, **kwargs):
- installed. If more than one package name is specified, a dict of
- name/version pairs is returned.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -699,7 +731,7 @@ def version(*names, **kwargs):
- return __salt__['pkg_resource.version'](*names, **kwargs) or {}
-
-
--def version_cmp(ver1, ver2, ignore_epoch=False):
-+def version_cmp(ver1, ver2, ignore_epoch=False, **kwargs):
- '''
- .. versionadded:: 2015.5.4
-
-@@ -721,7 +753,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
- return __salt__['lowpkg.version_cmp'](ver1, ver2, ignore_epoch=ignore_epoch)
-
-
--def list_pkgs(versions_as_list=False, **kwargs):
-+def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
- '''
- List the packages currently installed as a dict. By default, the dict
- contains versions as a comma separated string::
-@@ -733,6 +765,13 @@ def list_pkgs(versions_as_list=False, **kwargs):
-
- {'': ['', '']}
-
-+ root:
-+ operate on a different root directory.
-+
-+ includes:
-+ List of types of packages to include (package, patch, pattern, product)
-+ By default packages are always included
-+
- attr:
- If a list of package attributes is specified, returned value will
- contain them in addition to version, eg.::
-@@ -770,12 +809,18 @@ def list_pkgs(versions_as_list=False, **kwargs):
- if attr is not None:
- attr = salt.utils.args.split_input(attr)
-
-+ includes = includes if includes else []
-+
- contextkey = 'pkg.list_pkgs'
+@@ -879,6 +879,7 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+ # inclusion types are passed
+ contextkey = "pkg.list_pkgs_{}_{}".format(root, includes)
+ # TODO(aplanas): this cached value depends on the parameters
if contextkey not in __context__:
ret = {}
-- cmd = ['rpm', '-qa', '--queryformat',
-- salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n']
-+ cmd = ['rpm']
-+ if root:
-+ cmd.extend(['--root', root])
-+ cmd.extend(['-qa', '--queryformat',
-+ salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'])
- output = __salt__['cmd.run'](cmd,
- python_shell=False,
- output_loglevel='trace')
-@@ -810,6 +855,28 @@ def list_pkgs(versions_as_list=False, **kwargs):
- continue
- _ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
+ cmd = ["rpm"]
+@@ -958,6 +959,28 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+ }
+ ]
+ for include in includes:
-+ if include in ('pattern', 'patch'):
-+ if include == 'pattern':
++ if include in ("pattern", "patch"):
++ if include == "pattern":
+ pkgs = list_installed_patterns(root=root)
-+ elif include == 'patch':
++ elif include == "patch":
+ pkgs = list_installed_patches(root=root)
+ else:
+ pkgs = []
+ for pkg in pkgs:
-+ pkg_extended_name = '{}:{}'.format(include, pkg)
-+ info = info_available(pkg_extended_name,
-+ refresh=False,
-+ root=root)
-+ _ret[pkg_extended_name] = [{
-+ 'epoch': None,
-+ 'version': info[pkg]['version'],
-+ 'release': None,
-+ 'arch': info[pkg]['arch'],
-+ 'install_date': None,
-+ 'install_date_time_t': None,
-+ }]
++ pkg_extended_name = "{}:{}".format(include, pkg)
++ info = info_available(pkg_extended_name, refresh=False, root=root)
++ _ret[pkg_extended_name] = [
++ {
++ "epoch": None,
++ "version": info[pkg]["version"],
++ "release": None,
++ "arch": info[pkg]["arch"],
++ "install_date": None,
++ "install_date_time_t": None,
++ }
++ ]
+
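For orientation, a hedged sketch of the entry shape this branch appends for each included non-package object (names and versions hypothetical). Only 'version' and 'arch' come from info_available(); the remaining fields are deliberately None because rpm does not track them for patterns or patches:

    # Hypothetical result fragment for list_pkgs(includes=['pattern'])
    _ret = {
        "pattern:base": [
            {
                "epoch": None,
                "version": "20200124",   # hypothetical
                "release": None,
                "arch": "x86_64",
                "install_date": None,
                "install_date_time_t": None,
            }
        ],
    }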
__context__[contextkey] = _ret
- return __salt__['pkg_resource.format_pkg_list'](
-@@ -861,6 +928,9 @@ def list_repo_pkgs(*args, **kwargs):
- When ``True``, the return data for each package will be organized by
- repository.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -893,7 +963,8 @@ def list_repo_pkgs(*args, **kwargs):
- return True
- return False
-
-- for node in __zypper__.xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
-+ root = kwargs.get('root') or None
-+ for node in __zypper__(root=root).xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
- pkginfo = dict(node.attributes.items())
- try:
- if pkginfo['kind'] != 'package':
-@@ -935,23 +1006,27 @@ def list_repo_pkgs(*args, **kwargs):
- return byrepo_ret
+ return __salt__["pkg_resource.format_pkg_list"](
+@@ -1401,7 +1424,9 @@ def refresh_db(force=None, root=None):
--def _get_configured_repos():
-+def _get_configured_repos(root=None):
- '''
- Get all the info about repositories from the configurations.
- '''
-
-+ repos = os.path.join(root, os.path.relpath(REPOS, os.path.sep)) if root else REPOS
- repos_cfg = configparser.ConfigParser()
-- repos_cfg.read([REPOS + '/' + fname for fname in os.listdir(REPOS) if fname.endswith(".repo")])
-+ if os.path.exists(repos):
-+ repos_cfg.read([repos + '/' + fname for fname in os.listdir(repos) if fname.endswith(".repo")])
-+ else:
-+ log.warning('Repositories not found in {}'.format(repos))
-
- return repos_cfg
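The root-aware path handling above relies on one small idiom: re-anchor an absolute path below an alternative root directory when one is given. A minimal sketch, with a hypothetical helper name:

    import os

    def _root_path(path, root=None):
        # Re-anchor an absolute path under `root`, if provided.
        return os.path.join(root, os.path.relpath(path, os.path.sep)) if root else path

    # _root_path('/etc/zypp/repos.d', '/mnt') -> '/mnt/etc/zypp/repos.d'
    # _root_path('/etc/zypp/repos.d')         -> '/etc/zypp/repos.d'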
+ def _find_types(pkgs):
+- """Form a package names list, find prefixes of packages types."""
++ """
++ From a list of package names, find the prefixes of package types.
++ """
+ return sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2})
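A quick sanity check of what this helper returns (package names hypothetical):

    def _find_types(pkgs):
        # Collect the distinct 'type:' prefixes; plain names contribute nothing.
        return sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2})

    # _find_types(["vim", "patch:openSUSE-2020-23", "pattern:base", "pattern:x11"])
    # -> ['patch', 'pattern']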
--def _get_repo_info(alias, repos_cfg=None):
-+def _get_repo_info(alias, repos_cfg=None, root=None):
- '''
- Get one repo meta-data.
- '''
- try:
-- meta = dict((repos_cfg or _get_configured_repos()).items(alias))
-+ meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
- meta['alias'] = alias
- for key, val in six.iteritems(meta):
- if val in ['0', '1']:
-@@ -963,51 +1038,60 @@ def _get_repo_info(alias, repos_cfg=None):
- return {}
-
-
--def get_repo(repo, **kwargs): # pylint: disable=unused-argument
-+def get_repo(repo, root=None, **kwargs): # pylint: disable=unused-argument
- '''
- Display a repo.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-
- salt '*' pkg.get_repo alias
- '''
-- return _get_repo_info(repo)
-+ return _get_repo_info(repo, root=root)
-
-
--def list_repos():
-+def list_repos(root=None, **kwargs):
- '''
- Lists all repos.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-
- salt '*' pkg.list_repos
- '''
-- repos_cfg = _get_configured_repos()
-+ repos_cfg = _get_configured_repos(root=root)
- all_repos = {}
- for alias in repos_cfg.sections():
-- all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)
-+ all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)
-
- return all_repos
-
-
--def del_repo(repo):
-+def del_repo(repo, root=None):
- '''
- Delete a repo.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-
- salt '*' pkg.del_repo alias
- '''
-- repos_cfg = _get_configured_repos()
-+ repos_cfg = _get_configured_repos(root=root)
- for alias in repos_cfg.sections():
- if alias == repo:
-- doc = __zypper__.xml.call('rr', '--loose-auth', '--loose-query', alias)
-+ doc = __zypper__(root=root).xml.call('rr', '--loose-auth', '--loose-query', alias)
- msg = doc.getElementsByTagName('message')
- if doc.getElementsByTagName('progress') and msg:
- return {
-@@ -1046,6 +1130,9 @@ def mod_repo(repo, **kwargs):
- If set to True, automatically trust and import public GPG key for
- the repository.
-
-+ root
-+ operate on a different root directory.
-+
- Key/Value pairs may also be removed from a repo's configuration by setting
- a key to a blank value. Bear in mind that a name cannot be deleted, and a
- URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).
-@@ -1058,7 +1145,8 @@ def mod_repo(repo, **kwargs):
- salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
- '''
-
-- repos_cfg = _get_configured_repos()
-+ root = kwargs.get('root') or None
-+ repos_cfg = _get_configured_repos(root=root)
- added = False
-
- # An attempt to add new one?
-@@ -1078,7 +1166,7 @@ def mod_repo(repo, **kwargs):
-
- # Is there already such repo under different alias?
- for alias in repos_cfg.sections():
-- repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)
-+ repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)
-
- # Complete user URL, in case it is not
- new_url = _urlparse(url)
-@@ -1100,17 +1188,17 @@ def mod_repo(repo, **kwargs):
+@@ -1596,12 +1621,7 @@ def install(
+ 'Advisory id "{}" not found'.format(advisory_id)
)
-
- # Add new repo
-- __zypper__.xml.call('ar', url, repo)
-+ __zypper__(root=root).xml.call('ar', url, repo)
-
- # Verify the repository has been added
-- repos_cfg = _get_configured_repos()
-+ repos_cfg = _get_configured_repos(root=root)
- if repo not in repos_cfg.sections():
- raise CommandExecutionError(
- 'Failed add new repository \'{0}\' for unspecified reason. '
- 'Please check zypper logs.'.format(repo))
- added = True
-
-- repo_info = _get_repo_info(repo)
-+ repo_info = _get_repo_info(repo, root=root)
- if (
- not added and 'baseurl' in kwargs and
- not (kwargs['baseurl'] == repo_info['baseurl'])
-@@ -1119,8 +1207,8 @@ def mod_repo(repo, **kwargs):
- # we need to remove the repository and add it again with the new baseurl
- repo_info.update(kwargs)
- repo_info.setdefault('cache', False)
-- del_repo(repo)
-- return mod_repo(repo, **repo_info)
-+ del_repo(repo, root=root)
-+ return mod_repo(repo, root=root, **repo_info)
-
- # Modify added or existing repo according to the options
- cmd_opt = []
-@@ -1153,7 +1241,7 @@ def mod_repo(repo, **kwargs):
-
- if cmd_opt:
- cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
-- __zypper__.refreshable.xml.call(*cmd_opt)
-+ __zypper__(root=root).refreshable.xml.call(*cmd_opt)
-
- comment = None
- if call_refresh:
-@@ -1161,23 +1249,26 @@ def mod_repo(repo, **kwargs):
- # --gpg-auto-import-keys is not doing anything
- # so we need to specifically refresh here with --gpg-auto-import-keys
- refresh_opts = global_cmd_opt + ['refresh'] + [repo]
-- __zypper__.xml.call(*refresh_opts)
-+ __zypper__(root=root).xml.call(*refresh_opts)
- elif not added and not cmd_opt:
- comment = 'Specified arguments did not result in modification of repo'
-
-- repo = get_repo(repo)
-+ repo = get_repo(repo, root=root)
- if comment:
- repo['comment'] = comment
-
- return repo
-
-
--def refresh_db():
-+def refresh_db(root=None):
- '''
- Force a repository refresh by calling ``zypper refresh --force``, return a dict::
-
- {'': Bool}
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -1187,7 +1278,7 @@ def refresh_db():
- # Remove rtag file to keep multiple refreshes from happening in pkg states
- salt.utils.pkg.clear_rtag(__opts__)
- ret = {}
-- out = __zypper__.refreshable.call('refresh', '--force')
-+ out = __zypper__(root=root).refreshable.call('refresh', '--force')
-
- for line in out.splitlines():
- if not line:
-@@ -1206,6 +1297,12 @@ def refresh_db():
- return ret
-
-
-+def _find_types(pkgs):
-+ '''Form a package names list, find prefixes of packages types.'''
-+ return sorted({pkg.split(':', 1)[0] for pkg in pkgs
-+ if len(pkg.split(':', 1)) == 2})
-+
-+
- def install(name=None,
- refresh=False,
- fromrepo=None,
-@@ -1215,6 +1312,8 @@ def install(name=None,
- skip_verify=False,
- version=None,
- ignore_repo_failure=False,
-+ no_recommends=False,
-+ root=None,
- **kwargs):
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
-@@ -1303,6 +1402,12 @@ def install(name=None,
- Zypper returns error code 106 if one of the repositories are not available for various reasons.
- In case to set strict check, this parameter needs to be set to True. Default: False.
-
-+ no_recommends
-+ Do not install recommended packages, only required ones.
-+
-+ root
-+ operate on a different root directory.
-+
- diff_attr:
- If a list of package attributes is specified, returned value will
- contain them, eg.::
-@@ -1342,7 +1447,7 @@ def install(name=None,
- 'arch': ''}}}
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- try:
- pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs, sources, **kwargs)
-@@ -1352,7 +1457,7 @@ def install(name=None,
- if pkg_params is None or len(pkg_params) == 0:
- return {}
-
-- version_num = Wildcard(__zypper__)(name, version)
-+ version_num = Wildcard(__zypper__(root=root))(name, version)
-
- if version_num:
- if pkgs is None and sources is None:
-@@ -1377,17 +1482,20 @@ def install(name=None,
- targets.append(target)
- elif pkg_type == 'advisory':
- targets = []
-- cur_patches = list_patches()
-+ cur_patches = list_patches(root=root)
- for advisory_id in pkg_params:
- if advisory_id not in cur_patches:
- raise CommandExecutionError('Advisory id "{0}" not found'.format(advisory_id))
else:
+- # If we add here the `patch:` prefix, the
+- # `_find_types` helper will take the patches into the
+- # list of packages. Usually this is the correct thing
+- # to do, but we can break software the depends on the
+- # old behaviour.
- targets.append(advisory_id)
-+ targets.append('patch:{}'.format(advisory_id))
++ targets.append("patch:{}".format(advisory_id))
else:
targets = pkg_params
- diff_attr = kwargs.get("diff_attr")
-- old = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
-+
-+ includes = _find_types(targets)
-+ old = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
-+
- downgrades = []
- if fromrepo:
- fromrepoopt = ['--force', '--force-resolution', '--from', fromrepo]
-@@ -1406,10 +1514,10 @@ def install(name=None,
- cmd_install.append('--download-only')
- if fromrepo:
- cmd_install.extend(fromrepoopt)
-+ if no_recommends:
-+ cmd_install.append('--no-recommends')
+@@ -1639,16 +1659,6 @@ def install(
errors = []
-- if pkg_type == 'advisory':
-- targets = ["patch:{0}".format(t) for t in targets]
+- # If the type is 'advisory', we manually add the 'patch:'
+- # prefix. This kind of package will not appear in pkg_list in this
+- # way.
+- #
+- # Note that this enable a different mechanism to install a patch;
+- # if the name of the package is already prefixed with 'patch:' we
+- # can avoid listing them in the `advisory_ids` field.
+- if pkg_type == "advisory":
+- targets = ["patch:{}".format(t) for t in targets]
+-
# Split the targets into batches of 500 packages each, so that
# the maximal length of the command line is not broken
-@@ -1417,7 +1525,7 @@ def install(name=None,
- while targets:
- cmd = cmd_install + targets[:500]
- targets = targets[500:]
-- for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope).call(*cmd).splitlines():
-+ for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope, root=root).call(*cmd).splitlines():
- match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
- if match:
- downgrades.append(match.group(1))
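The 500-element slicing above keeps each zypper invocation below the OS argument-length limit. Reduced to a standalone sketch (helper name hypothetical):

    def _batches(items, size=500):
        # Consume the list front-to-back in fixed-size slices, mirroring
        # the `targets = targets[500:]` loop above.
        while items:
            yield items[:size]
            items = items[size:]

    # list(_batches(list(range(1200)))) yields batches of 500, 500 and 200 items.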
-@@ -1425,12 +1533,17 @@ def install(name=None,
- while downgrades:
- cmd = cmd_install + ['--force'] + downgrades[:500]
- downgrades = downgrades[500:]
-- __zypper__(no_repo_failure=ignore_repo_failure).call(*cmd)
-+ __zypper__(no_repo_failure=ignore_repo_failure, root=root).call(*cmd)
-
- _clean_cache()
-- new = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
-+ new = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
- ret = salt.utils.data.compare_dicts(old, new)
-
-+ # If anything other than packages is included in the search,
-+ # it is better to clean the cache.
-+ if includes:
-+ _clean_cache()
-+
- if errors:
- raise CommandExecutionError(
- 'Problem encountered {0} package(s)'.format(
-@@ -1448,6 +1561,8 @@ def upgrade(refresh=True,
- fromrepo=None,
- novendorchange=False,
- skip_verify=False,
-+ no_recommends=False,
-+ root=None,
- **kwargs): # pylint: disable=unused-argument
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
-@@ -1487,6 +1602,12 @@ def upgrade(refresh=True,
- skip_verify
- Skip the GPG verification check (e.g., ``--no-gpg-checks``)
-
-+ no_recommends
-+ Do not install recommended packages, only required ones.
-+
-+ root
-+ Operate on a different root directory.
-+
- Returns a dictionary containing the changes:
-
- .. code-block:: python
-@@ -1509,7 +1630,7 @@ def upgrade(refresh=True,
- cmd_update.insert(0, '--no-gpg-checks')
-
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- if dryrun:
- cmd_update.append('--dry-run')
-@@ -1530,16 +1651,20 @@ def upgrade(refresh=True,
- else:
- log.warning('Disabling vendor changes is not supported on this Zypper version')
+ systemd_scope = _systemd_scope()
+@@ -1805,6 +1815,10 @@ def upgrade(
+ cmd_update.append("--no-recommends")
+ log.info("Disabling recommendations")
+ if no_recommends:
-+ cmd_update.append('--no-recommends')
-+ log.info('Disabling recommendations')
++ cmd_update.append("--no-recommends")
++ log.info("Disabling recommendations")
+
if dryrun:
# Creates a solver test case for debugging.
- log.info('Executing debugsolver and performing a dry-run dist-upgrade')
-- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update + ['--debug-solver'])
-+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update + ['--debug-solver'])
-
-- old = list_pkgs()
-+ old = list_pkgs(root=root)
-
-- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update)
-+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update)
- _clean_cache()
-- new = list_pkgs()
-+ new = list_pkgs(root=root)
- ret = salt.utils.data.compare_dicts(old, new)
-
- if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:
-@@ -1560,7 +1685,7 @@ def upgrade(refresh=True,
- return ret
-
-
--def _uninstall(name=None, pkgs=None):
-+def _uninstall(name=None, pkgs=None, root=None):
- '''
- Remove and purge do identical things but with different Zypper commands,
- this function performs the common logic.
-@@ -1570,7 +1695,8 @@ def _uninstall(name=None, pkgs=None):
- except MinionError as exc:
- raise CommandExecutionError(exc)
-
-- old = list_pkgs()
-+ includes = _find_types(pkg_params.keys())
-+ old = list_pkgs(root=root, includes=includes)
- targets = []
- for target in pkg_params:
- # Check if package version set to be removed is actually installed:
-@@ -1586,11 +1712,12 @@ def _uninstall(name=None, pkgs=None):
-
- errors = []
- while targets:
-- __zypper__(systemd_scope=systemd_scope).call('remove', *targets[:500])
-+ __zypper__(systemd_scope=systemd_scope, root=root).call('remove', *targets[:500])
- targets = targets[500:]
-
- _clean_cache()
-- ret = salt.utils.data.compare_dicts(old, list_pkgs())
-+ new = list_pkgs(root=root, includes=includes)
-+ ret = salt.utils.data.compare_dicts(old, new)
-
- if errors:
- raise CommandExecutionError(
-@@ -1627,7 +1754,7 @@ def normalize_name(name):
- return name
-
-
--def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
-+def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
- On minions running systemd>=205, `systemd-run(1)`_ is now used to
-@@ -1655,6 +1782,9 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
- A list of packages to delete. Must be passed as a python list. The
- ``name`` parameter will be ignored if this option is passed.
-
-+ root
-+ Operate on a different root directory.
-+
- .. versionadded:: 0.16.0
-
-
-@@ -1668,10 +1798,10 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
- salt '*' pkg.remove ,,
- salt '*' pkg.remove pkgs='["foo", "bar"]'
- '''
-- return _uninstall(name=name, pkgs=pkgs)
-+ return _uninstall(name=name, pkgs=pkgs, root=root)
-
-
--def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
-+def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
- On minions running systemd>=205, `systemd-run(1)`_ is now used to
-@@ -1700,6 +1830,9 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
- A list of packages to delete. Must be passed as a python list. The
- ``name`` parameter will be ignored if this option is passed.
-
-+ root
-+ Operate on a different root directory.
-+
- .. versionadded:: 0.16.0
-
-
-@@ -1713,13 +1846,16 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
- salt '*' pkg.purge ,,
- salt '*' pkg.purge pkgs='["foo", "bar"]'
- '''
-- return _uninstall(name=name, pkgs=pkgs)
-+ return _uninstall(name=name, pkgs=pkgs, root=root)
-
-
--def list_locks():
-+def list_locks(root=None):
- '''
- List current package locks.
-
-+ root
-+ operate on a different root directory.
-+
- Return a dict containing the locked package with attributes::
-
- {'': {'case_sensitive': '',
-@@ -1733,8 +1869,9 @@ def list_locks():
- salt '*' pkg.list_locks
- '''
- locks = {}
-- if os.path.exists(LOCKS):
-- with salt.utils.files.fopen(LOCKS) as fhr:
-+ _locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
-+ try:
-+ with salt.utils.files.fopen(_locks) as fhr:
- items = salt.utils.stringutils.to_unicode(fhr.read()).split('\n\n')
- for meta in [item.split('\n') for item in items]:
- lock = {}
-@@ -1743,15 +1880,22 @@ def list_locks():
- lock.update(dict([tuple([i.strip() for i in element.split(':', 1)]), ]))
- if lock.get('solvable_name'):
- locks[lock.pop('solvable_name')] = lock
-+ except IOError:
-+ pass
+ log.info("Executing debugsolver and performing a dry-run dist-upgrade")
+@@ -2035,13 +2049,13 @@ def list_locks(root=None):
+ for element in [el for el in meta if el]:
+ if ":" in element:
+ lock.update(
+- dict([tuple([i.strip() for i in element.split(":", 1)])])
++ dict([tuple([i.strip() for i in element.split(":", 1)]),])
+ )
+ if lock.get("solvable_name"):
+ locks[lock.pop("solvable_name")] = lock
+ except OSError:
+ pass
+- except Exception: # pylint: disable=broad-except
+ except Exception:
-+ log.warning('Detected a problem when accessing {}'.format(_locks))
+ log.warning("Detected a problem when accessing {}".format(_locks))
return locks
-
-
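For reference, the /etc/zypp/locks format parsed above is a sequence of stanzas separated by blank lines, each a set of 'key: value' pairs. A minimal sketch of the parsing, with hypothetical file content:

    sample = (
        "type: package\n"
        "solvable_name: kernel-default\n"
        "match_type: glob\n"
        "case_sensitive: on"
    )

    locks = {}
    for meta in [item.split("\n") for item in sample.split("\n\n")]:
        lock = {}
        for element in [el for el in meta if el]:
            if ":" in element:
                key, value = (i.strip() for i in element.split(":", 1))
                lock[key] = value
        if lock.get("solvable_name"):
            locks[lock.pop("solvable_name")] = lock

    # locks == {'kernel-default': {'type': 'package', 'match_type': 'glob',
    #                              'case_sensitive': 'on'}}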
--def clean_locks():
-+def clean_locks(root=None):
- '''
- Remove unused locks that do not currently (with regard to repositories
- used) lock any package.
-
-+ root
-+ Operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -1760,10 +1904,11 @@ def clean_locks():
- '''
- LCK = "removed"
- out = {LCK: 0}
-- if not os.path.exists("/etc/zypp/locks"):
-+ locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
-+ if not os.path.exists(locks):
- return out
-
-- for node in __zypper__.xml.call('cl').getElementsByTagName("message"):
-+ for node in __zypper__(root=root).xml.call('cl').getElementsByTagName("message"):
- text = node.childNodes[0].nodeValue.lower()
- if text.startswith(LCK):
- out[LCK] = text.split(" ")[1]
-@@ -1776,6 +1921,9 @@ def unhold(name=None, pkgs=None, **kwargs):
- '''
- Remove specified package lock.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -1785,12 +1933,13 @@ def unhold(name=None, pkgs=None, **kwargs):
+@@ -2092,12 +2106,13 @@ def unhold(name=None, pkgs=None, **kwargs):
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
- '''
+ """
ret = {}
-+ root = kwargs.get('root')
++ root = kwargs.get("root")
if (not name and not pkgs) or (name and pkgs):
- raise CommandExecutionError('Name or packages must be specified.')
+ raise CommandExecutionError("Name or packages must be specified.")
elif name:
pkgs = [name]
- locks = list_locks()
+ locks = list_locks(root)
try:
- pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
except MinionError as exc:
-@@ -1807,15 +1956,18 @@ def unhold(name=None, pkgs=None, **kwargs):
- ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg)
+@@ -2114,15 +2129,18 @@ def unhold(name=None, pkgs=None, **kwargs):
+ ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg)
if removed:
-- __zypper__.call('rl', *removed)
-+ __zypper__(root=root).call('rl', *removed)
+- __zypper__.call("rl", *removed)
++ __zypper__(root=root).call("rl", *removed)
return ret
-def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
- '''
+ """
Remove specified package lock.
+ root
@@ -3360,26 +658,17 @@ index 37428cf67c..582caffb59 100644
CLI Example:
.. code-block:: bash
-@@ -1825,7 +1977,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
- salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
- '''
- salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use unhold() instead.')
+@@ -2134,7 +2152,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+ salt.utils.versions.warn_until(
+ "Sodium", "This function is deprecated. Please use unhold() instead."
+ )
- locks = list_locks()
+ locks = list_locks(root)
try:
- packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
+ packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
except MinionError as exc:
-@@ -1840,7 +1992,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
- missing.append(pkg)
-
- if removed:
-- __zypper__.call('rl', *removed)
-+ __zypper__(root=root).call('rl', *removed)
-
- return {'removed': len(removed), 'not_found': missing}
-
-@@ -1849,6 +2001,9 @@ def hold(name=None, pkgs=None, **kwargs):
- '''
+@@ -2158,6 +2176,9 @@ def hold(name=None, pkgs=None, **kwargs):
+ """
Add a package lock. Specify packages to lock by exact name.
+ root
@@ -3388,13 +677,13 @@ index 37428cf67c..582caffb59 100644
CLI Example:
.. code-block:: bash
-@@ -1863,12 +2018,13 @@ def hold(name=None, pkgs=None, **kwargs):
+@@ -2172,12 +2193,13 @@ def hold(name=None, pkgs=None, **kwargs):
:return:
- '''
+ """
ret = {}
-+ root = kwargs.get('root')
++ root = kwargs.get("root")
if (not name and not pkgs) or (name and pkgs):
- raise CommandExecutionError('Name or packages must be specified.')
+ raise CommandExecutionError("Name or packages must be specified.")
elif name:
pkgs = [name]
@@ -3402,569 +691,63 @@ index 37428cf67c..582caffb59 100644
+ locks = list_locks(root=root)
added = []
try:
- pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
-@@ -1884,15 +2040,18 @@ def hold(name=None, pkgs=None, **kwargs):
- ret[pkg]['comment'] = 'Package {0} is already set to be held.'.format(pkg)
+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
+@@ -2193,12 +2215,12 @@ def hold(name=None, pkgs=None, **kwargs):
+ ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg)
if added:
-- __zypper__.call('al', *added)
-+ __zypper__(root=root).call('al', *added)
+- __zypper__.call("al", *added)
++ __zypper__(root=root).call("al", *added)
return ret
-def add_lock(packages, **kwargs): # pylint: disable=unused-argument
+def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
- '''
+ """
Add a package lock. Specify packages to lock by exact name.
-+ root
-+ operate on a different root directory.
-+
- CLI Example:
-
- .. code-block:: bash
-@@ -1902,7 +2061,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
- salt '*' pkg.add_lock pkgs='["foo", "bar"]'
- '''
- salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.')
+@@ -2216,7 +2238,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
+ salt.utils.versions.warn_until(
+ "Sodium", "This function is deprecated. Please use hold() instead."
+ )
- locks = list_locks()
+ locks = list_locks(root)
added = []
try:
- packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
-@@ -1914,7 +2073,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
- added.append(pkg)
+ packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
+@@ -2410,14 +2432,11 @@ def _get_installed_patterns(root=None):
+ # a real error.
+ output = __salt__["cmd.run"](cmd, ignore_retcode=True)
- if added:
-- __zypper__.call('al', *added)
-+ __zypper__(root=root).call('al', *added)
+- # On <= SLE12SP4 we have patterns that have multiple names (alias)
+- # and that are duplicated. The alias start with ".", so we filter
+- # them.
+- installed_patterns = {
++ installed_patterns = [
+ _pattern_name(line)
+ for line in output.splitlines()
+- if line.startswith("pattern() = ") and not _pattern_name(line).startswith(".")
+- }
++ if line.startswith("pattern() = ")
++ ]
- return {'added': len(added), 'packages': added}
-
-@@ -1924,7 +2083,9 @@ def verify(*names, **kwargs):
- Runs an rpm -Va on a system, and returns the results in a dict
-
- Files with an attribute of config, doc, ghost, license or readme in the
-- package header can be ignored using the ``ignore_types`` keyword argument
-+ package header can be ignored using the ``ignore_types`` keyword argument.
-+
-+ The root parameter can also be passed via the keyword argument.
-
- CLI Example:
-
-@@ -1938,12 +2099,14 @@ def verify(*names, **kwargs):
- return __salt__['lowpkg.verify'](*names, **kwargs)
-
-
--def file_list(*packages):
-+def file_list(*packages, **kwargs):
- '''
- List the files that belong to a package. Not specifying any packages will
- return a list of *every* file on the system's rpm database (not generally
- recommended).
-
-+ The root parameter can also be passed via the keyword argument.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -1952,15 +2115,17 @@ def file_list(*packages):
- salt '*' pkg.file_list httpd postfix
- salt '*' pkg.file_list
- '''
-- return __salt__['lowpkg.file_list'](*packages)
-+ return __salt__['lowpkg.file_list'](*packages, **kwargs)
-
-
--def file_dict(*packages):
-+def file_dict(*packages, **kwargs):
- '''
- List the files that belong to a package, grouped by package. Not
- specifying any packages will return a list of *every* file on the system's
- rpm database (not generally recommended).
-
-+ The root parameter can also be passed via the keyword argument.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -1969,7 +2134,7 @@ def file_dict(*packages):
- salt '*' pkg.file_list httpd postfix
- salt '*' pkg.file_list
- '''
-- return __salt__['lowpkg.file_dict'](*packages)
-+ return __salt__['lowpkg.file_dict'](*packages, **kwargs)
-
-
- def modified(*packages, **flags):
-@@ -2008,6 +2173,9 @@ def modified(*packages, **flags):
- capabilities
- Include only files where capabilities differ or not. Note: supported only on newer RPM versions.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2021,7 +2189,7 @@ def modified(*packages, **flags):
- return __salt__['lowpkg.modified'](*packages, **flags)
-
-
--def owner(*paths):
-+def owner(*paths, **kwargs):
- '''
- Return the name of the package that owns the file. Multiple file paths can
- be passed. If a single path is passed, a string will be returned,
-@@ -2031,6 +2199,8 @@ def owner(*paths):
- If the file is not owned by a package, or is not present on the minion,
- then an empty string will be returned for that path.
-
-+ The root parameter can also be passed via the keyword argument.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2038,26 +2208,69 @@ def owner(*paths):
- salt '*' pkg.owner /usr/bin/apachectl
- salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
- '''
-- return __salt__['lowpkg.owner'](*paths)
-+ return __salt__['lowpkg.owner'](*paths, **kwargs)
-
-
--def _get_patterns(installed_only=None):
-- '''
-- List all known patterns in repos.
-- '''
-+def _get_visible_patterns(root=None):
-+ '''Get all available patterns in the repo that are visible.'''
- patterns = {}
-- for element in __zypper__.nolock.xml.call('se', '-t', 'pattern').getElementsByTagName('solvable'):
-+ search_patterns = __zypper__(root=root).nolock.xml.call('se', '-t', 'pattern')
-+ for element in search_patterns.getElementsByTagName('solvable'):
- installed = element.getAttribute('status') == 'installed'
-- if (installed_only and installed) or not installed_only:
-- patterns[element.getAttribute('name')] = {
-- 'installed': installed,
-- 'summary': element.getAttribute('summary'),
-+ patterns[element.getAttribute('name')] = {
-+ 'installed': installed,
-+ 'summary': element.getAttribute('summary'),
-+ }
-+ return patterns
-+
-+
-+def _get_installed_patterns(root=None):
-+ '''
-+ List all installed patterns.
-+ '''
-+ # Some patterns are not visible (the `pattern-visible()` capability is
-+ # not set), so they cannot be found via a normal `zypper se -t
-+ # pattern`.
-+ #
-+ # Also patterns are not directly searchable in the local rpmdb.
-+ #
-+ # The proposed solution is to first search all the packages that
-+ # contain the 'pattern()' capability, and deduce the name of the
-+ # pattern from this capability.
-+ #
-+ # For example:
-+ #
-+ # 'pattern() = base' -> 'base'
-+ # 'pattern() = microos_defaults' -> 'microos_defaults'
-+
-+ def _pattern_name(capability):
-+ '''Return from a suitable capability the pattern name.'''
-+ return capability.split('=')[-1].strip()
-+
-+ cmd = ['rpm']
-+ if root:
-+ cmd.extend(['--root', root])
-+ cmd.extend(['-q', '--provides', '--whatprovides', 'pattern()'])
-+ # If no `pattern()`s are found, RPM returns `1`, but for us is not
-+ # a real error.
-+ output = __salt__['cmd.run'](cmd, ignore_retcode=True)
-+
-+ installed_patterns = [_pattern_name(line) for line in output.splitlines()
-+ if line.startswith('pattern() = ')]
-+
-+ patterns = {k: v for k, v in _get_visible_patterns(root=root).items() if v['installed']}
-+
-+ for pattern in installed_patterns:
-+ if pattern not in patterns:
-+ patterns[pattern] = {
-+ 'installed': True,
-+ 'summary': 'Non-visible pattern',
- }
-
- return patterns
-
-
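The capability-based deduction described in the comments above, reduced to a runnable sketch (the rpm --provides output lines are hypothetical):

    def _pattern_name(capability):
        # 'pattern() = base' -> 'base'
        return capability.split("=")[-1].strip()

    rpm_provides = [
        "pattern() = base",
        "pattern() = microos_defaults",
        "pattern-icon() = pattern-base",   # unrelated capability, filtered out
    ]
    installed = [
        _pattern_name(line)
        for line in rpm_provides
        if line.startswith("pattern() = ")
    ]
    # installed -> ['base', 'microos_defaults']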
--def list_patterns(refresh=False):
-+def list_patterns(refresh=False, root=None):
- '''
- List all known patterns from available repos.
-
-@@ -2066,6 +2279,9 @@ def list_patterns(refresh=False):
- If set to False (default) it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2073,27 +2289,30 @@ def list_patterns(refresh=False):
- salt '*' pkg.list_patterns
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
-- return _get_patterns()
-+ return _get_visible_patterns(root=root)
-
-
--def list_installed_patterns():
-+def list_installed_patterns(root=None):
- '''
- List installed patterns on the system.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-
- salt '*' pkg.list_installed_patterns
- '''
-- return _get_patterns(installed_only=True)
-+ return _get_installed_patterns(root=root)
-
-
- def search(criteria, refresh=False, **kwargs):
- '''
-- List known packags, available to the system.
-+ List known packages, available to the system.
-
- refresh
- force a refresh if set to True.
-@@ -2141,6 +2360,9 @@ def search(criteria, refresh=False, **kwargs):
- details (bool)
- Show version and repository
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2161,8 +2383,11 @@ def search(criteria, refresh=False, **kwargs):
- 'not_installed_only': '-u',
- 'details': '--details'
- }
-+
-+ root = kwargs.get('root', None)
-+
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- cmd = ['search']
- if kwargs.get('match') == 'exact':
-@@ -2177,7 +2402,7 @@ def search(criteria, refresh=False, **kwargs):
- cmd.append(ALLOWED_SEARCH_OPTIONS.get(opt))
-
- cmd.append(criteria)
-- solvables = __zypper__.nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
-+ solvables = __zypper__(root=root).nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
- if not solvables:
- raise CommandExecutionError(
- 'No packages found matching \'{0}\''.format(criteria)
-@@ -2206,7 +2431,7 @@ def _get_first_aggregate_text(node_list):
- return '\n'.join(out)
-
-
--def list_products(all=False, refresh=False):
-+def list_products(all=False, refresh=False, root=None):
- '''
- List all available or installed SUSE products.
-
-@@ -2218,6 +2443,9 @@ def list_products(all=False, refresh=False):
- If set to False (default) it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- Includes handling for OEM products, which read the OEM productline file
- and overwrite the release value.
-
-@@ -2229,10 +2457,12 @@ def list_products(all=False, refresh=False):
- salt '*' pkg.list_products all=True
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- ret = list()
-- OEM_PATH = "/var/lib/suseRegister/OEM"
-+ OEM_PATH = '/var/lib/suseRegister/OEM'
-+ if root:
-+ OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep))
- cmd = list()
- if not all:
- cmd.append('--disable-repos')
-@@ -2240,7 +2470,7 @@ def list_products(all=False, refresh=False):
- if not all:
- cmd.append('-i')
-
-- product_list = __zypper__.nolock.xml.call(*cmd).getElementsByTagName('product-list')
-+ product_list = __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('product-list')
- if not product_list:
- return ret # No products found
-
-@@ -2282,6 +2512,9 @@ def download(*packages, **kwargs):
- If set to False (default) it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- CLI example:
-
- .. code-block:: bash
-@@ -2292,12 +2525,14 @@ def download(*packages, **kwargs):
- if not packages:
- raise SaltInvocationError('No packages specified')
-
-+ root = kwargs.get('root', None)
-+
- refresh = kwargs.get('refresh', False)
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- pkg_ret = {}
-- for dld_result in __zypper__.xml.call('download', *packages).getElementsByTagName("download-result"):
-+ for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName("download-result"):
- repo = dld_result.getElementsByTagName("repository")[0]
- path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
- pkg_info = {
-@@ -2308,7 +2543,7 @@ def download(*packages, **kwargs):
- key = _get_first_aggregate_text(
- dld_result.getElementsByTagName('name')
- )
-- if __salt__['lowpkg.checksum'](pkg_info['path']):
-+ if __salt__['lowpkg.checksum'](pkg_info['path'], root=root):
- pkg_ret[key] = pkg_info
-
- if pkg_ret:
-@@ -2322,12 +2557,15 @@ def download(*packages, **kwargs):
+ patterns = {
+ k: v for k, v in _get_visible_patterns(root=root).items() if v["installed"]
+@@ -2735,7 +2754,7 @@ def download(*packages, **kwargs):
)
--def list_downloaded():
+-def list_downloaded(root=None, **kwargs):
+def list_downloaded(root=None):
- '''
+ """
.. versionadded:: 2017.7.0
- List prefetched packages downloaded by Zypper in the local disk.
-
-+ root
-+ operate on a different root directory.
-+
- CLI example:
-
- .. code-block:: bash
-@@ -2335,6 +2573,8 @@ def list_downloaded():
- salt '*' pkg.list_downloaded
- '''
- CACHE_DIR = '/var/cache/zypp/packages/'
-+ if root:
-+ CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
-
- ret = {}
- for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
-@@ -2351,12 +2591,14 @@ def list_downloaded():
- return ret
-
-
--def diff(*paths):
-+def diff(*paths, **kwargs):
- '''
- Return a formatted diff between current files and original in a package.
- NOTE: this function includes all files (configuration and not), but does
- not work on binary content.
-
-+ The root parameter can also be passed via the keyword argument.
-+
- :param path: Full path to the installed file
- :return: Difference string or raises and exception if examined file is binary.
-
-@@ -2370,7 +2612,7 @@ def diff(*paths):
-
- pkg_to_paths = {}
- for pth in paths:
-- pth_pkg = __salt__['lowpkg.owner'](pth)
-+ pth_pkg = __salt__['lowpkg.owner'](pth, **kwargs)
- if not pth_pkg:
- ret[pth] = os.path.exists(pth) and 'Not managed' or 'N/A'
- else:
-@@ -2379,7 +2621,7 @@ def diff(*paths):
- pkg_to_paths[pth_pkg].append(pth)
-
- if pkg_to_paths:
-- local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys())
-+ local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys(), **kwargs)
- for pkg, files in six.iteritems(pkg_to_paths):
- for path in files:
- ret[path] = __salt__['lowpkg.diff'](
-@@ -2390,12 +2632,12 @@ def diff(*paths):
- return ret
-
-
--def _get_patches(installed_only=False):
-+def _get_patches(installed_only=False, root=None):
- '''
- List all known patches in repos.
- '''
- patches = {}
-- for element in __zypper__.nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
-+ for element in __zypper__(root=root).nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
- installed = element.getAttribute('status') == 'installed'
- if (installed_only and installed) or not installed_only:
- patches[element.getAttribute('name')] = {
-@@ -2406,7 +2648,7 @@ def _get_patches(installed_only=False):
- return patches
-
-
--def list_patches(refresh=False):
-+def list_patches(refresh=False, root=None, **kwargs):
- '''
- .. versionadded:: 2017.7.0
-
-@@ -2417,6 +2659,9 @@ def list_patches(refresh=False):
- If set to False (default) it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2424,33 +2669,39 @@ def list_patches(refresh=False):
- salt '*' pkg.list_patches
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
-- return _get_patches()
-+ return _get_patches(root=root)
-
-
--def list_installed_patches():
-+def list_installed_patches(root=None, **kwargs):
- '''
- .. versionadded:: 2017.7.0
-
- List installed advisory patches on the system.
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-
- salt '*' pkg.list_installed_patches
- '''
-- return _get_patches(installed_only=True)
-+ return _get_patches(installed_only=True, root=root)
-
-
--def list_provides(**kwargs):
-+def list_provides(root=None, **kwargs):
- '''
- .. versionadded:: 2018.3.0
-
- List package provides of installed packages as a dict.
- {'': ['', '', ...]}
-
-+ root
-+ operate on a different root directory.
-+
- CLI Examples:
-
- .. code-block:: bash
-@@ -2459,7 +2710,10 @@ def list_provides(**kwargs):
- '''
- ret = __context__.get('pkg.list_provides')
- if not ret:
-- cmd = ['rpm', '-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n']
-+ cmd = ['rpm']
-+ if root:
-+ cmd.extend(['--root', root])
-+ cmd.extend(['-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n'])
- ret = dict()
- for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines():
- provide, realname = line.split('_|-')
-@@ -2475,7 +2729,7 @@ def list_provides(**kwargs):
- return ret
-
-
--def resolve_capabilities(pkgs, refresh, **kwargs):
-+def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
- '''
- .. versionadded:: 2018.3.0
-
-@@ -2489,6 +2743,9 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
- If set to False (default) it depends on zypper if a refresh is
- executed.
-
-+ root
-+ operate on a different root directory.
-+
- resolve_capabilities
- If this option is set to True the input will be checked if
- a package with this name exists. If not, this function will
-@@ -2504,7 +2761,7 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
- salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
- '''
- if refresh:
-- refresh_db()
-+ refresh_db(root)
-
- ret = list()
- for pkg in pkgs:
-@@ -2517,12 +2774,12 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
-
- if kwargs.get('resolve_capabilities', False):
- try:
-- search(name, match='exact')
-+ search(name, root=root, match='exact')
- except CommandExecutionError:
- # no package this such a name found
- # search for a package which provides this name
- try:
-- result = search(name, provides=True, match='exact')
-+ result = search(name, root=root, provides=True, match='exact')
- if len(result) == 1:
- name = next(iter(result.keys()))
- elif len(result) > 1:
diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py
-new file mode 100644
-index 0000000000..af78c8ae00
---- /dev/null
+index ec84d862c3..1374bbffb4 100644
+--- a/salt/states/btrfs.py
+++ b/salt/states/btrfs.py
-@@ -0,0 +1,385 @@
-+# -*- coding: utf-8 -*-
+@@ -1,10 +1,31 @@
+#
+# Author: Alberto Planas
+#
@@ -3987,392 +770,159 @@ index 0000000000..af78c8ae00
+# specific language governing permissions and limitations
+# under the License.
+
-+'''
-+:maintainer: Alberto Planas
-+:maturity: new
-+:depends: None
-+:platform: Linux
-+'''
-+from __future__ import absolute_import, print_function, unicode_literals
-+import functools
-+import logging
-+import os.path
-+import tempfile
-+import traceback
-+
-+from salt.exceptions import CommandExecutionError
-+
-+log = logging.getLogger(__name__)
-+
-+__virtualname__ = 'btrfs'
-+
-+
-+def _mount(device, use_default):
-+ '''
-+ Mount the device in a temporary place.
-+ '''
-+ opts = 'subvol=/' if not use_default else 'defaults'
-+ dest = tempfile.mkdtemp()
-+ res = __states__['mount.mounted'](dest, device=device, fstype='btrfs',
-+ opts=opts, persist=False)
-+ if not res['result']:
-+ log.error('Cannot mount device %s in %s', device, dest)
-+ _umount(dest)
-+ return None
-+ return dest
-+
-+
-+def _umount(path):
-+ '''
-+ Umount and clean the temporary place.
-+ '''
-+ __states__['mount.unmounted'](path)
-+ __utils__['files.rm_rf'](path)
-+
-+
-+def _is_default(path, dest, name):
-+ '''
-+ Check if the subvolume is the current default.
-+ '''
-+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
-+ def_id = __salt__['btrfs.subvolume_get_default'](dest)['id']
-+ return subvol_id == def_id
-+
-+
-+def _set_default(path, dest, name):
-+ '''
-+ Set the subvolume as the current default.
-+ '''
-+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
-+ return __salt__['btrfs.subvolume_set_default'](subvol_id, dest)
-+
-+
-+def _is_cow(path):
-+ '''
-+ Check if the subvolume is copy on write
-+ '''
-+ dirname = os.path.dirname(path)
-+ return 'C' not in __salt__['file.lsattr'](dirname)[path]
-+
-+
-+def _unset_cow(path):
-+ '''
-+ Disable the copy on write in a subvolume
-+ '''
-+ return __salt__['file.chattr'](path, operator='add', attributes='C')
-+
-+
-+def __mount_device(action):
-+ '''
-+ Small decorator to make sure that the mount and umount happen in
-+ a transactional way.
-+ '''
-+ @functools.wraps(action)
-+ def wrapper(*args, **kwargs):
-+ name = kwargs['name']
-+ device = kwargs['device']
-+ use_default = kwargs.get('use_default', False)
-+
-+ ret = {
-+ 'name': name,
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Some error happened during the operation.'],
-+ }
-+ try:
-+ if device:
-+ dest = _mount(device, use_default)
-+ if not dest:
-+ msg = 'Device {} cannot be mounted'.format(device)
-+ ret['comment'].append(msg)
-+ kwargs['__dest'] = dest
-+ ret = action(*args, **kwargs)
+ """
+ :maintainer: Alberto Planas
+ :maturity: new
+ :depends: None
+ :platform: Linux
+ """
+-
+ import functools
+ import logging
+ import os.path
+@@ -22,7 +43,7 @@ def _mount(device, use_default):
+ """
+ Mount the device in a temporary place.
+ """
+- opts = "defaults" if use_default else "subvol=/"
++ opts = "subvol=/" if not use_default else "defaults"
+ dest = tempfile.mkdtemp()
+ res = __states__["mount.mounted"](
+ dest, device=device, fstype="btrfs", opts=opts, persist=False
+@@ -82,8 +103,8 @@ def __mount_device(action):
+
+ @functools.wraps(action)
+ def wrapper(*args, **kwargs):
+- name = kwargs.get("name", args[0] if args else None)
+- device = kwargs.get("device", args[1] if len(args) > 1 else None)
++ name = kwargs["name"]
++ device = kwargs["device"]
+ use_default = kwargs.get("use_default", False)
+
+ ret = {
+@@ -100,9 +121,10 @@ def __mount_device(action):
+ ret["comment"].append(msg)
+ kwargs["__dest"] = dest
+ ret = action(*args, **kwargs)
+- except Exception as e: # pylint: disable=broad-except
+- log.error("""Traceback: {}""".format(traceback.format_exc()))
+- ret["comment"].append(e)
+ except Exception:
-+ tb = six.text_type(traceback.format_exc())
-+ log.exception('Exception captured in wrapper %s', tb)
-+ ret['comment'].append(tb)
-+ finally:
-+ if device:
-+ _umount(dest)
-+ return ret
-+ return wrapper
-+
-+
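To make the decorator contract explicit: the wrapper expects `name` and `device` as keyword arguments, mounts the device, injects the temporary mount point as `__dest`, and always unmounts in the `finally` block. A hedged usage sketch (state name hypothetical):

    import os.path

    @__mount_device
    def example_present(name, device, __dest=None):
        # `__dest` is supplied by the decorator with the temporary mount point.
        path = os.path.join(__dest, name)
        return {"name": name, "result": True, "changes": {}, "comment": [path]}

    # example_present(name="var", device="/dev/sda1") mounts the device,
    # runs the body with __dest set, and unmounts even if the body raises.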
-+@__mount_device
-+def subvolume_created(name, device, qgroupids=None, set_default=False,
-+ copy_on_write=True, force_set_default=True,
-+ __dest=None):
-+ '''
-+ Makes sure that a btrfs subvolume is present.
-+
-+ name
-+ Name of the subvolume to add
-+
-+ device
-+ Device where to create the subvolume
-+
-+ qgroupids
-+ Add the newly created subvolume to a qgroup. This parameter
-+ is a list
-+
-+ set_default
-+ If True, this new subvolume will be set as default when
-+ mounted, unless subvol option in mount is used
-+
-+ copy_on_write
-+ If false, set the subvolume with chattr +C
-+
-+ force_set_default
-+ If false and the subvolume is already present, it will not
-+ force it as default if ``set_default`` is True
-+
-+ '''
-+ ret = {
-+ 'name': name,
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': [],
-+ }
-+ path = os.path.join(__dest, name)
-+
-+ exists = __salt__['btrfs.subvolume_exists'](path)
-+ if exists:
-+ ret['comment'].append('Subvolume {} already present'.format(name))
-+
-+ # Resolve the test case first. The check is not complete, but at
-+ # least we will report if a subvolume needs to be created. It can
-+ # happen that the subvolume is there, but we also need to set it
-+ # as default, or persist it in fstab.
-+ if __opts__['test']:
-+ ret['result'] = None
-+ if not exists:
-+ ret['comment'].append('Subvolume {} will be created'.format(name))
-+ return ret
-+
-+ if not exists:
-+ # Create the directories where the subvolume lives
-+ _path = os.path.dirname(path)
-+ res = __states__['file.directory'](_path, makedirs=True)
-+ if not res['result']:
-+ ret['comment'].append('Error creating {} directory'.format(_path))
-+ return ret
-+
-+ try:
-+ __salt__['btrfs.subvolume_create'](name, dest=__dest,
-+ qgroupids=qgroupids)
-+ except CommandExecutionError:
-+ ret['comment'].append('Error creating subvolume {}'.format(name))
-+ return ret
-+
-+ ret['changes'][name] = 'Created subvolume {}'.format(name)
-+
-+ # If the volume was already present, we can opt out of the
-+ # default subvolume check.
-+ if (not exists or (exists and force_set_default)) and \
-+ set_default and not _is_default(path, __dest, name):
-+ ret['changes'][name + '_default'] = _set_default(path, __dest, name)
-+
-+ if not copy_on_write and _is_cow(path):
-+ ret['changes'][name + '_no_cow'] = _unset_cow(path)
-+
-+ ret['result'] = True
-+ return ret
-+
-+
-+@__mount_device
-+def subvolume_deleted(name, device, commit=False, __dest=None):
-+ '''
-+ Makes sure that a btrfs subvolume is removed.
-+
-+ name
-+ Name of the subvolume to remove
-+
-+ device
-+ Device where to remove the subvolume
-+
-+ commit
-+ Wait until the transaction is over
-+
-+ '''
-+ ret = {
-+ 'name': name,
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': [],
-+ }
-+
-+ path = os.path.join(__dest, name)
-+
-+ exists = __salt__['btrfs.subvolume_exists'](path)
-+ if not exists:
-+ ret['comment'].append('Subvolume {} already missing'.format(name))
-+
-+ if __opts__['test']:
-+ ret['result'] = None
-+ if exists:
-+ ret['comment'].append('Subvolume {} will be removed'.format(name))
-+ return ret
-+
-+ # If commit is set, we wait until all is over
-+ commit = 'after' if commit else None
-+
-+ if exists:
-+ try:
-+ __salt__['btrfs.subvolume_delete'](path, commit=commit)
-+ except CommandExecutionError:
-+ ret['comment'].append('Error removing subvolume {}'.format(name))
-+ return ret
-+
-+ ret['changes'][name] = 'Removed subvolume {}'.format(name)
-+
-+ ret['result'] = True
-+ return ret
-+
-+
-+def _diff_properties(expected, current):
-+ '''Calculate the difference between the current and the expected
-+ properties
-+
-+ * 'expected' is expressed in a dictionary like: {'property': value}
-+
-+ * 'current' contains the same format returned by 'btrfs.properties'
-+
-+ If the property is not available, this will throw an exception.
-+
-+ '''
-+ difference = {}
-+ for _property, value in expected.items():
-+ current_value = current[_property]['value']
-+ if value is False and current_value == 'N/A':
-+ needs_update = False
-+ elif value != current_value:
-+ needs_update = True
-+ else:
-+ needs_update = False
-+ if needs_update:
-+ difference[_property] = value
-+ return difference
-+
-+
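A worked example of the difference computation (property values hypothetical):

    expected = {"ro": "true", "compression": False}
    current = {
        "ro": {"value": "false"},
        "compression": {"value": "N/A"},   # 'N/A' means the property is unset
    }
    # _diff_properties(expected, current) -> {'ro': 'true'}
    # 'compression' is skipped: an expected False matches the unset 'N/A' value.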
-+@__mount_device
-+def properties(name, device, use_default=False, __dest=None, **properties):
-+ '''
-+ Makes sure that a list of properties are set in a subvolume, file
-+ or device.
-+
-+ name
-+ Name of the object to change
-+
-+ device
-+ Device where the object lives. If None, the device will be in
-+ name
-+
-+ use_default
-+ If True, this subvolume will be resolved to the default
-+ subvolume assigned during the create operation
-+
-+ properties
-+ Dictionary of properties
-+
-+ Valid properties are 'ro', 'label' or 'compression'. Check the
-+ documentation to see where those properties are valid for each
-+ object.
-+
-+ '''
-+ ret = {
-+ 'name': name,
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': [],
-+ }
-+
-+ # 'name' will always hold the name of the object that we want to
-+ # change, but if the object is a device, we do not repeat it again
-+ # in 'device'. This makes 'device' optional in some cases.
-+ if device:
-+ if os.path.isabs(name):
-+ path = os.path.join(__dest, os.path.relpath(name, os.path.sep))
-+ else:
-+ path = os.path.join(__dest, name)
-+ else:
-+ path = name
-+
-+ if not os.path.exists(path):
-+ ret['comment'].append('Object {} not found'.format(name))
-+ return ret
-+
-+ # Convert the booleans to lowercase
-+ properties = {k: v if type(v) is not bool else str(v).lower()
-+ for k, v in properties.items()}
-+
-+ current_properties = {}
-+ try:
-+ current_properties = __salt__['btrfs.properties'](path)
-+ except CommandExecutionError as e:
-+ ret['comment'].append('Error reading properties from {}'.format(name))
-+ ret['comment'].append('Current error {}'.format(e))
-+ return ret
-+
-+ try:
-+ properties_to_set = _diff_properties(properties, current_properties)
-+ except KeyError:
-+ ret['comment'].append('Some property not found in {}'.format(name))
-+ return ret
-+
-+ if __opts__['test']:
-+ ret['result'] = None
-+ if properties_to_set:
-+ msg = 'Properties {} will be changed in {}'.format(
-+ properties_to_set, name)
-+ else:
-+ msg = 'No properties will be changed in {}'.format(name)
-+ ret['comment'].append(msg)
-+ return ret
-+
-+ if properties_to_set:
-+ _properties = ','.join(
-+ '{}={}'.format(k, v) for k, v in properties_to_set.items())
-+ __salt__['btrfs.properties'](path, set=_properties)
-+
-+ current_properties = __salt__['btrfs.properties'](path)
-+ properties_failed = _diff_properties(properties, current_properties)
-+ if properties_failed:
-+ msg = 'Properties {} failed to be changed in {}'.format(
-+ properties_failed, name)
-+ ret['comment'].append(msg)
-+ return ret
-+
-+ ret['comment'].append('Properties changed in {}'.format(name))
-+ ret['changes'] = properties_to_set
-+ else:
-+ ret['comment'].append('Properties not changed in {}'.format(name))
-+
-+ ret['result'] = True
-+ return ret
++ tb = str(traceback.format_exc())
++ log.exception("Exception captured in wrapper %s", tb)
++ ret["comment"].append(tb)
+ finally:
+ if device:
+ _umount(dest)
+@@ -165,7 +187,7 @@ def subvolume_created(
+ if __opts__["test"]:
+ ret["result"] = None
+ if not exists:
+- ret["changes"][name] = "Subvolume {} will be created".format(name)
++ ret["comment"].append("Subvolume {} will be created".format(name))
+ return ret
+
+ if not exists:
+@@ -231,7 +253,7 @@ def subvolume_deleted(name, device, commit=False, __dest=None):
+ if __opts__["test"]:
+ ret["result"] = None
+ if exists:
+- ret["changes"][name] = "Subvolume {} will be removed".format(name)
++ ret["comment"].append("Subvolume {} will be removed".format(name))
+ return ret
+
+ # If commit is set, we wait until all is over
+@@ -344,10 +366,10 @@ def properties(name, device, use_default=False, __dest=None, **properties):
+ if __opts__["test"]:
+ ret["result"] = None
+ if properties_to_set:
+- ret["changes"] = properties_to_set
++ msg = "Properties {} will be changed in {}".format(properties_to_set, name)
+ else:
+ msg = "No properties will be changed in {}".format(name)
+- ret["comment"].append(msg)
++ ret["comment"].append(msg)
+ return ret
+
+ if properties_to_set:
diff --git a/salt/states/file.py b/salt/states/file.py
-index dd5bcec62a..0e925bb2ed 100644
+index 9873f8dcc7..9e24e389d8 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
-@@ -291,7 +291,11 @@ import shutil
- import sys
+@@ -290,7 +290,6 @@ import sys
import time
import traceback
--from collections import Iterable, Mapping, defaultdict
+ from collections import defaultdict
+-from collections.abc import Iterable, Mapping
+ from datetime import date, datetime # python3 problem in the making?
+
+ import salt.loader
+@@ -312,6 +311,12 @@ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
+ from salt.serializers import DeserializationError
+ from salt.state import get_accumulator_dir as _get_accumulator_dir
+
+try:
+ from collections.abc import Iterable, Mapping
+except ImportError:
+ from collections import Iterable, Mapping
-+from collections import defaultdict
- from datetime import datetime, date # python3 problem in the making?
-
- # Import salt libs
++
++
+ if salt.utils.platform.is_windows():
+ import salt.utils.win_dacl
+ import salt.utils.win_functions
diff --git a/salt/states/loop.py b/salt/states/loop.py
-index 524fa56c1a..726c8c8016 100644
+index 25e54e1faf..de37b7d60c 100644
--- a/salt/states/loop.py
+++ b/salt/states/loop.py
-@@ -185,6 +185,10 @@ def until_no_eval(
- ''.format(name, expected))
- if ret['comment']:
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Loop state
+
+@@ -58,8 +57,6 @@ The function :py:func:`data.subdict_match ` check
+ instances: "{{ instance }}"
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import operator
+@@ -99,7 +96,7 @@ def until(name, m_args=None, m_kwargs=None, condition=None, period=1, timeout=60
+ m_kwargs = {}
+
+ if name not in __salt__:
+- ret["comment"] = "Cannot find module {0}".format(name)
++ ret["comment"] = "Cannot find module {}".format(name)
+ elif condition is None:
+ ret["comment"] = "An exit condition must be specified"
+ elif not isinstance(period, (int, float)):
+@@ -107,7 +104,7 @@ def until(name, m_args=None, m_kwargs=None, condition=None, period=1, timeout=60
+ elif not isinstance(timeout, (int, float)):
+ ret["comment"] = "Timeout must be specified as a float in seconds"
+ elif __opts__["test"]:
+- ret["comment"] = "The execution module {0} will be run".format(name)
++ ret["comment"] = "The execution module {} will be run".format(name)
+ ret["result"] = None
+ else:
+ if m_args is None:
+@@ -120,11 +117,11 @@ def until(name, m_args=None, m_kwargs=None, condition=None, period=1, timeout=60
+ m_ret = __salt__[name](*m_args, **m_kwargs)
+ if eval(condition): # pylint: disable=W0123
+ ret["result"] = True
+- ret["comment"] = "Condition {0} was met".format(condition)
++ ret["comment"] = "Condition {} was met".format(condition)
+ break
+ time.sleep(period)
+ else:
+- ret["comment"] = "Timed out while waiting for condition {0}".format(
++ ret["comment"] = "Timed out while waiting for condition {}".format(
+ condition
+ )
+ return ret
+@@ -185,6 +182,10 @@ def until_no_eval(
+ )
+ if ret["comment"]:
return ret
+ if not m_args:
+ m_args = []
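
The two lines added to until_no_eval normalize m_args to a fresh list, which avoids both a None crash and the shared-mutable-default pitfall. A hedged, simplified version of the polling pattern these loop states implement (illustrative signature, not the real Salt one):

```python
import time


def until(func, condition, m_args=None, m_kwargs=None, period=1, timeout=60):
    # Normalize inside the function so callers never share one default list.
    if m_args is None:
        m_args = []
    if m_kwargs is None:
        m_kwargs = {}
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition(func(*m_args, **m_kwargs)):
            return True, "Condition was met"
        time.sleep(period)
    return False, "Timed out while waiting for condition"


print(until(lambda: 42, lambda ret: ret == 42, timeout=5))
```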
@@ -4381,184 +931,58 @@ index 524fa56c1a..726c8c8016 100644
if init_wait:
time.sleep(init_wait)
-diff --git a/salt/states/pkg.py b/salt/states/pkg.py
-index a13d418400..71ba29a27c 100644
---- a/salt/states/pkg.py
-+++ b/salt/states/pkg.py
-@@ -236,7 +236,7 @@ def _fulfills_version_spec(versions, oper, desired_version,
- return False
-
-
--def _find_unpurge_targets(desired):
-+def _find_unpurge_targets(desired, **kwargs):
- '''
- Find packages which are marked to be purged but can't yet be removed
- because they are dependencies for other installed packages. These are the
-@@ -245,7 +245,7 @@ def _find_unpurge_targets(desired):
- '''
- return [
- x for x in desired
-- if x in __salt__['pkg.list_pkgs'](purge_desired=True)
-+ if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
- ]
-
-
-@@ -260,7 +260,7 @@ def _find_download_targets(name=None,
- Inspect the arguments to pkg.downloaded and discover what packages need to
- be downloaded. Return a dict of packages to download.
- '''
-- cur_pkgs = __salt__['pkg.list_downloaded']()
-+ cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
- if pkgs:
- to_download = _repack_pkgs(pkgs, normalize=normalize) # pylint: disable=not-callable
-
-@@ -378,7 +378,7 @@ def _find_advisory_targets(name=None,
- Inspect the arguments to pkg.patch_installed and discover what advisory
- patches need to be installed. Return a dict of advisory patches to install.
- '''
-- cur_patches = __salt__['pkg.list_installed_patches']()
-+ cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
- if advisory_ids:
- to_download = advisory_ids
- else:
-@@ -582,7 +582,7 @@ def _find_install_targets(name=None,
- 'minion log.'.format('pkgs' if pkgs
- else 'sources')}
-
-- to_unpurge = _find_unpurge_targets(desired)
-+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
- else:
- if salt.utils.platform.is_windows():
- pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) # pylint: disable=not-callable
-@@ -602,7 +602,7 @@ def _find_install_targets(name=None,
- else:
- desired = {name: version}
-
-- to_unpurge = _find_unpurge_targets(desired)
-+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
-
- # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
- origin = bool(re.search('/', name))
-@@ -761,7 +761,8 @@ def _find_install_targets(name=None,
- verify_result = __salt__['pkg.verify'](
- package_name,
- ignore_types=ignore_types,
-- verify_options=verify_options
-+ verify_options=verify_options,
-+ **kwargs
- )
- except (CommandExecutionError, SaltInvocationError) as exc:
- failed_verify = exc.strerror
-@@ -790,7 +791,9 @@ def _find_install_targets(name=None,
- verify_result = __salt__['pkg.verify'](
- package_name,
- ignore_types=ignore_types,
-- verify_options=verify_options)
-+ verify_options=verify_options,
-+ **kwargs
-+ )
- except (CommandExecutionError, SaltInvocationError) as exc:
- failed_verify = exc.strerror
- continue
-@@ -1974,7 +1977,8 @@ def installed(
- # have caught invalid arguments earlier.
- verify_result = __salt__['pkg.verify'](reinstall_pkg,
- ignore_types=ignore_types,
-- verify_options=verify_options)
-+ verify_options=verify_options,
-+ **kwargs)
- if verify_result:
- failed.append(reinstall_pkg)
- altered_files[reinstall_pkg] = verify_result
-@@ -3038,7 +3042,7 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
- pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
- try:
- packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
-- expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname)}
-+ expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)}
- for pkgname, pkgver in six.iteritems(packages)}
- if isinstance(pkgs, list):
- packages = [pkg for pkg in packages if pkg in pkgs]
-@@ -3220,7 +3224,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
- .format(name, exc))
- return ret
-
-- failed = [x for x in targets if x not in __salt__['pkg.list_pkgs']()]
-+ failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)]
- if failed:
- ret['comment'] = (
- 'Failed to install the following packages: {0}'
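
Every pkg.py hunk above threads **kwargs into the pkg.* helper calls. The point is that options such as root= (for chroot or image targets) must reach the underlying query, or the state silently answers for the host. A toy illustration with a hypothetical stand-in for the __salt__ registry:

```python
# Hypothetical stub: list_pkgs answers differently for the host and for an
# alternative root, mimicking a chroot-aware package module.
__salt__ = {
    "pkg.list_pkgs": lambda purge_desired=False, root=None, **kw: (
        {"vim": "8.0"} if root is None else {}
    ),
}


def _find_unpurge_targets(desired, **kwargs):
    # Forwarding **kwargs is the whole fix: without it, root= is dropped
    # and the host's package list is consulted instead of the chroot's.
    return [
        x
        for x in desired
        if x in __salt__["pkg.list_pkgs"](purge_desired=True, **kwargs)
    ]


print(_find_unpurge_targets(["vim"]))               # ['vim'] (host view)
print(_find_unpurge_targets(["vim"], root="/mnt"))  # []      (chroot view)
```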
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
-index f1ae3a0f6f..c39e857580 100644
+index 99440874c2..70cb7a1c7e 100644
--- a/salt/states/pkgrepo.py
+++ b/salt/states/pkgrepo.py
-@@ -385,10 +385,7 @@ def managed(name, ppa=None, **kwargs):
- kwargs.pop(kwarg, None)
+@@ -92,7 +92,6 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS:
- try:
-- pre = __salt__['pkg.get_repo'](
-- repo,
-- ppa_auth=kwargs.get('ppa_auth', None)
-- )
-+ pre = __salt__['pkg.get_repo'](repo=repo, **kwargs)
- except CommandExecutionError as exc:
- ret['result'] = False
- ret['comment'] = \
-@@ -504,10 +501,7 @@ def managed(name, ppa=None, **kwargs):
- return ret
+ """
+
+-# Import Python libs
+
+ import sys
+
+@@ -101,11 +100,7 @@ import salt.utils.files
+ import salt.utils.pkg.deb
+ import salt.utils.pkg.rpm
+ import salt.utils.versions
+-
+-# Import salt libs
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+-
+-# Import 3rd-party libs
+ from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
- try:
-- post = __salt__['pkg.get_repo'](
-- repo,
-- ppa_auth=kwargs.get('ppa_auth', None)
-- )
-+ post = __salt__['pkg.get_repo'](repo=repo, **kwargs)
- if pre:
- for kwarg in sanitizedkwargs:
- if post.get(kwarg) != pre.get(kwarg):
-@@ -600,9 +594,7 @@ def absent(name, **kwargs):
- return ret
- try:
-- repo = __salt__['pkg.get_repo'](
-- name, ppa_auth=kwargs.get('ppa_auth', None)
-- )
-+ repo = __salt__['pkg.get_repo'](name, **kwargs)
- except CommandExecutionError as exc:
- ret['result'] = False
- ret['comment'] = \
diff --git a/salt/utils/oset.py b/salt/utils/oset.py
-index acfd59b53b..cd4e88be40 100644
+index d6fb961ede..31a6a4acca 100644
--- a/salt/utils/oset.py
+++ b/salt/utils/oset.py
-@@ -22,7 +22,10 @@ Rob Speer's changes are as follows:
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+
+ Available at repository https://github.com/LuminosoInsight/ordered-set
+@@ -21,9 +20,10 @@ Rob Speer's changes are as follows:
+ - added a __getstate__ and __setstate__ so it can be pickled
- added __getitem__
- '''
- from __future__ import absolute_import, unicode_literals, print_function
--import collections
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+-
+-from collections.abc import MutableSet
+try:
+ from collections.abc import MutableSet
+except ImportError:
+ from collections import MutableSet
SLICE_ALL = slice(None)
- __version__ = '2.0.1'
-@@ -44,7 +47,7 @@ def is_iterable(obj):
- return hasattr(obj, '__iter__') and not isinstance(obj, str) and not isinstance(obj, tuple)
-
-
--class OrderedSet(collections.MutableSet):
-+class OrderedSet(MutableSet):
- """
- An OrderedSet is a custom MutableSet that remembers its order, so that
- every entry has an index that can be looked up.
+ __version__ = "2.0.1"
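
Moving the base class import to collections.abc changes nothing about how the ABC is used: a concrete MutableSet subclass only has to provide __contains__, __iter__, __len__, add and discard, and inherits the rest of the set API. A minimal order-preserving set in that style, a sketch far smaller than the real salt.utils.oset:

```python
try:
    from collections.abc import MutableSet  # Python 3.3+
except ImportError:
    from collections import MutableSet  # Python 2 fallback


class TinyOrderedSet(MutableSet):
    """Order-preserving set built on a dict (insertion-ordered on 3.7+)."""

    def __init__(self, iterable=()):
        self._items = {}
        for item in iterable:
            self.add(item)

    def __contains__(self, item):
        return item in self._items

    def __iter__(self):
        return iter(self._items)

    def __len__(self):
        return len(self._items)

    def add(self, item):
        self._items[item] = None

    def discard(self, item):
        self._items.pop(item, None)


print(list(TinyOrderedSet("abracadabra")))  # ['a', 'b', 'r', 'c', 'd']
```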
diff --git a/tests/unit/modules/test_kubeadm.py b/tests/unit/modules/test_kubeadm.py
-new file mode 100644
-index 0000000000..a58f54f118
---- /dev/null
+index af319e01b1..91e4a9e68e 100644
+--- a/tests/unit/modules/test_kubeadm.py
+++ b/tests/unit/modules/test_kubeadm.py
-@@ -0,0 +1,1144 @@
-+# -*- coding: utf-8 -*-
+@@ -1,20 +1,41 @@
+#
+# Author: Alberto Planas
+#
@@ -4581,1559 +1005,228 @@ index 0000000000..a58f54f118
+# specific language governing permissions and limitations
+# under the License.
+
-+from __future__ import absolute_import, print_function, unicode_literals
-+import pytest
-+
-+# Import Salt Testing Libs
-+from tests.support.mixins import LoaderModuleMockMixin
+ import pytest
+ import salt.modules.kubeadm as kubeadm
+ from salt.exceptions import CommandExecutionError
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, patch
+-from tests.support.unit import TestCase
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+from tests.support.unit import TestCase, skipIf
-+from tests.support.mock import (
-+ MagicMock,
-+ patch,
-+ NO_MOCK,
-+ NO_MOCK_REASON
-+)
-+
-+import salt.modules.kubeadm as kubeadm
-+from salt.exceptions import CommandExecutionError
-+
-+
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
-+ '''
-+ Test cases for salt.modules.kubeadm
-+ '''
-+
-+ def setup_loader_modules(self):
-+ return {
-+ kubeadm: {
-+ '__salt__': {},
-+ '__utils__': {},
-+ }
-+ }
-+
-+ def test_version(self):
-+ '''
-+ Test kubeadm.version without parameters
-+ '''
-+ version = '{"clientVersion":{"major":"1"}}'
-+ salt_mock = {
-+ 'cmd.run_stdout': MagicMock(return_value=version),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.version() == {
-+ 'clientVersion': {'major': '1'}
-+ }
-+ salt_mock['cmd.run_stdout'].assert_called_with(
-+ ['kubeadm', 'version', '--output', 'json']
-+ )
-+
-+ def test_version_params(self):
-+ '''
-+ Test kubeadm.version with parameters
-+ '''
-+ version = '{"clientVersion":{"major":"1"}}'
-+ salt_mock = {
-+ 'cmd.run_stdout': MagicMock(return_value=version),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.version(kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == {
-+ 'clientVersion': {'major': '1'}
-+ }
-+ salt_mock['cmd.run_stdout'].assert_called_with(
-+ ['kubeadm', 'version',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt',
-+ '--output', 'json']
-+ )
-+
-+ def test_token_create(self):
-+ '''
-+ Test kubeadm.token_create without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'token'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_create() == 'token'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'create']
-+ )
-+
-+ def test_token_create_params(self):
-+ '''
-+ Test kubeadm.token_create with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'token'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_create(token='token',
-+ config='/kubeadm.cfg',
-+ description='a description',
-+ groups=['g:1', 'g:2'],
-+ ttl='1h1m1s',
-+ usages=['u1', 'u2'],
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'token'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'create', 'token',
-+ '--config', '/kubeadm.cfg',
-+ '--description', 'a description',
-+ '--groups', '["g:1", "g:2"]',
-+ '--ttl', '1h1m1s',
-+ '--usages', '["u1", "u2"]',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_token_create_error(self):
-+ '''
-+ Test kubeadm.token_create error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.token_create()
-+
-+ def test_token_delete(self):
-+ '''
-+ Test kubeadm.token_delete without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'deleted'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_delete('token')
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'delete', 'token']
-+ )
-+
-+ def test_token_delete_params(self):
-+ '''
-+ Test kubeadm.token_delete with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'deleted'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_delete('token',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt')
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'delete', 'token',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_token_delete_error(self):
-+ '''
-+ Test kubeadm.token_delete error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.token_delete('token')
-+
-+ def test_token_generate(self):
-+ '''
-+ Test kubeadm.token_generate without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'token'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_generate() == 'token'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'generate']
-+ )
-+
-+ def test_token_generate_params(self):
-+ '''
-+ Test kubeadm.token_generate with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'token'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_generate(kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'token'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'generate',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_token_generate_error(self):
-+ '''
-+ Test kubeadm.token_generate error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.token_generate()
-+
-+ def test_token_list(self):
-+ '''
-+ Test kubeadm.token_list without parameters
-+ '''
-+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
-+ result = {'retcode': 0, 'stdout': output}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_list() == [{
-+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
-+ }]
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'list']
-+ )
-+
-+ def test_token_list_multiple_lines(self):
-+ '''
-+ Test kubeadm.token_list with multiple tokens
-+ '''
-+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
-+ result = {'retcode': 0, 'stdout': output}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_list() == [{
-+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
-+ }, {
-+ 'h1': 'a', 'h2': 'b', 'h31 h32': 'c d', 'h4': 'e'
-+ }]
-+
-+ def test_token_list_broken_lines(self):
-+ '''
-+ Test kubeadm.token_list with multiple tokens, one broken
-+ '''
-+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
-+ result = {'retcode': 0, 'stdout': output}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.token_list() == [{
-+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
-+ }]
-+
-+ def test_token_list_params(self):
-+ '''
-+ Test kubeadm.token_list with parameters
-+ '''
-+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
-+ result = {'retcode': 0, 'stdout': output}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ result = kubeadm.token_list(kubeconfig='/kube.cfg',
-+ rootfs='/mnt')
-+ assert result == [{
-+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
-+ }]
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'token', 'list',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_token_list_error(self):
-+ '''
-+ Test kubeadm.token_list error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.token_list()
-+
-+ def test_alpha_certs_renew(self):
-+ '''
-+ Test kubeadm.alpha_certs_renew without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_certs_renew() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'certs', 'renew']
-+ )
-+
-+ def test_alpha_certs_renew_params(self):
-+ '''
-+ Test kubeadm.alpha_certs_renew with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_certs_renew(rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'certs', 'renew',
-+ '--rootfs', '/mnt'])
-+
-+ def test_alpha_certs_renew_error(self):
-+ '''
-+ Test kubeadm.alpha_certs_renew error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.alpha_certs_renew()
-+
-+ def test_alpha_kubeconfig_user(self):
-+ '''
-+ Test kubeadm.alpha_kubeconfig_user without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_kubeconfig_user('user') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
-+ '--client-name', 'user']
-+ )
-+
-+ def test_alpha_kubeconfig_user_params(self):
-+ '''
-+ Test kubeadm.alpha_kubeconfig_user with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_kubeconfig_user(
-+ 'user',
-+ apiserver_advertise_address='127.0.0.1',
-+ apiserver_bind_port='1234',
-+ cert_dir='/pki',
-+ org='org',
-+ token='token',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
-+ '--client-name', 'user',
-+ '--apiserver-advertise-address', '127.0.0.1',
-+ '--apiserver-bind-port', '1234',
-+ '--cert-dir', '/pki',
-+ '--org', 'org',
-+ '--token', 'token',
-+ '--rootfs', '/mnt'])
-+
-+ def test_alpha_kubeconfig_user_error(self):
-+ '''
-+ Test kubeadm.alpha_kubeconfig_user error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.alpha_kubeconfig_user('user')
-+
-+ def test_alpha_kubelet_config_download(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_download without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_kubelet_config_download() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
-+ )
-+
-+ def test_alpha_kubelet_config_download_params(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_download with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_kubelet_config_download(
-+ kubeconfig='/kube.cfg',
-+ kubelet_version='version',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download',
-+ '--kubeconfig', '/kube.cfg',
-+ '--kubelet-version', 'version',
-+ '--rootfs', '/mnt'])
-+
-+ def test_alpha_kubelet_config_download_error(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_download error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.alpha_kubelet_config_download()
-+
-+ def test_alpha_kubelet_config_enable_dynamic(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_enable_dynamic without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ result = kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
-+ assert result == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
-+ '--node-name', 'node-1']
-+ )
-+
-+ def test_alpha_kubelet_config_enable_dynamic_params(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_enable_dynamic with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_kubelet_config_enable_dynamic(
-+ 'node-1',
-+ kubeconfig='/kube.cfg',
-+ kubelet_version='version',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
-+ '--node-name', 'node-1',
-+ '--kubeconfig', '/kube.cfg',
-+ '--kubelet-version', 'version',
-+ '--rootfs', '/mnt'])
-+
-+ def test_alpha_kubelet_config_enable_dynamic_error(self):
-+ '''
-+ Test kubeadm.alpha_kubelet_config_enable_dynamic error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
-+
-+ def test_alpha_selfhosting_pivot(self):
-+ '''
-+ Test kubeadm.alpha_selfhosting_pivot without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_selfhosting_pivot() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
-+ )
-+
-+ def test_alpha_selfhosting_pivot_params(self):
-+ '''
-+ Test kubeadm.alpha_selfhosting_pivot with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.alpha_selfhosting_pivot(
-+ cert_dir='/pki',
-+ config='/kubeadm.cfg',
-+ kubeconfig='/kube.cfg',
-+ store_certs_in_secrets=True,
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force',
-+ '--store-certs-in-secrets',
-+ '--cert-dir', '/pki',
-+ '--config', '/kubeadm.cfg',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_alpha_selfhosting_pivot_error(self):
-+ '''
-+ Test kubeadm.alpha_selfhosting_pivot error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.alpha_selfhosting_pivot()
-+
-+ def test_config_images_list(self):
-+ '''
-+ Test kubeadm.config_images_list without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_images_list() == ['image1', 'image2']
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'images', 'list']
-+ )
-+
-+ def test_config_images_list_params(self):
-+ '''
-+ Test kubeadm.config_images_list with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_images_list(
-+ config='/kubeadm.cfg',
-+ feature_gates='k=v',
-+ kubernetes_version='version',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == ['image1', 'image2']
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'images', 'list',
-+ '--config', '/kubeadm.cfg',
-+ '--feature-gates', 'k=v',
-+ '--kubernetes-version', 'version',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_images_list_error(self):
-+ '''
-+ Test kubeadm.config_images_list error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_images_list()
-+
-+ def test_config_images_pull(self):
-+ '''
-+ Test kubeadm.config_images_pull without parameters
-+ '''
-+ result = {'retcode': 0,
-+ 'stdout': '[config/images] Pulled image1\n'
-+ '[config/images] Pulled image2\n'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_images_pull() == ['image1', 'image2']
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'images', 'pull']
-+ )
-+
-+ def test_config_images_pull_params(self):
-+ '''
-+ Test kubeadm.config_images_pull with parameters
-+ '''
-+ result = {'retcode': 0,
-+ 'stdout': '[config/images] Pulled image1\n'
-+ '[config/images] Pulled image2\n'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_images_pull(
-+ config='/kubeadm.cfg',
-+ cri_socket='socket',
-+ feature_gates='k=v',
-+ kubernetes_version='version',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == ['image1', 'image2']
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'images', 'pull',
-+ '--config', '/kubeadm.cfg',
-+ '--cri-socket', 'socket',
-+ '--feature-gates', 'k=v',
-+ '--kubernetes-version', 'version',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_images_pull_error(self):
-+ '''
-+ Test kubeadm.config_images_pull error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_images_pull()
-+
-+ def test_config_migrate(self):
-+ '''
-+ Test kubeadm.config_migrate without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_migrate('/oldconfig.cfg') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'migrate',
-+ '--old-config', '/oldconfig.cfg']
-+ )
-+
-+ def test_config_migrate_params(self):
-+ '''
-+ Test kubeadm.config_migrate with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_migrate(
-+ '/oldconfig.cfg',
-+ new_config='/newconfig.cfg',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'migrate',
-+ '--old-config', '/oldconfig.cfg',
-+ '--new-config', '/newconfig.cfg',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_migrate_error(self):
-+ '''
-+ Test kubeadm.config_migrate error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_migrate('/oldconfig.cfg')
-+
-+ def test_config_print_init_defaults(self):
-+ '''
-+ Test kubeadm.config_print_init_defaults without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_print_init_defaults() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'print', 'init-defaults']
-+ )
-+
-+ def test_config_print_init_defaults_params(self):
-+ '''
-+ Test kubeadm.config_print_init_defaults with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_print_init_defaults(
-+ component_configs='component',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'print', 'init-defaults',
-+ '--component-configs', 'component',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_print_init_defaults_error(self):
-+ '''
-+ Test kubeadm.config_print_init_defaults error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_print_init_defaults()
-+
-+ def test_config_print_join_defaults(self):
-+ '''
-+ Test kubeadm.config_print_join_defaults without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_print_join_defaults() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'print', 'join-defaults']
-+ )
-+
-+ def test_config_print_join_defaults_params(self):
-+ '''
-+ Test kubeadm.config_print_join_defaults with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_print_join_defaults(
-+ component_configs='component',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'print', 'join-defaults',
-+ '--component-configs', 'component',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_print_join_defaults_error(self):
-+ '''
-+ Test kubeadm.config_print_join_defaults error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_print_join_defaults()
-+
-+ def test_config_upload_from_file(self):
-+ '''
-+ Test kubeadm.config_upload_from_file without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_upload_from_file('/config.cfg') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'upload', 'from-file',
-+ '--config', '/config.cfg']
-+ )
-+
-+ def test_config_upload_from_file_params(self):
-+ '''
-+ Test kubeadm.config_upload_from_file with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_upload_from_file(
-+ '/config.cfg',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'upload', 'from-file',
-+ '--config', '/config.cfg',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_upload_from_file_error(self):
-+ '''
-+ Test kubeadm.config_upload_from_file error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_upload_from_file('/config.cfg')
-+
-+ def test_config_upload_from_flags(self):
-+ '''
-+ Test kubeadm.config_upload_from_flags without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_upload_from_flags() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'upload', 'from-flags']
-+ )
-+
-+ def test_config_upload_from_flags_params(self):
-+ '''
-+ Test kubeadm.config_upload_from_flags with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_upload_from_flags(
-+ apiserver_advertise_address='127.0.0.1',
-+ apiserver_bind_port='1234',
-+ apiserver_cert_extra_sans='sans',
-+ cert_dir='/pki',
-+ cri_socket='socket',
-+ feature_gates='k=v',
-+ kubernetes_version='version',
-+ node_name='node-1',
-+ pod_network_cidr='10.1.0.0/12',
-+ service_cidr='10.2.0.0/12',
-+ service_dns_domain='example.org',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'upload', 'from-flags',
-+ '--apiserver-advertise-address', '127.0.0.1',
-+ '--apiserver-bind-port', '1234',
-+ '--apiserver-cert-extra-sans', 'sans',
-+ '--cert-dir', '/pki',
-+ '--cri-socket', 'socket',
-+ '--feature-gates', 'k=v',
-+ '--kubernetes-version', 'version',
-+ '--node-name', 'node-1',
-+ '--pod-network-cidr', '10.1.0.0/12',
-+ '--service-cidr', '10.2.0.0/12',
-+ '--service-dns-domain', 'example.org',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_upload_from_flags_error(self):
-+ '''
-+ Test kubeadm.config_upload_from_flags error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_upload_from_flags()
-+
-+ def test_config_view(self):
-+ '''
-+ Test kubeadm.config_view without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_view() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'view']
-+ )
-+
-+ def test_config_view_params(self):
-+ '''
-+ Test kubeadm.config_view with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.config_view(
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'config', 'view',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_config_view_error(self):
-+ '''
-+ Test kubeadm.config_view error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.config_view()
-+
-+ def test_init(self):
-+ '''
-+ Test kubeadm.init without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.init() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'init']
-+ )
-+
-+ def test_init_params(self):
-+ '''
-+ Test kubeadm.init with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.init(
-+ apiserver_advertise_address='127.0.0.1',
-+ apiserver_bind_port='1234',
-+ apiserver_cert_extra_sans='sans',
-+ cert_dir='/pki',
-+ certificate_key='secret',
-+ config='/config.cfg',
-+ cri_socket='socket',
-+ experimental_upload_certs=True,
-+ feature_gates='k=v',
-+ ignore_preflight_errors='all',
-+ image_repository='example.org',
-+ kubernetes_version='version',
-+ node_name='node-1',
-+ pod_network_cidr='10.1.0.0/12',
-+ service_cidr='10.2.0.0/12',
-+ service_dns_domain='example.org',
-+ skip_certificate_key_print=True,
-+ skip_phases='all',
-+ skip_token_print=True,
-+ token='token',
-+ token_ttl='1h1m1s',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'init',
-+ '--experimental-upload-certs',
-+ '--skip-certificate-key-print',
-+ '--skip-token-print',
-+ '--apiserver-advertise-address', '127.0.0.1',
-+ '--apiserver-bind-port', '1234',
-+ '--apiserver-cert-extra-sans', 'sans',
-+ '--cert-dir', '/pki',
-+ '--certificate-key', 'secret',
-+ '--config', '/config.cfg',
-+ '--cri-socket', 'socket',
-+ '--feature-gates', 'k=v',
-+ '--ignore-preflight-errors', 'all',
-+ '--image-repository', 'example.org',
-+ '--kubernetes-version', 'version',
-+ '--node-name', 'node-1',
-+ '--pod-network-cidr', '10.1.0.0/12',
-+ '--service-cidr', '10.2.0.0/12',
-+ '--service-dns-domain', 'example.org',
-+ '--skip-phases', 'all',
-+ '--token', 'token',
-+ '--token-ttl', '1h1m1s',
-+ '--rootfs', '/mnt'])
-+
-+ def test_init_error(self):
-+ '''
-+ Test kubeadm.init error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.init()
-+
-+ def test_join(self):
-+ '''
-+ Test kubeadm.join without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.join() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'join']
-+ )
-+
-+ def test_join_params(self):
-+ '''
-+ Test kubeadm.join with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.join(
-+ api_server_endpoint='10.160.65.165:6443',
-+ apiserver_advertise_address='127.0.0.1',
-+ apiserver_bind_port='1234',
-+ certificate_key='secret',
-+ config='/config.cfg',
-+ cri_socket='socket',
-+ discovery_file='/discovery.cfg',
-+ discovery_token='token',
-+ discovery_token_ca_cert_hash='type:value',
-+ discovery_token_unsafe_skip_ca_verification=True,
-+ experimental_control_plane=True,
-+ ignore_preflight_errors='all',
-+ node_name='node-1',
-+ skip_phases='all',
-+ tls_bootstrap_token='token',
-+ token='token',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'join',
-+ '10.160.65.165:6443',
-+ '--discovery-token-unsafe-skip-ca-verification',
-+ '--experimental-control-plane',
-+ '--apiserver-advertise-address', '127.0.0.1',
-+ '--apiserver-bind-port', '1234',
-+ '--certificate-key', 'secret',
-+ '--config', '/config.cfg',
-+ '--cri-socket', 'socket',
-+ '--discovery-file', '/discovery.cfg',
-+ '--discovery-token', 'token',
-+ '--discovery-token-ca-cert-hash', 'type:value',
-+ '--ignore-preflight-errors', 'all',
-+ '--node-name', 'node-1',
-+ '--skip-phases', 'all',
-+ '--tls-bootstrap-token', 'token',
-+ '--token', 'token',
-+ '--rootfs', '/mnt'])
-+
-+ def test_join_error(self):
-+ '''
-+ Test kubeadm.join error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.join()
-+
-+ def test_reset(self):
-+ '''
-+ Test kubeadm.reset without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.reset() == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'reset', '--force']
-+ )
-+
-+ def test_reset_params(self):
-+ '''
-+ Test kubeadm.reset with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ assert kubeadm.reset(
-+ cert_dir='/pki',
-+ cri_socket='socket',
-+ ignore_preflight_errors='all',
-+ kubeconfig='/kube.cfg',
-+ rootfs='/mnt') == 'stdout'
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['kubeadm', 'reset', '--force',
-+ '--cert-dir', '/pki',
-+ '--cri-socket', 'socket',
-+ '--ignore-preflight-errors', 'all',
-+ '--kubeconfig', '/kube.cfg',
-+ '--rootfs', '/mnt'])
-+
-+ def test_reset_error(self):
-+ '''
-+ Test kubeadm.reset error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(kubeadm.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert kubeadm.reset()
+ class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ Test cases for salt.modules.kubeadm
+ """
+
+ def setup_loader_modules(self):
+- return {kubeadm: {"__salt__": {}, "__utils__": {}}}
++ return {kubeadm: {"__salt__": {}, "__utils__": {},}}
+
+ def test_version(self):
+ """
+@@ -223,18 +244,6 @@ class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
+ with pytest.raises(CommandExecutionError):
+ assert kubeadm.token_generate()
+
+- def test_token_empty(self):
+- """
+- Test kubeadm.token_list when no output
+- """
+- result = {"retcode": 0, "stdout": ""}
+- salt_mock = {
+- "cmd.run_all": MagicMock(return_value=result),
+- }
+- with patch.dict(kubeadm.__salt__, salt_mock):
+- assert kubeadm.token_list() == []
+- salt_mock["cmd.run_all"].assert_called_with(["kubeadm", "token", "list"])
+-
+ def test_token_list(self):
+ """
+ Test kubeadm.token_list without parameters
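
The token_list tests above (including the removed empty-output case) exercise a parser that turns kubeadm's columnar stdout into a list of dicts keyed by the lowercased header. A simplified sketch of that kind of parser; the real implementation aligns on column offsets, while this illustration just splits on runs of whitespace:

```python
import re


def parse_columns(output):
    lines = output.strip().splitlines()
    if not lines:
        return []  # no output means no tokens, as the removed test expected
    header = [h.lower() for h in re.split(r"\s{2,}", lines[0].strip())]
    rows = []
    for line in lines[1:]:
        values = re.split(r"\s{2,}", line.strip())
        if len(values) == len(header):  # silently skip broken lines
            rows.append(dict(zip(header, values)))
    return rows


sample = ("TOKEN                    TTL   USAGES\n"
          "abcdef.0123456789abcdef  23h   authentication")
print(parse_columns(sample))
```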
diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py
-index 527c8d3bf8..54b81f6972 100644
+index e7e8230510..b41e8daf17 100644
--- a/tests/unit/modules/test_rpm_lowpkg.py
+++ b/tests/unit/modules/test_rpm_lowpkg.py
-@@ -25,7 +25,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- def setup_loader_modules(self):
- return {rpm: {'rpm': MagicMock(return_value=MagicMock)}}
+@@ -1,15 +1,9 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Jayesh Kariya
+ """
-- # 'list_pkgs' function tests: 1
-+ # 'list_pkgs' function tests: 2
+-# Import Python Libs
+-from __future__ import absolute_import
- def test_list_pkgs(self):
- '''
-@@ -34,13 +34,24 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- mock = MagicMock(return_value='')
- with patch.dict(rpm.__salt__, {'cmd.run': mock}):
- self.assertDictEqual(rpm.list_pkgs(), {})
-+ self.assertFalse(_called_with_root(mock))
+-# Import Salt Libs
+ import salt.modules.rpm_lowpkg as rpm
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
+@@ -108,6 +102,15 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertDictEqual(rpm.file_dict("httpd"), {"errors": [], "packages": {}})
+ self.assertFalse(_called_with_root(mock))
-- # 'verify' function tests: 1
-+ def test_list_pkgs_root(self):
-+ '''
-+ Test if it lists the packages currently installed in a dict,
-+ called with root parameter
-+ '''
-+ mock = MagicMock(return_value='')
-+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
-+ rpm.list_pkgs(root='/')
-+ self.assertTrue(_called_with_root(mock))
-+
-+ # 'verify' function tests: 2
-
- def test_verify(self):
- '''
-- Test if it runs an rpm -Va on a system,
-- and returns the results in a dict
-+ Test if it runs an rpm -Va on a system, and returns the
-+ results in a dict
- '''
- mock = MagicMock(return_value={'stdout': '',
- 'stderr': '',
-@@ -48,8 +59,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- 'pid': 12345})
- with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
- self.assertDictEqual(rpm.verify('httpd'), {})
-+ self.assertFalse(_called_with_root(mock))
-
-- # 'file_list' function tests: 1
-+ def test_verify_root(self):
-+ '''
-+ Test if it runs an rpm -Va on a system, and returns the
-+ results in a dict, called with root parameter
-+ '''
-+ mock = MagicMock(return_value={'stdout': '',
-+ 'stderr': '',
-+ 'retcode': 0,
-+ 'pid': 12345})
-+ with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
-+ rpm.verify('httpd', root='/')
-+ self.assertTrue(_called_with_root(mock))
-+
-+ # 'file_list' function tests: 2
-
- def test_file_list(self):
- '''
-@@ -59,8 +84,20 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(rpm.__salt__, {'cmd.run': mock}):
- self.assertDictEqual(rpm.file_list('httpd'),
- {'errors': [], 'files': []})
-+ self.assertFalse(_called_with_root(mock))
-+
-+ def test_file_list_root(self):
-+ '''
-+ Test if it lists the files that belong to a package, using the
-+ root parameter.
-+ '''
-+
-+ mock = MagicMock(return_value='')
-+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
-+ rpm.file_list('httpd', root='/')
-+ self.assertTrue(_called_with_root(mock))
-
-- # 'file_dict' function tests: 1
-+ # 'file_dict' function tests: 2
-
- def test_file_dict(self):
- '''
-@@ -70,6 +107,16 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(rpm.__salt__, {'cmd.run': mock}):
- self.assertDictEqual(rpm.file_dict('httpd'),
- {'errors': [], 'packages': {}})
-+ self.assertFalse(_called_with_root(mock))
-+
+ def test_file_dict_root(self):
-+ '''
++ """
Test if it lists the files that belong to a package
-+ '''
-+ mock = MagicMock(return_value='')
-+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
-+ rpm.file_dict('httpd', root='/')
-+ self.assertTrue(_called_with_root(mock))
-
- # 'owner' function tests: 1
-
-@@ -83,6 +130,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- mock = MagicMock(return_value=ret)
- with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
- self.assertEqual(rpm.owner('/usr/bin/salt-jenkins-build'), '')
-+ self.assertFalse(_called_with_root(mock))
-
- ret = {'/usr/bin/vim': 'vim-enhanced-7.4.160-1.e17.x86_64',
- '/usr/bin/python': 'python-2.7.5-16.e17.x86_64'}
-@@ -91,8 +139,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
- self.assertDictEqual(rpm.owner('/usr/bin/python', '/usr/bin/vim'),
- ret)
++ """
++ mock = MagicMock(return_value="")
++ with patch.dict(rpm.__salt__, {"cmd.run": mock}):
++ self.assertDictEqual(rpm.file_dict("httpd"), {"errors": [], "packages": {}})
+ self.assertFalse(_called_with_root(mock))
+
-+ def test_owner_root(self):
-+ '''
-+ Test if it returns the name of the package that owns the file,
-+ using the parameter root.
-+ '''
-+ self.assertEqual(rpm.owner(), '')
-+
-+ ret = 'file /usr/bin/salt-jenkins-build is not owned by any package'
-+ mock = MagicMock(return_value=ret)
-+ with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
-+ rpm.owner('/usr/bin/salt-jenkins-build', root='/')
-+ self.assertTrue(_called_with_root(mock))
-
-- # 'checksum' function tests: 1
-+ # 'checksum' function tests: 2
-
- def test_checksum(self):
- '''
-@@ -107,6 +169,17 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
- mock = MagicMock(side_effect=[True, 0, True, 1, False, 0])
- with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
- self.assertDictEqual(rpm.checksum("file1.rpm", "file2.rpm", "file3.rpm"), ret)
-+ self.assertFalse(_called_with_root(mock))
-+
-+ def test_checksum_root(self):
-+ '''
-+ Test if checksum validate as expected, using the parameter
-+ root
-+ '''
-+ mock = MagicMock(side_effect=[True, 0])
-+ with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
-+ rpm.checksum("file1.rpm", root='/')
-+ self.assertTrue(_called_with_root(mock))
-
- def test_version_cmp_rpm(self):
- '''
+ def test_file_dict_root(self):
+ """
+ Test if it lists the files that belong to a package
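
The assertions in these rpm tests lean on a small helper, _called_with_root, that checks whether the mocked command runner received a --root prefix. A hedged reconstruction of what such a helper might look like (the actual one lives at the top of test_rpm_lowpkg.py and may differ in detail):

```python
from unittest.mock import MagicMock


def _called_with_root(mock):
    # Inspect the first positional argument of the last call; for rpm
    # wrappers that is the command line, which gains "--root <path>"
    # whenever a root= keyword was honored.
    cmd = mock.call_args[0][0]
    return "--root" in cmd


mock = MagicMock(return_value="")
mock(["rpm", "--root", "/", "-qa"])
print(_called_with_root(mock))  # True
mock(["rpm", "-qa"])
print(_called_with_root(mock))  # False
```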
diff --git a/tests/unit/modules/test_systemd_service.py b/tests/unit/modules/test_systemd_service.py
-index 13ddc394be..752fb1d659 100644
+index 32741969ce..bbd89bb3d0 100644
--- a/tests/unit/modules/test_systemd_service.py
+++ b/tests/unit/modules/test_systemd_service.py
-@@ -7,6 +7,8 @@
- from __future__ import absolute_import, unicode_literals, print_function
+@@ -1,23 +1,16 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Rahul Handay
+ """
+
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
import os
-+import pytest
-+
- # Import Salt Testing Libs
+ import pytest
+-
+-# Import Salt Libs
+ import salt.modules.systemd_service as systemd
+ import salt.utils.systemd
+ from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, patch
+-
+-# Import Salt Testing Libs
++from tests.support.mock import MagicMock, Mock, patch
from tests.support.unit import TestCase
-@@ -643,3 +645,54 @@ class SystemdScopeTestCase(TestCase, LoaderModuleMockMixin):
- def test_unmask_runtime(self):
- self._mask_unmask('unmask_', True)
-+
-+ def test_firstboot(self):
-+ '''
-+ Test service.firstboot without parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(systemd.__salt__, salt_mock):
-+ assert systemd.firstboot()
-+ salt_mock['cmd.run_all'].assert_called_with(['systemd-firstboot'])
-+
-+ def test_firstboot_params(self):
-+ '''
-+ Test service.firstboot with parameters
-+ '''
-+ result = {'retcode': 0, 'stdout': 'stdout'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(systemd.__salt__, salt_mock):
-+ assert systemd.firstboot(
-+ locale='en_US.UTF-8',
-+ locale_message='en_US.UTF-8',
-+ keymap='jp',
-+ timezone='Europe/Berlin',
-+ hostname='node-001',
-+ machine_id='1234567890abcdef',
-+ root='/mnt')
-+ salt_mock['cmd.run_all'].assert_called_with(
-+ ['systemd-firstboot',
-+ '--locale', 'en_US.UTF-8',
-+ '--locale-message', 'en_US.UTF-8',
-+ '--keymap', 'jp',
-+ '--timezone', 'Europe/Berlin',
-+ '--hostname', 'node-001',
-+ '--machine-ID', '1234567890abcdef',
-+ '--root', '/mnt'])
-+
-+ def test_firstboot_error(self):
-+ '''
-+ Test service.firstboot error
-+ '''
-+ result = {'retcode': 1, 'stderr': 'error'}
-+ salt_mock = {
-+ 'cmd.run_all': MagicMock(return_value=result),
-+ }
-+ with patch.dict(systemd.__salt__, salt_mock):
-+ with pytest.raises(CommandExecutionError):
-+ assert systemd.firstboot()
+ _SYSTEMCTL_STATUS = {
+@@ -93,7 +86,7 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
+ cmd_mock = MagicMock(return_value=_LIST_UNIT_FILES)
+ listdir_mock = MagicMock(return_value=["foo", "bar", "baz", "README"])
+ sd_mock = MagicMock(
+- return_value=set([x.replace(".service", "") for x in _SYSTEMCTL_STATUS])
++ return_value={x.replace(".service", "") for x in _SYSTEMCTL_STATUS}
+ )
+ access_mock = MagicMock(
+ side_effect=lambda x, y: x
+@@ -124,7 +117,7 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
+ # only 'baz' will be considered an enabled sysv service).
+ listdir_mock = MagicMock(return_value=["foo", "bar", "baz", "README"])
+ sd_mock = MagicMock(
+- return_value=set([x.replace(".service", "") for x in _SYSTEMCTL_STATUS])
++ return_value={x.replace(".service", "") for x in _SYSTEMCTL_STATUS}
+ )
+ access_mock = MagicMock(
+ side_effect=lambda x, y: x
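
The side_effect=lambda idiom shown above makes one patched callable answer per-argument instead of returning a canned value. A self-contained example of the same trick applied to os.access:

```python
import os
from unittest.mock import MagicMock, patch

# Only paths in this set will look executable to the patched os.access.
executable = {"/etc/init.d/baz"}
access_mock = MagicMock(side_effect=lambda path, mode: path in executable)

with patch("os.access", access_mock):
    print(os.access("/etc/init.d/baz", os.X_OK))     # True
    print(os.access("/etc/init.d/README", os.X_OK))  # False
```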
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 956902eab3..3a6466f061 100644
+index b07f9a3af7..032785395e 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -38,6 +38,9 @@ class ZyppCallMock(object):
- return self
-
- def __call__(self, *args, **kwargs):
-+ # If the call is for a configuration modifier, we return self
-+ if any(i in kwargs for i in ('no_repo_failure', 'systemd_scope', 'root')):
-+ return self
- return MagicMock(return_value=self.__return_value)()
-
-
-@@ -926,7 +929,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- 'pico': '0.1.1',
- }
-
-- def __call__(self):
-+ def __call__(self, root=None, includes=None):
- pkgs = self._pkgs.copy()
- for target in self._packages:
- if self._pkgs.get(target):
-@@ -992,10 +995,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with zypper_patcher:
- zypper.mod_repo(name, **{'url': url})
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [call('ar', url, name)]
+@@ -1639,6 +1639,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ self.assertTrue(
+ zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0
)
-- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
-+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
-
- def test_repo_noadd_nomod_noref(self):
- '''
-@@ -1017,8 +1020,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- self.assertEqual(
- out['comment'],
- 'Specified arguments did not result in modification of repo')
-- self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
-- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
-+ self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
-+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
-
- def test_repo_noadd_modbaseurl_ref(self):
- '''
-@@ -1046,9 +1049,11 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- 'priority': 1,
- 'cache': False,
- 'keeppackages': False,
-- 'type': 'rpm-md'}
-- self.assertTrue(zypper.mod_repo.call_count == 2)
-- self.assertTrue(zypper.mod_repo.mock_calls[1] == call(name, **expected_params))
-+ 'type': 'rpm-md',
-+ 'root': None,
-+ }
-+ self.assertEqual(zypper.mod_repo.call_count, 2)
-+ self.assertEqual(zypper.mod_repo.mock_calls[1], call(name, **expected_params))
-
- def test_repo_add_mod_noref(self):
- '''
-@@ -1064,10 +1069,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with zypper_patcher:
- zypper.mod_repo(name, **{'url': url, 'refresh': True})
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [call('ar', url, name)]
- )
-- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
-+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
- 'mr', '--refresh', name
- )
-
-@@ -1086,8 +1091,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- 'salt.modules.zypperpkg', **self.zypper_patcher_config)
- with zypper_patcher:
- zypper.mod_repo(name, **{'url': url, 'refresh': True})
-- self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
-- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
-+ self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
-+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
- 'mr', '--refresh', name
- )
-
-@@ -1106,13 +1111,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with zypper_patcher:
- zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [
- call('ar', url, name),
- call('--gpg-auto-import-keys', 'refresh', name)
- ]
- )
-- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
-+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
++ self.assertTrue(
++ zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0
++ )
def test_repo_noadd_nomod_ref(self):
- '''
-@@ -1133,10 +1138,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with zypper_patcher:
- zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [call('--gpg-auto-import-keys', 'refresh', name)]
- )
-- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
-+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
+ """
+@@ -1919,8 +1922,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ def test__get_installed_patterns(self, get_visible_patterns):
+ """Test installed patterns in the system"""
+ get_visible_patterns.return_value = {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-b": {"installed": False, "summary": "description b"},
++ "package-a": {"installed": True, "summary": "description a",},
++ "package-b": {"installed": False, "summary": "description b",},
+ }
- def test_repo_add_mod_ref(self):
- '''
-@@ -1157,13 +1162,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- **{'url': url, 'refresh': True, 'gpgautoimport': True}
- )
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [
- call('ar', url, name),
- call('--gpg-auto-import-keys', 'refresh', name)
- ]
- )
-- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
-+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
- '--gpg-auto-import-keys', 'mr', '--refresh', name
- )
+ salt_mock = {
+@@ -1932,59 +1935,18 @@ pattern() = package-c"""
+ }
+ with patch.dict("salt.modules.zypperpkg.__salt__", salt_mock):
+ assert zypper._get_installed_patterns() == {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-c": {"installed": True, "summary": "Non-visible pattern"},
+- }
+-
+- @patch("salt.modules.zypperpkg._get_visible_patterns")
+- def test__get_installed_patterns_with_alias(self, get_visible_patterns):
+- """Test installed patterns in the system if they have alias"""
+- get_visible_patterns.return_value = {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-b": {"installed": False, "summary": "description b"},
+- }
+-
+- salt_mock = {
+- "cmd.run": MagicMock(
+- return_value="""pattern() = .package-a-alias
+-pattern() = package-a
+-pattern-visible()
+-pattern() = package-c"""
+- ),
+- }
+- with patch.dict("salt.modules.zypperpkg.__salt__", salt_mock):
+- assert zypper._get_installed_patterns() == {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-c": {"installed": True, "summary": "Non-visible pattern"},
++ "package-a": {"installed": True, "summary": "description a",},
++ "package-c": {"installed": True, "summary": "Non-visible pattern",},
+ }
-@@ -1189,10 +1194,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- **{'url': url, 'refresh': True, 'gpgautoimport': True}
- )
- self.assertEqual(
-- zypper.__zypper__.xml.call.call_args_list,
-+ zypper.__zypper__(root=None).xml.call.call_args_list,
- [call('--gpg-auto-import-keys', 'refresh', name)]
- )
-- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
-+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
- '--gpg-auto-import-keys', 'mr', '--refresh', name
- )
-
-@@ -1369,3 +1374,58 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- with self.assertRaises(CommandExecutionError):
- for op in ['>>', '==', '<<', '+']:
- zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op))
-+
-+ @patch('salt.modules.zypperpkg._get_visible_patterns')
-+ def test__get_installed_patterns(self, get_visible_patterns):
-+ '''Test installed patterns in the system'''
-+ get_visible_patterns.return_value = {
-+ 'package-a': {
-+ 'installed': True,
-+ 'summary': 'description a',
-+ },
-+ 'package-b': {
-+ 'installed': False,
-+ 'summary': 'description b',
-+ },
-+ }
-+
-+ salt_mock = {
-+ 'cmd.run': MagicMock(return_value='''pattern() = package-a
-+pattern-visible()
-+pattern() = package-c'''),
-+ }
-+ with patch.dict('salt.modules.zypperpkg.__salt__', salt_mock):
-+ assert zypper._get_installed_patterns() == {
-+ 'package-a': {
-+ 'installed': True,
-+ 'summary': 'description a',
-+ },
-+ 'package-c': {
-+ 'installed': True,
-+ 'summary': 'Non-visible pattern',
-+ },
-+ }
-+
-+ @patch('salt.modules.zypperpkg._get_visible_patterns')
-+ def test_list_patterns(self, get_visible_patterns):
-+ '''Test available patterns in the repo'''
-+ get_visible_patterns.return_value = {
-+ 'package-a': {
-+ 'installed': True,
-+ 'summary': 'description a',
-+ },
-+ 'package-b': {
-+ 'installed': False,
-+ 'summary': 'description b',
-+ },
-+ }
-+ assert zypper.list_patterns() == {
-+ 'package-a': {
-+ 'installed': True,
-+ 'summary': 'description a',
-+ },
-+ 'package-b': {
-+ 'installed': False,
-+ 'summary': 'description b',
-+ },
-+ }
+ @patch("salt.modules.zypperpkg._get_visible_patterns")
+ def test_list_patterns(self, get_visible_patterns):
+ """Test available patterns in the repo"""
+ get_visible_patterns.return_value = {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-b": {"installed": False, "summary": "description b"},
++ "package-a": {"installed": True, "summary": "description a",},
++ "package-b": {"installed": False, "summary": "description b",},
+ }
+ assert zypper.list_patterns() == {
+- "package-a": {"installed": True, "summary": "description a"},
+- "package-b": {"installed": False, "summary": "description b"},
+- }
+-
+- def test__clean_cache_empty(self):
+- """Test that an empty cached can be cleaned"""
+- context = {}
+- with patch.dict(zypper.__context__, context):
+- zypper._clean_cache()
+- assert context == {}
+-
+- def test__clean_cache_filled(self):
+- """Test that a filled cached can be cleaned"""
+- context = {
+- "pkg.list_pkgs_/mnt_[]": None,
+- "pkg.list_pkgs_/mnt_[patterns]": None,
+- "pkg.list_provides": None,
+- "pkg.other_data": None,
++ "package-a": {"installed": True, "summary": "description a",},
++ "package-b": {"installed": False, "summary": "description b",},
+ }
+- with patch.dict(zypper.__context__, context):
+- zypper._clean_cache()
+- self.assertEqual(zypper.__context__, {"pkg.other_data": None})
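
The `_clean_cache` tests removed above encode the expected behavior: after cleaning, the package-listing keys disappear from the module context while unrelated keys survive. A minimal sketch consistent with those expectations (a hypothetical standalone helper, not the zypperpkg implementation):

    def _clean_cache(context):
        # Drop cached package listings such as "pkg.list_pkgs_/mnt_[]" or
        # "pkg.list_provides"; keep everything else, e.g. "pkg.other_data".
        for key in list(context):
            if key.startswith("pkg.list"):
                context.pop(key)

    context = {
        "pkg.list_pkgs_/mnt_[]": None,
        "pkg.list_pkgs_/mnt_[patterns]": None,
        "pkg.list_provides": None,
        "pkg.other_data": None,
    }
    _clean_cache(context)
    assert context == {"pkg.other_data": None}
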
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
-new file mode 100644
-index 0000000000..3f45ed94f9
---- /dev/null
+index fdbf06bd13..74e44641b8 100644
+--- a/tests/unit/states/test_btrfs.py
+++ b/tests/unit/states/test_btrfs.py
-@@ -0,0 +1,782 @@
-+# -*- coding: utf-8 -*-
+@@ -1,27 +1,45 @@
+#
+# Author: Alberto Planas
+#
@@ -6156,853 +1249,443 @@ index 0000000000..3f45ed94f9
+# specific language governing permissions and limitations
+# under the License.
+
-+'''
-+:maintainer: Alberto Planas
-+:platform: Linux
-+'''
-+# Import Python Libs
-+from __future__ import absolute_import, print_function, unicode_literals
-+# Import Salt Testing Libs
-+from tests.support.mixins import LoaderModuleMockMixin
-+from tests.support.unit import skipIf, TestCase
-+from tests.support.mock import (
-+ MagicMock,
-+ NO_MOCK,
-+ NO_MOCK_REASON,
-+ patch,
-+)
-+
-+from salt.exceptions import CommandExecutionError
-+import salt.states.btrfs as btrfs
-+
-+import pytest
-+
-+
+ """
+ :maintainer: Alberto Planas
+ :platform: Linux
+ """
+-
+ import pytest
+ import salt.states.btrfs as btrfs
+-import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, patch
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
+
+
+-@skipIf(salt.utils.platform.is_windows(), "Non-Windows feature")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
-+ '''
-+ Test cases for salt.states.btrfs
-+ '''
-+
-+ def setup_loader_modules(self):
-+ return {
-+ btrfs: {
-+ '__salt__': {},
-+ '__states__': {},
-+ '__utils__': {},
-+ }
-+ }
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('tempfile.mkdtemp')
-+ def test__mount_fails(self, mkdtemp, umount):
-+ '''
-+ Test mounting a device in a temporary place.
-+ '''
-+ mkdtemp.return_value = '/tmp/xxx'
-+ states_mock = {
-+ 'mount.mounted': MagicMock(return_value={'result': False}),
-+ }
-+ with patch.dict(btrfs.__states__, states_mock):
-+ assert btrfs._mount('/dev/sda1', use_default=False) is None
-+ mkdtemp.assert_called_once()
-+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
-+ device='/dev/sda1',
-+ fstype='btrfs',
-+ opts='subvol=/',
-+ persist=False)
-+ umount.assert_called_with('/tmp/xxx')
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('tempfile.mkdtemp')
-+ def test__mount(self, mkdtemp, umount):
-+ '''
-+ Test mounting a device in a temporary place.
-+ '''
-+ mkdtemp.return_value = '/tmp/xxx'
-+ states_mock = {
-+ 'mount.mounted': MagicMock(return_value={'result': True}),
-+ }
-+ with patch.dict(btrfs.__states__, states_mock):
-+ assert btrfs._mount('/dev/sda1', use_default=False) == '/tmp/xxx'
-+ mkdtemp.assert_called_once()
-+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
-+ device='/dev/sda1',
-+ fstype='btrfs',
-+ opts='subvol=/',
-+ persist=False)
-+ umount.assert_not_called()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('tempfile.mkdtemp')
-+ def test__mount_use_default(self, mkdtemp, umount):
-+ '''
-+ Test mounting a device in a temporary place.
-+ '''
-+ mkdtemp.return_value = '/tmp/xxx'
-+ states_mock = {
-+ 'mount.mounted': MagicMock(return_value={'result': True}),
-+ }
-+ with patch.dict(btrfs.__states__, states_mock):
-+ assert btrfs._mount('/dev/sda1', use_default=True) == '/tmp/xxx'
-+ mkdtemp.assert_called_once()
-+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
-+ device='/dev/sda1',
-+ fstype='btrfs',
-+ opts='defaults',
-+ persist=False)
-+ umount.assert_not_called()
-+
-+ def test__umount(self):
-+ '''
-+ Test umounting and cleaning the temporary place.
-+ '''
-+ states_mock = {
-+ 'mount.unmounted': MagicMock(),
-+ }
-+ utils_mock = {
-+ 'files.rm_rf': MagicMock(),
-+ }
-+ with patch.dict(btrfs.__states__, states_mock), \
-+ patch.dict(btrfs.__utils__, utils_mock):
-+ btrfs._umount('/tmp/xxx')
-+ states_mock['mount.unmounted'].assert_called_with('/tmp/xxx')
-+ utils_mock['files.rm_rf'].assert_called_with('/tmp/xxx')
-+
-+ def test__is_default_not_default(self):
-+ '''
-+ Test if the subvolume is the current default.
-+ '''
-+ salt_mock = {
-+ 'btrfs.subvolume_show': MagicMock(return_value={
-+ '@/var': {'subvolume id': '256'},
-+ }),
-+ 'btrfs.subvolume_get_default': MagicMock(return_value={
-+ 'id': '5',
-+ }),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert not btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
-+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
-+ salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
-+
-+ def test__is_default(self):
-+ '''
-+ Test if the subvolume is the current default.
-+ '''
-+ salt_mock = {
-+ 'btrfs.subvolume_show': MagicMock(return_value={
-+ '@/var': {'subvolume id': '256'},
-+ }),
-+ 'btrfs.subvolume_get_default': MagicMock(return_value={
-+ 'id': '256',
-+ }),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
-+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
-+ salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
-+
-+ def test__set_default(self):
-+ '''
-+ Test setting a subvolume as the current default.
-+ '''
-+ salt_mock = {
-+ 'btrfs.subvolume_show': MagicMock(return_value={
-+ '@/var': {'subvolume id': '256'},
-+ }),
-+ 'btrfs.subvolume_set_default': MagicMock(return_value=True),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert btrfs._set_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
-+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
-+ salt_mock['btrfs.subvolume_set_default'].assert_called_with('256', '/tmp/xxx')
-+
-+ def test__is_cow_not_cow(self):
-+ '''
-+ Test if the subvolume is copy on write.
-+ '''
-+ salt_mock = {
-+ 'file.lsattr': MagicMock(return_value={
-+ '/tmp/xxx/@/var': ['C'],
-+ }),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert not btrfs._is_cow('/tmp/xxx/@/var')
-+ salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
-+
-+ def test__is_cow(self):
-+ '''
-+ Test if the subvolume is copy on write.
-+ '''
-+ salt_mock = {
-+ 'file.lsattr': MagicMock(return_value={
-+ '/tmp/xxx/@/var': [],
-+ }),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert btrfs._is_cow('/tmp/xxx/@/var')
-+ salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
-+
-+ def test__unset_cow(self):
-+ '''
-+ Test disabling the subvolume as copy on write.
-+ '''
-+ salt_mock = {
-+ 'file.chattr': MagicMock(return_value=True),
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock):
-+ assert btrfs._unset_cow('/tmp/xxx/@/var')
-+ salt_mock['file.chattr'].assert_called_with('/tmp/xxx/@/var',
-+ operator='add',
-+ attributes='C')
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists(self, mount, umount):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {},
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_test(self, mount, umount):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': True,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': None,
-+ 'changes': {},
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._is_default')
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_was_default(self, mount, umount,
-+ is_default):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ is_default.return_value = True
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1',
-+ set_default=True) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {},
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._set_default')
-+ @patch('salt.states.btrfs._is_default')
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_set_default(self, mount, umount,
-+ is_default, set_default):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ is_default.return_value = False
-+ set_default.return_value = True
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1',
-+ set_default=True) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {
-+ '@/var_default': True
-+ },
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._set_default')
-+ @patch('salt.states.btrfs._is_default')
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_set_default_no_force(self,
-+ mount,
-+ umount,
-+ is_default,
-+ set_default):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ is_default.return_value = False
-+ set_default.return_value = True
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1',
-+ set_default=True,
-+ force_set_default=False) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {},
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._is_cow')
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_no_cow(self, mount, umount, is_cow):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ is_cow.return_value = False
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1',
-+ copy_on_write=False) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {},
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._unset_cow')
-+ @patch('salt.states.btrfs._is_cow')
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_exists_unset_cow(self, mount, umount,
-+ is_cow, unset_cow):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ is_cow.return_value = True
-+ unset_cow.return_value = True
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1',
-+ copy_on_write=False) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {
-+ '@/var_no_cow': True
-+ },
-+ 'comment': ['Subvolume @/var already present'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created(self, mount, umount):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
-+ 'btrfs.subvolume_create': MagicMock(),
-+ }
-+ states_mock = {
-+ 'file.directory': MagicMock(return_value={'result': True}),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__states__, states_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {
-+ '@/var': 'Created subvolume @/var'
-+ },
-+ 'comment': [],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ salt_mock['btrfs.subvolume_create'].assert_called_once()
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_fails_directory(self, mount, umount):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
-+ }
-+ states_mock = {
-+ 'file.directory': MagicMock(return_value={'result': False}),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__states__, states_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Error creating /tmp/xxx/@ directory'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ def test_subvolume_created_fails(self, mount, umount):
-+ '''
-+ Test creating a subvolume.
-+ '''
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
-+ 'btrfs.subvolume_create': MagicMock(side_effect=CommandExecutionError),
-+ }
-+ states_mock = {
-+ 'file.directory': MagicMock(return_value={'result': True}),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__states__, states_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.subvolume_created(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Error creating subvolume @/var'],
-+ }
-+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
-+ salt_mock['btrfs.subvolume_create'].assert_called_once()
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ def test_diff_properties_fails(self):
-+ '''
-+ Test when diff_properties does not find a property
-+ '''
-+ expected = {
-+ 'wrong_property': True
-+ }
-+ current = {
-+ 'compression': {
-+ 'description': 'Set/get compression for a file or directory',
-+ 'value': 'N/A',
-+ },
-+ 'label': {
-+ 'description': 'Set/get label of device.',
-+ 'value': 'N/A',
-+ },
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ }
-+ with pytest.raises(Exception):
-+ btrfs._diff_properties(expected, current)
-+
-+ def test_diff_properties_enable_ro(self):
-+ '''
-+ Test when diff_properties enables a single property
-+ '''
-+ expected = {
-+ 'ro': True
-+ }
-+ current = {
-+ 'compression': {
-+ 'description': 'Set/get compression for a file or directory',
-+ 'value': 'N/A',
-+ },
-+ 'label': {
-+ 'description': 'Set/get label of device.',
-+ 'value': 'N/A',
-+ },
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ }
-+ assert btrfs._diff_properties(expected, current) == {'ro': True}
-+
-+ def test_diff_properties_only_enable_ro(self):
-+ '''
-+ Test when only some of the expected properties still differ
-+ '''
-+ expected = {
-+ 'ro': True,
-+ 'label': 'mylabel'
-+ }
-+ current = {
-+ 'compression': {
-+ 'description': 'Set/get compression for a file or directory',
-+ 'value': 'N/A',
-+ },
-+ 'label': {
-+ 'description': 'Set/get label of device.',
-+ 'value': 'mylabel',
-+ },
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ }
-+ assert btrfs._diff_properties(expected, current) == {'ro': True}
-+
-+ def test_diff_properties_disable_ro(self):
-+ '''
-+ Test when diff_properties disables a single property
-+ '''
-+ expected = {
-+ 'ro': False
-+ }
-+ current = {
-+ 'compression': {
-+ 'description': 'Set/get compression for a file or directory',
-+ 'value': 'N/A',
-+ },
-+ 'label': {
-+ 'description': 'Set/get label of device.',
-+ 'value': 'N/A',
-+ },
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': True,
-+ },
-+ }
-+ assert btrfs._diff_properties(expected, current) == {'ro': False}
-+
-+ def test_diff_properties_empty_na(self):
-+ '''
-+ Test when diff_properties is already disabled as N/A
-+ '''
-+ expected = {
-+ 'ro': False
-+ }
-+ current = {
-+ 'compression': {
-+ 'description': 'Set/get compression for a file or directory',
-+ 'value': 'N/A',
-+ },
-+ 'label': {
-+ 'description': 'Set/get label of device.',
-+ 'value': 'N/A',
-+ },
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ }
-+ assert btrfs._diff_properties(expected, current) == {}
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ @patch('os.path.exists')
-+ def test_properties_subvolume_not_exists(self, exists, mount, umount):
-+ '''
-+ Test when subvolume is not present
-+ '''
-+ exists.return_value = False
-+ mount.return_value = '/tmp/xxx'
-+ assert btrfs.properties(name='@/var',
-+ device='/dev/sda1') == {
-+ 'name': '@/var',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Object @/var not found'],
-+ }
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ @patch('os.path.exists')
-+ def test_properties_default_root_subvolume(self, exists, mount, umount):
-+ '''
-+ Test when root subvolume resolves to another subvolume
-+ '''
-+ exists.return_value = False
-+ mount.return_value = '/tmp/xxx'
-+ assert btrfs.properties(name='/',
-+ device='/dev/sda1') == {
-+ 'name': '/',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Object / not found'],
-+ }
-+ exists.assert_called_with('/tmp/xxx/.')
-+
-+ @patch('os.path.exists')
-+ def test_properties_device_fail(self, exists):
-+ '''
-+ Test when we try to set a device that is not present
-+ '''
-+ exists.return_value = False
-+ assert btrfs.properties(name='/dev/sda1',
-+ device=None) == {
-+ 'name': '/dev/sda1',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Object /dev/sda1 not found'],
-+ }
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ @patch('os.path.exists')
-+ def test_properties_subvolume_fail(self, exists, mount, umount):
-+ '''
-+ Test setting a wrong property in a subvolume
-+ '''
-+ exists.return_value = True
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.properties': MagicMock(side_effect=[
-+ {
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ }
-+ ]),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.properties(name='@/var',
-+ device='/dev/sda1',
-+ wrong_property=True) == {
-+ 'name': '@/var',
-+ 'result': False,
-+ 'changes': {},
-+ 'comment': ['Some property not found in @/var'],
-+ }
-+ salt_mock['btrfs.properties'].assert_called_with('/tmp/xxx/@/var')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
-+
-+ @patch('salt.states.btrfs._umount')
-+ @patch('salt.states.btrfs._mount')
-+ @patch('os.path.exists')
-+ def test_properties_enable_ro_subvolume(self, exists, mount, umount):
-+ '''
-+ Test setting a ro property in a subvolume
-+ '''
-+ exists.return_value = True
-+ mount.return_value = '/tmp/xxx'
-+ salt_mock = {
-+ 'btrfs.properties': MagicMock(side_effect=[
-+ {
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'N/A',
-+ },
-+ },
-+ None,
-+ {
-+ 'ro': {
-+ 'description': 'Set/get read-only flag or subvolume',
-+ 'value': 'true',
-+ },
-+ }
-+ ]),
-+ }
-+ opts_mock = {
-+ 'test': False,
-+ }
-+ with patch.dict(btrfs.__salt__, salt_mock), \
-+ patch.dict(btrfs.__opts__, opts_mock):
-+ assert btrfs.properties(name='@/var',
-+ device='/dev/sda1', ro=True) == {
-+ 'name': '@/var',
-+ 'result': True,
-+ 'changes': {'ro': 'true'},
-+ 'comment': ['Properties changed in @/var'],
-+ }
-+ salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var')
-+ salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var',
-+ set='ro=true')
-+ mount.assert_called_once()
-+ umount.assert_called_once()
+ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ Test cases for salt.states.btrfs
+ """
+
+ def setup_loader_modules(self):
+- return {btrfs: {"__salt__": {}, "__states__": {}, "__utils__": {}}}
++ return {btrfs: {"__salt__": {}, "__states__": {}, "__utils__": {},}}
+
+ @patch("salt.states.btrfs._umount")
+ @patch("tempfile.mkdtemp")
+@@ -112,9 +130,9 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ salt_mock = {
+ "btrfs.subvolume_show": MagicMock(
+- return_value={"@/var": {"subvolume id": "256"}}
++ return_value={"@/var": {"subvolume id": "256"},}
+ ),
+- "btrfs.subvolume_get_default": MagicMock(return_value={"id": "5"}),
++ "btrfs.subvolume_get_default": MagicMock(return_value={"id": "5",}),
+ }
+ with patch.dict(btrfs.__salt__, salt_mock):
+ assert not btrfs._is_default("/tmp/xxx/@/var", "/tmp/xxx", "@/var")
+@@ -127,9 +145,9 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ salt_mock = {
+ "btrfs.subvolume_show": MagicMock(
+- return_value={"@/var": {"subvolume id": "256"}}
++ return_value={"@/var": {"subvolume id": "256"},}
+ ),
+- "btrfs.subvolume_get_default": MagicMock(return_value={"id": "256"}),
++ "btrfs.subvolume_get_default": MagicMock(return_value={"id": "256",}),
+ }
+ with patch.dict(btrfs.__salt__, salt_mock):
+ assert btrfs._is_default("/tmp/xxx/@/var", "/tmp/xxx", "@/var")
+@@ -142,7 +160,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ salt_mock = {
+ "btrfs.subvolume_show": MagicMock(
+- return_value={"@/var": {"subvolume id": "256"}}
++ return_value={"@/var": {"subvolume id": "256"},}
+ ),
+ "btrfs.subvolume_set_default": MagicMock(return_value=True),
+ }
+@@ -158,7 +176,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ Test if the subvolume is copy on write.
+ """
+ salt_mock = {
+- "file.lsattr": MagicMock(return_value={"/tmp/xxx/@/var": ["C"]}),
++ "file.lsattr": MagicMock(return_value={"/tmp/xxx/@/var": ["C"],}),
+ }
+ with patch.dict(btrfs.__salt__, salt_mock):
+ assert not btrfs._is_cow("/tmp/xxx/@/var")
+@@ -169,7 +187,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ Test if the subvolume is copy on write.
+ """
+ salt_mock = {
+- "file.lsattr": MagicMock(return_value={"/tmp/xxx/@/var": []}),
++ "file.lsattr": MagicMock(return_value={"/tmp/xxx/@/var": [],}),
+ }
+ with patch.dict(btrfs.__salt__, salt_mock):
+ assert btrfs._is_cow("/tmp/xxx/@/var")
+@@ -188,7 +206,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "/tmp/xxx/@/var", operator="add", attributes="C"
+ )
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created_exists(self, mount, umount):
+@@ -215,34 +232,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+- @patch("salt.states.btrfs._umount")
+- @patch("salt.states.btrfs._mount")
+- def test_subvolume_created_exists_decorator(self, mount, umount):
+- """
+- Test creating a subvolume using a non-kwargs call
+- """
+- mount.return_value = "/tmp/xxx"
+- salt_mock = {
+- "btrfs.subvolume_exists": MagicMock(return_value=True),
+- }
+- opts_mock = {
+- "test": False,
+- }
+- with patch.dict(btrfs.__salt__, salt_mock), patch.dict(
+- btrfs.__opts__, opts_mock
+- ):
+- assert btrfs.subvolume_created("@/var", "/dev/sda1") == {
+- "name": "@/var",
+- "result": True,
+- "changes": {},
+- "comment": ["Subvolume @/var already present"],
+- }
+- salt_mock["btrfs.subvolume_exists"].assert_called_with("/tmp/xxx/@/var")
+- mount.assert_called_once()
+- umount.assert_called_once()
+-
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created_exists_test(self, mount, umount):
+@@ -269,7 +258,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._is_default")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+@@ -300,7 +288,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._set_default")
+ @patch("salt.states.btrfs._is_default")
+ @patch("salt.states.btrfs._umount")
+@@ -335,7 +322,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._set_default")
+ @patch("salt.states.btrfs._is_default")
+ @patch("salt.states.btrfs._umount")
+@@ -373,7 +359,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._is_cow")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+@@ -404,7 +389,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._unset_cow")
+ @patch("salt.states.btrfs._is_cow")
+ @patch("salt.states.btrfs._umount")
+@@ -437,7 +421,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created(self, mount, umount):
+@@ -469,7 +452,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created_fails_directory(self, mount, umount):
+@@ -499,7 +481,6 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ mount.assert_called_once()
+ umount.assert_called_once()
+
+- @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created_fails(self, mount, umount):
+@@ -541,7 +522,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "description": "Set/get compression for a file or directory",
+ "value": "N/A",
+ },
+- "label": {"description": "Set/get label of device.", "value": "N/A"},
++ "label": {"description": "Set/get label of device.", "value": "N/A",},
+ "ro": {
+ "description": "Set/get read-only flag or subvolume",
+ "value": "N/A",
+@@ -560,7 +541,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "description": "Set/get compression for a file or directory",
+ "value": "N/A",
+ },
+- "label": {"description": "Set/get label of device.", "value": "N/A"},
++ "label": {"description": "Set/get label of device.", "value": "N/A",},
+ "ro": {
+ "description": "Set/get read-only flag or subvolume",
+ "value": "N/A",
+@@ -578,7 +559,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "description": "Set/get compression for a file or directory",
+ "value": "N/A",
+ },
+- "label": {"description": "Set/get label of device.", "value": "mylabel"},
++ "label": {"description": "Set/get label of device.", "value": "mylabel",},
+ "ro": {
+ "description": "Set/get read-only flag or subvolume",
+ "value": "N/A",
+@@ -596,7 +577,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "description": "Set/get compression for a file or directory",
+ "value": "N/A",
+ },
+- "label": {"description": "Set/get label of device.", "value": "N/A"},
++ "label": {"description": "Set/get label of device.", "value": "N/A",},
+ "ro": {
+ "description": "Set/get read-only flag or subvolume",
+ "value": True,
+@@ -614,7 +595,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ "description": "Set/get compression for a file or directory",
+ "value": "N/A",
+ },
+- "label": {"description": "Set/get label of device.", "value": "N/A"},
++ "label": {"description": "Set/get label of device.", "value": "N/A",},
+ "ro": {
+ "description": "Set/get read-only flag or subvolume",
+ "value": "N/A",
+@@ -755,40 +736,3 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+ )
+ mount.assert_called_once()
+ umount.assert_called_once()
+-
+- @patch("salt.states.btrfs._umount")
+- @patch("salt.states.btrfs._mount")
+- @patch("os.path.exists")
+- def test_properties_test(self, exists, mount, umount):
+- """
+- Test setting a property in test mode.
+- """
+- exists.return_value = True
+- mount.return_value = "/tmp/xxx"
+- salt_mock = {
+- "btrfs.properties": MagicMock(
+- side_effect=[
+- {
+- "ro": {
+- "description": "Set/get read-only flag or subvolume",
+- "value": "N/A",
+- },
+- },
+- ]
+- ),
+- }
+- opts_mock = {
+- "test": True,
+- }
+- with patch.dict(btrfs.__salt__, salt_mock), patch.dict(
+- btrfs.__opts__, opts_mock
+- ):
+- assert btrfs.properties(name="@/var", device="/dev/sda1", ro=True) == {
+- "name": "@/var",
+- "result": None,
+- "changes": {"ro": "true"},
+- "comment": [],
+- }
+- salt_mock["btrfs.properties"].assert_called_with("/tmp/xxx/@/var")
+- mount.assert_called_once()
+- umount.assert_called_once()
diff --git a/tests/unit/states/test_pkg.py b/tests/unit/states/test_pkg.py
-index 174ab65ab8..38f72353fa 100644
+index 15ca937e13..a7ddfece14 100644
--- a/tests/unit/states/test_pkg.py
+++ b/tests/unit/states/test_pkg.py
-@@ -43,7 +43,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
- pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
- })
- upgrade = MagicMock(return_value=self.pkgs)
-- version = MagicMock(side_effect=lambda pkgname: self.pkgs[pkgname]['old'])
-+ version = MagicMock(side_effect=lambda pkgname, **_: self.pkgs[pkgname]['old'])
-
- with patch.dict(pkg.__salt__,
- {'pkg.list_upgrades': list_upgrades,
-@@ -52,7 +52,6 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
-
- # Run state with test=false
- with patch.dict(pkg.__opts__, {'test': False}):
+@@ -1,15 +1,6 @@
+-# -*- coding: utf-8 -*-
-
- ret = pkg.uptodate('dummy', test=True)
- self.assertTrue(ret['result'])
- self.assertDictEqual(ret['changes'], self.pkgs)
-@@ -78,7 +77,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
- pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
- })
+-# Import Python libs
+-from __future__ import absolute_import
+-
+ import salt.states.pkg as pkg
+-
+-# Import Salt Libs
+ from salt.ext import six
+ from salt.ext.six.moves import zip
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
+@@ -35,7 +26,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ list_upgrades = MagicMock(
+ return_value={
+- pkgname: pkgver["new"] for pkgname, pkgver in six.iteritems(self.pkgs)
++ pkgname: pkgver["new"] for pkgname, pkgver in self.pkgs.items()
+ }
+ )
upgrade = MagicMock(return_value=self.pkgs)
-- version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
-+ version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
+@@ -75,7 +66,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
- with patch.dict(pkg.__salt__,
- {'pkg.list_upgrades': list_upgrades,
-@@ -157,7 +156,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
- pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
- })
+ list_upgrades = MagicMock(
+ return_value={
+- pkgname: pkgver["new"] for pkgname, pkgver in six.iteritems(self.pkgs)
++ pkgname: pkgver["new"] for pkgname, pkgver in self.pkgs.items()
+ }
+ )
+ upgrade = MagicMock(return_value=self.pkgs)
+@@ -92,9 +83,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=false
+ with patch.dict(pkg.__opts__, {"test": False}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertTrue(ret["result"])
+ self.assertDictEqual(ret["changes"], pkgs)
+@@ -102,9 +91,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=true
+ with patch.dict(pkg.__opts__, {"test": True}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertIsNone(ret["result"])
+ self.assertDictEqual(ret["changes"], pkgs)
+@@ -146,9 +133,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=false
+ with patch.dict(pkg.__opts__, {"test": False}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertTrue(ret["result"])
+ self.assertDictEqual(ret["changes"], {})
+@@ -156,9 +141,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=true
+ with patch.dict(pkg.__opts__, {"test": True}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertTrue(ret["result"])
+ self.assertDictEqual(ret["changes"], {})
+@@ -176,7 +159,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+
+ list_upgrades = MagicMock(
+ return_value={
+- pkgname: pkgver["new"] for pkgname, pkgver in six.iteritems(self.pkgs)
++ pkgname: pkgver["new"] for pkgname, pkgver in self.pkgs.items()
+ }
+ )
upgrade = MagicMock(return_value={})
-- version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
-+ version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
-
- with patch.dict(pkg.__salt__,
- {'pkg.list_upgrades': list_upgrades,
+@@ -193,9 +176,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=false
+ with patch.dict(pkg.__opts__, {"test": False}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertFalse(ret["result"])
+ self.assertDictEqual(ret["changes"], {})
+@@ -203,9 +184,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
+ # Run state with test=true
+ with patch.dict(pkg.__opts__, {"test": True}):
+ ret = pkg.uptodate(
+- "dummy",
+- test=True,
+- pkgs=[pkgname for pkgname in six.iterkeys(self.pkgs)],
++ "dummy", test=True, pkgs=[pkgname for pkgname in self.pkgs.keys()],
+ )
+ self.assertIsNone(ret["result"])
+ self.assertDictEqual(ret["changes"], pkgs)
diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
-index c0877ff811..fe11cd0681 100644
+index 9f826e007f..863e2182b9 100644
--- a/tests/unit/test_loader.py
+++ b/tests/unit/test_loader.py
-@@ -128,6 +128,97 @@ class LazyLoaderTest(TestCase):
- self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
+@@ -215,6 +215,96 @@ class LazyLoaderUtilsTest(TestCase):
+ self.assertTrue(self.module_name + ".run" not in loader)
-+loader_template_module = '''
++loader_template_module = """
+import my_utils
+
+def run():
+ return my_utils.run()
-+'''
++"""
+
-+loader_template_utils = '''
++loader_template_utils = """
+def run():
+ return True
-+'''
++"""
+
+
+class LazyLoaderUtilsTest(TestCase):
-+ '''
++ """
+ Test the loader
-+ '''
-+ module_name = 'lazyloaderutilstest'
-+ utils_name = 'my_utils'
++ """
++
++ module_name = "lazyloaderutilstest"
++ utils_name = "my_utils"
+
+ @classmethod
+ def setUpClass(cls):
+ cls.opts = salt.config.minion_config(None)
-+ cls.opts['grains'] = salt.loader.grains(cls.opts)
++ cls.opts["grains"] = salt.loader.grains(cls.opts)
+ if not os.path.isdir(TMP):
+ os.makedirs(TMP)
+
+ def setUp(self):
+ # Setup the module
+ self.module_dir = tempfile.mkdtemp(dir=TMP)
-+ self.module_file = os.path.join(self.module_dir,
-+ '{}.py'.format(self.module_name))
-+ with salt.utils.files.fopen(self.module_file, 'w') as fh:
++ self.module_file = os.path.join(
++ self.module_dir, "{}.py".format(self.module_name)
++ )
++ with salt.utils.files.fopen(self.module_file, "w") as fh:
+ fh.write(salt.utils.stringutils.to_str(loader_template_module))
+ fh.flush()
+ os.fsync(fh.fileno())
+
+ self.utils_dir = tempfile.mkdtemp(dir=TMP)
-+ self.utils_file = os.path.join(self.utils_dir,
-+ '{}.py'.format(self.utils_name))
-+ with salt.utils.files.fopen(self.utils_file, 'w') as fh:
++ self.utils_file = os.path.join(self.utils_dir, "{}.py".format(self.utils_name))
++ with salt.utils.files.fopen(self.utils_file, "w") as fh:
+ fh.write(salt.utils.stringutils.to_str(loader_template_utils))
+ fh.flush()
+ os.fsync(fh.fileno())
@@ -7029,46 +1712,46 @@ index c0877ff811..fe11cd0681 100644
+ del cls.opts
+
+ def test_utils_found(self):
-+ '''
++ """
+ Test that the extra module directory is available for imports
-+ '''
++ """
+ loader = salt.loader.LazyLoader(
+ [self.module_dir],
+ copy.deepcopy(self.opts),
-+ tag='module',
-+ extra_module_dirs=[self.utils_dir])
-+ self.assertTrue(
-+ inspect.isfunction(
-+ loader[self.module_name + '.run']))
-+ self.assertTrue(loader[self.module_name + '.run']())
++ tag="module",
++ extra_module_dirs=[self.utils_dir],
++ )
++ self.assertTrue(inspect.isfunction(loader[self.module_name + ".run"]))
++ self.assertTrue(loader[self.module_name + ".run"]())
+
+ def test_utils_not_found(self):
-+ '''
++ """
+ Test that the extra module directory is not available for imports
-+ '''
++ """
+ loader = salt.loader.LazyLoader(
-+ [self.module_dir],
-+ copy.deepcopy(self.opts),
-+ tag='module')
-+ self.assertTrue(self.module_name + '.run' not in loader)
++ [self.module_dir], copy.deepcopy(self.opts), tag="module"
++ )
++ self.assertTrue(self.module_name + ".run" not in loader)
+
+
class LazyLoaderVirtualEnabledTest(TestCase):
- '''
+ """
Test the base loader of salt.
-@@ -1078,8 +1169,9 @@ class LoaderGlobalsTest(ModuleCase):
+@@ -1342,9 +1432,10 @@ class LoaderGlobalsTest(ModuleCase):
+ )
# Now, test each module!
- for item in global_vars:
+- for item in global_vars.values():
- for name in names:
- self.assertIn(name, list(item.keys()))
-+ if item['__name__'].startswith('salt.loaded'):
++ for item in global_vars:
++ if item["__name__"].startswith("salt.loaded"):
+ for name in names:
+ self.assertIn(name, list(item.keys()))
def test_auth(self):
- '''
+ """
--
-2.16.4
+2.29.2
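
The `LazyLoaderUtilsTest` added above exercises the loader's `extra_module_dirs` argument: a module loaded through the loader can import helpers from those extra directories, and without the argument the import fails and the module is skipped. A usage sketch under the same assumptions as the test (the paths and module name are placeholders; running it requires a Salt checkout):

    import copy

    import salt.config
    import salt.loader

    opts = salt.config.minion_config(None)
    opts["grains"] = salt.loader.grains(opts)

    # A module in /srv/modules that does "import my_utils" resolves it
    # from /srv/utils only because of extra_module_dirs.
    loader = salt.loader.LazyLoader(
        ["/srv/modules"],
        copy.deepcopy(opts),
        tag="module",
        extra_module_dirs=["/srv/utils"],
    )
    print("mymodule.run" in loader)
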
diff --git a/python3.8-compatibility-pr-s-235.patch b/python3.8-compatibility-pr-s-235.patch
index a2fdf15..ea66978 100644
--- a/python3.8-compatibility-pr-s-235.patch
+++ b/python3.8-compatibility-pr-s-235.patch
@@ -1,4 +1,4 @@
-From 1721978eb0644c90a834493449ee27e1e4de03e1 Mon Sep 17 00:00:00 2001
+From c205f1b0d091866408ee1eae324260480a1b70b4 Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 8 May 2020 15:56:35 +0200
Subject: [PATCH] Python3.8 compatibility PR's (#235)
@@ -138,46 +138,25 @@ Signed-off-by: Benjamin Drung
Co-authored-by: Pedro Algarvio
Co-authored-by: Benjamin Drung
---
- .ci/kitchen-archlts-py3 | 2 +-
- .pre-commit-config.yaml | 228 ++++++++++++++++++++
- noxfile.py | 4 +-
- pkg/windows/req.txt | 2 +-
- pkg/windows/req_testing.txt | 12 --
- pkg/windows/req_win.txt | 2 +-
- requirements/pytest.txt | 2 +-
- requirements/static/py3.5/darwin.txt | 2 +-
- requirements/static/py3.5/linux.txt | 2 +-
- requirements/static/py3.5/windows.txt | 6 +-
- requirements/static/py3.6/darwin.txt | 2 +-
- requirements/static/py3.6/linux.txt | 2 +-
- requirements/static/py3.6/windows.txt | 6 +-
- requirements/static/py3.7/darwin.txt | 2 +-
- requirements/static/py3.7/linux.txt | 2 +-
- requirements/static/py3.7/windows.txt | 5 +-
- requirements/static/py3.8/cloud.txt | 115 ++++++++++
- requirements/static/py3.8/darwin-crypto.txt | 8 +
- requirements/static/py3.8/darwin.txt | 123 +++++++++++
- requirements/static/py3.8/docs.txt | 30 +++
- requirements/static/py3.8/lint.txt | 16 ++
- requirements/static/py3.8/linux-crypto.txt | 8 +
- requirements/static/py3.8/linux.txt | 119 ++++++++++
- requirements/static/py3.9/cloud.txt | 115 ++++++++++
- requirements/static/py3.9/darwin-crypto.txt | 8 +
- requirements/static/py3.9/darwin.txt | 123 +++++++++++
- requirements/static/py3.9/docs.txt | 30 +++
- requirements/static/py3.9/lint.txt | 16 ++
- requirements/static/py3.9/linux-crypto.txt | 8 +
- requirements/static/py3.9/linux.txt | 119 ++++++++++
- salt/client/mixins.py | 61 +++---
- salt/ext/tornado/httputil.py | 9 +-
- salt/grains/core.py | 28 ++-
- salt/renderers/stateconf.py | 4 +-
- salt/utils/args.py | 14 +-
- salt/utils/decorators/path.py | 13 +-
- salt/utils/jinja.py | 24 ++-
- salt/utils/oset.py | 4 +-
- 38 files changed, 1193 insertions(+), 83 deletions(-)
- delete mode 100644 pkg/windows/req_testing.txt
+ requirements/static/py3.8/cloud.txt | 115 ++++++++++++++++++
+ requirements/static/py3.8/darwin-crypto.txt | 8 ++
+ requirements/static/py3.8/darwin.txt | 123 ++++++++++++++++++++
+ requirements/static/py3.8/docs.txt | 30 +++++
+ requirements/static/py3.8/lint.txt | 16 +++
+ requirements/static/py3.8/linux-crypto.txt | 8 ++
+ requirements/static/py3.8/linux.txt | 119 +++++++++++++++++++
+ requirements/static/py3.9/cloud.txt | 115 ++++++++++++++++++
+ requirements/static/py3.9/darwin-crypto.txt | 8 ++
+ requirements/static/py3.9/darwin.txt | 123 ++++++++++++++++++++
+ requirements/static/py3.9/docs.txt | 30 +++++
+ requirements/static/py3.9/lint.txt | 16 +++
+ requirements/static/py3.9/linux-crypto.txt | 8 ++
+ requirements/static/py3.9/linux.txt | 119 +++++++++++++++++++
+ salt/ext/tornado/httputil.py | 38 +++---
+ salt/utils/args.py | 60 ++++------
+ salt/utils/jinja.py | 7 ++
+ salt/utils/oset.py | 2 +
+ 18 files changed, 893 insertions(+), 52 deletions(-)
create mode 100644 requirements/static/py3.8/cloud.txt
create mode 100644 requirements/static/py3.8/darwin-crypto.txt
create mode 100644 requirements/static/py3.8/darwin.txt
@@ -193,501 +172,6 @@ Co-authored-by: Benjamin Drung
create mode 100644 requirements/static/py3.9/linux-crypto.txt
create mode 100644 requirements/static/py3.9/linux.txt
-diff --git a/.ci/kitchen-archlts-py3 b/.ci/kitchen-archlts-py3
-index f529c5e36c..b360a350a6 100644
---- a/.ci/kitchen-archlts-py3
-+++ b/.ci/kitchen-archlts-py3
-@@ -8,7 +8,7 @@ runTestSuite(
- golden_images_branch: 'master',
- jenkins_slave_label: 'kitchen-slave',
- nox_env_name: 'runtests-zeromq',
-- nox_passthrough_opts: '-n integration.modules.test_pkg',
-+ nox_passthrough_opts: '',
- python_version: 'py3',
- testrun_timeout: 6,
- use_spot_instances: true)
-diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
-index c4de5c154c..9b704b1426 100644
---- a/.pre-commit-config.yaml
-+++ b/.pre-commit-config.yaml
-@@ -416,6 +416,234 @@ repos:
- - --py-version=3.7
- - --platform=linux
-
-+ - id: pip-tools-compile
-+ alias: compile-linux-py3.8-zmq-requirements
-+ name: Linux Py3.8 ZeroMQ Requirements
-+ files: ^requirements/((base|zeromq|pytest)\.txt|static/linux\.in)$
-+ exclude: ^requirements/static/(centos-6|amzn-2018\.03|lint|cloud|docs|darwin|windows)\.in$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=linux
-+ - --include=requirements/base.txt
-+ - --include=requirements/zeromq.txt
-+ - --include=requirements/pytest.txt
-+
-+ - id: pip-tools-compile
-+ alias: compile-darwin-py3.8-zmq-requirements
-+ name: Darwin Py3.8 ZeroMQ Requirements
-+ files: ^(pkg/osx/(req|req_ext)\.txt|requirements/((base|zeromq|pytest)\.txt|static/darwin\.in))$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=darwin
-+ - --include=pkg/osx/req.txt
-+ - --include=pkg/osx/req_ext.txt
-+ - --include=requirements/base.txt
-+ - --include=requirements/zeromq.txt
-+ - --include=requirements/pytest.txt
-+ - --passthrough-line-from-input=^pyobjc(.*)$
-+
-+# Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
-+# - id: pip-tools-compile
-+# alias: compile-windows-py3.8-zmq-requirements
-+# name: Windows Py3.8 ZeroMQ Requirements
-+# files: ^(pkg/windows/(req|req_win)\.txt|requirements/((base|zeromq|pytest)\.txt|static/windows\.in))$
-+# args:
-+# - -v
-+# - --py-version=3.8
-+# - --platform=windows
-+# - --include=pkg/windows/req.txt
-+# - --include=pkg/windows/req_win.txt
-+# - --include=requirements/base.txt
-+# - --include=requirements/zeromq.txt
-+# - --include=requirements/pytest.txt
-+
-+ - id: pip-tools-compile
-+ alias: compile-cloud-py3.8-requirements
-+ name: Cloud Py3.8 Requirements
-+ files: ^requirements/(static/cloud\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+
-+ - id: pip-tools-compile
-+ alias: compile-doc-requirements
-+ name: Docs Py3.8 Requirements
-+ files: ^requirements/((base|zeromq|pytest)\.txt|static/docs\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=linux
-+
-+ - id: pip-tools-compile
-+ alias: compile-linux-crypto-py3.8-requirements
-+ name: Linux Py3.8 Crypto Requirements
-+ files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=linux
-+ - --out-prefix=linux
-+
-+ - id: pip-tools-compile
-+ alias: compile-darwin-crypto-py3.8-requirements
-+ name: Darwin Py3.8 Crypto Requirements
-+ files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=darwin
-+ - --out-prefix=darwin
-+
-+# Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
-+# - id: pip-tools-compile
-+# alias: compile-windows-crypto-py3.8-requirements
-+# name: Windows Py3.8 Crypto Requirements
-+# files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+# args:
-+# - -v
-+# - --py-version=3.8
-+# - --platform=windows
-+# - --out-prefix=windows
-+
-+ - id: pip-tools-compile
-+ alias: compile-lint-py3.8-requirements
-+ name: Lint Py3.8 Requirements
-+ files: ^requirements/static/lint\.in$
-+ args:
-+ - -v
-+ - --py-version=3.8
-+ - --platform=linux
-+
-+
-+ - id: pip-tools-compile
-+ alias: compile-linux-py3.9-zmq-requirements
-+ name: Linux Py3.9 ZeroMQ Requirements
-+ files: ^requirements/((base|zeromq|pytest)\.txt|static/linux\.in)$
-+ exclude: ^requirements/static/(centos-6|amzn-2018\.03|lint|cloud|docs|darwin|windows)\.in$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=linux
-+ - --include=requirements/base.txt
-+ - --include=requirements/zeromq.txt
-+ - --include=requirements/pytest.txt
-+
-+ - id: pip-tools-compile
-+ alias: compile-darwin-py3.9-zmq-requirements
-+ name: Darwin Py3.9 ZeroMQ Requirements
-+ files: ^(pkg/osx/(req|req_ext)\.txt|requirements/((base|zeromq|pytest)\.txt|static/darwin\.in))$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=darwin
-+ - --include=pkg/osx/req.txt
-+ - --include=pkg/osx/req_ext.txt
-+ - --include=requirements/base.txt
-+ - --include=requirements/zeromq.txt
-+ - --include=requirements/pytest.txt
-+ - --passthrough-line-from-input=^pyobjc(.*)$
-+
-+# Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
-+# - id: pip-tools-compile
-+# alias: compile-windows-py3.9-zmq-requirements
-+# name: Windows Py3.9 ZeroMQ Requirements
-+# files: ^(pkg/windows/(req|req_win)\.txt|requirements/((base|zeromq|pytest)\.txt|static/windows\.in))$
-+# args:
-+# - -v
-+# - --py-version=3.9
-+# - --platform=windows
-+# - --include=pkg/windows/req.txt
-+# - --include=pkg/windows/req_win.txt
-+# - --include=requirements/base.txt
-+# - --include=requirements/zeromq.txt
-+# - --include=requirements/pytest.txt
-+
-+ - id: pip-tools-compile
-+ alias: compile-cloud-py3.9-requirements
-+ name: Cloud Py3.9 Requirements
-+ files: ^requirements/(static/cloud\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+
-+ - id: pip-tools-compile
-+ alias: compile-doc-requirements
-+ name: Docs Py3.9 Requirements
-+ files: ^requirements/((base|zeromq|pytest)\.txt|static/docs\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=linux
-+
-+ - id: pip-tools-compile
-+ alias: compile-linux-crypto-py3.9-requirements
-+ name: Linux Py3.9 Crypto Requirements
-+ files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=linux
-+ - --out-prefix=linux
-+
-+ - id: pip-tools-compile
-+ alias: compile-darwin-crypto-py3.9-requirements
-+ name: Darwin Py3.9 Crypto Requirements
-+ files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=darwin
-+ - --out-prefix=darwin
-+
-+# Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
-+# - id: pip-tools-compile
-+# alias: compile-windows-crypto-py3.9-requirements
-+# name: Windows Py3.9 Crypto Requirements
-+# files: ^requirements/(crypto\.txt|static/crypto\.in)$
-+# args:
-+# - -v
-+# - --py-version=3.9
-+# - --platform=windows
-+# - --out-prefix=windows
-+
-+ - id: pip-tools-compile
-+ alias: compile-lint-py3.9-requirements
-+ name: Lint Py3.9 Requirements
-+ files: ^requirements/static/lint\.in$
-+ args:
-+ - -v
-+ - --py-version=3.9
-+ - --platform=linux
-+
-+ - repo: https://github.com/timothycrosley/isort
-+ rev: "1e78a9acf3110e1f9721feb591f89a451fc9876a"
-+ hooks:
-+ - id: isort
-+ additional_dependencies: ['toml']
-+ # This tells pre-commit not to pass files to isort.
-+ # This should be kept in sync with pyproject.toml
-+ exclude: >
-+ (?x)^(
-+ templates/.*|
-+ salt/ext/.*|
-+ tests/kitchen/.*
-+ )$
-+
-+ - repo: https://github.com/psf/black
-+ rev: stable
-+ hooks:
-+ - id: black
-+ # This tells pre-commit not to pass files to black.
-+ # This should be kept in sync with pyproject.toml
-+ exclude: >
-+ (?x)^(
-+ templates/.*|
-+ salt/ext/.*|
-+ tests/kitchen/.*
-+ )$
-+
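
The folded `exclude:` scalars above are Python regular expressions in verbose `(?x)` mode; pre-commit uses them to decide which paths isort and black skip. A small standard-library sketch of how such a pattern evaluates — the file names below are only illustrative:

    import re

    # Same shape as the pre-commit 'exclude' pattern above; (?x) lets the
    # regex span multiple lines, with whitespace inside the pattern ignored.
    exclude = re.compile(
        r"""(?x)^(
            templates/.*|
            salt/ext/.*|
            tests/kitchen/.*
        )$"""
    )

    print(bool(exclude.match("salt/ext/six.py")))     # True  -> file skipped
    print(bool(exclude.match("salt/utils/args.py")))  # False -> file checked
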
-
- - repo: https://github.com/saltstack/salt-nox-pre-commit
- rev: master
-diff --git a/noxfile.py b/noxfile.py
-index 355a069d92..65b013eb04 100644
---- a/noxfile.py
-+++ b/noxfile.py
-@@ -39,7 +39,7 @@ SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, 'tests', 'support', 'coverage')
- IS_DARWIN = sys.platform.lower().startswith('darwin')
- IS_WINDOWS = sys.platform.lower().startswith('win')
- # Python versions to run against
--_PYTHON_VERSIONS = ('2', '2.7', '3', '3.4', '3.5', '3.6', '3.7')
-+_PYTHON_VERSIONS = ("2", "2.7", "3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9")
-
- # Nox options
- # Reuse existing virtualenvs
-@@ -167,7 +167,7 @@ def _install_system_packages(session):
- '{id}-{version_parts[major]}'.format(**distro)
- ]
- version_info = _get_session_python_version_info(session)
-- py_version_keys = [
-+ py_version_keys = [
- '{}'.format(*version_info),
- '{}.{}'.format(*version_info)
- ]
-diff --git a/pkg/windows/req.txt b/pkg/windows/req.txt
-index 8b443e1717..3f2c1628d2 100644
---- a/pkg/windows/req.txt
-+++ b/pkg/windows/req.txt
-@@ -1,6 +1,6 @@
- -r req_win.txt
- backports-abc==0.5; python_version < '3.0'
--backports.ssl-match-hostname==3.7.0.1
-+backports.ssl-match-hostname==3.7.0.1; python_version < '3.7'
- certifi
- cffi==1.12.2
- CherryPy==17.4.1
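
The `; python_version < '3.7'` suffix added above is a PEP 508 environment marker: pip evaluates it at install time and only installs the pinned backport on interpreters that still need it. A minimal sketch of that evaluation, assuming the third-party `packaging` library (not part of this diff):

    import sys

    from packaging.markers import Marker

    marker = Marker("python_version < '3.7'")

    # True on 3.6 and older, False on newer interpreters, so pip skips
    # the backport wherever the standard library suffices.
    print(sys.version_info[:3], marker.evaluate())
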
-diff --git a/pkg/windows/req_testing.txt b/pkg/windows/req_testing.txt
-deleted file mode 100644
-index 7682a98c0c..0000000000
---- a/pkg/windows/req_testing.txt
-+++ /dev/null
-@@ -1,12 +0,0 @@
--mock
--boto
--boto3
--moto
--SaltPyLint>=v2017.6.22
--apache-libcloud
--virtualenv
--
--# Needed for archive, which is gated for Redhat
--# rarfile
--# Needed for keystone
--# python-keystoneclient
-diff --git a/pkg/windows/req_win.txt b/pkg/windows/req_win.txt
-index 7f279af526..8887884a87 100644
---- a/pkg/windows/req_win.txt
-+++ b/pkg/windows/req_win.txt
-@@ -1,2 +1,2 @@
--pywin32==224
-+pywin32==227
- WMI==1.4.9
-diff --git a/requirements/pytest.txt b/requirements/pytest.txt
-index 45aa4dc085..52dd16fc09 100644
---- a/requirements/pytest.txt
-+++ b/requirements/pytest.txt
-@@ -1,7 +1,7 @@
- mock >= 3.0.0
- # PyTest
- pytest >=4.6.6,<4.7 # PyTest 4.6.x are the last Py2 and Py3 releases
--pytest-salt >= 2019.12.27
-+pytest-salt >= 2020.1.27
- pytest-tempdir >= 2019.10.12
- pytest-helpers-namespace >= 2019.1.8
- pytest-salt-runtests-bridge >= 2019.7.10
-diff --git a/requirements/static/py3.5/darwin.txt b/requirements/static/py3.5/darwin.txt
-index ac04286204..d01d1c93ff 100644
---- a/requirements/static/py3.5/darwin.txt
-+++ b/requirements/static/py3.5/darwin.txt
-@@ -88,7 +88,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-diff --git a/requirements/static/py3.5/linux.txt b/requirements/static/py3.5/linux.txt
-index c3611cfbcc..9309059b6c 100644
---- a/requirements/static/py3.5/linux.txt
-+++ b/requirements/static/py3.5/linux.txt
-@@ -85,7 +85,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0 # via botocore, croniter, kubernetes, moto, vcert
-diff --git a/requirements/static/py3.5/windows.txt b/requirements/static/py3.5/windows.txt
-index 3c07a12f23..7918945e00 100644
---- a/requirements/static/py3.5/windows.txt
-+++ b/requirements/static/py3.5/windows.txt
-@@ -10,7 +10,7 @@ atomicwrites==1.3.0 # via pytest
- attrs==19.1.0 # via pytest
- aws-xray-sdk==0.95 # via moto
- backports.functools-lru-cache==1.5 # via cheroot
--backports.ssl-match-hostname==3.7.0.1
-+backports.ssl-match-hostname==3.7.0.1 ; python_version < "3.7"
- boto3==1.9.132
- boto==2.49.0
- botocore==1.12.132 # via boto3, moto, s3transfer
-@@ -78,7 +78,7 @@ pyopenssl==19.0.0
- pyparsing==2.4.5 # via packaging
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-@@ -88,7 +88,7 @@ python-jose==2.0.2 # via moto
- pythonnet==2.3.0
- pytz==2019.1 # via moto, tempora
- pyvmomi==6.7.1.2018.12
--pywin32==224
-+pywin32==227
- pyyaml==5.1.2
- pyzmq==18.0.1 ; python_version != "3.4"
- requests==2.21.0
-diff --git a/requirements/static/py3.6/darwin.txt b/requirements/static/py3.6/darwin.txt
-index a6dd0a120b..2dc822beb8 100644
---- a/requirements/static/py3.6/darwin.txt
-+++ b/requirements/static/py3.6/darwin.txt
-@@ -87,7 +87,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-diff --git a/requirements/static/py3.6/linux.txt b/requirements/static/py3.6/linux.txt
-index 247bdc6e2b..e5eead5572 100644
---- a/requirements/static/py3.6/linux.txt
-+++ b/requirements/static/py3.6/linux.txt
-@@ -84,7 +84,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0 # via botocore, croniter, kubernetes, moto, vcert
-diff --git a/requirements/static/py3.6/windows.txt b/requirements/static/py3.6/windows.txt
-index 1c9b6903c6..83896f9d3f 100644
---- a/requirements/static/py3.6/windows.txt
-+++ b/requirements/static/py3.6/windows.txt
-@@ -10,7 +10,7 @@ atomicwrites==1.3.0 # via pytest
- attrs==19.1.0 # via pytest
- aws-xray-sdk==0.95 # via moto
- backports.functools-lru-cache==1.5 # via cheroot
--backports.ssl-match-hostname==3.7.0.1
-+backports.ssl-match-hostname==3.7.0.1 ; python_version < "3.7"
- boto3==1.9.132
- boto==2.49.0
- botocore==1.12.132 # via boto3, moto, s3transfer
-@@ -77,7 +77,7 @@ pyopenssl==19.0.0
- pyparsing==2.4.5 # via packaging
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-@@ -87,7 +87,7 @@ python-jose==2.0.2 # via moto
- pythonnet==2.3.0
- pytz==2019.1 # via moto, tempora
- pyvmomi==6.7.1.2018.12
--pywin32==224
-+pywin32==227
- pyyaml==5.1.2
- pyzmq==18.0.1 ; python_version != "3.4"
- requests==2.21.0
-diff --git a/requirements/static/py3.7/darwin.txt b/requirements/static/py3.7/darwin.txt
-index 935b110db2..616563d7b6 100644
---- a/requirements/static/py3.7/darwin.txt
-+++ b/requirements/static/py3.7/darwin.txt
-@@ -86,7 +86,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-diff --git a/requirements/static/py3.7/linux.txt b/requirements/static/py3.7/linux.txt
-index 17e9bc785a..92eedc94d5 100644
---- a/requirements/static/py3.7/linux.txt
-+++ b/requirements/static/py3.7/linux.txt
-@@ -84,7 +84,7 @@ pyparsing==2.4.5 # via packaging
- pyserial==3.4 # via junos-eznc
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0 # via botocore, croniter, kubernetes, moto, vcert
-diff --git a/requirements/static/py3.7/windows.txt b/requirements/static/py3.7/windows.txt
-index 06c36960b3..d6499eaacd 100644
---- a/requirements/static/py3.7/windows.txt
-+++ b/requirements/static/py3.7/windows.txt
-@@ -10,7 +10,6 @@ atomicwrites==1.3.0 # via pytest
- attrs==19.1.0 # via pytest
- aws-xray-sdk==0.95 # via moto
- backports.functools-lru-cache==1.5 # via cheroot
--backports.ssl-match-hostname==3.7.0.1
- boto3==1.9.132
- boto==2.49.0
- botocore==1.12.132 # via boto3, moto, s3transfer
-@@ -77,7 +76,7 @@ pyopenssl==19.0.0
- pyparsing==2.4.5 # via packaging
- pytest-helpers-namespace==2019.1.8
- pytest-salt-runtests-bridge==2019.7.10
--pytest-salt==2019.12.27
-+pytest-salt==2020.1.27
- pytest-tempdir==2019.10.12
- pytest==4.6.6
- python-dateutil==2.8.0
-@@ -87,7 +86,7 @@ python-jose==2.0.2 # via moto
- pythonnet==2.3.0
- pytz==2019.1 # via moto, tempora
- pyvmomi==6.7.1.2018.12
--pywin32==224
-+pywin32==227
- pyyaml==5.1.2
- pyzmq==18.0.1 ; python_version != "3.4"
- requests==2.21.0
diff --git a/requirements/static/py3.8/cloud.txt b/requirements/static/py3.8/cloud.txt
new file mode 100644
index 0000000000..ab03f2b2f3
@@ -1610,103 +1094,21 @@ index 0000000000..b7eb8320de
+wrapt==1.11.1 # via aws-xray-sdk
+xmltodict==0.12.0 # via moto
+zc.lockfile==1.4 # via cherrypy
-diff --git a/salt/client/mixins.py b/salt/client/mixins.py
-index 131aa1e3de..8ca8c7fbf6 100644
---- a/salt/client/mixins.py
-+++ b/salt/client/mixins.py
-@@ -4,7 +4,9 @@ A collection of mixins useful for the various *Client interfaces
- '''
-
- # Import Python libs
--from __future__ import absolute_import, print_function, with_statement, unicode_literals
-+from __future__ import absolute_import, print_function, unicode_literals, with_statement
-+
-+import copy as pycopy
- import fnmatch
- import signal
- import logging
-@@ -34,31 +36,40 @@ from salt.ext import six
-
- # Import 3rd-party libs
- import salt.ext.tornado.stack_context
-+try:
-+ from collections.abc import Mapping, MutableMapping
-+except ImportError:
-+ # pylint: disable=no-name-in-module
-+ from collections import Mapping, MutableMapping
-+
-
- log = logging.getLogger(__name__)
-
--CLIENT_INTERNAL_KEYWORDS = frozenset([
-- 'client',
-- 'cmd',
-- 'eauth',
-- 'fun',
-- 'kwarg',
-- 'match',
-- 'token',
-- '__jid__',
-- '__tag__',
-- '__user__',
-- 'username',
-- 'password',
-- 'full_return',
-- 'print_event'
--])
--
--
--class ClientFuncsDict(collections.MutableMapping):
-- '''
-+CLIENT_INTERNAL_KEYWORDS = frozenset(
-+ [
-+ "client",
-+ "cmd",
-+ "eauth",
-+ "fun",
-+ "kwarg",
-+ "match",
-+ "token",
-+ "__jid__",
-+ "__tag__",
-+ "__user__",
-+ "username",
-+ "password",
-+ "full_return",
-+ "print_event",
-+ ]
-+)
-+
-+
-+class ClientFuncsDict(MutableMapping):
-+ """
- Class to make a read-only dict for accessing runner funcs "directly"
-- '''
-+ """
-+
- def __init__(self, client):
- self.client = client
-
-@@ -141,9 +152,9 @@ class SyncClientMixin(object):
- crypt='clear',
- usage='master_call') as channel:
- ret = channel.send(load)
-- if isinstance(ret, collections.Mapping):
-- if 'error' in ret:
-- salt.utils.error.raise_error(**ret['error'])
-+ if isinstance(ret, Mapping):
-+ if "error" in ret:
-+ salt.utils.error.raise_error(**ret["error"])
- return ret
-
- def cmd_sync(self, low, timeout=None, full_return=False):
diff --git a/salt/ext/tornado/httputil.py b/salt/ext/tornado/httputil.py
-index d49733481a..c5b9c242d5 100644
+index c7a5ac7c3c..35ed279143 100644
--- a/salt/ext/tornado/httputil.py
+++ b/salt/ext/tornado/httputil.py
-@@ -36,6 +36,13 @@ from salt.ext.tornado.escape import native_str, parse_qs_bytes, utf8
+@@ -21,7 +21,6 @@ via `tornado.web.RequestHandler.request`.
+ """
+ # pylint: skip-file
+
+-from __future__ import absolute_import, division, print_function
+
+ import calendar
+ import collections
+@@ -37,6 +36,13 @@ from salt.ext.tornado.escape import native_str, parse_qs_bytes, utf8
from salt.ext.tornado.log import gen_log
- from salt.ext.tornado.util import ObjectDict, PY3
+ from salt.ext.tornado.util import PY3, ObjectDict
+try:
+ from collections.abc import MutableMapping
@@ -1718,183 +1120,193 @@ index d49733481a..c5b9c242d5 100644
if PY3:
import http.cookies as Cookie
from http.client import responses
-@@ -104,7 +111,7 @@ class _NormalizedHeaderCache(dict):
- _normalized_headers = _NormalizedHeaderCache(1000)
+@@ -87,7 +93,7 @@ class _NormalizedHeaderCache(dict):
+ """
+
+ def __init__(self, size):
+- super(_NormalizedHeaderCache, self).__init__()
++ super().__init__()
+ self.size = size
+ self.queue = collections.deque()
+
+@@ -244,13 +250,13 @@ class HTTPHeaders(MutableMapping):
+ def __str__(self):
+ lines = []
+ for name, value in self.get_all():
+- lines.append("%s: %s\n" % (name, value))
++ lines.append("{}: {}\n".format(name, value))
+ return "".join(lines)
+
+ __unicode__ = __str__
--class HTTPHeaders(collections.MutableMapping):
-+class HTTPHeaders(MutableMapping):
- """A dictionary that maintains ``Http-Header-Case`` for all keys.
+-class HTTPServerRequest(object):
++class HTTPServerRequest:
+ """A single HTTP request.
- Supports multiple values per key via a pair of new methods,
-diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 358b66fdb0..38290d034a 100644
---- a/salt/grains/core.py
-+++ b/salt/grains/core.py
-@@ -47,13 +47,29 @@ try:
- # Extend the default list of supported distros. This will be used for the
- # /etc/DISTRO-release checking that is part of linux_distribution()
- from platform import _supported_dists
-- _supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
-- 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
-+
-+ _supported_dists += (
-+ "arch",
-+ "mageia",
-+ "meego",
-+ "vmware",
-+ "bluewhite64",
-+ "slamd64",
-+ "ovs",
-+ "system",
-+ "mint",
-+ "oracle",
-+ "void",
-+ )
+ All attributes are type `str` unless otherwise noted.
+@@ -485,8 +491,8 @@ class HTTPServerRequest(object):
- def linux_distribution(**kwargs):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
-- return _deprecated_linux_distribution(supported_dists=_supported_dists, **kwargs)
-+ return _deprecated_linux_distribution(
-+ supported_dists=_supported_dists, **kwargs
-+ )
-+
-+
- except ImportError:
- from distro import linux_distribution
+ def __repr__(self):
+ attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
+- args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
+- return "%s(%s, headers=%s)" % (
++ args = ", ".join(["{}={!r}".format(n, getattr(self, n)) for n in attrs])
++ return "{}({}, headers={})".format(
+ self.__class__.__name__,
+ args,
+ dict(self.headers),
+@@ -512,7 +518,7 @@ class HTTPOutputError(Exception):
+ pass
-@@ -1974,9 +1990,9 @@ def os_data():
- 'Getting OS name, release, and codename from '
- 'platform.linux_distribution()'
+
+-class HTTPServerConnectionDelegate(object):
++class HTTPServerConnectionDelegate:
+ """Implement this interface to handle requests from `.HTTPServer`.
+
+ .. versionadded:: 4.0
+@@ -539,7 +545,7 @@ class HTTPServerConnectionDelegate(object):
+ pass
+
+
+-class HTTPMessageDelegate(object):
++class HTTPMessageDelegate:
+ """Implement this interface to handle an HTTP request or response.
+
+ .. versionadded:: 4.0
+@@ -580,7 +586,7 @@ class HTTPMessageDelegate(object):
+ pass
+
+
+-class HTTPConnection(object):
++class HTTPConnection:
+ """Applications use this interface to write their responses.
+
+ .. versionadded:: 4.0
+@@ -640,7 +646,7 @@ def url_concat(url, args):
+ parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
+ parsed_query.extend(args)
+ else:
+- err = "'args' parameter should be dict, list or tuple. Not {0}".format(
++ err = "'args' parameter should be dict, list or tuple. Not {}".format(
+ type(args)
)
-- (osname, osrelease, oscodename) = \
-- [x.strip('"').strip("'") for x in
-- linux_distribution()]
-+ (osname, osrelease, oscodename) = [
-+ x.strip('"').strip("'") for x in linux_distribution()
-+ ]
- # Try to assign these three names based on the lsb info, they tend to
- # be more accurate than what python gets from /etc/DISTRO-release.
- # It's worth noting that Ubuntu has patched their Python distribution
-diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py
-index 5c8a8322ed..97308087f5 100644
---- a/salt/renderers/stateconf.py
-+++ b/salt/renderers/stateconf.py
-@@ -223,9 +223,9 @@ def render(input, saltenv='base', sls='', argline='', **kws):
- if STATE_CONF:
- tmplctx = STATE_CONF.copy()
- if tmplctx:
-- prefix = sls + '::'
-+ prefix = sls + "::"
- tmplctx = {
-- k[len(prefix):] if k.startswith(prefix) else k: v
-+ k[len(prefix) :] if k.startswith(prefix) else k: v
- for k, v in six.iteritems(tmplctx)
- }
+ raise TypeError(err)
+@@ -733,7 +739,7 @@ def _get_content_range(start, end, total):
+ """
+ start = start or 0
+ end = (end or total) - 1
+- return "bytes %s-%s/%s" % (start, end, total)
++ return "bytes {}-{}/{}".format(start, end, total)
+
+
+ def _int_or_none(val):
+@@ -950,7 +956,7 @@ def _encode_header(key, pdict):
+ out.append(k)
else:
+ # TODO: quote if necessary.
+- out.append("%s=%s" % (k, v))
++ out.append("{}={}".format(k, v))
+ return "; ".join(out)
+
+
+@@ -1044,13 +1050,13 @@ def parse_cookie(cookie):
+ .. versionadded:: 4.4.2
+ """
+ cookiedict = {}
+- for chunk in cookie.split(str(";")):
+- if str("=") in chunk:
+- key, val = chunk.split(str("="), 1)
++ for chunk in cookie.split(";"):
++ if "=" in chunk:
++ key, val = chunk.split("=", 1)
+ else:
+ # Assume an empty name per
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
+- key, val = str(""), chunk
++ key, val = "", chunk
+ key, val = key.strip(), val.strip()
+ if key or val:
+ # unquote using Python's algorithm.
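
For orientation, here is the cookie-splitting logic the modernized hunk above touches, as a self-contained stand-in — simplified in that the real tornado helper also unquotes each value:

    def parse_cookie(cookie):
        # Simplified reimplementation of the helper shown above; the real
        # tornado version additionally unquotes values.
        cookiedict = {}
        for chunk in cookie.split(";"):
            if "=" in chunk:
                key, val = chunk.split("=", 1)
            else:
                # Empty name per https://bugzilla.mozilla.org/show_bug.cgi?id=169091
                key, val = "", chunk
            key, val = key.strip(), val.strip()
            if key or val:
                cookiedict[key] = val
        return cookiedict

    print(parse_cookie("session=abc123; theme=dark"))
    # {'session': 'abc123', 'theme': 'dark'}
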
diff --git a/salt/utils/args.py b/salt/utils/args.py
-index 666a502498..f7961cf012 100644
+index 102402500c..e43c527015 100644
--- a/salt/utils/args.py
+++ b/salt/utils/args.py
-@@ -238,8 +238,13 @@ if six.PY3:
+@@ -263,43 +263,31 @@ def get_function_argspec(func, is_class_method=None):
+ if hasattr(func, "__wrapped__"):
+ func = func.__wrapped__
+- if is_class_method is True:
+- aspec = _getargspec(func)
+- del aspec.args[0] # self
+- elif inspect.isfunction(func):
+- aspec = _getargspec(func)
+- elif inspect.ismethod(func):
+- aspec = _getargspec(func)
+- del aspec.args[0] # self
+- elif isinstance(func, object):
+- aspec = _getargspec(func.__call__)
+- del aspec.args[0] # self
++ try:
++ sig = inspect.signature(func)
++ except TypeError:
++ raise TypeError("Cannot inspect argument list for '{}'".format(func))
+ else:
+- try:
+- sig = inspect.signature(func)
+- except TypeError:
+- raise TypeError("Cannot inspect argument list for '{}'".format(func))
+- else:
+- # argspec-related functions are deprecated in Python 3 in favor of
+- # the new inspect.Signature class, and will be removed at some
+- # point in the Python 3 lifecycle. So, build a namedtuple which
+- # looks like the result of a Python 2 argspec.
+- _ArgSpec = namedtuple("ArgSpec", "args varargs keywords defaults")
+- args = []
+- defaults = []
+- varargs = keywords = None
+- for param in sig.parameters.values():
+- if param.kind == param.POSITIONAL_OR_KEYWORD:
+- args.append(param.name)
+- if param.default is not inspect._empty:
+- defaults.append(param.default)
+- elif param.kind == param.VAR_POSITIONAL:
+- varargs = param.name
+- elif param.kind == param.VAR_KEYWORD:
+- keywords = param.name
+- if is_class_method:
+- del args[0]
+- aspec = _ArgSpec(args, varargs, keywords, tuple(defaults) or None)
++ # argspec-related functions are deprecated in Python 3 in favor of
++ # the new inspect.Signature class, and will be removed at some
++ # point in the Python 3 lifecycle. So, build a namedtuple which
++ # looks like the result of a Python 2 argspec.
++ _ArgSpec = namedtuple("ArgSpec", "args varargs keywords defaults")
++ args = []
++ defaults = []
++ varargs = keywords = None
++ for param in sig.parameters.values():
++ if param.kind == param.POSITIONAL_OR_KEYWORD:
++ args.append(param.name)
++ if param.default is not inspect._empty:
++ defaults.append(param.default)
++ elif param.kind == param.VAR_POSITIONAL:
++ varargs = param.name
++ elif param.kind == param.VAR_KEYWORD:
++ keywords = param.name
++ if is_class_method:
++ del args[0]
++ aspec = _ArgSpec(args, varargs, keywords, tuple(defaults) or None)
- def get_function_argspec(func, is_class_method=None):
-- '''
-- A small wrapper around getargspec that also supports callable classes
-+ """
-+ A small wrapper around getargspec that also supports callable classes and wrapped functions
-+
-+ If the given function is a wrapper around another function (i.e. has a
-+ ``__wrapped__`` attribute), return the functions specification of the underlying
-+ function.
-+
- :param is_class_method: Pass True if you are sure that the function being passed
- is a class method. The reason for this is that on Python 3
- ``inspect.ismethod`` only returns ``True`` for bound methods,
-@@ -247,10 +252,13 @@ def get_function_argspec(func, is_class_method=None):
- methods. So, on Python 3, in case of a class method, you'd
- need the class to which the function belongs to be instantiated
- and this is not always wanted.
-- '''
-+ """
- if not callable(func):
- raise TypeError('{0} is not a callable'.format(func))
+ return aspec
-+ if hasattr(func, "__wrapped__"):
-+ func = func.__wrapped__
-+
- if six.PY2:
- if is_class_method is True:
- aspec = inspect.getargspec(func)
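
The replacement hunk above drops the Python-2 `getargspec` branches and rebuilds the same ArgSpec-shaped namedtuple from `inspect.signature`. A standalone sketch of that reconstruction (plain Python; `argspec_from_signature` is a name chosen here, not Salt's):

    import inspect
    from collections import namedtuple

    ArgSpec = namedtuple("ArgSpec", "args varargs keywords defaults")

    def argspec_from_signature(func, is_class_method=False):
        # Walk the Signature parameters and rebuild a Python-2-style
        # argspec, mirroring the patched get_function_argspec().
        sig = inspect.signature(func)
        args, defaults = [], []
        varargs = keywords = None
        for param in sig.parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                args.append(param.name)
                if param.default is not inspect.Parameter.empty:
                    defaults.append(param.default)
            elif param.kind == param.VAR_POSITIONAL:
                varargs = param.name
            elif param.kind == param.VAR_KEYWORD:
                keywords = param.name
        if is_class_method:
            del args[0]  # drop the bound 'self'/'cls' slot
        return ArgSpec(args, varargs, keywords, tuple(defaults) or None)

    def demo(a, b=1, *rest, **kw):
        return a

    print(argspec_from_signature(demo))
    # ArgSpec(args=['a', 'b'], varargs='rest', keywords='kw', defaults=(1,))
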
-diff --git a/salt/utils/decorators/path.py b/salt/utils/decorators/path.py
-index 4adacf0e4e..8ee7fb1d11 100644
---- a/salt/utils/decorators/path.py
-+++ b/salt/utils/decorators/path.py
-@@ -4,10 +4,11 @@ Decorators for salt.utils.path
- '''
- from __future__ import absolute_import, print_function, unicode_literals
-
-+import functools
-+
- # Import Salt libs
- import salt.utils.path
- from salt.exceptions import CommandNotFoundError
--from salt.utils.decorators.signature import identical_signature_wrapper
-
-
- def which(exe):
-@@ -15,13 +16,16 @@ def which(exe):
- Decorator wrapper for salt.utils.path.which
- '''
- def wrapper(function):
-+ @functools.wraps(function)
- def wrapped(*args, **kwargs):
- if salt.utils.path.which(exe) is None:
- raise CommandNotFoundError(
- 'The \'{0}\' binary was not found in $PATH.'.format(exe)
- )
- return function(*args, **kwargs)
-- return identical_signature_wrapper(function, wrapped)
-+
-+ return wrapped
-+
- return wrapper
-
-
-@@ -30,6 +34,7 @@ def which_bin(exes):
- Decorator wrapper for salt.utils.path.which_bin
- '''
- def wrapper(function):
-+ @functools.wraps(function)
- def wrapped(*args, **kwargs):
- if salt.utils.path.which_bin(exes) is None:
- raise CommandNotFoundError(
-@@ -39,5 +44,7 @@ def which_bin(exes):
- )
- )
- return function(*args, **kwargs)
-- return identical_signature_wrapper(function, wrapped)
-+
-+ return wrapped
-+
- return wrapper
diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py
-index 6e4261e68e..31ce179808 100644
+index c5a30ba04e..997d4b1697 100644
--- a/salt/utils/jinja.py
+++ b/salt/utils/jinja.py
-@@ -6,7 +6,6 @@ Jinja loading utils to enable a more powerful backend for jinja templates
- # Import python libs
- from __future__ import absolute_import, unicode_literals
- import atexit
--import collections
- import logging
- import os.path
- import pipes
-@@ -37,6 +36,13 @@ import salt.utils.yaml
- from salt.utils.decorators.jinja import jinja_filter, jinja_test, jinja_global
+@@ -34,6 +34,13 @@ from salt.utils.decorators.jinja import jinja_filter, jinja_global, jinja_test
from salt.utils.odict import OrderedDict
+ from salt.utils.versions import LooseVersion
+try:
+ from collections.abc import Hashable
@@ -1905,82 +1317,15 @@ index 6e4261e68e..31ce179808 100644
+
log = logging.getLogger(__name__)
- __all__ = [
-@@ -329,7 +335,7 @@ def to_bool(val):
- return val.lower() in ('yes', '1', 'true')
- if isinstance(val, six.integer_types):
- return val > 0
-- if not isinstance(val, collections.Hashable):
-+ if not isinstance(val, Hashable):
- return len(val) > 0
- return False
-
-@@ -500,7 +506,7 @@ def unique(values):
- ['a', 'b', 'c']
- '''
- ret = None
-- if isinstance(values, collections.Hashable):
-+ if isinstance(values, Hashable):
- ret = set(values)
- else:
- ret = []
-@@ -564,8 +570,8 @@ def lst_avg(lst):
-
- 2.5
- '''
-- if not isinstance(lst, collections.Hashable):
-- return float(sum(lst)/len(lst))
-+ if not isinstance(lst, Hashable):
-+ return float(sum(lst) / len(lst))
- return float(lst)
-
-
-@@ -585,7 +591,7 @@ def union(lst1, lst2):
-
- [1, 2, 3, 4, 6]
- '''
-- if isinstance(lst1, collections.Hashable) and isinstance(lst2, collections.Hashable):
-+ if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
- return set(lst1) | set(lst2)
- return unique(lst1 + lst2)
-
-@@ -606,7 +612,7 @@ def intersect(lst1, lst2):
-
- [2, 4]
- '''
-- if isinstance(lst1, collections.Hashable) and isinstance(lst2, collections.Hashable):
-+ if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
- return set(lst1) & set(lst2)
- return unique([ele for ele in lst1 if ele in lst2])
-
-@@ -627,7 +633,7 @@ def difference(lst1, lst2):
-
- [1, 3, 6]
- '''
-- if isinstance(lst1, collections.Hashable) and isinstance(lst2, collections.Hashable):
-+ if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
- return set(lst1) - set(lst2)
- return unique([ele for ele in lst1 if ele not in lst2])
-
-@@ -648,7 +654,7 @@ def symmetric_difference(lst1, lst2):
-
- [1, 3]
- '''
-- if isinstance(lst1, collections.Hashable) and isinstance(lst2, collections.Hashable):
-+ if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
- return set(lst1) ^ set(lst2)
- return unique([ele for ele in union(lst1, lst2) if ele not in intersect(lst1, lst2)])
-
+ __all__ = ["SaltCacheLoader", "SerializerExtension"]
diff --git a/salt/utils/oset.py b/salt/utils/oset.py
-index cd4e88be40..aa0d2a3af5 100644
+index 31a6a4acca..84c262e15f 100644
--- a/salt/utils/oset.py
+++ b/salt/utils/oset.py
-@@ -21,10 +21,12 @@ Rob Speer's changes are as follows:
+@@ -20,9 +20,11 @@ Rob Speer's changes are as follows:
- added a __getstate__ and __setstate__ so it can be pickled
- added __getitem__
- '''
--from __future__ import absolute_import, unicode_literals, print_function
-+from __future__ import absolute_import, print_function, unicode_literals
+ """
+
try:
from collections.abc import MutableSet
@@ -1990,6 +1335,6 @@ index cd4e88be40..aa0d2a3af5 100644
SLICE_ALL = slice(None)
--
-2.26.1
+2.29.2
diff --git a/re-adding-function-to-test-for-root.patch b/re-adding-function-to-test-for-root.patch
index 330b7b7..5d5a12a 100644
--- a/re-adding-function-to-test-for-root.patch
+++ b/re-adding-function-to-test-for-root.patch
@@ -1,4 +1,4 @@
-From a6792f951f8090d8326de049eb48bb4a11291e06 Mon Sep 17 00:00:00 2001
+From 4a63d9226a426f4734ff6906b33da36671c4e4e4 Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 20 Mar 2020 13:58:54 +0100
Subject: [PATCH] Re-adding function to test for root
@@ -8,11 +8,11 @@ Subject: [PATCH] Re-adding function to test for root
1 file changed, 5 insertions(+)
diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py
-index 54b81f6972..b6cbd9e5cb 100644
+index b41e8daf17..ec9ecd40cb 100644
--- a/tests/unit/modules/test_rpm_lowpkg.py
+++ b/tests/unit/modules/test_rpm_lowpkg.py
-@@ -18,6 +18,11 @@ from tests.support.mock import (
- import salt.modules.rpm_lowpkg as rpm
+@@ -14,6 +14,11 @@ def _called_with_root(mock):
+ return cmd.startswith("rpm --root /")
+def _called_with_root(mock):
@@ -21,9 +21,9 @@ index 54b81f6972..b6cbd9e5cb 100644
+
+
class RpmTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ """
Test cases for salt.modules.rpm
--
-2.16.4
+2.29.2
diff --git a/read-repo-info-without-using-interpolation-bsc-11356.patch b/read-repo-info-without-using-interpolation-bsc-11356.patch
index 0e4bce2..8bcd86e 100644
--- a/read-repo-info-without-using-interpolation-bsc-11356.patch
+++ b/read-repo-info-without-using-interpolation-bsc-11356.patch
@@ -1,27 +1,29 @@
-From b502d73be38aeb509a6c5324cdc9bb94d7220c0a Mon Sep 17 00:00:00 2001
+From c3a058842344dacd01b0a0c55483c22b35f449e8 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Thu, 7 Nov 2019 15:11:49 +0100
Subject: [PATCH] Read repo info without using interpolation
(bsc#1135656)
---
- salt/modules/zypperpkg.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ salt/modules/zypperpkg.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 5f3b6d6855..0c15214e5e 100644
+index b5621174a4..c3342ab6d1 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -1045,7 +1045,7 @@ def _get_repo_info(alias, repos_cfg=None, root=None):
+@@ -1111,7 +1111,9 @@ def _get_repo_info(alias, repos_cfg=None, root=None):
Get one repo meta-data.
- '''
+ """
try:
- meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
-+ meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias, raw=True))
- meta['alias'] = alias
- for key, val in six.iteritems(meta):
- if val in ['0', '1']:
++ meta = dict(
++ (repos_cfg or _get_configured_repos(root=root)).items(alias, raw=True)
++ )
+ meta["alias"] = alias
+ for key, val in meta.items():
+ if val in ["0", "1"]:
--
-2.16.4
+2.29.2
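
The `raw=True` added by this patch sidesteps configparser's `%`-interpolation, which fails on repo values containing literal percent signs — the bsc#1135656 failure mode. A minimal standard-library reproduction:

    import configparser

    # A .repo value containing a literal '%' (for example an URL-encoded
    # space) trips configparser's default BasicInterpolation.
    cfg = configparser.ConfigParser()
    cfg.read_string("[repo]\nbaseurl = http://example.com/pool%203/\n")

    try:
        dict(cfg.items("repo"))  # interpolation enabled: raises
    except configparser.InterpolationSyntaxError as exc:
        print("interpolation failed:", exc)

    # raw=True returns the stored value verbatim, which is what the
    # patched _get_repo_info() now relies on.
    print(dict(cfg.items("repo", raw=True)))
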
diff --git a/reintroducing-reverted-changes.patch b/reintroducing-reverted-changes.patch
index 2dea6c7..4f1ed30 100644
--- a/reintroducing-reverted-changes.patch
+++ b/reintroducing-reverted-changes.patch
@@ -1,4 +1,4 @@
-From da91692b5a6cc0b895fa2a1a3a6d0c21d9913ebf Mon Sep 17 00:00:00 2001
+From ac6a77b6c617e28a092398a16dd550d5ef70d7ea Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Wed, 25 Mar 2020 15:18:51 +0100
Subject: [PATCH] Reintroducing reverted changes
@@ -11,21 +11,21 @@ https://github.com/openSUSE/salt/commit/d0ef24d113bdaaa29f180031b5da384cffe08c64
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
-index 2224aba9a1..ba1d874e69 100644
+index 77d8b84896..3c9744e224 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
-@@ -253,7 +253,9 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
- if installed['wget'].get(names[name], False):
- installed['wget'][name] = installed['wget'].pop(names[name])
+@@ -293,7 +293,9 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin):
+ if installed["wget"].get(names[name], False):
+ installed["wget"][name] = installed["wget"].pop(names[name])
-- assert aptpkg.info_installed('wget') == installed
-+ del installed['wget']['status']
-+ self.assertEqual(aptpkg.info_installed('wget'), installed)
+- assert aptpkg.info_installed("wget") == installed
++ del installed["wget"]["status"]
++ self.assertEqual(aptpkg.info_installed("wget"), installed)
+ self.assertEqual(len(aptpkg.info_installed()), 1)
- @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)})
- def test_info_installed_attr(self):
+ @patch(
+ "salt.modules.aptpkg.__salt__",
--
-2.16.4
+2.29.2
diff --git a/remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch b/remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
index c6b4310..d19d4c4 100644
--- a/remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
+++ b/remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
@@ -1,9 +1,9 @@
-From dcaf5a98cfb4e4fd874dd0ec17630d8b7650f5f9 Mon Sep 17 00:00:00 2001
+From 06482f99ed8bad5ffe5fb67182fd3aea166b8a1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 19 Nov 2018 11:46:26 +0000
-Subject: [PATCH] Remove arch from name when pkg.list_pkgs is called with
- 'attr' (bsc#1114029)
+Subject: [PATCH] Remove arch from name when pkg.list_pkgs is called
+ with 'attr' (bsc#1114029)
Add unit tests for pkg_resource.format_pkg_list
@@ -37,229 +37,161 @@ Remove unnecessary lambda
Return None instead empty string for arch and release in pkg.list_pkgs
---
- salt/modules/aptpkg.py | 4 +--
- salt/modules/pkg_resource.py | 13 ++++-----
- salt/modules/yumpkg.py | 4 +--
- salt/modules/zypperpkg.py | 4 +--
- tests/unit/modules/test_pkg_resource.py | 2 +-
- tests/unit/modules/test_yumpkg.py | 51 ++-------------------------------
- tests/unit/modules/test_zypperpkg.py | 4 +--
- 7 files changed, 18 insertions(+), 64 deletions(-)
+ tests/unit/modules/test_zypperpkg.py | 49 +++++++++-------------------
+ 1 file changed, 16 insertions(+), 33 deletions(-)
-diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index 3b0d8423db..345b8422d9 100644
---- a/salt/modules/aptpkg.py
-+++ b/salt/modules/aptpkg.py
-@@ -206,7 +206,7 @@ def normalize_name(name):
- return name
-
-
--def parse_arch(name):
-+def parse_arch_from_name(name):
- '''
- Parse name and architecture from the specified package name.
-
-@@ -214,7 +214,7 @@ def parse_arch(name):
-
- .. code-block:: bash
-
-- salt '*' pkg.parse_arch zsh:amd64
-+ salt '*' pkg.parse_arch_from_name zsh:amd64
- '''
- try:
- _name, _arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
-diff --git a/salt/modules/pkg_resource.py b/salt/modules/pkg_resource.py
-index 8fa3a074fa..0c872f1805 100644
---- a/salt/modules/pkg_resource.py
-+++ b/salt/modules/pkg_resource.py
-@@ -312,18 +312,17 @@ def format_pkg_list(packages, versions_as_list, attr):
- ret = copy.deepcopy(packages)
- if attr:
- ret_attr = {}
-- requested_attr = {'epoch', 'version', 'release', 'arch', 'install_date', 'install_date_time_t'}
-+ requested_attr = set(['epoch', 'version', 'release', 'arch',
-+ 'install_date', 'install_date_time_t'])
-
- if attr != 'all':
- requested_attr &= set(attr + ['version'] + ['arch'])
-
- for name in ret:
-- if 'pkg.parse_arch' in __salt__:
-- _parse_arch = __salt__['pkg.parse_arch'](name)
-- else:
-- _parse_arch = {'name': name, 'arch': None}
-- _name = _parse_arch['name']
-- _arch = _parse_arch['arch']
-+ _parse_arch_from_name = __salt__.get('pkg.parse_arch_from_name', lambda pkgname: {'name': pkgname, 'arch': None})
-+ name_arch_d = _parse_arch_from_name(name)
-+ _name = name_arch_d['name']
-+ _arch = name_arch_d['arch']
-
- versions = []
- pkgname = None
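
The dropped hunk above fetched the parser via `__salt__.get(...)` with a lambda default, so package modules lacking `parse_arch_from_name` still work. The same fallback-dispatch pattern in isolation (the plain dict stands in for Salt's `__salt__` registry):

    # Look up an optional provider function, defaulting to a parser
    # that reports no architecture component in the package name.
    def _no_arch(pkgname):
        return {"name": pkgname, "arch": None}

    providers = {}  # stand-in for Salt's __salt__ function registry

    parse_arch_from_name = providers.get("pkg.parse_arch_from_name", _no_arch)
    print(parse_arch_from_name("zsh"))
    # {'name': 'zsh', 'arch': None}
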
-diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index c89d321a1b..b1257d0de0 100644
---- a/salt/modules/yumpkg.py
-+++ b/salt/modules/yumpkg.py
-@@ -442,7 +442,7 @@ def normalize_name(name):
- return name
-
-
--def parse_arch(name):
-+def parse_arch_from_name(name):
- '''
- Parse name and architecture from the specified package name.
-
-@@ -450,7 +450,7 @@ def parse_arch(name):
-
- .. code-block:: bash
-
-- salt '*' pkg.parse_arch zsh.x86_64
-+ salt '*' pkg.parse_arch_from_name zsh.x86_64
- '''
- _name, _arch = None, None
- try:
-diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 08a9c2ed4d..04a6a6872d 100644
---- a/salt/modules/zypperpkg.py
-+++ b/salt/modules/zypperpkg.py
-@@ -593,7 +593,7 @@ def info_available(*names, **kwargs):
- return ret
-
-
--def parse_arch(name):
-+def parse_arch_from_name(name):
- '''
- Parse name and architecture from the specified package name.
-
-@@ -601,7 +601,7 @@ def parse_arch(name):
-
- .. code-block:: bash
-
-- salt '*' pkg.parse_arch zsh.x86_64
-+ salt '*' pkg.parse_arch_from_name zsh.x86_64
- '''
- _name, _arch = None, None
- try:
-diff --git a/tests/unit/modules/test_pkg_resource.py b/tests/unit/modules/test_pkg_resource.py
-index 6bb647082c..d5ccb2a7a2 100644
---- a/tests/unit/modules/test_pkg_resource.py
-+++ b/tests/unit/modules/test_pkg_resource.py
-@@ -236,7 +236,7 @@ class PkgresTestCase(TestCase, LoaderModuleMockMixin):
- }
- ]
- }
-- with patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': NAME_ARCH_MAPPING.get}):
-+ with patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': NAME_ARCH_MAPPING.get}):
- if six.PY3:
- self.assertCountEqual(pkg_resource.format_pkg_list(packages, False, attr=['epoch', 'release']), expected_pkg_list)
- else:
-diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
-index 5e652b7e53..9fbe3d051e 100644
---- a/tests/unit/modules/test_yumpkg.py
-+++ b/tests/unit/modules/test_yumpkg.py
-@@ -107,7 +107,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
- patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}), \
-- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': yumpkg.parse_arch}):
-+ patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': yumpkg.parse_arch_from_name}):
- pkgs = yumpkg.list_pkgs(versions_as_list=True)
- for pkg_name, pkg_version in {
- 'python-urlgrabber': '3.10-8.el7',
-@@ -155,7 +155,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
- patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}), \
-- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': yumpkg.parse_arch}):
-+ patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': yumpkg.parse_arch_from_name}):
- pkgs = yumpkg.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
- for pkg_name, pkg_attr in {
- 'python-urlgrabber': {
-@@ -273,7 +273,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
- patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \
- patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}), \
-- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': yumpkg.parse_arch}):
-+ patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': yumpkg.parse_arch_from_name}):
- pkgs = yumpkg.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
- expected_pkg_list = {
- 'glibc': [
-@@ -315,51 +315,6 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
- else:
- self.assertItemsEqual(pkginfo, expected_pkg_list[pkgname])
-
-- def test_list_patches(self):
-- '''
-- Test patches listing.
--
-- :return:
-- '''
-- yum_out = [
-- 'i my-fake-patch-not-installed-1234 recommended spacewalk-usix-2.7.5.2-2.2.noarch',
-- ' my-fake-patch-not-installed-1234 recommended spacewalksd-5.0.26.2-21.2.x86_64',
-- 'i my-fake-patch-not-installed-1234 recommended suseRegisterInfo-3.1.1-18.2.x86_64',
-- 'i my-fake-patch-installed-1234 recommended my-package-one-1.1-0.1.x86_64',
-- 'i my-fake-patch-installed-1234 recommended my-package-two-1.1-0.1.x86_64',
-- ]
--
-- expected_patches = {
-- 'my-fake-patch-not-installed-1234': {
-- 'installed': False,
-- 'summary': [
-- 'spacewalk-usix-2.7.5.2-2.2.noarch',
-- 'spacewalksd-5.0.26.2-21.2.x86_64',
-- 'suseRegisterInfo-3.1.1-18.2.x86_64',
-- ]
-- },
-- 'my-fake-patch-installed-1234': {
-- 'installed': True,
-- 'summary': [
-- 'my-package-one-1.1-0.1.x86_64',
-- 'my-package-two-1.1-0.1.x86_64',
-- ]
-- }
-- }
--
-- with patch.dict(yumpkg.__grains__, {'osarch': 'x86_64'}), \
-- patch.dict(yumpkg.__salt__, {'cmd.run_stdout': MagicMock(return_value=os.linesep.join(yum_out))}):
-- patches = yumpkg.list_patches()
-- self.assertFalse(patches['my-fake-patch-not-installed-1234']['installed'])
-- self.assertTrue(len(patches['my-fake-patch-not-installed-1234']['summary']) == 3)
-- for _patch in expected_patches['my-fake-patch-not-installed-1234']['summary']:
-- self.assertTrue(_patch in patches['my-fake-patch-not-installed-1234']['summary'])
--
-- self.assertTrue(patches['my-fake-patch-installed-1234']['installed'])
-- self.assertTrue(len(patches['my-fake-patch-installed-1234']['summary']) == 2)
-- for _patch in expected_patches['my-fake-patch-installed-1234']['summary']:
-- self.assertTrue(_patch in patches['my-fake-patch-installed-1234']['summary'])
--
- def test_latest_version_with_options(self):
- with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})):
-
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 78414ca4ac..b3162f10cd 100644
+index 36261285cb..7bff7065c6 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -607,7 +607,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}), \
- patch.dict(zypper.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \
- patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}), \
-- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': zypper.parse_arch}):
-+ patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': zypper.parse_arch_from_name}):
- pkgs = zypper.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
- self.assertFalse(pkgs.get('gpg-pubkey', False))
- for pkg_name, pkg_attr in {
-@@ -698,7 +698,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}), \
- patch.dict(zypper.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \
- patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}), \
-- patch.dict(pkg_resource.__salt__, {'pkg.parse_arch': zypper.parse_arch}):
-+ patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': zypper.parse_arch_from_name}):
- pkgs = zypper.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
- expected_pkg_list = {
- 'glibc': [
+@@ -1,33 +1,24 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Bo Maryniuk
+ """
+
+-# Import Python Libs
+-from __future__ import absolute_import
+
+ import os
+ from xml.dom import minidom
+
+ import salt.modules.pkg_resource as pkg_resource
+ import salt.modules.zypperpkg as zypper
+-
+-# Import Salt libs
+ import salt.utils.files
+ import salt.utils.pkg
+ from salt.exceptions import CommandExecutionError
+ from salt.ext import six
+-
+-# Import 3rd-party libs
+ from salt.ext.six.moves import configparser
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, Mock, call, patch
+ from tests.support.unit import TestCase
+
+
+-class ZyppCallMock(object):
++class ZyppCallMock:
+ def __init__(self, return_value=None):
+ self.__return_value = return_value
+
+@@ -124,7 +115,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ :return:
+ """
+
+- class RunSniffer(object):
++ class RunSniffer:
+ def __init__(self, stdout=None, stderr=None, retcode=None):
+ self.calls = list()
+ self._stdout = stdout or ""
+@@ -237,7 +228,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ ):
+ with self.assertRaisesRegex(
+ CommandExecutionError,
+- "^Zypper command failure: Some handled zypper internal error{0}Another zypper internal error$".format(
++ "^Zypper command failure: Some handled zypper internal error{}Another zypper internal error$".format(
+ os.linesep
+ ),
+ ):
+@@ -316,7 +307,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ "--no-refresh",
+ "--disable-repositories",
+ "products",
+- u"-i",
++ "-i",
+ ],
+ env={"ZYPP_READONLY_HACK": "1"},
+ output_loglevel="trace",
+@@ -338,14 +329,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ "eol_t",
+ "registerrelease",
+ ]:
+- if six.PY3:
+- self.assertCountEqual(
+- test_data[kwd], [prod.get(kwd) for prod in products]
+- )
+- else:
+- self.assertEqual(
+- test_data[kwd], sorted([prod.get(kwd) for prod in products])
+- )
++ self.assertCountEqual(
++ test_data[kwd], [prod.get(kwd) for prod in products]
++ )
+ cmd_run_all.assert_has_calls([mock_call])
+
+ def test_refresh_db(self):
+@@ -797,7 +783,7 @@ Use 'zypper repos' to get the list of defined repositories.
+ Repository 'DUMMY' not found by its alias, number, or URI.
+ """
+
+- class FailingZypperDummy(object):
++ class FailingZypperDummy:
+ def __init__(self):
+ self.stdout = zypper_out
+ self.stderr = ""
+@@ -1083,10 +1069,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ ],
+ }
+ for pkgname, pkginfo in pkgs.items():
+- if six.PY3:
+- self.assertCountEqual(pkginfo, expected_pkg_list[pkgname])
+- else:
+- self.assertItemsEqual(pkginfo, expected_pkg_list[pkgname])
++ self.assertCountEqual(pkginfo, expected_pkg_list[pkgname])
+
+ def test_list_patches(self):
+ """
+@@ -1446,7 +1429,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ :return:
+ """
+
+- class ListPackages(object):
++ class ListPackages:
+ def __init__(self):
+ self._packages = ["vim", "pico"]
+ self._pkgs = {
+@@ -1884,7 +1867,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ """
+ _zpr = MagicMock()
+ _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc))
+- assert isinstance(zypper.Wildcard(_zpr)("libzypp", "*.1"), six.string_types)
++ assert isinstance(zypper.Wildcard(_zpr)("libzypp", "*.1"), str)
+
+ def test_wildcard_to_query_condition_preservation(self):
+ """
+@@ -1904,14 +1887,14 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+
+ for op in zypper.Wildcard.Z_OP:
+ assert zypper.Wildcard(_zpr)(
+- "libzypp", "{0}*.1".format(op)
+- ) == "{0}17.2.6-27.9.1".format(op)
++ "libzypp", "{}*.1".format(op)
++ ) == "{}17.2.6-27.9.1".format(op)
+
+ # Auto-fix feature: moves operator from end to front
+ for op in zypper.Wildcard.Z_OP:
+ assert zypper.Wildcard(_zpr)(
+- "libzypp", "16*{0}".format(op)
+- ) == "{0}16.2.5-25.1".format(op)
++ "libzypp", "16*{}".format(op)
++ ) == "{}16.2.5-25.1".format(op)
+
+ def test_wildcard_to_query_unsupported_operators(self):
+ """
+@@ -1930,7 +1913,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc))
+ with self.assertRaises(CommandExecutionError):
+ for op in [">>", "==", "<<", "+"]:
+- zypper.Wildcard(_zpr)("libzypp", "{0}*.1".format(op))
++ zypper.Wildcard(_zpr)("libzypp", "{}*.1".format(op))
+
+ @patch("salt.modules.zypperpkg._get_visible_patterns")
+ def test__get_installed_patterns(self, get_visible_patterns):
--
-2.16.4
+2.29.2
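
The Py2/Py3 branches removed in this patch collapse to `assertCountEqual`, which compares elements by multiplicity regardless of order (Py2 spelled it `assertItemsEqual`). A quick illustration — on Python 3 a bare `TestCase` instance may be created just to use the assert helpers:

    import unittest

    tc = unittest.TestCase()

    # Same elements with the same multiplicities, any order: passes.
    tc.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])

    # Different multiplicities: fails, even though the sets are equal.
    try:
        tc.assertCountEqual([1, 2], [1, 2, 2])
    except AssertionError as exc:
        print("caught:", exc)
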
diff --git a/remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch b/remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
index daf6bd0..aee1445 100644
--- a/remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
+++ b/remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
@@ -1,4 +1,4 @@
-From 25b4e3ea983b2606b2fb3d3c0e42f9840208bf84 Mon Sep 17 00:00:00 2001
+From ccdef0b979c575d93e82088049ef81262320ed74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 11 Mar 2020 16:14:16 +0000
@@ -7,215 +7,109 @@ Subject: [PATCH] Remove deprecated usage of NO_MOCK and NO_MOCK_REASON
---
tests/integration/pillar/test_git_pillar.py | 1 -
tests/unit/cli/test_batch_async.py | 3 +--
- tests/unit/cli/test_support.py | 6 +-----
tests/unit/modules/test_cmdmod.py | 1 -
- tests/unit/modules/test_kubeadm.py | 5 +----
- tests/unit/modules/test_saltsupport.py | 4 +---
- tests/unit/modules/test_xfs.py | 3 ---
- tests/unit/states/test_btrfs.py | 3 ---
- tests/unit/utils/test_pkg.py | 3 +--
- 9 files changed, 5 insertions(+), 24 deletions(-)
+ tests/unit/modules/test_kubeadm.py | 3 +--
+ tests/unit/modules/test_xfs.py | 2 +-
+ tests/unit/states/test_btrfs.py | 1 -
+ tests/unit/utils/test_pkg.py | 1 -
+ 7 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
-index d417a7ebc3..9218f28d15 100644
+index 979dfebb94..987c5c5de3 100644
--- a/tests/integration/pillar/test_git_pillar.py
+++ b/tests/integration/pillar/test_git_pillar.py
-@@ -1383,7 +1383,6 @@ class TestPygit2SSH(GitPillarSSHTestBase):
+@@ -1601,7 +1601,6 @@ class TestPygit2SSH(GitPillarSSHTestBase):
)
-@skipIf(NO_MOCK, NO_MOCK_REASON)
- @skipIf(_windows_or_mac(), 'minion is windows or mac')
+ @skipIf(_windows_or_mac(), "minion is windows or mac")
@skip_if_not_root
- @skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER))
+ @skipIf(
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
-index 635dc689a8..0c66550d5b 100644
+index 82a712b15b..c0b708de76 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
-@@ -8,10 +8,9 @@ from salt.cli.batch_async import BatchAsync
+@@ -1,11 +1,10 @@
import salt.ext.tornado
+ from salt.cli.batch_async import BatchAsync
from salt.ext.tornado.testing import AsyncTestCase
- from tests.support.unit import skipIf, TestCase
--from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
+-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
-@skipIf(NO_MOCK, NO_MOCK_REASON)
class AsyncBatchTestCase(AsyncTestCase, TestCase):
-
def setUp(self):
-diff --git a/tests/unit/cli/test_support.py b/tests/unit/cli/test_support.py
-index 85ea957d79..8d8c1cb11f 100644
---- a/tests/unit/cli/test_support.py
-+++ b/tests/unit/cli/test_support.py
-@@ -6,7 +6,7 @@
- from __future__ import absolute_import, print_function, unicode_literals
-
- from tests.support.unit import skipIf, TestCase
--from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
-+from tests.support.mock import MagicMock, patch
-
- from salt.cli.support.console import IndentOutput
- from salt.cli.support.collector import SupportDataCollector, SaltSupport
-@@ -26,7 +26,6 @@ except ImportError:
-
-
- @skipIf(not bool(pytest), 'Pytest needs to be installed')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class SaltSupportIndentOutputTestCase(TestCase):
- '''
- Unit Tests for the salt-support indent output.
-@@ -90,7 +89,6 @@ class SaltSupportIndentOutputTestCase(TestCase):
-
-
- @skipIf(not bool(pytest), 'Pytest needs to be installed')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class SaltSupportCollectorTestCase(TestCase):
- '''
- Collector tests.
-@@ -211,7 +209,6 @@ class SaltSupportCollectorTestCase(TestCase):
-
-
- @skipIf(not bool(pytest), 'Pytest needs to be installed')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class SaltSupportRunnerTestCase(TestCase):
- '''
- Test runner class.
-@@ -404,7 +401,6 @@ class SaltSupportRunnerTestCase(TestCase):
-
-
- @skipIf(not bool(pytest), 'Pytest needs to be installed')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class ProfileIntegrityTestCase(TestCase):
- '''
- Default profile integrity
+ self.io_loop = self.get_new_ioloop()
diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py
-index 8d763435f8..3d13fb9290 100644
+index f3348bc379..15b97f8568 100644
--- a/tests/unit/modules/test_cmdmod.py
+++ b/tests/unit/modules/test_cmdmod.py
-@@ -37,7 +37,6 @@ MOCK_SHELL_FILE = '# List of acceptable shells\n' \
- '/bin/bash\n'
+@@ -24,7 +24,6 @@ DEFAULT_SHELL = "foo/bar"
+ MOCK_SHELL_FILE = "# List of acceptable shells\n" "\n" "/bin/bash\n"
-@skipIf(NO_MOCK, NO_MOCK_REASON)
class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ """
Unit tests for the salt.modules.cmdmod module
diff --git a/tests/unit/modules/test_kubeadm.py b/tests/unit/modules/test_kubeadm.py
-index a58f54f118..f17ba4ad64 100644
+index 91e4a9e68e..4940dbebb4 100644
--- a/tests/unit/modules/test_kubeadm.py
+++ b/tests/unit/modules/test_kubeadm.py
-@@ -29,16 +29,13 @@ from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import TestCase, skipIf
- from tests.support.mock import (
- MagicMock,
-- patch,
-- NO_MOCK,
-- NO_MOCK_REASON
-+ patch
- )
-
+@@ -24,11 +24,10 @@ import pytest
import salt.modules.kubeadm as kubeadm
from salt.exceptions import CommandExecutionError
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
-@skipIf(NO_MOCK, NO_MOCK_REASON)
class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ """
Test cases for salt.modules.kubeadm
-diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py
-index 7bd652a90e..75616ba949 100644
---- a/tests/unit/modules/test_saltsupport.py
-+++ b/tests/unit/modules/test_saltsupport.py
-@@ -9,7 +9,7 @@ from __future__ import absolute_import, print_function, unicode_literals
- # Import Salt Testing Libs
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import TestCase, skipIf
--from tests.support.mock import patch, MagicMock, NO_MOCK, NO_MOCK_REASON
-+from tests.support.mock import patch, MagicMock
- from salt.modules import saltsupport
- import salt.exceptions
- import datetime
-@@ -21,7 +21,6 @@ except ImportError:
-
-
- @skipIf(not bool(pytest), 'Pytest required')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin):
- '''
- Test cases for salt.modules.support::SaltSupportModule
-@@ -289,7 +288,6 @@ professor: Farnsworth
-
-
- @skipIf(not bool(pytest), 'Pytest required')
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- class LogCollectorTestCase(TestCase, LoaderModuleMockMixin):
- '''
- Test cases for salt.modules.support::LogCollector
diff --git a/tests/unit/modules/test_xfs.py b/tests/unit/modules/test_xfs.py
-index 4b423d69d1..d680c4e317 100644
+index 778aff793d..28783260d0 100644
--- a/tests/unit/modules/test_xfs.py
+++ b/tests/unit/modules/test_xfs.py
-@@ -8,8 +8,6 @@ import textwrap
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import skipIf, TestCase
- from tests.support.mock import (
-- NO_MOCK,
-- NO_MOCK_REASON,
- MagicMock,
- patch)
-
-@@ -17,7 +15,6 @@ from tests.support.mock import (
+@@ -3,7 +3,7 @@ import textwrap
import salt.modules.xfs as xfs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+-from tests.support.unit import TestCase
++from tests.support.unit import TestCase, skipIf
--@skipIf(NO_MOCK, NO_MOCK_REASON)
- @patch('salt.modules.xfs._get_mounts', MagicMock(return_value={}))
- class XFSTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ @patch("salt.modules.xfs._get_mounts", MagicMock(return_value={}))
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
-index 3f45ed94f9..c68f6279dc 100644
+index 74e44641b8..b8f70bccfe 100644
--- a/tests/unit/states/test_btrfs.py
+++ b/tests/unit/states/test_btrfs.py
-@@ -32,8 +32,6 @@ from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import skipIf, TestCase
- from tests.support.mock import (
- MagicMock,
-- NO_MOCK,
-- NO_MOCK_REASON,
- patch,
- )
-
-@@ -43,7 +41,6 @@ import salt.states.btrfs as btrfs
- import pytest
+@@ -32,7 +32,6 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
-@skipIf(NO_MOCK, NO_MOCK_REASON)
class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
- '''
+ """
Test cases for salt.states.btrfs
diff --git a/tests/unit/utils/test_pkg.py b/tests/unit/utils/test_pkg.py
-index 361e0bf92f..38c0cb8f84 100644
+index 404b01b12b..794b851df2 100644
--- a/tests/unit/utils/test_pkg.py
+++ b/tests/unit/utils/test_pkg.py
-@@ -3,7 +3,7 @@
- from __future__ import absolute_import, unicode_literals, print_function
-
- from tests.support.unit import TestCase, skipIf
--from tests.support.mock import Mock, MagicMock, patch, NO_MOCK, NO_MOCK_REASON
-+from tests.support.mock import Mock, MagicMock, patch
- import salt.utils.pkg
- from salt.utils.pkg import rpm
-
-@@ -13,7 +13,6 @@ except ImportError:
+@@ -9,7 +9,6 @@ except ImportError:
pytest = None
-@skipIf(NO_MOCK, NO_MOCK_REASON)
- @skipIf(pytest is None, 'PyTest is missing')
+ @skipIf(pytest is None, "PyTest is missing")
class PkgRPMTestCase(TestCase):
- '''
+ """
--
-2.23.0
+2.29.2
diff --git a/remove-deprecated-warning-that-breaks-miniion-execut.patch b/remove-deprecated-warning-that-breaks-miniion-execut.patch
new file mode 100644
index 0000000..d54b685
--- /dev/null
+++ b/remove-deprecated-warning-that-breaks-miniion-execut.patch
@@ -0,0 +1,43 @@
+From 73e357d7eee19a73cade22becb30d9689cae27ba Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 13 Jan 2021 13:38:32 +0000
+Subject: [PATCH] Remove deprecated warning that breaks minion
+ execution when "server_id_use_crc" opts are missing
+
+---
+ salt/minion.py | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/salt/minion.py b/salt/minion.py
+index 4d271c6d08..4da665a130 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -82,7 +82,6 @@ from salt.utils.event import tagify
+ from salt.utils.network import parse_host_port
+ from salt.utils.odict import OrderedDict
+ from salt.utils.process import ProcessManager, SignalHandlingProcess, default_signals
+-from salt.utils.versions import warn_until
+ from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
+
+ HAS_PSUTIL = False
+@@ -1097,15 +1096,6 @@ class MinionManager(MinionBase):
+ ):
+ masters = [masters]
+
+- if not self.opts.get("server_id_use_crc"):
+- warn_until(
+- "Sodium",
+- "This server_id is computed nor by Adler32 neither by CRC32. "
+- 'Please use "server_id_use_crc" option and define algorithm you'
+- 'prefer (default "Adler32"). The server_id will be computed with'
+- "Adler32 by default.",
+- )
+-
+ beacons_leader = True
+ for master in masters:
+ s_opts = copy.deepcopy(self.opts)
+--
+2.29.2
+
+
diff --git a/remove-msgpack-1.0.0-requirement-in-the-installed-me.patch b/remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
index da34628..3c9eae6 100644
--- a/remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
+++ b/remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
@@ -1,4 +1,4 @@
-From 1b17b0afa2f9567e71062fb7bec20efa609cd471 Mon Sep 17 00:00:00 2001
+From acf0a8a159cd5d24e98c7b7181e97135d093f2e4 Mon Sep 17 00:00:00 2001
From: Duncan Mac-Vicar P
Date: Wed, 16 Sep 2020 12:08:40 +0200
Subject: [PATCH] Remove msgpack<1.0.0 requirement in the installed
@@ -35,239 +35,14 @@ raw=false move
clean up
---
- pkg/osx/req.txt | 2 +-
- requirements/base.txt | 2 +-
- requirements/static/py2.7/darwin.txt | 4 +-
- requirements/static/py2.7/linux.txt | 2 +-
- requirements/static/py2.7/windows.txt | 4 +-
- requirements/static/py3.4/linux.txt | 2 +-
- requirements/static/py3.5/darwin.txt | 4 +-
- requirements/static/py3.5/linux.txt | 2 +-
- requirements/static/py3.5/windows.txt | 4 +-
- requirements/static/py3.6/darwin.txt | 4 +-
- requirements/static/py3.6/linux.txt | 2 +-
- requirements/static/py3.6/windows.txt | 4 +-
- requirements/static/py3.7/darwin.txt | 4 +-
- requirements/static/py3.7/linux.txt | 2 +-
- requirements/static/py3.7/windows.txt | 4 +-
- requirements/static/py3.8/darwin.txt | 4 +-
- requirements/static/py3.8/linux.txt | 2 +-
- requirements/static/py3.9/darwin.txt | 4 +-
- requirements/static/py3.9/linux.txt | 2 +-
- salt/serializers/msgpack.py | 22 +++++++-
- salt/utils/msgpack.py | 9 +--
- tests/unit/utils/test_msgpack.py | 80 ++++++++++++++++++++++++++-
- 22 files changed, 131 insertions(+), 38 deletions(-)
+ requirements/static/py3.8/darwin.txt | 4 +-
+ requirements/static/py3.8/linux.txt | 2 +-
+ requirements/static/py3.9/darwin.txt | 4 +-
+ requirements/static/py3.9/linux.txt | 2 +-
+ salt/utils/msgpack.py | 2 +
+ tests/unit/utils/test_msgpack.py | 68 ++++++++++++++++++++++++++++
+ 6 files changed, 76 insertions(+), 6 deletions(-)
-diff --git a/pkg/osx/req.txt b/pkg/osx/req.txt
-index e65ac89f3e..5716834001 100644
---- a/pkg/osx/req.txt
-+++ b/pkg/osx/req.txt
-@@ -15,7 +15,7 @@ jinja2==2.10.1
- linode-python==1.1.1
- Mako==1.0.7
- markupsafe==1.1.1
--msgpack-python==0.5.6
-+msgpack==1.0.0
- psutil==5.6.1
- pyasn1==0.4.5
- pycparser==2.19
-diff --git a/requirements/base.txt b/requirements/base.txt
-index 8adf76a2a0..d822973bcb 100644
---- a/requirements/base.txt
-+++ b/requirements/base.txt
-@@ -1,5 +1,5 @@
- Jinja2
--msgpack>=0.5,!=0.5.5,<1.0.0
-+msgpack>=0.5,!=0.5.5
- PyYAML
- MarkupSafe
- requests>=1.0.0
-diff --git a/requirements/static/py2.7/darwin.txt b/requirements/static/py2.7/darwin.txt
-index 1139683ede..abad9aaacb 100644
---- a/requirements/static/py2.7/darwin.txt
-+++ b/requirements/static/py2.7/darwin.txt
-@@ -73,8 +73,8 @@ meld3==1.0.2 # via supervisor
- mock==3.0.5 ; python_version < "3.6"
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py2.7/linux.txt b/requirements/static/py2.7/linux.txt
-index b6bab5e0f6..512e843e56 100644
---- a/requirements/static/py2.7/linux.txt
-+++ b/requirements/static/py2.7/linux.txt
-@@ -69,7 +69,7 @@ meld3==1.0.2 # via supervisor
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack==0.5.6
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py2.7/windows.txt b/requirements/static/py2.7/windows.txt
-index 446e3c2cda..92374b3b07 100644
---- a/requirements/static/py2.7/windows.txt
-+++ b/requirements/static/py2.7/windows.txt
-@@ -64,8 +64,8 @@ meld3==1.0.2 # via supervisor
- mock==3.0.5 ; python_version < "3.6"
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- packaging==19.2 # via pytest
- patch==1.16
- pathlib2==2.3.3 # via importlib-metadata, pytest
-diff --git a/requirements/static/py3.4/linux.txt b/requirements/static/py3.4/linux.txt
-index 4bc5ed8cc9..697ec567f5 100644
---- a/requirements/static/py3.4/linux.txt
-+++ b/requirements/static/py3.4/linux.txt
-@@ -59,7 +59,7 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack==0.5.6
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.5/darwin.txt b/requirements/static/py3.5/darwin.txt
-index d01d1c93ff..967a7a1f52 100644
---- a/requirements/static/py3.5/darwin.txt
-+++ b/requirements/static/py3.5/darwin.txt
-@@ -64,8 +64,8 @@ markupsafe==1.1.1
- mock==3.0.5 ; python_version < "3.6"
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.5/linux.txt b/requirements/static/py3.5/linux.txt
-index 9309059b6c..464a2264f1 100644
---- a/requirements/static/py3.5/linux.txt
-+++ b/requirements/static/py3.5/linux.txt
-@@ -59,7 +59,7 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack==0.5.6
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.5/windows.txt b/requirements/static/py3.5/windows.txt
-index 7918945e00..a5ea817671 100644
---- a/requirements/static/py3.5/windows.txt
-+++ b/requirements/static/py3.5/windows.txt
-@@ -54,8 +54,8 @@ markupsafe==1.1.1
- mock==3.0.5 ; python_version < "3.6"
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- packaging==19.2 # via pytest
- patch==1.16
- pathlib2==2.3.3 # via pytest
-diff --git a/requirements/static/py3.6/darwin.txt b/requirements/static/py3.6/darwin.txt
-index 2dc822beb8..f07bc77fff 100644
---- a/requirements/static/py3.6/darwin.txt
-+++ b/requirements/static/py3.6/darwin.txt
-@@ -64,8 +64,8 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.6/linux.txt b/requirements/static/py3.6/linux.txt
-index e5eead5572..3f6b0795b5 100644
---- a/requirements/static/py3.6/linux.txt
-+++ b/requirements/static/py3.6/linux.txt
-@@ -59,7 +59,7 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack==0.5.6
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.6/windows.txt b/requirements/static/py3.6/windows.txt
-index 83896f9d3f..b6548ed329 100644
---- a/requirements/static/py3.6/windows.txt
-+++ b/requirements/static/py3.6/windows.txt
-@@ -54,8 +54,8 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- packaging==19.2 # via pytest
- patch==1.16
- pathtools==0.1.2 # via watchdog
-diff --git a/requirements/static/py3.7/darwin.txt b/requirements/static/py3.7/darwin.txt
-index 616563d7b6..490ba230f7 100644
---- a/requirements/static/py3.7/darwin.txt
-+++ b/requirements/static/py3.7/darwin.txt
-@@ -63,8 +63,8 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.7/linux.txt b/requirements/static/py3.7/linux.txt
-index 92eedc94d5..a3d00a0b83 100644
---- a/requirements/static/py3.7/linux.txt
-+++ b/requirements/static/py3.7/linux.txt
-@@ -59,7 +59,7 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack==0.5.6
-+msgpack==1.0.0
- ncclient==0.6.4 # via junos-eznc
- netaddr==0.7.19 # via junos-eznc
- oscrypto==1.2.0 # via certvalidator
-diff --git a/requirements/static/py3.7/windows.txt b/requirements/static/py3.7/windows.txt
-index d6499eaacd..27d619b1d6 100644
---- a/requirements/static/py3.7/windows.txt
-+++ b/requirements/static/py3.7/windows.txt
-@@ -53,8 +53,8 @@ markupsafe==1.1.1
- mock==3.0.5
- more-itertools==5.0.0
- moto==1.3.7
--msgpack-python==0.5.6
--msgpack==0.5.6
-+msgpack==1.0.0
-+msgpack==1.0.0
- packaging==19.2 # via pytest
- patch==1.16
- pathtools==0.1.2 # via watchdog
diff --git a/requirements/static/py3.8/darwin.txt b/requirements/static/py3.8/darwin.txt
index 95bcd7748d..3164168937 100644
--- a/requirements/static/py3.8/darwin.txt
@@ -324,80 +99,27 @@ index b7eb8320de..8db5efa224 100644
ncclient==0.6.4 # via junos-eznc
netaddr==0.7.19 # via junos-eznc
oscrypto==1.2.0 # via certvalidator
-diff --git a/salt/serializers/msgpack.py b/salt/serializers/msgpack.py
-index 6082686cba..667ff8b12e 100644
---- a/salt/serializers/msgpack.py
-+++ b/salt/serializers/msgpack.py
-@@ -32,6 +32,24 @@ if not available:
- def _deserialize(stream_or_string, **options):
- _fail()
-
-+
-+elif salt.utils.msgpack.version >= (1, 0, 0):
-+
-+ def _serialize(obj, **options):
-+ try:
-+ return salt.utils.msgpack.dumps(obj, **options)
-+ except Exception as error: # pylint: disable=broad-except
-+ raise SerializationError(error)
-+
-+ def _deserialize(stream_or_string, **options):
-+ try:
-+ options.setdefault("use_list", True)
-+ options.setdefault("raw", False)
-+ return salt.utils.msgpack.loads(stream_or_string, **options)
-+ except Exception as error: # pylint: disable=broad-except
-+ raise DeserializationError(error)
-+
-+
- elif salt.utils.msgpack.version >= (0, 2, 0):
-
- def _serialize(obj, **options):
-@@ -42,8 +60,8 @@ elif salt.utils.msgpack.version >= (0, 2, 0):
-
- def _deserialize(stream_or_string, **options):
- try:
-- options.setdefault('use_list', True)
-- options.setdefault('encoding', 'utf-8')
-+ options.setdefault("use_list", True)
-+ options.setdefault("encoding", "utf-8")
- return salt.utils.msgpack.loads(stream_or_string, **options)
- except Exception as error: # pylint: disable=broad-except
- raise DeserializationError(error)
diff --git a/salt/utils/msgpack.py b/salt/utils/msgpack.py
-index 027fe81a18..4a97a87522 100644
+index df4ac6cb9c..551e51f537 100644
--- a/salt/utils/msgpack.py
+++ b/salt/utils/msgpack.py
-@@ -76,10 +76,11 @@ def _sanitize_msgpack_unpack_kwargs(kwargs):
- https://github.com/msgpack/msgpack-python/blob/master/ChangeLog.rst
- """
- assert isinstance(kwargs, dict)
-- if version >= (1, 0, 0) and kwargs.get("raw", None) is None:
-- log.info("adding `raw=True` argument to msgpack call")
-- kwargs["raw"] = True
--
-+ if version >= (1, 0, 0):
-+ kwargs.setdefault("raw", True)
-+ kwargs.setdefault("strict_map_key", False)
+@@ -81,6 +81,8 @@ def _sanitize_msgpack_unpack_kwargs(kwargs):
+ if version >= (1, 0, 0):
+ kwargs.setdefault("raw", True)
+ kwargs.setdefault("strict_map_key", False)
+ if "encoding" in kwargs:
+ del kwargs["encoding"]
return _sanitize_msgpack_kwargs(kwargs)
diff --git a/tests/unit/utils/test_msgpack.py b/tests/unit/utils/test_msgpack.py
-index cac7c1e9b1..f3e7b74f64 100644
+index 3c5f54dd3f..0287becfc4 100644
--- a/tests/unit/utils/test_msgpack.py
+++ b/tests/unit/utils/test_msgpack.py
-@@ -182,7 +182,78 @@ class TestMsgpack(TestCase):
+@@ -200,6 +200,74 @@ class TestMsgpack(TestCase):
+ "msgpack functions with no alias in `salt.utils.msgpack`",
+ )
- msgpack_items = set(x for x in dir(msgpack) if not x.startswith('_') and sanitized(x))
- msgpack_util_items = set(dir(salt.utils.msgpack))
-- self.assertFalse(msgpack_items - msgpack_util_items, 'msgpack functions with no alias in `salt.utils.msgpack`')
-+ self.assertFalse(
-+ msgpack_items - msgpack_util_items,
-+ "msgpack functions with no alias in `salt.utils.msgpack`",
-+ )
-+
+ def test_sanitize_msgpack_kwargs(self):
+ """
+ Test helper function _sanitize_msgpack_kwargs
@@ -465,31 +187,11 @@ index cac7c1e9b1..f3e7b74f64 100644
+ salt.utils.msgpack._sanitize_msgpack_unpack_kwargs(kwargs.copy()), {}
+ )
+ salt.utils.msgpack.version = version
-
++
def _test_base(self, pack_func, unpack_func):
- '''
-@@ -207,7 +278,6 @@ class TestMsgpack(TestCase):
- # Sanity check, we are not borking the BytesIO read function
- self.assertNotEqual(BytesIO.read, buffer.read)
- buffer.read = buffer.getvalue
--
- pack_func(data, buffer)
- # Sanity Check
- self.assertTrue(buffer.getvalue())
-@@ -216,7 +286,11 @@ class TestMsgpack(TestCase):
-
- # Reverse the packing and the result should be equivalent to the original data
- unpacked = unpack_func(buffer)
-- self.assertEqual(data, unpacked.decode())
-+
-+ if isinstance(unpacked, bytes):
-+ unpacked = unpacked.decode()
-+
-+ self.assertEqual(data, unpacked)
-
- def test_buffered_base_pack(self):
- self._test_buffered_base(pack_func=salt.utils.msgpack.pack, unpack_func=msgpack.unpack)
+ """
+ In msgpack, 'dumps' is an alias for 'packb' and 'loads' is an alias for 'unpackb'.
--
-2.28.0
+2.29.2
diff --git a/remove-unnecessary-yield-causing-badyielderror-bsc-1.patch b/remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
index 0524b6f..251afaf 100644
--- a/remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
+++ b/remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
@@ -1,4 +1,4 @@
-From bec0a06a069404c5043b1c59e3fe7cce2df177d3 Mon Sep 17 00:00:00 2001
+From a46471fcc26775f924599a5ef27e9716987739e4 Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Wed, 30 Oct 2019 10:19:12 +0100
Subject: [PATCH] Remove unnecessary yield causing BadYieldError
@@ -9,18 +9,18 @@ Subject: [PATCH] Remove unnecessary yield causing BadYieldError
1 file changed, 2 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 6d0dca1da5..754c257b36 100644
+index b2d04f9d4d..f3d92b88f1 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -227,7 +227,6 @@ class BatchAsync(object):
- self.event.unsubscribe(pattern, match_type='glob')
+@@ -238,7 +238,6 @@ class BatchAsync:
+ self.event.unsubscribe(pattern, match_type="glob")
del self
gc.collect()
- yield
@tornado.gen.coroutine
def schedule_next(self):
-@@ -263,7 +262,6 @@ class BatchAsync(object):
+@@ -275,7 +274,6 @@ class BatchAsync:
else:
yield self.end_batch()
gc.collect()
@@ -29,6 +29,6 @@ index 6d0dca1da5..754c257b36 100644
def __del__(self):
self.local = None
--
-2.16.4
+2.29.2
diff --git a/remove-vendored-backports-abc-from-requirements.patch b/remove-vendored-backports-abc-from-requirements.patch
index 20dbe2f..710a88f 100644
--- a/remove-vendored-backports-abc-from-requirements.patch
+++ b/remove-vendored-backports-abc-from-requirements.patch
@@ -1,26 +1,26 @@
-From 3bbb31815bb3c6eacedb8891f6bf4ece8098deca Mon Sep 17 00:00:00 2001
+From 2c16fd0b7e6c3c7e4464cb502a37de63159cedcb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 27 Apr 2020 16:37:38 +0100
Subject: [PATCH] Remove vendored 'backports-abc' from requirements
---
- requirements/base.txt | 1 -
- 1 file changed, 1 deletion(-)
+ requirements/base.txt | 4 ++++
+ 1 file changed, 4 insertions(+)
diff --git a/requirements/base.txt b/requirements/base.txt
-index 922aec4c754178fd5c317ed636a0ebe487fcb25d..8adf76a2a045f4fca8695c584fedcfc913f54db2 100644
+index 8390d492d7..ffe4bc98f1 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
-@@ -4,7 +4,6 @@ PyYAML
+@@ -4,3 +4,7 @@ PyYAML
MarkupSafe
requests>=1.0.0
- # Requirements for Tornado 4.5.3 (vendored as salt.ext.tornado)
--backports-abc==0.5; python_version < '3.0'
- singledispatch==3.4.0.3; python_version < '3.4'
- # Required by Tornado to handle threads stuff.
- futures>=2.0; python_version < '3.0'
+ distro>=1.5
++# Requirements for Tornado 4.5.3 (vendored as salt.ext.tornado)
++singledispatch==3.4.0.3; python_version < '3.4'
++# Required by Tornado to handle threads stuff.
++futures>=2.0; python_version < '3.0'
--
-2.23.0
+2.29.2
diff --git a/removes-unresolved-merge-conflict-in-yumpkg-module.patch b/removes-unresolved-merge-conflict-in-yumpkg-module.patch
deleted file mode 100644
index 74e0987..0000000
--- a/removes-unresolved-merge-conflict-in-yumpkg-module.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 93c0630b84b9da89acaf549a5c79e5d834c70a65 Mon Sep 17 00:00:00 2001
-From: Jochen Breuer
-Date: Thu, 5 Mar 2020 21:01:31 +0100
-Subject: [PATCH] Removes unresolved merge conflict in yumpkg module
-
----
- salt/modules/yumpkg.py | 4 ----
- 1 file changed, 4 deletions(-)
-
-diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index 88d74020b3..04ab240cd4 100644
---- a/salt/modules/yumpkg.py
-+++ b/salt/modules/yumpkg.py
-@@ -3220,11 +3220,7 @@ def _get_patches(installed_only=False):
- for line in salt.utils.itertools.split(ret, os.linesep):
- inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
- line).groups()
--<<<<<<< HEAD
- if advisory_id not in patches:
--=======
-- if not advisory_id in patches:
-->>>>>>> Do not report patches as installed when not all the related packages are installed (bsc#1128061)
- patches[advisory_id] = {
- 'installed': True if inst == 'i' else False,
- 'summary': [pkg]
---
-2.16.4
-
-
diff --git a/restore-default-behaviour-of-pkg-list-return.patch b/restore-default-behaviour-of-pkg-list-return.patch
index f4f2fa5..677edf2 100644
--- a/restore-default-behaviour-of-pkg-list-return.patch
+++ b/restore-default-behaviour-of-pkg-list-return.patch
@@ -1,4 +1,4 @@
-From 8f9478ffba672767e77b9b263f279e0379ab1ed1 Mon Sep 17 00:00:00 2001
+From 67e8afba4c33e65470dae3e39908868620f3ed11 Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 30 Aug 2019 14:20:06 +0200
Subject: [PATCH] Restore default behaviour of pkg list return
@@ -9,34 +9,37 @@ is now parameter to also return patches if that is needed.
Co-authored-by: Mihai Dinca
---
- salt/modules/zypperpkg.py | 32 +++++++++++++++++++++++---------
- 1 file changed, 23 insertions(+), 9 deletions(-)
+ salt/modules/zypperpkg.py | 38 ++++++++++++++++++++++++++------------
+ 1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 8179cd8c1d..f7158e0810 100644
+index b099f3e5d7..2daec0f380 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -1304,8 +1304,10 @@ def refresh_db(root=None):
+@@ -1422,10 +1422,10 @@ def refresh_db(force=None, root=None):
return ret
-def _find_types(pkgs):
+- """
+- Form a package names list, find prefixes of packages types.
+- """
+def _detect_includes(pkgs, inclusion_detection):
- '''Form a package names list, find prefixes of packages types.'''
++ """Form a package names list, find prefixes of packages types."""
+ if not inclusion_detection:
+ return None
- return sorted({pkg.split(':', 1)[0] for pkg in pkgs
- if len(pkg.split(':', 1)) == 2})
+ return sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2})
-@@ -1321,6 +1323,7 @@ def install(name=None,
- ignore_repo_failure=False,
- no_recommends=False,
- root=None,
-+ inclusion_detection=False,
- **kwargs):
- '''
- .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
-@@ -1435,6 +1438,9 @@ def install(name=None,
+
+@@ -1441,6 +1441,7 @@ def install(
+ ignore_repo_failure=False,
+ no_recommends=False,
+ root=None,
++ inclusion_detection=False,
+ **kwargs
+ ):
+ """
+@@ -1556,6 +1557,9 @@ def install(
.. versionadded:: 2018.3.0
@@ -46,26 +49,26 @@ index 8179cd8c1d..f7158e0810 100644
Returns a dict containing the new package names and versions::
-@@ -1500,7 +1506,8 @@ def install(name=None,
+@@ -1626,7 +1630,8 @@ def install(
diff_attr = kwargs.get("diff_attr")
- includes = _find_types(targets)
+ includes = _detect_includes(targets, inclusion_detection)
+
- old = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
-
- downgrades = []
-@@ -1692,7 +1699,7 @@ def upgrade(refresh=True,
+ old = (
+ list_pkgs(attr=diff_attr, root=root, includes=includes)
+ if not downloadonly
+@@ -1850,7 +1855,7 @@ def upgrade(
return ret
-def _uninstall(name=None, pkgs=None, root=None):
+def _uninstall(inclusion_detection, name=None, pkgs=None, root=None):
- '''
+ """
Remove and purge do identical things but with different Zypper commands,
this function performs the common logic.
-@@ -1702,7 +1709,7 @@ def _uninstall(name=None, pkgs=None, root=None):
+@@ -1860,7 +1865,7 @@ def _uninstall(name=None, pkgs=None, root=None):
except MinionError as exc:
raise CommandExecutionError(exc)
@@ -74,16 +77,16 @@ index 8179cd8c1d..f7158e0810 100644
old = list_pkgs(root=root, includes=includes)
targets = []
for target in pkg_params:
-@@ -1761,7 +1768,7 @@ def normalize_name(name):
- return name
+@@ -1923,7 +1928,7 @@ def normalize_name(name):
--def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
-+def remove(name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs): # pylint: disable=unused-argument
- '''
+ def remove(
+- name=None, pkgs=None, root=None, **kwargs
++ name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs
+ ): # pylint: disable=unused-argument
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
- On minions running systemd>=205, `systemd-run(1)`_ is now used to
-@@ -1792,8 +1799,11 @@ def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused
+@@ -1955,8 +1960,11 @@ def remove(
root
Operate on a different root directory.
@@ -96,20 +99,22 @@ index 8179cd8c1d..f7158e0810 100644
Returns a dict containing the changes.
-@@ -1805,10 +1815,10 @@ def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused
+@@ -1968,10 +1976,12 @@ def remove(
salt '*' pkg.remove ,,
salt '*' pkg.remove pkgs='["foo", "bar"]'
- '''
+ """
- return _uninstall(name=name, pkgs=pkgs, root=root)
+ return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root)
-def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
-+def purge(name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs): # pylint: disable=unused-argument
- '''
++def purge(
++ name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs
++): # pylint: disable=unused-argument
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
-@@ -1840,6 +1850,10 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-
+@@ -2003,6 +2013,10 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-
root
Operate on a different root directory.
@@ -120,16 +125,16 @@ index 8179cd8c1d..f7158e0810 100644
.. versionadded:: 0.16.0
-@@ -1853,7 +1867,7 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-
+@@ -2016,7 +2030,7 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-
salt '*' pkg.purge ,,
salt '*' pkg.purge pkgs='["foo", "bar"]'
- '''
+ """
- return _uninstall(name=name, pkgs=pkgs, root=root)
+ return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root)
def list_locks(root=None):
--
-2.16.4
+2.29.2
diff --git a/return-the-expected-powerpc-os-arch-bsc-1117995.patch b/return-the-expected-powerpc-os-arch-bsc-1117995.patch
index ab26e1c..6e27361 100644
--- a/return-the-expected-powerpc-os-arch-bsc-1117995.patch
+++ b/return-the-expected-powerpc-os-arch-bsc-1117995.patch
@@ -1,33 +1,91 @@
-From 27e90d416b89ac2c7839e1d03ded37f86df7290f Mon Sep 17 00:00:00 2001
+From 9365531537f2b80e0a0d1481edfa60de8331d07d Mon Sep 17 00:00:00 2001
From: Mihai Dinca
Date: Thu, 13 Dec 2018 12:17:35 +0100
Subject: [PATCH] Return the expected powerpc os arch (bsc#1117995)
---
- salt/utils/pkg/rpm.py | 9 ++++++---
- 1 file changed, 6 insertions(+), 3 deletions(-)
+ salt/utils/pkg/rpm.py | 22 +++++++++-------------
+ 1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py
-index bc5eb30eda..cb85eb99fe 100644
+index 2ee2bac4e5..d1b149ea0b 100644
--- a/salt/utils/pkg/rpm.py
+++ b/salt/utils/pkg/rpm.py
-@@ -52,9 +52,12 @@ def get_osarch():
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).communicate()[0]
- else:
-- ret = ''.join([x for x in platform.uname()[-2:] if x][-1:])
+@@ -1,10 +1,7 @@
+-# -*- coding: utf-8 -*-
+ """
+ Common functions for working with RPM packages
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import collections
+ import datetime
+@@ -14,8 +11,6 @@ import subprocess
+
+ import salt.utils.path
+ import salt.utils.stringutils
-
-- return salt.utils.stringutils.to_str(ret).strip() or 'unknown'
-+ ret = ''.join(list(filter(None, platform.uname()[-2:]))[-1:])
-+ ret = salt.utils.stringutils.to_str(ret).strip() or 'unknown'
-+ ARCH_FIXES_MAPPING = {
-+ "powerpc64le": "ppc64le"
-+ }
+-# Import 3rd-party libs
+ from salt.ext import six
+
+ log = logging.getLogger(__name__)
+@@ -68,9 +63,10 @@ def get_osarch():
+ stderr=subprocess.PIPE,
+ ).communicate()[0]
+ else:
+- ret = "".join([x for x in platform.uname()[-2:] if x][-1:])
+-
+- return salt.utils.stringutils.to_str(ret).strip() or "unknown"
++ ret = "".join(list(filter(None, platform.uname()[-2:]))[-1:])
++ ret = salt.utils.stringutils.to_str(ret).strip() or "unknown"
++ ARCH_FIXES_MAPPING = {"powerpc64le": "ppc64le"}
+ return ARCH_FIXES_MAPPING.get(ret, ret)
def check_32(arch, osarch=None):
+@@ -102,7 +98,7 @@ def resolve_name(name, arch, osarch=None):
+ osarch = get_osarch()
+
+ if not check_32(arch, osarch) and arch not in (osarch, "noarch"):
+- name += ".{0}".format(arch)
++ name += ".{}".format(arch)
+ return name
+
+
+@@ -120,7 +116,7 @@ def parse_pkginfo(line, osarch=None):
+
+ name = resolve_name(name, arch, osarch)
+ if release:
+- version += "-{0}".format(release)
++ version += "-{}".format(release)
+ if epoch not in ("(none)", "0"):
+ version = ":".join((epoch, version))
+
+@@ -146,10 +142,10 @@ def combine_comments(comments):
+ comments = [comments]
+ ret = []
+ for comment in comments:
+- if not isinstance(comment, six.string_types):
++ if not isinstance(comment, str):
+ comment = str(comment)
+ # Normalize for any spaces (or lack thereof) after the #
+- ret.append("# {0}\n".format(comment.lstrip("#").lstrip()))
++ ret.append("# {}\n".format(comment.lstrip("#").lstrip()))
+ return "".join(ret)
+
+
+@@ -171,7 +167,7 @@ def version_to_evr(verstring):
+ idx_e = verstring.find(":")
+ if idx_e != -1:
+ try:
+- epoch = six.text_type(int(verstring[:idx_e]))
++ epoch = str(int(verstring[:idx_e]))
+ except ValueError:
+ # look, garbage in the epoch field, how fun, kill it
+ epoch = "0" # this is our fallback, deal
--
-2.16.4
+2.29.2
diff --git a/revert-add-patch-support-for-allow-vendor-change-opt.patch b/revert-add-patch-support-for-allow-vendor-change-opt.patch
new file mode 100644
index 0000000..be684c4
--- /dev/null
+++ b/revert-add-patch-support-for-allow-vendor-change-opt.patch
@@ -0,0 +1,109 @@
+From 84214c3f48c35af01ca750908a5e2e6009ecc919 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 13 Jan 2021 09:52:05 +0000
+Subject: [PATCH] Revert "add patch support for allow vendor change
+ option with zypper"
+
+This reverts commit cee4cc182b4740c912861c712dea7bc44eb70ffb.
+---
+ salt/modules/zypperpkg.py | 46 ++++++++++++---------------------------
+ 1 file changed, 14 insertions(+), 32 deletions(-)
+
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 4a5cb85e7c..6f22994bf0 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -35,6 +35,7 @@ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+
+ # pylint: disable=import-error,redefined-builtin,no-name-in-module
++from salt.ext import six
+ from salt.ext.six.moves import configparser
+ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
+ from salt.utils.versions import LooseVersion
+@@ -1430,7 +1431,6 @@ def install(
+ no_recommends=False,
+ root=None,
+ inclusion_detection=False,
+- novendorchange=True,
+ **kwargs
+ ):
+ """
+@@ -1478,10 +1478,6 @@ def install(
+ skip_verify
+ Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+
+-
+- novendorchange
+- Disallow vendor change
+-
+ version
+ Can be either a version number, or the combination of a comparison
+ operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
+@@ -1642,22 +1638,6 @@ def install(
+ cmd_install.append(
+ kwargs.get("resolve_capabilities") and "--capability" or "--name"
+ )
+- if novendorchange:
+- if __grains__["osrelease_info"][0] > 11:
+- cmd_install.append("--no-allow-vendor-change")
+- log.info("Disabling vendor changes")
+- else:
+- log.warning(
+- "Enabling/Disabling vendor changes is not supported on this Zypper version"
+- )
+- else:
+- if __grains__["osrelease_info"][0] > 11:
+- cmd_install.append("--allow-vendor-change")
+- log.info("Enabling vendor changes")
+- else:
+- log.warning(
+- "Enabling/Disabling vendor changes is not supported on this Zypper version"
+- )
+
+ if not refresh:
+ cmd_install.insert(0, "--no-refresh")
+@@ -1669,6 +1649,7 @@ def install(
+ cmd_install.extend(fromrepoopt)
+ if no_recommends:
+ cmd_install.append("--no-recommends")
++
+ errors = []
+
+ # Split the targets into batches of 500 packages each, so that
+@@ -1812,18 +1793,19 @@ def upgrade(
+ cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
+ log.info("Targeting repos: %s", fromrepo)
+
+- # TODO: Grains validation should be moved to Zypper class
+- if __grains__["osrelease_info"][0] > 11:
+- if novendorchange:
+- cmd_update.append("--no-allow-vendor-change")
+- log.info("Disabling vendor changes")
++ if dist_upgrade:
++ # TODO: Grains validation should be moved to Zypper class
++ if __grains__["osrelease_info"][0] > 11:
++ if novendorchange:
++ cmd_update.append("--no-allow-vendor-change")
++ log.info("Disabling vendor changes")
++ else:
++ cmd_update.append("--allow-vendor-change")
++ log.info("Enabling vendor changes")
+ else:
+- cmd_update.append("--allow-vendor-change")
+- log.info("Enabling vendor changes")
+- else:
+- log.warning(
+- "Enabling/Disabling vendor changes is not supported on this Zypper version"
+- )
++ log.warning(
++ "Enabling/Disabling vendor changes is not supported on this Zypper version"
++ )
+
+ if no_recommends:
+ cmd_update.append("--no-recommends")
+--
+2.29.2
+
+
diff --git a/revert-fixing-a-use-case-when-multiple-inotify-beaco.patch b/revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
new file mode 100644
index 0000000..3538458
--- /dev/null
+++ b/revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
@@ -0,0 +1,270 @@
+From 5ea2f10b15684dd417bad858642faafc92cd382a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 5 Jan 2021 12:31:26 +0000
+Subject: [PATCH] Revert "Fixing a use case when multiple inotify
+ beacons are defined but when notifications are fired the configuration fron
+ the first beacon are used." Revert "Adding a util function to remove hidden
+ (options starting with underscore) from the beacon configuration. This is
+ used when the beacons loop through the configuration, eg. status beacon, and
+ expect certain options."
+
+This reverts commit 68a891ab2fe53ebf329b9c83b875f3575e87e266.
+This reverts commit 66c58dedf8c364eaeb35c5adce8bcc8fe5c1219a.
+---
+ salt/beacons/__init__.py | 1 -
+ salt/beacons/diskusage.py | 3 ---
+ salt/beacons/inotify.py | 25 ++++++-------------
+ salt/beacons/napalm_beacon.py | 6 ++---
+ salt/beacons/status.py | 4 ---
+ tests/unit/beacons/test_inotify.py | 39 ------------------------------
+ tests/unit/test_beacons.py | 25 +++----------------
+ 7 files changed, 14 insertions(+), 89 deletions(-)
+
+diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py
+index 6951a0ce47..0570006348 100644
+--- a/salt/beacons/__init__.py
++++ b/salt/beacons/__init__.py
+@@ -72,7 +72,6 @@ class Beacon:
+ beacon_name = current_beacon_config["beacon_module"]
+ else:
+ beacon_name = mod
+- b_config[mod].append({"_beacon_name": mod})
+ fun_str = "{}.beacon".format(beacon_name)
+ validate_str = "{}.validate".format(beacon_name)
+ if fun_str in self.beacons:
+diff --git a/salt/beacons/diskusage.py b/salt/beacons/diskusage.py
+index c7d4acfa3a..475d520de6 100644
+--- a/salt/beacons/diskusage.py
++++ b/salt/beacons/diskusage.py
+@@ -10,7 +10,6 @@ Beacon to monitor disk usage.
+ import logging
+ import re
+
+-import salt.utils.beacons
+ import salt.utils.platform
+
+ try:
+@@ -83,8 +82,6 @@ def beacon(config):
+ it will override the previously defined threshold.
+
+ """
+- whitelist = []
+- config = salt.utils.beacons.remove_hidden_options(config, whitelist)
+ parts = psutil.disk_partitions(all=True)
+ ret = []
+ for mounts in config:
+diff --git a/salt/beacons/inotify.py b/salt/beacons/inotify.py
+index b4bb6def5b..fa2f73c35f 100644
+--- a/salt/beacons/inotify.py
++++ b/salt/beacons/inotify.py
+@@ -21,7 +21,6 @@ import os
+ import re
+
+ import salt.ext.six
+-import salt.utils.beacons
+
+ # pylint: disable=import-error
+ from salt.ext.six.moves import map
+@@ -71,19 +70,17 @@ def _get_notifier(config):
+ """
+ Check the context for the notifier and construct it if not present
+ """
+- beacon_name = config.get("_beacon_name", "inotify")
+- notifier = "{}.notifier".format(beacon_name)
+- if notifier not in __context__:
++ if "inotify.notifier" not in __context__:
+ __context__["inotify.queue"] = collections.deque()
+ wm = pyinotify.WatchManager()
+- __context__[notifier] = pyinotify.Notifier(wm, _enqueue)
++ __context__["inotify.notifier"] = pyinotify.Notifier(wm, _enqueue)
+ if (
+ "coalesce" in config
+ and isinstance(config["coalesce"], bool)
+ and config["coalesce"]
+ ):
+- __context__[notifier].coalesce_events()
+- return __context__[notifier]
++ __context__["inotify.notifier"].coalesce_events()
++ return __context__["inotify.notifier"]
+
+
+ def validate(config):
+@@ -259,10 +256,6 @@ def beacon(config):
+ affects all paths that are being watched. This is due to this option
+ being at the Notifier level in pyinotify.
+ """
+-
+- whitelist = ["_beacon_name"]
+- config = salt.utils.beacons.remove_hidden_options(config, whitelist)
+-
+ _config = {}
+ list(map(_config.update, config))
+
+@@ -286,7 +279,7 @@ def beacon(config):
+ break
+ path = os.path.dirname(path)
+
+- excludes = _config["files"].get(path, {}).get("exclude", "")
++ excludes = _config["files"][path].get("exclude", "")
+
+ if excludes and isinstance(excludes, list):
+ for exclude in excludes:
+@@ -373,8 +366,6 @@ def beacon(config):
+
+
+ def close(config):
+- beacon_name = config.get("_beacon_name", "inotify")
+- notifier = "{}.notifier".format(beacon_name)
+- if notifier in __context__:
+- __context__[notifier].stop()
+- del __context__[notifier]
++ if "inotify.notifier" in __context__:
++ __context__["inotify.notifier"].stop()
++ del __context__["inotify.notifier"]
+diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py
+index 3ca4d10512..d1bddccb8e 100644
+--- a/salt/beacons/napalm_beacon.py
++++ b/salt/beacons/napalm_beacon.py
+@@ -168,9 +168,10 @@ with a NTP server at a stratum level greater than 5.
+ """
+
+ import logging
++
++# Import Python std lib
+ import re
+
+-import salt.utils.beacons
+ import salt.utils.napalm
+
+ log = logging.getLogger(__name__)
+@@ -302,9 +303,6 @@ def beacon(config):
+ """
+ Watch napalm function and fire events.
+ """
+- whitelist = []
+- config = salt.utils.beacons.remove_hidden_options(config, whitelist)
+-
+ log.debug("Executing napalm beacon with config:")
+ log.debug(config)
+ ret = []
+diff --git a/salt/beacons/status.py b/salt/beacons/status.py
+index d6b6150f28..82ed19bc47 100644
+--- a/salt/beacons/status.py
++++ b/salt/beacons/status.py
+@@ -93,7 +93,6 @@ import datetime
+ import logging
+
+ import salt.exceptions
+-import salt.utils.beacons
+ import salt.utils.platform
+
+ log = logging.getLogger(__name__)
+@@ -121,9 +120,6 @@ def beacon(config):
+ log.debug(config)
+ ctime = datetime.datetime.utcnow().isoformat()
+
+- whitelist = []
+- config = salt.utils.beacons.remove_hidden_options(config, whitelist)
+-
+ if not config:
+ config = [
+ {
+diff --git a/tests/unit/beacons/test_inotify.py b/tests/unit/beacons/test_inotify.py
+index 665e334fbc..d91a2daebf 100644
+--- a/tests/unit/beacons/test_inotify.py
++++ b/tests/unit/beacons/test_inotify.py
+@@ -273,42 +273,3 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(len(ret), 1)
+ self.assertEqual(ret[0]["path"], fp)
+ self.assertEqual(ret[0]["change"], "IN_DELETE")
+-
+- # Check __get_notifier and ensure that the right bits are in __context__
+- # including a beacon_name specific notifier is found.
+- def test__get_notifier(self):
+- config = {
+- "files": {
+- "/tmp/httpd/vhost.d": {
+- "mask": ["delete", "modify"],
+- "recurse": True,
+- "auto_add": True,
+- "exclude": [
+- {"/tmp/httpd/vhost.d/.+?\\.sw[px]*$|4913|~$": {"regex": True}}
+- ],
+- },
+- "/tmp/httpd/conf.d": {
+- "mask": ["delete", "modify"],
+- "recurse": True,
+- "auto_add": True,
+- "exclude": [
+- {"/tmp/httpd/vhost.d/.+?\\.sw[px]*$|4913|~$": {"regex": True}}
+- ],
+- },
+- "/tmp/httpd/conf": {
+- "mask": ["delete", "modify"],
+- "recurse": True,
+- "auto_add": True,
+- "exclude": [
+- {"/tmp/httpd/vhost.d/.+?\\.sw[px]*$|4913|~$": {"regex": True}}
+- ],
+- },
+- },
+- "coalesce": True,
+- "beacon_module": "inotify",
+- "_beacon_name": "httpd.inotify",
+- }
+-
+- ret = inotify._get_notifier(config)
+- self.assertIn("inotify.queue", inotify.__context__)
+- self.assertIn("httpd.inotify.notifier", inotify.__context__)
+diff --git a/tests/unit/test_beacons.py b/tests/unit/test_beacons.py
+index b7a5127179..be629f49d4 100644
+--- a/tests/unit/test_beacons.py
++++ b/tests/unit/test_beacons.py
+@@ -7,7 +7,7 @@ import logging
+ import salt.beacons as beacons
+ import salt.config
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import MagicMock, call, patch
++from tests.support.mock import patch
+ from tests.support.unit import TestCase
+
+ log = logging.getLogger(__name__)
+@@ -35,9 +35,9 @@ class BeaconsTestCase(TestCase, LoaderModuleMockMixin):
+ ]
+ }
+ with patch.dict(beacons.__opts__, mock_opts):
+- beacon = salt.beacons.Beacon(mock_opts, [])
+- ret = beacon.process(mock_opts["beacons"], mock_opts["grains"])
+-
++ ret = salt.beacons.Beacon(mock_opts, []).process(
++ mock_opts["beacons"], mock_opts["grains"]
++ )
+ _expected = [
+ {
+ "tag": "salt/beacon/minion/watch_apache/",
+@@ -46,20 +46,3 @@ class BeaconsTestCase(TestCase, LoaderModuleMockMixin):
+ }
+ ]
+ self.assertEqual(ret, _expected)
+-
+- # Ensure that "beacon_name" is available in the call to the beacon function
+- name = "ps.beacon"
+- mocked = {name: MagicMock(return_value=_expected)}
+- mocked[name].__globals__ = {}
+- calls = [
+- call(
+- [
+- {"processes": {"apache2": "stopped"}},
+- {"beacon_module": "ps"},
+- {"_beacon_name": "watch_apache"},
+- ]
+- )
+- ]
+- with patch.object(beacon, "beacons", mocked) as patched:
+- beacon.process(mock_opts["beacons"], mock_opts["grains"])
+- patched[name].assert_has_calls(calls)
+--
+2.29.2
+
+
diff --git a/run-salt-api-as-user-salt-bsc-1064520.patch b/run-salt-api-as-user-salt-bsc-1064520.patch
index 4efbfab..ddc6df0 100644
--- a/run-salt-api-as-user-salt-bsc-1064520.patch
+++ b/run-salt-api-as-user-salt-bsc-1064520.patch
@@ -1,4 +1,4 @@
-From 4e9b3808b5a27fcdc857b26d73e0f6716243ca92 Mon Sep 17 00:00:00 2001
+From cdecbbdf5db3f1cb6b603916fecd80738f5fae9a Mon Sep 17 00:00:00 2001
From: Christian Lanig
Date: Mon, 27 Nov 2017 13:10:26 +0100
Subject: [PATCH] Run salt-api as user salt (bsc#1064520)
@@ -20,6 +20,6 @@ index 7ca582dfb4..bf513e4dbd 100644
ExecStart=/usr/bin/salt-api
TimeoutStopSec=3
--
-2.16.4
+2.29.2
diff --git a/run-salt-master-as-dedicated-salt-user.patch b/run-salt-master-as-dedicated-salt-user.patch
index aab3edd..754ef80 100644
--- a/run-salt-master-as-dedicated-salt-user.patch
+++ b/run-salt-master-as-dedicated-salt-user.patch
@@ -1,4 +1,4 @@
-From 497acb852b0d4519984d981dfefdc0848c3e4159 Mon Sep 17 00:00:00 2001
+From 88f40fff3b81edaa55f37949f56c67112ca2dcad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Klaus=20K=C3=A4mpf?=
Date: Wed, 20 Jan 2016 11:01:06 +0100
Subject: [PATCH] Run salt master as dedicated salt user
@@ -10,7 +10,7 @@ Subject: [PATCH] Run salt master as dedicated salt user
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/conf/master b/conf/master
-index ce2e26872a..22a4a7bdb4 100644
+index 41a62f2f34..943c5b5846 100644
--- a/conf/master
+++ b/conf/master
@@ -25,7 +25,8 @@
@@ -24,7 +24,7 @@ index ce2e26872a..22a4a7bdb4 100644
# Tell the master to also use salt-ssh when running commands against minions.
#enable_ssh_minions: False
diff --git a/pkg/salt-common.logrotate b/pkg/salt-common.logrotate
-index 3cd002308e..0d99d1b801 100644
+index a0306ff370..97d158db18 100644
--- a/pkg/salt-common.logrotate
+++ b/pkg/salt-common.logrotate
@@ -1,4 +1,5 @@
@@ -42,6 +42,6 @@ index 3cd002308e..0d99d1b801 100644
missingok
rotate 7
--
-2.16.4
+2.29.2
diff --git a/salt.changes b/salt.changes
index 54442a5..4e72674 100644
--- a/salt.changes
+++ b/salt.changes
@@ -1,3 +1,269 @@
+-------------------------------------------------------------------
+Wed Jan 13 13:49:34 UTC 2021 - Pablo Suárez Hernández
+
+- Remove deprecated warning that breaks minion execution when the "server_id_use_crc" option is missing
+
+- Added:
+ * remove-deprecated-warning-that-breaks-miniion-execut.patch
+
+-------------------------------------------------------------------
+Wed Jan 13 10:13:13 UTC 2021 - Pablo Suárez Hernández
+
+- Revert wrong zypper patch adding support for vendor change flags on pkg.install
+
+- Added:
+ * revert-add-patch-support-for-allow-vendor-change-opt.patch
+
+-------------------------------------------------------------------
+Tue Jan 12 12:09:35 UTC 2021 - Pablo Suárez Hernández
+
+- Force zyppnotify to prefer Packages.db over Packages if it exists
+- Allow vendor change option with zypper
+- Add pkg.services_need_restart
+- Fix for file.check_perms to work with numeric uid/gid
+
+- Added:
+ * force-zyppnotify-to-prefer-packages.db-than-packages.patch
+ * fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
+ * add-patch-support-for-allow-vendor-change-option-wit.patch
+ * add-pkg.services_need_restart-302.patch
+
+-------------------------------------------------------------------
+Tue Jan 12 10:31:02 UTC 2021 - Pablo Suárez Hernández
+
+- virt: more network support
+  Add more network and PCI/USB host device passthrough support
+ to virt module and states
+
+- Added:
+ * open-suse-3002.2-virt-network-311.patch
+
+-------------------------------------------------------------------
+Tue Jan 12 09:55:36 UTC 2021 - Pablo Suárez Hernández
+
+- Bigvm backports:
+ virt consoles, CPU tuning and topology, and memory tuning.
+
+- Added:
+ * open-suse-3002.2-bigvm-310.patch
+
+-------------------------------------------------------------------
+Mon Jan 11 16:11:22 UTC 2021 - Pablo Suárez Hernández
+
+- Fix pkg states when DEB package has "all" arch
+
+- Added:
+ * fix-aptpkg.normalize_name-when-package-arch-is-all.patch
+
+-------------------------------------------------------------------
+Tue Jan 5 12:49:42 UTC 2021 - Pablo Suárez Hernández
+
+- Do not force beacons configuration to be a list.
+ Revert https://github.com/saltstack/salt/pull/58655
+
+- Added:
+ * revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
+
+-------------------------------------------------------------------
+Tue Jan 5 10:15:08 UTC 2021 - Pablo Suárez Hernández
+
+- Drop wrong virt capabilities code after rebasing patches
+
+- Added:
+ * drop-wrong-virt-capabilities-code-after-rebasing-pat.patch
+
+-------------------------------------------------------------------
+Fri Dec 18 12:13:49 UTC 2020 - Pablo Suárez Hernández
+
+- Update to Salt release version 3002.2
+- See release notes: https://docs.saltstack.com/en/latest/topics/releases/3002.2.html
+
+- Modified:
+ * add-environment-variable-to-know-if-yum-is-invoked-f.patch
+ * let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
+ * fix-__mount_device-wrapper-254.patch
+ * opensuse-3000.2-virt-backports-236-257.patch
+ * fixes-cve-2018-15750-cve-2018-15751.patch
+ * strip-trailing-from-repo.uri-when-comparing-repos-in.patch
+ * include-aliases-in-the-fqdns-grains.patch
+ * support-config-non-root-permission-issues-fixes-u-50.patch
+ * support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
+ * fix-batch_async-obsolete-test.patch
+ * early-feature-support-config.patch
+ * changed-imports-to-vendored-tornado.patch
+ * avoid-excessive-syslogging-by-watchdog-cronjob-58.patch
+ * add-hold-unhold-functions.patch
+ * do-not-crash-when-there-are-ipv6-established-connect.patch
+ * add-docker-logout-237.patch
+ * add-saltssh-multi-version-support-across-python-inte.patch
+ * fix-a-test-and-some-variable-names-229.patch
+ * implement-network.fqdns-module-function-bsc-1134860-.patch
+ * debian-info_installed-compatibility-50453.patch
+ * fix-bsc-1065792.patch
+ * use-current-ioloop-for-the-localclient-instance-of-b.patch
+ * restore-default-behaviour-of-pkg-list-return.patch
+ * virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
+ * use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
+ * add-migrated-state-and-gpg-key-management-functions-.patch
+ * info_installed-works-without-status-attr-now.patch
+ * bsc-1176024-fix-file-directory-user-and-group-owners.patch
+ * opensuse-3000.3-spacewalk-runner-parse-command-250.patch
+ * fix-aptpkg-systemd-call-bsc-1143301.patch
+ * fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
+ * ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
+ * calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
+ * add-cpe_name-for-osversion-grain-parsing-u-49946.patch
+ * python3.8-compatibility-pr-s-235.patch
+ * backport-virt-patches-from-3001-256.patch
+ * do-not-break-repo-files-with-multiple-line-values-on.patch
+ * enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
+ * accumulated-changes-required-for-yomi-165.patch
+ * support-transactional-systems-microos-271.patch
+ * use-adler32-algorithm-to-compute-string-checksums.patch
+ * remove-vendored-backports-abc-from-requirements.patch
+ * fall-back-to-pymysql.patch
+ * xen-disk-fixes-264.patch
+ * fix-for-temp-folder-definition-in-loader-unit-test.patch
+ * batch.py-avoid-exception-when-minion-does-not-respon.patch
+ * move-server_id-deprecation-warning-to-reduce-log-spa.patch
+ * avoid-traceback-when-http.query-request-cannot-be-pe.patch
+ * fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
+ * fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
+ * grains-master-can-read-grains.patch
+ * remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
+ * fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
+ * accumulated-changes-from-yomi-167.patch
+ * allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
+ * loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch
+ * add-astra-linux-common-edition-to-the-os-family-list.patch
+ * fix-async-batch-race-conditions.patch
+ * batch-async-catch-exceptions-and-safety-unregister-a.patch
+ * activate-all-beacons-sources-config-pillar-grains.patch
+ * drop-wrong-mock-from-chroot-unit-test.patch
+ * fix-for-suse-expanded-support-detection.patch
+ * fix-novendorchange-option-284.patch
+ * fix-virt.update-with-cpu-defined-263.patch
+ * add-batch_presence_ping_timeout-and-batch_presence_p.patch
+ * fix-git_pillar-merging-across-multiple-__env__-repos.patch
+ * add-publish_batch-to-clearfuncs-exposed-methods.patch
+ * fix-unit-tests-for-batch-async-after-refactor.patch
+ * add-new-custom-suse-capability-for-saltutil-state-mo.patch
+ * prevent-test_mod_del_repo_multiline_values-to-fail.patch
+ * x509-fixes-111.patch
+ * adds-explicit-type-cast-for-port.patch
+ * run-salt-master-as-dedicated-salt-user.patch
+ * remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
+ * switch-firewalld-state-to-use-change_interface.patch
+ * option-to-en-disable-force-refresh-in-zypper-215.patch
+ * fix-async-batch-multiple-done-events.patch
+ * make-setup.py-script-to-not-require-setuptools-9.1.patch
+ * add-custom-suse-capabilities-as-grains.patch
+ * don-t-call-zypper-with-more-than-one-no-refresh.patch
+ * transactional_update-unify-with-chroot.call.patch
+ * fix-ipv6-scope-bsc-1108557.patch
+ * temporary-fix-extend-the-whitelist-of-allowed-comman.patch
+ * opensuse-3000-libvirt-engine-fixes-251.patch
+ * fix-grains.test_core-unit-test-277.patch
+ * pkgrepo-support-python-2.7-function-call-295.patch
+ * prevent-import-errors-when-running-test_btrfs-unit-t.patch
+ * do-not-make-ansiblegate-to-crash-on-python3-minions.patch
+ * fix-issue-2068-test.patch
+ * ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
+ * remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
+ * read-repo-info-without-using-interpolation-bsc-11356.patch
+ * fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch
+ * fixing-streamclosed-issue.patch
+ * virt._get_domain-don-t-raise-an-exception-if-there-i.patch
+ * loop-fix-variable-names-for-until_no_eval.patch
+ * improve-batch_async-to-release-consumed-memory-bsc-1.patch
+ * prevent-systemd-run-description-issue-when-running-a.patch
+ * integration-of-msi-authentication-with-azurearm-clou.patch
+ * add-all_versions-parameter-to-include-all-installed-.patch
+ * sanitize-grains-loaded-from-roster_grains.json.patch
+ * fix-failing-unit-tests-for-batch-async.patch
+ * reintroducing-reverted-changes.patch
+ * fix-for-log-checking-in-x509-test.patch
+ * do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
+ * opensuse-3000-virt-defined-states-222.patch
+ * add-virt.all_capabilities.patch
+ * prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
+ * fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
+ * fix-unit-test-for-grains-core.patch
+ * async-batch-implementation.patch
+ * apply-patch-from-upstream-to-support-python-3.8.patch
+ * remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
+ * re-adding-function-to-test-for-root.patch
+ * zypperpkg-filter-patterns-that-start-with-dot-244.patch
+ * fix-a-wrong-rebase-in-test_core.py-180.patch
+ * add-multi-file-support-and-globbing-to-the-filetree-.patch
+ * fix-the-removed-six.itermitems-and-six.-_type-262.patch
+ * zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
+ * add-standalone-configuration-file-for-enabling-packa.patch
+ * make-profiles-a-package.patch
+ * return-the-expected-powerpc-os-arch-bsc-1117995.patch
+ * batch_async-avoid-using-fnmatch-to-match-event-217.patch
+ * do-not-raise-streamclosederror-traceback-but-only-lo.patch
+ * provide-the-missing-features-required-for-yomi-yet-o.patch
+ * make-aptpkg.list_repos-compatible-on-enabled-disable.patch
+ * backport-a-few-virt-prs-272.patch
+ * add-supportconfig-module-for-remote-calls-and-saltss.patch
+ * run-salt-api-as-user-salt-bsc-1064520.patch
+ * path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
+ * get-os_arch-also-without-rpm-package-installed.patch
+ * invalidate-file-list-cache-when-cache-file-modified-.patch
+ * xfs-do-not-fails-if-type-is-not-present.patch
+ * prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
+
+- Removed:
+ * do-not-report-patches-as-installed-when-not-all-the-.patch
+ * add-pkg.services_need_restart-302.patch
+ * removes-unresolved-merge-conflict-in-yumpkg-module.patch
+ * add-missing-fun-for-returns-from-wfunc-executions.patch
+ * force-zyppnotify-to-prefer-packages.db-than-packages.patch
+ * decide-if-the-source-should-be-actually-skipped.patch
+ * make-lazyloader.__init__-call-to-_refresh_file_mappi.patch
+ * avoid-has_docker-true-if-import-messes-with-salt.uti.patch
+ * fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch
+ * set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch
+ * fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
+ * add-patch-support-for-allow-vendor-change-option-wit.patch
+ * opensuse-3000.3-bigvm-backports-303.patch
+ * msgpack-support-versions-1.0.0.patch
+ * fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch
+ * use-full-option-name-instead-of-undocumented-abbrevi.patch
+ * add-missing-_utils-at-loader-grains_func.patch
+ * loader-invalidate-the-import-cachefor-extra-modules.patch
+ * fix-for-return-value-ret-vs-return-in-batch-mode.patch
+ * make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch
+
+-------------------------------------------------------------------
+Thu Dec 10 10:47:12 UTC 2020 - Jochen Breuer
+
+- Force zyppnotify to prefer Packages.db over Packages if it exists
+- Allow vendor change option with zypper
+
+- Added:
+ * add-patch-support-for-allow-vendor-change-option-wit.patch
+ * force-zyppnotify-to-prefer-packages.db-than-packages.patch
+
+-------------------------------------------------------------------
+Tue Dec 8 14:56:28 UTC 2020 - Jochen Breuer
+
+- Add pkg.services_need_restart
+- Bigvm backports:
+ virt consoles, CPU tuning and topology, and memory tuning.
+- Fix for file.check_perms to work with numeric uid/gid
+
+- Added:
+ * add-pkg.services_need_restart-302.patch
+ * fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
+ * opensuse-3000.3-bigvm-backports-303.patch
+
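For the pkg.services_need_restart entry above, a minimal usage sketch through Salt's Python client API; the "*" target glob and the result printing are illustrative assumptions, not part of the packaged change:

```python
# Hedged sketch: ask minions which services should be restarted because a
# package upgrade replaced files they still have loaded.
# salt.client.LocalClient is Salt's standard Python entry point; it assumes
# a running master with connected minions.
import salt.client

local = salt.client.LocalClient()
ret = local.cmd("*", "pkg.services_need_restart")
for minion, services in ret.items():
    print(minion, services)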
+-------------------------------------------------------------------
+Fri Nov 27 10:47:14 UTC 2020 - Victor Zhestkov
+
+- Change 'Requires(pre)' to 'Requires' for salt-minion package (bsc#1083110)
+
-------------------------------------------------------------------
Mon Nov 16 09:48:45 UTC 2020 - Pablo Suárez Hernández
diff --git a/salt.spec b/salt.spec
index 5ec08a9..49be134 100644
--- a/salt.spec
+++ b/salt.spec
@@ -15,33 +15,6 @@
# Please submit bugfixes or comments via http://bugs.opensuse.org/
#
%global debug_package %{nil}
-%if 0%{?suse_version} > 1500
-%global build_py3 1
-%global build_py2 0
-%global default_py3 1
-%else
-%if 0%{?suse_version} >= 1500
-# SLE15
-%global build_py3 1
-%global build_py2 1
-%global default_py3 1
-%else
-%if 0%{?suse_version} == 1315
-# SLE12
-%global build_py3 1
-%global build_py2 1
-%else
-%if 0%{?rhel} == 7
-# RES7
-%global build_py2 1
-%else
-%global build_py3 1
-%global default_py3 1
-%endif
-%endif
-%endif
-%endif
-%define pythonX %{?default_py3: python3}%{!?default_py3: python2}
%if 0%{?suse_version} > 1210 || 0%{?rhel} >= 7 || 0%{?fedora} >=28
%bcond_without systemd
@@ -63,7 +36,7 @@
%bcond_with builddocs
Name: salt
-Version: 3000.3
+Version: 3002.2
Release: 0
Summary: A parallel remote execution system
License: Apache-2.0
@@ -132,256 +105,249 @@ Patch28: get-os_arch-also-without-rpm-package-installed.patch
Patch29: make-aptpkg.list_repos-compatible-on-enabled-disable.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50453
Patch30: debian-info_installed-compatibility-50453.patch
-# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50742
-Patch31: decide-if-the-source-should-be-actually-skipped.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50773
-Patch32: add-hold-unhold-functions.patch
+Patch31: add-hold-unhold-functions.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50401
# NOTE: This is a techpreview as well as in Fluorine! Release only in Neon.
-Patch33: add-supportconfig-module-for-remote-calls-and-saltss.patch
+Patch32: add-supportconfig-module-for-remote-calls-and-saltss.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/116
-Patch34: return-the-expected-powerpc-os-arch-bsc-1117995.patch
+Patch33: return-the-expected-powerpc-os-arch-bsc-1117995.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51108
-Patch35: remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
+Patch34: remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51119
-Patch36: fix-issue-2068-test.patch
+Patch35: fix-issue-2068-test.patch
# PATCH_FIX_OPENSUSE: Temporary fix allowing "id_" and "force" params while upstream figures it out
-Patch37: temporary-fix-extend-the-whitelist-of-allowed-comman.patch
+Patch36: temporary-fix-extend-the-whitelist-of-allowed-comman.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51382
-Patch38: don-t-call-zypper-with-more-than-one-no-refresh.patch
+Patch37: don-t-call-zypper-with-more-than-one-no-refresh.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50109
# PATCH_FIX_OPENSUSE https://github.com/openSUSE/salt/pull/121
-Patch39: add-virt.all_capabilities.patch
+Patch38: add-virt.all_capabilities.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51384
-Patch40: include-aliases-in-the-fqdns-grains.patch
+Patch39: include-aliases-in-the-fqdns-grains.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50546
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51863
-Patch41: async-batch-implementation.patch
+Patch40: async-batch-implementation.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52527
-Patch42: calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
+Patch41: calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
#PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/139
-Patch43: fix-async-batch-race-conditions.patch
+Patch42: fix-async-batch-race-conditions.patch
#PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/141
-Patch44: add-batch_presence_ping_timeout-and-batch_presence_p.patch
-#PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/52657
-Patch45: do-not-report-patches-as-installed-when-not-all-the-.patch
+Patch43: add-batch_presence_ping_timeout-and-batch_presence_p.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52527
-Patch46: use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
+Patch44: use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52888
-Patch47: do-not-crash-when-there-are-ipv6-established-connect.patch
+Patch45: do-not-crash-when-there-are-ipv6-established-connect.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/144
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/52855
-Patch48: fix-async-batch-multiple-done-events.patch
+Patch46: fix-async-batch-multiple-done-events.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/52743
-Patch49: switch-firewalld-state-to-use-change_interface.patch
+Patch47: switch-firewalld-state-to-use-change_interface.patch
# PATCH-FIX_OPENSUSE
-Patch50: add-standalone-configuration-file-for-enabling-packa.patch
+Patch48: add-standalone-configuration-file-for-enabling-packa.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53293
-Patch51: do-not-break-repo-files-with-multiple-line-values-on.patch
+Patch49: do-not-break-repo-files-with-multiple-line-values-on.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53159
-Patch52: batch.py-avoid-exception-when-minion-does-not-respon.patch
+Patch50: batch.py-avoid-exception-when-minion-does-not-respon.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53471
-Patch53: fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
+Patch51: fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/161
-Patch54: provide-the-missing-features-required-for-yomi-yet-o.patch
+Patch52: provide-the-missing-features-required-for-yomi-yet-o.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53661
-Patch55: do-not-make-ansiblegate-to-crash-on-python3-minions.patch
+Patch53: do-not-make-ansiblegate-to-crash-on-python3-minions.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53693
-Patch56: allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
+Patch54: allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53661
-Patch57: prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
+Patch55: prevent-ansiblegate-unit-tests-to-fail-on-ubuntu.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54048
-Patch58: avoid-traceback-when-http.query-request-cannot-be-pe.patch
+Patch56: avoid-traceback-when-http.query-request-cannot-be-pe.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53992
# https://github.com/saltstack/salt/pull/53996
# https://github.com/saltstack/salt/pull/54022
# https://github.com/saltstack/salt/pull/54024
-Patch59: accumulated-changes-required-for-yomi-165.patch
+Patch57: accumulated-changes-required-for-yomi-165.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/159
-Patch60: move-server_id-deprecation-warning-to-reduce-log-spa.patch
+Patch58: move-server_id-deprecation-warning-to-reduce-log-spa.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54077
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/166
-Patch61: fix-aptpkg-systemd-call-bsc-1143301.patch
+Patch59: fix-aptpkg-systemd-call-bsc-1143301.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/170
-Patch62: strip-trailing-from-repo.uri-when-comparing-repos-in.patch
+Patch60: strip-trailing-from-repo.uri-when-comparing-repos-in.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/177
-Patch63: restore-default-behaviour-of-pkg-list-return.patch
+Patch61: restore-default-behaviour-of-pkg-list-return.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/172
-Patch64: implement-network.fqdns-module-function-bsc-1134860-.patch
+Patch62: implement-network.fqdns-module-function-bsc-1134860-.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/6af07030a502c427781991fc9a2b994fa04ef32e
-Patch65: fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
+Patch63: fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/002543df392f65d95dbc127dc058ac897f2035ed
-Patch66: improve-batch_async-to-release-consumed-memory-bsc-1.patch
+Patch64: improve-batch_async-to-release-consumed-memory-bsc-1.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54077
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/44a91c2ce6df78d93ce0ef659dedb0e41b1c2e04
-Patch67: prevent-systemd-run-description-issue-when-running-a.patch
+Patch65: prevent-systemd-run-description-issue-when-running-a.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/55d8a777d6a9b19c959e14a4060e5579e92cd106
-Patch68: use-current-ioloop-for-the-localclient-instance-of-b.patch
+Patch66: use-current-ioloop-for-the-localclient-instance-of-b.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/8378bb24a5a53973e8dba7658b8b3465d967329f
-Patch69: fix-failing-unit-tests-for-batch-async.patch
-# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54935
-Patch70: add-missing-fun-for-returns-from-wfunc-executions.patch
+Patch67: fix-failing-unit-tests-for-batch-async.patch
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53326
# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54954
-Patch71: accumulated-changes-from-yomi-167.patch
+Patch68: accumulated-changes-from-yomi-167.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/180
-Patch72: fix-a-wrong-rebase-in-test_core.py-180.patch
+Patch69: fix-a-wrong-rebase-in-test_core.py-180.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/182
-Patch73: remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
+Patch70: remove-unnecessary-yield-causing-badyielderror-bsc-1.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/186
-Patch74: read-repo-info-without-using-interpolation-bsc-11356.patch
+Patch71: read-repo-info-without-using-interpolation-bsc-11356.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53293
-Patch75: prevent-test_mod_del_repo_multiline_values-to-fail.patch
-Patch76: fix-for-log-checking-in-x509-test.patch
+Patch72: prevent-test_mod_del_repo_multiline_values-to-fail.patch
+Patch73: fix-for-log-checking-in-x509-test.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/190
-Patch77: fixing-streamclosed-issue.patch
-Patch78: fix-batch_async-obsolete-test.patch
+Patch74: fixing-streamclosed-issue.patch
+Patch75: fix-batch_async-obsolete-test.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/191
-Patch79: let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
+Patch76: let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/193
-Patch80: xfs-do-not-fails-if-type-is-not-present.patch
+Patch77: xfs-do-not-fails-if-type-is-not-present.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/55245
-Patch81: virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
+Patch78: virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/200
-Patch82: support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
+Patch79: support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56125
-Patch83: add-astra-linux-common-edition-to-the-os-family-list.patch
+Patch80: add-astra-linux-common-edition-to-the-os-family-list.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/211
-Patch84: apply-patch-from-upstream-to-support-python-3.8.patch
+Patch81: apply-patch-from-upstream-to-support-python-3.8.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/217
-Patch85: batch_async-avoid-using-fnmatch-to-match-event-217.patch
+Patch82: batch_async-avoid-using-fnmatch-to-match-event-217.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/8a23030d347b7487328c0395f5e30ef29daf1455
-Patch86: batch-async-catch-exceptions-and-safety-unregister-a.patch
+Patch83: batch-async-catch-exceptions-and-safety-unregister-a.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/a38adfa2efe40c2b1508b685af0b5d28a6bbcfc8
-Patch87: fix-unit-tests-for-batch-async-after-refactor.patch
-# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/218
-Patch88: use-full-option-name-instead-of-undocumented-abbrevi.patch
-# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/93c0630b84b9da89acaf549a5c79e5d834c70a65
-Patch89: removes-unresolved-merge-conflict-in-yumpkg-module.patch
+Patch84: fix-unit-tests-for-batch-async-after-refactor.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/b4c401cfe6031b61e27f7795bfa1aca6e8341e52
-Patch90: changed-imports-to-vendored-tornado.patch
-# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/082fa07e5301414b5b834b731aaa96bd5d966de7
-Patch91: add-missing-_utils-at-loader-grains_func.patch
+Patch85: changed-imports-to-vendored-tornado.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/25b4e3ea983b2606b2fb3d3c0e42f9840208bf84
-Patch92: remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
+Patch86: remove-deprecated-usage-of-no_mock-and-no_mock_reaso.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/a8f0a15e4067ec278c8a2d690e3bf815523286ca
-Patch93: fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56369
-Patch94: make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch
-# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/221
-Patch95: loader-invalidate-the-import-cachefor-extra-modules.patch
+Patch87: fix-wrong-test_mod_del_repo_multiline_values-test-af.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/55814
-Patch96: opensuse-3000-virt-defined-states-222.patch
+Patch88: opensuse-3000-virt-defined-states-222.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/223
-Patch97: fix-for-temp-folder-definition-in-loader-unit-test.patch
+Patch89: fix-for-temp-folder-definition-in-loader-unit-test.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56392
-Patch98: virt._get_domain-don-t-raise-an-exception-if-there-i.patch
+Patch90: virt._get_domain-don-t-raise-an-exception-if-there-i.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/226
-Patch99: re-adding-function-to-test-for-root.patch
+Patch91: re-adding-function-to-test-for-root.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/227
-Patch100: loop-fix-variable-names-for-until_no_eval.patch
+Patch92: loop-fix-variable-names-for-until_no_eval.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/226
-Patch101: make-setup.py-script-to-not-require-setuptools-9.1.patch
+Patch93: make-setup.py-script-to-not-require-setuptools-9.1.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/50453
# https://github.com/saltstack/salt/commit/e20362f6f053eaa4144583604e6aac3d62838419
# Can be dropped once pull/50453 is in a released version.
-Patch102: reintroducing-reverted-changes.patch
+Patch94: reintroducing-reverted-changes.patch
# PATCH_FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/b713d0b3031faadc17cd9cf09977ccc19e50bef7
-Patch103: add-new-custom-suse-capability-for-saltutil-state-mo.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56463
-Patch104: fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch
+Patch95: add-new-custom-suse-capability-for-saltutil-state-mo.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56491
-Patch105: sanitize-grains-loaded-from-roster_grains.json.patch
+Patch96: sanitize-grains-loaded-from-roster_grains.json.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/228
-Patch106: adds-explicit-type-cast-for-port.patch
+Patch97: adds-explicit-type-cast-for-port.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/53882
-Patch107: fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
+Patch98: fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/4f80e969e31247a4755d98d25f29b5d8b1b916c3
-Patch108: remove-vendored-backports-abc-from-requirements.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57119
-Patch109: make-lazyloader.__init__-call-to-_refresh_file_mappi.patch
+Patch99: remove-vendored-backports-abc-from-requirements.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57123
-Patch110: prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57122
-Patch111: msgpack-support-versions-1.0.0.patch
+Patch100: prevent-logging-deadlock-on-salt-api-subprocesses-bs.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/235
-Patch112: python3.8-compatibility-pr-s-235.patch
+Patch101: python3.8-compatibility-pr-s-235.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56419
-Patch113: option-to-en-disable-force-refresh-in-zypper-215.patch
+Patch102: option-to-en-disable-force-refresh-in-zypper-215.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/229
-Patch114: fix-a-test-and-some-variable-names-229.patch
+Patch103: fix-a-test-and-some-variable-names-229.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56439
-Patch115: add-docker-logout-237.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/56595
-Patch116: fix-for-return-value-ret-vs-return-in-batch-mode.patch
+Patch104: add-docker-logout-237.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57392
-Patch117: zypperpkg-filter-patterns-that-start-with-dot-244.patch
+Patch105: zypperpkg-filter-patterns-that-start-with-dot-244.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/da936daeebd701e147707ad814c07bfc259d4be
-Patch118: add-publish_batch-to-clearfuncs-exposed-methods.patch
-# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57489
-Patch119: avoid-has_docker-true-if-import-messes-with-salt.uti.patch
+Patch106: add-publish_batch-to-clearfuncs-exposed-methods.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57779
-Patch120: info_installed-works-without-status-attr-now.patch
+Patch107: info_installed-works-without-status-attr-now.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57491
-Patch121: opensuse-3000.3-spacewalk-runner-parse-command-250.patch
+Patch108: opensuse-3000.3-spacewalk-runner-parse-command-250.patch
# PATCH-FIX_UPSTREAM: https://github.com/openSUSE/salt/pull/251
-Patch122: opensuse-3000-libvirt-engine-fixes-251.patch
+Patch109: opensuse-3000-libvirt-engine-fixes-251.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58013
-Patch123: fix-__mount_device-wrapper-254.patch
+Patch110: fix-__mount_device-wrapper-254.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58214
-Patch124: ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
+Patch111: ansiblegate-take-care-of-failed-skipped-and-unreacha.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58301
-Patch125: do-not-raise-streamclosederror-traceback-but-only-lo.patch
+Patch112: do-not-raise-streamclosederror-traceback-but-only-lo.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/257
-Patch126: opensuse-3000.2-virt-backports-236-257.patch
+Patch113: opensuse-3000.2-virt-backports-236-257.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/256
-Patch127: backport-virt-patches-from-3001-256.patch
+Patch114: backport-virt-patches-from-3001-256.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/262
-Patch128: fix-the-removed-six.itermitems-and-six.-_type-262.patch
+Patch115: fix-the-removed-six.itermitems-and-six.-_type-262.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/263
-Patch129: fix-virt.update-with-cpu-defined-263.patch
+Patch116: fix-virt.update-with-cpu-defined-263.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/261
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/57571
-Patch130: remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
+Patch117: remove-msgpack-1.0.0-requirement-in-the-installed-me.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/bc20f38d0fa492af70321fef7fe2530937dfc86a
-Patch131: prevent-import-errors-when-running-test_btrfs-unit-t.patch
+Patch118: prevent-import-errors-when-running-test_btrfs-unit-t.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58529
-Patch132: invalidate-file-list-cache-when-cache-file-modified-.patch
+Patch119: invalidate-file-list-cache-when-cache-file-modified-.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58400
-Patch133: xen-disk-fixes-264.patch
+Patch120: xen-disk-fixes-264.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58552
-Patch134: zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
+Patch121: zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58520
-Patch135: support-transactional-systems-microos-271.patch
+Patch122: support-transactional-systems-microos-271.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/272
-Patch136: backport-a-few-virt-prs-272.patch
+Patch123: backport-a-few-virt-prs-272.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/275
-Patch137: bsc-1176024-fix-file-directory-user-and-group-owners.patch
+Patch124: bsc-1176024-fix-file-directory-user-and-group-owners.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/277
-Patch138: fix-grains.test_core-unit-test-277.patch
+Patch125: fix-grains.test_core-unit-test-277.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/e2c3b1cb72b796fe12f94af64baa2e64cbe5db0b
-Patch139: drop-wrong-mock-from-chroot-unit-test.patch
+Patch126: drop-wrong-mock-from-chroot-unit-test.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/280
-Patch140: ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
+Patch127: ensure-virt.update-stop_on_reboot-is-updated-with-it.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/281
-Patch141: path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
+Patch128: path-replace-functools.wraps-with-six.wraps-bsc-1177.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58560
-Patch142: fix-novendorchange-option-284.patch
+Patch129: fix-novendorchange-option-284.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58871
-Patch143: fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
-# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/293
-Patch144: set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch
-# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/279
-Patch145: fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch
+Patch130: fix-cve-2020-25592-and-add-tests-bsc-1178319.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58520
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/286
-Patch146: grains-master-can-read-grains.patch
+Patch131: grains-master-can-read-grains.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58784
-Patch147: add-migrated-state-and-gpg-key-management-functions-.patch
+Patch132: add-migrated-state-and-gpg-key-management-functions-.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/292
-Patch148: transactional_update-unify-with-chroot.call.patch
+Patch133: transactional_update-unify-with-chroot.call.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/295
-Patch149: pkgrepo-support-python-2.7-function-call-295.patch
+Patch134: pkgrepo-support-python-2.7-function-call-295.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/79ae019ac7515614c6fbc620e66575f015bc447
+Patch135: drop-wrong-virt-capabilities-code-after-rebasing-pat.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/5ea2f10b15684dd417bad858642faafc92cd382
+# (revert https://github.com/saltstack/salt/pull/58655)
+Patch136: revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/59269
+Patch137: fix-aptpkg.normalize_name-when-package-arch-is-all.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/310
+Patch138: open-suse-3002.2-bigvm-310.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/59146
+Patch139: open-suse-3002.2-virt-network-311.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/298
+Patch140: fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58262
+Patch141: add-pkg.services_need_restart-302.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/301
+Patch142: add-patch-support-for-allow-vendor-change-option-wit.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/304
+Patch143: force-zyppnotify-to-prefer-packages.db-than-packages.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/4028fd6e84d882b6dcee695d409c7e1ed6c83bdc
+Patch144: revert-add-patch-support-for-allow-vendor-change-opt.patch
+# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/73e357d7eee19a73cade22becb30d9689cae27ba
+Patch145: remove-deprecated-warning-that-breaks-miniion-execut.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-build
BuildRequires: logrotate
@@ -389,7 +355,7 @@ BuildRequires: logrotate
BuildRequires: fdupes
%endif
-Requires: %{pythonX}-%{name} = %{version}-%{release}
+Requires: python3-%{name} = %{version}-%{release}
Requires(pre): %{_sbindir}/groupadd
Requires(pre): %{_sbindir}/useradd
@@ -456,107 +422,6 @@ malleable. Salt accomplishes this via its ability to handle larger loads of
information, and not just dozens, but hundreds or even thousands of individual
servers, handle them quickly and through a simple and manageable interface.
-%if 0%{?build_py2}
-%package -n python2-salt
-Summary: python2 library for salt
-Group: System/Management
-Requires: %{name} = %{version}-%{release}
-BuildRequires: python >= 2.7
-BuildRequires: python-devel >= 2.7
-BuildRequires: python-setuptools
-# requirements/base.txt
-%if 0%{?rhel} || 0%{?fedora}
-BuildRequires: python-jinja2
-BuildRequires: python-yaml
-BuildRequires: python-markupsafe
-%else
-BuildRequires: python-Jinja2
-BuildRequires: python-PyYAML
-BuildRequires: python-MarkupSafe
-%endif
-
-BuildRequires: python-futures >= 2.0
-BuildRequires: python-msgpack-python > 0.3
-BuildRequires: python-psutil
-BuildRequires: python-requests >= 1.0.0
-BuildRequires: python-singledispatch
-
-# requirements/zeromq.txt
-%if 0%{?suse_version} >= 1500
-BuildRequires: python2-M2Crypto
-%else
-BuildRequires: python-pycrypto >= 2.6.1
-%endif
-BuildRequires: python-pyzmq >= 2.2.0
-%if %{with test}
-# requirements/dev_python27.txt
-BuildRequires: python-boto >= 2.32.1
-BuildRequires: python-mock
-BuildRequires: python-moto >= 0.3.6
-BuildRequires: python-pip
-BuildRequires: python-salt-testing >= 2015.2.16
-BuildRequires: python-unittest2
-BuildRequires: python-xml
-%endif
-%if %{with builddocs}
-BuildRequires: python-sphinx
-%endif
-Requires: python >= 2.7
-#
-%if ! 0%{?suse_version} > 1110
-Requires: python-certifi
-%endif
-# requirements/base.txt
-%if 0%{?rhel} || 0%{?fedora}
-Requires: python-jinja2
-Requires: python-yaml
-Requires: python-markupsafe
-Requires: yum
-%if 0%{?rhel} == 6
-Requires: yum-plugin-security
-%endif
-%else
-Requires: python-Jinja2
-Requires: python-PyYAML
-Requires: python-MarkupSafe
-%endif
-
-Requires: python-futures >= 2.0
-Requires: python-msgpack-python > 0.3
-Requires: python-psutil
-Requires: python-requests >= 1.0.0
-Requires: python-singledispatch
-%if 0%{?suse_version}
-# required for zypper.py
-Requires: rpm-python
-Requires(pre): libzypp(plugin:system) >= 0
-Requires: zypp-plugin-python
-# requirements/opt.txt (not all)
-# Suggests: python-MySQL-python ## Disabled for now, originally Recommended
-Suggests: python-timelib
-Suggests: python-gnupg
-# requirements/zeromq.txt
-%endif
-%if 0%{?suse_version} >= 1500
-Requires: python2-M2Crypto
-%else
-Requires: python-pycrypto >= 2.6.1
-%endif
-Requires: python-pyzmq >= 2.2.0
-#
-%if 0%{?suse_version}
-# python-xml is part of python-base in all rhel versions
-Requires: python-xml
-Suggests: python-Mako
-Recommends: python-netaddr
-%endif
-
-%description -n python2-salt
-Python2 specific files for salt
-
-%endif
-
-%if 0%{?build_py3}
%package -n python3-salt
Summary: python3 library for salt
Group: System/Management
@@ -594,7 +459,6 @@ BuildRequires: python3-requests >= 1.0.0
# requirements/zeromq.txt
%if %{with test}
-# requirements/dev_python27.txt
BuildRequires: python3-boto >= 2.32.1
BuildRequires: python3-mock
BuildRequires: python3-moto >= 0.3.6
@@ -667,18 +531,12 @@ Recommends: python3-netaddr
%description -n python3-salt
Python3 specific files for salt
-%endif
-
%package api
Summary: The api for Salt a parallel remote execution system
Group: System/Management
Requires: %{name} = %{version}-%{release}
Requires: %{name}-master = %{version}-%{release}
-%if 0%{?default_py3}
Requires: python3-CherryPy >= 3.2.2
-%else
-Requires: python-CherryPy >= 3.2.2
-%endif
%description api
salt-api is a modular interface on top of Salt that can provide a variety of entry points into a running Salt system.
@@ -745,7 +603,7 @@ than serially.
%package minion
Summary: The client component for Saltstack
Group: System/Management
-Requires(pre): %{name} = %{version}-%{release}
+Requires: %{name} = %{version}-%{release}
%if %{with systemd}
%{?systemd_requires}
@@ -1034,10 +892,6 @@ cp %{S:5} ./.travis.yml
%patch143 -p1
%patch144 -p1
%patch145 -p1
-%patch146 -p1
-%patch147 -p1
-%patch148 -p1
-%patch149 -p1
%build
# Putting /usr/bin at the front of $PATH is needed for RHEL/RES 7. Without this
@@ -1046,16 +900,9 @@ cp %{S:5} ./.travis.yml
%if 0%{?fedora} || 0%{?rhel}
export PATH=/usr/bin:$PATH
%endif
-%if 0%{?build_py2}
-python setup.py --with-salt-version=%{version} --salt-transport=both build
-cp ./build/lib/salt/_version.py ./salt
-mv build _build.python2
-%endif
-%if 0%{?build_py3}
python3 setup.py --with-salt-version=%{version} --salt-transport=both build
cp ./build/lib/salt/_version.py ./salt
mv build _build.python3
-%endif
%if %{with docs} && %{without builddocs}
# extract docs from the tarball
@@ -1071,22 +918,11 @@ cd doc && make html && rm _build/html/.buildinfo && rm _build/html/_images/proxy
%endif
%install
-%if 0%{?build_py2}
-mv _build.python2 build
-python setup.py --salt-transport=both install --prefix=%{_prefix} --root=%{buildroot}
-mv build _build.python2
-%endif
-%if 0%{?build_py3}
mv _build.python3 build
python3 setup.py --salt-transport=both install --prefix=%{_prefix} --root=%{buildroot}
mv build _build.python3
-%endif
-%if 0%{?default_py3}
DEF_PYPATH=_build.python3/scripts-*/
-%else
-DEF_PYPATH=_build.python2/scripts-*/
-%endif
rm -f %{buildroot}%{_bindir}/*
for script in $DEF_PYPATH/*; do
@@ -1133,22 +969,15 @@ install -Dd -m 0750 %{buildroot}%{_sysconfdir}/salt/pki/master/minions_rejected
install -Dd -m 0750 %{buildroot}%{_sysconfdir}/salt/pki/minion
# Install salt-support profiles
-%if 0%{?build_py2}
-install -Dpm 0644 salt/cli/support/profiles/* %{buildroot}%{python_sitelib}/salt/cli/support/profiles
-%endif
-%if 0%{?build_py3}
install -Dpm 0644 salt/cli/support/profiles/* %{buildroot}%{python3_sitelib}/salt/cli/support/profiles
-%endif
## Install Zypper plugins only on SUSE machines
%if 0%{?suse_version}
install -Dd -m 0750 %{buildroot}%{_prefix}/lib/zypp/plugins/commit
%{__install} scripts/suse/zypper/plugins/commit/zyppnotify %{buildroot}%{_prefix}/lib/zypp/plugins/commit/zyppnotify
-%if 0%{?default_py3}
sed -i '1s=^#!/usr/bin/\(python\|env python\)[0-9.]*=#!/usr/bin/python3=' %{buildroot}%{_prefix}/lib/zypp/plugins/commit/zyppnotify
%endif
-%endif
# Install Yum plugins only on RH machines
%if 0%{?fedora} || 0%{?rhel}
@@ -1235,21 +1064,12 @@ install -Dpm 0640 conf/suse/standalone-formulas-configuration.conf %{buildroot}%
%if 0%{?suse_version} > 1020
%fdupes %{buildroot}%{_docdir}
-%if 0%{?build_py2}
-%fdupes %{buildroot}%{python_sitelib}
-%endif
-%if 0%{?build_py3}
%fdupes %{buildroot}%{python3_sitelib}
%endif
-%endif
%check
%if %{with test}
-%if 0%{?default_py3}
python3 setup.py test --runtests-opts=-u
-%else
-python setup.py test --runtests-opts=-u
-%endif
%endif
%pre
@@ -1555,19 +1375,10 @@ fi
%endif
%endif
-%if 0%{?build_py2}
-%posttrans -n python2-salt
-# force re-generate a new thin.tgz
-rm -f %{_localstatedir}/cache/salt/master/thin/version
-rm -f %{_localstatedir}/cache/salt/minion/thin/version
-%endif
-
-%if 0%{?build_py3}
%posttrans -n python3-salt
# force re-generate a new thin.tgz
rm -f %{_localstatedir}/cache/salt/master/thin/version
rm -f %{_localstatedir}/cache/salt/minion/thin/version
-%endif
%files api
%defattr(-,root,root)
@@ -1590,11 +1401,7 @@ rm -f %{_localstatedir}/cache/salt/minion/thin/version
%config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/cloud.profiles
%config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/cloud.providers
%dir %attr(0750, root, salt) %{_localstatedir}/cache/salt/cloud
-%if 0%{?default_py3}
%attr(755,root,root)%{python3_sitelib}/salt/cloud/deploy/bootstrap-salt.sh
-%else
-%attr(755,root,root)%{python_sitelib}/salt/cloud/deploy/bootstrap-salt.sh
-%endif
%{_mandir}/man1/salt-cloud.1.*
%files ssh
@@ -1723,19 +1530,10 @@ rm -f %{_localstatedir}/cache/salt/minion/thin/version
%endif
%{_mandir}/man1/salt.1.*
-%if 0%{?build_py2}
-%files -n python2-salt
-%defattr(-,root,root,-)
-%{python_sitelib}/*
-%exclude %{python_sitelib}/salt/cloud/deploy/*.sh
-%endif
-
-%if 0%{?build_py3}
%files -n python3-salt
%defattr(-,root,root,-)
%{python3_sitelib}/*
%exclude %{python3_sitelib}/salt/cloud/deploy/*.sh
-%endif
%if %{with docs}
%files doc
diff --git a/sanitize-grains-loaded-from-roster_grains.json.patch b/sanitize-grains-loaded-from-roster_grains.json.patch
index fc7ff42..e597f59 100644
--- a/sanitize-grains-loaded-from-roster_grains.json.patch
+++ b/sanitize-grains-loaded-from-roster_grains.json.patch
@@ -1,4 +1,4 @@
-From 83a2a79ed3834a1cfd90941d0075d1c38341dc1d Mon Sep 17 00:00:00 2001
+From 2ae9fa97c88889a1a99f0ccd43aea0fe996aad7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Wed, 1 Apr 2020 12:27:30 +0100
@@ -6,48 +6,508 @@ Subject: [PATCH] Sanitize grains loaded from roster_grains.json
Ensure _format_cached_grains is called on state.pkg test
---
- salt/modules/state.py | 3 ++-
- tests/unit/modules/test_state.py | 4 +++-
- 2 files changed, 5 insertions(+), 2 deletions(-)
+ salt/modules/state.py | 76 +++++++++++++++-----------------
+ tests/unit/modules/test_state.py | 57 ++++++++++--------------
+ 2 files changed, 59 insertions(+), 74 deletions(-)
diff --git a/salt/modules/state.py b/salt/modules/state.py
-index ec1e1edb42e9d8d5bc1e991434eb187e3b65ab89..a4f3f8c37086a79a60f85b5ca4b71d2af1e1f90f 100644
+index 1c864f7504..b439f79e57 100644
--- a/salt/modules/state.py
+++ b/salt/modules/state.py
-@@ -43,6 +43,7 @@ import salt.defaults.exitcodes
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Control the state system on the minion.
+
+@@ -11,8 +10,6 @@ highdata and won't hit the fileserver except for ``salt://`` links in the
+ states themselves.
+ """
+
+-# Import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import os
+@@ -22,7 +19,6 @@ import tarfile
+ import tempfile
+ import time
+
+-# Import salt libs
+ import salt.config
+ import salt.defaults.exitcodes
+ import salt.payload
+@@ -42,8 +38,6 @@ import salt.utils.stringutils
+ import salt.utils.url
+ import salt.utils.versions
from salt.exceptions import CommandExecutionError, SaltInvocationError
- from salt.runners.state import orchestrate as _orchestrate
- from salt.utils.odict import OrderedDict
-+from salt.loader import _format_cached_grains
-
- # Import 3rd-party libs
+-
+-# Import 3rd-party libs
from salt.ext import six
-@@ -2188,7 +2189,7 @@ def pkg(pkg_path,
- roster_grains_json = os.path.join(root, 'roster_grains.json')
- if os.path.isfile(roster_grains_json):
- with salt.utils.files.fopen(roster_grains_json, 'r') as fp_:
-- roster_grains = salt.utils.json.load(fp_)
-+ roster_grains = _format_cached_grains(salt.utils.json.load(fp_))
+ from salt.loader import _format_cached_grains
+ from salt.runners.state import orchestrate as _orchestrate
+@@ -89,11 +83,11 @@ def _filter_running(runnings):
+ """
+ Filter out the result: True + no changes data
+ """
+- ret = dict(
+- (tag, value)
+- for tag, value in six.iteritems(runnings)
++ ret = {
++ tag: value
++ for tag, value in runnings.items()
+ if not value["result"] or value["changes"]
+- )
++ }
+ return ret
- if os.path.isfile(roster_grains_json):
- popts['grains'] = roster_grains
+
+@@ -151,7 +145,7 @@ def _snapper_pre(opts, jid):
+ snapper_pre = __salt__["snapper.create_snapshot"](
+ config=__opts__.get("snapper_states_config", "root"),
+ snapshot_type="pre",
+- description="Salt State run for jid {0}".format(jid),
++ description="Salt State run for jid {}".format(jid),
+ __pub_jid=jid,
+ )
+ except Exception: # pylint: disable=broad-except
+@@ -170,7 +164,7 @@ def _snapper_post(opts, jid, pre_num):
+ config=__opts__.get("snapper_states_config", "root"),
+ snapshot_type="post",
+ pre_number=pre_num,
+- description="Salt State run for jid {0}".format(jid),
++ description="Salt State run for jid {}".format(jid),
+ __pub_jid=jid,
+ )
+ except Exception: # pylint: disable=broad-except
+@@ -216,7 +210,7 @@ def get_pauses(jid=None):
+ elif isinstance(jid, list):
+ jids = salt.utils.data.stringify(jid)
+ else:
+- jids = [six.text_type(jid)]
++ jids = [str(jid)]
+ for scan_jid in jids:
+ is_active = False
+ for active_data in active:
+@@ -260,7 +254,7 @@ def soft_kill(jid, state_id=None):
+ salt '*' state.soft_kill 20171130110407769519
+ salt '*' state.soft_kill 20171130110407769519 vim
+ """
+- jid = six.text_type(jid)
++ jid = str(jid)
+ if state_id is None:
+ state_id = "__all__"
+ data, pause_path = _get_pause(jid, state_id)
+@@ -294,7 +288,7 @@ def pause(jid, state_id=None, duration=None):
+ salt '*' state.pause 20171130110407769519 vim
+ salt '*' state.pause 20171130110407769519 vim 20
+ """
+- jid = six.text_type(jid)
++ jid = str(jid)
+ if state_id is None:
+ state_id = "__all__"
+ data, pause_path = _get_pause(jid, state_id)
+@@ -326,7 +320,7 @@ def resume(jid, state_id=None):
+ salt '*' state.resume 20171130110407769519
+ salt '*' state.resume 20171130110407769519 vim
+ """
+- jid = six.text_type(jid)
++ jid = str(jid)
+ if state_id is None:
+ state_id = "__all__"
+ data, pause_path = _get_pause(jid, state_id)
+@@ -387,8 +381,8 @@ def running(concurrent=False):
+ active = __salt__["saltutil.is_running"]("state.*")
+ for data in active:
+ err = (
+- 'The function "{0}" is running as PID {1} and was started at '
+- "{2} with jid {3}"
++ 'The function "{}" is running as PID {} and was started at '
++ "{} with jid {}"
+ ).format(
+ data["fun"],
+ data["pid"],
+@@ -850,10 +844,10 @@ def request(mods=None, **kwargs):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+- __salt__["cmd.run"]('attrib -R "{0}"'.format(notify_path))
++ __salt__["cmd.run"]('attrib -R "{}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, "w+b") as fp_:
+ serial.dump(req, fp_)
+- except (IOError, OSError):
++ except OSError:
+ log.error(
+ "Unable to write state request file %s. Check permission.", notify_path
+ )
+@@ -902,7 +896,7 @@ def clear_request(name=None):
+ if not name:
+ try:
+ os.remove(notify_path)
+- except (IOError, OSError):
++ except OSError:
+ pass
+ else:
+ req = check_request()
+@@ -914,10 +908,10 @@ def clear_request(name=None):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+- __salt__["cmd.run"]('attrib -R "{0}"'.format(notify_path))
++ __salt__["cmd.run"]('attrib -R "{}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, "w+b") as fp_:
+ serial.dump(req, fp_)
+- except (IOError, OSError):
++ except OSError:
+ log.error(
+ "Unable to write state request file %s. Check permission.",
+ notify_path,
+@@ -950,7 +944,7 @@ def run_request(name="default", **kwargs):
+ ret = apply_(n_req["mods"], **n_req["kwargs"])
+ try:
+ os.remove(os.path.join(__opts__["cachedir"], "req_state.p"))
+- except (IOError, OSError):
++ except OSError:
+ pass
+ return ret
+ return {}
+@@ -1319,7 +1313,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
+ serial = salt.payload.Serial(__opts__)
+ cfn = os.path.join(
+ __opts__["cachedir"],
+- "{0}.cache.p".format(kwargs.get("cache_name", "highstate")),
++ "{}.cache.p".format(kwargs.get("cache_name", "highstate")),
+ )
+
+ if sync_mods is True:
+@@ -1335,7 +1329,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
+
+ for module_type in sync_mods:
+ try:
+- __salt__["saltutil.sync_{0}".format(module_type)](saltenv=opts["saltenv"])
++ __salt__["saltutil.sync_{}".format(module_type)](saltenv=opts["saltenv"])
+ except KeyError:
+ log.warning("Invalid custom module type '%s', ignoring", module_type)
+
+@@ -1374,7 +1368,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
+ return st_.state.call_high(high_, orchestration_jid)
+
+ # If the state file is an integer, convert to a string then to unicode
+- if isinstance(mods, six.integer_types):
++ if isinstance(mods, int):
+ mods = salt.utils.stringutils.to_unicode(
+ str(mods)
+ ) # future lint: disable=blacklisted-function
+@@ -1409,7 +1403,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
+ __salt__["cmd.run"](["attrib", "-R", cache_file], python_shell=False)
+ with salt.utils.files.fopen(cache_file, "w+b") as fp_:
+ serial.dump(ret, fp_)
+- except (IOError, OSError):
++ except OSError:
+ log.error(
+ "Unable to write to SLS cache file %s. Check permission.", cache_file
+ )
+@@ -1425,7 +1419,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+- except (IOError, OSError):
++ except OSError:
+ log.error(
+ "Unable to write to highstate cache file %s. Do you have permissions?",
+ cfn,
+@@ -1830,8 +1824,8 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
+ __opts__["test"] = orig_test
+ if not ret:
+ raise SaltInvocationError(
+- "No matches for ID '{0}' found in SLS '{1}' within saltenv "
+- "'{2}'".format(id_, mods, opts["saltenv"])
++ "No matches for ID '{}' found in SLS '{}' within saltenv "
++ "'{}'".format(id_, mods, opts["saltenv"])
+ )
+ return ret
+
+@@ -2067,9 +2061,9 @@ def id_exists(ids, mods, test=None, queue=False, **kwargs):
+ """
+ ids = salt.utils.args.split_input(ids)
+ ids = set(ids)
+- sls_ids = set(
++ sls_ids = {
+ x["__id__"] for x in show_low_sls(mods, test=test, queue=queue, **kwargs)
+- )
++ }
+ return ids.issubset(sls_ids)
+
+
+@@ -2239,10 +2233,10 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
+ members = s_pkg.getmembers()
+ for member in members:
+ if salt.utils.stringutils.to_unicode(member.path).startswith(
+- (os.sep, "..{0}".format(os.sep))
++ (os.sep, "..{}".format(os.sep))
+ ):
+ return {}
+- elif "..{0}".format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
++ elif "..{}".format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
+ return {}
+ s_pkg.extractall(root)
+ s_pkg.close()
+@@ -2282,7 +2276,7 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
+ ret = st_.call_listen(lowstate, ret)
+ try:
+ shutil.rmtree(root)
+- except (IOError, OSError):
++ except OSError:
+ pass
+ _set_retcode(ret)
+ _snapper_post(popts, kwargs.get("__pub_jid", "called localy"), snapper_pre)
+@@ -2320,9 +2314,9 @@ def disable(states):
+ _changed = False
+ for _state in states:
+ if _state in _disabled:
+- msg.append("Info: {0} state already disabled.".format(_state))
++ msg.append("Info: {} state already disabled.".format(_state))
+ else:
+- msg.append("Info: {0} state disabled.".format(_state))
++ msg.append("Info: {} state disabled.".format(_state))
+ _disabled.append(_state)
+ _changed = True
+
+@@ -2370,9 +2364,9 @@ def enable(states):
+ for _state in states:
+ log.debug("_state %s", _state)
+ if _state not in _disabled:
+- msg.append("Info: {0} state already enabled.".format(_state))
++ msg.append("Info: {} state already enabled.".format(_state))
+ else:
+- msg.append("Info: {0} state enabled.".format(_state))
++ msg.append("Info: {} state enabled.".format(_state))
+ _disabled.remove(_state)
+ _changed = True
+
+@@ -2481,7 +2475,7 @@ def event(
+ if salt.utils.stringutils.expr_match(ret["tag"], tagmatch):
+ if not quiet:
+ salt.utils.stringutils.print_cli(
+- str("{0}\t{1}").format( # future lint: blacklisted-function
++ "{}\t{}".format( # future lint: blacklisted-function
+ salt.utils.stringutils.to_str(ret["tag"]),
+ salt.utils.json.dumps(
+ ret["data"],
diff --git a/tests/unit/modules/test_state.py b/tests/unit/modules/test_state.py
-index e3c3dc8fc62efa848603082c3d8f3a8f09d5c426..cda846595eeec9788d17b55fcad5cab7a49a62c2 100644
+index 157687c7e8..065b24a84d 100644
--- a/tests/unit/modules/test_state.py
+++ b/tests/unit/modules/test_state.py
-@@ -1129,8 +1129,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1,8 +1,6 @@
+-# -*- coding: utf-8 -*-
+ """
+ :codeauthor: Rahul Handay
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import copy
+ import os
+@@ -32,7 +30,7 @@ from tests.support.runtests import RUNTIME_VARS
+ from tests.support.unit import TestCase
+
+
+-class MockState(object):
++class MockState:
+ """
+ Mock class
+ """
+@@ -40,7 +38,7 @@ class MockState(object):
+ def __init__(self):
+ pass
+
+- class State(object):
++ class State:
+ """
+ Mock state class
+ """
+@@ -129,7 +127,7 @@ class MockState(object):
+ def requisite_in(self, data): # pylint: disable=unused-argument
+ return data, []
+
+- class HighState(object):
++ class HighState:
+ """
+ Mock HighState class
+ """
+@@ -232,7 +230,7 @@ class MockState(object):
+ return True
+
+
+-class MockSerial(object):
++class MockSerial:
+ """
+ Mock Class
+ """
+@@ -240,7 +238,7 @@ class MockSerial(object):
+ def __init__(self):
+ pass
+
+- class Serial(object):
++ class Serial:
+ """
+ Mock Serial class
+ """
+@@ -263,7 +261,7 @@ class MockSerial(object):
+ return True
+
+
+-class MockTarFile(object):
++class MockTarFile:
+ """
+ Mock tarfile class
+ """
+@@ -950,57 +948,57 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ with patch.dict(state.__opts__, {test_arg: True}):
+ self.assertTrue(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is True in __opts__".format(test_arg),
++ msg="Failure when {} is True in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {test_arg: "blah"}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is blah in __opts__".format(test_arg),
++ msg="Failure when {} is blah in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {test_arg: "true"}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is true in __opts__".format(test_arg),
++ msg="Failure when {} is true in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__opts__, {test_arg: False}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is False in __opts__".format(test_arg),
++ msg="Failure when {} is False in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__opts__, {}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} does not exist in __opts__".format(test_arg),
++ msg="Failure when {} does not exist in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {test_arg: None}):
+ self.assertEqual(
+ state._get_test_value(test=None),
+ None,
+- msg="Failure when {0} is None in __opts__".format(test_arg),
++ msg="Failure when {} is None in __opts__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {test_arg: True}):
+ self.assertTrue(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is True in __pillar__".format(test_arg),
++ msg="Failure when {} is True in __pillar__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {"master": {test_arg: True}}):
+ self.assertTrue(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is True in master __pillar__".format(test_arg),
++ msg="Failure when {} is True in master __pillar__".format(test_arg),
+ )
+
+ with patch.dict(config.__pillar__, {"master": {test_arg: False}}):
+ with patch.dict(config.__pillar__, {test_arg: True}):
+ self.assertTrue(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is False in master __pillar__ and True in pillar".format(
++ msg="Failure when {} is False in master __pillar__ and True in pillar".format(
+ test_arg
+ ),
+ )
+@@ -1009,7 +1007,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ with patch.dict(config.__pillar__, {test_arg: False}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is True in master __pillar__ and False in pillar".format(
++ msg="Failure when {} is True in master __pillar__ and False in pillar".format(
+ test_arg
+ ),
+ )
+@@ -1017,14 +1015,14 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ with patch.dict(state.__opts__, {"test": False}):
+ self.assertFalse(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is False in __opts__".format(test_arg),
++ msg="Failure when {} is False in __opts__".format(test_arg),
+ )
+
+ with patch.dict(state.__opts__, {"test": False}):
+ with patch.dict(config.__pillar__, {"master": {test_arg: True}}):
+ self.assertTrue(
+ state._get_test_value(test=None),
+- msg="Failure when {0} is False in __opts__".format(test_arg),
++ msg="Failure when {} is False in __opts__".format(test_arg),
+ )
+
+ with patch.dict(state.__opts__, {}):
+@@ -1077,7 +1075,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ expected = 1
+ assert (
+ call_count == expected
+- ), "{0} called {1} time(s) (expected: {2})".format(
++ ), "{} called {} time(s) (expected: {})".format(
+ key, call_count, expected
+ )
+
+@@ -1091,7 +1089,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ expected = 1
+ assert (
+ call_count == expected
+- ), "{0} called {1} time(s) (expected: {2})".format(
++ ), "{} called {} time(s) (expected: {})".format(
+ key, call_count, expected
+ )
+
+@@ -1105,7 +1103,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ expected = 1
+ assert (
+ call_count == expected
+- ), "{0} called {1} time(s) (expected: {2})".format(
++ ), "{} called {} time(s) (expected: {})".format(
+ key, call_count, expected
+ )
+
+@@ -1121,7 +1119,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ expected = 1
+ assert (
+ call_count == expected
+- ), "{0} called {1} time(s) (expected: {2})".format(
++ ), "{} called {} time(s) (expected: {})".format(
+ key, call_count, expected
+ )
+
+@@ -1168,15 +1166,8 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
+ state._format_cached_grains.assert_called_once()
MockTarFile.path = ""
- with patch('salt.utils.files.fopen', mock_open()), \
-- patch.object(salt.utils.json, 'loads', mock_json_loads_true):
-+ patch.object(salt.utils.json, 'loads', mock_json_loads_true), \
-+ patch.object(state, '_format_cached_grains', MagicMock()):
- self.assertEqual(state.pkg(tar_file, 0, "md5"), True)
-+ state._format_cached_grains.assert_called_once()
+- if six.PY2:
+- with patch("salt.utils.files.fopen", mock_open()), patch.dict(
+- state.__utils__,
+- {"state.check_result": MagicMock(return_value=True)},
+- ):
+- self.assertTrue(state.pkg(tar_file, 0, "md5"))
+- else:
+- with patch("salt.utils.files.fopen", mock_open()):
+- self.assertTrue(state.pkg(tar_file, 0, "md5"))
++ with patch("salt.utils.files.fopen", mock_open()):
++ self.assertTrue(state.pkg(tar_file, 0, "md5"))
- MockTarFile.path = ""
- if six.PY2:
+ def test_lock_saltenv(self):
+ """
--
-2.23.0
+2.29.2
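The patch above routes the roster_grains.json load through _format_cached_grains. A minimal standalone sketch of that call shape follows; the pass-through stub and the /tmp path are assumptions for illustration, with Salt's real helper living in salt.loader:

```python
# Sketch of the sanitizing pattern from the patch above: grains read back
# from roster_grains.json pass through _format_cached_grains before use.
import json
import os

def _format_cached_grains(grains):
    # Pass-through stub; Salt's real salt.loader._format_cached_grains
    # normalizes values mangled by the JSON round trip.
    return grains

root = "/tmp/pkg_root"  # hypothetical extraction root
roster_grains_json = os.path.join(root, "roster_grains.json")
if os.path.isfile(roster_grains_json):
    with open(roster_grains_json) as fp_:
        roster_grains = _format_cached_grains(json.load(fp_))
```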
diff --git a/set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch b/set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch
deleted file mode 100644
index 884eb1a..0000000
--- a/set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 677b7a8881a2e9ebab58cead29b1a6d83850c888 Mon Sep 17 00:00:00 2001
-From: Alexander Graul
-Date: Thu, 5 Nov 2020 16:54:44 +0100
-Subject: [PATCH] Set passphrase for salt-ssh keys to empty string
- (#293)
-
-Since the cmd is not passed to a shell anymore, the "" are taken
-literally and not as an empty string.
-
-Bugzilla report: https://bugzilla.suse.com/show_bug.cgi?id=1178485
----
- salt/client/ssh/shell.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py
-index 27aba7b382..27ab9f4f1b 100644
---- a/salt/client/ssh/shell.py
-+++ b/salt/client/ssh/shell.py
-@@ -44,7 +44,7 @@ def gen_key(path):
- '''
- Generate a key for use with salt-ssh
- '''
-- cmd = ["ssh-keygen", "-P", '""', "-f", path, "-t", "rsa", "-q"]
-+ cmd = ["ssh-keygen", "-P", "", "-f", path, "-t", "rsa", "-q"]
- if not os.path.isdir(os.path.dirname(path)):
- os.makedirs(os.path.dirname(path))
- subprocess.call(cmd)
---
-2.28.0
-
-
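The deleted patch above worked around a quoting pitfall worth remembering: once a command is built as an argument list, there is no shell to strip quotes, so each element reaches the program verbatim. A minimal standalone sketch mirroring the patch's own command, assuming ssh-keygen is installed; the key paths are illustrative:

```python
# With a list argument (shell=False), '""' is passed to ssh-keygen as a
# two-character passphrase of two quote characters, while "" is the
# intended empty passphrase.
import subprocess

path = "/tmp/demo_salt_ssh_key"  # hypothetical path for illustration

# Broken form the patch removed: passphrase is literally the string '""'
subprocess.call(["ssh-keygen", "-P", '""', "-f", path, "-t", "rsa", "-q"])

# Fixed form the patch introduced: a genuinely empty passphrase
subprocess.call(["ssh-keygen", "-P", "", "-f", path + "2", "-t", "rsa", "-q"])
```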
diff --git a/strip-trailing-from-repo.uri-when-comparing-repos-in.patch b/strip-trailing-from-repo.uri-when-comparing-repos-in.patch
index b8628f2..cf1b3f6 100644
--- a/strip-trailing-from-repo.uri-when-comparing-repos-in.patch
+++ b/strip-trailing-from-repo.uri-when-comparing-repos-in.patch
@@ -1,4 +1,4 @@
-From f2b465f41575a8a28d4762f9647ea30df6a64637 Mon Sep 17 00:00:00 2001
+From 210846eb33734a453b99ffb5ac9ef3f59dd0d742 Mon Sep 17 00:00:00 2001
From: Matei Albu
Date: Fri, 15 Feb 2019 14:34:13 +0100
Subject: [PATCH] Strip trailing "/" from repo.uri when comparing repos
@@ -10,19 +10,19 @@ Subject: [PATCH] Strip trailing "/" from repo.uri when comparing repos
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
-index a5b039fc79..bafad40efe 100644
+index c47ee852f4..658a16da4c 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
-@@ -2365,7 +2365,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
- # and the resulting source line. The idea here is to ensure
- # we are not returning bogus data because the source line
+@@ -2429,7 +2429,7 @@ def mod_repo(repo, saltenv="base", **kwargs):
# has already been modified on a previous run.
-- repo_matches = source.type == repo_type and source.uri == repo_uri and source.dist == repo_dist
-+ repo_matches = source.type == repo_type and source.uri.rstrip('/') == repo_uri.rstrip('/') and source.dist == repo_dist
+ repo_matches = (
+ source.type == repo_type
+- and source.uri == repo_uri
++ and source.uri.rstrip("/") == repo_uri.rstrip("/")
+ and source.dist == repo_dist
+ )
kw_matches = source.dist == kw_dist and source.type == kw_type
-
- if repo_matches or kw_matches:
--
-2.16.4
+2.29.2
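The one-line change above only normalizes the URI comparison. A minimal sketch of the rule it implements; same_repo_uri is a hypothetical helper name for illustration, not a function in aptpkg.py:

```python
# Treat two sources.list URIs as the same repo regardless of a trailing "/".
def same_repo_uri(a: str, b: str) -> bool:
    return a.rstrip("/") == b.rstrip("/")

assert same_repo_uri("http://example.com/repo/", "http://example.com/repo")
assert not same_repo_uri("http://example.com/repo", "http://example.com/other")
```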
diff --git a/support-config-non-root-permission-issues-fixes-u-50.patch b/support-config-non-root-permission-issues-fixes-u-50.patch
index 3e0d2d7..097e98e 100644
--- a/support-config-non-root-permission-issues-fixes-u-50.patch
+++ b/support-config-non-root-permission-issues-fixes-u-50.patch
@@ -1,4 +1,4 @@
-From be2f4d3da3612ca02f215f987e4055d2bd177a7b Mon Sep 17 00:00:00 2001
+From 8c7ee66166b9b5a47cdd5150a0db35052e5afbac Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Wed, 17 Oct 2018 14:10:47 +0200
Subject: [PATCH] Support-config non-root permission issues fixes
@@ -12,25 +12,25 @@ Catch overwriting exiting archive error by other users
Suppress excessive tracebacks on error log level
---
- salt/cli/support/collector.py | 39 ++++++++++++++++++++++++++++++++++++---
+ salt/cli/support/collector.py | 43 +++++++++++++++++++++++++++++++----
salt/utils/parsers.py | 2 +-
- 2 files changed, 37 insertions(+), 4 deletions(-)
+ 2 files changed, 39 insertions(+), 6 deletions(-)
diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py
-index 478d07e13b..a4343297b6 100644
+index bfbf491f5b..a08a0b8c6e 100644
--- a/salt/cli/support/collector.py
+++ b/salt/cli/support/collector.py
-@@ -125,6 +125,31 @@ class SupportDataCollector(object):
+@@ -124,6 +124,31 @@ class SupportDataCollector:
self.__current_section = []
self.__current_section_name = name
+ def _printout(self, data, output):
-+ '''
++ """
+ Use salt outputter to printout content.
+
+ :return:
-+ '''
-+ opts = {'extension_modules': '', 'color': False}
++ """
++ opts = {"extension_modules": "", "color": False}
+ try:
+ printout = salt.output.get_printout(output, opts)(data)
+ if printout is not None:
@@ -38,61 +38,65 @@ index 478d07e13b..a4343297b6 100644
+ except (KeyError, AttributeError, TypeError) as err:
+ log.debug(err, exc_info=True)
+ try:
-+ printout = salt.output.get_printout('nested', opts)(data)
++ printout = salt.output.get_printout("nested", opts)(data)
+ if printout is not None:
+ return printout.rstrip()
+ except (KeyError, AttributeError, TypeError) as err:
+ log.debug(err, exc_info=True)
-+ printout = salt.output.get_printout('raw', opts)(data)
++ printout = salt.output.get_printout("raw", opts)(data)
+ if printout is not None:
+ return printout.rstrip()
+
+ return salt.output.try_printout(data, output, opts)
+
def write(self, title, data, output=None):
- '''
+ """
Add a data to the current opened section.
-@@ -138,7 +163,7 @@ class SupportDataCollector(object):
+@@ -137,9 +162,7 @@ class SupportDataCollector:
try:
- if isinstance(data, dict) and 'return' in data:
- data = data['return']
-- content = salt.output.try_printout(data, output, {'extension_modules': '', 'color': False})
+ if isinstance(data, dict) and "return" in data:
+ data = data["return"]
+- content = salt.output.try_printout(
+- data, output, {"extension_modules": "", "color": False}
+- )
+ content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
-@@ -406,7 +431,11 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
- and self.config.get('support_archive')
- and os.path.exists(self.config['support_archive'])):
- self.out.warning('Terminated earlier, cleaning up')
-- os.unlink(self.config['support_archive'])
+@@ -436,7 +459,11 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+ and os.path.exists(self.config["support_archive"])
+ ):
+ self.out.warning("Terminated earlier, cleaning up")
+- os.unlink(self.config["support_archive"])
+ try:
-+ os.unlink(self.config['support_archive'])
++ os.unlink(self.config["support_archive"])
+ except Exception as err:
+ log.debug(err)
-+ self.out.error('{} while cleaning up.'.format(err))
++ self.out.error("{} while cleaning up.".format(err))
def _check_existing_archive(self):
- '''
-@@ -418,7 +447,11 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
- if os.path.exists(self.config['support_archive']):
- if self.config['support_archive_force_overwrite']:
- self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
-- os.unlink(self.config['support_archive'])
+ """
+@@ -452,7 +479,13 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+ self.config["support_archive"]
+ )
+ )
+- os.unlink(self.config["support_archive"])
+ try:
-+ os.unlink(self.config['support_archive'])
++ os.unlink(self.config["support_archive"])
+ except Exception as err:
+ log.debug(err)
-+ self.out.error('{} while trying to overwrite existing archive.'.format(err))
++ self.out.error(
++ "{} while trying to overwrite existing archive.".format(err)
++ )
ret = True
else:
- self.out.warning('File {} already exists.'.format(self.config['support_archive']))
+ self.out.warning(
diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
-index 83dfe717f6..5f98c73291 100644
+index c1422a9556..cea59b387e 100644
--- a/salt/utils/parsers.py
+++ b/salt/utils/parsers.py
-@@ -1972,7 +1972,7 @@ class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser,
- '''
+@@ -2157,7 +2157,7 @@ class SaltSupportOptionParser(
+ """
_opts, _args = optparse.OptionParser.parse_args(self)
configs = self.find_existing_configs(_opts.support_unit)
- if cfg not in configs:
@@ -101,6 +105,6 @@ index 83dfe717f6..5f98c73291 100644
return config.master_config(self.get_config_file_path(cfg))
--
-2.16.4
+2.29.2
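The _printout helper added above degrades gracefully: it tries the requested outputter, then the "nested" outputter, then "raw", logging each failure at debug level before delegating to try_printout. A simplified, dependency-free sketch of that cascade — the renderer table below is a hypothetical stand-in for salt.output.get_printout:

    import json
    import logging

    log = logging.getLogger(__name__)

    def render(data, preferred):
        """Try renderers in order of preference; fall back on failure."""
        renderers = {
            "json": lambda d: json.dumps(d, indent=2),
            "raw": repr,
        }
        for name in (preferred, "json", "raw"):
            try:
                return renderers[name](data).rstrip()
            except (KeyError, TypeError) as err:
                log.debug("renderer %s failed: %s", name, err, exc_info=True)
        return str(data)  # last resort, analogous to try_printout

    print(render({"return": {"ok": True}}, "yaml"))  # "yaml" is unknown, falls back to json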
diff --git a/support-for-btrfs-and-xfs-in-parted-and-mkfs.patch b/support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
index 05638f5..d0d5bb7 100644
--- a/support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
+++ b/support-for-btrfs-and-xfs-in-parted-and-mkfs.patch
@@ -1,56 +1,75 @@
-From 570b45e5a1f1786fe0f449a038f8f8a19b6b9ce2 Mon Sep 17 00:00:00 2001
+From be69780e4cb3047e8024fb87fdf2e15f049db154 Mon Sep 17 00:00:00 2001
From: Jochen Breuer
Date: Fri, 10 Jan 2020 17:18:14 +0100
Subject: [PATCH] Support for Btrfs and XFS in parted and mkfs
---
- salt/modules/parted_partition.py | 4 ++--
- tests/unit/modules/test_parted_partition.py | 16 ++++++++++++++++
- 2 files changed, 18 insertions(+), 2 deletions(-)
+ salt/modules/parted_partition.py | 8 ++------
+ tests/unit/modules/test_parted_partition.py | 5 -----
+ 2 files changed, 2 insertions(+), 11 deletions(-)
diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py
-index c991530aba..9441fec49f 100644
+index 9f321a822c..015d4cbc29 100644
--- a/salt/modules/parted_partition.py
+++ b/salt/modules/parted_partition.py
-@@ -390,8 +390,8 @@ def _is_fstype(fs_type):
+@@ -16,14 +16,11 @@ reference the man page for ``sfdisk(8)``.
+ """
+
+ import logging
+-
+-# Import python libs
+ import os
+ import re
+ import stat
+ import string
+
+-# Import Salt libs
+ import salt.utils.path
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+@@ -412,12 +409,11 @@ def _is_fstype(fs_type):
:param fs_type: file system type
:return: True if fs_type is supported in this module, False otherwise
- '''
-- return fs_type in set(['ext2', 'ext3', 'ext4', 'fat32', 'fat16', 'linux-swap', 'reiserfs',
-- 'hfs', 'hfs+', 'hfsx', 'NTFS', 'ntfs', 'ufs'])
-+ return fs_type in set(['btrfs', 'ext2', 'ext3', 'ext4', 'fat32', 'fat16', 'linux-swap', 'reiserfs',
-+ 'hfs', 'hfs+', 'hfsx', 'NTFS', 'ntfs', 'ufs', 'xfs'])
+ """
+- return fs_type in (
++ return fs_type in {
+ "btrfs",
+ "ext2",
+ "ext3",
+ "ext4",
+- "fat",
+ "fat32",
+ "fat16",
+ "linux-swap",
+@@ -429,7 +425,7 @@ def _is_fstype(fs_type):
+ "ntfs",
+ "ufs",
+ "xfs",
+- )
++ }
def mkfs(device, fs_type):
diff --git a/tests/unit/modules/test_parted_partition.py b/tests/unit/modules/test_parted_partition.py
-index aad2829867..571e30292b 100644
+index 3fff6acee8..073d29ba38 100644
--- a/tests/unit/modules/test_parted_partition.py
+++ b/tests/unit/modules/test_parted_partition.py
-@@ -376,6 +376,22 @@ class PartedTestCase(TestCase, LoaderModuleMockMixin):
- }
- self.assertEqual(output, expected)
+@@ -6,14 +6,9 @@
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ """
-+ def test_btrfs_fstypes(self):
-+ '''Tests if we see btrfs as valid fs type'''
-+ with patch('salt.modules.parted_partition._validate_device', MagicMock()):
-+ try:
-+ parted.mkfs('/dev/foo', 'btrfs')
-+ except CommandExecutionError:
-+ self.fail("Btrfs is not in the supported fstypes")
-+
-+ def test_xfs_fstypes(self):
-+ '''Tests if we see xfs as valid fs type'''
-+ with patch('salt.modules.parted_partition._validate_device', MagicMock()):
-+ try:
-+ parted.mkfs('/dev/foo', 'xfs')
-+ except CommandExecutionError:
-+ self.fail("XFS is not in the supported fstypes")
-+
- def test_disk_set(self):
- with patch('salt.modules.parted_partition._validate_device', MagicMock()):
- self.cmdrun.return_value = ''
+-# Import Python libs
+
+ import salt.modules.parted_partition as parted
+-
+-# Import Salt libs
+ from salt.exceptions import CommandExecutionError
+-
+-# Import Salt Testing libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
--
-2.16.4
+2.29.2
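After the rebase, the supported filesystem names live in a set literal, which gives O(1) membership tests and drops the stray "fat" entry while keeping btrfs and xfs. A self-contained sketch of the same check:

    SUPPORTED_FSTYPES = {
        "btrfs", "ext2", "ext3", "ext4", "fat32", "fat16",
        "linux-swap", "reiserfs", "hfs", "hfs+", "hfsx",
        "NTFS", "ntfs", "ufs", "xfs",
    }

    def is_fstype(fs_type):
        """Return True if fs_type is supported by this module."""
        return fs_type in SUPPORTED_FSTYPES

    assert is_fstype("btrfs") and is_fstype("xfs")
    assert not is_fstype("fat")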
diff --git a/support-transactional-systems-microos-271.patch b/support-transactional-systems-microos-271.patch
index 5f7cdbd..12eb2fd 100644
--- a/support-transactional-systems-microos-271.patch
+++ b/support-transactional-systems-microos-271.patch
@@ -1,4 +1,4 @@
-From 479ec4e978d81da75e45e2ead3193ca96e075753 Mon Sep 17 00:00:00 2001
+From aa0d6604a7c6e2a25e88679ec64855723e6cabbf Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Mon, 5 Oct 2020 16:32:44 +0200
Subject: [PATCH] Support transactional systems (MicroOS) (#271)
@@ -38,16 +38,16 @@ Closes #58519
.../modules/all/salt.modules.rebootmgr.rst | 5 +
.../all/salt.modules.transactional_update.rst | 5 +
salt/executors/transactional_update.py | 126 ++
- salt/grains/extra.py | 29 +
- salt/modules/chroot.py | 39 +-
+ salt/grains/extra.py | 42 +-
+ salt/modules/chroot.py | 46 +-
salt/modules/rebootmgr.py | 357 +++++
- salt/modules/systemd_service.py | 22 +-
+ salt/modules/systemd_service.py | 21 +-
salt/modules/transactional_update.py | 1270 +++++++++++++++++
- salt/utils/systemd.py | 22 +
- tests/unit/modules/test_chroot.py | 15 +
- tests/unit/modules/test_rebootmgr.py | 304 ++++
- .../unit/modules/test_transactional_update.py | 683 +++++++++
- 16 files changed, 2882 insertions(+), 5 deletions(-)
+ salt/utils/systemd.py | 27 +-
+ tests/unit/modules/test_chroot.py | 18 +-
+ tests/unit/modules/test_rebootmgr.py | 302 ++++
+ .../unit/modules/test_transactional_update.py | 681 +++++++++
+ 16 files changed, 2880 insertions(+), 30 deletions(-)
create mode 100644 changelog/58519.added
create mode 100644 doc/ref/executors/all/salt.executors.transactional_update.rst
create mode 100644 doc/ref/modules/all/salt.modules.rebootmgr.rst
@@ -88,10 +88,10 @@ index 0000000000..17f00b2d27
+ :members:
+
diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
-index 8e1bf2ecf1..ec5f4b9cd9 100644
+index 9fea7af07f..f6780e1694 100644
--- a/doc/ref/modules/all/index.rst
+++ b/doc/ref/modules/all/index.rst
-@@ -371,6 +371,7 @@ execution modules
+@@ -394,6 +394,7 @@ execution modules
rbac_solaris
rbenv
rdp
@@ -99,7 +99,7 @@ index 8e1bf2ecf1..ec5f4b9cd9 100644
redismod
reg
rest_pkg
-@@ -457,6 +458,7 @@ execution modules
+@@ -480,6 +481,7 @@ execution modules
tls
tomcat
trafficserver
@@ -262,21 +262,22 @@ index 0000000000..ef7d92bc05
+
+ return result
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
-index b30ab0091f..6a26aece77 100644
+index 0eec27e628..d25faac3b7 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
-@@ -3,14 +3,18 @@
- from __future__ import absolute_import, print_function, unicode_literals
-
- # Import python libs
+@@ -1,16 +1,11 @@
+-# -*- coding: utf-8 -*-
+-
+-from __future__ import absolute_import, print_function, unicode_literals
+-
+-# Import third party libs
+import glob
-+import logging
+ import logging
+-
+-# Import python libs
import os
- # Import third party libs
- import logging
-
- # Import salt libs
+-# Import salt libs
+import salt.utils
import salt.utils.data
import salt.utils.files
@@ -284,9 +285,16 @@ index b30ab0091f..6a26aece77 100644
import salt.utils.platform
import salt.utils.yaml
-@@ -83,3 +87,28 @@ def suse_backported_capabilities():
- '__suse_reserved_pkg_patches_support': True,
- '__suse_reserved_saltutil_states_support': True
+@@ -70,7 +65,32 @@ def config():
+
+ def suse_backported_capabilities():
+ return {
+- '__suse_reserved_pkg_all_versions_support': True,
+- '__suse_reserved_pkg_patches_support': True,
+- '__suse_reserved_saltutil_states_support': True
++ "__suse_reserved_pkg_all_versions_support": True,
++ "__suse_reserved_pkg_patches_support": True,
++ "__suse_reserved_saltutil_states_support": True,
}
+
+
@@ -314,18 +322,31 @@ index b30ab0091f..6a26aece77 100644
+ """Determine if the system in transactional."""
+ return {"transactional": bool(salt.utils.path.which("transactional-update"))}
diff --git a/salt/modules/chroot.py b/salt/modules/chroot.py
-index bc089ebf18..5e890b5c35 100644
+index 6512a70f88..1e2948607e 100644
--- a/salt/modules/chroot.py
+++ b/salt/modules/chroot.py
-@@ -21,6 +21,7 @@ import salt.defaults.exitcodes
+@@ -1,12 +1,9 @@
+-# -*- coding: utf-8 -*-
+-
+ """
+ :maintainer: Alberto Planas
+ :maturity: new
+ :depends: None
+ :platform: Linux
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import copy
+ import logging
+@@ -21,6 +18,7 @@ import salt.defaults.exitcodes
import salt.exceptions
import salt.ext.six as six
import salt.utils.args
+import salt.utils.files
+ __func_alias__ = {"apply_": "apply"}
- __func_alias__ = {
-@@ -82,6 +83,38 @@ def create(root):
+@@ -79,6 +77,38 @@ def create(root):
return True
@@ -362,10 +383,10 @@ index bc089ebf18..5e890b5c35 100644
+
+
def call(root, function, *args, **kwargs):
- '''
+ """
Executes a Salt function inside a chroot environment.
-@@ -121,7 +154,7 @@ def call(root, function, *args, **kwargs):
- so_mods=__salt__['config.option']('thin_so_mods', '')
+@@ -116,7 +146,7 @@ def call(root, function, *args, **kwargs):
+ so_mods=__salt__["config.option"]("thin_so_mods", ""),
)
# Some bug in Salt is preventing us to use `archive.tar` here. A
- # AsyncZeroMQReqChannel is not closed at the end os the salt-call,
@@ -373,24 +394,39 @@ index bc089ebf18..5e890b5c35 100644
# and makes the client never exit.
#
# stdout = __salt__['archive.tar']('xzf', thin_path, dest=thin_dest_path)
-@@ -198,7 +231,7 @@ def apply_(root, mods=None, **kwargs):
+@@ -194,7 +224,7 @@ def apply_(root, mods=None, **kwargs):
def _create_and_execute_salt_state(root, chunks, file_refs, test, hash_type):
- '''
+ """
- Create the salt_stage tarball, and execute in the chroot
+ Create the salt_state tarball, and execute in the chroot
- '''
+ """
# Create the tar containing the state pkg and relevant files.
salt.client.ssh.wrapper.state._cleanup_slsmod_low_data(chunks)
-@@ -210,7 +243,7 @@ def _create_and_execute_salt_state(root, chunks, file_refs, test, hash_type):
+@@ -206,7 +236,7 @@ def _create_and_execute_salt_state(root, chunks, file_refs, test, hash_type):
ret = None
# Create a temporary directory inside the chroot where we can move
- # the salt_stage.tgz
+ # the salt_state.tgz
salt_state_path = tempfile.mkdtemp(dir=root)
- salt_state_path = os.path.join(salt_state_path, 'salt_state.tgz')
- salt_state_path_in_chroot = salt_state_path.replace(root, '', 1)
+ salt_state_path = os.path.join(salt_state_path, "salt_state.tgz")
+ salt_state_path_in_chroot = salt_state_path.replace(root, "", 1)
+@@ -270,12 +300,12 @@ def sls(root, mods, saltenv="base", test=None, exclude=None, **kwargs):
+ opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__)
+ )
+
+- if isinstance(mods, six.string_types):
++ if isinstance(mods, str):
+ mods = mods.split(",")
+
+ high_data, errors = st_.render_highstate({saltenv: mods})
+ if exclude:
+- if isinstance(exclude, six.string_types):
++ if isinstance(exclude, str):
+ exclude = exclude.split(",")
+ if "__exclude__" in high_data:
+ high_data["__exclude__"].extend(exclude)
diff --git a/salt/modules/rebootmgr.py b/salt/modules/rebootmgr.py
new file mode 100644
index 0000000000..96133c754b
@@ -755,15 +791,14 @@ index 0000000000..96133c754b
+
+ return _cmd(cmd)
diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
-index e39962f9ac..a684ec0778 100644
+index 03e7268cd4..49e5bd813f 100644
--- a/salt/modules/systemd_service.py
+++ b/salt/modules/systemd_service.py
-@@ -56,8 +56,10 @@ def __virtual__():
- '''
+@@ -64,7 +64,10 @@ def __virtual__():
+ """
Only work on systems that have been booted with systemd
- '''
-- if __grains__['kernel'] == 'Linux' \
-- and salt.utils.systemd.booted(__context__):
+ """
+- if __grains__.get("kernel") == "Linux" and salt.utils.systemd.booted(__context__):
+ is_linux = __grains__.get("kernel") == "Linux"
+ is_booted = salt.utils.systemd.booted(__context__)
+ is_offline = salt.utils.systemd.offline(__context__)
@@ -771,8 +806,8 @@ index e39962f9ac..a684ec0778 100644
return __virtualname__
return (
False,
-@@ -1419,3 +1421,19 @@ def firstboot(locale=None, locale_message=None, keymap=None,
- 'systemd-firstboot error: {}'.format(out['stderr']))
+@@ -1447,3 +1450,19 @@ def firstboot(
+ raise CommandExecutionError("systemd-firstboot error: {}".format(out["stderr"]))
return True
+
@@ -2068,18 +2103,30 @@ index 0000000000..9b14557e07
+ chunks, file_refs, test, hash_type, activate_transaction
+ )
diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py
-index 060bc1e3fb..674b6d419f 100644
+index e830d36ed4..4d902bc920 100644
--- a/salt/utils/systemd.py
+++ b/salt/utils/systemd.py
-@@ -11,6 +11,7 @@ import subprocess
+@@ -1,18 +1,14 @@
+-# -*- coding: utf-8 -*-
+ """
+ Contains systemd related help files
+ """
+-# import python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import os
+ import re
+ import subprocess
- # Import Salt libs
- from salt.exceptions import SaltInvocationError
+import salt.utils.path
import salt.utils.stringutils
+-
+-# Import Salt libs
+ from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
-@@ -47,6 +48,27 @@ def booted(context=None):
+@@ -49,6 +45,27 @@ def booted(context=None):
return ret
@@ -2105,24 +2152,37 @@ index 060bc1e3fb..674b6d419f 100644
+
+
def version(context=None):
- '''
+ """
Attempts to run systemctl --version. Returns None if unable to determine
diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py
-index de3041e98f..62808ed680 100644
+index 76811df46e..196e3ad27f 100644
--- a/tests/unit/modules/test_chroot.py
+++ b/tests/unit/modules/test_chroot.py
-@@ -31,6 +31,9 @@ from __future__ import absolute_import, print_function, unicode_literals
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ #
+ # Author: Alberto Planas
+ #
+@@ -26,16 +25,13 @@
+ :platform: Linux
+ """
+
+-# Import Python Libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
import sys
- # Import Salt Testing Libs
+import salt.modules.chroot as chroot
-+import salt.utils.platform
-+from salt.exceptions import CommandExecutionError
+ import salt.modules.chroot as chroot
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+-
+-# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.unit import skipIf, TestCase
from tests.support.mock import MagicMock, patch
-@@ -80,6 +83,18 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
- self.assertTrue(chroot.create('/chroot'))
+ from tests.support.unit import TestCase, skipIf
+@@ -75,6 +71,18 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertTrue(chroot.create("/chroot"))
makedirs.assert_called()
+ @patch("salt.modules.chroot.exist")
@@ -2137,20 +2197,18 @@ index de3041e98f..62808ed680 100644
+ fopen.read = MagicMock(side_effect=(root_mountinfo, self_mountinfo))
+ self.assertEqual(chroot.in_chroot(), result)
+
- @patch('salt.modules.chroot.exist')
+ @patch("salt.modules.chroot.exist")
def test_call_fails_input_validation(self, exist):
- '''
+ """
diff --git a/tests/unit/modules/test_rebootmgr.py b/tests/unit/modules/test_rebootmgr.py
new file mode 100644
-index 0000000000..4cf573997c
+index 0000000000..a84dec2c1c
--- /dev/null
+++ b/tests/unit/modules/test_rebootmgr.py
-@@ -0,0 +1,304 @@
+@@ -0,0 +1,302 @@
+import pytest
+import salt.modules.rebootmgr as rebootmgr
+from salt.exceptions import CommandExecutionError
-+
-+# Import Salt Testing Libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.mock import MagicMock, patch
+from tests.support.unit import TestCase
@@ -2452,18 +2510,16 @@ index 0000000000..4cf573997c
+ )
diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py
new file mode 100644
-index 0000000000..b42734a53d
+index 0000000000..08a704c212
--- /dev/null
+++ b/tests/unit/modules/test_transactional_update.py
-@@ -0,0 +1,683 @@
+@@ -0,0 +1,681 @@
+import sys
+
+import pytest
+import salt.modules.transactional_update as tu
+import salt.utils.platform
+from salt.exceptions import CommandExecutionError
-+
-+# Import Salt Testing Libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.mock import MagicMock, patch
+from tests.support.unit import TestCase, skipIf
@@ -3140,6 +3196,6 @@ index 0000000000..b42734a53d
+ assert tu.single("pkg.installed", name="emacs") == "result"
+ _create_and_execute_salt_state.assert_called_once()
--
-2.28.0
+2.29.2
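The grain introduced by this patch reduces transactional-system detection to a PATH lookup: if the transactional-update tool is installed, the system is treated as transactional (e.g. openSUSE MicroOS). A dependency-free sketch using the standard library in place of salt.utils.path:

    import shutil

    def transactional():
        """Grain sketch: a system is transactional when the
        transactional-update tool is on the PATH."""
        return {"transactional": bool(shutil.which("transactional-update"))}

    print(transactional())  # {'transactional': False} on most systems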
diff --git a/switch-firewalld-state-to-use-change_interface.patch b/switch-firewalld-state-to-use-change_interface.patch
index b12059b..c4859d2 100644
--- a/switch-firewalld-state-to-use-change_interface.patch
+++ b/switch-firewalld-state-to-use-change_interface.patch
@@ -1,4 +1,4 @@
-From c48d54fe6243614aba481c887208e473f58a5057 Mon Sep 17 00:00:00 2001
+From 74d5d84ada50609c60008d3160492c1f4a29d72d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Mon, 20 May 2019 11:59:39 +0100
@@ -14,20 +14,298 @@ zone to another.
This PR adds `firewalld.change_interface` call to firewalld module
and updates `firewalld.present` state to use this call.
---
- salt/modules/firewalld.py | 23 +++++++++++++++++++++++
- salt/states/firewalld.py | 4 ++--
- 2 files changed, 25 insertions(+), 2 deletions(-)
+ salt/modules/firewalld.py | 100 ++++++++++++++++++++++----------------
+ salt/states/firewalld.py | 93 +++++++++++++++++------------------
+ 2 files changed, 104 insertions(+), 89 deletions(-)
diff --git a/salt/modules/firewalld.py b/salt/modules/firewalld.py
-index a6d90d38b8..c8b646024b 100644
+index ca35568c3f..12808cb038 100644
--- a/salt/modules/firewalld.py
+++ b/salt/modules/firewalld.py
-@@ -932,6 +932,29 @@ def remove_interface(zone, interface, permanent=True):
+@@ -1,19 +1,14 @@
+-# -*- coding: utf-8 -*-
+ """
+ Support for firewalld.
+
+ .. versionadded:: 2015.2.0
+ """
+
+-# Import Python Libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import re
+
+ import salt.utils.path
+-
+-# Import Salt Libs
+ from salt.exceptions import CommandExecutionError
+
+ log = logging.getLogger(__name__)
+@@ -36,7 +31,7 @@ def __firewall_cmd(cmd):
+ """
+ Return the firewall-cmd location
+ """
+- firewall_cmd = "{0} {1}".format(salt.utils.path.which("firewall-cmd"), cmd)
++ firewall_cmd = "{} {}".format(salt.utils.path.which("firewall-cmd"), cmd)
+ out = __salt__["cmd.run_all"](firewall_cmd)
+
+ if out["retcode"] != 0:
+@@ -44,7 +39,7 @@ def __firewall_cmd(cmd):
+ msg = out["stdout"]
+ else:
+ msg = out["stderr"]
+- raise CommandExecutionError("firewall-cmd failed: {0}".format(msg))
++ raise CommandExecutionError("firewall-cmd failed: {}".format(msg))
+ return out["stdout"]
+
+
+@@ -53,7 +48,7 @@ def __mgmt(name, _type, action):
+ Perform zone management
+ """
+ # It's permanent because the 4 concerned functions need the permanent option, it's wrong without
+- cmd = "--{0}-{1}={2} --permanent".format(action, _type, name)
++ cmd = "--{}-{}={} --permanent".format(action, _type, name)
+
+ return __firewall_cmd(cmd)
+
+@@ -250,7 +245,7 @@ def set_default_zone(zone):
+
+ salt '*' firewalld.set_default_zone damian
+ """
+- return __firewall_cmd("--set-default-zone={0}".format(zone))
++ return __firewall_cmd("--set-default-zone={}".format(zone))
+
+
+ def new_service(name, restart=True):
+@@ -327,7 +322,7 @@ def list_all(zone=None, permanent=True):
+ id_ = ""
+
+ if zone:
+- cmd = "--zone={0} --list-all".format(zone)
++ cmd = "--zone={} --list-all".format(zone)
+ else:
+ cmd = "--list-all"
+
+@@ -372,7 +367,7 @@ def list_services(zone=None, permanent=True):
+ salt '*' firewalld.list_services my_zone
+ """
+ if zone:
+- cmd = "--zone={0} --list-services".format(zone)
++ cmd = "--zone={} --list-services".format(zone)
+ else:
+ cmd = "--list-services"
+
+@@ -399,9 +394,9 @@ def add_service(service, zone=None, permanent=True):
+ salt '*' firewalld.add_service ssh my_zone
+ """
+ if zone:
+- cmd = "--zone={0} --add-service={1}".format(zone, service)
++ cmd = "--zone={} --add-service={}".format(zone, service)
+ else:
+- cmd = "--add-service={0}".format(service)
++ cmd = "--add-service={}".format(service)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -427,9 +422,9 @@ def remove_service(service, zone=None, permanent=True):
+ salt '*' firewalld.remove_service ssh dmz
+ """
+ if zone:
+- cmd = "--zone={0} --remove-service={1}".format(zone, service)
++ cmd = "--zone={} --remove-service={}".format(zone, service)
+ else:
+- cmd = "--remove-service={0}".format(service)
++ cmd = "--remove-service={}".format(service)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -452,7 +447,7 @@ def add_service_port(service, port):
+ if service not in get_services(permanent=True):
+ raise CommandExecutionError("The service does not exist.")
+
+- cmd = "--permanent --service={0} --add-port={1}".format(service, port)
++ cmd = "--permanent --service={} --add-port={}".format(service, port)
return __firewall_cmd(cmd)
+@@ -471,7 +466,7 @@ def remove_service_port(service, port):
+ if service not in get_services(permanent=True):
+ raise CommandExecutionError("The service does not exist.")
+
+- cmd = "--permanent --service={0} --remove-port={1}".format(service, port)
++ cmd = "--permanent --service={} --remove-port={}".format(service, port)
+ return __firewall_cmd(cmd)
+
+
+@@ -487,7 +482,7 @@ def get_service_ports(service):
+
+ salt '*' firewalld.get_service_ports zone
+ """
+- cmd = "--permanent --service={0} --get-ports".format(service)
++ cmd = "--permanent --service={} --get-ports".format(service)
+ return __firewall_cmd(cmd).split()
+
+
+@@ -503,7 +498,7 @@ def add_service_protocol(service, protocol):
+
+ salt '*' firewalld.add_service_protocol zone ssh
+ """
+- cmd = "--permanent --service={0} --add-protocol={1}".format(service, protocol)
++ cmd = "--permanent --service={} --add-protocol={}".format(service, protocol)
+ return __firewall_cmd(cmd)
+
+
+@@ -519,7 +514,7 @@ def remove_service_protocol(service, protocol):
+
+ salt '*' firewalld.remove_service_protocol zone ssh
+ """
+- cmd = "--permanent --service={0} --remove-protocol={1}".format(service, protocol)
++ cmd = "--permanent --service={} --remove-protocol={}".format(service, protocol)
+ return __firewall_cmd(cmd)
+
+
+@@ -535,7 +530,7 @@ def get_service_protocols(service):
+
+ salt '*' firewalld.get_service_protocols zone
+ """
+- cmd = "--permanent --service={0} --get-protocols".format(service)
++ cmd = "--permanent --service={} --get-protocols".format(service)
+ return __firewall_cmd(cmd).split()
+
+
+@@ -578,7 +573,7 @@ def add_masquerade(zone=None, permanent=True):
+ salt '*' firewalld.add_masquerade dmz
+ """
+ if zone:
+- cmd = "--zone={0} --add-masquerade".format(zone)
++ cmd = "--zone={} --add-masquerade".format(zone)
+ else:
+ cmd = "--add-masquerade"
+
+@@ -608,7 +603,7 @@ def remove_masquerade(zone=None, permanent=True):
+ salt '*' firewalld.remove_masquerade dmz
+ """
+ if zone:
+- cmd = "--zone={0} --remove-masquerade".format(zone)
++ cmd = "--zone={} --remove-masquerade".format(zone)
+ else:
+ cmd = "--remove-masquerade"
+
+@@ -637,7 +632,7 @@ def add_port(zone, port, permanent=True, force_masquerade=False):
+ if force_masquerade and not get_masquerade(zone):
+ add_masquerade(zone)
+
+- cmd = "--zone={0} --add-port={1}".format(zone, port)
++ cmd = "--zone={} --add-port={}".format(zone, port)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -657,7 +652,7 @@ def remove_port(zone, port, permanent=True):
+
+ salt '*' firewalld.remove_port internal 443/tcp
+ """
+- cmd = "--zone={0} --remove-port={1}".format(zone, port)
++ cmd = "--zone={} --remove-port={}".format(zone, port)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -677,7 +672,7 @@ def list_ports(zone, permanent=True):
+
+ salt '*' firewalld.list_ports
+ """
+- cmd = "--zone={0} --list-ports".format(zone)
++ cmd = "--zone={} --list-ports".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -706,7 +701,7 @@ def add_port_fwd(
+ if force_masquerade and not get_masquerade(zone):
+ add_masquerade(zone)
+
+- cmd = "--zone={0} --add-forward-port=port={1}:proto={2}:toport={3}:toaddr={4}".format(
++ cmd = "--zone={} --add-forward-port=port={}:proto={}:toport={}:toaddr={}".format(
+ zone, src, proto, dest, dstaddr
+ )
+
+@@ -728,7 +723,7 @@ def remove_port_fwd(zone, src, dest, proto="tcp", dstaddr="", permanent=True):
+
+ salt '*' firewalld.remove_port_fwd public 80 443 tcp
+ """
+- cmd = "--zone={0} --remove-forward-port=port={1}:proto={2}:toport={3}:toaddr={4}".format(
++ cmd = "--zone={} --remove-forward-port=port={}:proto={}:toport={}:toaddr={}".format(
+ zone, src, proto, dest, dstaddr
+ )
+
+@@ -752,7 +747,7 @@ def list_port_fwd(zone, permanent=True):
+ """
+ ret = []
+
+- cmd = "--zone={0} --list-forward-ports".format(zone)
++ cmd = "--zone={} --list-forward-ports".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -792,7 +787,7 @@ def block_icmp(zone, icmp, permanent=True):
+ log.info("ICMP block already exists")
+ return "success"
+
+- cmd = "--zone={0} --add-icmp-block={1}".format(zone, icmp)
++ cmd = "--zone={} --add-icmp-block={}".format(zone, icmp)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -820,7 +815,7 @@ def allow_icmp(zone, icmp, permanent=True):
+ log.info("ICMP Type is already permitted")
+ return "success"
+
+- cmd = "--zone={0} --remove-icmp-block={1}".format(zone, icmp)
++ cmd = "--zone={} --remove-icmp-block={}".format(zone, icmp)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -840,7 +835,7 @@ def list_icmp_block(zone, permanent=True):
+
+ salt '*' firewlld.list_icmp_block zone
+ """
+- cmd = "--zone={0} --list-icmp-blocks".format(zone)
++ cmd = "--zone={} --list-icmp-blocks".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -875,7 +870,7 @@ def get_interfaces(zone, permanent=True):
+
+ salt '*' firewalld.get_interfaces zone
+ """
+- cmd = "--zone={0} --list-interfaces".format(zone)
++ cmd = "--zone={} --list-interfaces".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -898,7 +893,7 @@ def add_interface(zone, interface, permanent=True):
+ if interface in get_interfaces(zone, permanent):
+ log.info("Interface is already bound to zone.")
+
+- cmd = "--zone={0} --add-interface={1}".format(zone, interface)
++ cmd = "--zone={} --add-interface={}".format(zone, interface)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -921,7 +916,30 @@ def remove_interface(zone, interface, permanent=True):
+ if interface not in get_interfaces(zone, permanent):
+ log.info("Interface is not bound to zone.")
+
+- cmd = "--zone={0} --remove-interface={1}".format(zone, interface)
++ cmd = "--zone={} --remove-interface={}".format(zone, interface)
++
++ if permanent:
++ cmd += " --permanent"
++
++ return __firewall_cmd(cmd)
++
++
+def change_interface(zone, interface, permanent=True):
-+ '''
++ """
++    Change the zone an interface is bound to
+
+ .. versionadded:: 2019.?.?
@@ -37,37 +315,462 @@ index a6d90d38b8..c8b646024b 100644
+ .. code-block:: bash
+
+ salt '*' firewalld.change_interface zone eth0
-+ '''
++ """
+ if interface in get_interfaces(zone, permanent):
-+ log.info('Interface is already bound to zone.')
++ log.info("Interface is already bound to zone.")
+
-+ cmd = '--zone={0} --change-interface={1}'.format(zone, interface)
-+
-+ if permanent:
-+ cmd += ' --permanent'
-+
-+ return __firewall_cmd(cmd)
-+
-+
- def get_sources(zone, permanent=True):
- '''
- List sources bound to a zone
++ cmd = "--zone={} --change-interface={}".format(zone, interface)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -941,7 +959,7 @@ def get_sources(zone, permanent=True):
+
+ salt '*' firewalld.get_sources zone
+ """
+- cmd = "--zone={0} --list-sources".format(zone)
++ cmd = "--zone={} --list-sources".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -964,7 +982,7 @@ def add_source(zone, source, permanent=True):
+ if source in get_sources(zone, permanent):
+ log.info("Source is already bound to zone.")
+
+- cmd = "--zone={0} --add-source={1}".format(zone, source)
++ cmd = "--zone={} --add-source={}".format(zone, source)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -987,7 +1005,7 @@ def remove_source(zone, source, permanent=True):
+ if source not in get_sources(zone, permanent):
+ log.info("Source is not bound to zone.")
+
+- cmd = "--zone={0} --remove-source={1}".format(zone, source)
++ cmd = "--zone={} --remove-source={}".format(zone, source)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -1007,7 +1025,7 @@ def get_rich_rules(zone, permanent=True):
+
+ salt '*' firewalld.get_rich_rules zone
+ """
+- cmd = "--zone={0} --list-rich-rules".format(zone)
++ cmd = "--zone={} --list-rich-rules".format(zone)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -1027,7 +1045,7 @@ def add_rich_rule(zone, rule, permanent=True):
+
+ salt '*' firewalld.add_rich_rule zone 'rule'
+ """
+- cmd = "--zone={0} --add-rich-rule='{1}'".format(zone, rule)
++ cmd = "--zone={} --add-rich-rule='{}'".format(zone, rule)
+
+ if permanent:
+ cmd += " --permanent"
+@@ -1047,7 +1065,7 @@ def remove_rich_rule(zone, rule, permanent=True):
+
+ salt '*' firewalld.remove_rich_rule zone 'rule'
+ """
+- cmd = "--zone={0} --remove-rich-rule='{1}'".format(zone, rule)
++ cmd = "--zone={} --remove-rich-rule='{}'".format(zone, rule)
+
+ if permanent:
+ cmd += " --permanent"
diff --git a/salt/states/firewalld.py b/salt/states/firewalld.py
-index 25cbad170a..e4338beaf2 100644
+index 4114e99f43..425defcfb1 100644
--- a/salt/states/firewalld.py
+++ b/salt/states/firewalld.py
-@@ -633,8 +633,8 @@ def _present(name,
- for interface in new_interfaces:
- if not __opts__['test']:
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Management of firewalld
+
+@@ -76,21 +75,17 @@ would allow access to the salt master from the 10.0.0.0/8 subnet:
+ - 10.0.0.0/8
+ """
+
+-# Import Python Libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+
+ import salt.utils.path
+-
+-# Import Salt Libs
+ from salt.exceptions import CommandExecutionError
+ from salt.output import nested
+
+ log = logging.getLogger(__name__)
+
+
+-class ForwardingMapping(object):
++class ForwardingMapping:
+ """
+ Represents a port forwarding statement mapping a local port to a remote
+ port for a specific protocol (TCP or UDP)
+@@ -282,7 +277,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ _current_ports = __salt__["firewalld.get_service_ports"](name)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_ports = set(ports) - set(_current_ports)
+@@ -293,7 +288,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ __salt__["firewalld.add_service_port"](name, port)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ for port in old_ports:
+@@ -301,7 +296,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ __salt__["firewalld.remove_service_port"](name, port)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_ports or old_ports:
+@@ -312,7 +307,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ _current_protocols = __salt__["firewalld.get_service_protocols"](name)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_protocols = set(protocols) - set(_current_protocols)
+@@ -323,7 +318,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ __salt__["firewalld.add_service_protocol"](name, protocol)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ for protocol in old_protocols:
+@@ -331,7 +326,7 @@ def service(name, ports=None, protocols=None):
+ try:
+ __salt__["firewalld.remove_service_protocol"](name, protocol)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_protocols or old_protocols:
+@@ -344,15 +339,15 @@ def service(name, ports=None, protocols=None):
+
+ ret["result"] = True
+ if ret["changes"] == {}:
+- ret["comment"] = "'{0}' is already in the desired state.".format(name)
++ ret["comment"] = "'{}' is already in the desired state.".format(name)
+ return ret
+
+ if __opts__["test"]:
+ ret["result"] = None
+- ret["comment"] = "Configuration for '{0}' will change.".format(name)
++ ret["comment"] = "Configuration for '{}' will change.".format(name)
+ return ret
+
+- ret["comment"] = "'{0}' was configured.".format(name)
++ ret["comment"] = "'{}' was configured.".format(name)
+ return ret
+
+
+@@ -385,7 +380,7 @@ def _present(
+ try:
+ zones = __salt__["firewalld.get_zones"](permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if name not in zones:
+@@ -393,7 +388,7 @@ def _present(
+ try:
+ __salt__["firewalld.new_zone"](name)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ ret["changes"].update({name: {"old": zones, "new": name}})
+@@ -408,14 +403,14 @@ def _present(
+ name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if block_icmp:
+ try:
+ _valid_icmp_types = __salt__["firewalld.get_icmp_types"](permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ # log errors for invalid ICMP types in block_icmp input
+@@ -431,7 +426,7 @@ def _present(
+ name, icmp_type, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_block_icmp:
+@@ -446,7 +441,7 @@ def _present(
+ name, icmp_type, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_icmp_types or old_icmp_types:
+@@ -464,21 +459,21 @@ def _present(
+ try:
+ default_zone = __salt__["firewalld.default_zone"]()
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+ if name != default_zone:
+ if not __opts__["test"]:
try:
-- __salt__['firewalld.add_interface'](name, interface,
-- permanent=True)
-+ __salt__['firewalld.change_interface'](name, interface,
-+ permanent=True)
+ __salt__["firewalld.set_default_zone"](name)
except CommandExecutionError as err:
- ret['comment'] = 'Error: {0}'.format(err)
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
return ret
+ ret["changes"].update({"default": {"old": default_zone, "new": name}})
+
+ try:
+ masquerade_ret = __salt__["firewalld.get_masquerade"](name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if masquerade and not masquerade_ret:
+@@ -486,7 +481,7 @@ def _present(
+ try:
+ __salt__["firewalld.add_masquerade"](name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+ ret["changes"].update(
+ {"masquerade": {"old": "", "new": "Masquerading successfully set."}}
+@@ -496,7 +491,7 @@ def _present(
+ try:
+ __salt__["firewalld.remove_masquerade"](name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+ ret["changes"].update(
+ {"masquerade": {"old": "", "new": "Masquerading successfully " "disabled."}}
+@@ -507,7 +502,7 @@ def _present(
+ try:
+ _current_ports = __salt__["firewalld.list_ports"](name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_ports = set(ports) - set(_current_ports)
+@@ -520,7 +515,7 @@ def _present(
+ name, port, permanent=True, force_masquerade=False
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_ports:
+@@ -530,7 +525,7 @@ def _present(
+ try:
+ __salt__["firewalld.remove_port"](name, port, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_ports or old_ports:
+@@ -547,7 +542,7 @@ def _present(
+ name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ port_fwd = [_parse_forward(fwd) for fwd in port_fwd]
+@@ -577,7 +572,7 @@ def _present(
+ force_masquerade=False,
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_port_fwd:
+@@ -594,7 +589,7 @@ def _present(
+ permanent=True,
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_port_fwd or old_port_fwd:
+@@ -618,7 +613,7 @@ def _present(
+ name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_services = set(services) - set(_current_services)
+@@ -629,7 +624,7 @@ def _present(
+ try:
+ __salt__["firewalld.add_service"](new_service, name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_services:
+@@ -641,7 +636,7 @@ def _present(
+ old_service, name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_services or old_services:
+@@ -660,7 +655,7 @@ def _present(
+ name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_interfaces = set(interfaces) - set(_current_interfaces)
+@@ -669,9 +664,11 @@ def _present(
+ for interface in new_interfaces:
+ if not __opts__["test"]:
+ try:
+- __salt__["firewalld.add_interface"](name, interface, permanent=True)
++ __salt__["firewalld.change_interface"](
++ name, interface, permanent=True
++ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_interfaces:
+@@ -683,7 +680,7 @@ def _present(
+ name, interface, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_interfaces or old_interfaces:
+@@ -700,7 +697,7 @@ def _present(
+ try:
+ _current_sources = __salt__["firewalld.get_sources"](name, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_sources = set(sources) - set(_current_sources)
+@@ -711,7 +708,7 @@ def _present(
+ try:
+ __salt__["firewalld.add_source"](name, source, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_sources:
+@@ -723,7 +720,7 @@ def _present(
+ name, source, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_sources or old_sources:
+@@ -742,7 +739,7 @@ def _present(
+ name, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ new_rich_rules = set(rich_rules) - set(_current_rich_rules)
+@@ -753,7 +750,7 @@ def _present(
+ try:
+ __salt__["firewalld.add_rich_rule"](name, rich_rule, permanent=True)
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if prune_rich_rules:
+@@ -765,7 +762,7 @@ def _present(
+ name, rich_rule, permanent=True
+ )
+ except CommandExecutionError as err:
+- ret["comment"] = "Error: {0}".format(err)
++ ret["comment"] = "Error: {}".format(err)
+ return ret
+
+ if new_rich_rules or old_rich_rules:
+@@ -780,7 +777,7 @@ def _present(
+ # No changes
+ if ret["changes"] == {}:
+ ret["result"] = True
+- ret["comment"] = "'{0}' is already in the desired state.".format(name)
++ ret["comment"] = "'{}' is already in the desired state.".format(name)
+ return ret
+
+ # test=True and changes predicted
+@@ -789,7 +786,7 @@ def _present(
+ # build comment string
+ nested.__opts__ = __opts__
+ comment = []
+- comment.append("Configuration for '{0}' will change:".format(name))
++ comment.append("Configuration for '{}' will change:".format(name))
+ comment.append(nested.output(ret["changes"]).rstrip())
+ ret["comment"] = "\n".join(comment)
+ ret["changes"] = {}
+@@ -797,5 +794,5 @@ def _present(
+
+ # Changes were made successfully
+ ret["result"] = True
+- ret["comment"] = "'{0}' was configured.".format(name)
++ ret["comment"] = "'{}' was configured.".format(name)
+ return ret
--
-2.16.4
+2.29.2
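change_interface follows the same template as the other zone helpers: assemble the firewall-cmd arguments, append --permanent when requested, and run the binary. A standalone sketch under the assumption that firewall-cmd is on the PATH (the error handling below stands in for the module's __firewall_cmd wrapper):

    import shutil
    import subprocess

    def change_interface(zone, interface, permanent=True):
        """Rebind an interface to a zone with --change-interface, which
        moves it even when it is already bound to another zone."""
        firewall_cmd = shutil.which("firewall-cmd")
        if firewall_cmd is None:
            raise RuntimeError("firewall-cmd not found")
        argv = [
            firewall_cmd,
            "--zone={}".format(zone),
            "--change-interface={}".format(interface),
        ]
        if permanent:
            argv.append("--permanent")
        out = subprocess.run(argv, capture_output=True, text=True, check=False)
        if out.returncode != 0:
            raise RuntimeError("firewall-cmd failed: {}".format(out.stderr or out.stdout))
        return out.stdout.strip()

The substantive change is the verb: --change-interface rebinds an interface that already belongs to another zone, which is why the firewalld.present state now calls it instead of --add-interface.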
diff --git a/temporary-fix-extend-the-whitelist-of-allowed-comman.patch b/temporary-fix-extend-the-whitelist-of-allowed-comman.patch
index 78fb15b..08ad207 100644
--- a/temporary-fix-extend-the-whitelist-of-allowed-comman.patch
+++ b/temporary-fix-extend-the-whitelist-of-allowed-comman.patch
@@ -1,26 +1,222 @@
-From 89c188107bc60d4e84879c3f3c2fde7489a14153 Mon Sep 17 00:00:00 2001
+From 7605781decd03cb493e09893aa60a5cdbed15d35 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Thu, 24 Jan 2019 18:12:35 +0100
-Subject: [PATCH] temporary fix: extend the whitelist of allowed commands
+Subject: [PATCH] temporary fix: extend the whitelist of allowed
+ commands
---
- salt/auth/__init__.py | 2 ++
- 1 file changed, 2 insertions(+)
+ salt/auth/__init__.py | 48 +++++++++++++++++++++----------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py
-index 329e4a62c9..ecbd1c808c 100644
+index c4cf163a67..ee1eac7ce4 100644
--- a/salt/auth/__init__.py
+++ b/salt/auth/__init__.py
-@@ -47,6 +47,8 @@ AUTH_INTERNAL_KEYWORDS = frozenset([
- 'gather_job_timeout',
- 'kwarg',
- 'match',
-+ "id_",
-+ "force",
- 'metadata',
- 'print_event',
- 'raw',
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Salt's pluggable authentication system
+
+@@ -13,7 +12,6 @@ so that any external authentication system can be used inside of Salt
+ # 5. Cache auth token with relative data opts['token_dir']
+ # 6. Interface to verify tokens
+
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import collections
+ import getpass
+@@ -48,6 +46,8 @@ AUTH_INTERNAL_KEYWORDS = frozenset(
+ "gather_job_timeout",
+ "kwarg",
+ "match",
++ "id_",
++ "force",
+ "metadata",
+ "print_event",
+ "raw",
+@@ -56,7 +56,7 @@ AUTH_INTERNAL_KEYWORDS = frozenset(
+ )
+
+
+-class LoadAuth(object):
++class LoadAuth:
+ """
+ Wrap the authentication system to handle peripheral components
+ """
+@@ -76,7 +76,7 @@ class LoadAuth(object):
+ """
+ if "eauth" not in load:
+ return ""
+- fstr = "{0}.auth".format(load["eauth"])
++ fstr = "{}.auth".format(load["eauth"])
+ if fstr not in self.auth:
+ return ""
+ try:
+@@ -94,7 +94,7 @@ class LoadAuth(object):
+ """
+ if "eauth" not in load:
+ return False
+- fstr = "{0}.auth".format(load["eauth"])
++ fstr = "{}.auth".format(load["eauth"])
+ if fstr not in self.auth:
+ return False
+ # When making auth calls, only username, password, auth, and token
+@@ -144,7 +144,7 @@ class LoadAuth(object):
+ mod = self.opts["eauth_acl_module"]
+ if not mod:
+ mod = load["eauth"]
+- fstr = "{0}.acl".format(mod)
++ fstr = "{}.acl".format(mod)
+ if fstr not in self.auth:
+ return None
+ fcall = salt.utils.args.format_call(
+@@ -163,7 +163,7 @@ class LoadAuth(object):
+ """
+ if "eauth" not in load:
+ return auth_list
+- fstr = "{0}.process_acl".format(load["eauth"])
++ fstr = "{}.process_acl".format(load["eauth"])
+ if fstr not in self.auth:
+ return auth_list
+ try:
+@@ -179,7 +179,7 @@ class LoadAuth(object):
+ """
+ if "eauth" not in load:
+ return False
+- fstr = "{0}.groups".format(load["eauth"])
++ fstr = "{}.groups".format(load["eauth"])
+ if fstr not in self.auth:
+ return False
+ fcall = salt.utils.args.format_call(
+@@ -237,7 +237,7 @@ class LoadAuth(object):
+ if groups:
+ tdata["groups"] = groups
+
+- return self.tokens["{0}.mk_token".format(self.opts["eauth_tokens"])](
++ return self.tokens["{}.mk_token".format(self.opts["eauth_tokens"])](
+ self.opts, tdata
+ )
+
+@@ -248,7 +248,7 @@ class LoadAuth(object):
+ """
+ tdata = {}
+ try:
+- tdata = self.tokens["{0}.get_token".format(self.opts["eauth_tokens"])](
++ tdata = self.tokens["{}.get_token".format(self.opts["eauth_tokens"])](
+ self.opts, tok
+ )
+ except salt.exceptions.SaltDeserializationError:
+@@ -275,7 +275,7 @@ class LoadAuth(object):
+ """
+ List all tokens in eauth_tokn storage.
+ """
+- return self.tokens["{0}.list_tokens".format(self.opts["eauth_tokens"])](
++ return self.tokens["{}.list_tokens".format(self.opts["eauth_tokens"])](
+ self.opts
+ )
+
+@@ -283,7 +283,7 @@ class LoadAuth(object):
+ """
+ Remove the given token from token storage.
+ """
+- self.tokens["{0}.rm_token".format(self.opts["eauth_tokens"])](self.opts, tok)
++ self.tokens["{}.rm_token".format(self.opts["eauth_tokens"])](self.opts, tok)
+
+ def authenticate_token(self, load):
+ """
+@@ -459,7 +459,7 @@ class LoadAuth(object):
+ ret["error"] = {
+ "name": "EauthAuthenticationError",
+ "message": 'Authentication failure of type "eauth" occurred for '
+- "user {0}.".format(username),
++ "user {}.".format(username),
+ }
+ return ret
+
+@@ -469,7 +469,7 @@ class LoadAuth(object):
+ msg = 'Authentication failure of type "user" occurred'
+ if not auth_ret: # auth_ret can be a boolean or the effective user id
+ if show_username:
+- msg = "{0} for user {1}.".format(msg, username)
++ msg = "{} for user {}.".format(msg, username)
+ ret["error"] = {"name": "UserAuthenticationError", "message": msg}
+ return ret
+
+@@ -501,7 +501,7 @@ class LoadAuth(object):
+ return ret
+
+
+-class Resolver(object):
++class Resolver:
+ """
+ The class used to resolve options for the command line and for generic
+ interactive interfaces
+@@ -514,7 +514,7 @@ class Resolver(object):
+ def _send_token_request(self, load):
+ master_uri = "tcp://{}:{}".format(
+ salt.utils.zeromq.ip_bracket(self.opts["interface"]),
+- six.text_type(self.opts["ret_port"]),
++ str(self.opts["ret_port"]),
+ )
+ with salt.transport.client.ReqChannel.factory(
+ self.opts, crypt="clear", master_uri=master_uri
+@@ -530,16 +530,16 @@ class Resolver(object):
+ if not eauth:
+ print("External authentication system has not been specified")
+ return ret
+- fstr = "{0}.auth".format(eauth)
++ fstr = "{}.auth".format(eauth)
+ if fstr not in self.auth:
+ print(
+ (
+- 'The specified external authentication system "{0}" is '
++ 'The specified external authentication system "{}" is '
+ "not available"
+ ).format(eauth)
+ )
+ print(
+- "Available eauth types: {0}".format(
++ "Available eauth types: {}".format(
+ ", ".join([k[:-5] for k in self.auth if k.endswith(".auth")])
+ )
+ )
+@@ -550,14 +550,14 @@ class Resolver(object):
+ if arg in self.opts:
+ ret[arg] = self.opts[arg]
+ elif arg.startswith("pass"):
+- ret[arg] = getpass.getpass("{0}: ".format(arg))
++ ret[arg] = getpass.getpass("{}: ".format(arg))
+ else:
+- ret[arg] = input("{0}: ".format(arg))
++ ret[arg] = input("{}: ".format(arg))
+ for kwarg, default in list(args["kwargs"].items()):
+ if kwarg in self.opts:
+ ret["kwarg"] = self.opts[kwarg]
+ else:
+- ret[kwarg] = input("{0} [{1}]: ".format(kwarg, default))
++ ret[kwarg] = input("{} [{}]: ".format(kwarg, default))
+
+ # Use current user if empty
+ if "username" in ret and not ret["username"]:
+@@ -579,7 +579,7 @@ class Resolver(object):
+ with salt.utils.files.set_umask(0o177):
+ with salt.utils.files.fopen(self.opts["token_file"], "w+") as fp_:
+ fp_.write(tdata["token"])
+- except (IOError, OSError):
++ except OSError:
+ pass
+ return tdata
+
+@@ -602,7 +602,7 @@ class Resolver(object):
+ return tdata
+
+
+-class AuthUser(object):
++class AuthUser:
+ """
+ Represents a user requesting authentication to the salt master
+ """
--
-2.16.4
+2.29.2
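The two added keywords extend AUTH_INTERNAL_KEYWORDS, the frozenset of internal bookkeeping arguments that the auth layer filters out before handing a call to an external authentication backend. A small sketch of how such a filter is applied (the keyword set here is abbreviated):

    AUTH_INTERNAL_KEYWORDS = frozenset(
        ["fun", "arg", "tgt", "kwarg", "match", "id_", "force", "metadata"]
    )

    def strip_internal(load):
        """Drop internal bookkeeping keys so only user-supplied
        arguments reach the external authentication backend."""
        return {k: v for k, v in load.items() if k not in AUTH_INTERNAL_KEYWORDS}

    load = {"username": "alice", "password": "s3cret", "id_": "minion1", "force": True}
    print(strip_internal(load))  # {'username': 'alice', 'password': 's3cret'}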
diff --git a/transactional_update-unify-with-chroot.call.patch b/transactional_update-unify-with-chroot.call.patch
index a822683..09c097d 100644
--- a/transactional_update-unify-with-chroot.call.patch
+++ b/transactional_update-unify-with-chroot.call.patch
@@ -1,34 +1,35 @@
-From 383cb53b9936b8ff1d8707c74daf5001add2dd20 Mon Sep 17 00:00:00 2001
+From c7041ad87261ae9c41f02f38ba0dbe1bab197f15 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Wed, 4 Nov 2020 16:34:47 +0100
Subject: [PATCH] transactional_update: unify with chroot.call
Return for both .call() "retcode" when fail
---
- salt/modules/chroot.py | 5 +++--
+ salt/modules/chroot.py | 8 ++++++--
salt/modules/transactional_update.py | 4 ++--
- tests/unit/modules/test_chroot.py | 6 ++++--
+ tests/unit/modules/test_chroot.py | 12 +++++++++---
tests/unit/modules/test_transactional_update.py | 12 ++++++++++--
- 4 files changed, 19 insertions(+), 8 deletions(-)
+ 4 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/salt/modules/chroot.py b/salt/modules/chroot.py
-index 5e890b5c35..fbec3ea788 100644
+index 1e2948607e..fda5b0c63c 100644
--- a/salt/modules/chroot.py
+++ b/salt/modules/chroot.py
-@@ -192,10 +192,11 @@ def call(root, function, *args, **kwargs):
- if isinstance(local, dict) and 'retcode' in local:
- __context__['retcode'] = local['retcode']
- return local.get('return', data)
+@@ -188,8 +188,12 @@ def call(root, function, *args, **kwargs):
+ if isinstance(local, dict) and "retcode" in local:
+ __context__["retcode"] = local["retcode"]
+ return local.get("return", data)
- except (KeyError, ValueError):
+- return {"result": False, "comment": "Can't parse container command output"}
+ except ValueError:
- return {
- 'result': False,
-- 'comment': "Can't parse container command output"
-+ 'retcode': ret['retcode'],
-+ 'comment': {'stdout': ret['stdout'], 'stderr': ret['stderr']},
- }
++ return {
++ "result": False,
++ "retcode": ret["retcode"],
++ "comment": {"stdout": ret["stdout"], "stderr": ret["stderr"]},
++ }
finally:
- __utils__['files.rm_rf'](thin_dest_path)
+ __utils__["files.rm_rf"](thin_dest_path)
+
diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py
index 9b14557e07..7bbdb697b8 100644
--- a/salt/modules/transactional_update.py
@@ -45,40 +46,43 @@ index 9b14557e07..7bbdb697b8 100644
__utils__["files.rm_rf"](thin_dest_path)
diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py
-index 045d56c5b0..eb7b8cb4aa 100644
+index a0f3f8e6af..7d3724e0c4 100644
--- a/tests/unit/modules/test_chroot.py
+++ b/tests/unit/modules/test_chroot.py
-@@ -145,13 +145,14 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
+@@ -133,19 +133,25 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
utils_mock = {
- 'thin.gen_thin': MagicMock(return_value='/salt-thin.tgz'),
- 'files.rm_rf': MagicMock(),
-- 'json.find_json': MagicMock(return_value={'return': {}})
-+ 'json.find_json': MagicMock(side_effect=ValueError())
+ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"),
+ "files.rm_rf": MagicMock(),
+- "json.find_json": MagicMock(return_value={"return": {}}),
++ "json.find_json": MagicMock(side_effect=ValueError()),
}
salt_mock = {
- 'cmd.run': MagicMock(return_value=''),
- 'config.option': MagicMock(),
- 'cmd.run_chroot': MagicMock(return_value={
- 'retcode': 1,
-+ 'stdout': '',
- 'stderr': 'Error',
- }),
+ "cmd.run": MagicMock(return_value=""),
+ "config.option": MagicMock(),
+- "cmd.run_chroot": MagicMock(return_value={"retcode": 1, "stderr": "Error"}),
++ "cmd.run_chroot": MagicMock(
++ return_value={"retcode": 1, "stdout": "", "stderr": "Error",}
++ ),
}
-@@ -159,7 +160,8 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
- patch.dict(chroot.__salt__, salt_mock):
- self.assertEqual(chroot.call('/chroot', 'test.ping'), {
- 'result': False,
-- 'comment': "Can't parse container command output"
-+ 'retcode': 1,
-+ 'comment': {'stdout': '', 'stderr': 'Error'},
- })
- utils_mock['thin.gen_thin'].assert_called_once()
- salt_mock['config.option'].assert_called()
+ with patch.dict(chroot.__utils__, utils_mock), patch.dict(
+ chroot.__salt__, salt_mock
+ ):
+ self.assertEqual(
+ chroot.call("/chroot", "test.ping"),
+- {"result": False, "comment": "Can't parse container command output"},
++ {
++ "result": False,
++ "retcode": 1,
++ "comment": {"stdout": "", "stderr": "Error"},
++ },
+ )
+ utils_mock["thin.gen_thin"].assert_called_once()
+ salt_mock["config.option"].assert_called()
diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py
-index b42734a53d..4616e0968f 100644
+index 08a704c212..19e477d02f 100644
--- a/tests/unit/modules/test_transactional_update.py
+++ b/tests/unit/modules/test_transactional_update.py
-@@ -372,7 +372,11 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
+@@ -370,7 +370,11 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(tu.__utils__, utils_mock), patch.dict(
tu.__opts__, opts_mock
), patch.dict(tu.__salt__, salt_mock):
@@ -91,7 +95,7 @@ index b42734a53d..4616e0968f 100644
utils_mock["thin.gen_thin"].assert_called_once()
salt_mock["config.option"].assert_called()
-@@ -424,7 +428,11 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
+@@ -422,7 +426,11 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(tu.__utils__, utils_mock), patch.dict(
tu.__opts__, opts_mock
), patch.dict(tu.__salt__, salt_mock):
@@ -105,6 +109,6 @@ index b42734a53d..4616e0968f 100644
utils_mock["thin.gen_thin"].assert_called_once()
salt_mock["config.option"].assert_called()
--
-2.29.1
+2.29.2
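For reviewers: the chroot and transactional_update hunks above make both .call() implementations return one and the same failure shape when the inner command output cannot be parsed as JSON. A minimal standalone sketch of that shape, assuming ret is the raw cmd.run_chroot result dict as in the patched code:

    # Sketch of the unified failure dict both modules now return when
    # parsing the container command output raises ValueError.
    def failure(ret):
        return {
            "result": False,
            "retcode": ret["retcode"],
            "comment": {"stdout": ret["stdout"], "stderr": ret["stderr"]},
        }

    print(failure({"retcode": 1, "stdout": "", "stderr": "Error"}))
    # {'result': False, 'retcode': 1, 'comment': {'stdout': '', 'stderr': 'Error'}}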
diff --git a/update-documentation.sh b/update-documentation.sh
index f0e66f4..ada0b73 100644
--- a/update-documentation.sh
+++ b/update-documentation.sh
@@ -39,7 +39,7 @@ EOF
}
function quilt_setup() {
- quilt setup salt.spec
+ quilt setup -v salt.spec
cd $1
quilt push -a
}
@@ -81,14 +81,14 @@ START=$(pwd)
V_ENV="sphinx_doc_gen"
V_TMP=$(mktemp -d)
-for f in "salt.spec" "salt*tar.gz"; do
+for f in "salt.spec" "v*tar.gz" "*"; do
cp -v $f $V_TMP
done
cd $V_TMP;
build_virtenv $V_ENV $2;
-SRC_DIR="salt-$(cat salt.spec | grep ^Version: | cut -d: -f2 | sed -e 's/[[:blank:]]//g')";
+SRC_DIR="salt-$(cat salt.spec | grep ^Version: | cut -d: -f2 | sed -e 's/[[:blank:]]//g')-suse";
quilt_setup $SRC_DIR
build_docs doc $V_TMP
diff --git a/use-adler32-algorithm-to-compute-string-checksums.patch b/use-adler32-algorithm-to-compute-string-checksums.patch
index 8961ffb..e9793cf 100644
--- a/use-adler32-algorithm-to-compute-string-checksums.patch
+++ b/use-adler32-algorithm-to-compute-string-checksums.patch
@@ -1,4 +1,4 @@
-From a8e3defcb484296e18343c6447649fe508ab2644 Mon Sep 17 00:00:00 2001
+From 267868c148619be1eb89bcfa9c1184fa809fce2d Mon Sep 17 00:00:00 2001
From: Bo Maryniuk
Date: Sat, 28 Jul 2018 22:59:04 +0200
Subject: [PATCH] Use Adler32 algorithm to compute string checksums
@@ -15,130 +15,121 @@ Choose CRC method, default to faster but less reliable "adler32", if crc is in u
Add warning for Sodium.
---
- salt/config/__init__.py | 7 ++++++-
- salt/grains/core.py | 53 ++++++++++++++++++++++++++++++++-----------------
- 2 files changed, 41 insertions(+), 19 deletions(-)
+ salt/config/__init__.py | 4 +++
+ salt/grains/core.py | 56 ++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 56 insertions(+), 4 deletions(-)
diff --git a/salt/config/__init__.py b/salt/config/__init__.py
-index 70b34ec949..0ebe1181dd 100644
+index 8b498ab9e2..6d30fca072 100644
--- a/salt/config/__init__.py
+++ b/salt/config/__init__.py
-@@ -1190,6 +1190,10 @@ VALID_OPTS = immutabletypes.freeze({
- # Allow raw_shell option when using the ssh
- # client via the Salt API
- 'netapi_allow_raw_shell': bool,
-+
-+ # Use Adler32 hashing algorithm for server_id (default False until Sodium, "adler32" after)
-+ # Possible values are: False, adler32, crc32
-+ 'server_id_use_crc': (bool, six.string_types),
- })
+@@ -945,6 +945,9 @@ VALID_OPTS = immutabletypes.freeze(
+ "disabled_requisites": (str, list),
+ # Feature flag config
+ "features": dict,
++ # Use Adler32 hashing algorithm for server_id (default False until Sodium, "adler32" after)
++ # Possible values are: False, adler32, crc32
++ "server_id_use_crc": (bool, str),
+ }
+ )
- # default configurations
-@@ -1480,7 +1484,8 @@ DEFAULT_MINION_OPTS = immutabletypes.freeze({
- 'minion_sign_messages': False,
- 'discovery': False,
- 'schedule': {},
-- 'ssh_merge_pillar': True
-+ 'ssh_merge_pillar': True,
-+ 'server_id_use_crc': False,
- })
+@@ -1243,6 +1246,7 @@ DEFAULT_MINION_OPTS = immutabletypes.freeze(
+ "schedule": {},
+ "ssh_merge_pillar": True,
+ "disabled_requisites": [],
++ "server_id_use_crc": False,
+ }
+ )
- DEFAULT_MASTER_OPTS = immutabletypes.freeze({
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 2851809472..9c1b5d930e 100644
+index 00bd0565bf..5535584d1b 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -20,6 +20,7 @@ import platform
- import logging
- import locale
+@@ -20,6 +20,7 @@ import socket
+ import sys
+ import time
import uuid
+import zlib
from errno import EACCES, EPERM
- import datetime
- import warnings
-@@ -62,6 +63,7 @@ import salt.utils.path
+
+ import distro
+@@ -39,6 +40,7 @@ import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
+import salt.utils.versions
- from salt.ext import six
from salt.ext.six.moves import range
+ from salt.utils.network import _get_interfaces
-@@ -2792,40 +2794,55 @@ def _hw_data(osdata):
+@@ -2935,6 +2937,36 @@ def _hw_data(osdata):
return grains
--def get_server_id():
+def _get_hash_by_shell():
- '''
-- Provides an integer based on the FQDN of a machine.
-- Useful as server-id in MySQL replication or anywhere else you'll need an ID
-- like this.
++ """
+ Shell out to Python 3 to compute a reliable hash
+ :return:
- '''
-- # Provides:
-- # server_id
--
-- if salt.utils.platform.is_proxy():
-- return {}
- id_ = __opts__.get('id', '')
- id_hash = None
- py_ver = sys.version_info[:2]
- if py_ver >= (3, 3):
- # Python 3.3 enabled hash randomization, so we need to shell out to get
- # a reliable hash.
-- id_hash = __salt__['cmd.run'](
-- [sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
-- env={'PYTHONHASHSEED': '0'}
-- )
-+ id_hash = __salt__['cmd.run']([sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
-+ env={'PYTHONHASHSEED': '0'})
- try:
- id_hash = int(id_hash)
- except (TypeError, ValueError):
-- log.debug(
-- 'Failed to hash the ID to get the server_id grain. Result of '
-- 'hash command: %s', id_hash
-- )
-+ log.debug('Failed to hash the ID to get the server_id grain. Result of hash command: %s', id_hash)
- id_hash = None
- if id_hash is None:
- # Python < 3.3 or error encountered above
- id_hash = hash(id_)
-
-- return {'server_id': abs(id_hash % (2 ** 31))}
++ """
++ id_ = __opts__.get("id", "")
++ id_hash = None
++ py_ver = sys.version_info[:2]
++ if py_ver >= (3, 3):
++ # Python 3.3 enabled hash randomization, so we need to shell out to get
++ # a reliable hash.
++ id_hash = __salt__["cmd.run"](
++ [sys.executable, "-c", 'print(hash("{}"))'.format(id_)],
++ env={"PYTHONHASHSEED": "0"},
++ )
++ try:
++ id_hash = int(id_hash)
++ except (TypeError, ValueError):
++ log.debug(
++ "Failed to hash the ID to get the server_id grain. Result of hash command: %s",
++ id_hash,
++ )
++ id_hash = None
++ if id_hash is None:
++ # Python < 3.3 or error encountered above
++ id_hash = hash(id_)
++
+ return abs(id_hash % (2 ** 31))
+
+
-+def get_server_id():
-+ '''
-+ Provides an integer based on the FQDN of a machine.
-+ Useful as server-id in MySQL replication or anywhere else you'll need an ID
-+ like this.
-+ '''
-+ # Provides:
-+ # server_id
-+
-+ if salt.utils.platform.is_proxy():
+ def get_server_id():
+ """
+ Provides an integer based on the FQDN of a machine.
+@@ -2945,10 +2977,26 @@ def get_server_id():
+ # server_id
+
+ if salt.utils.platform.is_proxy():
+- return {}
+- id_ = __opts__.get("id", "")
+- hash_ = int(hashlib.sha256(id_.encode()).hexdigest(), 16)
+- return {"server_id": abs(hash_ % (2 ** 31))}
+ server_id = {}
+ else:
-+ use_crc = __opts__.get('server_id_use_crc')
++ use_crc = __opts__.get("server_id_use_crc")
+ if bool(use_crc):
-+ id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff
++ id_hash = (
++ getattr(zlib, use_crc, zlib.adler32)(__opts__.get("id", "").encode())
++ & 0xFFFFFFFF
++ )
+ else:
-+ salt.utils.versions.warn_until('Sodium', 'This server_id is computed nor by Adler32 neither by CRC32. '
-+ 'Please use "server_id_use_crc" option and define algorithm you'
-+ 'prefer (default "Adler32"). The server_id will be computed with'
-+ 'Adler32 by default.')
++ salt.utils.versions.warn_until(
++ "Sodium",
++                "This server_id is computed neither by Adler32 nor by CRC32. "
++                'Please use the "server_id_use_crc" option and define the algorithm you '
++                'prefer (default "Adler32"). The server_id will be computed with '
++ "Adler32 by default.",
++ )
+ id_hash = _get_hash_by_shell()
-+ server_id = {'server_id': id_hash}
++ server_id = {"server_id": id_hash}
+
+ return server_id
def get_master():
--
-2.16.4
+2.29.2
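The core of the patched grain is a plain zlib checksum of the minion id. A runnable sketch of that path (the helper name and the sample ids are hypothetical; the real code reads the id from __opts__):

    import zlib

    def server_id(minion_id, use_crc="adler32"):
        # Pick zlib.adler32 or zlib.crc32; unknown names fall back to
        # adler32, and the result is masked to an unsigned 32-bit value,
        # exactly as in the patched get_server_id().
        return getattr(zlib, use_crc, zlib.adler32)(minion_id.encode()) & 0xFFFFFFFF

    print(server_id("minion1"))           # adler32-based id
    print(server_id("minion1", "crc32"))  # crc32-based id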
diff --git a/use-current-ioloop-for-the-localclient-instance-of-b.patch b/use-current-ioloop-for-the-localclient-instance-of-b.patch
index 48d6eea..cf5348f 100644
--- a/use-current-ioloop-for-the-localclient-instance-of-b.patch
+++ b/use-current-ioloop-for-the-localclient-instance-of-b.patch
@@ -1,4 +1,4 @@
-From 1ab46d5f9ed435021aa8eeb40ada984f42c8e93d Mon Sep 17 00:00:00 2001
+From 1558a1b092e56da005a26bc381238a3304904622 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Thu, 3 Oct 2019 15:19:02 +0100
@@ -6,23 +6,25 @@ Subject: [PATCH] Use current IOLoop for the LocalClient instance of
BatchAsync (bsc#1137642)
---
- salt/cli/batch_async.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
+ salt/cli/batch_async.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 2bb50459c8..f9e736f804 100644
+index 0a0b8f5f83..89405ba917 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
-@@ -52,7 +52,7 @@ class BatchAsync(object):
- '''
+@@ -47,7 +47,9 @@ class BatchAsync:
+
def __init__(self, parent_opts, jid_gen, clear_load):
ioloop = tornado.ioloop.IOLoop.current()
-- self.local = salt.client.get_local_client(parent_opts['conf_file'])
-+ self.local = salt.client.get_local_client(parent_opts['conf_file'], io_loop=ioloop)
- if 'gather_job_timeout' in clear_load['kwargs']:
- clear_load['gather_job_timeout'] = clear_load['kwargs'].pop('gather_job_timeout')
- else:
-@@ -266,6 +266,7 @@ class BatchAsync(object):
+- self.local = salt.client.get_local_client(parent_opts["conf_file"])
++ self.local = salt.client.get_local_client(
++ parent_opts["conf_file"], io_loop=ioloop
++ )
+ if "gather_job_timeout" in clear_load["kwargs"]:
+ clear_load["gather_job_timeout"] = clear_load["kwargs"].pop(
+ "gather_job_timeout"
+@@ -276,6 +278,7 @@ class BatchAsync:
yield
def __del__(self):
@@ -31,6 +33,6 @@ index 2bb50459c8..f9e736f804 100644
self.ioloop = None
gc.collect()
--
-2.16.4
+2.29.2
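The bug class this patch addresses is a client building a private IOLoop instead of reusing the caller's. A small sketch with a stand-in client class (salt's LocalClient accepts io_loop the same way once patched):

    import tornado.ioloop

    class Client:
        # Stand-in for salt.client.LocalClient: without io_loop the real
        # client creates its own loop, which is what detached BatchAsync's
        # callbacks from the running loop before this fix.
        def __init__(self, io_loop=None):
            self.io_loop = io_loop or tornado.ioloop.IOLoop()

    current = tornado.ioloop.IOLoop.current()
    client = Client(io_loop=current)
    assert client.io_loop is current  # BatchAsync and the client share one loop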
diff --git a/use-full-option-name-instead-of-undocumented-abbrevi.patch b/use-full-option-name-instead-of-undocumented-abbrevi.patch
deleted file mode 100644
index b800758..0000000
--- a/use-full-option-name-instead-of-undocumented-abbrevi.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From c4742f553fe60aee82577622def1eeca0e2abf93 Mon Sep 17 00:00:00 2001
-From: Michael Calmer
-Date: Sun, 1 Mar 2020 16:22:54 +0100
-Subject: [PATCH] use full option name instead of undocumented
- abbreviation
-
----
- salt/modules/zypperpkg.py | 2 +-
- tests/unit/modules/test_zypperpkg.py | 14 +++++++++++++-
- 2 files changed, 14 insertions(+), 2 deletions(-)
-
-diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 0c15214e5e..e3f802a911 100644
---- a/salt/modules/zypperpkg.py
-+++ b/salt/modules/zypperpkg.py
-@@ -2498,7 +2498,7 @@ def list_products(all=False, refresh=False, root=None):
- OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep))
- cmd = list()
- if not all:
-- cmd.append('--disable-repos')
-+ cmd.append('--disable-repositories')
- cmd.append('products')
- if not all:
- cmd.append('-i')
-diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 76937cc358..2a8e753b9d 100644
---- a/tests/unit/modules/test_zypperpkg.py
-+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -238,7 +238,18 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- 'stdout': get_test_data(filename)
- }
-
-- with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=ref_out)}):
-+ cmd_run_all = MagicMock(return_value=ref_out)
-+ mock_call = call(['zypper',
-+ '--non-interactive',
-+ '--xmlout',
-+ '--no-refresh',
-+ '--disable-repositories',
-+ 'products', u'-i'],
-+ env={'ZYPP_READONLY_HACK': '1'},
-+ output_loglevel='trace',
-+ python_shell=False)
-+
-+ with patch.dict(zypper.__salt__, {'cmd.run_all': cmd_run_all}):
- products = zypper.list_products()
- self.assertEqual(len(products), 7)
- self.assertIn(test_data['vendor'], [product['vendor'] for product in products])
-@@ -247,6 +258,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
- self.assertCountEqual(test_data[kwd], [prod.get(kwd) for prod in products])
- else:
- self.assertEqual(test_data[kwd], sorted([prod.get(kwd) for prod in products]))
-+ cmd_run_all.assert_has_calls([mock_call])
-
- def test_refresh_db(self):
- '''
---
-2.16.4
-
-
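For context on the patch dropped above: it only spelled out zypper's documented long option when listing installed products. A sketch of the argument-list construction it touched:

    # Build the zypper argument list as list_products() does; the dropped
    # patch replaced the undocumented '--disable-repos' abbreviation with
    # the full '--disable-repositories' spelling shown here.
    def products_args(all_products=False):
        cmd = []
        if not all_products:
            cmd.append("--disable-repositories")
        cmd.append("products")
        if not all_products:
            cmd.append("-i")
        return cmd

    print(products_args())  # ['--disable-repositories', 'products', '-i']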
diff --git a/use-threadpool-from-multiprocessing.pool-to-avoid-le.patch b/use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
index 566ca90..a56fda1 100644
--- a/use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
+++ b/use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
@@ -1,4 +1,4 @@
-From 1f50b796dd551c25a8fc87fe825d1508f340858e Mon Sep 17 00:00:00 2001
+From 9ebf39f96704f17886e306ee270a06ef7126c381 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Tue, 30 Apr 2019 10:51:42 +0100
@@ -10,19 +10,19 @@ Subject: [PATCH] Use ThreadPool from multiprocessing.pool to avoid
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 4600f055dd..f1e3ebe9d2 100644
+index 883e3ebe09..bebb4581bc 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
-@@ -27,7 +27,7 @@ import datetime
+@@ -23,7 +23,7 @@ import uuid
import warnings
- import time
-
+ import zlib
+ from errno import EACCES, EPERM
-from multiprocessing.dummy import Pool as ThreadPool
+from multiprocessing.pool import ThreadPool
- # pylint: disable=import-error
- try:
-@@ -2300,10 +2300,14 @@ def fqdns():
+ import distro
+ import salt.exceptions
+@@ -2442,10 +2442,14 @@ def fqdns():
# Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
# This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
# that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
@@ -32,16 +32,16 @@ index 4600f055dd..f1e3ebe9d2 100644
- pool.join()
+
+ try:
-+ pool = ThreadPool(8)
-+ results = pool.map(_lookup_fqdn, addresses)
-+ pool.close()
-+ pool.join()
++ pool = ThreadPool(8)
++ results = pool.map(_lookup_fqdn, addresses)
++ pool.close()
++ pool.join()
+ except Exception as exc:
-+ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
++ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc)
for item in results:
if item:
--
-2.16.4
+2.29.2
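The pattern the patch settles on is a real thread pool with explicit close/join plus a guard around pool creation. A self-contained sketch (the sample addresses and helper name are hypothetical):

    import socket
    from multiprocessing.pool import ThreadPool  # not multiprocessing.dummy

    def lookup_fqdn(address):
        # Reverse-resolve one address; failures yield None instead of raising.
        try:
            return socket.gethostbyaddr(address)[0]
        except OSError:
            return None

    addresses = ["127.0.0.1", "192.0.2.1"]  # hypothetical inputs
    results = []
    try:
        pool = ThreadPool(8)
        results = pool.map(lookup_fqdn, addresses)
        pool.close()
        pool.join()
    except Exception as exc:
        print("Exception while creating a ThreadPool for resolving FQDNs:", exc)

    print([fqdn for fqdn in results if fqdn])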
diff --git a/v3000.3.tar.gz b/v3000.3.tar.gz
deleted file mode 100644
index bd78a9c..0000000
--- a/v3000.3.tar.gz
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:923b9c43c53a9ac290dc3e2176d998b1f5e09742e6ff26a1a9d7275db1cee4ad
-size 15256160
diff --git a/v3002.2.tar.gz b/v3002.2.tar.gz
new file mode 100644
index 0000000..0abf8b6
--- /dev/null
+++ b/v3002.2.tar.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99af557351f56689770acbd6a7455b0c0f2acd4cf8daf4e0cb4bbf141407a678
+size 15839320
diff --git a/virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch b/virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
index 9c811c6..5faef42 100644
--- a/virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
+++ b/virt-adding-kernel-boot-parameters-to-libvirt-xml-55.patch
@@ -1,4 +1,4 @@
-From f8ccfae9908d6a1001d68a1b8e5e8cee495b5aef Mon Sep 17 00:00:00 2001
+From 7bc4876fae850062b06bf4e3c5a56310addf52d3 Mon Sep 17 00:00:00 2001
From: Larry Dewey
Date: Tue, 7 Jan 2020 02:48:11 -0700
Subject: [PATCH] virt: adding kernel boot parameters to libvirt xml
@@ -17,26 +17,31 @@ Signed-off-by: Larry Dewey
Signed-off-by: Larry Dewey
---
- salt/states/virt.py | 17 +++++++++++++++++
- 1 file changed, 17 insertions(+)
+ salt/states/virt.py | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/salt/states/virt.py b/salt/states/virt.py
-index 500509fcc0..55a9ad2616 100644
+index 486aeb21b3..200c79d35c 100644
--- a/salt/states/virt.py
+++ b/salt/states/virt.py
-@@ -367,6 +367,23 @@ def running(name,
+@@ -617,13 +617,22 @@ def running(
.. versionadded:: 3000
+- :param boot_dev:
+- Space separated list of devices to boot from sorted by decreasing priority.
+- Values can be ``hd``, ``fd``, ``cdrom`` or ``network``.
+ :param boot:
+ Specifies kernel for the virtual machine, as well as boot parameters
+ for the virtual machine. This is an optional parameter, and all of the
+ keys are optional within the dictionary. If a remote path is provided
+ to kernel or initrd, salt will handle the downloading of the specified
+ remote file, and will modify the XML accordingly.
-+
+
+- By default, the value will ``"hd"``.
+ .. code-block:: python
-+
+
+- .. versionadded:: 3002
+ {
+ 'kernel': '/root/f8-i386-vmlinuz',
+ 'initrd': '/root/f8-i386-initrd',
@@ -44,11 +49,10 @@ index 500509fcc0..55a9ad2616 100644
+ }
+
+ .. versionadded:: 3000
-+
+
.. rubric:: Example States
- Make sure an already-defined virtual machine called ``domain_name`` is running:
--
-2.16.4
+2.29.2
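The docstring added above documents the boot dictionary; a minimal usage sketch follows. The paths and URL are the docstring's own illustrative values, and the state invocation is shown only as a comment since it needs a loaded minion:

    # All keys are optional; remote kernel/initrd paths are downloaded by
    # salt before being written into the libvirt XML.
    boot = {
        "kernel": "/root/f8-i386-vmlinuz",
        "initrd": "/root/f8-i386-initrd",
        "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
    }

    # The virt.running state consumes this dict via its 'boot' argument,
    # e.g. (sketch, not a definitive call):
    # __states__["virt.running"]("domain_name", boot=boot)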
diff --git a/virt._get_domain-don-t-raise-an-exception-if-there-i.patch b/virt._get_domain-don-t-raise-an-exception-if-there-i.patch
index f95a9b8..697a4a5 100644
--- a/virt._get_domain-don-t-raise-an-exception-if-there-i.patch
+++ b/virt._get_domain-don-t-raise-an-exception-if-there-i.patch
@@ -1,4 +1,4 @@
-From ef376e2d9a8360367a9a214d8f50d56889f3a664 Mon Sep 17 00:00:00 2001
+From 3cb366a1f777a9c8ee1a1c679c65d67fe5b89db0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Tue, 17 Mar 2020 11:01:48 +0100
Subject: [PATCH] virt._get_domain: don't raise an exception if there
@@ -7,73 +7,173 @@ Subject: [PATCH] virt._get_domain: don't raise an exception if there
Raising an exception if there is no VM in _get_domain makes sense if
looking for some VMs, but not when listing all VMs.
---
- salt/modules/virt.py | 2 +-
- tests/unit/modules/test_virt.py | 41 +++++++++++++++++++++++++++++++++
- 2 files changed, 42 insertions(+), 1 deletion(-)
+ tests/unit/modules/test_virt.py | 155 --------------------------------
+ 1 file changed, 155 deletions(-)
-diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index f0820e882524e1ebaae335e0a72940d6ff85c1b2..c8e046a47ae76b50651871fe1d149590d5d1e930 100644
---- a/salt/modules/virt.py
-+++ b/salt/modules/virt.py
-@@ -268,7 +268,7 @@ def _get_domain(conn, *vms, **kwargs):
- for id_ in conn.listDefinedDomains():
- all_vms.append(id_)
-
-- if not all_vms:
-+ if vms and not all_vms:
- raise CommandExecutionError('No virtual machines found.')
-
- if vms:
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index 8690154662f41c8d9699fba62fcda6d83208a7d7..3e9bd5ef49dfddc019f9b4da1b505d81018e7eed 100644
+index db6ba007b7..a5c876e27d 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -3639,3 +3639,44 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- }
- },
- [backend for backend in backends if backend['name'] == 'netfs'][0]['options'])
-+
-+ def test_get_domain(self):
-+ '''
-+ Test the virt._get_domain function
-+ '''
-+ # Tests with no VM
-+ self.mock_conn.listDomainsID.return_value = []
-+ self.mock_conn.listDefinedDomains.return_value = []
-+ self.assertEqual([], virt._get_domain(self.mock_conn))
-+ self.assertRaisesRegex(CommandExecutionError, 'No virtual machines found.',
-+ virt._get_domain, self.mock_conn, 'vm2')
-+
-+ # Test with active and inactive VMs
-+ self.mock_conn.listDomainsID.return_value = [1]
-+
-+ def create_mock_vm(idx):
-+ mock_vm = MagicMock()
-+ mock_vm.name.return_value = 'vm{0}'.format(idx)
-+ return mock_vm
-+
-+ mock_vms = [create_mock_vm(idx) for idx in range(3)]
-+ self.mock_conn.lookupByID.return_value = mock_vms[0]
-+ self.mock_conn.listDefinedDomains.return_value = ['vm1', 'vm2']
-+
-+ self.mock_conn.lookupByName.side_effect = mock_vms
-+ self.assertEqual(mock_vms, virt._get_domain(self.mock_conn))
-+
-+ self.mock_conn.lookupByName.side_effect = None
-+ self.mock_conn.lookupByName.return_value = mock_vms[0]
-+ self.assertEqual(mock_vms[0], virt._get_domain(self.mock_conn, inactive=False))
-+
-+ self.mock_conn.lookupByName.return_value = None
-+ self.mock_conn.lookupByName.side_effect = [mock_vms[1], mock_vms[2]]
-+ self.assertEqual([mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, active=False))
-+
-+ self.mock_conn.reset_mock()
-+ self.mock_conn.lookupByName.return_value = None
-+ self.mock_conn.lookupByName.side_effect = [mock_vms[1], mock_vms[2]]
-+ self.assertEqual([mock_vms[1], mock_vms[2]], virt._get_domain(self.mock_conn, 'vm1', 'vm2'))
-+ self.assertRaisesRegex(CommandExecutionError, 'The VM "vm2" is not present',
-+ virt._get_domain, self.mock_conn, 'vm2', inactive=False)
+@@ -4898,158 +4898,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ "vm2",
+ inactive=False,
+ )
+-
+- def test_volume_define(self):
+- """
+- Test virt.volume_define function
+- """
+- # Normal test case
+- pool_mock = MagicMock()
+- pool_mock.XMLDesc.return_value = ""
+- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+-
+- self.assertTrue(
+- virt.volume_define(
+- "testpool",
+- "myvm_system.qcow2",
+- 8192,
+- allocation=4096,
+- format="qcow2",
+- type="file",
+- )
+- )
+-
+- expected_xml = (
+- "\n"
+- " myvm_system.qcow2\n"
+- " \n"
+- " \n"
+- " 8388608\n"
+- " 4194304\n"
+- " \n"
+- " \n"
+- " \n"
+- ""
+- )
+-
+- pool_mock.createXML.assert_called_once_with(expected_xml, 0)
+-
+- # backing store test case
+- pool_mock.reset_mock()
+- self.assertTrue(
+- virt.volume_define(
+- "testpool",
+- "myvm_system.qcow2",
+- 8192,
+- allocation=4096,
+- format="qcow2",
+- type="file",
+- backing_store={"path": "/path/to/base.raw", "format": "raw"},
+- )
+- )
+-
+- expected_xml = (
+- "\n"
+- " myvm_system.qcow2\n"
+- " \n"
+- " \n"
+- " 8388608\n"
+- " 4194304\n"
+- " \n"
+- " \n"
+- " \n"
+- " \n"
+- " /path/to/base.raw\n"
+- " \n"
+- " \n"
+- ""
+- )
+-
+- pool_mock.createXML.assert_called_once_with(expected_xml, 0)
+-
+- # logical pool test case
+- pool_mock.reset_mock()
+- pool_mock.XMLDesc.return_value = ""
+- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+-
+- self.assertTrue(
+- virt.volume_define(
+- "testVG",
+- "myvm_system",
+- 8192,
+- backing_store={"path": "/dev/testVG/base"},
+- )
+- )
+-
+- expected_xml = (
+- "\n"
+- " myvm_system\n"
+- " \n"
+- " \n"
+- " 8388608\n"
+- " 8388608\n"
+- " \n"
+- " \n"
+- " \n"
+- " /dev/testVG/base\n"
+- " \n"
+- ""
+- )
+-
+- pool_mock.createXML.assert_called_once_with(expected_xml, 0)
+-
+- def test_volume_upload(self):
+- """
+- Test virt.volume_upload function
+- """
+- pool_mock = MagicMock()
+- vol_mock = MagicMock()
+- pool_mock.storageVolLookupByName.return_value = vol_mock
+- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+- stream_mock = MagicMock()
+- self.mock_conn.newStream.return_value = stream_mock
+-
+- open_mock = MagicMock()
+- close_mock = MagicMock()
+- with patch.dict(
+- os.__dict__, {"open": open_mock, "close": close_mock}
+- ): # pylint: disable=no-member
+- # Normal case
+- self.assertTrue(virt.volume_upload("pool0", "vol1.qcow2", "/path/to/file"))
+- stream_mock.sendAll.assert_called_once()
+- stream_mock.finish.assert_called_once()
+- self.mock_conn.close.assert_called_once()
+- vol_mock.upload.assert_called_once_with(stream_mock, 0, 0, 0)
+-
+- # Sparse upload case
+- stream_mock.sendAll.reset_mock()
+- vol_mock.upload.reset_mock()
+- self.assertTrue(
+- virt.volume_upload(
+- "pool0",
+- "vol1.qcow2",
+- "/path/to/file",
+- offset=123,
+- length=456,
+- sparse=True,
+- )
+- )
+- stream_mock.sendAll.assert_not_called()
+- stream_mock.sparseSendAll.assert_called_once()
+- vol_mock.upload.assert_called_once_with(
+- stream_mock,
+- 123,
+- 456,
+- self.mock_libvirt.VIR_STORAGE_VOL_UPLOAD_SPARSE_STREAM,
+- )
+-
+- # Upload unsupported case
+- vol_mock.upload.side_effect = self.mock_libvirt.libvirtError("Unsupported")
+- self.assertRaisesRegex(
+- CommandExecutionError,
+- "Unsupported",
+- virt.volume_upload,
+- "pool0",
+- "vol1.qcow2",
+- "/path/to/file",
+- )
--
-2.23.0
+2.29.2
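The commit message's distinction, listing all VMs versus asking for specific ones, is easy to show in isolation. CommandExecutionError here is a stand-in for salt's exception type and get_domain is a simplified model of the patched helper:

    class CommandExecutionError(Exception):
        pass

    def get_domain(all_vms, *vms):
        # Mirrors the guard the patch relies on: an empty hypervisor only
        # errors out when specific names were requested.
        if vms and not all_vms:
            raise CommandExecutionError("No virtual machines found.")
        return [vm for vm in all_vms if not vms or vm in vms]

    print(get_domain([]))        # [] -- listing with no VMs is fine
    try:
        get_domain([], "vm2")    # a named lookup still raises
    except CommandExecutionError as err:
        print(err)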
diff --git a/x509-fixes-111.patch b/x509-fixes-111.patch
index a5671f4..87df139 100644
--- a/x509-fixes-111.patch
+++ b/x509-fixes-111.patch
@@ -1,4 +1,4 @@
-From ebd4bada22dca8f384078e977202c0052a80f1fc Mon Sep 17 00:00:00 2001
+From d947df3004323a3d400aaf7b5d94580965bf4a11 Mon Sep 17 00:00:00 2001
From: Florian Bergmann
Date: Fri, 14 Sep 2018 10:30:39 +0200
Subject: [PATCH] X509 fixes (#111)
@@ -33,45 +33,81 @@ PEP8: line too long
* Fix unit tests
---
- salt/modules/publish.py | 8 +--
- salt/modules/x509.py | 129 +++++++++++++++++++-----------------------------
- salt/states/x509.py | 19 ++++---
- 3 files changed, 66 insertions(+), 90 deletions(-)
+ salt/modules/publish.py | 17 +++----
+ salt/modules/x509.py | 100 ++++++++++++++++++++--------------------
+ salt/states/x509.py | 75 ++++++++++++++++++++++++++++--
+ 3 files changed, 129 insertions(+), 63 deletions(-)
diff --git a/salt/modules/publish.py b/salt/modules/publish.py
-index 1550aa39a8..f12f1cc947 100644
+index 0926f3fa13..6d56c4d08e 100644
--- a/salt/modules/publish.py
+++ b/salt/modules/publish.py
-@@ -82,10 +82,8 @@ def _publish(
- in minion configuration but `via_master` was specified.')
- else:
- # Find the master in the list of master_uris generated by the minion base class
-- matching_master_uris = [master for master
-- in __opts__['master_uri_list']
-- if '//{0}:'.format(via_master)
-- in master]
-+ matching_master_uris = [master for master in __opts__['master_uri_list']
-+ if '//{0}:'.format(via_master) in master]
+@@ -1,15 +1,10 @@
+-# -*- coding: utf-8 -*-
+ """
+ Publish a command from a minion to a target
+ """
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+-
+-# Import python libs
+ import time
+
+-# Import salt libs
+ import salt.crypt
+ import salt.payload
+ import salt.transport.client
+@@ -93,13 +88,13 @@ def _publish(
+ matching_master_uris = [
+ master
+ for master in __opts__["master_uri_list"]
+- if "//{0}:".format(via_master) in master
++ if "//{}:".format(via_master) in master
+ ]
if not matching_master_uris:
- raise SaltInvocationError('Could not find match for {0} in \
-@@ -175,6 +173,8 @@ def _publish(
+ raise SaltInvocationError(
+- "Could not find match for {0} in \
+- list of configured masters {1} when using `via_master` option".format(
++ "Could not find match for {} in \
++ list of configured masters {} when using `via_master` option".format(
+ via_master, __opts__["master_uri_list"]
+ )
+ )
+@@ -143,7 +138,7 @@ def _publish(
+ try:
+ peer_data = channel.send(load)
+ except SaltReqTimeoutError:
+- return "'{0}' publish timed out".format(fun)
++ return "'{}' publish timed out".format(fun)
+ if not peer_data:
+ return {}
+ # CLI args are passed as strings, re-cast to keep time.sleep happy
+@@ -198,6 +193,8 @@ def _publish(
else:
return ret
+ return {}
+
- def publish(tgt,
- fun,
+ def publish(
+ tgt, fun, arg=None, tgt_type="glob", returner="", timeout=5, via_master=None
+@@ -347,4 +344,4 @@ def runner(fun, arg=None, timeout=5):
+ try:
+ return channel.send(load)
+ except SaltReqTimeoutError:
+- return "'{0}' runner publish timed out".format(fun)
++ return "'{}' runner publish timed out".format(fun)
diff --git a/salt/modules/x509.py b/salt/modules/x509.py
-index 1cdd912bfb..72ab3bb03e 100644
+index ec9c92c008..9b475f3096 100644
--- a/salt/modules/x509.py
+++ b/salt/modules/x509.py
-@@ -39,14 +39,13 @@ from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
- # Import 3rd Party Libs
+@@ -30,16 +30,13 @@ from salt.utils.odict import OrderedDict
+
try:
import M2Crypto
+-
- HAS_M2 = True
except ImportError:
- HAS_M2 = False
@@ -79,71 +115,51 @@ index 1cdd912bfb..72ab3bb03e 100644
+
try:
import OpenSSL
+-
- HAS_OPENSSL = True
except ImportError:
- HAS_OPENSSL = False
+ OpenSSL = None
- __virtualname__ = 'x509'
+ __virtualname__ = "x509"
-@@ -84,10 +83,7 @@ def __virtual__():
- '''
+@@ -79,10 +76,10 @@ def __virtual__():
+ """
only load this module if m2crypto is available
- '''
+ """
- if HAS_M2:
- return __virtualname__
- else:
-- return (False, 'Could not load x509 module, m2crypto unavailable')
-+ return __virtualname__ if M2Crypto is not None else False, 'Could not load x509 module, m2crypto unavailable'
+- return (False, "Could not load x509 module, m2crypto unavailable")
++ return (
++ __virtualname__ if M2Crypto is not None else False,
++ "Could not load x509 module, m2crypto unavailable",
++ )
class _Ctx(ctypes.Structure):
-@@ -130,10 +126,8 @@ def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
- doesn't support getting the publickeyidentifier from the issuer
- to create the authoritykeyidentifier extension.
- '''
-- if name == 'subjectKeyIdentifier' and \
-- value.strip('0123456789abcdefABCDEF:') is not '':
-- raise salt.exceptions.SaltInvocationError(
-- 'value must be precomputed hash')
-+ if name == 'subjectKeyIdentifier' and value.strip('0123456789abcdefABCDEF:') is not '':
-+ raise salt.exceptions.SaltInvocationError('value must be precomputed hash')
-
- # ensure name and value are bytes
- name = salt.utils.stringutils.to_str(name)
-@@ -148,7 +142,7 @@ def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
+@@ -140,8 +137,8 @@ def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
x509_ext_ptr = M2Crypto.m2.x509v3_ext_conf(None, ctx, name, value)
lhash = None
except AttributeError:
-- lhash = M2Crypto.m2.x509v3_lhash() # pylint: disable=no-member
+- lhash = M2Crypto.m2.x509v3_lhash()
+- ctx = M2Crypto.m2.x509v3_set_conf_lhash(lhash)
+ lhash = M2Crypto.m2.x509v3_lhash() # pylint: disable=no-member
- ctx = M2Crypto.m2.x509v3_set_conf_lhash(
- lhash) # pylint: disable=no-member
++ ctx = M2Crypto.m2.x509v3_set_conf_lhash(lhash) # pylint: disable=no-member
# ctx not zeroed
-@@ -199,10 +193,8 @@ def _get_csr_extensions(csr):
- csrtempfile.flush()
- csryaml = _parse_openssl_req(csrtempfile.name)
- csrtempfile.close()
-- if csryaml and 'Requested Extensions' in \
-- csryaml['Certificate Request']['Data']:
-- csrexts = \
-- csryaml['Certificate Request']['Data']['Requested Extensions']
-+ if csryaml and 'Requested Extensions' in csryaml['Certificate Request']['Data']:
-+ csrexts = csryaml['Certificate Request']['Data']['Requested Extensions']
-
- if not csrexts:
- return ret
-@@ -297,7 +289,7 @@ def _get_signing_policy(name):
+ _fix_ctx(ctx, issuer)
+ x509_ext_ptr = M2Crypto.m2.x509v3_ext_conf(lhash, ctx, name, value)
+@@ -280,7 +277,7 @@ def _get_signing_policy(name):
signing_policy = policies.get(name)
if signing_policy:
return signing_policy
-- return __salt__['config.get']('x509_signing_policies', {}).get(name)
-+ return __salt__['config.get']('x509_signing_policies', {}).get(name) or {}
+- return __salt__["config.get"]("x509_signing_policies", {}).get(name)
++ return __salt__["config.get"]("x509_signing_policies", {}).get(name) or {}
def _pretty_hex(hex_str):
-@@ -336,9 +328,11 @@ def _text_or_file(input_):
- '''
+@@ -318,9 +315,11 @@ def _text_or_file(input_):
+ """
if _isfile(input_):
with salt.utils.files.fopen(input_) as fp_:
- return salt.utils.stringutils.to_str(fp_.read())
@@ -156,322 +172,316 @@ index 1cdd912bfb..72ab3bb03e 100644
def _parse_subject(subject):
-@@ -356,7 +350,7 @@ def _parse_subject(subject):
- ret[nid_name] = val
+@@ -339,7 +338,7 @@ def _parse_subject(subject):
+ ret_list.append((nid_num, nid_name, val))
nids.append(nid_num)
except TypeError as err:
- log.trace("Missing attribute '%s'. Error: %s", nid_name, err)
+ log.debug("Missing attribute '%s'. Error: %s", nid_name, err)
-
+ for nid_num, nid_name, val in sorted(ret_list):
+ ret[nid_name] = val
return ret
-
-@@ -533,8 +527,8 @@ def get_pem_entries(glob_path):
+@@ -536,8 +535,8 @@ def get_pem_entries(glob_path):
if os.path.isfile(path):
try:
ret[path] = get_pem_entry(text=path)
- except ValueError:
- pass
+ except ValueError as err:
-+ log.debug('Unable to get PEM entries from %s: %s', path, err)
++ log.debug("Unable to get PEM entries from %s: %s", path, err)
return ret
-@@ -612,8 +606,8 @@ def read_certificates(glob_path):
+@@ -615,8 +614,8 @@ def read_certificates(glob_path):
if os.path.isfile(path):
try:
ret[path] = read_certificate(certificate=path)
- except ValueError:
- pass
+ except ValueError as err:
-+ log.debug('Unable to read certificate %s: %s', path, err)
++ log.debug("Unable to read certificate %s: %s", path, err)
return ret
-@@ -642,12 +636,10 @@ def read_csr(csr):
- # Get size returns in bytes. The world thinks of key sizes in bits.
- 'Subject': _parse_subject(csr.get_subject()),
- 'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
-- 'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus())\
-- .hexdigest()
-+ 'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
-+ 'X509v3 Extensions': _get_csr_extensions(csr),
+@@ -646,10 +645,9 @@ def read_csr(csr):
+ "Subject": _parse_subject(csr.get_subject()),
+ "Subject Hash": _dec2hex(csr.get_subject().as_hash()),
+ "Public Key Hash": hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
++ "X509v3 Extensions": _get_csr_extensions(csr),
}
-- ret['X509v3 Extensions'] = _get_csr_extensions(csr)
+- ret["X509v3 Extensions"] = _get_csr_extensions(csr)
-
return ret
-@@ -944,7 +936,7 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
+@@ -959,7 +957,7 @@ def create_crl(
# pyOpenSSL Note due to current limitations in pyOpenSSL it is impossible
# to specify a digest For signing the CRL. This will hopefully be fixed
# soon: https://github.com/pyca/pyopenssl/pull/161
- if not HAS_OPENSSL:
+ if OpenSSL is None:
raise salt.exceptions.SaltInvocationError(
- 'Could not load OpenSSL module, OpenSSL unavailable'
+ "Could not load OpenSSL module, OpenSSL unavailable"
)
-@@ -970,8 +962,7 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
- continue
-
- if 'revocation_date' not in rev_item:
-- rev_item['revocation_date'] = datetime.datetime\
-- .now().strftime('%Y-%m-%d %H:%M:%S')
-+ rev_item['revocation_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-
- rev_date = datetime.datetime.strptime(
- rev_item['revocation_date'], '%Y-%m-%d %H:%M:%S')
-@@ -1013,8 +1004,9 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
- try:
+@@ -1033,7 +1031,9 @@ def create_crl(
crltext = crl.export(**export_kwargs)
except (TypeError, ValueError):
-- log.warning(
-- 'Error signing crl with specified digest. Are you using pyopenssl 0.15 or newer? The default md5 digest will be used.')
-+ log.warning('Error signing crl with specified digest. '
-+ 'Are you using pyopenssl 0.15 or newer? '
-+ 'The default md5 digest will be used.')
- export_kwargs.pop('digest', None)
+ log.warning(
+- "Error signing crl with specified digest. Are you using pyopenssl 0.15 or newer? The default md5 digest will be used."
++ "Error signing crl with specified digest. "
++ "Are you using pyopenssl 0.15 or newer? "
++ "The default md5 digest will be used."
+ )
+ export_kwargs.pop("digest", None)
crltext = crl.export(**export_kwargs)
-
-@@ -1052,8 +1044,7 @@ def sign_remote_certificate(argdic, **kwargs):
- if 'signing_policy' in argdic:
- signing_policy = _get_signing_policy(argdic['signing_policy'])
- if not signing_policy:
-- return 'Signing policy {0} does not exist.'.format(
-- argdic['signing_policy'])
-+ return 'Signing policy {0} does not exist.'.format(argdic['signing_policy'])
-
- if isinstance(signing_policy, list):
- dict_ = {}
-@@ -1093,6 +1084,7 @@ def get_signing_policy(signing_policy_name):
+@@ -1109,6 +1109,7 @@ def get_signing_policy(signing_policy_name):
signing_policy = _get_signing_policy(signing_policy_name)
if not signing_policy:
- return 'Signing policy {0} does not exist.'.format(signing_policy_name)
+ return "Signing policy {} does not exist.".format(signing_policy_name)
+
if isinstance(signing_policy, list):
dict_ = {}
for item in signing_policy:
-@@ -1105,10 +1097,9 @@ def get_signing_policy(signing_policy_name):
- pass
-
- try:
-- signing_policy['signing_cert'] = get_pem_entry(
-- signing_policy['signing_cert'], 'CERTIFICATE')
-+ signing_policy['signing_cert'] = get_pem_entry(signing_policy['signing_cert'], 'CERTIFICATE')
+@@ -1125,7 +1126,7 @@ def get_signing_policy(signing_policy_name):
+ signing_policy["signing_cert"], "CERTIFICATE"
+ )
except KeyError:
- pass
+ log.debug('Unable to get "certificate" PEM entry')
return signing_policy
-@@ -1358,8 +1349,7 @@ def create_certificate(
- salt '*' x509.create_certificate path=/etc/pki/myca.crt signing_private_key='/etc/pki/myca.key' csr='/etc/pki/myca.csr'}
- '''
+@@ -1734,7 +1735,8 @@ def create_csr(path=None, text=False, **kwargs):
+ if "private_key" not in kwargs and "public_key" in kwargs:
+ kwargs["private_key"] = kwargs["public_key"]
+ log.warning(
+- "OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key"
++ "OpenSSL no longer allows working with non-signed CSRs. "
++ "A private_key must be specified. Attempting to use public_key as private_key"
+ )
-- if not path and not text and \
-- ('testrun' not in kwargs or kwargs['testrun'] is False):
-+ if not path and not text and ('testrun' not in kwargs or kwargs['testrun'] is False):
- raise salt.exceptions.SaltInvocationError(
- 'Either path or text must be specified.')
- if path and text:
-@@ -1504,8 +1494,7 @@ def create_certificate(
- continue
+ if "private_key" not in kwargs:
+@@ -1758,7 +1760,8 @@ def create_csr(path=None, text=False, **kwargs):
+ )
+ )
- # Use explicitly set values first, fall back to CSR values.
-- extval = kwargs.get(extname) or kwargs.get(extlongname) or \
-- csrexts.get(extname) or csrexts.get(extlongname)
-+ extval = kwargs.get(extname) or kwargs.get(extlongname) or csrexts.get(extname) or csrexts.get(extlongname)
+- for entry in sorted(subject.nid):
++ # pylint: disable=unused-variable
++ for entry, num in subject.nid.items():
+ if entry in kwargs:
+ setattr(subject, entry, kwargs[entry])
- critical = False
- if extval.startswith('critical '):
-@@ -1627,8 +1616,8 @@ def create_csr(path=None, text=False, **kwargs):
-
- if 'private_key' not in kwargs and 'public_key' in kwargs:
- kwargs['private_key'] = kwargs['public_key']
-- log.warning(
-- "OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key")
-+ log.warning("OpenSSL no longer allows working with non-signed CSRs. "
-+ "A private_key must be specified. Attempting to use public_key as private_key")
-
- if 'private_key' not in kwargs:
- raise salt.exceptions.SaltInvocationError('private_key is required')
-@@ -1640,11 +1629,9 @@ def create_csr(path=None, text=False, **kwargs):
- kwargs['private_key_passphrase'] = None
- if 'public_key_passphrase' not in kwargs:
- kwargs['public_key_passphrase'] = None
-- if kwargs['public_key_passphrase'] and not kwargs[
-- 'private_key_passphrase']:
-+ if kwargs['public_key_passphrase'] and not kwargs['private_key_passphrase']:
- kwargs['private_key_passphrase'] = kwargs['public_key_passphrase']
-- if kwargs['private_key_passphrase'] and not kwargs[
-- 'public_key_passphrase']:
-+ if kwargs['private_key_passphrase'] and not kwargs['public_key_passphrase']:
- kwargs['public_key_passphrase'] = kwargs['private_key_passphrase']
-
- csr.set_pubkey(get_public_key(kwargs['public_key'],
-@@ -1688,18 +1675,10 @@ def create_csr(path=None, text=False, **kwargs):
+@@ -1794,7 +1797,6 @@ def create_csr(path=None, text=False, **kwargs):
extstack.push(ext)
csr.add_extensions(extstack)
-
- csr.sign(_get_private_key_obj(kwargs['private_key'],
- passphrase=kwargs['private_key_passphrase']), kwargs['algorithm'])
+ csr.sign(
+ _get_private_key_obj(
+ kwargs["private_key"], passphrase=kwargs["private_key_passphrase"]
+@@ -1802,10 +1804,11 @@ def create_csr(path=None, text=False, **kwargs):
+ kwargs["algorithm"],
+ )
- if path:
-- return write_pem(
-- text=csr.as_pem(),
-- path=path,
-- pem_type='CERTIFICATE REQUEST'
-- )
+- return write_pem(text=csr.as_pem(), path=path, pem_type="CERTIFICATE REQUEST")
- else:
- return csr.as_pem()
-+ return write_pem(text=csr.as_pem(), path=path, pem_type='CERTIFICATE REQUEST') if path else csr.as_pem()
++ return (
++ write_pem(text=csr.as_pem(), path=path, pem_type="CERTIFICATE REQUEST")
++ if path
++ else csr.as_pem()
++ )
def verify_private_key(private_key, public_key, passphrase=None):
-@@ -1724,8 +1703,7 @@ def verify_private_key(private_key, public_key, passphrase=None):
+@@ -1830,7 +1833,7 @@ def verify_private_key(private_key, public_key, passphrase=None):
salt '*' x509.verify_private_key private_key=/etc/pki/myca.key \\
public_key=/etc/pki/myca.crt
- '''
-- return bool(get_public_key(private_key, passphrase)
-- == get_public_key(public_key))
+ """
+- return bool(get_public_key(private_key, passphrase) == get_public_key(public_key))
+ return get_public_key(private_key, passphrase) == get_public_key(public_key)
- def verify_signature(certificate, signing_pub_key=None,
-@@ -1779,9 +1757,8 @@ def verify_crl(crl, cert):
+ def verify_signature(
+@@ -1886,7 +1889,10 @@ def verify_crl(crl, cert):
salt '*' x509.verify_crl crl=/etc/pki/myca.crl cert=/etc/pki/myca.crt
- '''
- if not salt.utils.path.which('openssl'):
-- raise salt.exceptions.SaltInvocationError(
-- 'openssl binary not found in path'
-- )
-+ raise salt.exceptions.SaltInvocationError('External command "openssl" not found')
+ """
+ if not salt.utils.path.which("openssl"):
+- raise salt.exceptions.SaltInvocationError("openssl binary not found in path")
++ raise salt.exceptions.SaltInvocationError(
++ 'External command "openssl" not found'
++ )
+
crltext = _text_or_file(crl)
- crltext = get_pem_entry(crltext, pem_type='X509 CRL')
- crltempfile = tempfile.NamedTemporaryFile()
-@@ -1802,10 +1779,7 @@ def verify_crl(crl, cert):
+ crltext = get_pem_entry(crltext, pem_type="X509 CRL")
+ crltempfile = tempfile.NamedTemporaryFile(delete=True)
+@@ -1908,10 +1914,7 @@ def verify_crl(crl, cert):
crltempfile.close()
certtempfile.close()
-- if 'verify OK' in output:
+- if "verify OK" in output:
- return True
- else:
- return False
-+ return 'verify OK' in output
++ return "verify OK" in output
def expired(certificate):
-@@ -1842,8 +1816,9 @@ def expired(certificate):
- ret['expired'] = True
+@@ -1949,8 +1952,9 @@ def expired(certificate):
+ ret["expired"] = True
else:
- ret['expired'] = False
+ ret["expired"] = False
- except ValueError:
- pass
+ except ValueError as err:
-+ log.debug('Failed to get data of expired certificate: %s', err)
++ log.debug("Failed to get data of expired certificate: %s", err)
+ log.trace(err, exc_info=True)
return ret
-@@ -1866,6 +1841,7 @@ def will_expire(certificate, days):
+@@ -1973,6 +1977,7 @@ def will_expire(certificate, days):
salt '*' x509.will_expire "/etc/pki/mycert.crt" days=30
- '''
+ """
+ ts_pt = "%Y-%m-%d %H:%M:%S"
ret = {}
if os.path.isfile(certificate):
-@@ -1875,18 +1851,13 @@ def will_expire(certificate, days):
-
- cert = _get_certificate_obj(certificate)
-
-- _check_time = datetime.datetime.utcnow() + \
-- datetime.timedelta(days=days)
-+ _check_time = datetime.datetime.utcnow() + datetime.timedelta(days=days)
+@@ -1986,14 +1991,11 @@ def will_expire(certificate, days):
_expiration_date = cert.get_not_after().get_datetime()
- ret['cn'] = _parse_subject(cert.get_subject())['CN']
+ ret["cn"] = _parse_subject(cert.get_subject())["CN"]
-
-- if _expiration_date.strftime("%Y-%m-%d %H:%M:%S") <= \
-- _check_time.strftime("%Y-%m-%d %H:%M:%S"):
-- ret['will_expire'] = True
+- if _expiration_date.strftime("%Y-%m-%d %H:%M:%S") <= _check_time.strftime(
+- "%Y-%m-%d %H:%M:%S"
+- ):
+- ret["will_expire"] = True
- else:
-- ret['will_expire'] = False
+- ret["will_expire"] = False
- except ValueError:
- pass
-+ ret['will_expire'] = _expiration_date.strftime(ts_pt) <= _check_time.strftime(ts_pt)
++ ret["will_expire"] = _expiration_date.strftime(
++ ts_pt
++ ) <= _check_time.strftime(ts_pt)
+ except ValueError as err:
-+ log.debug('Unable to return details of a sertificate expiration: %s', err)
++        log.debug("Unable to return details of a certificate expiration: %s", err)
+ log.trace(err, exc_info=True)
return ret
diff --git a/salt/states/x509.py b/salt/states/x509.py
-index 3774f7d5eb..e4cc288dc9 100644
+index 8269df2f37..5ad7195b7c 100644
--- a/salt/states/x509.py
+++ b/salt/states/x509.py
-@@ -163,6 +163,7 @@ import copy
+@@ -177,12 +177,14 @@ import os
+ import re
- # Import Salt Libs
import salt.exceptions
+import salt.utils.stringutils
+ import salt.utils.versions
++from salt.ext import six
- # Import 3rd-party libs
- from salt.ext import six
-@@ -170,7 +171,7 @@ from salt.ext import six
try:
from M2Crypto.RSA import RSAError
except ImportError:
- pass
-+ RSAError = Exception('RSA Error')
++ RSAError = Exception("RSA Error")
+ log = logging.getLogger(__name__)
- def __virtual__():
-@@ -180,7 +181,7 @@ def __virtual__():
- if 'x509.get_pem_entry' in __salt__:
- return 'x509'
+@@ -194,7 +196,7 @@ def __virtual__():
+ if "x509.get_pem_entry" in __salt__:
+ return "x509"
else:
-- return (False, 'Could not load x509 state: m2crypto unavailable')
-+ return False, 'Could not load x509 state: the x509 is not available'
+- return (False, "Could not load x509 state: m2crypto unavailable")
++        return False, "Could not load x509 state: the x509 module is not available"
def _revoked_to_list(revs):
-@@ -459,8 +460,10 @@ def certificate_managed(name,
- private_key_args['name'], pem_type='RSA PRIVATE KEY')
- else:
- new_private_key = True
-- private_key = __salt__['x509.create_private_key'](text=True, bits=private_key_args['bits'], passphrase=private_key_args[
-- 'passphrase'], cipher=private_key_args['cipher'], verbose=private_key_args['verbose'])
-+ private_key = __salt__['x509.create_private_key'](text=True, bits=private_key_args['bits'],
-+ passphrase=private_key_args['passphrase'],
-+ cipher=private_key_args['cipher'],
-+ verbose=private_key_args['verbose'])
+@@ -696,7 +698,70 @@ def certificate_managed(
+ "Old": invalid_reason,
+ "New": "Certificate will be valid and up to date",
+ }
+- return ret
++ private_key_args.update(managed_private_key)
++ kwargs["public_key_passphrase"] = private_key_args["passphrase"]
++
++ if private_key_args["new"]:
++ rotate_private_key = True
++ private_key_args["new"] = False
++
++ if _check_private_key(
++ private_key_args["name"],
++ bits=private_key_args["bits"],
++ passphrase=private_key_args["passphrase"],
++ new=private_key_args["new"],
++ overwrite=private_key_args["overwrite"],
++ ):
++ private_key = __salt__["x509.get_pem_entry"](
++ private_key_args["name"], pem_type="RSA PRIVATE KEY"
++ )
++ else:
++ new_private_key = True
++ private_key = __salt__["x509.create_private_key"](
++ text=True,
++ bits=private_key_args["bits"],
++ passphrase=private_key_args["passphrase"],
++ cipher=private_key_args["cipher"],
++ verbose=private_key_args["verbose"],
++ )
++
++ kwargs["public_key"] = private_key
++
++ current_days_remaining = 0
++ current_comp = {}
++
++ if os.path.isfile(name):
++ try:
++ current = __salt__["x509.read_certificate"](certificate=name)
++ current_comp = copy.deepcopy(current)
++ if "serial_number" not in kwargs:
++ current_comp.pop("Serial Number")
++ if "signing_cert" not in kwargs:
++ try:
++ current_comp["X509v3 Extensions"][
++ "authorityKeyIdentifier"
++ ] = re.sub(
++ r"serial:([0-9A-F]{2}:)*[0-9A-F]{2}",
++ "serial:--",
++ current_comp["X509v3 Extensions"]["authorityKeyIdentifier"],
++ )
++ except KeyError:
++ pass
++ current_comp.pop("Not Before")
++ current_comp.pop("MD5 Finger Print")
++ current_comp.pop("SHA1 Finger Print")
++ current_comp.pop("SHA-256 Finger Print")
++ current_notafter = current_comp.pop("Not After")
++ current_days_remaining = (
++ datetime.datetime.strptime(current_notafter, "%Y-%m-%d %H:%M:%S")
++ - datetime.datetime.now()
++ ).days
++ if days_remaining == 0:
++ days_remaining = current_days_remaining - 1
++ except salt.exceptions.SaltInvocationError:
++ current = "{} is not a valid Certificate.".format(name)
++ else:
++ current = "{} does not exist.".format(name)
- kwargs['public_key'] = private_key
-
-@@ -671,8 +674,10 @@ def crl_managed(name,
- else:
- current = '{0} does not exist.'.format(name)
-
-- new_crl = __salt__['x509.create_crl'](text=True, signing_private_key=signing_private_key, signing_private_key_passphrase=signing_private_key_passphrase,
-- signing_cert=signing_cert, revoked=revoked, days_valid=days_valid, digest=digest, include_expired=include_expired)
-+ new_crl = __salt__['x509.create_crl'](text=True, signing_private_key=signing_private_key,
-+ signing_private_key_passphrase=signing_private_key_passphrase,
-+ signing_cert=signing_cert, revoked=revoked, days_valid=days_valid,
-+ digest=digest, include_expired=include_expired)
-
- new = __salt__['x509.read_crl'](crl=new_crl)
- new_comp = new.copy()
-@@ -714,6 +719,6 @@ def pem_managed(name,
+ contents = __salt__["x509.create_certificate"](text=True, **kwargs)
+ # Check the module actually returned a cert and not an error message as a string
+@@ -892,6 +957,8 @@ def pem_managed(name, text, backup=False, **kwargs):
Any arguments supported by :py:func:`file.managed ` are supported.
- '''
+ """
file_args, kwargs = _get_file_args(name, **kwargs)
-- file_args['contents'] = __salt__['x509.get_pem_entry'](text=text)
-+ file_args['contents'] = salt.utils.stringutils.to_str(__salt__['x509.get_pem_entry'](text=text))
+- file_args["contents"] = __salt__["x509.get_pem_entry"](text=text)
++ file_args["contents"] = salt.utils.stringutils.to_str(
++ __salt__["x509.get_pem_entry"](text=text)
++ )
- return __states__['file.managed'](**file_args)
+ return __states__["file.managed"](**file_args)
--
-2.16.4
+2.29.2
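Among the x509 rewrites above, will_expire() now reduces to a single timestamp comparison. A runnable sketch of that logic, assuming not_after is already a datetime (the real module gets it from M2Crypto):

    import datetime

    TS_PT = "%Y-%m-%d %H:%M:%S"

    def will_expire(not_after, days):
        # Fixed-width timestamps in this format compare correctly as
        # strings, which is what the patched module relies on.
        check_time = datetime.datetime.utcnow() + datetime.timedelta(days=days)
        return not_after.strftime(TS_PT) <= check_time.strftime(TS_PT)

    exp = datetime.datetime.utcnow() + datetime.timedelta(days=10)
    print(will_expire(exp, 30))  # True: inside the 30-day window
    print(will_expire(exp, 5))   # False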
diff --git a/xen-disk-fixes-264.patch b/xen-disk-fixes-264.patch
index 406385a..030da75 100644
--- a/xen-disk-fixes-264.patch
+++ b/xen-disk-fixes-264.patch
@@ -1,4 +1,4 @@
-From d260c5984d64fc8448a6adf8d5bf07ebb08e4126 Mon Sep 17 00:00:00 2001
+From da22c9ee9bd3a2ca34d028e40ff3b476bb944933 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat
Date: Mon, 5 Oct 2020 15:50:44 +0200
Subject: [PATCH] Xen disk fixes (#264)
@@ -28,22 +28,13 @@ when generating the new XML to compare.
In such cases, use the pool target path to compute the volume path.
---
- changelog/58333.fixed | 1 +
- salt/modules/virt.py | 258 +++++++++++-------
- salt/templates/virt/libvirt_disks.jinja | 12 +
- salt/templates/virt/libvirt_domain.jinja | 17 +-
- tests/pytests/unit/modules/virt/__init__.py | 0
- tests/pytests/unit/modules/virt/conftest.py | 191 +++++++++++++
- .../pytests/unit/modules/virt/test_domain.py | 256 +++++++++++++++++
- .../pytests/unit/modules/virt/test_helpers.py | 11 +
- tests/unit/modules/test_virt.py | 180 ++++--------
- 9 files changed, 698 insertions(+), 228 deletions(-)
+ changelog/58333.fixed | 1 +
+ salt/modules/virt.py | 264 ++++++++++++--------
+ salt/templates/virt/libvirt_domain.jinja | 16 +-
+ tests/pytests/unit/modules/virt/conftest.py | 4 +-
+ tests/unit/modules/test_virt.py | 180 +++++--------
+ 5 files changed, 232 insertions(+), 233 deletions(-)
create mode 100644 changelog/58333.fixed
- create mode 100644 salt/templates/virt/libvirt_disks.jinja
- create mode 100644 tests/pytests/unit/modules/virt/__init__.py
- create mode 100644 tests/pytests/unit/modules/virt/conftest.py
- create mode 100644 tests/pytests/unit/modules/virt/test_domain.py
- create mode 100644 tests/pytests/unit/modules/virt/test_helpers.py
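The last fix named in the message above, using the pool target path to compute a volume path when libvirt cannot resolve it, comes down to a path join. A hypothetical sketch (both arguments are illustration values, not salt's API):

    import os

    def volume_path(pool_target, volume_name):
        # Fallback described above: when the volume is not listed by the
        # pool, derive its path from the pool's target directory.
        return os.path.join(pool_target, volume_name)

    print(volume_path("/var/lib/libvirt/images", "vm1_system.qcow2"))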
diff --git a/changelog/58333.fixed b/changelog/58333.fixed
new file mode 100644
@@ -53,10 +44,10 @@ index 0000000000..f958d40964
@@ -0,0 +1 @@
+Convert disks of volume type to file or block disks on Xen
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index 4a8a55ced6..34643787f9 100644
+index c1a73fcb7f..e306bc0679 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
-@@ -459,6 +459,8 @@ def _get_disks(conn, dom):
+@@ -453,6 +453,8 @@ def _get_disks(conn, dom):
"""
disks = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
@@ -65,7 +56,7 @@ index 4a8a55ced6..34643787f9 100644
for elem in doc.findall("devices/disk"):
source = elem.find("source")
if source is None:
-@@ -471,13 +473,61 @@ def _get_disks(conn, dom):
+@@ -465,13 +467,61 @@ def _get_disks(conn, dom):
extra_properties = None
if "dev" in target.attrib:
disk_type = elem.get("type")
@@ -129,7 +120,7 @@ index 4a8a55ced6..34643787f9 100644
try:
stdout = subprocess.Popen(
[
-@@ -499,6 +549,12 @@ def _get_disks(conn, dom):
+@@ -493,6 +543,12 @@ def _get_disks(conn, dom):
disk.update({"file": "Does not exist"})
elif disk_type == "block":
qemu_target = source.get("dev", "")
@@ -142,7 +133,7 @@ index 4a8a55ced6..34643787f9 100644
elif disk_type == "network":
qemu_target = source.get("protocol")
source_name = source.get("name")
-@@ -537,43 +593,9 @@ def _get_disks(conn, dom):
+@@ -531,43 +587,9 @@ def _get_disks(conn, dom):
elif disk_type == "volume":
pool_name = source.get("pool")
volume_name = source.get("volume")
@@ -189,7 +180,7 @@ index 4a8a55ced6..34643787f9 100644
if not qemu_target:
continue
-@@ -636,6 +658,73 @@ def _get_target(target, ssh):
+@@ -630,6 +652,73 @@ def _get_target(target, ssh):
return " {}://{}/{}".format(proto, target, "system")
@@ -263,7 +254,7 @@ index 4a8a55ced6..34643787f9 100644
def _gen_xml(
conn,
name,
-@@ -741,41 +830,16 @@ def _gen_xml(
+@@ -735,41 +824,16 @@ def _gen_xml(
elif disk.get("pool"):
disk_context["volume"] = disk["filename"]
# If we had no source_file, then we want a volume
@@ -314,7 +305,20 @@ index 4a8a55ced6..34643787f9 100644
else:
if pool_type in ["disk", "logical"]:
# The volume format for these types doesn't match the driver format in the VM
-@@ -3981,7 +4045,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
+@@ -2441,9 +2505,9 @@ def update(
+ data = {k: v for k, v in locals().items() if bool(v)}
+ if boot_dev:
+ data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
+- need_update = salt.utils.xmlutil.change_xml(
+- desc, data, params_mapping
+- ) or need_update
++ need_update = (
++ salt.utils.xmlutil.change_xml(desc, data, params_mapping) or need_update
++ )
+
+ # Update the XML definition with the new disks and diff changes
+ devices_node = desc.find("devices")
+@@ -4092,7 +4156,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
@@ -323,7 +327,7 @@ index 4a8a55ced6..34643787f9 100644
if matcher:
pool_name = matcher.group("pool")
pool = None
-@@ -6499,29 +6563,33 @@ def _is_valid_volume(vol):
+@@ -6676,29 +6740,33 @@ def _is_valid_volume(vol):
def _get_all_volumes_paths(conn):
"""
@@ -376,45 +380,22 @@ index 4a8a55ced6..34643787f9 100644
def volume_infos(pool=None, volume=None, **kwargs):
-@@ -6592,8 +6660,8 @@ def volume_infos(pool=None, volume=None, **kwargs):
+@@ -6769,8 +6837,8 @@ def volume_infos(pool=None, volume=None, **kwargs):
if vol.path():
as_backing_store = {
path
-- for (path, all_paths) in six.iteritems(backing_stores)
+- for (path, all_paths) in backing_stores.items()
- if vol.path() in all_paths
-+ for (path, volume) in six.iteritems(backing_stores)
++ for (path, volume) in backing_stores.items()
+ if vol.path() in volume.get("backing_stores")
}
used_by = [
vm_name
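
The `volume_infos` hunk above tracks a shape change in what `_get_all_volumes_paths` returns: each volume path now maps to a dict carrying a `backing_stores` list, rather than to a bare list of backing paths. A small sketch of the new membership test, with invented paths and an assumed extra `format` key:

    # Hypothetical post-change structure: path -> {"format": ..., "backing_stores": [...]}
    backing_stores = {
        "/pool/vol1": {"format": "qcow2", "backing_stores": ["/pool/base"]},
        "/pool/vol2": {"format": "qcow2", "backing_stores": []},
    }

    vol_path = "/pool/base"  # the volume whose users we are looking for
    as_backing_store = {
        path
        for (path, volume) in backing_stores.items()
        if vol_path in volume.get("backing_stores")
    }
    assert as_backing_store == {"/pool/vol1"}
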
-diff --git a/salt/templates/virt/libvirt_disks.jinja b/salt/templates/virt/libvirt_disks.jinja
-new file mode 100644
-index 0000000000..38f836afbb
---- /dev/null
-+++ b/salt/templates/virt/libvirt_disks.jinja
-@@ -0,0 +1,12 @@
-+{% macro network_source(disk) -%}
-+
-+ {%- for host in disk.get('hosts') %}
-+
-+ {%- endfor %}
-+ {%- if disk.get("auth") %}
-+
-+
-+
-+ {%- endif %}
-+
-+{%- endmacro %}
diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
-index 04a61ffa78..18728a75b5 100644
+index 2a2f5e4141..18728a75b5 100644
--- a/salt/templates/virt/libvirt_domain.jinja
+++ b/salt/templates/virt/libvirt_domain.jinja
-@@ -1,3 +1,4 @@
-+{%- import 'libvirt_disks.jinja' as libvirt_disks -%}
-
- {{ name }}
- {{ cpu }}
-@@ -32,21 +33,13 @@
+@@ -33,21 +33,13 @@
{% if disk.type == 'file' and 'source_file' in disk -%}
{% endif %}
@@ -440,490 +421,33 @@ index 04a61ffa78..18728a75b5 100644
{% if disk.address -%}
-diff --git a/tests/pytests/unit/modules/virt/__init__.py b/tests/pytests/unit/modules/virt/__init__.py
-new file mode 100644
-index 0000000000..e69de29bb2
diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py
-new file mode 100644
-index 0000000000..1c32ae12eb
---- /dev/null
+index d70c2abc9e..1c32ae12eb 100644
+--- a/tests/pytests/unit/modules/virt/conftest.py
+++ b/tests/pytests/unit/modules/virt/conftest.py
-@@ -0,0 +1,191 @@
-+import pytest
-+import salt.modules.config as config
-+import salt.modules.virt as virt
-+from salt._compat import ElementTree as ET
-+from tests.support.mock import MagicMock
-+
-+
-+class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
-+ """
-+ Libvirt library mock
-+ """
-+
-+ class virDomain(MagicMock):
-+ """
-+ virDomain mock
-+ """
-+
-+ class libvirtError(Exception):
-+ """
-+ libvirtError mock
-+ """
-+
-+ def __init__(self, msg):
-+ super().__init__(msg)
-+ self.msg = msg
-+
-+ def get_error_message(self):
-+ return self.msg
-+
-+
-+class MappedResultMock(MagicMock):
-+ """
-+ Mock class that consistently returns the same mock object based on the first argument.
-+ """
-+
-+ _instances = {}
-+
-+ def __init__(self):
-+ def mapped_results(*args, **kwargs):
-+ if args[0] not in self._instances.keys():
-+ raise virt.libvirt.libvirtError("Not found: {}".format(args[0]))
-+ return self._instances[args[0]]
-+
-+ super().__init__(side_effect=mapped_results)
-+
-+ def add(self, name):
-+ self._instances[name] = MagicMock()
-+
-+
-+@pytest.fixture(autouse=True)
+@@ -48,7 +48,7 @@ class MappedResultMock(MagicMock):
+
+
+ @pytest.fixture(autouse=True)
+-def setup_loader():
+def setup_loader(request):
-+ # Create libvirt mock and connection mock
-+ mock_libvirt = LibvirtMock()
-+ mock_conn = MagicMock()
-+ mock_conn.getStoragePoolCapabilities.return_value = ""
-+
-+ mock_libvirt.openAuth.return_value = mock_conn
-+ setup_loader_modules = {
-+ virt: {
-+ "libvirt": mock_libvirt,
-+ "__salt__": {"config.get": config.get, "config.option": config.option},
-+ },
-+ config: {},
-+ }
+ # Create libvirt mock and connection mock
+ mock_libvirt = LibvirtMock()
+ mock_conn = MagicMock()
+@@ -62,7 +62,7 @@ def setup_loader():
+ },
+ config: {},
+ }
+- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+ with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
-+ yield loader_mock
-+
-+
-+@pytest.fixture
-+def make_mock_vm():
-+ def _make_mock_vm(xml_def):
-+ mocked_conn = virt.libvirt.openAuth.return_value
-+
-+ doc = ET.fromstring(xml_def)
-+ name = doc.find("name").text
-+ os_type = "hvm"
-+ os_type_node = doc.find("os/type")
-+ if os_type_node is not None:
-+ os_type = os_type_node.text
-+
-+ mocked_conn.listDefinedDomains.return_value = [name]
-+
-+ # Configure the mocked domain
-+ domain_mock = virt.libvirt.virDomain()
-+ if not isinstance(mocked_conn.lookupByName, MappedResultMock):
-+ mocked_conn.lookupByName = MappedResultMock()
-+ mocked_conn.lookupByName.add(name)
-+ domain_mock = mocked_conn.lookupByName(name)
-+ domain_mock.XMLDesc.return_value = xml_def
-+ domain_mock.OSType.return_value = os_type
-+
-+ # Return state as shutdown
-+ domain_mock.info.return_value = [
-+ 4,
-+ 2048 * 1024,
-+ 1024 * 1024,
-+ 2,
-+ 1234,
-+ ]
-+ domain_mock.ID.return_value = 1
-+ domain_mock.name.return_value = name
-+
-+ domain_mock.attachDevice.return_value = 0
-+ domain_mock.detachDevice.return_value = 0
-+
-+ return domain_mock
-+
-+ return _make_mock_vm
-+
-+
-+@pytest.fixture
-+def make_mock_storage_pool():
-+ def _make_mock_storage_pool(name, type, volumes):
-+ mocked_conn = virt.libvirt.openAuth.return_value
-+
-+ # Append the pool name to the list of known mocked pools
-+ all_pools = mocked_conn.listStoragePools.return_value
-+ if not isinstance(all_pools, list):
-+ all_pools = []
-+ all_pools.append(name)
-+ mocked_conn.listStoragePools.return_value = all_pools
-+
-+ # Ensure we have mapped results for the pools
-+ if not isinstance(mocked_conn.storagePoolLookupByName, MappedResultMock):
-+ mocked_conn.storagePoolLookupByName = MappedResultMock()
-+
-+ # Configure the pool
-+ mocked_conn.storagePoolLookupByName.add(name)
-+ mocked_pool = mocked_conn.storagePoolLookupByName(name)
-+ source = ""
-+ if type == "disk":
-+ source = "".format(name)
-+ pool_path = "/path/to/{}".format(name)
-+ mocked_pool.XMLDesc.return_value = """
-+
-+
-+ {}
-+
-+
-+ {}
-+
-+
-+ """.format(
-+ type, source, pool_path
-+ )
-+ mocked_pool.name.return_value = name
-+ mocked_pool.info.return_value = [
-+ virt.libvirt.VIR_STORAGE_POOL_RUNNING,
-+ ]
-+
-+ # Append the pool to the listAllStoragePools list
-+ all_pools_obj = mocked_conn.listAllStoragePools.return_value
-+ if not isinstance(all_pools_obj, list):
-+ all_pools_obj = []
-+ all_pools_obj.append(mocked_pool)
-+ mocked_conn.listAllStoragePools.return_value = all_pools_obj
-+
-+ # Configure the volumes
-+ if not isinstance(mocked_pool.storageVolLookupByName, MappedResultMock):
-+ mocked_pool.storageVolLookupByName = MappedResultMock()
-+ mocked_pool.listVolumes.return_value = volumes
-+
-+ all_volumes = []
-+ for volume in volumes:
-+ mocked_pool.storageVolLookupByName.add(volume)
-+ mocked_vol = mocked_pool.storageVolLookupByName(volume)
-+ vol_path = "{}/{}".format(pool_path, volume)
-+ mocked_vol.XMLDesc.return_value = """
-+
-+
-+ {}
-+
-+
-+ """.format(
-+ vol_path,
-+ )
-+ mocked_vol.path.return_value = vol_path
-+ mocked_vol.name.return_value = volume
-+
-+ mocked_vol.info.return_value = [
-+ 0,
-+ 1234567,
-+ 12345,
-+ ]
-+ all_volumes.append(mocked_vol)
-+
-+ # Set the listAllVolumes return_value
-+ mocked_pool.listAllVolumes.return_value = all_volumes
-+ return mocked_pool
-+
-+ return _make_mock_storage_pool
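
The conftest fixtures dropped from the patch above (they now live in the upstream file) are built around `MappedResultMock`: a `MagicMock` that hands back the same child mock for a given first argument, so repeated `lookupByName(name)` calls stay consistent across a test. A standalone sketch of the pattern, using `KeyError` where Salt's version raises `libvirtError`:

    from unittest.mock import MagicMock

    class MappedResultMock(MagicMock):
        """Return the same child mock for a given first positional argument."""

        # Class-level on purpose: instance attributes cannot be set before
        # MagicMock.__init__ has run. Note the state is shared between instances.
        _instances = {}

        def __init__(self):
            def mapped_results(*args, **kwargs):
                if args[0] not in self._instances:
                    raise KeyError("Not found: {}".format(args[0]))
                return self._instances[args[0]]

            super().__init__(side_effect=mapped_results)

        def add(self, name):
            self._instances[name] = MagicMock()

    lookup = MappedResultMock()
    lookup.add("vm1")
    assert lookup("vm1") is lookup("vm1")  # stable result per name
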
-diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py
-new file mode 100644
-index 0000000000..5f9b45ec9a
---- /dev/null
-+++ b/tests/pytests/unit/modules/virt/test_domain.py
-@@ -0,0 +1,256 @@
-+import salt.modules.virt as virt
-+from salt._compat import ElementTree as ET
-+from tests.support.mock import MagicMock, patch
-+
-+from .test_helpers import append_to_XMLDesc
-+
-+
-+def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool):
-+ xml_def = """
-+
-+ my_vm
-+ 524288
-+ 524288
-+ 1
-+
-+ linux
-+ /usr/lib/grub2/x86_64-xen/grub.xen
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+ """
-+ domain_mock = make_mock_vm(xml_def)
-+ make_mock_storage_pool("default", "dir", ["my_vm_system"])
-+ make_mock_storage_pool("my-iscsi", "iscsi", ["unit:0:0:1"])
-+ make_mock_storage_pool("vdb", "disk", ["vdb1"])
-+
-+ ret = virt.update(
-+ "my_vm",
-+ disks=[
-+ {"name": "system", "pool": "default"},
-+ {"name": "iscsi-data", "pool": "my-iscsi", "source_file": "unit:0:0:1"},
-+ {"name": "vdb-data", "pool": "vdb", "source_file": "vdb1"},
-+ {"name": "file-data", "pool": "default", "size": "10240"},
-+ ],
-+ )
-+
-+ assert ret["definition"]
-+ define_mock = virt.libvirt.openAuth().defineXML
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ assert "block" == setxml.find(".//disk[3]").get("type")
-+ assert "/path/to/vdb/vdb1" == setxml.find(".//disk[3]/source").get("dev")
-+
-+ # Note that my_vm-file-data was not an existing volume before the update
-+ assert "file" == setxml.find(".//disk[4]").get("type")
-+ assert "/path/to/default/my_vm_file-data" == setxml.find(".//disk[4]/source").get(
-+ "file"
-+ )
-+
-+
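
The test above (now shipped upstream rather than carried in the patch) pins down the conversion rule named in the changelog entry: on Xen, `volume`-type disks are rewritten so that volumes backed by plain files become `file` disks and volumes backed by devices become `block` disks. A rough sketch of that dispatch; the set of device-backed pool types is an assumption here, since the test only asserts "disk" (-> block) and "dir" (-> file) pools:

    # Hypothetical helper: which libvirt disk type a pool's volumes map to.
    DEVICE_BACKED_POOLS = {"disk", "iscsi", "logical", "scsi"}

    def disk_type_for_pool(pool_type):
        return "block" if pool_type in DEVICE_BACKED_POOLS else "file"

    assert disk_type_for_pool("dir") == "file"
    assert disk_type_for_pool("disk") == "block"
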
-+def test_get_disks(make_mock_vm, make_mock_storage_pool):
-+ # test with volumes
-+ vm_def = """
-+ srv01
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+ """
-+ domain_mock = make_mock_vm(vm_def)
-+
-+ pool_mock = make_mock_storage_pool(
-+ "default", "dir", ["srv01_system", "srv01_data", "vm05_system"]
-+ )
-+
-+ # Append backing store to srv01_data volume XML description
-+ srv1data_mock = pool_mock.storageVolLookupByName("srv01_data")
-+ append_to_XMLDesc(
-+ srv1data_mock,
-+ """
-+
-+ /var/lib/libvirt/images/vol01
-+
-+ """,
-+ )
-+
-+ assert virt.get_disks("srv01") == {
-+ "vda": {
-+ "type": "disk",
-+ "file": "default/srv01_system",
-+ "file format": "qcow2",
-+ "disk size": 12345,
-+ "virtual size": 1234567,
-+ },
-+ "vdb": {
-+ "type": "disk",
-+ "file": "default/srv01_data",
-+ "file format": "qcow2",
-+ "disk size": 12345,
-+ "virtual size": 1234567,
-+ "backing file": {
-+ "file": "/var/lib/libvirt/images/vol01",
-+ "file format": "qcow2",
-+ },
-+ },
-+ "vdc": {
-+ "type": "disk",
-+ "file": "default/vm05_system",
-+ "file format": "qcow2",
-+ "disk size": 12345,
-+ "virtual size": 1234567,
-+ "backing file": {
-+ "file": "/var/lib/libvirt/images/vm04_system.qcow2",
-+ "file format": "qcow2",
-+ "backing file": {
-+ "file": "/var/testsuite-data/disk-image-template.raw",
-+ "file format": "raw",
-+ },
-+ },
-+ },
-+ "hda": {
-+ "type": "cdrom",
-+ "file format": "raw",
-+ "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb",
-+ },
-+ }
-+
-+
-+def test_get_disk_convert_volumes(make_mock_vm, make_mock_storage_pool):
-+ vm_def = """
-+ srv01
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+ """
-+ domain_mock = make_mock_vm(vm_def)
-+
-+ pool_mock = make_mock_storage_pool("default", "dir", ["srv01_system", "srv01_data"])
-+
-+ subprocess_mock = MagicMock()
-+ popen_mock = MagicMock(spec=virt.subprocess.Popen)
-+ popen_mock.return_value.communicate.return_value = [
-+ """[
-+ {
-+ "virtual-size": 214748364800,
-+ "filename": "/path/to/srv01_extra",
-+ "cluster-size": 65536,
-+ "format": "qcow2",
-+ "actual-size": 340525056,
-+ "format-specific": {
-+ "type": "qcow2",
-+ "data": {
-+ "compat": "1.1",
-+ "lazy-refcounts": false,
-+ "refcount-bits": 16,
-+ "corrupt": false
-+ }
-+ },
-+ "dirty-flag": false
-+ }
-+ ]
-+ """
-+ ]
-+ subprocess_mock.Popen = popen_mock
-+
-+ with patch.dict(virt.__dict__, {"subprocess": subprocess_mock}):
-+ assert {
-+ "vda": {
-+ "type": "disk",
-+ "file": "default/srv01_system",
-+ "file format": "qcow2",
-+ "disk size": 12345,
-+ "virtual size": 1234567,
-+ },
-+ "vdb": {
-+ "type": "disk",
-+ "file": "default/srv01_data",
-+ "file format": "raw",
-+ "disk size": 12345,
-+ "virtual size": 1234567,
-+ },
-+ "vdc": {
-+ "type": "disk",
-+ "file": "/path/to/srv01_extra",
-+ "file format": "qcow2",
-+ "cluster size": 65536,
-+ "disk size": 340525056,
-+ "virtual size": 214748364800,
-+ },
-+ } == virt.get_disks("srv01")
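
`test_get_disk_convert_volumes` above stubs `subprocess.Popen` with canned `qemu-img info` JSON. For reference, a hedged sketch of how such output is consumed outside the mock; the exact qemu-img flags are assumptions, and the result keys mirror the canned JSON in the test:

    import json
    import subprocess

    def qemu_img_info(path):
        # -U (force share) and --backing-chain exist in reasonably recent
        # qemu-img; drop them if the local version predates them.
        stdout, _ = subprocess.Popen(
            ["qemu-img", "info", "-U", "--output", "json", "--backing-chain", path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).communicate()
        # --backing-chain yields a JSON array, one object per image in the chain
        first = json.loads(stdout)[0]
        return {
            "file": first["filename"],
            "file format": first["format"],
            "disk size": first["actual-size"],
            "virtual size": first["virtual-size"],
            "cluster size": first.get("cluster-size"),
        }
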
-diff --git a/tests/pytests/unit/modules/virt/test_helpers.py b/tests/pytests/unit/modules/virt/test_helpers.py
-new file mode 100644
-index 0000000000..f64aee2821
---- /dev/null
-+++ b/tests/pytests/unit/modules/virt/test_helpers.py
-@@ -0,0 +1,11 @@
-+from salt._compat import ElementTree as ET
-+
-+
-+def append_to_XMLDesc(mocked, fragment):
-+ """
-+ Append an XML fragment at the end of the mocked XMLDesc return_value of mocked.
-+ """
-+ xml_doc = ET.fromstring(mocked.XMLDesc())
-+ xml_fragment = ET.fromstring(fragment)
-+ xml_doc.append(xml_fragment)
-+ mocked.XMLDesc.return_value = ET.tostring(xml_doc)
+ yield loader_mock
+
+
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index 27c4b9d1b0..6e61544a1f 100644
+index 4a4c0395a7..e214e406e2 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -1141,6 +1141,65 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -1138,6 +1138,65 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual("vdb2", source.attrib["volume"])
self.assertEqual("raw", disk.find("driver").get("type"))
@@ -989,7 +513,7 @@ index 27c4b9d1b0..6e61544a1f 100644
def test_gen_xml_cdrom(self):
"""
Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
-@@ -5503,124 +5562,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -5499,124 +5558,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"vol1.qcow2",
"/path/to/file",
)
@@ -1115,6 +639,6 @@ index 27c4b9d1b0..6e61544a1f 100644
- },
- )
--
-2.28.0
+2.29.2
diff --git a/xfs-do-not-fails-if-type-is-not-present.patch b/xfs-do-not-fails-if-type-is-not-present.patch
index f1bd207..5561337 100644
--- a/xfs-do-not-fails-if-type-is-not-present.patch
+++ b/xfs-do-not-fails-if-type-is-not-present.patch
@@ -1,4 +1,4 @@
-From 9d1e598bf8c7aff612a58405ad864ba701f022c3 Mon Sep 17 00:00:00 2001
+From fb8c573cffff1c3909cd6c84c5474193ac5588a6 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Tue, 11 Jun 2019 17:21:05 +0200
Subject: [PATCH] xfs: do not fails if type is not present
@@ -12,81 +12,207 @@ output.
(cherry picked from commit 88df6963470007aa4fe2adb09f000311f48226a8)
---
- salt/modules/xfs.py | 2 +-
- tests/unit/modules/test_xfs.py | 50 ++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 51 insertions(+), 1 deletion(-)
- create mode 100644 tests/unit/modules/test_xfs.py
+ salt/modules/xfs.py | 47 +++++++++++++++-------------------
+ tests/unit/modules/test_xfs.py | 8 ------
+ 2 files changed, 20 insertions(+), 35 deletions(-)
diff --git a/salt/modules/xfs.py b/salt/modules/xfs.py
-index ce7bd187fe..0116d7600e 100644
+index 6782872cf7..7563bd2d65 100644
--- a/salt/modules/xfs.py
+++ b/salt/modules/xfs.py
-@@ -329,7 +329,7 @@ def _blkid_output(out):
- for items in flt(dev_meta.strip().split("\n")):
- key, val = items.split("=", 1)
- dev[key.lower()] = val
-- if dev.pop("type") == "xfs":
-+ if dev.pop("type", None) == "xfs":
- dev['label'] = dev.get('label')
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ #
+ # The MIT License (MIT)
+ # Copyright (C) 2014 SUSE LLC
+@@ -25,8 +24,6 @@
+ Module for managing XFS file systems.
+ """
+
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+
+ import logging
+ import os
+@@ -34,14 +31,10 @@ import re
+ import time
+
+ import salt.utils.data
+-
+-# Import Salt libs
+ import salt.utils.files
+ import salt.utils.path
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+-
+-# Import 3rd-party libs
+ from salt.ext import six
+ from salt.ext.six.moves import range
+
+@@ -120,7 +113,7 @@ def info(device):
+
+ salt '*' xfs.info /dev/sda1
+ """
+- out = __salt__["cmd.run_all"]("xfs_info {0}".format(device))
++ out = __salt__["cmd.run_all"]("xfs_info {}".format(device))
+ if out.get("stderr"):
+ raise CommandExecutionError(out["stderr"].replace("xfs_info:", "").strip())
+ return _parse_xfs_info(out["stdout"])
+@@ -195,16 +188,16 @@ def dump(device, destination, level=0, label=None, noerase=None):
+ label
+ and label
+ or time.strftime(
+- 'XFS dump for "{0}" of %Y.%m.%d, %H:%M'.format(device), time.localtime()
++ 'XFS dump for "{}" of %Y.%m.%d, %H:%M'.format(device), time.localtime()
+ ).replace("'", '"')
+ )
+ cmd = ["xfsdump"]
+ cmd.append("-F") # Force
+ if not noerase:
+ cmd.append("-E") # pre-erase
+- cmd.append("-L '{0}'".format(label)) # Label
+- cmd.append("-l {0}".format(level)) # Dump level
+- cmd.append("-f {0}".format(destination)) # Media destination
++ cmd.append("-L '{}'".format(label)) # Label
++ cmd.append("-l {}".format(level)) # Dump level
++ cmd.append("-f {}".format(destination)) # Media destination
+ cmd.append(device) # Device
+
+ cmd = " ".join(cmd)
+@@ -220,10 +213,10 @@ def _xr_to_keyset(line):
+ """
+ tkns = [elm for elm in line.strip().split(":", 1) if elm]
+ if len(tkns) == 1:
+- return "'{0}': ".format(tkns[0])
++ return "'{}': ".format(tkns[0])
+ else:
+ key, val = tkns
+- return "'{0}': '{1}',".format(key.strip(), val.strip())
++ return "'{}': '{}',".format(key.strip(), val.strip())
+
+
+ def _xfs_inventory_output(out):
+@@ -314,14 +307,14 @@ def prune_dump(sessionid):
+ salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
+
+ """
+- out = __salt__["cmd.run_all"]("xfsinvutil -s {0} -F".format(sessionid))
++ out = __salt__["cmd.run_all"]("xfsinvutil -s {} -F".format(sessionid))
+ _verify_run(out)
+
+ data = _xfs_prune_output(out["stdout"], sessionid)
+ if data:
+ return data
+
+- raise CommandExecutionError('Session UUID "{0}" was not found.'.format(sessionid))
++ raise CommandExecutionError('Session UUID "{}" was not found.'.format(sessionid))
+
+
+ def _blkid_output(out):
+@@ -340,7 +333,7 @@ def _blkid_output(out):
data[dev.pop("devname")] = dev
+ mounts = _get_mounts()
+- for device in six.iterkeys(mounts):
++ for device in mounts.keys():
+ if data.get(device):
+ data[device].update(mounts[device])
+
+@@ -396,9 +389,9 @@ def estimate(path):
+ salt '*' xfs.estimate /path/to/dir/*
+ """
+ if not os.path.exists(path):
+- raise CommandExecutionError('Path "{0}" was not found.'.format(path))
++ raise CommandExecutionError('Path "{}" was not found.'.format(path))
+
+- out = __salt__["cmd.run_all"]("xfs_estimate -v {0}".format(path))
++ out = __salt__["cmd.run_all"]("xfs_estimate -v {}".format(path))
+ _verify_run(out)
+
+ return _xfs_estimate_output(out["stdout"])
+@@ -449,14 +442,14 @@ def mkfs(
+ """
+
+ getopts = lambda args: dict(
+- ((args and ("=" in args) and args or None))
++ (args and ("=" in args) and args or None)
+ and [kw.split("=") for kw in args.split(",")]
+ or []
+ )
+ cmd = ["mkfs.xfs"]
+ if label:
+ cmd.append("-L")
+- cmd.append("'{0}'".format(label))
++ cmd.append("'{}'".format(label))
+
+ if ssize:
+ cmd.append("-s")
+@@ -477,7 +470,7 @@ def mkfs(
+ cmd.append(opts)
+ except Exception: # pylint: disable=broad-except
+ raise CommandExecutionError(
+- 'Wrong parameters "{0}" for option "{1}"'.format(opts, switch)
++ 'Wrong parameters "{}" for option "{}"'.format(opts, switch)
+ )
+
+ if not noforce:
+@@ -505,13 +498,13 @@ def modify(device, label=None, lazy_counting=None, uuid=None):
+ """
+ if not label and lazy_counting is None and uuid is None:
+ raise CommandExecutionError(
+- 'Nothing specified for modification for "{0}" device'.format(device)
++ 'Nothing specified for modification for "{}" device'.format(device)
+ )
+
+ cmd = ["xfs_admin"]
+ if label:
+ cmd.append("-L")
+- cmd.append("'{0}'".format(label))
++ cmd.append("'{}'".format(label))
+
+ if lazy_counting is False:
+ cmd.append("-c")
+@@ -531,7 +524,7 @@ def modify(device, label=None, lazy_counting=None, uuid=None):
+ cmd = " ".join(cmd)
+ _verify_run(__salt__["cmd.run_all"](cmd), cmd=cmd)
+
+- out = __salt__["cmd.run_all"]("blkid -o export {0}".format(device))
++ out = __salt__["cmd.run_all"]("blkid -o export {}".format(device))
+ _verify_run(out)
+
+ return _blkid_output(out["stdout"])
+@@ -572,9 +565,9 @@ def defragment(device):
+ raise CommandExecutionError("Root is not a device.")
+
+ if not _get_mounts().get(device):
+- raise CommandExecutionError('Device "{0}" is not mounted'.format(device))
++ raise CommandExecutionError('Device "{}" is not mounted'.format(device))
+
+- out = __salt__["cmd.run_all"]("xfs_fsr {0}".format(device))
++ out = __salt__["cmd.run_all"]("xfs_fsr {}".format(device))
+ _verify_run(out)
+
+ return {"log": out["stdout"]}
diff --git a/tests/unit/modules/test_xfs.py b/tests/unit/modules/test_xfs.py
-new file mode 100644
-index 0000000000..4b423d69d1
---- /dev/null
+index 149f6c8f7b..778aff793d 100644
+--- a/tests/unit/modules/test_xfs.py
+++ b/tests/unit/modules/test_xfs.py
-@@ -0,0 +1,50 @@
-+# -*- coding: utf-8 -*-
-+
-+# Import Python libs
-+from __future__ import absolute_import, print_function, unicode_literals
-+import textwrap
-+
-+# Import Salt Testing Libs
-+from tests.support.mixins import LoaderModuleMockMixin
-+from tests.support.unit import skipIf, TestCase
-+from tests.support.mock import (
-+ NO_MOCK,
-+ NO_MOCK_REASON,
-+ MagicMock,
-+ patch)
-+
-+# Import Salt Libs
-+import salt.modules.xfs as xfs
-+
-+
-+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+@patch('salt.modules.xfs._get_mounts', MagicMock(return_value={}))
-+class XFSTestCase(TestCase, LoaderModuleMockMixin):
-+ '''
-+ Test cases for salt.modules.xfs
-+ '''
-+ def setup_loader_modules(self):
-+ return {xfs: {}}
-+
-+ def test__blkid_output(self):
-+ '''
-+ Test xfs._blkid_output when there is data
-+ '''
-+ blkid_export = textwrap.dedent('''
-+ DEVNAME=/dev/sda1
-+ UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-+ TYPE=xfs
-+ PARTUUID=YYYYYYYY-YY
-+
-+ DEVNAME=/dev/sdb1
-+ PARTUUID=ZZZZZZZZ-ZZZZ-ZZZZ-ZZZZ-ZZZZZZZZZZZZ
-+ ''')
-+ # We expect to find only data from /dev/sda1, nothing from
-+ # /dev/sdb1
-+ self.assertEqual(xfs._blkid_output(blkid_export), {
-+ '/dev/sda1': {
-+ 'label': None,
-+ 'partuuid': 'YYYYYYYY-YY',
-+ 'uuid': 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
-+ }
-+ })
+@@ -1,14 +1,6 @@
+-# -*- coding: utf-8 -*-
+-
+-# Import Python libs
+-from __future__ import absolute_import, print_function, unicode_literals
+-
+ import textwrap
+
+-# Import Salt Libs
+ import salt.modules.xfs as xfs
+-
+-# Import Salt Testing Libs
+ from tests.support.mixins import LoaderModuleMockMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
--
-2.16.4
+2.29.2
diff --git a/zypperpkg-filter-patterns-that-start-with-dot-244.patch b/zypperpkg-filter-patterns-that-start-with-dot-244.patch
index 79bc0bd..e485467 100644
--- a/zypperpkg-filter-patterns-that-start-with-dot-244.patch
+++ b/zypperpkg-filter-patterns-that-start-with-dot-244.patch
@@ -1,4 +1,4 @@
-From 31bccc548b6f9d894b7c87ade035b1b178c18841 Mon Sep 17 00:00:00 2001
+From c9543da431aab06c39911c2217e9cb449b63f8e3 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Thu, 21 May 2020 10:19:15 +0200
Subject: [PATCH] zypperpkg: filter patterns that start with dot (#244)
@@ -16,37 +16,38 @@ Fix bsc#1171906
(cherry picked from commit d043db63000df2892b2e7259f580ede81e33724d)
---
- salt/modules/zypperpkg.py | 10 ++++++++--
+ salt/modules/zypperpkg.py | 9 ++++++---
tests/unit/modules/test_zypperpkg.py | 22 ++++++++++++++++++++++
- 2 files changed, 30 insertions(+), 2 deletions(-)
+ 2 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index ed8420f398b91b3ef76417d2f11ec59c4051d120..96c3eed851b819ec800e733628e2ae255481bb92 100644
+index b75572f4ff..ffcd1dfa4f 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -2302,8 +2302,14 @@ def _get_installed_patterns(root=None):
+@@ -2422,11 +2422,14 @@ def _get_installed_patterns(root=None):
# a real error.
- output = __salt__['cmd.run'](cmd, ignore_retcode=True)
+ output = __salt__["cmd.run"](cmd, ignore_retcode=True)
-- installed_patterns = [_pattern_name(line) for line in output.splitlines()
-- if line.startswith('pattern() = ')]
+- installed_patterns = [
+ # On <= SLE12SP4 we have patterns that have multiple names (alias)
+ # and that are duplicated. The alias start with ".", so we filter
+ # them.
+ installed_patterns = {
-+ _pattern_name(line)
-+ for line in output.splitlines()
+ _pattern_name(line)
+ for line in output.splitlines()
+- if line.startswith("pattern() = ")
+- ]
+ if line.startswith("pattern() = ") and not _pattern_name(line).startswith(".")
+ }
- patterns = {k: v for k, v in _get_visible_patterns(root=root).items() if v['installed']}
-
+ patterns = {
+ k: v for k, v in _get_visible_patterns(root=root).items() if v["installed"]
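
The reworked hunk above switches `installed_patterns` from a list to a set comprehension and adds the leading-dot filter: on SLE12SP4 and older, an aliased pattern shows up twice, once under a name starting with ".", and both the duplicate and the alias must go. Spelled out with invented rpm output (`_pattern_name` below is a stand-in for Salt's helper):

    def _pattern_name(line):
        # Stand-in for salt.modules.zypperpkg._pattern_name
        return line.split("=", 1)[1].strip()

    output = "\n".join([
        "pattern() = base",
        "pattern() = .base-alias",
        "pattern() = base",
        "pattern() = apparmor",
    ])

    installed_patterns = {
        _pattern_name(line)
        for line in output.splitlines()
        if line.startswith("pattern() = ") and not _pattern_name(line).startswith(".")
    }
    assert installed_patterns == {"base", "apparmor"}
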
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 9a5c59a8572cb47c947645ed7c0b5c645c48a909..1fce3352c6aa0b5f19c802831bf8583012feb6bf 100644
+index 1b62122e0e..b346ef9ebd 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -1493,6 +1493,28 @@ pattern() = package-c'''),
- },
+@@ -1978,6 +1978,28 @@ pattern() = package-c"""
+ "package-c": {"installed": True, "summary": "Non-visible pattern",},
}
+ @patch("salt.modules.zypperpkg._get_visible_patterns")
@@ -71,10 +72,10 @@ index 9a5c59a8572cb47c947645ed7c0b5c645c48a909..1fce3352c6aa0b5f19c802831bf85830
+ "package-c": {"installed": True, "summary": "Non-visible pattern"},
+ }
+
- @patch('salt.modules.zypperpkg._get_visible_patterns')
+ @patch("salt.modules.zypperpkg._get_visible_patterns")
def test_list_patterns(self, get_visible_patterns):
- '''Test available patterns in the repo'''
+ """Test available patterns in the repo"""
--
-2.23.0
+2.29.2
diff --git a/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch b/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
index ffcfe5c..90ac005 100644
--- a/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
+++ b/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
@@ -1,28 +1,28 @@
-From 76c38695fa663d55876902feda4a1c93211a1a9f Mon Sep 17 00:00:00 2001
+From 10c728215f4b01210753add829f941d746ac3607 Mon Sep 17 00:00:00 2001
From: Alberto Planas
Date: Mon, 5 Oct 2020 16:24:16 +0200
Subject: [PATCH] zypperpkg: ignore retcode 104 for search()
(bsc#1176697) (#270)
---
- salt/modules/zypperpkg.py | 38 ++++++++++--
+ salt/modules/zypperpkg.py | 28 ++++++---
tests/unit/modules/test_zypperpkg.py | 87 ++++++++++++++++++++++------
- 2 files changed, 101 insertions(+), 24 deletions(-)
+ 2 files changed, 89 insertions(+), 26 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 96c3eed851..ad11da4ad1 100644
+index ffcd1dfa4f..5369a0342e 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
-@@ -98,6 +98,8 @@ class _Zypper(object):
+@@ -98,6 +98,8 @@ class _Zypper:
}
LOCK_EXIT_CODE = 7
+ NOT_FOUND_EXIT_CODE = 104
+
- XML_DIRECTIVES = ['-x', '--xmlout']
+ XML_DIRECTIVES = ["-x", "--xmlout"]
# ZYPPER_LOCK is not affected by --root
- ZYPPER_LOCK = '/var/run/zypp.pid'
-@@ -128,6 +130,7 @@ class _Zypper(object):
+ ZYPPER_LOCK = "/var/run/zypp.pid"
+@@ -128,6 +130,7 @@ class _Zypper:
self.__no_raise = False
self.__refresh = False
self.__ignore_repo_failure = False
@@ -30,99 +30,89 @@ index 96c3eed851..ad11da4ad1 100644
self.__systemd_scope = False
self.__root = None
-@@ -147,6 +150,9 @@ class _Zypper(object):
+@@ -147,6 +150,9 @@ class _Zypper:
# Ignore exit code for 106 (repo is not available)
- if 'no_repo_failure' in kwargs:
- self.__ignore_repo_failure = kwargs['no_repo_failure']
+ if "no_repo_failure" in kwargs:
+ self.__ignore_repo_failure = kwargs["no_repo_failure"]
+ # Ignore exit code for 104 (package not found)
+ if "ignore_not_found" in kwargs:
+ self.__ignore_not_found = kwargs["ignore_not_found"]
- if 'systemd_scope' in kwargs:
- self.__systemd_scope = kwargs['systemd_scope']
- if 'root' in kwargs:
-@@ -296,6 +302,10 @@ class _Zypper(object):
+ if "systemd_scope" in kwargs:
+ self.__systemd_scope = kwargs["systemd_scope"]
+ if "root" in kwargs:
+@@ -305,6 +311,10 @@ class _Zypper:
if self.__root:
- self.__cmd.extend(['--root', self.__root])
+ self.__cmd.extend(["--root", self.__root])
+ # Do not consider 104 as a retcode error
+ if self.__ignore_not_found:
+ kwargs["success_retcodes"] = [_Zypper.NOT_FOUND_EXIT_CODE]
+
self.__cmd.extend(args)
- kwargs['output_loglevel'] = 'trace'
- kwargs['python_shell'] = False
-@@ -405,7 +415,11 @@ class Wildcard(object):
+ kwargs["output_loglevel"] = "trace"
+ kwargs["python_shell"] = False
+@@ -442,9 +452,11 @@ class Wildcard:
Get available versions of the package.
:return:
- '''
-- solvables = self.zypper.nolock.xml.call('se', '-xv', self.name).getElementsByTagName('solvable')
+ """
+- solvables = self.zypper.nolock.xml.call(
+- "se", "-xv", self.name
+- ).getElementsByTagName("solvable")
+ solvables = (
+ self.zypper(ignore_not_found=True)
+ .nolock.xml.call("se", "-v", self.name)
+ .getElementsByTagName("solvable")
+ )
if not solvables:
- raise CommandExecutionError('No packages found matching \'{0}\''.format(self.name))
+ raise CommandExecutionError(
+ "No packages found matching '{}'".format(self.name)
+@@ -1042,7 +1054,7 @@ def list_repo_pkgs(*args, **kwargs):
-@@ -983,7 +997,11 @@ def list_repo_pkgs(*args, **kwargs):
- return False
-
- root = kwargs.get('root') or None
-- for node in __zypper__(root=root).xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
-+ for node in (
+ root = kwargs.get("root") or None
+ for node in (
+- __zypper__(root=root)
+ __zypper__(root=root, ignore_not_found=True)
-+ .xml.call("se", "-s", *targets)
-+ .getElementsByTagName("solvable")
-+ ):
- pkginfo = dict(node.attributes.items())
- try:
- if pkginfo['kind'] != 'package':
-@@ -2261,7 +2279,9 @@ def owner(*paths, **kwargs):
+ .xml.call("se", "-s", *targets)
+ .getElementsByTagName("solvable")
+ ):
+@@ -2381,7 +2393,9 @@ def owner(*paths, **kwargs):
def _get_visible_patterns(root=None):
- '''Get all available patterns in the repo that are visible.'''
+ """Get all available patterns in the repo that are visible."""
patterns = {}
-- search_patterns = __zypper__(root=root).nolock.xml.call('se', '-t', 'pattern')
+- search_patterns = __zypper__(root=root).nolock.xml.call("se", "-t", "pattern")
+ search_patterns = __zypper__(root=root, ignore_not_found=True).nolock.xml.call(
+ "se", "-t", "pattern"
+ )
- for element in search_patterns.getElementsByTagName('solvable'):
- installed = element.getAttribute('status') == 'installed'
- patterns[element.getAttribute('name')] = {
-@@ -2455,7 +2475,11 @@ def search(criteria, refresh=False, **kwargs):
- cmd.append(ALLOWED_SEARCH_OPTIONS.get(opt))
+ for element in search_patterns.getElementsByTagName("solvable"):
+ installed = element.getAttribute("status") == "installed"
+ patterns[element.getAttribute("name")] = {
+@@ -2578,7 +2592,7 @@ def search(criteria, refresh=False, **kwargs):
cmd.append(criteria)
-- solvables = __zypper__(root=root).nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
-+ solvables = (
+ solvables = (
+- __zypper__(root=root)
+ __zypper__(root=root, ignore_not_found=True)
-+ .nolock.noraise.xml.call(*cmd)
-+ .getElementsByTagName("solvable")
-+ )
- if not solvables:
- raise CommandExecutionError(
- 'No packages found matching \'{0}\''.format(criteria)
-@@ -2690,7 +2714,11 @@ def _get_patches(installed_only=False, root=None):
- List all known patches in repos.
- '''
+ .nolock.noraise.xml.call(*cmd)
+ .getElementsByTagName("solvable")
+ )
+@@ -2830,7 +2844,7 @@ def _get_patches(installed_only=False, root=None):
+ """
patches = {}
-- for element in __zypper__(root=root).nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
-+ for element in (
+ for element in (
+- __zypper__(root=root)
+ __zypper__(root=root, ignore_not_found=True)
-+ .nolock.xml.call("se", "-t", "patch")
-+ .getElementsByTagName("solvable")
-+ ):
- installed = element.getAttribute('status') == 'installed'
- if (installed_only and installed) or not installed_only:
- patches[element.getAttribute('name')] = {
+ .nolock.xml.call("se", "-t", "patch")
+ .getElementsByTagName("solvable")
+ ):
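
All of the call sites above funnel into the same mechanism: `__zypper__(ignore_not_found=True)` makes the eventual `cmd.run_all` treat zypper's exit code 104 (no matching package or capability) as success, so an empty search result is not an error. A standalone sketch of the idea using plain subprocess; only `success_retcodes` itself is Salt API, the rest is illustrative:

    import subprocess

    NOT_FOUND_EXIT_CODE = 104  # zypper: search/install found nothing

    def run_zypper(args, ignore_not_found=False):
        success_retcodes = {0}
        if ignore_not_found:
            # Mirrors kwargs["success_retcodes"] = [104] in the hunk above.
            success_retcodes.add(NOT_FOUND_EXIT_CODE)
        proc = subprocess.run(
            ["zypper", "--non-interactive", *args],
            capture_output=True,
            text=True,
        )
        if proc.returncode not in success_retcodes:
            raise RuntimeError(proc.stderr or proc.stdout)
        return proc.stdout

    # An empty result set exits with 104, which is now tolerated:
    # run_zypper(["--xmlout", "se", "no-such-package"], ignore_not_found=True)
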
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 1fce3352c6..a3d20f66d5 100644
+index b346ef9ebd..a60b1546c6 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -39,7 +39,10 @@ class ZyppCallMock(object):
+@@ -27,7 +27,10 @@ class ZyppCallMock:
def __call__(self, *args, **kwargs):
# If the call is for a configuration modifier, we return self
-- if any(i in kwargs for i in ('no_repo_failure', 'systemd_scope', 'root')):
+- if any(i in kwargs for i in ("no_repo_failure", "systemd_scope", "root")):
+ if any(
+ i in kwargs
+ for i in ("no_repo_failure", "ignore_not_found", "systemd_scope", "root")
@@ -130,7 +120,7 @@ index 1fce3352c6..a3d20f66d5 100644
return self
return MagicMock(return_value=self.__return_value)()
-@@ -1303,8 +1306,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+@@ -1782,8 +1785,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -140,9 +130,9 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
wcard = zypper.Wildcard(_zpr)
- wcard.name, wcard.version = 'libzypp', '*'
- assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.4-19.5', '16.3.2-25.1', '16.5.2-27.9.1']
-@@ -1322,8 +1326,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ wcard.name, wcard.version = "libzypp", "*"
+ assert wcard._get_scope_versions(wcard._get_available_versions()) == [
+@@ -1805,8 +1809,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -152,9 +142,9 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
wcard = zypper.Wildcard(_zpr)
- wcard.name, wcard.version = 'libzypp', '16.2.*-2*'
- assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1', '16.2.6-27.9.1']
-@@ -1341,8 +1346,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ wcard.name, wcard.version = "libzypp", "16.2.*-2*"
+ assert wcard._get_scope_versions(wcard._get_available_versions()) == [
+@@ -1827,8 +1832,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -164,9 +154,9 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
wcard = zypper.Wildcard(_zpr)
- wcard.name, wcard.version = 'libzypp', '16.2.5*'
- assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1']
-@@ -1360,8 +1366,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ wcard.name, wcard.version = "libzypp", "16.2.5*"
+ assert wcard._get_scope_versions(wcard._get_available_versions()) == [
+@@ -1848,8 +1854,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -176,9 +166,9 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
wcard = zypper.Wildcard(_zpr)
- wcard.name, wcard.version = 'libzypp', '*.1'
- assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1', '17.2.6-27.9.1']
-@@ -1379,8 +1386,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ wcard.name, wcard.version = "libzypp", "*.1"
+ assert wcard._get_scope_versions(wcard._get_available_versions()) == [
+@@ -1870,8 +1877,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -187,10 +177,10 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr = MagicMock()
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
- assert zypper.Wildcard(_zpr)('libzypp', '16.2.4*') == '16.2.4-19.5'
- assert zypper.Wildcard(_zpr)('libzypp', '16.2*') == '16.2.5-25.1'
- assert zypper.Wildcard(_zpr)('libzypp', '*6-*') == '17.2.6-27.9.1'
-@@ -1399,8 +1407,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ assert zypper.Wildcard(_zpr)("libzypp", "16.2.4*") == "16.2.4-19.5"
+ assert zypper.Wildcard(_zpr)("libzypp", "16.2*") == "16.2.5-25.1"
+ assert zypper.Wildcard(_zpr)("libzypp", "*6-*") == "17.2.6-27.9.1"
+@@ -1890,8 +1898,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -200,10 +190,10 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
+
- assert zypper.Wildcard(_zpr)('libzypp', None) is None
+ assert zypper.Wildcard(_zpr)("libzypp", None) is None
def test_wildcard_to_query_typecheck(self):
-@@ -1416,8 +1426,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+@@ -1907,8 +1917,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -212,10 +202,10 @@ index 1fce3352c6..a3d20f66d5 100644
+ __zpr = MagicMock()
+ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc)
+ _zpr = MagicMock(return_value=__zpr)
- assert isinstance(zypper.Wildcard(_zpr)('libzypp', '*.1'), six.string_types)
+ assert isinstance(zypper.Wildcard(_zpr)("libzypp", "*.1"), str)
def test_wildcard_to_query_condition_preservation(self):
-@@ -1433,8 +1444,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+@@ -1924,8 +1935,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -226,8 +216,8 @@ index 1fce3352c6..a3d20f66d5 100644
+ _zpr = MagicMock(return_value=__zpr)
for op in zypper.Wildcard.Z_OP:
- assert zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op)) == '{0}17.2.6-27.9.1'.format(op)
-@@ -1456,8 +1468,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ assert zypper.Wildcard(_zpr)(
+@@ -1951,8 +1963,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"""
@@ -238,12 +228,12 @@ index 1fce3352c6..a3d20f66d5 100644
+ _zpr = MagicMock(return_value=__zpr)
+
with self.assertRaises(CommandExecutionError):
- for op in ['>>', '==', '<<', '+']:
- zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op))
-@@ -1557,3 +1571,38 @@ pattern() = package-c"""
+ for op in [">>", "==", "<<", "+"]:
+ zypper.Wildcard(_zpr)("libzypp", "{}*.1".format(op))
+@@ -2030,3 +2044,38 @@ pattern() = package-c"""
with patch.dict(zypper.__context__, context):
zypper._clean_cache()
- self.assertEqual(zypper.__context__, {'pkg.other_data': None})
+ self.assertEqual(zypper.__context__, {"pkg.other_data": None})
+
+ def test_search(self):
+ """Test zypperpkg.search()"""
@@ -280,6 +270,6 @@ index 1fce3352c6..a3d20f66d5 100644
+ env={"ZYPP_READONLY_HACK": "1"},
+ )
--
-2.28.0
+2.29.2