From 65598582f5f7f8359ce258a921ca50944a5bc2a74efa756be316de770725e2f8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
Date: Fri, 8 Jan 2021 12:41:50 +0000
Subject: [PATCH] osc copypac from project:systemsmanagement:saltstack:testing
 package:salt revision:374

OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=179
---
 _lastrevision | 2 +-
 _service | 6 +-
 accumulated-changes-from-yomi-167.patch | 319 +-
 ...ulated-changes-required-for-yomi-165.patch | 145 +-
 ...beacons-sources-config-pillar-grains.patch | 26 +-
 ...-parameter-to-include-all-installed-.patch | 924 +-
 ...common-edition-to-the-os-family-list.patch | 48 +-
 ...ce_ping_timeout-and-batch_presence_p.patch | 53 +-
 ...-for-osversion-grain-parsing-u-49946.patch | 10 +-
 add-custom-suse-capabilities-as-grains.patch | 8 +-
 add-docker-logout-237.patch | 82 +-
 ...variable-to-know-if-yum-is-invoked-f.patch | 115 +-
 add-hold-unhold-functions.patch | 403 +-
 ...te-and-gpg-key-management-functions-.patch | 282 +-
 ...missing-_utils-at-loader-grains_func.patch | 26 -
 ...un-for-returns-from-wfunc-executions.patch | 36 -
 ...upport-and-globbing-to-the-filetree-.patch | 38 +-
 ...use-capability-for-saltutil-state-mo.patch | 8 +-
 ..._batch-to-clearfuncs-exposed-methods.patch | 25 +-
 ...i-version-support-across-python-inte.patch | 38 +-
 ...onfiguration-file-for-enabling-packa.patch | 8 +-
 ...g-module-for-remote-calls-and-saltss.patch | 1273 +-
 add-virt.all_capabilities.patch | 251 +-
 adds-explicit-type-cast-for-port.patch | 20 +-
 ...args-to-pkg.list_downloaded-bsc-1140.patch | 53 +-
 ...-care-of-failed-skipped-and-unreacha.patch | 1843 +-
 ...-from-upstream-to-support-python-3.8.patch | 289 +-
 async-batch-implementation.patch | 972 +-
 ...ve-syslogging-by-watchdog-cronjob-58.patch | 4 +-
 ...-true-if-import-messes-with-salt.uti.patch | 34 -
 ...when-http.query-request-cannot-be-pe.patch | 32 +-
 backport-a-few-virt-prs-272.patch | 246 +-
 backport-virt-patches-from-3001-256.patch | 5807 +---
 ...h-exceptions-and-safety-unregister-a.patch | 182 +-
 ...xception-when-minion-does-not-respon.patch | 27 +-
 ...oid-using-fnmatch-to-match-event-217.patch | 15 +-
 ...file-directory-user-and-group-owners.patch | 146 +-
 ...in-parallel-to-avoid-blockings-bsc-1.patch | 83 +-
 changed-imports-to-vendored-tornado.patch | 182 +-
 ...n-info_installed-compatibility-50453.patch | 877 +-
 ...he-source-should-be-actually-skipped.patch | 54 -
 ...o-files-with-multiple-line-values-on.patch | 67 +-
 ...n-there-are-ipv6-established-connect.patch | 18 +-
 ...state-if-there-is-no-3rd-party-depen.patch | 345 +-
 ...iblegate-to-crash-on-python3-minions.patch | 618 +-
 ...eamclosederror-traceback-but-only-lo.patch | 12 +-
 ...tches-as-installed-when-not-all-the-.patch | 32 -
 ...zypper-with-more-than-one-no-refresh.patch | 32 +-
 drop-wrong-mock-from-chroot-unit-test.patch | 10 +-
 ...capabilities-code-after-rebasing-pat.patch | 99 +
 early-feature-support-config.patch | 1302 +-
 ...-unix_socket-for-mysql-returners-bsc.patch | 229 +-
 ...te-stop_on_reboot-is-updated-with-it.patch | 18 +-
 fall-back-to-pymysql.patch | 10 +-
 fix-__mount_device-wrapper-254.patch | 30 +-
 fix-a-test-and-some-variable-names-229.patch | 59 +-
 fix-a-wrong-rebase-in-test_core.py-180.patch | 147 +-
 fix-aptpkg-systemd-call-bsc-1143301.patch | 45 +-
 fix-async-batch-multiple-done-events.patch | 76 +-
 fix-async-batch-race-conditions.patch | 218 +-
 fix-batch_async-obsolete-test.patch | 17 +-
 fix-bsc-1065792.patch | 33 +-
 ...2020-25592-and-add-tests-bsc-1178319.patch | 551 +-
 fix-failing-unit-tests-for-batch-async.patch | 137 +-
 ...248-psutil-is-broken-and-so-process-.patch | 738 -
 fix-for-log-checking-in-x509-test.patch | 10 +-
 ...rn-value-ret-vs-return-in-batch-mode.patch | 113 -
 fix-for-suse-expanded-support-detection.patch | 20 +-
 ...older-definition-in-loader-unit-test.patch | 24 +-
 ...erging-across-multiple-__env__-repos.patch | 62 +-
 fix-grains.test_core-unit-test-277.patch | 41 +-
 fix-ipv6-scope-bsc-1108557.patch | 10 +-
 fix-issue-2068-test.patch | 32 +-
 ...produced-by-batch-async-find_jobs-me.patch | 139 +-
 fix-novendorchange-option-284.patch | 241 +-
 ...ed-six.itermitems-and-six.-_type-262.patch | 770 +-
 ...ack-version-when-sanitizing-msgpack-.patch | 28 -
 fix-unit-test-for-grains-core.patch | 43 +-
 ...tests-for-batch-async-after-refactor.patch | 32 +-
 fix-virt.update-with-cpu-defined-263.patch | 10 +-
 ...od_del_repo_multiline_values-test-af.patch | 107 +-
 ...ist_pkgs-expectation-and-dpkg-mockin.patch | 60 +-
 ...st_pkgs-to-be-aligned-with-pkg-state.patch | 28 +-
 ...s-no-parttion-type.-the-scipt-later-.patch | 42 +-
 fixes-cve-2018-15750-cve-2018-15751.patch | 46 +-
 fixing-streamclosed-issue.patch | 16 +-
 ...h-also-without-rpm-package-installed.patch | 44 +-
 grains-master-can-read-grains.patch | 8 +-
 html.tar.bz2 | 4 +-
 ...k.fqdns-module-function-bsc-1134860-.patch | 398 +-
 ...ync-to-release-consumed-memory-bsc-1.patch | 143 +-
 include-aliases-in-the-fqdns-grains.patch | 250 +-
 ...talled-works-without-status-attr-now.patch | 59 +-
 ...si-authentication-with-azurearm-clou.patch | 375 +-
 ...list-cache-when-cache-file-modified-.patch | 76 +-
 ...-platform-python-binary-in-rhel8-191.patch | 6 +-
 ...te-the-import-cachefor-extra-modules.patch | 52 -
 ...fix-variable-names-for-until_no_eval.patch | 12 +-
 ...-dependencies-in-azurearm-cloud-driv.patch | 29 +-
 ..._repos-compatible-on-enabled-disable.patch | 34 +-
 ...__init__-call-to-_refresh_file_mappi.patch | 29 -
 make-profiles-a-package.patch | 4 +-
 ...rnado.gen-to-use-salt.ext.backports_.patch | 35 -
 ...script-to-not-require-setuptools-9.1.patch | 14 +-
 ...eprecation-warning-to-reduce-log-spa.patch | 64 +-
 msgpack-support-versions-1.0.0.patch | 72 -
 opensuse-3000-libvirt-engine-fixes-251.patch | 1314 +-
 opensuse-3000-virt-defined-states-222.patch | 4343 +--
 opensuse-3000.2-virt-backports-236-257.patch | 21927 ++--------------
 ...3-spacewalk-runner-parse-command-250.patch | 24 +-
 ...-disable-force-refresh-in-zypper-215.patch | 96 +-
 ...ctools.wraps-with-six.wraps-bsc-1177.patch | 20 +-
 ...support-python-2.7-function-call-295.patch | 8 +-
 ...blegate-unit-tests-to-fail-on-ubuntu.patch | 55 +-
 ...rrors-when-running-test_btrfs-unit-t.patch | 22 +-
 ...deadlock-on-salt-api-subprocesses-bs.patch | 410 +-
 ...run-description-issue-when-running-a.patch | 45 +-
 ...od_del_repo_multiline_values-to-fail.patch | 68 +-
 ...ing-features-required-for-yomi-yet-o.patch | 7979 +-----
 python3.8-compatibility-pr-s-235.patch | 1067 +-
 re-adding-function-to-test-for-root.patch | 12 +-
 ...ithout-using-interpolation-bsc-11356.patch | 24 +-
 reintroducing-reverted-changes.patch | 22 +-
 ...-name-when-pkg.list_pkgs-is-called-w.patch | 376 +-
 ...d-usage-of-no_mock-and-no_mock_reaso.patch | 192 +-
 ....0.0-requirement-in-the-installed-me.patch | 340 +-
 ...ry-yield-causing-badyielderror-bsc-1.patch | 12 +-
 ...ored-backports-abc-from-requirements.patch | 22 +-
 ...lved-merge-conflict-in-yumpkg-module.patch | 29 -
 ...default-behaviour-of-pkg-list-return.patch | 83 +-
 ...expected-powerpc-os-arch-bsc-1117995.patch | 90 +-
 ...use-case-when-multiple-inotify-beaco.patch | 270 +
 run-salt-api-as-user-salt-bsc-1064520.patch | 4 +-
 run-salt-master-as-dedicated-salt-user.patch | 8 +-
 salt.changes | 209 +
 salt.spec | 452 +-
 ...rains-loaded-from-roster_grains.json.patch | 520 +-
 ...or-salt-ssh-keys-to-empty-string-293.patch | 31 -
 ...rom-repo.uri-when-comparing-repos-in.patch | 20 +-
 ...on-root-permission-issues-fixes-u-50.patch | 74 +-
 ...for-btrfs-and-xfs-in-parted-and-mkfs.patch | 89 +-
 ...rt-transactional-systems-microos-271.patch | 198 +-
 ...ewalld-state-to-use-change_interface.patch | 763 +-
 ...tend-the-whitelist-of-allowed-comman.patch | 226 +-
 ...tional_update-unify-with-chroot.call.patch | 90 +-
 update-documentation.sh | 6 +-
 ...lgorithm-to-compute-string-checksums.patch | 167 +-
 ...op-for-the-localclient-instance-of-b.patch | 28 +-
 ...name-instead-of-undocumented-abbrevi.patch | 60 -
 ...rom-multiprocessing.pool-to-avoid-le.patch | 28 +-
 v3000.3.tar.gz | 3 -
 v3002.2.tar.gz | 3 +
 ...el-boot-parameters-to-libvirt-xml-55.patch | 24 +-
 ...-don-t-raise-an-exception-if-there-i.patch | 228 +-
 x509-fixes-111.patch | 516 +-
 xen-disk-fixes-264.patch | 588 +-
 xfs-do-not-fails-if-type-is-not-present.patch | 262 +-
 ...ter-patterns-that-start-with-dot-244.patch | 37 +-
 ...-retcode-104-for-search-bsc-1176697-.patch | 174 +-
 159 files changed, 19138 insertions(+), 47937 deletions(-)
 delete mode 100644 add-missing-_utils-at-loader-grains_func.patch
 delete mode 100644 add-missing-fun-for-returns-from-wfunc-executions.patch
 delete mode 100644 avoid-has_docker-true-if-import-messes-with-salt.uti.patch
 delete mode 100644 decide-if-the-source-should-be-actually-skipped.patch
 delete mode 100644 do-not-report-patches-as-installed-when-not-all-the-.patch
 create mode 100644 drop-wrong-virt-capabilities-code-after-rebasing-pat.patch
 delete mode 100644 fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch
 delete mode 100644 fix-for-return-value-ret-vs-return-in-batch-mode.patch
 delete mode 100644 fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch
 delete mode 100644 loader-invalidate-the-import-cachefor-extra-modules.patch
 delete mode 100644 make-lazyloader.__init__-call-to-_refresh_file_mappi.patch
 delete mode 100644 make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch
 delete mode 100644 msgpack-support-versions-1.0.0.patch
 delete mode 100644 removes-unresolved-merge-conflict-in-yumpkg-module.patch
 create mode 100644 revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
 delete mode 100644 set-passphrase-for-salt-ssh-keys-to-empty-string-293.patch
 delete mode 100644 use-full-option-name-instead-of-undocumented-abbrevi.patch
 delete mode 100644 v3000.3.tar.gz
 create mode 100644 v3002.2.tar.gz

diff --git a/_lastrevision b/_lastrevision
index 621dcf9..11b2fa5 100644
--- a/_lastrevision
+++ b/_lastrevision
@@ -1 +1 @@
-1a73678e768b896323b9d2d1f903a400e48e51e1
\ No newline at end of file
+dcd92ac1689278a421ca9b4ecdcfe6cf9e94cf45
\ No newline at end of file
diff --git a/_service b/_service
index 69d0f78..b060a52 100644
--- a/_service
+++ b/_service
@@ -3,7 +3,7 @@
 https://github.com/openSUSE/salt-packaging.git
 salt
 package
- 3000.3
+ 3002.2
 git
@@ -12,8 +12,8 @@
 codeload.github.com
- openSUSE/salt/tar.gz/v3000.3-suse
- v3000.3.tar.gz
+ openSUSE/salt/tar.gz/v3002.2-suse
+ v3002.2.tar.gz
diff --git a/accumulated-changes-from-yomi-167.patch b/accumulated-changes-from-yomi-167.patch
index e8de9cb..46b7f0c 100644
--- a/accumulated-changes-from-yomi-167.patch
+++ b/accumulated-changes-from-yomi-167.patch
@@ -1,4 +1,4 @@
-From 951d2a385a40c5322155f952e08430e8402bfbde Mon Sep 17 00:00:00 2001
+From 828650500159fd7040d2fa76b2fc4d2b627f7065 Mon Sep 17 00:00:00 2001
 From: Alberto Planas
 Date: Tue, 22 Oct 2019 11:02:33 +0200
 Subject: [PATCH] Accumulated changes from Yomi (#167)

 This patch ignore this kind of issue during the grains creation.

 (cherry picked from commit b865491b74679140f7a71c5ba50d482db47b600f)
 ---
- salt/grains/core.py | 4 +++
- salt/modules/zypperpkg.py | 30 +++++++++++-----
- tests/unit/grains/test_core.py | 68 ++++++++++++++++++++++++++++++++++++
- tests/unit/modules/test_zypperpkg.py | 26 ++++++++++++++
- 4 files changed, 119 insertions(+), 9 deletions(-)
+ salt/grains/core.py | 6 +--
+ salt/modules/zypperpkg.py | 22 ----------
+ tests/unit/grains/test_core.py | 64 +++++++++++++++++++++++++++-
+ tests/unit/modules/test_zypperpkg.py | 38 +++++++++++++++++
+ 4 files changed, 103 insertions(+), 27 deletions(-)

 diff --git a/salt/grains/core.py b/salt/grains/core.py
-index 77ae99590f..68c43482d3 100644
+index 0dc1d97f97..a2983e388b 100644
 --- a/salt/grains/core.py
 +++ b/salt/grains/core.py
-@@ -997,6 +997,10 @@ def _virtual(osdata):
-             grains['virtual'] = 'gce'
-         elif 'BHYVE' in output:
-             grains['virtual'] = 'bhyve'
-+    except UnicodeDecodeError:
-+        # Some firmwares provide non-valid 'product_name'
-+        # files, ignore them
+@@ -1046,7 +1046,7 @@ def _virtual(osdata):
+     if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
+         try:
+             with salt.utils.files.fopen(
+-                "/sys/devices/virtual/dmi/id/product_name", "rb"
++                "/sys/devices/virtual/dmi/id/product_name", "r"
+             ) as fhr:
+                 output = salt.utils.stringutils.to_unicode(
+                     fhr.read(), errors="replace"
+@@ -1066,9 +1066,7 @@ def _virtual(osdata):
+             except UnicodeDecodeError:
+                 # Some firmwares provide non-valid 'product_name'
+                 # files, ignore them
+-                log.debug(
+-                    "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
+-                )
+                 pass
-     except IOError:
+             except OSError:
                  pass
-     elif osdata['kernel'] == 'FreeBSD':
+     elif osdata["kernel"] == "FreeBSD":
 diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index f7158e0810..5f3b6d6855 100644
+index 2daec0f380..b5621174a4 100644
 --- a/salt/modules/zypperpkg.py
 +++ b/salt/modules/zypperpkg.py
-@@ -863,23 +863,35 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
-         _ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
+@@ -958,28 +958,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+             }
+         ]

-     for include in includes:
-+        if include == 'product':
-+            products = list_products(all=False, root=root)
-+            for product in products:
-+                extended_name = '{}:{}'.format(include, product['name'])
-+                _ret[extended_name] = [{
-+                    'epoch': product['epoch'],
-+                    'version': product['version'],
-+                    'release': product['release'],
-+                    'arch': product['arch'],
-+                    'install_date': None,
-+                    'install_date_time_t': None,
-+                }]
-         if include in ('pattern', 'patch'):
-             if include == 'pattern':
--                pkgs = list_installed_patterns(root=root)
-+                elements = list_installed_patterns(root=root)
-             elif include == 'patch':
--                pkgs = list_installed_patches(root=root)
-+                elements = list_installed_patches(root=root)
-             else:
--                pkgs = []
--            for pkg in pkgs:
--                pkg_extended_name = '{}:{}'.format(include, pkg)
--                info = info_available(pkg_extended_name,
-+                elements = []
-+            for element in elements:
-+                extended_name = '{}:{}'.format(include, element)
-+                info = info_available(extended_name,
-                                       refresh=False,
-                                       root=root)
--                _ret[pkg_extended_name] = [{
-+                _ret[extended_name] = [{
-                     'epoch': None,
--                    'version': info[pkg]['version'],
-+                    'version': info[element]['version'],
-                     'release': None,
--                    'arch': info[pkg]['arch'],
-+                    'arch': info[element]['arch'],
-                     'install_date': None,
-                     'install_date_time_t': None,
-                 }]
+-        for include in includes:
+-            if include in ("pattern", "patch"):
+-                if include == "pattern":
+-                    pkgs = list_installed_patterns(root=root)
+-                elif include == "patch":
+-                    pkgs = list_installed_patches(root=root)
+-                else:
+-                    pkgs = []
+-                for pkg in pkgs:
+-                    pkg_extended_name = "{}:{}".format(include, pkg)
+-                    info = info_available(pkg_extended_name, refresh=False, root=root)
+-                    _ret[pkg_extended_name] = [
+-                        {
+-                            "epoch": None,
+-                            "version": info[pkg]["version"],
+-                            "release": None,
+-                            "arch": info[pkg]["arch"],
+-                            "install_date": None,
+-                            "install_date_time_t": None,
+-                        }
+-                    ]
+-
+     __context__[contextkey] = _ret

+     return __salt__["pkg_resource.format_pkg_list"](
 diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index e722bfab5b..33d6a9507f 100644
+index a5ceeb8317..0dc3423646 100644
 --- a/tests/unit/grains/test_core.py
 +++ b/tests/unit/grains/test_core.py
+@@ -2047,13 +2047,74 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+         result = core.path()
+         assert result == {"path": path, "systempath": comps}, result

++    @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
++    @patch("os.path.exists")
++    @patch("salt.utils.platform.is_proxy")
+     def test_kernelparams_return(self):
+         expectations = [
++            (
++                "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64",
++                {
++                    "kernelparams": [
++                        ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64")
++                    ]
++                },
++            ),
++            (
++                "root=/dev/mapper/centos_daemon-root",
++                {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]},
++            ),
++            (
++                "rhgb quiet ro",
++                {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]},
++            ),
++            ('param="value1"', {"kernelparams": [("param", "value1")]}),
++            (
++                'param="value1 value2 value3"',
++                {"kernelparams": [("param", "value1 value2 value3")]},
++            ),
++            (
++                'param="value1 value2 value3" LANG="pl" ro',
++                {
++                    "kernelparams": [
++                        ("param", "value1 value2 value3"),
++                        ("LANG", "pl"),
++                        ("ro", None),
++                    ]
++                },
++            ),
++            ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}),
++            (
++                'param="value1:value2:value3"',
++                {"kernelparams": [("param", "value1:value2:value3")]},
++            ),
++            (
++                'param="value1,value2,value3"',
++                {"kernelparams": [("param", "value1,value2,value3")]},
++            ),
++            (
++                'param="value1" param="value2" param="value3"',
++                {
++                    "kernelparams": [
++                        ("param", "value1"),
++                        ("param", "value2"),
++                        ("param", "value3"),
++                    ]
++                },
++            ),
+         ]

+         for cmdline, expectation in expectations:
+             with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)):
+                 self.assertEqual(core.kernelparams(), expectation)

+     @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+     @patch("os.path.exists")
+     @patch("salt.utils.platform.is_proxy")
+     def test__hw_data_linux_empty(self, is_proxy, exists):
+         is_proxy.return_value = False
+         exists.return_value = True
+-        with patch("salt.utils.files.fopen", mock_open(read_data=b"")):
++        with patch("salt.utils.files.fopen", mock_open(read_data="")):
+             self.assertEqual(
+                 core._hw_data({"kernel": "Linux"}),
+                 {
+@@ -2067,6 +2128,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
+             )

+     @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
++    @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3")
+     @patch("os.path.exists")
+     @patch("salt.utils.platform.is_proxy")
+     def test__hw_data_linux_unicode_error(self, is_proxy, exists):
 diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 6102043384..76937cc358 100644
+index 5d4e7766b6..1b62122e0e 100644
 --- a/tests/unit/modules/test_zypperpkg.py
 +++ b/tests/unit/modules/test_zypperpkg.py
+@@ -1424,6 +1424,44 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+                 ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
+             )

++    @patch("salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False))
++    @patch(
++        "salt.modules.zypperpkg.list_products",
++        MagicMock(return_value={"openSUSE": {"installed": False, "summary": "test"}}),
++    )
++    @patch(
++        "salt.modules.zypperpkg.list_pkgs",
++        MagicMock(
++            side_effect=[{"product:openSUSE": "15.2"}, {"product:openSUSE": "15.3"}]
++        ),
++    )
+     def test_install_product_ok(self):
-+        '''
++        """
+         Test successfully product installation.
-+        '''
-+        with patch.dict(zypper.__salt__,
-+                        {
-+                            'pkg_resource.parse_targets': MagicMock(
-+                                return_value=(['product:openSUSE'], None))
-+                        }):
-+            with patch('salt.modules.zypperpkg.__zypper__.noraise.call', MagicMock()) as zypper_mock:
-+                ret = zypper.install('product:openSUSE', includes=['product'])
-+                zypper_mock.assert_called_once_with(
-+                    '--no-refresh',
-+                    'install',
-+                    '--auto-agree-with-licenses',
-+                    '--name',
-+                    'product:openSUSE'
++        """
++        with patch.dict(
++            zypper.__salt__,
++            {
++                "pkg_resource.parse_targets": MagicMock(
++                    return_value=(["product:openSUSE"], None)
++                )
++            },
++        ):
++            with patch(
++                "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++            ) as zypper_mock:
++                ret = zypper.install("product:openSUSE", includes=["product"])
++                zypper_mock.assert_called_once_with(
++                    "--no-refresh",
++                    "install",
++                    "--auto-agree-with-licenses",
++                    "--name",
++                    "product:openSUSE",
++                )
++                self.assertDictEqual(
++                    ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
+                 )
-+                self.assertDictEqual(ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}})

+     def test_remove_purge(self):
-         '''
+         """
          Test package removal
 --
-2.16.4
+2.29.2

diff --git a/accumulated-changes-required-for-yomi-165.patch b/accumulated-changes-required-for-yomi-165.patch
index 000ee98..29409fa 100644
--- a/accumulated-changes-required-for-yomi-165.patch
+++ b/accumulated-changes-required-for-yomi-165.patch
@@ -1,4 +1,4 @@
-From 9f29577b75cac1e79ec7c30a5dff0dff0ab9da3a Mon Sep 17 00:00:00 2001
+From 7d35fdba84b6e1b62a3abc71e518366a35efb662 Mon Sep 17 00:00:00 2001
 From: Alberto Planas
 Date: Tue, 30 Jul 2019 11:23:12 +0200
 Subject: [PATCH] Accumulated changes required for Yomi (#165)

 so the cached data will be separated too.

 (cherry picked from commit 9c54bb3e8c93ba21fc583bdefbcadbe53cbcd7b5)
 ---
- salt/modules/cmdmod.py | 12 +++++++++---
- salt/modules/zypperpkg.py | 13 ++++++++++---
- tests/unit/modules/test_cmdmod.py | 16 ++++++++++++++++
- tests/unit/modules/test_zypperpkg.py | 21 +++++++++++++++++++++
- 4 files changed, 56 insertions(+), 6 deletions(-)
+ salt/modules/zypperpkg.py | 1 -
+ tests/unit/modules/test_zypperpkg.py | 22 +++++++++++++++++++++-
+ 2 files changed, 21 insertions(+), 2 deletions(-)

-diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
-index eed7656a6d..0d2f720bbb 100644
---- a/salt/modules/cmdmod.py
-+++ b/salt/modules/cmdmod.py
-@@ -3094,13 +3094,19 @@ def run_chroot(root,
-
-     if isinstance(cmd, (list, tuple)):
-         cmd = ' '.join([six.text_type(i) for i in cmd])
--    cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))
-+
-+    # If runas and group are provided, we expect that the user lives
-+    # inside the chroot, not outside.
-+    if runas:
-+        userspec = '--userspec {}:{}'.format(runas, group if group else '')
-+    else:
-+        userspec = ''
-+
-+    cmd = 'chroot {} {} {} -c {}'.format(userspec, root, sh_, _cmd_quote(cmd))
-
-     run_func = __context__.pop('cmd.run_chroot.func', run_all)
-
-     ret = run_func(cmd,
--                   runas=runas,
--                   group=group,
-                    cwd=cwd,
-                    stdin=stdin,
-                    shell=shell,
 diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 3760b525e7..8179cd8c1d 100644
+index c996935bff..b099f3e5d7 100644
 --- a/salt/modules/zypperpkg.py
 +++ b/salt/modules/zypperpkg.py
-@@ -449,8 +449,14 @@ def _clean_cache():
-     '''
-     Clean cached results
-     '''
-+    keys = []
-     for cache_name in ['pkg.list_pkgs', 'pkg.list_provides']:
--        __context__.pop(cache_name, None)
-+        for contextkey in __context__:
-+            if contextkey.startswith(cache_name):
-+                keys.append(contextkey)
-+
-+    for key in keys:
-+        __context__.pop(key, None)
-
-
- def list_upgrades(refresh=True, root=None, **kwargs):
-@@ -811,9 +817,10 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
-
-     includes = includes if includes else []
-
--    contextkey = 'pkg.list_pkgs'
-+    # Results can be different if a different root or a different
-+    # inclusion types are passed
-+    contextkey = 'pkg.list_pkgs_{}_{}'.format(root, includes)
+@@ -879,7 +879,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
+     # inclusion types are passed
+     contextkey = "pkg.list_pkgs_{}_{}".format(root, includes)

-    # TODO(aplanas): this cached value depends on the parameters
     if contextkey not in __context__:
         ret = {}
-        cmd = ['rpm']
-diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py
-index f8fba59294..8d763435f8 100644
---- a/tests/unit/modules/test_cmdmod.py
-+++ b/tests/unit/modules/test_cmdmod.py
-@@ -371,6 +371,22 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
-         else:
-             raise RuntimeError
-
-+    @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
-+    @skipIf(salt.utils.platform.is_darwin(), 'Do not run on MacOS')
-+    def test_run_cwd_in_combination_with_runas(self):
-+        '''
-+        cmd.run executes command in the cwd directory
-+        when the runas parameter is specified
-+        '''
-+        cmd = 'pwd'
-+        cwd = '/tmp'
-+        runas = os.getlogin()
-+
-+        with patch.dict(cmdmod.__grains__, {'os': 'Darwin',
-+                                            'os_family': 'Solaris'}):
-+            stdout = cmdmod._run(cmd, cwd=cwd, runas=runas).get('stdout')
-+        self.assertEqual(stdout, cwd)
-+
-     def test_run_all_binary_replace(self):
-         '''
-         Test for failed decoding of binary data, for instance when doing
+        cmd = ["rpm"]
 diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 12c22bfcb2..6102043384 100644
+index 032785395e..5d4e7766b6 100644
 --- a/tests/unit/modules/test_zypperpkg.py
 +++ b/tests/unit/modules/test_zypperpkg.py
-@@ -571,6 +571,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
-                 patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}):
-             pkgs = zypper.list_pkgs(versions_as_list=True)
-             self.assertFalse(pkgs.get('gpg-pubkey', False))
-+            self.assertTrue('pkg.list_pkgs_None_[]' in zypper.__context__)
-             for pkg_name, pkg_version in {
-                 'jakarta-commons-discovery': ['0.4-129.686'],
-                 'yast2-ftp-server': ['3.1.8-8.1'],
-@@ -613,6 +614,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
-                 patch.dict(pkg_resource.__salt__, {'pkg.parse_arch_from_name': zypper.parse_arch_from_name}):
-             pkgs = zypper.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t'])
-             self.assertFalse(pkgs.get('gpg-pubkey', False))
-+            self.assertTrue('pkg.list_pkgs_None_[]' in zypper.__context__)
-             for pkg_name, pkg_attr in {
-                 'jakarta-commons-discovery': [{
-                     'version': '0.4',
-@@ -1456,3 +1458,22 @@ pattern() = package-c'''),
-                     'summary': 'description b',
-                 },
+@@ -912,7 +912,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+         ), patch.dict(
+             zypper.__salt__, {"pkg_resource.stringify": MagicMock()}
+         ), patch.dict(
+-            pkg_resource.__salt__, {"pkg.parse_arch": zypper.parse_arch}
++            pkg_resource.__salt__,
++            {"pkg.parse_arch_from_name": zypper.parse_arch_from_name},
+         ):
+             pkgs = zypper.list_pkgs(
+                 attr=["epoch", "release", "arch", "install_date_time_t"]
+@@ -1950,3 +1951,22 @@ pattern() = package-c"""
+             "package-a": {"installed": True, "summary": "description a",},
+             "package-b": {"installed": False, "summary": "description b",},
         }
+
+    def test__clean_cache_empty(self):
-+        '''Test that an empty cached can be cleaned'''
++        """Test that an empty cached can be cleaned"""
+        context = {}
+        with patch.dict(zypper.__context__, context):
+            zypper._clean_cache()
+            assert context == {}
+
+    def test__clean_cache_filled(self):
-+        '''Test that a filled cached can be cleaned'''
++        """Test that a filled cached can be cleaned"""
+        context = {
-+            'pkg.list_pkgs_/mnt_[]': None,
-+            'pkg.list_pkgs_/mnt_[patterns]': None,
-+            'pkg.list_provides': None,
-+            'pkg.other_data': None,
++            "pkg.list_pkgs_/mnt_[]": None,
++            "pkg.list_pkgs_/mnt_[patterns]": None,
++            "pkg.list_provides": None,
++            "pkg.other_data": None,
+        }
+        with patch.dict(zypper.__context__, context):
+            zypper._clean_cache()
-+            self.assertEqual(zypper.__context__, {'pkg.other_data': None})
++            self.assertEqual(zypper.__context__, {"pkg.other_data": None})
 --
-2.16.4
+2.29.2

diff --git a/activate-all-beacons-sources-config-pillar-grains.patch b/activate-all-beacons-sources-config-pillar-grains.patch
index bce3d30..9667639 100644
--- a/activate-all-beacons-sources-config-pillar-grains.patch
+++ b/activate-all-beacons-sources-config-pillar-grains.patch
@@ -1,4 +1,4 @@
-From 6df4cef549665aad5b9e2af50eb06124a2bb0997 Mon Sep 17 00:00:00 2001
+From c44b897eb1305c6b9c341fc16f729d2293ab24e4 Mon Sep 17 00:00:00 2001
 From: Bo Maryniuk
 Date: Tue, 17 Oct 2017 16:52:33 +0200
 Subject: [PATCH] Activate all beacons sources: config/pillar/grains

 ---
- salt/minion.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ salt/minion.py | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)

 diff --git a/salt/minion.py b/salt/minion.py
-index 6a77d90185..457f485b0a 100644
+index c255f37c26..4da665a130 100644
 --- a/salt/minion.py
 +++ b/salt/minion.py
-@@ -483,7 +483,7 @@ class MinionBase(object):
+@@ -508,9 +508,7 @@ class MinionBase:
          the pillar or grains changed
-         '''
-         if 'config.merge' in functions:
--            b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
-+            b_conf = functions['config.merge']('beacons', self.opts['beacons'])
+         """
+         if "config.merge" in functions:
+-            b_conf = functions["config.merge"](
+-                "beacons", self.opts["beacons"], omit_opts=True
+-            )
++            b_conf = functions["config.merge"]("beacons", self.opts["beacons"])
              if b_conf:
-                 return self.beacons.process(b_conf, self.opts['grains'])  # pylint: disable=no-member
-         return []
+                 return self.beacons.process(
+                     b_conf, self.opts["grains"]
 --
-2.16.4
+2.29.2

diff --git a/add-all_versions-parameter-to-include-all-installed-.patch b/add-all_versions-parameter-to-include-all-installed-.patch
index e7045c0..6212a62 100644
--- a/add-all_versions-parameter-to-include-all-installed-.patch
+++ b/add-all_versions-parameter-to-include-all-installed-.patch
@@ -1,4 +1,4 @@
-From cd66b1e6636013440577a38a5a68729fec2f3f99 Mon Sep 17 00:00:00 2001
+From 2e300c770c227cf394929b7d5d025d5c52f1ae2c Mon Sep 17 00:00:00 2001
 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
 Date: Mon, 14 May 2018 11:33:13 +0100
@@ -19,21 +19,119 @@ Refactor: use dict.setdefault instead if-else statement
 Allow removing only specific package versions with zypper and yum
 ---
- salt/states/pkg.py | 21 +++++++++++++++++++++
- 1 file changed, 21 insertions(+)
+ salt/states/pkg.py | 285 +++++++++++++++++++++++----------------------
+ 1 file changed, 146 insertions(+), 139 deletions(-)

 diff --git a/salt/states/pkg.py b/salt/states/pkg.py
-index a13d418400..c0fa2f6b69 100644
+index 51b5a06e8f..a1b2a122bb 100644
 --- a/salt/states/pkg.py
 +++ b/salt/states/pkg.py
+@@ -1,4 +1,3 @@
+-# -*- coding: utf-8 -*-
+ """
+ Installation of packages using OS package managers such as yum or apt-get
+ =========================================================================
+@@ -71,21 +70,16 @@ state module
+ """ - if __grains__['os'] == 'FreeBSD' and origin: - cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] -+ elif __grains__['os_family'] == 'Suse': +-# Import python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import fnmatch + import logging + import os + import re + +-# Import Salt libs + import salt.utils.pkg + import salt.utils.platform + import salt.utils.versions + from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError +- +-# Import 3rd-party libs + from salt.ext import six + from salt.modules.pkg_resource import _repack_pkgs + from salt.output import nested +@@ -323,7 +317,7 @@ def _find_download_targets( + "name": name, + "changes": {}, + "result": True, +- "comment": "Version {0} of package '{1}' is already " ++ "comment": "Version {} of package '{}' is already " + "downloaded".format(version, name), + } + +@@ -334,7 +328,7 @@ def _find_download_targets( + "name": name, + "changes": {}, + "result": True, +- "comment": "Package {0} is already " "downloaded".format(name), ++ "comment": "Package {} is already " "downloaded".format(name), + } + + version_spec = False +@@ -349,13 +343,13 @@ def _find_download_targets( + comments.append( + "The following package(s) were not found, and no " + "possible matches were found in the package db: " +- "{0}".format(", ".join(sorted(problems["no_suggest"]))) ++ "{}".format(", ".join(sorted(problems["no_suggest"]))) + ) + if problems.get("suggest"): +- for pkgname, suggestions in six.iteritems(problems["suggest"]): ++ for pkgname, suggestions in problems["suggest"].items(): + comments.append( +- "Package '{0}' not found (possible matches: " +- "{1})".format(pkgname, ", ".join(suggestions)) ++ "Package '{}' not found (possible matches: " ++ "{})".format(pkgname, ", ".join(suggestions)) + ) + if comments: + if len(comments) > 1: +@@ -371,7 +365,7 @@ def _find_download_targets( + # Check current downloaded versions against specified versions + targets = {} + problems = [] +- for pkgname, pkgver in six.iteritems(to_download): ++ for pkgname, pkgver in to_download.items(): + cver = cur_pkgs.get(pkgname, {}) + # Package not yet downloaded, so add to targets + if not cver: +@@ -401,7 +395,7 @@ def _find_download_targets( + + if not targets: + # All specified packages are already downloaded +- msg = "All specified packages{0} are already downloaded".format( ++ msg = "All specified packages{} are already downloaded".format( + " (matching specified versions)" if version_spec else "" + ) + return {"name": name, "changes": {}, "result": True, "comment": msg} +@@ -425,7 +419,7 @@ def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): + "name": name, + "changes": {}, + "result": True, +- "comment": "Advisory patch {0} is already " "installed".format(name), ++ "comment": "Advisory patch {} is already " "installed".format(name), + } + + # Find out which advisory patches will be targeted in the call to pkg.install +@@ -477,12 +471,22 @@ def _find_remove_targets( + # Check current versions against specified versions + targets = [] + problems = [] +- for pkgname, pkgver in six.iteritems(to_remove): ++ for pkgname, pkgver in to_remove.items(): + # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names + origin = bool(re.search("/", pkgname)) + + if __grains__["os"] == "FreeBSD" and origin: +- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == pkgname] ++ cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname] ++ elif 
__grains__["os_family"] == "Suse": + # On SUSE systems. Zypper returns packages without "arch" in name + try: -+ namepart, archpart = pkgname.rsplit('.', 1) ++ namepart, archpart = pkgname.rsplit(".", 1) + except ValueError: + cver = cur_pkgs.get(pkgname, []) + else: @@ -43,14 +141,162 @@ index a13d418400..c0fa2f6b69 100644 else: cver = cur_pkgs.get(pkgname, []) -@@ -856,6 +866,17 @@ def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): - cver = new_pkgs.get(pkgname.split('%')[0]) - elif __grains__['os_family'] == 'Debian': - cver = new_pkgs.get(pkgname.split('=')[0]) -+ elif __grains__['os_family'] == 'Suse': +@@ -518,7 +522,7 @@ def _find_remove_targets( + + if not targets: + # All specified packages are already absent +- msg = "All specified packages{0} are already absent".format( ++ msg = "All specified packages{} are already absent".format( + " (matching specified versions)" if version_spec else "" + ) + return {"name": name, "changes": {}, "result": True, "comment": msg} +@@ -619,7 +623,7 @@ def _find_install_targets( + "name": name, + "changes": {}, + "result": False, +- "comment": "Invalidly formatted '{0}' parameter. See " ++ "comment": "Invalidly formatted '{}' parameter. See " + "minion log.".format("pkgs" if pkgs else "sources"), + } + +@@ -634,7 +638,7 @@ def _find_install_targets( + "name": name, + "changes": {}, + "result": False, +- "comment": "Package {0} not found in the " ++ "comment": "Package {} not found in the " + "repository.".format(name), + } + if version is None: +@@ -656,7 +660,7 @@ def _find_install_targets( + origin = bool(re.search("/", name)) + + if __grains__["os"] == "FreeBSD" and origin: +- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == name] ++ cver = [k for k, v in cur_pkgs.items() if v["origin"] == name] + else: + cver = cur_pkgs.get(name, []) + +@@ -667,7 +671,7 @@ def _find_install_targets( + "name": name, + "changes": {}, + "result": True, +- "comment": "Version {0} of package '{1}' is already " ++ "comment": "Version {} of package '{}' is already " + "installed".format(version, name), + } + +@@ -678,7 +682,7 @@ def _find_install_targets( + "name": name, + "changes": {}, + "result": True, +- "comment": "Package {0} is already " "installed".format(name), ++ "comment": "Package {} is already " "installed".format(name), + } + + version_spec = False +@@ -687,21 +691,19 @@ def _find_install_targets( + # enforced. Takes extra time. 
Disable for improved performance + if not skip_suggestions: + # Perform platform-specific pre-flight checks +- not_installed = dict( +- [ +- (name, version) +- for name, version in desired.items() +- if not ( +- name in cur_pkgs +- and ( +- version is None +- or _fulfills_version_string( +- cur_pkgs[name], version, ignore_epoch=ignore_epoch +- ) ++ not_installed = { ++ name: version ++ for name, version in desired.items() ++ if not ( ++ name in cur_pkgs ++ and ( ++ version is None ++ or _fulfills_version_string( ++ cur_pkgs[name], version, ignore_epoch=ignore_epoch + ) + ) +- ] +- ) ++ ) ++ } + if not_installed: + try: + problems = _preflight_check(not_installed, **kwargs) +@@ -713,13 +715,13 @@ def _find_install_targets( + comments.append( + "The following package(s) were not found, and no " + "possible matches were found in the package db: " +- "{0}".format(", ".join(sorted(problems["no_suggest"]))) ++ "{}".format(", ".join(sorted(problems["no_suggest"]))) + ) + if problems.get("suggest"): +- for pkgname, suggestions in six.iteritems(problems["suggest"]): ++ for pkgname, suggestions in problems["suggest"].items(): + comments.append( +- "Package '{0}' not found (possible matches: " +- "{1})".format(pkgname, ", ".join(suggestions)) ++ "Package '{}' not found (possible matches: " ++ "{})".format(pkgname, ", ".join(suggestions)) + ) + if comments: + if len(comments) > 1: +@@ -733,9 +735,7 @@ def _find_install_targets( + + # Resolve the latest package version for any packages with "latest" in the + # package version +- wants_latest = ( +- [] if sources else [x for x, y in six.iteritems(desired) if y == "latest"] +- ) ++ wants_latest = [] if sources else [x for x, y in desired.items() if y == "latest"] + if wants_latest: + resolved_latest = __salt__["pkg.latest_version"]( + *wants_latest, refresh=refresh, **kwargs +@@ -766,7 +766,7 @@ def _find_install_targets( + problems = [] + warnings = [] + failed_verify = False +- for package_name, version_string in six.iteritems(desired): ++ for package_name, version_string in desired.items(): + cver = cur_pkgs.get(package_name, []) + if resolve_capabilities and not cver and package_name in cur_prov: + cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) +@@ -795,12 +795,12 @@ def _find_install_targets( + problems.append(err.format(version_string, "file not found")) + continue + elif not os.path.exists(cached_path): +- problems.append("{0} does not exist on minion".format(version_string)) ++ problems.append("{} does not exist on minion".format(version_string)) + continue + source_info = __salt__["lowpkg.bin_pkg_info"](cached_path) + if source_info is None: + warnings.append( +- "Failed to parse metadata for {0}".format(version_string) ++ "Failed to parse metadata for {}".format(version_string) + ) + continue + else: +@@ -923,13 +923,24 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None): + has_origin = "/" in pkgname + + if __grains__["os"] == "FreeBSD" and has_origin: +- cver = [k for k, v in six.iteritems(new_pkgs) if v["origin"] == pkgname] ++ cver = [k for k, v in new_pkgs.items() if v["origin"] == pkgname] + elif __grains__["os"] == "MacOS" and has_origin: + cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split("/")[-1])) + elif __grains__["os"] == "OpenBSD": + cver = new_pkgs.get(pkgname.split("%")[0]) + elif __grains__["os_family"] == "Debian": + cver = new_pkgs.get(pkgname.split("=")[0]) ++ elif __grains__["os_family"] == "Suse": + # On SUSE systems. 
Zypper returns packages without "arch" in name + try: -+ namepart, archpart = pkgname.rsplit('.', 1) ++ namepart, archpart = pkgname.rsplit(".", 1) + except ValueError: + cver = new_pkgs.get(pkgname) + else: @@ -61,7 +307,653 @@ index a13d418400..c0fa2f6b69 100644 else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: +@@ -964,7 +975,7 @@ def _get_desired_pkg(name, desired): + oper = "" + else: + oper = "=" +- return "{0}{1}{2}".format(name, oper, "" if not desired[name] else desired[name]) ++ return "{}{}{}".format(name, oper, "" if not desired[name] else desired[name]) + + + def _preflight_check(desired, fromrepo, **kwargs): +@@ -1709,8 +1720,8 @@ def installed( + "comment": "pkg.verify not implemented", + } + +- if not isinstance(version, six.string_types) and version is not None: +- version = six.text_type(version) ++ if not isinstance(version, str) and version is not None: ++ version = str(version) + + kwargs["allow_updates"] = allow_updates + +@@ -1754,7 +1765,7 @@ def installed( + "name": name, + "changes": {}, + "result": False, +- "comment": six.text_type(exc), ++ "comment": str(exc), + } + + if "result" in hold_ret and not hold_ret["result"]: +@@ -1763,7 +1774,7 @@ def installed( + "changes": {}, + "result": False, + "comment": "An error was encountered while " +- "holding/unholding package(s): {0}".format(hold_ret["comment"]), ++ "holding/unholding package(s): {}".format(hold_ret["comment"]), + } + else: + modified_hold = [ +@@ -1779,16 +1790,16 @@ def installed( + ] + + for i in modified_hold: +- result["comment"] += ".\n{0}".format(i["comment"]) ++ result["comment"] += ".\n{}".format(i["comment"]) + result["result"] = i["result"] + result["changes"][i["name"]] = i["changes"] + + for i in not_modified_hold: +- result["comment"] += ".\n{0}".format(i["comment"]) ++ result["comment"] += ".\n{}".format(i["comment"]) + result["result"] = i["result"] + + for i in failed_hold: +- result["comment"] += ".\n{0}".format(i["comment"]) ++ result["comment"] += ".\n{}".format(i["comment"]) + result["result"] = i["result"] + return result + +@@ -1805,8 +1816,8 @@ def installed( + + # Remove any targets not returned by _find_install_targets + if pkgs: +- pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] +- pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) ++ pkgs = [dict([(x, y)]) for x, y in targets.items()] ++ pkgs.extend([dict([(x, y)]) for x, y in to_reinstall.items()]) + elif sources: + oldsources = sources + sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] +@@ -1823,12 +1834,12 @@ def installed( + summary = ", ".join([_get_desired_pkg(x, targets) for x in targets]) + comment.append( + "The following packages would be " +- "installed/updated: {0}".format(summary) ++ "installed/updated: {}".format(summary) + ) + if to_unpurge: + comment.append( + "The following packages would have their selection status " +- "changed from 'purge' to 'install': {0}".format(", ".join(to_unpurge)) ++ "changed from 'purge' to 'install': {}".format(", ".join(to_unpurge)) + ) + if to_reinstall: + # Add a comment for each package in to_reinstall with its +@@ -1852,7 +1863,7 @@ def installed( + else: + pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) + comment.append( +- "Package '{0}' would be reinstalled because the " ++ "Package '{}' would be reinstalled because the " + "following files have been altered:".format(pkgstr) + ) + comment.append(_nested_output(altered_files[reinstall_pkg])) +@@ -1896,7 +1907,7 @@ def installed( + 
ret["changes"] = {} + ret["comment"] = ( + "An error was encountered while installing " +- "package(s): {0}".format(exc) ++ "package(s): {}".format(exc) + ) + if warnings: + ret.setdefault("warnings", []).extend(warnings) +@@ -1907,7 +1918,7 @@ def installed( + + if isinstance(pkg_ret, dict): + changes["installed"].update(pkg_ret) +- elif isinstance(pkg_ret, six.string_types): ++ elif isinstance(pkg_ret, str): + comment.append(pkg_ret) + # Code below will be looking for a dictionary. If this is a string + # it means that there was an exception raised and that no packages +@@ -1921,7 +1932,7 @@ def installed( + action = "pkg.hold" if kwargs["hold"] else "pkg.unhold" + hold_ret = __salt__[action](name=name, pkgs=desired) + except (CommandExecutionError, SaltInvocationError) as exc: +- comment.append(six.text_type(exc)) ++ comment.append(str(exc)) + ret = { + "name": name, + "changes": changes, +@@ -1938,7 +1949,7 @@ def installed( + "changes": {}, + "result": False, + "comment": "An error was encountered while " +- "holding/unholding package(s): {0}".format(hold_ret["comment"]), ++ "holding/unholding package(s): {}".format(hold_ret["comment"]), + } + if warnings: + ret.setdefault("warnings", []).extend(warnings) +@@ -1996,11 +2007,11 @@ def installed( + summary = ", ".join([_get_desired_pkg(x, desired) for x in modified]) + if len(summary) < 20: + comment.append( +- "The following packages were installed/updated: " "{0}".format(summary) ++ "The following packages were installed/updated: " "{}".format(summary) + ) + else: + comment.append( +- "{0} targeted package{1} {2} installed/updated.".format( ++ "{} targeted package{} {} installed/updated.".format( + len(modified), + "s" if len(modified) > 1 else "", + "were" if len(modified) > 1 else "was", +@@ -2014,14 +2025,14 @@ def installed( + comment.append(i["comment"]) + if len(changes[change_name]["new"]) > 0: + changes[change_name]["new"] += "\n" +- changes[change_name]["new"] += "{0}".format(i["changes"]["new"]) ++ changes[change_name]["new"] += "{}".format(i["changes"]["new"]) + if len(changes[change_name]["old"]) > 0: + changes[change_name]["old"] += "\n" +- changes[change_name]["old"] += "{0}".format(i["changes"]["old"]) ++ changes[change_name]["old"] += "{}".format(i["changes"]["old"]) + else: + comment.append(i["comment"]) + changes[change_name] = {} +- changes[change_name]["new"] = "{0}".format(i["changes"]["new"]) ++ changes[change_name]["new"] = "{}".format(i["changes"]["new"]) + + # Any requested packages that were not targeted for install or reinstall + if not_modified: +@@ -2031,11 +2042,11 @@ def installed( + summary = ", ".join([_get_desired_pkg(x, desired) for x in not_modified]) + if len(not_modified) <= 20: + comment.append( +- "The following packages were already installed: " "{0}".format(summary) ++ "The following packages were already installed: " "{}".format(summary) + ) + else: + comment.append( +- "{0} targeted package{1} {2} already installed".format( ++ "{} targeted package{} {} already installed".format( + len(not_modified), + "s" if len(not_modified) > 1 else "", + "were" if len(not_modified) > 1 else "was", +@@ -2054,7 +2065,7 @@ def installed( + else: + summary = ", ".join([_get_desired_pkg(x, desired) for x in failed]) + comment.insert( +- 0, "The following packages failed to " "install/update: {0}".format(summary) ++ 0, "The following packages failed to " "install/update: {}".format(summary) + ) + result = False + +@@ -2118,7 +2129,7 @@ def installed( + pkgstr = modified_pkg + else: + pkgstr = 
_get_desired_pkg(modified_pkg, desired) +- msg = "Package {0} was reinstalled.".format(pkgstr) ++ msg = "Package {} was reinstalled.".format(pkgstr) + if modified_pkg in altered_files: + msg += " The following files were remediated:" + comment.append(msg) +@@ -2133,7 +2144,7 @@ def installed( + pkgstr = failed_pkg + else: + pkgstr = _get_desired_pkg(failed_pkg, desired) +- msg = "Reinstall was not successful for package {0}.".format(pkgstr) ++ msg = "Reinstall was not successful for package {}.".format(pkgstr) + if failed_pkg in altered_files: + msg += " The following files could not be remediated:" + comment.append(msg) +@@ -2274,12 +2285,12 @@ def downloaded( + ret["result"] = False + ret[ + "comment" +- ] = "An error was encountered while checking targets: " "{0}".format(targets) ++ ] = "An error was encountered while checking targets: " "{}".format(targets) + return ret + + if __opts__["test"]: + summary = ", ".join(targets) +- ret["comment"] = "The following packages would be " "downloaded: {0}".format( ++ ret["comment"] = "The following packages would be " "downloaded: {}".format( + summary + ) + return ret +@@ -2306,7 +2317,7 @@ def downloaded( + ret["changes"] = {} + ret["comment"] = ( + "An error was encountered while downloading " +- "package(s): {0}".format(exc) ++ "package(s): {}".format(exc) + ) + return ret + +@@ -2316,13 +2327,13 @@ def downloaded( + if failed: + summary = ", ".join([_get_desired_pkg(x, targets) for x in failed]) + ret["result"] = False +- ret["comment"] = "The following packages failed to " "download: {0}".format( ++ ret["comment"] = "The following packages failed to " "download: {}".format( + summary + ) + + if not ret["changes"] and not ret["comment"]: + ret["result"] = True +- ret["comment"] = "Packages downloaded: " "{0}".format(", ".join(targets)) ++ ret["comment"] = "Packages downloaded: " "{}".format(", ".join(targets)) + + return ret + +@@ -2382,14 +2393,14 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): + ret["result"] = False + ret[ + "comment" +- ] = "An error was encountered while checking targets: " "{0}".format(targets) ++ ] = "An error was encountered while checking targets: " "{}".format(targets) + return ret + + if __opts__["test"]: + summary = ", ".join(targets) + ret[ + "comment" +- ] = "The following advisory patches would be " "downloaded: {0}".format(summary) ++ ] = "The following advisory patches would be " "downloaded: {}".format(summary) + return ret + + try: +@@ -2408,7 +2419,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): + ret["changes"] = {} + ret["comment"] = ( + "An error was encountered while downloading " +- "package(s): {0}".format(exc) ++ "package(s): {}".format(exc) + ) + return ret + +@@ -2417,7 +2428,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): + ret["result"] = True + ret["comment"] = ( + "Advisory patch is not needed or related packages " +- "are already {0}".format(status) ++ "are already {}".format(status) + ) + + return ret +@@ -2674,7 +2685,7 @@ def latest( + "changes": {}, + "result": False, + "comment": "An error was encountered while checking the " +- "newest available version of package(s): {0}".format(exc), ++ "newest available version of package(s): {}".format(exc), + } + + try: +@@ -2683,9 +2694,9 @@ def latest( + return {"name": name, "changes": {}, "result": False, "comment": exc.strerror} + + # Repack the cur/avail data if only a single package is being checked +- if isinstance(cur, six.string_types): ++ if 
isinstance(cur, str): + cur = {desired_pkgs[0]: cur} +- if isinstance(avail, six.string_types): ++ if isinstance(avail, str): + avail = {desired_pkgs[0]: avail} + + targets = {} +@@ -2695,7 +2706,7 @@ def latest( + # Package either a) is up-to-date, or b) does not exist + if not cur.get(pkg): + # Package does not exist +- msg = "No information found for '{0}'.".format(pkg) ++ msg = "No information found for '{}'.".format(pkg) + log.error(msg) + problems.append(msg) + elif ( +@@ -2741,12 +2752,12 @@ def latest( + comments.append( + "The following packages are already up-to-date: " + + ", ".join( +- ["{0} ({1})".format(x, cur[x]) for x in sorted(up_to_date)] ++ ["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)] + ) + ) + else: + comments.append( +- "{0} packages are already up-to-date".format(up_to_date_count) ++ "{} packages are already up-to-date".format(up_to_date_count) + ) + + return { +@@ -2784,7 +2795,7 @@ def latest( + "changes": {}, + "result": False, + "comment": "An error was encountered while installing " +- "package(s): {0}".format(exc), ++ "package(s): {}".format(exc), + } + + if changes: +@@ -2800,7 +2811,7 @@ def latest( + + comments = [] + if failed: +- msg = "The following packages failed to update: " "{0}".format( ++ msg = "The following packages failed to update: " "{}".format( + ", ".join(sorted(failed)) + ) + comments.append(msg) +@@ -2808,19 +2819,17 @@ def latest( + msg = ( + "The following packages were successfully " + "installed/upgraded: " +- "{0}".format(", ".join(sorted(successful))) ++ "{}".format(", ".join(sorted(successful))) + ) + comments.append(msg) + if up_to_date: + if len(up_to_date) <= 10: + msg = ( + "The following packages were already up-to-date: " +- "{0}".format(", ".join(sorted(up_to_date))) ++ "{}".format(", ".join(sorted(up_to_date))) + ) + else: +- msg = "{0} packages were already up-to-date ".format( +- len(up_to_date) +- ) ++ msg = "{} packages were already up-to-date ".format(len(up_to_date)) + comments.append(msg) + + return { +@@ -2832,18 +2841,18 @@ def latest( + else: + if len(targets) > 10: + comment = ( +- "{0} targeted packages failed to update. " ++ "{} targeted packages failed to update. " + "See debug log for details.".format(len(targets)) + ) + elif len(targets) > 1: + comment = ( + "The following targeted packages failed to update. 
" +- "See debug log for details: ({0}).".format( ++ "See debug log for details: ({}).".format( + ", ".join(sorted(targets)) + ) + ) + else: +- comment = "Package {0} failed to " "update.".format( ++ comment = "Package {} failed to " "update.".format( + next(iter(list(targets.keys()))) + ) + if up_to_date: +@@ -2851,10 +2860,10 @@ def latest( + comment += ( + " The following packages were already " + "up-to-date: " +- "{0}".format(", ".join(sorted(up_to_date))) ++ "{}".format(", ".join(sorted(up_to_date))) + ) + else: +- comment += "{0} packages were already " "up-to-date".format( ++ comment += "{} packages were already " "up-to-date".format( + len(up_to_date) + ) + +@@ -2866,13 +2875,13 @@ def latest( + } + else: + if len(desired_pkgs) > 10: +- comment = "All {0} packages are up-to-date.".format(len(desired_pkgs)) ++ comment = "All {} packages are up-to-date.".format(len(desired_pkgs)) + elif len(desired_pkgs) > 1: +- comment = "All packages are up-to-date " "({0}).".format( ++ comment = "All packages are up-to-date " "({}).".format( + ", ".join(sorted(desired_pkgs)) + ) + else: +- comment = "Package {0} is already " "up-to-date".format(desired_pkgs[0]) ++ comment = "Package {} is already " "up-to-date".format(desired_pkgs[0]) + + return {"name": name, "changes": {}, "result": True, "comment": comment} + +@@ -2894,8 +2903,7 @@ def _uninstall( + "name": name, + "changes": {}, + "result": False, +- "comment": "Invalid action '{0}'. " +- "This is probably a bug.".format(action), ++ "comment": "Invalid action '{}'. " "This is probably a bug.".format(action), + } + + try: +@@ -2908,7 +2916,7 @@ def _uninstall( + "changes": {}, + "result": False, + "comment": "An error was encountered while parsing targets: " +- "{0}".format(exc), ++ "{}".format(exc), + } + targets = _find_remove_targets( + name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs +@@ -2921,7 +2929,7 @@ def _uninstall( + "changes": {}, + "result": False, + "comment": "An error was encountered while checking targets: " +- "{0}".format(targets), ++ "{}".format(targets), + } + if action == "purge": + old_removed = __salt__["pkg.list_pkgs"]( +@@ -2936,7 +2944,7 @@ def _uninstall( + "changes": {}, + "result": True, + "comment": "None of the targeted packages are installed" +- "{0}".format(" or partially installed" if action == "purge" else ""), ++ "{}".format(" or partially installed" if action == "purge" else ""), + } + + if __opts__["test"]: +@@ -2944,11 +2952,11 @@ def _uninstall( + "name": name, + "changes": {}, + "result": None, +- "comment": "The following packages will be {0}d: " +- "{1}.".format(action, ", ".join(targets)), ++ "comment": "The following packages will be {}d: " ++ "{}.".format(action, ", ".join(targets)), + } + +- changes = __salt__["pkg.{0}".format(action)]( ++ changes = __salt__["pkg.{}".format(action)]( + name, pkgs=pkgs, version=version, **kwargs + ) + new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs) +@@ -2975,8 +2983,8 @@ def _uninstall( + "name": name, + "changes": changes, + "result": False, +- "comment": "The following packages failed to {0}: " +- "{1}.".format(action, ", ".join(failed)), ++ "comment": "The following packages failed to {}: " ++ "{}.".format(action, ", ".join(failed)), + } + + comments = [] +@@ -2984,14 +2992,13 @@ def _uninstall( + if not_installed: + comments.append( + "The following packages were not installed: " +- "{0}".format(", ".join(not_installed)) ++ "{}".format(", ".join(not_installed)) + ) + comments.append( +- "The following packages were {0}d: " +- 
"{1}.".format(action, ", ".join(targets)) ++ "The following packages were {}d: " "{}.".format(action, ", ".join(targets)) + ) + else: +- comments.append("All targeted packages were {0}d.".format(action)) ++ comments.append("All targeted packages were {}d.".format(action)) + + return { + "name": name, +@@ -3089,7 +3096,7 @@ def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, ** + ret["changes"] = {} + ret[ + "comment" +- ] = "An error was encountered while removing " "package(s): {0}".format(exc) ++ ] = "An error was encountered while removing " "package(s): {}".format(exc) + return ret + + +@@ -3181,7 +3188,7 @@ def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **k + ret["changes"] = {} + ret[ + "comment" +- ] = "An error was encountered while purging " "package(s): {0}".format(exc) ++ ] = "An error was encountered while purging " "package(s): {}".format(exc) + return ret + + +@@ -3247,17 +3254,17 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs): + "new": pkgver, + "old": __salt__["pkg.version"](pkgname, **kwargs), + } +- for pkgname, pkgver in six.iteritems(packages) ++ for pkgname, pkgver in packages.items() + } + if isinstance(pkgs, list): + packages = [pkg for pkg in packages if pkg in pkgs] + expected = { + pkgname: pkgver +- for pkgname, pkgver in six.iteritems(expected) ++ for pkgname, pkgver in expected.items() + if pkgname in pkgs + } + except Exception as exc: # pylint: disable=broad-except +- ret["comment"] = six.text_type(exc) ++ ret["comment"] = str(exc) + return ret + else: + ret["comment"] = "refresh must be either True or False" +@@ -3284,16 +3291,16 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs): + ret["changes"] = {} + ret[ + "comment" +- ] = "An error was encountered while updating " "packages: {0}".format(exc) ++ ] = "An error was encountered while updating " "packages: {}".format(exc) + return ret + + # If a package list was provided, ensure those packages were updated + missing = [] + if isinstance(pkgs, list): +- missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret["changes"]] ++ missing = [pkg for pkg in expected.keys() if pkg not in ret["changes"]] + + if missing: +- ret["comment"] = "The following package(s) failed to update: {0}".format( ++ ret["comment"] = "The following package(s) failed to update: {}".format( + ", ".join(missing) + ) + ret["result"] = False +@@ -3362,8 +3369,8 @@ def group_installed(name, skip=None, include=None, **kwargs): + ret["comment"] = "skip must be formatted as a list" + return ret + for idx, item in enumerate(skip): +- if not isinstance(item, six.string_types): +- skip[idx] = six.text_type(item) ++ if not isinstance(item, str): ++ skip[idx] = str(item) + + if include is None: + include = [] +@@ -3372,15 +3379,15 @@ def group_installed(name, skip=None, include=None, **kwargs): + ret["comment"] = "include must be formatted as a list" + return ret + for idx, item in enumerate(include): +- if not isinstance(item, six.string_types): +- include[idx] = six.text_type(item) ++ if not isinstance(item, str): ++ include[idx] = str(item) + + try: + diff = __salt__["pkg.group_diff"](name) + except CommandExecutionError as err: + ret["comment"] = ( + "An error was encountered while installing/updating " +- "group '{0}': {1}.".format(name, err) ++ "group '{}': {}.".format(name, err) + ) + return ret + +@@ -3390,7 +3397,7 @@ def group_installed(name, skip=None, include=None, **kwargs): + if invalid_skip: + ret[ + "comment" +- ] = "The following mandatory packages 
cannot be skipped: {0}".format( ++ ] = "The following mandatory packages cannot be skipped: {}".format( + ", ".join(invalid_skip) + ) + return ret +@@ -3401,7 +3408,7 @@ def group_installed(name, skip=None, include=None, **kwargs): + + if not targets: + ret["result"] = True +- ret["comment"] = "Group '{0}' is already installed".format(name) ++ ret["comment"] = "Group '{}' is already installed".format(name) + return ret + + partially_installed = ( +@@ -3415,9 +3422,9 @@ def group_installed(name, skip=None, include=None, **kwargs): + if partially_installed: + ret[ + "comment" +- ] = "Group '{0}' is partially installed and will be updated".format(name) ++ ] = "Group '{}' is partially installed and will be updated".format(name) + else: +- ret["comment"] = "Group '{0}' will be installed".format(name) ++ ret["comment"] = "Group '{}' will be installed".format(name) + return ret + + try: +@@ -3432,19 +3439,19 @@ def group_installed(name, skip=None, include=None, **kwargs): + ret["changes"] = {} + ret["comment"] = ( + "An error was encountered while " +- "installing/updating group '{0}': {1}".format(name, exc) ++ "installing/updating group '{}': {}".format(name, exc) + ) + return ret + + failed = [x for x in targets if x not in __salt__["pkg.list_pkgs"](**kwargs)] + if failed: +- ret["comment"] = "Failed to install the following packages: {0}".format( ++ ret["comment"] = "Failed to install the following packages: {}".format( + ", ".join(failed) + ) + return ret + + ret["result"] = True +- ret["comment"] = "Group '{0}' was {1}".format( ++ ret["comment"] = "Group '{}' was {}".format( + name, "updated" if partially_installed else "installed" + ) + return ret +@@ -3561,6 +3568,6 @@ def mod_watch(name, **kwargs): + return { + "name": name, + "changes": {}, +- "comment": "pkg.{0} does not work with the watch requisite".format(sfun), ++ "comment": "pkg.{} does not work with the watch requisite".format(sfun), + "result": False, + } -- -2.16.4 +2.29.2 diff --git a/add-astra-linux-common-edition-to-the-os-family-list.patch b/add-astra-linux-common-edition-to-the-os-family-list.patch index 5d99070..1a98229 100644 --- a/add-astra-linux-common-edition-to-the-os-family-list.patch +++ b/add-astra-linux-common-edition-to-the-os-family-list.patch @@ -1,4 +1,4 @@ -From acf0b24353d831dcc2c5b292f99480938f5ecd93 Mon Sep 17 00:00:00 2001 +From d5569023c64a3fcec57a7aa6823ee94e8be91b3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?= Date: Wed, 12 Feb 2020 10:05:45 +0100 @@ -11,49 +11,49 @@ Subject: [PATCH] Add Astra Linux Common Edition to the OS Family list 2 files changed, 21 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 20950988d9..f410985198 100644 +index 5dff6ecfd4..5634327623 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1523,6 +1523,7 @@ _OS_FAMILY_MAP = { - 'Funtoo': 'Gentoo', - 'AIX': 'AIX', - 'TurnKey': 'Debian', -+ 'AstraLinuxCE': 'Debian', +@@ -1618,6 +1618,7 @@ _OS_FAMILY_MAP = { + "Funtoo": "Gentoo", + "AIX": "AIX", + "TurnKey": "Debian", ++ "AstraLinuxCE": "Debian", } # Matches any possible format: diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index b4ed9379e5..c276dee9f3 100644 +index 85d434dd9d..196dbcf83d 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -605,6 +605,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): +@@ -728,6 +728,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): } self._run_os_grains_tests("ubuntu-17.10", _os_release_map, 
expectation) -+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') ++ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") + def test_astralinuxce_2_os_grains(self): -+ ''' ++ """ + Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel" -+ ''' ++ """ + _os_release_map = { -+ 'linux_distribution': ('AstraLinuxCE', '2.12.22', 'orel'), ++ "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"), + } + expectation = { -+ 'os': 'AstraLinuxCE', -+ 'os_family': 'Debian', -+ 'oscodename': 'orel', -+ 'osfullname': 'AstraLinuxCE', -+ 'osrelease': '2.12.22', -+ 'osrelease_info': (2, 12, 22), -+ 'osmajorrelease': 2, -+ 'osfinger': 'AstraLinuxCE-2', ++ "os": "AstraLinuxCE", ++ "os_family": "Debian", ++ "oscodename": "orel", ++ "osfullname": "AstraLinuxCE", ++ "osrelease": "2.12.22", ++ "osrelease_info": (2, 12, 22), ++ "osmajorrelease": 2, ++ "osfinger": "AstraLinuxCE-2", + } + self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation) + - @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows') + @skipIf(not salt.utils.platform.is_windows(), "System is not Windows") def test_windows_platform_data(self): - ''' + """ -- -2.16.4 +2.29.2 diff --git a/add-batch_presence_ping_timeout-and-batch_presence_p.patch b/add-batch_presence_ping_timeout-and-batch_presence_p.patch index 4fa802a..296a963 100644 --- a/add-batch_presence_ping_timeout-and-batch_presence_p.patch +++ b/add-batch_presence_ping_timeout-and-batch_presence_p.patch @@ -1,46 +1,55 @@ -From 376a7d2eeb6b3b215fac9322f1baee4497bdb339 Mon Sep 17 00:00:00 2001 +From 66f6c2540a151487b26c89a2bb66199d6c65c18d Mon Sep 17 00:00:00 2001 From: Marcelo Chiaradia Date: Thu, 4 Apr 2019 13:57:38 +0200 Subject: [PATCH] Add 'batch_presence_ping_timeout' and 'batch_presence_ping_gather_job_timeout' parameters for synchronous batching --- - salt/cli/batch.py | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) + salt/cli/batch.py | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/cli/batch.py b/salt/cli/batch.py -index 36e66da1af..67f03c8a45 100644 +index 527cffdeb7..2bc5444aef 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py -@@ -83,6 +83,9 @@ def batch_get_opts( +@@ -77,6 +77,13 @@ def batch_get_opts( if key not in opts: opts[key] = val -+ opts['batch_presence_ping_timeout'] = kwargs.get('batch_presence_ping_timeout', opts['timeout']) -+ opts['batch_presence_ping_gather_job_timeout'] = kwargs.get('batch_presence_ping_gather_job_timeout', opts['gather_job_timeout']) ++ opts["batch_presence_ping_timeout"] = kwargs.get( ++ "batch_presence_ping_timeout", opts["timeout"] ++ ) ++ opts["batch_presence_ping_gather_job_timeout"] = kwargs.get( ++ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"] ++ ) + return opts -@@ -119,7 +122,7 @@ class Batch(object): - args = [self.opts['tgt'], - 'test.ping', - [], -- self.opts['timeout'], -+ self.opts.get('batch_presence_ping_timeout', self.opts['timeout']), - ] +@@ -115,7 +122,7 @@ class Batch: + self.opts["tgt"], + "test.ping", + [], +- self.opts["timeout"], ++ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]), + ] - selected_target_option = self.opts.get('selected_target_option', None) -@@ -130,7 +133,7 @@ class Batch(object): + selected_target_option = self.opts.get("selected_target_option", None) +@@ -126,7 +133,12 @@ class Batch: - self.pub_kwargs['yield_pub_data'] = True - ping_gen = self.local.cmd_iter(*args, -- 
gather_job_timeout=self.opts['gather_job_timeout'], -+ gather_job_timeout=self.opts.get('batch_presence_ping_gather_job_timeout', self.opts['gather_job_timeout']), - **self.pub_kwargs) + self.pub_kwargs["yield_pub_data"] = True + ping_gen = self.local.cmd_iter( +- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs ++ *args, ++ gather_job_timeout=self.opts.get( ++ "batch_presence_ping_gather_job_timeout", ++ self.opts["gather_job_timeout"], ++ ), ++ **self.pub_kwargs + ) # Broadcast to targets -- -2.16.4 +2.29.2 diff --git a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch b/add-cpe_name-for-osversion-grain-parsing-u-49946.patch index 23a7e7c..64228b2 100644 --- a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch +++ b/add-cpe_name-for-osversion-grain-parsing-u-49946.patch @@ -1,4 +1,4 @@ -From a90f35bc03b477a63aae20c58f8957c075569465 Mon Sep 17 00:00:00 2001 +From c845d56fdf1762586b1f210b1eb49193893d4312 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 9 Oct 2018 14:08:50 +0200 Subject: [PATCH] Add CPE_NAME for osversion* grain parsing (U#49946) @@ -29,10 +29,10 @@ Fix proper part name in the string-bound CPE 1 file changed, 28 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 9c1b5d930e..7b7e328520 100644 +index 5535584d1b..bc3cf129cd 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1642,6 +1642,34 @@ def _parse_cpe_name(cpe): +@@ -1732,6 +1732,34 @@ def _parse_cpe_name(cpe): return ret @@ -65,9 +65,9 @@ index 9c1b5d930e..7b7e328520 100644 + + def os_data(): - ''' + """ Return grains pertaining to the operating system -- -2.16.4 +2.29.2 diff --git a/add-custom-suse-capabilities-as-grains.patch b/add-custom-suse-capabilities-as-grains.patch index 1051ce0..395a9b5 100644 --- a/add-custom-suse-capabilities-as-grains.patch +++ b/add-custom-suse-capabilities-as-grains.patch @@ -1,4 +1,4 @@ -From e57dd3c2ae655422f0f6939825154ce5827d43c4 Mon Sep 17 00:00:00 2001 +From 713ccfdc5c6733495d3ce7f26a8cfeddb8e9e9c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 21 Jun 2018 11:57:57 +0100 @@ -9,10 +9,10 @@ Subject: [PATCH] Add custom SUSE capabilities as Grains 1 file changed, 7 insertions(+) diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index 9ce644b766..1082b05dba 100644 +index 2fdbe6526a..ddc22293ea 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py -@@ -75,3 +75,10 @@ def config(): +@@ -66,3 +66,10 @@ def config(): log.warning("Bad syntax in grains file! Skipping.") return {} return {} @@ -24,6 +24,6 @@ index 9ce644b766..1082b05dba 100644 + '__suse_reserved_pkg_patches_support': True + } -- -2.16.4 +2.29.2 diff --git a/add-docker-logout-237.patch b/add-docker-logout-237.patch index 33bf399..7f878dc 100644 --- a/add-docker-logout-237.patch +++ b/add-docker-logout-237.patch @@ -1,4 +1,4 @@ -From 9e6bd24b07cd2424c3805777b07b9ea84adff416 Mon Sep 17 00:00:00 2001 +From 355e1e29e8f3286eeb13bc2d05089c096c9e01e3 Mon Sep 17 00:00:00 2001 From: Alexander Graul Date: Mon, 18 May 2020 16:39:27 +0200 Subject: [PATCH] Add docker logout (#237) @@ -13,10 +13,10 @@ interpreted as a list of docker registries to log out of. 
2 files changed, 139 insertions(+) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py -index 28a2107cec..119e9eb170 100644 +index 934038c927..176b4db926 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py -@@ -1481,6 +1481,86 @@ def login(*registries): +@@ -1586,6 +1586,86 @@ def logout(*registries): return ret @@ -102,44 +102,15 @@ index 28a2107cec..119e9eb170 100644 + # Functions for information gathering def depends(name): - ''' + """ diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py -index 191bfc123f..8f4ead2867 100644 +index 34e2e9c610..48526acb71 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py -@@ -164,6 +164,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): - self.assertIn('retcode', ret) - self.assertNotEqual(ret['retcode'], 0) +@@ -199,6 +199,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): + output_loglevel="quiet", + ) -+ def test_logout_calls_docker_cli_logout_single(self): -+ client = Mock() -+ get_client_mock = MagicMock(return_value=client) -+ ref_out = {"stdout": "", "stderr": "", "retcode": 0} -+ registry_auth_data = { -+ "portus.example.com:5000": { -+ "username": "admin", -+ "password": "linux12345", -+ "email": "tux@example.com", -+ } -+ } -+ docker_mock = MagicMock(return_value=ref_out) -+ with patch.object(docker_mod, "_get_client", get_client_mock): -+ dunder_salt = { -+ "config.get": MagicMock(return_value=registry_auth_data), -+ "cmd.run_all": docker_mock, -+ "config.option": MagicMock(return_value={}), -+ } -+ with patch.dict(docker_mod.__salt__, dunder_salt): -+ ret = docker_mod.logout("portus.example.com:5000") -+ assert "retcode" in ret -+ assert ret["retcode"] == 0 -+ docker_mock.assert_called_with( -+ ["docker", "logout", "portus.example.com:5000"], -+ python_shell=False, -+ output_loglevel="quiet", -+ ) -+ -+ + def test_logout_calls_docker_cli_logout_all(self): + client = Mock() + get_client_mock = MagicMock(return_value=client) @@ -170,10 +141,39 @@ index 191bfc123f..8f4ead2867 100644 + assert ret["retcode"] == 0 + assert docker_mock.call_count == 2 + - def test_ps_with_host_true(self): - ''' - Check that docker.ps called with host is ``True``, ++ def test_logout_calls_docker_cli_logout_single(self): ++ client = Mock() ++ get_client_mock = MagicMock(return_value=client) ++ ref_out = {"stdout": "", "stderr": "", "retcode": 0} ++ registry_auth_data = { ++ "portus.example.com:5000": { ++ "username": "admin", ++ "password": "linux12345", ++ "email": "tux@example.com", ++ } ++ } ++ docker_mock = MagicMock(return_value=ref_out) ++ with patch.object(docker_mod, "_get_client", get_client_mock): ++ dunder_salt = { ++ "config.get": MagicMock(return_value=registry_auth_data), ++ "cmd.run_all": docker_mock, ++ "config.option": MagicMock(return_value={}), ++ } ++ with patch.dict(docker_mod.__salt__, dunder_salt): ++ ret = docker_mod.logout("portus.example.com:5000") ++ assert "retcode" in ret ++ assert ret["retcode"] == 0 ++ docker_mock.assert_called_with( ++ ["docker", "logout", "portus.example.com:5000"], ++ python_shell=False, ++ output_loglevel="quiet", ++ ) ++ ++ + def test_logout_calls_docker_cli_logout_all(self): + client = Mock() + get_client_mock = MagicMock(return_value=client) -- -2.26.2 +2.29.2 diff --git a/add-environment-variable-to-know-if-yum-is-invoked-f.patch b/add-environment-variable-to-know-if-yum-is-invoked-f.patch index ac344a6..a03e838 100644 --- a/add-environment-variable-to-know-if-yum-is-invoked-f.patch 
+++ b/add-environment-variable-to-know-if-yum-is-invoked-f.patch @@ -1,78 +1,83 @@ -From 874b1229babf5244debac141cd260f695ccc1e9d Mon Sep 17 00:00:00 2001 +From 7b2b5fc53d30397b8f7a11e59f5c7a57bcb63058 Mon Sep 17 00:00:00 2001 From: Marcelo Chiaradia Date: Thu, 7 Jun 2018 10:29:41 +0200 -Subject: [PATCH] Add environment variable to know if yum is invoked from - Salt(bsc#1057635) +Subject: [PATCH] Add environment variable to know if yum is invoked + from Salt(bsc#1057635) --- - salt/modules/yumpkg.py | 18 ++++++++++++------ - 1 file changed, 12 insertions(+), 6 deletions(-) + salt/modules/yumpkg.py | 23 +++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index f7e4ac9753..c89d321a1b 100644 +index b547fe6be7..c58b3e4c70 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py -@@ -913,7 +913,8 @@ def list_repo_pkgs(*args, **kwargs): - yum_version = None if _yum() != 'yum' else _LooseVersion( - __salt__['cmd.run']( - ['yum', '--version'], -- python_shell=False -+ python_shell=False, -+ env={"SALT_RUNNING": '1'} - ).splitlines()[0].strip() - ) - # Really old version of yum; does not even have --showduplicates option -@@ -2324,7 +2325,8 @@ def list_holds(pattern=__HOLD_PATTERN, full=True): +@@ -917,7 +917,9 @@ def list_repo_pkgs(*args, **kwargs): + None + if _yum() != "yum" + else _LooseVersion( +- __salt__["cmd.run"](["yum", "--version"], python_shell=False) ++ __salt__["cmd.run"]( ++ ["yum", "--version"], python_shell=False, env={"SALT_RUNNING": "1"} ++ ) + .splitlines()[0] + .strip() + ) +@@ -2347,7 +2349,9 @@ def list_holds(pattern=__HOLD_PATTERN, full=True): + """ _check_versionlock() - out = __salt__['cmd.run']([_yum(), 'versionlock', 'list'], -- python_shell=False) -+ python_shell=False, -+ env={"SALT_RUNNING": '1'}) +- out = __salt__["cmd.run"]([_yum(), "versionlock", "list"], python_shell=False) ++ out = __salt__["cmd.run"]( ++ [_yum(), "versionlock", "list"], python_shell=False, env={"SALT_RUNNING": "1"} ++ ) ret = [] - for line in salt.utils.itertools.split(out, '\n'): + for line in salt.utils.itertools.split(out, "\n"): match = _get_hold(line, pattern=pattern, full=full) -@@ -2390,7 +2392,8 @@ def group_list(): - out = __salt__['cmd.run_stdout']( - [_yum(), 'grouplist', 'hidden'], - output_loglevel='trace', -- python_shell=False +@@ -2415,7 +2419,10 @@ def group_list(): + } + + out = __salt__["cmd.run_stdout"]( +- [_yum(), "grouplist", "hidden"], output_loglevel="trace", python_shell=False ++ [_yum(), "grouplist", "hidden"], ++ output_loglevel="trace", + python_shell=False, -+ env={"SALT_RUNNING": '1'} ++ env={"SALT_RUNNING": "1"}, ) key = None - for line in salt.utils.itertools.split(out, '\n'): -@@ -2457,7 +2460,8 @@ def group_info(name, expand=False): - out = __salt__['cmd.run_stdout']( - cmd, - output_loglevel='trace', -- python_shell=False -+ python_shell=False, -+ env={"SALT_RUNNING": '1'} - ) + for line in salt.utils.itertools.split(out, "\n"): +@@ -2486,7 +2493,9 @@ def group_info(name, expand=False, ignore_groups=None): + ret[pkgtype] = set() + + cmd = [_yum(), "--quiet", "groupinfo", name] +- out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False) ++ out = __salt__["cmd.run_stdout"]( ++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"} ++ ) g_info = {} -@@ -3134,7 +3138,8 @@ def download(*packages): - __salt__['cmd.run']( - cmd, - output_loglevel='trace', -- python_shell=False -+ python_shell=False, -+ env={"SALT_RUNNING": '1'} - 
) + for line in salt.utils.itertools.split(out, "\n"): +@@ -3203,7 +3212,9 @@ def download(*packages, **kwargs): + + cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)] + cmd.extend(packages) +- __salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False) ++ __salt__["cmd.run"]( ++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"} ++ ) ret = {} for dld_result in os.listdir(CACHE_DIR): -@@ -3209,7 +3214,8 @@ def _get_patches(installed_only=False): - cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'all'] - ret = __salt__['cmd.run_stdout']( - cmd, -- python_shell=False -+ python_shell=False, -+ env={"SALT_RUNNING": '1'} - ) + if not dld_result.endswith(".rpm"): +@@ -3279,7 +3290,7 @@ def _get_patches(installed_only=False): + patches = {} + + cmd = [_yum(), "--quiet", "updateinfo", "list", "all"] +- ret = __salt__["cmd.run_stdout"](cmd, python_shell=False) ++ ret = __salt__["cmd.run_stdout"](cmd, python_shell=False, env={"SALT_RUNNING": "1"}) for line in salt.utils.itertools.split(ret, os.linesep): - inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)', + inst, advisory_id, sev, pkg = re.match( + r"([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)", line -- -2.16.4 +2.29.2 diff --git a/add-hold-unhold-functions.patch b/add-hold-unhold-functions.patch index dbb61a3..da32063 100644 --- a/add-hold-unhold-functions.patch +++ b/add-hold-unhold-functions.patch @@ -1,4 +1,4 @@ -From 666f62917bbc48cbee2ed0aa319a61afd1b1fcb2 Mon Sep 17 00:00:00 2001 +From 6176ef8aa39626dcb450a1665231a796e9544342 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Thu, 6 Dec 2018 16:26:23 +0100 Subject: [PATCH] Add hold/unhold functions @@ -7,43 +7,324 @@ Add unhold function Add warnings --- - salt/modules/zypperpkg.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 87 insertions(+), 1 deletion(-) + salt/modules/zypperpkg.py | 186 +++++++++++++++++++++++++++----------- + 1 file changed, 131 insertions(+), 55 deletions(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 50279ccbd1..08a9c2ed4d 100644 +index 44bcbbf2f2..6fa6e3e0a1 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -41,6 +41,7 @@ import salt.utils.pkg +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + """ + Package support for openSUSE via the zypper package manager + +@@ -12,8 +11,6 @@ Package support for openSUSE via the zypper package manager + + """ + +-# Import python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import datetime + import fnmatch +@@ -24,7 +21,6 @@ import time + from xml.dom import minidom as dom + from xml.parsers.expat import ExpatError + +-# Import salt libs + import salt.utils.data + import salt.utils.environment + import salt.utils.event +@@ -35,9 +31,9 @@ import salt.utils.pkg import salt.utils.pkg.rpm import salt.utils.stringutils import salt.utils.systemd +import salt.utils.versions - from salt.utils.versions import LooseVersion - import salt.utils.environment from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError -@@ -1771,7 +1772,7 @@ def clean_locks(): + +-# Import 3rd-party libs + # pylint: disable=import-error,redefined-builtin,no-name-in-module + from salt.ext import six + from salt.ext.six.moves import configparser +@@ -51,8 +47,8 @@ log = logging.getLogger(__name__) + + HAS_ZYPP = False + ZYPP_HOME = "/etc/zypp" +-LOCKS = "{0}/locks".format(ZYPP_HOME) +-REPOS = "{0}/repos.d".format(ZYPP_HOME) ++LOCKS = "{}/locks".format(ZYPP_HOME) 
++REPOS = "{}/repos.d".format(ZYPP_HOME) + DEFAULT_PRIORITY = 99 + PKG_ARCH_SEPARATOR = "." + +@@ -75,7 +71,7 @@ def __virtual__(): + return __virtualname__ + + +-class _Zypper(object): ++class _Zypper: + """ + Zypper parallel caller. + Validates the result and either raises an exception or reports an error. +@@ -339,7 +335,7 @@ class _Zypper(object): + attrs=["pid", "name", "cmdline", "create_time"], + ) + data["cmdline"] = " ".join(data["cmdline"]) +- data["info"] = "Blocking process created at {0}.".format( ++ data["info"] = "Blocking process created at {}.".format( + datetime.datetime.utcfromtimestamp( + data["create_time"] + ).isoformat() +@@ -347,7 +343,7 @@ class _Zypper(object): + data["success"] = True + except Exception as err: # pylint: disable=broad-except + data = { +- "info": "Unable to retrieve information about blocking process: {0}".format( ++ "info": "Unable to retrieve information about blocking process: {}".format( + err.message + ), + "success": False, +@@ -382,7 +378,7 @@ class _Zypper(object): + ) + if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure: + raise CommandExecutionError( +- "Zypper command failure: {0}".format(self.error_msg) ++ "Zypper command failure: {}".format(self.error_msg) + ) + + return ( +@@ -397,7 +393,7 @@ class _Zypper(object): + __zypper__ = _Zypper() + + +-class Wildcard(object): ++class Wildcard: + """ + .. versionadded:: 2017.7.0 + +@@ -439,7 +435,7 @@ class Wildcard(object): + for vrs in self._get_scope_versions(self._get_available_versions()) + ] + ) +- return versions and "{0}{1}".format(self._op or "", versions[-1]) or None ++ return versions and "{}{}".format(self._op or "", versions[-1]) or None + + def _get_available_versions(self): + """ +@@ -451,17 +447,15 @@ class Wildcard(object): + ).getElementsByTagName("solvable") + if not solvables: + raise CommandExecutionError( +- "No packages found matching '{0}'".format(self.name) ++ "No packages found matching '{}'".format(self.name) + ) + + return sorted( +- set( +- [ +- slv.getAttribute(self._attr_solvable_version) +- for slv in solvables +- if slv.getAttribute(self._attr_solvable_version) +- ] +- ) ++ { ++ slv.getAttribute(self._attr_solvable_version) ++ for slv in solvables ++ if slv.getAttribute(self._attr_solvable_version) ++ } + ) + + def _get_scope_versions(self, pkg_versions): +@@ -489,7 +483,7 @@ class Wildcard(object): + self._op = version.replace(exact_version, "") or None + if self._op and self._op not in self.Z_OP: + raise CommandExecutionError( +- 'Zypper do not supports operator "{0}".'.format(self._op) ++ 'Zypper do not supports operator "{}".'.format(self._op) + ) + self.version = exact_version + +@@ -539,14 +533,11 @@ def list_upgrades(refresh=True, root=None, **kwargs): + cmd = ["list-updates"] + if "fromrepo" in kwargs: + repos = kwargs["fromrepo"] +- if isinstance(repos, six.string_types): ++ if isinstance(repos, str): + repos = [repos] + for repo in repos: + cmd.extend( +- [ +- "--repo", +- repo if isinstance(repo, six.string_types) else six.text_type(repo), +- ] ++ ["--repo", repo if isinstance(repo, str) else str(repo),] + ) + log.debug("Targeting repos: %s", repos) + for update_node in ( +@@ -610,7 +601,7 @@ def info_installed(*names, **kwargs): + for _nfo in pkg_nfo: + t_nfo = dict() + # Translate dpkg-specific keys to a common structure +- for key, value in six.iteritems(_nfo): ++ for key, value in _nfo.items(): + if key == "source_rpm": + t_nfo["source"] = value + else: +@@ -1033,9 +1024,7 @@ def list_repo_pkgs(*args, **kwargs): + 
fromrepo = kwargs.pop("fromrepo", "") or "" + ret = {} + +- targets = [ +- arg if isinstance(arg, six.string_types) else six.text_type(arg) for arg in args +- ] ++ targets = [arg if isinstance(arg, str) else str(arg) for arg in args] + + def _is_match(pkgname): + """ +@@ -1124,7 +1113,7 @@ def _get_repo_info(alias, repos_cfg=None, root=None): + try: + meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias)) + meta["alias"] = alias +- for key, val in six.iteritems(meta): ++ for key, val in meta.items(): + if val in ["0", "1"]: + meta[key] = int(meta[key]) == 1 + elif val == "NONE": +@@ -1197,7 +1186,7 @@ def del_repo(repo, root=None): + "message": msg[0].childNodes[0].nodeValue, + } + +- raise CommandExecutionError("Repository '{0}' not found.".format(repo)) ++ raise CommandExecutionError("Repository '{}' not found.".format(repo)) + + + def mod_repo(repo, **kwargs): +@@ -1252,13 +1241,13 @@ def mod_repo(repo, **kwargs): + url = kwargs.get("url", kwargs.get("mirrorlist", kwargs.get("baseurl"))) + if not url: + raise CommandExecutionError( +- "Repository '{0}' not found, and neither 'baseurl' nor " ++ "Repository '{}' not found, and neither 'baseurl' nor " + "'mirrorlist' was specified".format(repo) + ) + + if not _urlparse(url).scheme: + raise CommandExecutionError( +- "Repository '{0}' not found and URL for baseurl/mirrorlist " ++ "Repository '{}' not found and URL for baseurl/mirrorlist " + "is malformed".format(repo) + ) + +@@ -1281,7 +1270,7 @@ def mod_repo(repo, **kwargs): + + if new_url == base_url: + raise CommandExecutionError( +- "Repository '{0}' already exists as '{1}'.".format(repo, alias) ++ "Repository '{}' already exists as '{}'.".format(repo, alias) + ) + + # Add new repo +@@ -1291,7 +1280,7 @@ def mod_repo(repo, **kwargs): + repos_cfg = _get_configured_repos(root=root) + if repo not in repos_cfg.sections(): + raise CommandExecutionError( +- "Failed add new repository '{0}' for unspecified reason. " ++ "Failed add new repository '{}' for unspecified reason. 
" + "Please check zypper logs.".format(repo) + ) + added = True +@@ -1327,12 +1316,10 @@ def mod_repo(repo, **kwargs): + cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck") + + if "priority" in kwargs: +- cmd_opt.append( +- "--priority={0}".format(kwargs.get("priority", DEFAULT_PRIORITY)) +- ) ++ cmd_opt.append("--priority={}".format(kwargs.get("priority", DEFAULT_PRIORITY))) + + if "humanname" in kwargs: +- cmd_opt.append("--name='{0}'".format(kwargs.get("humanname"))) ++ cmd_opt.append("--name='{}'".format(kwargs.get("humanname"))) + + if kwargs.get("gpgautoimport") is True: + global_cmd_opt.append("--gpg-auto-import-keys") +@@ -1589,7 +1576,7 @@ def install( + + if pkg_type == "repository": + targets = [] +- for param, version_num in six.iteritems(pkg_params): ++ for param, version_num in pkg_params.items(): + if version_num is None: + log.debug("targeting package: %s", param) + targets.append(param) +@@ -1597,7 +1584,7 @@ def install( + prefix, verstr = salt.utils.pkg.split_comparison(version_num) + if not prefix: + prefix = "=" +- target = "{0}{1}{2}".format(param, prefix, verstr) ++ target = "{}{}{}".format(param, prefix, verstr) + log.debug("targeting package: %s", target) + targets.append(target) + elif pkg_type == "advisory": +@@ -1606,7 +1593,7 @@ def install( + for advisory_id in pkg_params: + if advisory_id not in cur_patches: + raise CommandExecutionError( +- 'Advisory id "{0}" not found'.format(advisory_id) ++ 'Advisory id "{}" not found'.format(advisory_id) + ) + else: + # If we add here the `patch:` prefix, the +@@ -1703,7 +1690,7 @@ def install( + + if errors: + raise CommandExecutionError( +- "Problem encountered {0} package(s)".format( ++ "Problem encountered {} package(s)".format( + "downloading" if downloadonly else "installing" + ), + info={"errors": errors, "changes": ret}, +@@ -1797,7 +1784,7 @@ def upgrade( + cmd_update.append("--dry-run") + + if fromrepo: +- if isinstance(fromrepo, six.string_types): ++ if isinstance(fromrepo, str): + fromrepo = [fromrepo] + for repo in fromrepo: + cmd_update.extend(["--from" if dist_upgrade else "--repo", repo]) +@@ -2052,7 +2039,7 @@ def list_locks(root=None): + ) + if lock.get("solvable_name"): + locks[lock.pop("solvable_name")] = lock +- except IOError: ++ except OSError: + pass + except Exception: # pylint: disable=broad-except + log.warning("Detected a problem when accessing {}".format(_locks)) +@@ -2089,7 +2076,7 @@ def clean_locks(root=None): return out --def remove_lock(packages, **kwargs): # pylint: disable=unused-argument +-def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument +def unhold(name=None, pkgs=None, **kwargs): - ''' + """ Remove specified package lock. 
-@@ -1783,7 +1784,47 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument +@@ -2104,8 +2091,50 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume salt '*' pkg.remove_lock ,, salt '*' pkg.remove_lock pkgs='["foo", "bar"]' - ''' + """ + ret = {} + if (not name and not pkgs) or (name and pkgs): -+ raise CommandExecutionError('Name or packages must be specified.') ++ raise CommandExecutionError("Name or packages must be specified.") + elif name: + pkgs = [name] + + locks = list_locks() + try: -+ pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys()) ++ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) + except MinionError as exc: + raise CommandExecutionError(exc) + @@ -52,39 +333,44 @@ index 50279ccbd1..08a9c2ed4d 100644 + for pkg in pkgs: + if locks.get(pkg): + removed.append(pkg) -+ ret[pkg]['comment'] = 'Package {0} is no longer held.'.format(pkg) ++ ret[pkg]["comment"] = "Package {} is no longer held.".format(pkg) + else: + missing.append(pkg) -+ ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg) ++ ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg) + + if removed: -+ __zypper__.call('rl', *removed) ++ __zypper__.call("rl", *removed) + + return ret + + +def remove_lock(packages, **kwargs): # pylint: disable=unused-argument -+ ''' ++ """ + Remove specified package lock. + + CLI Example: + + .. code-block:: bash +- locks = list_locks(root) + salt '*' pkg.remove_lock + salt '*' pkg.remove_lock ,, + salt '*' pkg.remove_lock pkgs='["foo", "bar"]' -+ ''' -+ salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use unhold() instead.') - locks = list_locks() ++ """ ++ salt.utils.versions.warn_until( ++ "Sodium", "This function is deprecated. Please use unhold() instead." ++ ) ++ locks = list_locks() try: - packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys()) -@@ -1804,6 +1845,50 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument - return {'removed': len(removed), 'not_found': missing} + packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys()) + except MinionError as exc: +@@ -2125,7 +2154,51 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume + return {"removed": len(removed), "not_found": missing} +-def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument +def hold(name=None, pkgs=None, **kwargs): -+ ''' ++ """ + Add a package lock. Specify packages to lock by exact name. 
+ + CLI Example: @@ -99,46 +385,85 @@ index 50279ccbd1..08a9c2ed4d 100644 + :param pkgs: + :param kwargs: + :return: -+ ''' ++ """ + ret = {} + if (not name and not pkgs) or (name and pkgs): -+ raise CommandExecutionError('Name or packages must be specified.') ++ raise CommandExecutionError("Name or packages must be specified.") + elif name: + pkgs = [name] + + locks = list_locks() + added = [] + try: -+ pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys()) ++ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) + except MinionError as exc: + raise CommandExecutionError(exc) + + for pkg in pkgs: -+ ret[pkg] = {'name': pkg, 'changes': {}, 'result': False, 'comment': ''} ++ ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""} + if not locks.get(pkg): + added.append(pkg) -+ ret[pkg]['comment'] = 'Package {0} is now being held.'.format(pkg) ++ ret[pkg]["comment"] = "Package {} is now being held.".format(pkg) + else: -+ ret[pkg]['comment'] = 'Package {0} is already set to be held.'.format(pkg) ++ ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg) + + if added: -+ __zypper__.call('al', *added) ++ __zypper__.call("al", *added) + + return ret + + - def add_lock(packages, **kwargs): # pylint: disable=unused-argument - ''' ++def add_lock(packages, **kwargs): # pylint: disable=unused-argument + """ Add a package lock. Specify packages to lock by exact name. -@@ -1816,6 +1901,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument + +@@ -2140,7 +2213,10 @@ def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument salt '*' pkg.add_lock ,, salt '*' pkg.add_lock pkgs='["foo", "bar"]' - ''' -+ salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.') - locks = list_locks() + """ +- locks = list_locks(root) ++ salt.utils.versions.warn_until( ++ "Sodium", "This function is deprecated. Please use hold() instead." 
++ ) ++ locks = list_locks() added = [] try: + packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys()) +@@ -2495,7 +2571,7 @@ def search(criteria, refresh=False, **kwargs): + .getElementsByTagName("solvable") + ) + if not solvables: +- raise CommandExecutionError("No packages found matching '{0}'".format(criteria)) ++ raise CommandExecutionError("No packages found matching '{}'".format(criteria)) + + out = {} + for solvable in solvables: +@@ -2649,13 +2725,13 @@ def download(*packages, **kwargs): + if failed: + pkg_ret[ + "_error" +- ] = "The following package(s) failed to download: {0}".format( ++ ] = "The following package(s) failed to download: {}".format( + ", ".join(failed) + ) + return pkg_ret + + raise CommandExecutionError( +- "Unable to download packages: {0}".format(", ".join(packages)) ++ "Unable to download packages: {}".format(", ".join(packages)) + ) + + +@@ -2726,7 +2802,7 @@ def diff(*paths, **kwargs): + + if pkg_to_paths: + local_pkgs = __salt__["pkg.download"](*pkg_to_paths.keys(), **kwargs) +- for pkg, files in six.iteritems(pkg_to_paths): ++ for pkg, files in pkg_to_paths.items(): + for path in files: + ret[path] = ( + __salt__["lowpkg.diff"](local_pkgs[pkg]["path"], path) -- -2.16.4 +2.29.2 diff --git a/add-migrated-state-and-gpg-key-management-functions-.patch b/add-migrated-state-and-gpg-key-management-functions-.patch index cbcc578..5a81fa5 100644 --- a/add-migrated-state-and-gpg-key-management-functions-.patch +++ b/add-migrated-state-and-gpg-key-management-functions-.patch @@ -1,4 +1,4 @@ -From 5254ec34316a0924edb4856f84e6092fafe479fa Mon Sep 17 00:00:00 2001 +From 57cab2d4e282f8b1d17610e6b4a0e772494bfcb1 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 20 Oct 2020 11:43:09 +0200 Subject: [PATCH] Add "migrated" state and GPG key management functions @@ -20,17 +20,16 @@ same virtual package, based on the counterpart from rpm_lowpkg API. --- changelog/58782.added | 1 + salt/modules/aptpkg.py | 7 +- - salt/modules/rpm_lowpkg.py | 151 ++++++++ + salt/modules/rpm_lowpkg.py | 151 +++++++++ salt/modules/yumpkg.py | 88 +++++ - salt/modules/zypperpkg.py | 90 ++++- - salt/states/pkgrepo.py | 208 ++++++++++ - tests/unit/modules/test_rpm_lowpkg.py | 215 +++++++++++ - tests/unit/modules/test_yumpkg.py | 43 ++- - tests/unit/modules/test_zypperpkg.py | 40 +- - tests/unit/states/test_pkgrepo.py | 527 ++++++++++++++++++++++++++ - 10 files changed, 1363 insertions(+), 7 deletions(-) + salt/modules/zypperpkg.py | 88 +++++ + salt/states/pkgrepo.py | 207 ++++++++++++ + tests/unit/modules/test_rpm_lowpkg.py | 236 ++++++++++++- + tests/unit/modules/test_yumpkg.py | 41 ++- + tests/unit/modules/test_zypperpkg.py | 40 ++- + tests/unit/states/test_pkgrepo.py | 468 +++++++++++++++++++++++++- + 10 files changed, 1301 insertions(+), 26 deletions(-) create mode 100644 changelog/58782.added - create mode 100644 tests/unit/states/test_pkgrepo.py diff --git a/changelog/58782.added b/changelog/58782.added new file mode 100644 @@ -41,43 +40,43 @@ index 0000000000..f9e69f64f2 +Add GPG key functions in "lowpkg" and a "migrated" function in the "pkgrepo" state for repository and GPG key migration. 
\ No newline at end of file diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 765d69aff2..28b8597ef5 100644 +index e4a9872aad..e001d2f11c 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -1878,7 +1878,7 @@ def _convert_if_int(value): +@@ -1908,7 +1908,7 @@ def _convert_if_int(value): return value -def get_repo_keys(): +def get_repo_keys(**kwargs): - ''' + """ .. versionadded:: 2017.7.0 -@@ -1950,7 +1950,9 @@ def get_repo_keys(): +@@ -1990,7 +1990,9 @@ def get_repo_keys(): return ret --def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base'): +-def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base"): +def add_repo_key( -+ path=None, text=None, keyserver=None, keyid=None, saltenv='base', **kwargs ++ path=None, text=None, keyserver=None, keyid=None, saltenv="base", **kwargs +): - ''' + """ .. versionadded:: 2017.7.0 -@@ -1976,7 +1978,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base +@@ -2016,7 +2018,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA' - ''' - cmd = ['apt-key'] + """ + cmd = ["apt-key"] - kwargs = {} current_repo_keys = get_repo_keys() diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py -index c8a87276b2..fee0221a7c 100644 +index 393b0f453a..57f336bacf 100644 --- a/salt/modules/rpm_lowpkg.py +++ b/salt/modules/rpm_lowpkg.py -@@ -823,3 +823,154 @@ def checksum(*paths, **kwargs): - python_shell=False)) +@@ -835,3 +835,154 @@ def checksum(*paths, **kwargs): + ) return ret + @@ -232,12 +231,12 @@ index c8a87276b2..fee0221a7c 100644 + cmd.extend(["-e", key]) + return __salt__["cmd.retcode"](cmd) == 0 diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index 04ab240cd4..85a2dbd857 100644 +index c58b3e4c70..dd843f985b 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py -@@ -3271,3 +3271,91 @@ def list_installed_patches(**kwargs): +@@ -3346,3 +3346,91 @@ def list_installed_patches(**kwargs): salt '*' pkg.list_installed_patches - ''' + """ return _get_patches(installed_only=True) + + @@ -328,19 +327,10 @@ index 04ab240cd4..85a2dbd857 100644 + """ + return __salt__["lowpkg.remove_gpg_key"](keyid, root) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index d84a6af6e0..fab7736701 100644 +index d06c265202..5e13c68708 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -1270,7 +1270,7 @@ def mod_repo(repo, **kwargs): - cmd_opt.append("--priority={0}".format(kwargs.get('priority', DEFAULT_PRIORITY))) - - if 'humanname' in kwargs: -- cmd_opt.append("--name='{0}'".format(kwargs.get('humanname'))) -+ cmd_opt.extend(["--name", kwargs.get("humanname")]) - - if kwargs.get('gpgautoimport') is True: - global_cmd_opt.append('--gpg-auto-import-keys') -@@ -2879,3 +2879,91 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs): +@@ -3004,3 +3004,91 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs): else: ret.append(name) return ret @@ -433,27 +423,19 @@ index d84a6af6e0..fab7736701 100644 + """ + return __salt__["lowpkg.remove_gpg_key"](keyid, root) diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py -index c39e857580..6c42d17d32 100644 +index 70cb7a1c7e..d734bb9de9 100644 --- a/salt/states/pkgrepo.py +++ b/salt/states/pkgrepo.py -@@ -84,6 +84,7 @@ package managers are APT, DNF, YUM and Zypper. 
Here is some example SLS: +@@ -93,6 +93,7 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS: + """ + - # Import Python libs - from __future__ import absolute_import, print_function, unicode_literals +import os import sys - # Import salt libs -@@ -96,6 +97,7 @@ import salt.utils.pkg.rpm - - # Import 3rd-party libs - from salt.ext import six -+import salt.utils.versions - - - def __virtual__(): -@@ -643,3 +645,209 @@ def absent(name, **kwargs): - ret['comment'] = 'Failed to remove repo {0}'.format(name) + import salt.utils.data +@@ -679,3 +680,209 @@ def absent(name, **kwargs): + ret["comment"] = "Failed to remove repo {}".format(name) return ret + @@ -663,21 +645,49 @@ index c39e857580..6c42d17d32 100644 + + return ret diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py -index b6cbd9e5cb..ff3678fde5 100644 +index ec9ecd40cb..84020263ea 100644 --- a/tests/unit/modules/test_rpm_lowpkg.py +++ b/tests/unit/modules/test_rpm_lowpkg.py -@@ -5,6 +5,7 @@ +@@ -2,6 +2,7 @@ + :codeauthor: Jayesh Kariya + """ - # Import Python Libs - from __future__ import absolute_import +import datetime - # Import Salt Testing Libs + import salt.modules.rpm_lowpkg as rpm from tests.support.mixins import LoaderModuleMockMixin -@@ -205,3 +206,217 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin): - with patch('salt.modules.rpm_lowpkg.rpm.labelCompare', MagicMock(return_value=0)), \ - patch('salt.modules.rpm_lowpkg.HAS_RPM', False): - self.assertEqual(-1, rpm.version_cmp('1', '2')) # mock returns -1, a python implementation was called +@@ -15,8 +16,8 @@ def _called_with_root(mock): + + + def _called_with_root(mock): +- cmd = ' '.join(mock.call_args[0][0]) +- return cmd.startswith('rpm --root /') ++ cmd = " ".join(mock.call_args[0][0]) ++ return cmd.startswith("rpm --root /") + + + class RpmTestCase(TestCase, LoaderModuleMockMixin): +@@ -263,14 +264,223 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin): + + :return: + """ +- self.assertEqual(-1, rpm.version_cmp("1", "2")) +- self.assertEqual(mock_version_cmp.called, True) +- self.assertEqual(mock_log.warning.called, True) +- self.assertEqual( +- mock_log.warning.mock_calls[0][1][0], +- "Please install a package that provides rpm.labelCompare for more accurate version comparisons.", +- ) +- self.assertEqual( +- mock_log.warning.mock_calls[1][1][0], +- "Falling back on salt.utils.versions.version_cmp() for version comparisons", +- ) ++ with patch( ++ "salt.modules.rpm_lowpkg.rpm.labelCompare", MagicMock(return_value=0) ++ ), patch("salt.modules.rpm_lowpkg.HAS_RPM", False): ++ self.assertEqual( ++ -1, rpm.version_cmp("1", "2") ++ ) # mock returns -1, a python implementation was called + + def test_list_gpg_keys_no_info(self): + """ @@ -893,39 +903,31 @@ index b6cbd9e5cb..ff3678fde5 100644 + self.assertTrue(rpm.remove_gpg_key("gpg-pubkey-1")) + self.assertFalse(_called_with_root(mock)) diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py -index 9fbe3d051e..dfe00a7181 100644 +index 4784160d25..e65a1f8b8b 100644 --- a/tests/unit/modules/test_yumpkg.py +++ b/tests/unit/modules/test_yumpkg.py -@@ -10,15 +10,17 @@ from tests.support.unit import TestCase, skipIf - from tests.support.mock import ( - Mock, - MagicMock, -+ mock_open, - patch, - ) - - # Import Salt libs +@@ -5,9 +5,9 @@ import salt.modules.pkg_resource as pkg_resource + import salt.modules.rpm_lowpkg as rpm + import salt.modules.yumpkg as yumpkg + import salt.utils.platform -from salt.exceptions import 
CommandExecutionError +from salt.exceptions import CommandExecutionError, SaltInvocationError - import salt.modules.rpm_lowpkg as rpm - from salt.ext import six - import salt.modules.yumpkg as yumpkg - import salt.modules.pkg_resource as pkg_resource -+import salt.utils.platform + from tests.support.mixins import LoaderModuleMockMixin +-from tests.support.mock import MagicMock, Mock, patch ++from tests.support.mock import MagicMock, Mock, mock_open, patch + from tests.support.unit import TestCase, skipIf try: - import pytest -@@ -799,8 +801,45 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): - with pytest.raises(CommandExecutionError): - yumpkg._get_yum_config() +@@ -1630,6 +1630,43 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): + ret = yumpkg.get_repo(repo, **kwargs) + assert ret == expected, ret + def test_get_repo_keys(self): + salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)} + with patch.dict(yumpkg.__salt__, salt_mock): + self.assertTrue(yumpkg.get_repo_keys(info=True, root="/mnt")) + salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt") - --@skipIf(pytest is None, 'PyTest is missing') ++ + def test_add_repo_key_fail(self): + with self.assertRaises(SaltInvocationError): + yumpkg.add_repo_key() @@ -957,25 +959,23 @@ index 9fbe3d051e..dfe00a7181 100644 + self.assertTrue(yumpkg.del_repo_key(keyid="keyid", root="/mnt")) + salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") + -+ -+@skipIf(pytest is None, "PyTest is missing") + + @skipIf(pytest is None, "PyTest is missing") class YumUtilsTestCase(TestCase, LoaderModuleMockMixin): - ''' - Yum/Dnf utils tests. diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 8cc84485b5..1f2a7dc4b2 100644 +index eaa4d9a76a..018c1ffbca 100644 --- a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -22,7 +22,7 @@ from tests.support.mock import ( - import salt.utils.files +@@ -10,7 +10,7 @@ import salt.modules.pkg_resource as pkg_resource import salt.modules.zypperpkg as zypper - import salt.modules.pkg_resource as pkg_resource + import salt.utils.files + import salt.utils.pkg -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, SaltInvocationError - - # Import 3rd-party libs + from salt.ext import six from salt.ext.six.moves import configparser -@@ -1728,3 +1728,41 @@ pattern() = package-c""" + from tests.support.mixins import LoaderModuleMockMixin +@@ -2175,3 +2175,41 @@ pattern() = package-c""" python_shell=False, env={"ZYPP_READONLY_HACK": "1"}, ) @@ -1018,79 +1018,33 @@ index 8cc84485b5..1f2a7dc4b2 100644 + self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt")) + salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") diff --git a/tests/unit/states/test_pkgrepo.py b/tests/unit/states/test_pkgrepo.py -new file mode 100644 -index 0000000000..9d8d88abd9 ---- /dev/null +index b2be5b4da1..135e545220 100644 +--- a/tests/unit/states/test_pkgrepo.py +++ b/tests/unit/states/test_pkgrepo.py -@@ -0,0 +1,527 @@ -+""" -+ :codeauthor: Tyler Johnson -+""" -+import salt.states.pkgrepo as pkgrepo +@@ -1,17 +1,12 @@ +-# -*- coding: utf-8 -*- + """ + :codeauthor: Tyler Johnson + """ +-# Import Python libs +-from __future__ import absolute_import + +-# Import Salt Libs + import salt.states.pkgrepo as pkgrepo +- +-# Import Salt Testing Libs +import salt.utils.platform -+from tests.support.mixins import LoaderModuleMockMixin -+from 
tests.support.mock import MagicMock, patch + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, patch +-from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf -+ -+ -+class PkgrepoTestCase(TestCase, LoaderModuleMockMixin): -+ """ -+ Test cases for salt.states.pkgrepo -+ """ -+ -+ def setup_loader_modules(self): -+ return { -+ pkgrepo: { -+ "__opts__": {"test": True}, -+ "__grains__": {"os": "", "os_family": ""}, -+ } -+ } -+ -+ def test_new_key_url(self): -+ """ -+ Test when only the key_url is changed that a change is triggered -+ """ -+ kwargs = { -+ "name": "deb http://mock/ sid main", -+ "disabled": False, -+ } -+ key_url = "http://mock/changed_gpg.key" -+ -+ with patch.dict( -+ pkgrepo.__salt__, {"pkg.get_repo": MagicMock(return_value=kwargs)} -+ ): -+ ret = pkgrepo.managed(key_url=key_url, **kwargs) -+ self.assertDictEqual( -+ {"key_url": {"old": None, "new": key_url}}, ret["changes"] -+ ) -+ -+ def test_update_key_url(self): -+ """ -+ Test when only the key_url is changed that a change is triggered -+ """ -+ kwargs = { -+ "name": "deb http://mock/ sid main", -+ "gpgcheck": 1, -+ "disabled": False, -+ "key_url": "http://mock/gpg.key", -+ } -+ changed_kwargs = kwargs.copy() -+ changed_kwargs["key_url"] = "http://mock/gpg2.key" -+ -+ with patch.dict( -+ pkgrepo.__salt__, {"pkg.get_repo": MagicMock(return_value=kwargs)} -+ ): -+ ret = pkgrepo.managed(**changed_kwargs) -+ self.assertIn("key_url", ret["changes"], "Expected a change to key_url") -+ self.assertDictEqual( -+ { -+ "key_url": { -+ "old": kwargs["key_url"], -+ "new": changed_kwargs["key_url"], -+ } -+ }, -+ ret["changes"], -+ ) + + + class PkgrepoTestCase(TestCase, LoaderModuleMockMixin): +@@ -72,3 +67,462 @@ class PkgrepoTestCase(TestCase, LoaderModuleMockMixin): + }, + ret["changes"], + ) + + def test__normalize_repo_suse(self): + repo = { @@ -1551,6 +1505,6 @@ index 0000000000..9d8d88abd9 + }, + ) -- -2.29.1 +2.29.2 diff --git a/add-missing-_utils-at-loader-grains_func.patch b/add-missing-_utils-at-loader-grains_func.patch deleted file mode 100644 index f422eea..0000000 --- a/add-missing-_utils-at-loader-grains_func.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 082fa07e5301414b5b834b731aaa96bd5d966de7 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Tue, 10 Mar 2020 13:16:05 +0000 -Subject: [PATCH] Add missing _utils at loader grains_func - ---- - salt/loader.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/salt/loader.py b/salt/loader.py -index c68562988d..742b2f8e22 100644 ---- a/salt/loader.py -+++ b/salt/loader.py -@@ -683,6 +683,7 @@ def grain_funcs(opts, proxy=None): - __opts__ = salt.config.minion_config('/etc/salt/minion') - grainfuncs = salt.loader.grain_funcs(__opts__) - ''' -+ _utils = utils(opts) - ret = LazyLoader( - _module_dirs( - opts, --- -2.23.0 - - diff --git a/add-missing-fun-for-returns-from-wfunc-executions.patch b/add-missing-fun-for-returns-from-wfunc-executions.patch deleted file mode 100644 index 585f69a..0000000 --- a/add-missing-fun-for-returns-from-wfunc-executions.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 5c25babafd4e4bbe55626713851ea5d6345c43d1 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 9 Oct 2019 13:03:33 +0100 -Subject: [PATCH] Add missing 'fun' for returns from wfunc executions - ---- - salt/client/ssh/__init__.py | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/salt/client/ssh/__init__.py 
b/salt/client/ssh/__init__.py -index 4881540837..1373274739 100644 ---- a/salt/client/ssh/__init__.py -+++ b/salt/client/ssh/__init__.py -@@ -682,6 +682,8 @@ class SSH(object): - data = {'return': data} - if 'id' not in data: - data['id'] = id_ -+ if 'fun' not in data: -+ data['fun'] = fun - data['jid'] = jid # make the jid in the payload the same as the jid in the tag - self.event.fire_event( - data, -@@ -797,6 +799,8 @@ class SSH(object): - data = {'return': data} - if 'id' not in data: - data['id'] = id_ -+ if 'fun' not in data: -+ data['fun'] = fun - data['jid'] = jid # make the jid in the payload the same as the jid in the tag - self.event.fire_event( - data, --- -2.16.4 - - diff --git a/add-multi-file-support-and-globbing-to-the-filetree-.patch b/add-multi-file-support-and-globbing-to-the-filetree-.patch index 04c883b..dae5602 100644 --- a/add-multi-file-support-and-globbing-to-the-filetree-.patch +++ b/add-multi-file-support-and-globbing-to-the-filetree-.patch @@ -1,4 +1,4 @@ -From 0a6b5e92a4a74dee94eb33a939600f8c2e429c01 Mon Sep 17 00:00:00 2001 +From c5e5dc304e897f8c1664cce29fe9ee63d84f3ae6 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 12 Oct 2018 16:20:40 +0200 Subject: [PATCH] Add multi-file support and globbing to the filetree @@ -12,37 +12,37 @@ Collect system logs and boot logs Support globbing in filetree --- - salt/cli/support/intfunc.py | 49 +++++++++++++++++++++-------------- - salt/cli/support/profiles/default.yml | 7 +++++ + salt/cli/support/intfunc.py | 49 ++++++++++++++++----------- + salt/cli/support/profiles/default.yml | 7 ++++ 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py -index 2727cd6394..f15f4d4097 100644 +index d3d8f83cb8..a9f76a6003 100644 --- a/salt/cli/support/intfunc.py +++ b/salt/cli/support/intfunc.py -@@ -6,6 +6,7 @@ Internal functions. +@@ -3,6 +3,7 @@ Internal functions. + """ + # Maybe this needs to be a modules in a future? - from __future__ import absolute_import, print_function, unicode_literals - import os +import glob - from salt.cli.support.console import MessagesOutput - import salt.utils.files + import os -@@ -13,7 +14,7 @@ import salt.utils.files + import salt.utils.files +@@ -11,7 +12,7 @@ from salt.cli.support.console import MessagesOutput out = MessagesOutput() -def filetree(collector, path): +def filetree(collector, *paths): - ''' + """ Add all files in the tree. If the "path" is a file, only that file will be added. -@@ -21,22 +22,32 @@ def filetree(collector, path): +@@ -19,22 +20,32 @@ def filetree(collector, path): :param path: File or directory :return: - ''' + """ - if not path: -- out.error('Path not defined', ident=2) +- out.error("Path not defined", ident=2) - else: - # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. 
- # pylint: disable=W8470 @@ -50,7 +50,7 @@ index 2727cd6394..f15f4d4097 100644 - filename = os.path.basename(path) - try: - file_ref = salt.utils.files.fopen(path) # pylint: disable=W -- out.put('Add {}'.format(filename), indent=2) +- out.put("Add {}".format(filename), indent=2) - collector.add(filename) - collector.link(title=path, path=file_ref) - except Exception as err: @@ -62,9 +62,9 @@ index 2727cd6394..f15f4d4097 100644 + _paths += glob.glob(path) + for path in set(_paths): + if not path: -+ out.error('Path not defined', ident=2) ++ out.error("Path not defined", ident=2) + elif not os.path.exists(path): -+ out.warning('Path {} does not exists'.format(path)) ++ out.warning("Path {} does not exists".format(path)) else: - for fname in os.listdir(path): - fname = os.path.join(path, fname) @@ -75,7 +75,7 @@ index 2727cd6394..f15f4d4097 100644 + filename = os.path.basename(path) + try: + file_ref = salt.utils.files.fopen(path) # pylint: disable=W -+ out.put('Add {}'.format(filename), indent=2) ++ out.put("Add {}".format(filename), indent=2) + collector.add(filename) + collector.link(title=path, path=file_ref) + except Exception as err: @@ -111,6 +111,6 @@ index 01d9a26193..3defb5eef3 100644 + - /var/log/messages -- -2.16.4 +2.29.2 diff --git a/add-new-custom-suse-capability-for-saltutil-state-mo.patch b/add-new-custom-suse-capability-for-saltutil-state-mo.patch index 1428332..537fcf9 100644 --- a/add-new-custom-suse-capability-for-saltutil-state-mo.patch +++ b/add-new-custom-suse-capability-for-saltutil-state-mo.patch @@ -1,4 +1,4 @@ -From ad1323b4f83fa8f2954c0a965f4acaf91575a59b Mon Sep 17 00:00:00 2001 +From 70d13dcc62286d5195bbf28b53aae61616cc0f8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 26 Mar 2020 13:08:16 +0000 @@ -10,10 +10,10 @@ Subject: [PATCH] Add new custom SUSE capability for saltutil state 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index 1082b05dba7830ee53078cff86b5183b5eea2829..b30ab0091fee7cda8f74b861e9e9f95f8ad85b39 100644 +index ddc22293ea..0eec27e628 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py -@@ -80,5 +80,6 @@ def config(): +@@ -71,5 +71,6 @@ def config(): def suse_backported_capabilities(): return { '__suse_reserved_pkg_all_versions_support': True, @@ -22,6 +22,6 @@ index 1082b05dba7830ee53078cff86b5183b5eea2829..b30ab0091fee7cda8f74b861e9e9f95f + '__suse_reserved_saltutil_states_support': True } -- -2.23.0 +2.29.2 diff --git a/add-publish_batch-to-clearfuncs-exposed-methods.patch b/add-publish_batch-to-clearfuncs-exposed-methods.patch index b918dd0..a572ef1 100644 --- a/add-publish_batch-to-clearfuncs-exposed-methods.patch +++ b/add-publish_batch-to-clearfuncs-exposed-methods.patch @@ -1,27 +1,26 @@ -From da936daeebd701e147707ad814c07bfc259d4be4 Mon Sep 17 00:00:00 2001 +From 2422d30358bcd0f96e399e623136f7984d136b38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 28 May 2020 09:37:08 +0100 Subject: [PATCH] Add publish_batch to ClearFuncs exposed methods --- - salt/master.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + salt/master.py | 1 + + 1 file changed, 1 insertion(+) diff --git a/salt/master.py b/salt/master.py -index 485c16029b12fc38fc88b54aba95f03aa95d14ee..7d7a094a1a212180bfb294df3ad8b38477981450 100644 +index ab85c7f5c6..59bb19ce75 100644 --- a/salt/master.py +++ b/salt/master.py -@@ -1906,7 +1906,7 @@ class ClearFuncs(TransportMethods): - # These methods will be exposed to the 
transport layer by - # MWorker._handle_clear +@@ -2042,6 +2042,7 @@ class ClearFuncs(TransportMethods): expose_methods = ( -- 'ping', 'publish', 'get_token', 'mk_token', 'wheel', 'runner', -+ 'ping', 'publish', 'publish_batch', 'get_token', 'mk_token', 'wheel', 'runner', - ) - - # The ClearFuncs object encapsulates the functions that can be executed in + "ping", + "publish", ++ "publish_batch", + "get_token", + "mk_token", + "wheel", -- -2.23.0 +2.29.2 diff --git a/add-saltssh-multi-version-support-across-python-inte.patch b/add-saltssh-multi-version-support-across-python-inte.patch index c09d327..19c92ce 100644 --- a/add-saltssh-multi-version-support-across-python-inte.patch +++ b/add-saltssh-multi-version-support-across-python-inte.patch @@ -1,4 +1,4 @@ -From 369567107fa18187f8cbc5040728037d0774287b Mon Sep 17 00:00:00 2001 +From 99aa26e7ab4840cf38f54e7692d7d1eede3adeb4 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Mar 2018 12:01:39 +0100 Subject: [PATCH] Add SaltSSH multi-version support across Python @@ -254,10 +254,9 @@ Lintfix Set master_top_first to False by default --- - doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++++++++++ - salt/client/ssh/ssh_py_shim.py | 4 + - salt/utils/thin.py | 1 + - 3 files changed, 183 insertions(+) + doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++ + salt/client/ssh/ssh_py_shim.py | 3 + + 2 files changed, 181 insertions(+) create mode 100644 doc/topics/releases/fluorine.rst diff --git a/doc/topics/releases/fluorine.rst b/doc/topics/releases/fluorine.rst @@ -445,39 +444,20 @@ index 0000000000..40c69e25cc +Salt version is also available on the Master machine, although does not need to be directly +installed together with the older Python interpreter. diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py -index cd7549a178..95b3931a32 100644 +index c0ce0fd7de..5ddd282ed0 100644 --- a/salt/client/ssh/ssh_py_shim.py +++ b/salt/client/ssh/ssh_py_shim.py -@@ -165,6 +165,9 @@ def unpack_thin(thin_path): +@@ -171,6 +171,9 @@ def unpack_thin(thin_path): old_umask = os.umask(0o077) # pylint: disable=blacklisted-function tfile.extractall(path=OPTIONS.saltdir) tfile.close() + checksum_path = os.path.normpath(os.path.join(OPTIONS.saltdir, "thin_checksum")) -+ with open(checksum_path, 'w') as chk: -+ chk.write(OPTIONS.checksum + '\n') ++ with open(checksum_path, "w") as chk: ++ chk.write(OPTIONS.checksum + "\n") os.umask(old_umask) # pylint: disable=blacklisted-function try: os.unlink(thin_path) -@@ -358,5 +361,6 @@ def main(argv): # pylint: disable=W0613 - return retcode - - -+ - if __name__ == '__main__': - sys.exit(main(sys.argv)) -diff --git a/salt/utils/thin.py b/salt/utils/thin.py -index 8496db9569..0ff31cef39 100644 ---- a/salt/utils/thin.py -+++ b/salt/utils/thin.py -@@ -9,6 +9,7 @@ from __future__ import absolute_import, print_function, unicode_literals - import copy - import logging - import os -+import copy - import shutil - import subprocess - import sys -- -2.16.4 +2.29.2 diff --git a/add-standalone-configuration-file-for-enabling-packa.patch b/add-standalone-configuration-file-for-enabling-packa.patch index a6634a5..5eb1546 100644 --- a/add-standalone-configuration-file-for-enabling-packa.patch +++ b/add-standalone-configuration-file-for-enabling-packa.patch @@ -1,9 +1,9 @@ -From 717c9bc6cb81994c5f23de87cfa91112fa7bf89c Mon Sep 17 00:00:00 2001 +From 8ad65d6fa39edc7fc1967e2df1f3db0aa7df4d11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: 
Wed, 22 May 2019 13:00:46 +0100 -Subject: [PATCH] Add standalone configuration file for enabling package - formulas +Subject: [PATCH] Add standalone configuration file for enabling + package formulas --- conf/suse/standalone-formulas-configuration.conf | 4 ++++ @@ -21,6 +21,6 @@ index 0000000000..94d05fb2ee + - /usr/share/salt-formulas/states + - /srv/salt -- -2.16.4 +2.29.2 diff --git a/add-supportconfig-module-for-remote-calls-and-saltss.patch b/add-supportconfig-module-for-remote-calls-and-saltss.patch index 45e7eb0..d03d6fe 100644 --- a/add-supportconfig-module-for-remote-calls-and-saltss.patch +++ b/add-supportconfig-module-for-remote-calls-and-saltss.patch @@ -1,4 +1,4 @@ -From f4388ef82b5053e9996272b182c29a2da21a6258 Mon Sep 17 00:00:00 2001 +From 9fba801c1e1e6136808dca80ccd7524ed483250e Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 19 Oct 2018 15:44:47 +0200 Subject: [PATCH] Add supportconfig module for remote calls and SaltSSH @@ -195,70 +195,98 @@ Remove unused import Check last function by full name --- + doc/ref/modules/all/index.rst | 1 + + doc/ref/states/all/index.rst | 1 + salt/cli/support/__init__.py | 2 +- - salt/cli/support/collector.py | 12 +- + salt/cli/support/collector.py | 14 +- salt/loader.py | 6 +- - salt/modules/saltsupport.py | 381 +++++++++++++++++++++++++++++++ - salt/state.py | 34 ++- - salt/states/saltsupport.py | 206 +++++++++++++++++ - salt/utils/args.py | 6 +- - salt/utils/decorators/__init__.py | 24 ++ - tests/unit/modules/test_saltsupport.py | 394 +++++++++++++++++++++++++++++++++ - 9 files changed, 1044 insertions(+), 21 deletions(-) + salt/modules/saltsupport.py | 405 ++++++++++++++++++++ + salt/state.py | 38 +- + salt/states/saltsupport.py | 225 +++++++++++ + salt/utils/args.py | 23 +- + salt/utils/decorators/__init__.py | 68 ++-- + tests/unit/modules/test_saltsupport.py | 496 +++++++++++++++++++++++++ + 11 files changed, 1220 insertions(+), 59 deletions(-) create mode 100644 salt/modules/saltsupport.py create mode 100644 salt/states/saltsupport.py create mode 100644 tests/unit/modules/test_saltsupport.py +diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst +index 4c93972276..9fea7af07f 100644 +--- a/doc/ref/modules/all/index.rst ++++ b/doc/ref/modules/all/index.rst +@@ -415,6 +415,7 @@ execution modules + salt_version + saltcheck + saltcloudmod ++ saltsupport + saltutil + schedule + scp_mod +diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst +index 2664b4ce45..052efe4582 100644 +--- a/doc/ref/states/all/index.rst ++++ b/doc/ref/states/all/index.rst +@@ -281,6 +281,7 @@ state modules + rvm + salt_proxy + saltmod ++ saltsupport + saltutil + schedule + selinux diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py -index 6a98a2d656..0a48b0a081 100644 +index 4fdf44186f..59c2609e07 100644 --- a/salt/cli/support/__init__.py +++ b/salt/cli/support/__init__.py -@@ -40,7 +40,7 @@ def get_profile(profile, caller, runner): +@@ -47,7 +47,7 @@ def get_profile(profile, caller, runner): if os.path.exists(profile_path): try: rendered_template = _render_profile(profile_path, caller, runner) -- log.trace('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template)) -+ log.debug('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template)) +- log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) ++ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) data.update(yaml.load(rendered_template)) except Exception as ex: log.debug(ex, exc_info=True) diff --git 
a/salt/cli/support/collector.py b/salt/cli/support/collector.py -index a4343297b6..cbae189aea 100644 +index a08a0b8c6e..1879cc5220 100644 --- a/salt/cli/support/collector.py +++ b/salt/cli/support/collector.py -@@ -354,7 +354,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): +@@ -362,7 +362,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): return data - def collect_local_data(self): + def collect_local_data(self, profile=None, profile_source=None): - ''' + """ Collects master system data. :return: -@@ -375,7 +375,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - ''' - return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs})) +@@ -388,8 +388,8 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): + self._local_run({"fun": func, "arg": args, "kwarg": kwargs}) + ) -- scenario = salt.cli.support.get_profile(self.config['support_profile'], call, run) -+ scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run) +- scenario = salt.cli.support.get_profile( +- self.config["support_profile"], call, run ++ scenario = profile_source or salt.cli.support.get_profile( ++ profile or self.config["support_profile"], call, run + ) for category_name in scenario: self.out.put(category_name) - self.collector.add(category_name) -@@ -415,13 +415,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): +@@ -441,13 +441,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - return action_name.split(':')[0] or None + return action_name.split(":")[0] or None - def collect_targets_data(self): -- ''' +- """ - Collects minion targets data - :return: -- ''' +- """ - # TODO: remote collector? - def _cleanup(self): - ''' + """ Cleanup if crash/exception -@@ -511,7 +504,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): +@@ -551,7 +544,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): self.collector.open() self.collect_local_data() self.collect_internal_data() @@ -267,16 +295,16 @@ index a4343297b6..cbae189aea 100644 archive_path = self.collector.archive_path diff --git a/salt/loader.py b/salt/loader.py -index 428fb338c9..860162b791 100644 +index 8232ed632e..1ee40712e5 100644 --- a/salt/loader.py +++ b/salt/loader.py -@@ -1727,8 +1727,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): - )) +@@ -1843,8 +1843,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): + } - for attr in getattr(mod, '__load__', dir(mod)): -- if attr.startswith('_'): + for attr in getattr(mod, "__load__", dir(mod)): +- if attr.startswith("_"): - # private functions are skipped -+ if attr.startswith('_') and attr != '__call__': ++ if attr.startswith("_") and attr != "__call__": + # private functions are skipped, + # except __call__ which is default entrance + # for multi-function batch-like state syntax @@ -285,11 +313,10 @@ index 428fb338c9..860162b791 100644 if not inspect.isfunction(func) and not isinstance(func, functools.partial): diff --git a/salt/modules/saltsupport.py b/salt/modules/saltsupport.py new file mode 100644 -index 0000000000..750b2655d6 +index 0000000000..e800e3bf1f --- /dev/null +++ b/salt/modules/saltsupport.py -@@ -0,0 +1,381 @@ -+# -*- coding: utf-8 -*- +@@ -0,0 +1,405 @@ +# +# Author: Bo Maryniuk +# @@ -305,50 +332,55 @@ index 0000000000..750b2655d6 +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. -+''' ++""" +:codeauthor: :email:`Bo Maryniuk ` + +Module to run salt-support within Salt. -+''' ++""" +# pylint: disable=W0231,W0221 + -+from __future__ import unicode_literals, print_function, absolute_import + -+import tempfile -+import re -+import os -+import sys -+import time +import datetime +import logging ++import os ++import re ++import sys ++import tempfile ++import time + -+import salt.cli.support.intfunc -+import salt.utils.decorators -+import salt.utils.path +import salt.cli.support -+import salt.exceptions -+import salt.utils.stringutils ++import salt.cli.support.intfunc +import salt.defaults.exitcodes -+import salt.utils.odict ++import salt.exceptions ++import salt.utils.decorators +import salt.utils.dictupdate -+ ++import salt.utils.odict ++import salt.utils.path ++import salt.utils.stringutils +from salt.cli.support.collector import SaltSupport, SupportDataCollector + -+__virtualname__ = 'support' ++__virtualname__ = "support" +log = logging.getLogger(__name__) + + -+class LogCollector(object): -+ ''' ++class LogCollector: ++ """ + Output collector. -+ ''' -+ INFO = 'info' -+ WARNING = 'warning' -+ ERROR = 'error' ++ """ ++ ++ INFO = "info" ++ WARNING = "warning" ++ ERROR = "error" + + class MessagesList(list): + def append(self, obj): -+ list.append(self, '{} - {}'.format(datetime.datetime.utcnow().strftime('%T.%f')[:-3], obj)) ++ list.append( ++ self, ++ "{} - {}".format( ++ datetime.datetime.utcnow().strftime("%T.%f")[:-3], obj ++ ), ++ ) ++ + __call__ = append + + def __init__(self): @@ -359,9 +391,9 @@ index 0000000000..750b2655d6 + } + + def msg(self, message, *args, **kwargs): -+ title = kwargs.get('title') ++ title = kwargs.get("title") + if title: -+ message = '{}: {}'.format(title, message) ++ message = "{}: {}".format(title, message) + self.messages[self.INFO](message) + + def info(self, message, *args, **kwargs): @@ -381,64 +413,69 @@ index 0000000000..750b2655d6 + + +class SaltSupportModule(SaltSupport): -+ ''' ++ """ + Salt Support module class. -+ ''' ++ """ ++ + def __init__(self): -+ ''' ++ """ + Constructor -+ ''' ++ """ + self.config = self.setup_config() + + def setup_config(self): -+ ''' ++ """ + Return current configuration + :return: -+ ''' ++ """ + return __opts__ + + def _get_archive_name(self, archname=None): -+ ''' ++ """ + Create default archive name. + + :return: -+ ''' -+ archname = re.sub('[^a-z0-9]', '', (archname or '').lower()) or 'support' -+ for grain in ['fqdn', 'host', 'localhost', 'nodename']: ++ """ ++ archname = re.sub("[^a-z0-9]", "", (archname or "").lower()) or "support" ++ for grain in ["fqdn", "host", "localhost", "nodename"]: + host = __grains__.get(grain) + if host: + break + if not host: -+ host = 'localhost' ++ host = "localhost" + -+ return os.path.join(tempfile.gettempdir(), -+ '{hostname}-{archname}-{date}-{time}.bz2'.format(archname=archname, -+ hostname=host, -+ date=time.strftime('%Y%m%d'), -+ time=time.strftime('%H%M%S'))) ++ return os.path.join( ++ tempfile.gettempdir(), ++ "{hostname}-{archname}-{date}-{time}.bz2".format( ++ archname=archname, ++ hostname=host, ++ date=time.strftime("%Y%m%d"), ++ time=time.strftime("%H%M%S"), ++ ), ++ ) + + @salt.utils.decorators.external + def profiles(self): -+ ''' ++ """ + Get list of profiles. 
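+
+        A minimal CLI sketch (assuming the module is loaded under its
+        ``support`` virtual name; the target glob is illustrative):
+
+        .. code-block:: bash
+
+            salt '*' support.profiles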
+ + :return: -+ ''' ++ """ + return { -+ 'standard': salt.cli.support.get_profiles(self.config), -+ 'custom': [], ++ "standard": salt.cli.support.get_profiles(self.config), ++ "custom": [], + } + + @salt.utils.decorators.external + def archives(self): -+ ''' ++ """ + Get list of existing archives. + :return: -+ ''' ++ """ + arc_files = [] + tmpdir = tempfile.gettempdir() + for filename in os.listdir(tmpdir): -+ mtc = re.match(r'\w+-\w+-\d+-\d+\.bz2', filename) ++ mtc = re.match(r"\w+-\w+-\d+-\d+\.bz2", filename) + if mtc and len(filename) == mtc.span()[-1]: + arc_files.append(os.path.join(tmpdir, filename)) + @@ -446,29 +483,29 @@ index 0000000000..750b2655d6 + + @salt.utils.decorators.external + def last_archive(self): -+ ''' ++ """ + Get the last available archive + :return: -+ ''' ++ """ + archives = {} + for archive in self.archives(): -+ archives[int(archive.split('.')[0].split('-')[-1])] = archive ++ archives[int(archive.split(".")[0].split("-")[-1])] = archive + + return archives and archives[max(archives)] or None + + @salt.utils.decorators.external + def delete_archives(self, *archives): -+ ''' ++ """ + Delete archives + :return: -+ ''' ++ """ + # Remove paths + _archives = [] + for archive in archives: + _archives.append(os.path.basename(archive)) + archives = _archives[:] + -+ ret = {'files': {}, 'errors': {}} ++ ret = {"files": {}, "errors": {}} + for archive in self.archives(): + arc_dir = os.path.dirname(archive) + archive = os.path.basename(archive) @@ -476,43 +513,43 @@ index 0000000000..750b2655d6 + archive = os.path.join(arc_dir, archive) + try: + os.unlink(archive) -+ ret['files'][archive] = 'removed' ++ ret["files"][archive] = "removed" + except Exception as err: -+ ret['errors'][archive] = str(err) -+ ret['files'][archive] = 'left' ++ ret["errors"][archive] = str(err) ++ ret["files"][archive] = "left" + + return ret + + def format_sync_stats(self, cnt): -+ ''' ++ """ + Format stats of the sync output. + + :param cnt: + :return: -+ ''' ++ """ + stats = salt.utils.odict.OrderedDict() -+ if cnt.get('retcode') == salt.defaults.exitcodes.EX_OK: -+ for line in cnt.get('stdout', '').split(os.linesep): -+ line = line.split(': ') ++ if cnt.get("retcode") == salt.defaults.exitcodes.EX_OK: ++ for line in cnt.get("stdout", "").split(os.linesep): ++ line = line.split(": ") + if len(line) == 2: -+ stats[line[0].lower().replace(' ', '_')] = line[1] -+ cnt['transfer'] = stats -+ del cnt['stdout'] ++ stats[line[0].lower().replace(" ", "_")] = line[1] ++ cnt["transfer"] = stats ++ del cnt["stdout"] + + # Remove empty + empty_sections = [] + for section in cnt: -+ if not cnt[section] and section != 'retcode': ++ if not cnt[section] and section != "retcode": + empty_sections.append(section) + for section in empty_sections: + del cnt[section] + + return cnt + -+ @salt.utils.decorators.depends('rsync') ++ @salt.utils.decorators.depends("rsync") + @salt.utils.decorators.external + def sync(self, group, name=None, host=None, location=None, move=False, all=False): -+ ''' ++ """ + Sync the latest archive to the host on given location. + + CLI Example: @@ -532,7 +569,7 @@ index 0000000000..750b2655d6 + :param all: work with all available archives. Default is False (i.e. latest available) + + :return: -+ ''' ++ """ + tfh, tfn = tempfile.mkstemp() + processed_archives = [] + src_uri = uri = None @@ -550,7 +587,7 @@ index 0000000000..750b2655d6 + for name in archives: + err = None + if not name: -+ err = 'No support archive has been defined.' ++ err = "No support archive has been defined." 
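+            # Each requested archive is validated up front, before the
+            # rsync transfer list is built; a missing or unknown archive
+            # aborts the whole sync with SaltInvocationError below.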
+ elif not os.path.exists(name): + err = 'Support archive "{}" was not found'.format(name) + if err is not None: @@ -559,38 +596,46 @@ index 0000000000..750b2655d6 + + if not uri: + src_uri = os.path.dirname(name) -+ uri = '{host}:{loc}'.format(host=host or __opts__['master'], -+ loc=os.path.join(location or tempfile.gettempdir(), group)) ++ uri = "{host}:{loc}".format( ++ host=host or __opts__["master"], ++ loc=os.path.join(location or tempfile.gettempdir(), group), ++ ) + + os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name))) + os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep)) + processed_archives.append(name) -+ log.debug('Syncing {filename} to {uri}'.format(filename=name, uri=uri)) ++ log.debug("Syncing {filename} to {uri}".format(filename=name, uri=uri)) + os.close(tfh) + + if not processed_archives: -+ raise salt.exceptions.SaltInvocationError('No archives found to transfer.') ++ raise salt.exceptions.SaltInvocationError("No archives found to transfer.") + -+ ret = __salt__['rsync.rsync'](src=src_uri, dst=uri, additional_opts=['--stats', '--files-from={}'.format(tfn)]) -+ ret['files'] = {} ++ ret = __salt__["rsync.rsync"]( ++ src=src_uri, ++ dst=uri, ++ additional_opts=["--stats", "--files-from={}".format(tfn)], ++ ) ++ ret["files"] = {} + for name in processed_archives: + if move: + salt.utils.dictupdate.update(ret, self.delete_archives(name)) -+ log.debug('Deleting {filename}'.format(filename=name)) -+ ret['files'][name] = 'moved' ++ log.debug("Deleting {filename}".format(filename=name)) ++ ret["files"][name] = "moved" + else: -+ ret['files'][name] = 'copied' ++ ret["files"][name] = "copied" + + try: + os.unlink(tfn) -+ except (OSError, IOError) as err: -+ log.error('Cannot remove temporary rsync file {fn}: {err}'.format(fn=tfn, err=err)) ++ except OSError as err: ++ log.error( ++ "Cannot remove temporary rsync file {fn}: {err}".format(fn=tfn, err=err) ++ ) + + return self.format_sync_stats(ret) + + @salt.utils.decorators.external -+ def run(self, profile='default', pillar=None, archive=None, output='nested'): -+ ''' ++ def run(self, profile="default", pillar=None, archive=None, output="nested"): ++ """ + Run Salt Support on the minion. 
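+
+        Returns a mapping with the path of the generated ``archive`` and
+        the collected log ``messages``.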
+ + profile @@ -612,11 +657,13 @@ index 0000000000..750b2655d6 + salt '*' support.run + salt '*' support.run profile=network + salt '*' support.run pillar=something_special -+ ''' -+ class outputswitch(object): -+ ''' ++ """ ++ ++ class outputswitch: ++ """ + Output switcher on context -+ ''' ++ """ ++ + def __init__(self, output_device): + self._tmp_out = output_device + self._orig_out = None @@ -630,116 +677,124 @@ index 0000000000..750b2655d6 + + self.out = LogCollector() + with outputswitch(self.out): -+ self.collector = SupportDataCollector(archive or self._get_archive_name(archname=archive), output) ++ self.collector = SupportDataCollector( ++ archive or self._get_archive_name(archname=archive), output ++ ) + self.collector.out = self.out + self.collector.open() -+ self.collect_local_data(profile=profile, profile_source=__pillar__.get(pillar)) ++ self.collect_local_data( ++ profile=profile, profile_source=__pillar__.get(pillar) ++ ) + self.collect_internal_data() + self.collector.close() + -+ return {'archive': self.collector.archive_path, -+ 'messages': self.out.messages} ++ return {"archive": self.collector.archive_path, "messages": self.out.messages} + + +def __virtual__(): -+ ''' ++ """ + Set method references as module functions aliases + :return: -+ ''' ++ """ + support = SaltSupportModule() + + def _set_function(obj): -+ ''' ++ """ + Create a Salt function for the SaltSupport class. -+ ''' ++ """ ++ + def _cmd(*args, **kwargs): -+ ''' ++ """ + Call support method as a function from the Salt. -+ ''' ++ """ + _kwargs = {} + for kw in kwargs: -+ if not kw.startswith('__'): ++ if not kw.startswith("__"): + _kwargs[kw] = kwargs[kw] + return obj(*args, **_kwargs) ++ + _cmd.__doc__ = obj.__doc__ + return _cmd + + for m_name in dir(support): + obj = getattr(support, m_name) -+ if getattr(obj, 'external', False): ++ if getattr(obj, "external", False): + setattr(sys.modules[__name__], m_name, _set_function(obj)) + + return __virtualname__ diff --git a/salt/state.py b/salt/state.py -index 1db1c35c52..bc5277554e 100644 +index beab2cb16c..b1bce4e0cd 100644 --- a/salt/state.py +++ b/salt/state.py -@@ -1406,8 +1406,9 @@ class State(object): +@@ -1547,7 +1547,9 @@ class State: names = [] - if state.startswith('__'): + if state.startswith("__"): continue -- chunk = {'state': state, -- 'name': name} +- chunk = {"state": state, "name": name} + chunk = OrderedDict() -+ chunk['state'] = state -+ chunk['name'] = name ++ chunk["state"] = state ++ chunk["name"] = name if orchestration_jid is not None: - chunk['__orchestration_jid__'] = orchestration_jid - if '__sls__' in body: -@@ -1977,8 +1978,12 @@ class State(object): + chunk["__orchestration_jid__"] = orchestration_jid + if "__sls__" in body: +@@ -2150,9 +2152,16 @@ class State: ret = self.call_parallel(cdata, low) else: self.format_slots(cdata) -- ret = self.states[cdata['full']](*cdata['args'], -- **cdata['kwargs']) -+ if cdata['full'].split('.')[-1] == '__call__': +- ret = self.states[cdata["full"]]( +- *cdata["args"], **cdata["kwargs"] +- ) ++ if cdata["full"].split(".")[-1] == "__call__": + # __call__ requires OrderedDict to preserve state order + # kwargs are also invalid overall -+ ret = self.states[cdata['full']](cdata['args'], module=None, state=cdata['kwargs']) ++ ret = self.states[cdata["full"]]( ++ cdata["args"], module=None, state=cdata["kwargs"] ++ ) + else: -+ ret = self.states[cdata['full']](*cdata['args'], **cdata['kwargs']) ++ ret = self.states[cdata["full"]]( ++ *cdata["args"], **cdata["kwargs"] ++ ) self.states.inject_globals 
= {} - if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states: - ret.update(self._run_check_cmd(low)) -@@ -2882,10 +2887,31 @@ class State(object): + if ( + "check_cmd" in low +@@ -3188,10 +3197,31 @@ class State: running.update(errors) return running + def inject_default_call(self, high): -+ ''' ++ """ + Sets .call function to a state, if not there. + + :param high: + :return: -+ ''' ++ """ + for chunk in high: + state = high[chunk] + for state_ref in state: + needs_default = True + for argset in state[state_ref]: -+ if isinstance(argset, six.string_types): ++ if isinstance(argset, str): + needs_default = False + break + if needs_default: + order = state[state_ref].pop(-1) -+ state[state_ref].append('__call__') ++ state[state_ref].append("__call__") + state[state_ref].append(order) + def call_high(self, high, orchestration_jid=None): - ''' + """ Process a high data call and ensure the defined states. - ''' + """ + self.inject_default_call(high) errors = [] # If there is extension data reconcile it high, ext_errors = self.reconcile_extend(high) diff --git a/salt/states/saltsupport.py b/salt/states/saltsupport.py new file mode 100644 -index 0000000000..f245f7f137 +index 0000000000..fb0c9e0372 --- /dev/null +++ b/salt/states/saltsupport.py -@@ -0,0 +1,206 @@ -+# -*- coding: utf-8 -*- +@@ -0,0 +1,225 @@ +# +# Author: Bo Maryniuk +# @@ -756,7 +811,7 @@ index 0000000000..f245f7f137 +# See the License for the specific language governing permissions and +# limitations under the License. + -+r''' ++r""" +:codeauthor: :email:`Bo Maryniuk ` + +Execution of Salt Support from within states @@ -774,27 +829,28 @@ index 0000000000..f245f7f137 + - group: somewhere + - move: true + -+''' -+from __future__ import absolute_import, print_function, unicode_literals ++""" +import logging +import os +import tempfile + ++import salt.exceptions ++ +# Import salt modules +import salt.fileclient +import salt.utils.decorators.path -+import salt.exceptions +import salt.utils.odict + +log = logging.getLogger(__name__) -+__virtualname__ = 'support' ++__virtualname__ = "support" + + -+class SaltSupportState(object): -+ ''' ++class SaltSupportState: ++ """ + Salt-support. -+ ''' -+ EXPORTED = ['collected', 'taken'] ++ """ ++ ++ EXPORTED = ["collected", "taken"] + + def get_kwargs(self, data): + kwargs = {} @@ -804,57 +860,65 @@ index 0000000000..f245f7f137 + return kwargs + + def __call__(self, state): -+ ''' ++ """ + Call support. 
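+
+        Dispatches every function referenced in the state data that is
+        listed in ``EXPORTED`` (``collected`` and ``taken``); any other
+        name raises ``SaltInvocationError``.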
+ + :param args: + :param kwargs: + :return: -+ ''' ++ """ + ret = { -+ 'name': state.pop('name'), -+ 'changes': {}, -+ 'result': True, -+ 'comment': '', ++ "name": state.pop("name"), ++ "changes": {}, ++ "result": True, ++ "comment": "", + } + + out = {} -+ functions = ['Functions:'] ++ functions = ["Functions:"] + try: + for ref_func, ref_kwargs in state.items(): + if ref_func not in self.EXPORTED: -+ raise salt.exceptions.SaltInvocationError('Function {} is not found'.format(ref_func)) ++ raise salt.exceptions.SaltInvocationError( ++ "Function {} is not found".format(ref_func) ++ ) + out[ref_func] = getattr(self, ref_func)(**self.get_kwargs(ref_kwargs)) -+ functions.append(' - {}'.format(ref_func)) -+ ret['comment'] = '\n'.join(functions) ++ functions.append(" - {}".format(ref_func)) ++ ret["comment"] = "\n".join(functions) + except Exception as ex: -+ ret['comment'] = str(ex) -+ ret['result'] = False -+ ret['changes'] = out ++ ret["comment"] = str(ex) ++ ret["result"] = False ++ ret["changes"] = out + + return ret + + def check_destination(self, location, group): -+ ''' ++ """ + Check destination for the archives. + :return: -+ ''' ++ """ + # Pre-create destination, since rsync will + # put one file named as group + try: + destination = os.path.join(location, group) + if os.path.exists(destination) and not os.path.isdir(destination): -+ raise salt.exceptions.SaltException('Destination "{}" should be directory!'.format(destination)) ++ raise salt.exceptions.SaltException( ++ 'Destination "{}" should be directory!'.format(destination) ++ ) + if not os.path.exists(destination): + os.makedirs(destination) -+ log.debug('Created destination directory for archives: %s', destination) ++ log.debug("Created destination directory for archives: %s", destination) + else: -+ log.debug('Archives destination directory %s already exists', destination) ++ log.debug( ++ "Archives destination directory %s already exists", destination ++ ) + except OSError as err: + log.error(err) + -+ def collected(self, group, filename=None, host=None, location=None, move=True, all=True): -+ ''' ++ def collected( ++ self, group, filename=None, host=None, location=None, move=True, all=True ++ ): ++ """ + Sync archives to a central place. + + :param name: @@ -865,22 +929,23 @@ index 0000000000..f245f7f137 + :param move: + :param all: + :return: -+ ''' ++ """ + ret = { -+ 'name': 'support.collected', -+ 'changes': {}, -+ 'result': True, -+ 'comment': '', ++ "name": "support.collected", ++ "changes": {}, ++ "result": True, ++ "comment": "", + } + location = location or tempfile.gettempdir() + self.check_destination(location, group) -+ ret['changes'] = __salt__['support.sync'](group, name=filename, host=host, -+ location=location, move=move, all=all) ++ ret["changes"] = __salt__["support.sync"]( ++ group, name=filename, host=host, location=location, move=move, all=all ++ ) + + return ret + -+ def taken(self, profile='default', pillar=None, archive=None, output='nested'): -+ ''' ++ def taken(self, profile="default", pillar=None, archive=None, output="nested"): ++ """ + Takes minion support config data. 
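+
+        A minimal SLS sketch (the ``examine_support`` ID is hypothetical;
+        keyword arguments keep their defaults):
+
+        .. code-block:: yaml
+
+            examine_support:
+              support.taken:
+                - profile: default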
+ + :param profile: @@ -888,23 +953,29 @@ index 0000000000..f245f7f137 + :param archive: + :param output: + :return: -+ ''' ++ """ + ret = { -+ 'name': 'support.taken', -+ 'changes': {}, -+ 'result': True, ++ "name": "support.taken", ++ "changes": {}, ++ "result": True, + } + -+ result = __salt__['support.run'](profile=profile, pillar=pillar, archive=archive, output=output) -+ if result.get('archive'): -+ ret['comment'] = 'Information about this system has been saved to {} file.'.format(result['archive']) -+ ret['changes']['archive'] = result['archive'] -+ ret['changes']['messages'] = {} -+ for key in ['info', 'error', 'warning']: -+ if result.get('messages', {}).get(key): -+ ret['changes']['messages'][key] = result['messages'][key] ++ result = __salt__["support.run"]( ++ profile=profile, pillar=pillar, archive=archive, output=output ++ ) ++ if result.get("archive"): ++ ret[ ++ "comment" ++ ] = "Information about this system has been saved to {} file.".format( ++ result["archive"] ++ ) ++ ret["changes"]["archive"] = result["archive"] ++ ret["changes"]["messages"] = {} ++ for key in ["info", "error", "warning"]: ++ if result.get("messages", {}).get(key): ++ ret["changes"]["messages"][key] = result["messages"][key] + else: -+ ret['comment'] = '' ++ ret["comment"] = "" + + return ret + @@ -913,7 +984,7 @@ index 0000000000..f245f7f137 + + +def __call__(*args, **kwargs): -+ ''' ++ """ + SLS single-ID syntax processing. + + module: @@ -926,75 +997,289 @@ index 0000000000..f245f7f137 + :param cdata: + :param kwargs: + :return: -+ ''' -+ return _support_state(kwargs.get('state', {})) ++ """ ++ return _support_state(kwargs.get("state", {})) + + -+def taken(name, profile='default', pillar=None, archive=None, output='nested'): -+ return _support_state.taken(profile=profile, pillar=pillar, -+ archive=archive, output=output) ++def taken(name, profile="default", pillar=None, archive=None, output="nested"): ++ return _support_state.taken( ++ profile=profile, pillar=pillar, archive=archive, output=output ++ ) + + -+def collected(name, group, filename=None, host=None, location=None, move=True, all=True): -+ return _support_state.collected(group=group, filename=filename, -+ host=host, location=location, move=move, all=all) ++def collected( ++ name, group, filename=None, host=None, location=None, move=True, all=True ++): ++ return _support_state.collected( ++ group=group, filename=filename, host=host, location=location, move=move, all=all ++ ) + + +def __virtual__(): -+ ''' ++ """ + Salt Support state -+ ''' ++ """ + return __virtualname__ diff --git a/salt/utils/args.py b/salt/utils/args.py -index 8cc0f35196..666a502498 100644 +index 87afdd3597..102402500c 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py -@@ -20,9 +20,7 @@ import salt.utils.data - import salt.utils.jid +@@ -1,8 +1,6 @@ +-# -*- coding: utf-8 -*- + """ + Functions used for CLI argument handling + """ +-from __future__ import absolute_import, print_function, unicode_literals + + import copy + import fnmatch +@@ -17,6 +15,7 @@ import salt.utils.jid import salt.utils.versions import salt.utils.yaml -- --log = logging.getLogger(__name__) -- + from salt.exceptions import SaltInvocationError +from salt.utils.odict import OrderedDict - if six.PY3: - KWARG_REGEX = re.compile(r'^([^\d\W][\w.-]*)=(?!=)(.*)$', re.UNICODE) -@@ -423,7 +421,7 @@ def format_call(fun, + log = logging.getLogger(__name__) + +@@ -70,9 +69,9 @@ def invalid_kwargs(invalid_kwargs, raise_exc=True): + """ + if invalid_kwargs: + if isinstance(invalid_kwargs, dict): +- 
new_invalid = ["{0}={1}".format(x, y) for x, y in invalid_kwargs.items()] ++ new_invalid = ["{}={}".format(x, y) for x, y in invalid_kwargs.items()] + invalid_kwargs = new_invalid +- msg = "The following keyword arguments are not valid: {0}".format( ++ msg = "The following keyword arguments are not valid: {}".format( + ", ".join(invalid_kwargs) + ) + if raise_exc: +@@ -259,7 +258,7 @@ def get_function_argspec(func, is_class_method=None): + and this is not always wanted. + """ + if not callable(func): +- raise TypeError("{0} is not a callable".format(func)) ++ raise TypeError("{} is not a callable".format(func)) + + if hasattr(func, "__wrapped__"): + func = func.__wrapped__ +@@ -279,7 +278,7 @@ def get_function_argspec(func, is_class_method=None): + try: + sig = inspect.signature(func) + except TypeError: +- raise TypeError("Cannot inspect argument list for '{0}'".format(func)) ++ raise TypeError("Cannot inspect argument list for '{}'".format(func)) + else: + # argspec-related functions are deprecated in Python 3 in favor of + # the new inspect.Signature class, and will be removed at some +@@ -439,7 +438,7 @@ def format_call( ret = initial_ret is not None and initial_ret or {} - ret['args'] = [] -- ret['kwargs'] = {} -+ ret['kwargs'] = OrderedDict() + ret["args"] = [] +- ret["kwargs"] = {} ++ ret["kwargs"] = OrderedDict() aspec = get_function_argspec(fun, is_class_method=is_class_method) +@@ -470,7 +469,7 @@ def format_call( + used_args_count = len(ret["args"]) + len(args) + args_count = used_args_count + len(missing_args) + raise SaltInvocationError( +- "{0} takes at least {1} argument{2} ({3} given)".format( ++ "{} takes at least {} argument{} ({} given)".format( + fun.__name__, args_count, args_count > 1 and "s" or "", used_args_count + ) + ) +@@ -506,18 +505,18 @@ def format_call( + # In case this is being called for a state module + "full", + # Not a state module, build the name +- "{0}.{1}".format(fun.__module__, fun.__name__), ++ "{}.{}".format(fun.__module__, fun.__name__), + ), + ) + else: +- msg = "{0} and '{1}' are invalid keyword arguments for '{2}'".format( +- ", ".join(["'{0}'".format(e) for e in extra][:-1]), ++ msg = "{} and '{}' are invalid keyword arguments for '{}'".format( ++ ", ".join(["'{}'".format(e) for e in extra][:-1]), + list(extra.keys())[-1], + ret.get( + # In case this is being called for a state module + "full", + # Not a state module, build the name +- "{0}.{1}".format(fun.__module__, fun.__name__), ++ "{}.{}".format(fun.__module__, fun.__name__), + ), + ) + diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py -index 45d69072c7..b2abb15425 100644 +index 940d0a90f2..b06cf0abc8 100644 --- a/salt/utils/decorators/__init__.py +++ b/salt/utils/decorators/__init__.py -@@ -690,3 +690,27 @@ def ensure_unicode_args(function): +@@ -1,10 +1,7 @@ +-# -*- coding: utf-8 -*- + """ + Helpful decorators for module writing + """ + +-# Import python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import errno + import inspect +@@ -15,13 +12,10 @@ import time + from collections import defaultdict + from functools import wraps + +-# Import salt libs + import salt.utils.args + import salt.utils.data + import salt.utils.versions + from salt.exceptions import CommandExecutionError, SaltConfigurationError +- +-# Import 3rd-party libs + from salt.ext import six + from salt.log import LOG_LEVELS + +@@ -32,7 +26,7 @@ if getattr(sys, "getwindowsversion", False): + log = logging.getLogger(__name__) + + +-class 
Depends(object): ++class Depends: + """ + This decorator will check the module when it is loaded and check that the + dependencies passed in are in the globals of the module. If not, it will +@@ -121,7 +115,7 @@ class Depends(object): + + @staticmethod + def run_command(dependency, mod_name, func_name): +- full_name = "{0}.{1}".format(mod_name, func_name) ++ full_name = "{}.{}".format(mod_name, func_name) + log.trace("Running '%s' for '%s'", dependency, full_name) + if IS_WINDOWS: + args = salt.utils.args.shlex_split(dependency, posix=False) +@@ -145,8 +139,8 @@ class Depends(object): + It will modify the "functions" dict and remove/replace modules that + are missing dependencies. + """ +- for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]): +- for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict): ++ for dependency, dependent_dict in cls.dependency_dict[kind].items(): ++ for (mod_name, func_name), (frame, params) in dependent_dict.items(): + if mod_name != tgt_mod: + continue + # Imports from local context take presedence over those from the global context. +@@ -232,7 +226,7 @@ class Depends(object): + except (AttributeError, KeyError): + pass + +- mod_key = "{0}.{1}".format(mod_name, func_name) ++ mod_key = "{}.{}".format(mod_name, func_name) + + # if we don't have this module loaded, skip it! + if mod_key not in functions: +@@ -267,9 +261,7 @@ def timing(function): + mod_name = function.__module__[16:] else: - return function(*args, **kwargs) + mod_name = function.__module__ +- fstr = "Function %s.%s took %.{0}f seconds to execute".format( +- sys.float_info.dig +- ) ++ fstr = "Function %s.%s took %.{}f seconds to execute".format(sys.float_info.dig) + log.profile(fstr, mod_name, function.__name__, end_time - start_time) + return ret + +@@ -291,13 +283,13 @@ def memoize(func): + def _memoize(*args, **kwargs): + str_args = [] + for arg in args: +- if not isinstance(arg, six.string_types): +- str_args.append(six.text_type(arg)) ++ if not isinstance(arg, str): ++ str_args.append(str(arg)) + else: + str_args.append(arg) + + args_ = ",".join( +- list(str_args) + ["{0}={1}".format(k, kwargs[k]) for k in sorted(kwargs)] ++ list(str_args) + ["{}={}".format(k, kwargs[k]) for k in sorted(kwargs)] + ) + if args_ not in cache: + cache[args_] = func(*args, **kwargs) +@@ -306,7 +298,7 @@ def memoize(func): + return _memoize + + +-class _DeprecationDecorator(object): ++class _DeprecationDecorator: + """ + Base mix-in class for the deprecation decorator. + Takes care of a common functionality, used in its derivatives. +@@ -359,7 +351,7 @@ class _DeprecationDecorator(object): + try: + return self._function(*args, **kwargs) + except TypeError as error: +- error = six.text_type(error).replace( ++ error = str(error).replace( + self._function, self._orig_f_name + ) # Hide hidden functions + log.error( +@@ -374,7 +366,7 @@ class _DeprecationDecorator(object): + self._function.__name__, + error, + ) +- six.reraise(*sys.exc_info()) ++ raise + else: + raise CommandExecutionError( + "Function is deprecated, but the successor function was not found." +@@ -626,11 +618,11 @@ class _WithDeprecated(_DeprecationDecorator): + + if use_deprecated and use_superseded: + raise SaltConfigurationError( +- "Function '{0}' is mentioned both in deprecated " ++ "Function '{}' is mentioned both in deprecated " + "and superseded sections. 
Please remove any of that.".format(full_name) + ) + old_function = self._globals.get( +- self._with_name or "_{0}".format(function.__name__) ++ self._with_name or "_{}".format(function.__name__) + ) + if self._policy == self.OPT_IN: + self._function = function if use_superseded else old_function +@@ -782,12 +774,30 @@ def ensure_unicode_args(function): + + @wraps(function) + def wrapped(*args, **kwargs): +- if six.PY2: +- return function( +- *salt.utils.data.decode_list(args), +- **salt.utils.data.decode_dict(kwargs) +- ) +- else: +- return function(*args, **kwargs) ++ return function(*args, **kwargs) + return wrapped + + +def external(func): -+ ''' ++ """ + Mark function as external. + + :param func: + :return: -+ ''' ++ """ + + def f(*args, **kwargs): -+ ''' ++ """ + Stub. + + :param args: + :param kwargs: + :return: -+ ''' ++ """ + return func(*args, **kwargs) + + f.external = True @@ -1003,405 +1288,507 @@ index 45d69072c7..b2abb15425 100644 + return f diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py new file mode 100644 -index 0000000000..7bd652a90e +index 0000000000..f9ce7be29a --- /dev/null +++ b/tests/unit/modules/test_saltsupport.py -@@ -0,0 +1,394 @@ -+# -*- coding: utf-8 -*- -+''' +@@ -0,0 +1,496 @@ ++""" + :codeauthor: Bo Maryniuk -+''' ++""" + -+# Import Python libs -+from __future__ import absolute_import, print_function, unicode_literals + -+# Import Salt Testing Libs -+from tests.support.mixins import LoaderModuleMockMixin -+from tests.support.unit import TestCase, skipIf -+from tests.support.mock import patch, MagicMock, NO_MOCK, NO_MOCK_REASON -+from salt.modules import saltsupport -+import salt.exceptions +import datetime + ++import salt.exceptions ++from salt.modules import saltsupport ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch ++from tests.support.unit import TestCase, skipIf ++ +try: + import pytest +except ImportError: + pytest = None + + -+@skipIf(not bool(pytest), 'Pytest required') ++@skipIf(not bool(pytest), "Pytest required") +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin): -+ ''' ++ """ + Test cases for salt.modules.support::SaltSupportModule -+ ''' ++ """ ++ + def setup_loader_modules(self): + return {saltsupport: {}} + -+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage')) -+ @patch('salt.modules.saltsupport.__grains__', {'fqdn': 'c-3po'}) -+ @patch('time.strftime', MagicMock(return_value='000')) ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) ++ @patch("time.strftime", MagicMock(return_value="000")) + def test_get_archive_name(self): -+ ''' ++ """ + Test archive name construction. 
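+
+        With ``tempfile.gettempdir``, the ``fqdn`` grain and
+        ``time.strftime`` mocked above, the generated name follows
+        ``<tmpdir>/<fqdn>-support-<date>-<time>.bz2``.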
+ + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ assert support._get_archive_name() == '/mnt/storage/c-3po-support-000-000.bz2' ++ assert support._get_archive_name() == "/mnt/storage/c-3po-support-000-000.bz2" + -+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage')) -+ @patch('salt.modules.saltsupport.__grains__', {'fqdn': 'c-3po'}) -+ @patch('time.strftime', MagicMock(return_value='000')) ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) ++ @patch("time.strftime", MagicMock(return_value="000")) + def test_get_custom_archive_name(self): -+ ''' ++ """ + Test get custom archive name. + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ temp_name = support._get_archive_name(archname='Darth Wader') -+ assert temp_name == '/mnt/storage/c-3po-darthwader-000-000.bz2' -+ temp_name = support._get_archive_name(archname='Яйця з сіллю') -+ assert temp_name == '/mnt/storage/c-3po-support-000-000.bz2' -+ temp_name = support._get_archive_name(archname='!@#$%^&*()Fillip J. Fry') -+ assert temp_name == '/mnt/storage/c-3po-fillipjfry-000-000.bz2' ++ temp_name = support._get_archive_name(archname="Darth Wader") ++ assert temp_name == "/mnt/storage/c-3po-darthwader-000-000.bz2" ++ temp_name = support._get_archive_name(archname="Яйця з сіллю") ++ assert temp_name == "/mnt/storage/c-3po-support-000-000.bz2" ++ temp_name = support._get_archive_name(archname="!@#$%^&*()Fillip J. Fry") ++ assert temp_name == "/mnt/storage/c-3po-fillipjfry-000-000.bz2" + -+ @patch('salt.cli.support.get_profiles', MagicMock(return_value={'message': 'Feature was not beta tested'})) ++ @patch( ++ "salt.cli.support.get_profiles", ++ MagicMock(return_value={"message": "Feature was not beta tested"}), ++ ) + def test_profiles_format(self): -+ ''' ++ """ + Test profiles format. + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() + profiles = support.profiles() -+ assert 'custom' in profiles -+ assert 'standard' in profiles -+ assert 'message' in profiles['standard'] -+ assert profiles['custom'] == [] -+ assert profiles['standard']['message'] == 'Feature was not beta tested' ++ assert "custom" in profiles ++ assert "standard" in profiles ++ assert "message" in profiles["standard"] ++ assert profiles["custom"] == [] ++ assert profiles["standard"]["message"] == "Feature was not beta tested" + -+ @patch('tempfile.gettempdir', MagicMock(return_value='/mnt/storage')) -+ @patch('os.listdir', MagicMock(return_value=['one-support-000-000.bz2', 'two-support-111-111.bz2', 'trash.bz2', -+ 'hostname-000-000.bz2', 'three-support-wrong222-222.bz2', -+ '000-support-000-000.bz2'])) ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch( ++ "os.listdir", ++ MagicMock( ++ return_value=[ ++ "one-support-000-000.bz2", ++ "two-support-111-111.bz2", ++ "trash.bz2", ++ "hostname-000-000.bz2", ++ "three-support-wrong222-222.bz2", ++ "000-support-000-000.bz2", ++ ] ++ ), ++ ) + def test_get_existing_archives(self): -+ ''' ++ """ + Get list of existing archives. 
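+
+        Only temp-directory entries matching the
+        ``<host>-<name>-<date>-<time>.bz2`` pattern count; ``trash.bz2``
+        and the malformed names mocked above are filtered out.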
+ + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() + out = support.archives() + assert len(out) == 3 -+ for name in ['/mnt/storage/one-support-000-000.bz2', '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/000-support-000-000.bz2']: ++ for name in [ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/000-support-000-000.bz2", ++ ]: + assert name in out + + def test_last_archive(self): -+ ''' ++ """ + Get last archive name + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) -+ assert support.last_archive() == '/mnt/storage/three-support-222-222.bz2' ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ assert support.last_archive() == "/mnt/storage/three-support-222-222.bz2" + -+ @patch('os.unlink', MagicMock(return_value=True)) ++ @patch("os.unlink", MagicMock(return_value=True)) + def test_delete_all_archives_success(self): -+ ''' ++ """ + Test delete archives + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) + ret = support.delete_archives() -+ assert 'files' in ret -+ assert 'errors' in ret -+ assert not bool(ret['errors']) -+ assert bool(ret['files']) -+ assert isinstance(ret['errors'], dict) -+ assert isinstance(ret['files'], dict) ++ assert "files" in ret ++ assert "errors" in ret ++ assert not bool(ret["errors"]) ++ assert bool(ret["files"]) ++ assert isinstance(ret["errors"], dict) ++ assert isinstance(ret["files"], dict) + + for arc in support.archives(): -+ assert ret['files'][arc] == 'removed' ++ assert ret["files"][arc] == "removed" + -+ @patch('os.unlink', MagicMock(return_value=False, side_effect=[OSError('Decreasing electron flux'), -+ OSError('Solar flares interference'), -+ None])) ++ @patch( ++ "os.unlink", ++ MagicMock( ++ return_value=False, ++ side_effect=[ ++ OSError("Decreasing electron flux"), ++ OSError("Solar flares interference"), ++ None, ++ ], ++ ), ++ ) + def test_delete_all_archives_failure(self): -+ ''' ++ """ + Test delete archives failure + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) + ret = support.delete_archives() -+ assert 'files' in ret -+ assert 'errors' in ret -+ assert bool(ret['errors']) -+ assert bool(ret['files']) -+ assert isinstance(ret['errors'], dict) -+ assert isinstance(ret['files'], dict) ++ assert "files" in ret ++ assert "errors" in ret ++ assert bool(ret["errors"]) ++ assert bool(ret["files"]) ++ assert isinstance(ret["errors"], dict) ++ assert isinstance(ret["files"], 
dict) + -+ assert ret['files']['/mnt/storage/three-support-222-222.bz2'] == 'removed' -+ assert ret['files']['/mnt/storage/one-support-000-000.bz2'] == 'left' -+ assert ret['files']['/mnt/storage/two-support-111-111.bz2'] == 'left' ++ assert ret["files"]["/mnt/storage/three-support-222-222.bz2"] == "removed" ++ assert ret["files"]["/mnt/storage/one-support-000-000.bz2"] == "left" ++ assert ret["files"]["/mnt/storage/two-support-111-111.bz2"] == "left" + -+ assert len(ret['errors']) == 2 -+ assert ret['errors']['/mnt/storage/one-support-000-000.bz2'] == 'Decreasing electron flux' -+ assert ret['errors']['/mnt/storage/two-support-111-111.bz2'] == 'Solar flares interference' ++ assert len(ret["errors"]) == 2 ++ assert ( ++ ret["errors"]["/mnt/storage/one-support-000-000.bz2"] ++ == "Decreasing electron flux" ++ ) ++ assert ( ++ ret["errors"]["/mnt/storage/two-support-111-111.bz2"] ++ == "Solar flares interference" ++ ) + + def test_format_sync_stats(self): -+ ''' ++ """ + Test format rsync stats for preserving ordering of the keys + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ stats = ''' ++ stats = """ +robot: Bender +cute: Leela +weird: Zoidberg +professor: Farnsworth -+ ''' -+ f_stats = support.format_sync_stats({'retcode': 0, 'stdout': stats}) -+ assert list(f_stats['transfer'].keys()) == ['robot', 'cute', 'weird', 'professor'] -+ assert list(f_stats['transfer'].values()) == ['Bender', 'Leela', 'Zoidberg', 'Farnsworth'] ++ """ ++ f_stats = support.format_sync_stats({"retcode": 0, "stdout": stats}) ++ assert list(f_stats["transfer"].keys()) == [ ++ "robot", ++ "cute", ++ "weird", ++ "professor", ++ ] ++ assert list(f_stats["transfer"].values()) == [ ++ "Bender", ++ "Leela", ++ "Zoidberg", ++ "Farnsworth", ++ ] + -+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy'))) -+ @patch('os.close', MagicMock()) ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.close", MagicMock()) + def test_sync_no_archives_failure(self): -+ ''' ++ """ + Test sync failed when no archives specified. 
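+
+        ``tempfile.mkstemp`` and ``os.close`` are patched above so this
+        failure path never touches the real filesystem.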
+ + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() + support.archives = MagicMock(return_value=[]) + + with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync('group-name') -+ assert 'No archives found to transfer' in str(err) ++ support.sync("group-name") ++ assert "No archives found to transfer" in str(err) + -+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy'))) -+ @patch('os.path.exists', MagicMock(return_value=False)) ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) + def test_sync_last_picked_archive_not_found_failure(self): -+ ''' ++ """ + Test sync failed when archive was not found (last picked) + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) + + with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync('group-name') -+ assert ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found' in str(err) ++ support.sync("group-name") ++ assert ( ++ ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found' ++ in str(err) ++ ) + -+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy'))) -+ @patch('os.path.exists', MagicMock(return_value=False)) ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) + def test_sync_specified_archive_not_found_failure(self): -+ ''' ++ """ + Test sync failed when archive was not found (last picked) + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) + + with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync('group-name', name='lost.bz2') ++ support.sync("group-name", name="lost.bz2") + assert ' Support archive "lost.bz2" was not found' in str(err) + -+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy'))) -+ @patch('os.path.exists', MagicMock(return_value=False)) -+ @patch('os.close', MagicMock()) ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) ++ @patch("os.close", MagicMock()) + def test_sync_no_archive_to_transfer_failure(self): -+ ''' ++ """ + Test sync failed when no archive was found to transfer + + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() + support.archives = MagicMock(return_value=[]) + with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync('group-name', all=True) -+ assert 'No archives found to transfer' in str(err) ++ support.sync("group-name", all=True) ++ assert "No archives found to transfer" in str(err) + -+ @patch('tempfile.mkstemp', MagicMock(return_value=(0, 'dummy'))) -+ @patch('os.path.exists', MagicMock(return_value=True)) -+ @patch('os.close', 
MagicMock()) -+ @patch('os.write', MagicMock()) -+ @patch('os.unlink', MagicMock()) -+ @patch('salt.modules.saltsupport.__salt__', {'rsync.rsync': MagicMock(return_value={})}) ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=True)) ++ @patch("os.close", MagicMock()) ++ @patch("os.write", MagicMock()) ++ @patch("os.unlink", MagicMock()) ++ @patch( ++ "salt.modules.saltsupport.__salt__", {"rsync.rsync": MagicMock(return_value={})} ++ ) + def test_sync_archives(self): -+ ''' ++ """ + Test sync archives + :return: -+ ''' ++ """ + support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=['/mnt/storage/one-support-000-000.bz2', -+ '/mnt/storage/two-support-111-111.bz2', -+ '/mnt/storage/three-support-222-222.bz2']) -+ out = support.sync('group-name', host='buzz', all=True, move=False) -+ assert 'files' in out -+ for arc_name in out['files']: -+ assert out['files'][arc_name] == 'copied' ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ out = support.sync("group-name", host="buzz", all=True, move=False) ++ assert "files" in out ++ for arc_name in out["files"]: ++ assert out["files"][arc_name] == "copied" + assert saltsupport.os.unlink.call_count == 1 -+ assert saltsupport.os.unlink.call_args_list[0][0][0] == 'dummy' ++ assert saltsupport.os.unlink.call_args_list[0][0][0] == "dummy" + calls = [] + for call in saltsupport.os.write.call_args_list: + assert len(call) == 2 + calls.append(call[0]) -+ assert calls == [(0, b'one-support-000-000.bz2'), -+ (0, b'\n'), (0, b'two-support-111-111.bz2'), (0, b'\n'), -+ (0, b'three-support-222-222.bz2'), (0, b'\n')] ++ assert calls == [ ++ (0, b"one-support-000-000.bz2"), ++ (0, b"\n"), ++ (0, b"two-support-111-111.bz2"), ++ (0, b"\n"), ++ (0, b"three-support-222-222.bz2"), ++ (0, b"\n"), ++ ] + -+ @patch('salt.modules.saltsupport.__pillar__', {}) -+ @patch('salt.modules.saltsupport.SupportDataCollector', MagicMock()) ++ @patch("salt.modules.saltsupport.__pillar__", {}) ++ @patch("salt.modules.saltsupport.SupportDataCollector", MagicMock()) + def test_run_support(self): -+ ''' ++ """ + Test run support + :return: -+ ''' -+ saltsupport.SupportDataCollector(None, None).archive_path = 'dummy' ++ """ ++ saltsupport.SupportDataCollector(None, None).archive_path = "dummy" + support = saltsupport.SaltSupportModule() + support.collect_internal_data = MagicMock() + support.collect_local_data = MagicMock() + out = support.run() + -+ for section in ['messages', 'archive']: ++ for section in ["messages", "archive"]: + assert section in out -+ assert out['archive'] == 'dummy' -+ for section in ['warning', 'error', 'info']: -+ assert section in out['messages'] ++ assert out["archive"] == "dummy" ++ for section in ["warning", "error", "info"]: ++ assert section in out["messages"] + ld_call = support.collect_local_data.call_args_list[0][1] -+ assert 'profile' in ld_call -+ assert ld_call['profile'] == 'default' -+ assert 'profile_source' in ld_call -+ assert ld_call['profile_source'] is None ++ assert "profile" in ld_call ++ assert ld_call["profile"] == "default" ++ assert "profile_source" in ld_call ++ assert ld_call["profile_source"] is None + assert support.collector.open.call_count == 1 + assert support.collector.close.call_count == 1 + assert support.collect_internal_data.call_count == 1 + + -+@skipIf(not bool(pytest), 'Pytest 
required') ++@skipIf(not bool(pytest), "Pytest required") +@skipIf(NO_MOCK, NO_MOCK_REASON) +class LogCollectorTestCase(TestCase, LoaderModuleMockMixin): -+ ''' ++ """ + Test cases for salt.modules.support::LogCollector -+ ''' ++ """ ++ + def setup_loader_modules(self): + return {saltsupport: {}} + + def test_msg(self): -+ ''' ++ """ + Test message to the log collector. + + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): -+ msg = 'Upgrading /dev/null device' ++ with patch("datetime.datetime", utcmock): ++ msg = "Upgrading /dev/null device" + out = saltsupport.LogCollector() -+ out.msg(msg, title='Here') ++ out.msg(msg, title="Here") + assert saltsupport.LogCollector.INFO in out.messages -+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {0}: {1}'.format('Here', msg)] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}: {}".format("Here", msg) ++ ] + + def test_info_message(self): -+ ''' ++ """ + Test info message to the log collector. + + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): -+ msg = 'SIMM crosstalk during tectonic stress' ++ with patch("datetime.datetime", utcmock): ++ msg = "SIMM crosstalk during tectonic stress" + out = saltsupport.LogCollector() + out.info(msg) + assert saltsupport.LogCollector.INFO in out.messages -+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {}'.format(msg)] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] + + def test_put_message(self): -+ ''' ++ """ + Test put message to the log collector. + + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): -+ msg = 'Webmaster kidnapped by evil cult' ++ with patch("datetime.datetime", utcmock): ++ msg = "Webmaster kidnapped by evil cult" + out = saltsupport.LogCollector() + out.put(msg) + assert saltsupport.LogCollector.INFO in out.messages -+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - {}'.format(msg)] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] + + def test_warning_message(self): -+ ''' ++ """ + Test warning message to the log collector. 
+ + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): -+ msg = 'Your e-mail is now being delivered by USPS' ++ with patch("datetime.datetime", utcmock): ++ msg = "Your e-mail is now being delivered by USPS" + out = saltsupport.LogCollector() + out.warning(msg) + assert saltsupport.LogCollector.WARNING in out.messages -+ assert type(out.messages[saltsupport.LogCollector.WARNING]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.WARNING] == ['00:00:00.000 - {}'.format(msg)] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.WARNING]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.WARNING] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] + + def test_error_message(self): -+ ''' ++ """ + Test error message to the log collector. + + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): -+ msg = 'Learning curve appears to be fractal' ++ with patch("datetime.datetime", utcmock): ++ msg = "Learning curve appears to be fractal" + out = saltsupport.LogCollector() + out.error(msg) + assert saltsupport.LogCollector.ERROR in out.messages -+ assert type(out.messages[saltsupport.LogCollector.ERROR]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.ERROR] == ['00:00:00.000 - {}'.format(msg)] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.ERROR]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.ERROR] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] + + def test_hl_message(self): -+ ''' ++ """ + Test highlighter message to the log collector. + + :return: -+ ''' ++ """ + utcmock = MagicMock() + utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch('datetime.datetime', utcmock): ++ with patch("datetime.datetime", utcmock): + out = saltsupport.LogCollector() -+ out.highlight('The {} TTYs became {} TTYs and vice versa', 'real', 'pseudo') ++ out.highlight("The {} TTYs became {} TTYs and vice versa", "real", "pseudo") + assert saltsupport.LogCollector.INFO in out.messages -+ assert type(out.messages[saltsupport.LogCollector.INFO]) == saltsupport.LogCollector.MessagesList -+ assert out.messages[saltsupport.LogCollector.INFO] == ['00:00:00.000 - The real TTYs became ' -+ 'pseudo TTYs and vice versa'] ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - The real TTYs became " "pseudo TTYs and vice versa" ++ ] -- -2.16.4 +2.29.2 diff --git a/add-virt.all_capabilities.patch b/add-virt.all_capabilities.patch index a5dd125..794c5fd 100644 --- a/add-virt.all_capabilities.patch +++ b/add-virt.all_capabilities.patch @@ -1,4 +1,4 @@ -From 82ddc9d93f6c0d6bc1e8dc6ebd30d6809d9f4d8f Mon Sep 17 00:00:00 2001 +From ca2ad86438293af6715a9890b168f159ff4d9b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 18 Oct 2018 13:32:59 +0200 Subject: [PATCH] Add virt.all_capabilities @@ -10,100 +10,37 @@ before calling virt.domain_capabilities for each of them. This commit embeds all this logic to get them all in a single virt.all_capabilities call. 
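For context, here is a minimal self-contained sketch (not part of this
patch; the guest data shape and the domain_caps() helper are simplified
stand-ins, not Salt's or libvirt's real API) of the aggregation that the
single virt.all_capabilities call performs:

    # Simplified stand-in for the guests parsed from the host capabilities.
    guests = [{"arch": {"name": "x86_64", "domains": {"qemu": {}, "kvm": {}}}}]

    def domain_caps(arch, domain):
        # Stand-in for one conn.getDomainCapabilities(None, arch, None, domain) call.
        return {"arch": arch, "domain": domain}

    # Flatten every (arch, domain) pair advertised by the host...
    flattened = [
        (guest["arch"]["name"], domain)
        for guest in guests
        for domain in guest["arch"]["domains"]
    ]
    # ...then resolve all pairs in a single pass, as virt.all_capabilities does.
    print({"domains": [domain_caps(arch, domain) for arch, domain in flattened]})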
--- - salt/modules/virt.py | 107 +++++++++++++++++++++++++++++----------- - tests/unit/modules/test_virt.py | 56 +++++++++++++++++++++ - 2 files changed, 134 insertions(+), 29 deletions(-) + salt/modules/virt.py | 73 +++++++++++++++++++++++++++++++-- + tests/unit/modules/test_virt.py | 2 +- + 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index a2412bb745..3889238ecd 100644 +index 313181c49e..362c2a68b5 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py -@@ -4254,37 +4254,10 @@ def _parse_caps_loader(node): +@@ -5568,11 +5568,76 @@ def _parse_domain_caps(caps): return result --def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): +def _parse_domain_caps(caps): - ''' -- Return the domain capabilities given an emulator, architecture, machine or virtualization type. -- -- .. versionadded:: 2019.2.0 -- -- :param emulator: return the capabilities for the given emulator binary -- :param arch: return the capabilities for the given CPU architecture -- :param machine: return the capabilities for the given emulated machine type -- :param domain: return the capabilities for the given virtualization type. -- :param connection: libvirt connection URI, overriding defaults -- :param username: username to connect with, overriding defaults -- :param password: password to connect with, overriding defaults -- -- The list of the possible emulator, arch, machine and domain can be found in -- the host capabilities output. -- -- If none of the parameters is provided the libvirt default domain capabilities -- will be returned. -- -- CLI Example: -- -- .. code-block:: bash -- -- salt '*' virt.domain_capabilities arch='x86_64' domain='kvm' -- ++ """ + Parse the XML document of domain capabilities into a structure. - ''' -- conn = __get_conn(**kwargs) -- caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0)) -- conn.close() -- - result = { - 'emulator': caps.find('path').text if caps.find('path') is not None else None, - 'domain': caps.find('domain').text if caps.find('domain') is not None else None, -@@ -4324,6 +4297,82 @@ def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **k - return result - - -+def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): -+ ''' -+ Return the domain capabilities given an emulator, architecture, machine or virtualization type. -+ -+ .. versionadded:: Fluorine -+ -+ :param emulator: return the capabilities for the given emulator binary -+ :param arch: return the capabilities for the given CPU architecture -+ :param machine: return the capabilities for the given emulated machine type -+ :param domain: return the capabilities for the given virtualization type. -+ :param connection: libvirt connection URI, overriding defaults -+ :param username: username to connect with, overriding defaults -+ :param password: password to connect with, overriding defaults -+ -+ The list of the possible emulator, arch, machine and domain can be found in -+ the host capabilities output. -+ -+ If none of the parameters is provided, the libvirt default one is returned. -+ -+ CLI Example: -+ -+ .. 
code-block:: bash -+ -+ salt '*' virt.domain_capabilities arch='x86_64' domain='kvm' -+ -+ ''' -+ conn = __get_conn(**kwargs) -+ result = [] -+ try: -+ caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0)) -+ result = _parse_domain_caps(caps) -+ finally: -+ conn.close() -+ -+ return result ++ """ ++ result = { ++ "emulator": caps.find("path").text if caps.find("path") is not None else None, ++ "domain": caps.find("domain").text if caps.find("domain") is not None else None, ++ "machine": caps.find("machine").text ++ if caps.find("machine") is not None ++ else None, ++ "arch": caps.find("arch").text if caps.find("arch") is not None else None, ++ } + + +def all_capabilities(**kwargs): -+ ''' ++ """ + Return the host and domain capabilities in a single call. + -+ .. versionadded:: Neon ++ .. versionadded:: 3001 + + :param connection: libvirt connection URI, overriding defaults + :param username: username to connect with, overriding defaults @@ -115,100 +52,94 @@ index a2412bb745..3889238ecd 100644 + + salt '*' virt.all_capabilities + -+ ''' ++ """ + conn = __get_conn(**kwargs) -+ result = {} + try: + host_caps = ElementTree.fromstring(conn.getCapabilities()) -+ domains = [[(guest.get('arch', {}).get('name', None), key) -+ for key in guest.get('arch', {}).get('domains', {}).keys()] -+ for guest in [_parse_caps_guest(guest) for guest in host_caps.findall('guest')]] ++ domains = [ ++ [ ++ (guest.get("arch", {}).get("name", None), key) ++ for key in guest.get("arch", {}).get("domains", {}).keys() ++ ] ++ for guest in [ ++ _parse_caps_guest(guest) for guest in host_caps.findall("guest") ++ ] ++ ] + flattened = [pair for item in (x for x in domains) for pair in item] + result = { -+ 'host': { -+ 'host': _parse_caps_host(host_caps.find('host')), -+ 'guests': [_parse_caps_guest(guest) for guest in host_caps.findall('guest')] -+ }, -+ 'domains': [_parse_domain_caps(ElementTree.fromstring( -+ conn.getDomainCapabilities(None, arch, None, domain))) -+ for (arch, domain) in flattened]} ++ "host": { ++ "host": _parse_caps_host(host_caps.find("host")), ++ "guests": [ ++ _parse_caps_guest(guest) for guest in host_caps.findall("guest") ++ ], ++ }, ++ "domains": [ ++ _parse_domain_caps( ++ ElementTree.fromstring( ++ conn.getDomainCapabilities(None, arch, None, domain) ++ ) ++ ) ++ for (arch, domain) in flattened ++ ], ++ } ++ return result + finally: + conn.close() + ++ + def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): + """ + Return the domain capabilities given an emulator, architecture, machine or virtualization type. + +- .. versionadded:: 2019.2.0 ++ .. versionadded:: Fluorine + + :param emulator: return the capabilities for the given emulator binary + :param arch: return the capabilities for the given CPU architecture +@@ -5611,7 +5676,7 @@ def all_capabilities(**kwargs): + """ + Return the host and domain capabilities in a single call. + +- .. versionadded:: 3001 ++ .. 
versionadded:: Neon + + :param connection: libvirt connection URI, overriding defaults + :param username: username to connect with, overriding defaults +@@ -5625,6 +5690,7 @@ def all_capabilities(**kwargs): + + """ + conn = __get_conn(**kwargs) ++ result = {} + try: + host_caps = ElementTree.fromstring(conn.getCapabilities()) + domains = [ +@@ -5653,10 +5719,11 @@ def all_capabilities(**kwargs): + for (arch, domain) in flattened + ], + } +- return result + finally: + conn.close() + + return result + -+ - def cpu_baseline(full=False, migratable=False, out='libvirt', **kwargs): - ''' - Return the optimal 'custom' CPU baseline config for VM's on this minion + + def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs): + """ diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index 32f4302e5f..94372c6d72 100644 +index cce107c9e4..e9e73d7b5d 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py -@@ -2216,6 +2216,62 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4063,7 +4063,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + "44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"] + ) + self.assertEqual( +- {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]}, ++ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]} + ) - self.assertEqual(expected, caps) - -+ def test_all_capabilities(self): -+ ''' -+ Test the virt.domain_capabilities default output -+ ''' -+ domainXml = ''' -+ -+ /usr/bin/qemu-system-x86_64 -+ kvm -+ virt-2.12 -+ x86_64 -+ -+ -+ -+ ''' -+ hostXml = ''' -+ -+ -+ 44454c4c-3400-105a-8033-b3c04f4b344a -+ -+ x86_64 -+ Nehalem -+ Intel -+ -+ -+ -+ -+ -+ hvm -+ -+ 64 -+ /usr/bin/qemu-system-x86_64 -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ /usr/bin/qemu-kvm -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ -+ -+ ''' -+ -+ # pylint: disable=no-member -+ self.mock_conn.getCapabilities.return_value = hostXml -+ self.mock_conn.getDomainCapabilities.side_effect = [ -+ domainXml, domainXml.replace('kvm', 'qemu')] -+ # pylint: enable=no-member -+ -+ caps = virt.all_capabilities() -+ self.assertEqual('44454c4c-3400-105a-8033-b3c04f4b344a', caps['host']['host']['uuid']) -+ self.assertEqual(set(['qemu', 'kvm']), set([domainCaps['domain'] for domainCaps in caps['domains']])) -+ def test_network_tag(self): - ''' - Test virt._get_net_xml() with VLAN tag -- -2.16.4 +2.29.2 diff --git a/adds-explicit-type-cast-for-port.patch b/adds-explicit-type-cast-for-port.patch index abb13cb..176d530 100644 --- a/adds-explicit-type-cast-for-port.patch +++ b/adds-explicit-type-cast-for-port.patch @@ -1,4 +1,4 @@ -From 2182f2cbc835fee8a95101ce0c722d582b7456aa Mon Sep 17 00:00:00 2001 +From 12d67e0cfa54399f3a0b6ae0d4faa09793fa2b0f Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 1 Apr 2020 16:13:23 +0200 Subject: [PATCH] Adds explicit type cast for port @@ -12,22 +12,22 @@ The type casting to int solves this issue. 
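A minimal sketch of the comparison pitfall being fixed (the port values
below are illustrative, not taken from this patch):

    # Both values can arrive as strings: the caller may pass the port as
    # text, and the port parsed from the `ss` output is always a string.
    port = "22"
    remote_port = "22"

    print(int(remote_port) != port)       # True: 22 != "22", entry wrongly skipped
    print(int(remote_port) != int(port))  # False: casting both sides compares numbers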
1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/network.py b/salt/utils/network.py -index d6543ff160..def997f3dc 100644 +index 25b2d06758..1705a5809d 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -1457,9 +1457,9 @@ def _netlink_tool_remote_on(port, which_end): - local_host, local_port = chunks[3].rsplit(':', 1) - remote_host, remote_port = chunks[4].rsplit(':', 1) +@@ -1626,9 +1626,9 @@ def _netlink_tool_remote_on(port, which_end): + local_host, local_port = chunks[3].rsplit(":", 1) + remote_host, remote_port = chunks[4].rsplit(":", 1) -- if which_end == 'remote_port' and int(remote_port) != port: -+ if which_end == 'remote_port' and int(remote_port) != int(port): +- if which_end == "remote_port" and int(remote_port) != port: ++ if which_end == "remote_port" and int(remote_port) != int(port): continue -- if which_end == 'local_port' and int(local_port) != port: -+ if which_end == 'local_port' and int(local_port) != int(port): +- if which_end == "local_port" and int(local_port) != port: ++ if which_end == "local_port" and int(local_port) != int(port): continue remotes.add(remote_host.strip("[]")) -- -2.16.4 +2.29.2 diff --git a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch b/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch index f3ccc21..22fdf66 100644 --- a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch +++ b/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch @@ -1,4 +1,4 @@ -From 206a2f7c4c1104f2f35dfa2c0b775bef4adc5b91 Mon Sep 17 00:00:00 2001 +From 125f973014b8d5ffa13ae7dd231043e39af75ea0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 3 Jul 2019 09:34:50 +0100 @@ -7,62 +7,23 @@ Subject: [PATCH] Allow passing kwargs to pkg.list_downloaded Add unit test for pkg.list_downloaded with kwargs --- - salt/modules/zypperpkg.py | 2 +- - tests/unit/modules/test_zypperpkg.py | 27 +++++++++++++++++++++++++++ - 2 files changed, 28 insertions(+), 1 deletion(-) + salt/modules/zypperpkg.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 582caffb59..3760b525e7 100644 +index 75cb5ce4a8..c996935bff 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -2557,7 +2557,7 @@ def download(*packages, **kwargs): +@@ -2754,7 +2754,7 @@ def download(*packages, **kwargs): ) -def list_downloaded(root=None): +def list_downloaded(root=None, **kwargs): - ''' + """ .. versionadded:: 2017.7.0 -diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 3a6466f061..12c22bfcb2 100644 ---- a/tests/unit/modules/test_zypperpkg.py -+++ b/tests/unit/modules/test_zypperpkg.py -@@ -767,6 +767,33 @@ Repository 'DUMMY' not found by its alias, number, or URI. - self.assertEqual(len(list_patches), 3) - self.assertDictEqual(list_patches, PATCHES_RET) - -+ @patch('salt.utils.path.os_walk', MagicMock(return_value=[('test', 'test', 'test')])) -+ @patch('os.path.getsize', MagicMock(return_value=123456)) -+ @patch('os.path.getctime', MagicMock(return_value=1234567890.123456)) -+ @patch('fnmatch.filter', MagicMock(return_value=['/var/cache/zypper/packages/foo/bar/test_package.rpm'])) -+ def test_list_downloaded_with_kwargs(self): -+ ''' -+ Test downloaded packages listing. 
-+ -+ :return: -+ ''' -+ DOWNLOADED_RET = { -+ 'test-package': { -+ '1.0': { -+ 'path': '/var/cache/zypper/packages/foo/bar/test_package.rpm', -+ 'size': 123456, -+ 'creation_date_time_t': 1234567890, -+ 'creation_date_time': '2009-02-13T23:31:30', -+ } -+ } -+ } -+ -+ with patch.dict(zypper.__salt__, {'lowpkg.bin_pkg_info': MagicMock(return_value={'name': 'test-package', -+ 'version': '1.0'})}): -+ list_downloaded = zypper.list_downloaded(kw1=True, kw2=False) -+ self.assertEqual(len(list_downloaded), 1) -+ self.assertDictEqual(list_downloaded, DOWNLOADED_RET) -+ - @patch('salt.utils.path.os_walk', MagicMock(return_value=[('test', 'test', 'test')])) - @patch('os.path.getsize', MagicMock(return_value=123456)) - @patch('os.path.getctime', MagicMock(return_value=1234567890.123456)) -- -2.16.4 +2.29.2 diff --git a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch b/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch index d5fd856..e11f91a 100644 --- a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch +++ b/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch @@ -1,4 +1,4 @@ -From e1b4dda1eed90b4c6495b7a1fb047052f2cc5d5c Mon Sep 17 00:00:00 2001 +From 6111853f13c9c1e8eaaa1acd521cd3abfbfff766 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 13 Aug 2020 13:49:16 +0100 @@ -15,1810 +15,77 @@ Add new unit test for ansible.playbooks Add unit tests for ansible.playbooks state --- - salt/modules/ansiblegate.py | 10 +- - salt/states/ansiblegate.py | 51 +- - .../unit/files/playbooks/failed_example.json | 748 ++++++++++++++++ - .../unit/files/playbooks/success_example.json | 803 ++++++++++++++++++ - tests/unit/modules/test_ansiblegate.py | 15 + - tests/unit/states/test_ansiblegate.py | 113 +++ - 6 files changed, 1717 insertions(+), 23 deletions(-) - create mode 100644 tests/unit/files/playbooks/failed_example.json - create mode 100644 tests/unit/files/playbooks/success_example.json - create mode 100644 tests/unit/states/test_ansiblegate.py + tests/unit/modules/test_ansiblegate.py | 12 ++++++++++++ + tests/unit/states/test_ansiblegate.py | 7 ++++--- + 2 files changed, 16 insertions(+), 3 deletions(-) -diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py -index 8e28fcafa3..e76809d4ba 100644 ---- a/salt/modules/ansiblegate.py -+++ b/salt/modules/ansiblegate.py -@@ -381,9 +381,9 @@ def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None, - 'cwd': rundir, - 'cmd': ' '.join(command) - } -- ret = __salt__['cmd.run_all'](**cmd_kwargs) -- log.debug('Ansible Playbook Return: %s', ret) -- retdata = json.loads(ret['stdout']) -- if ret['retcode']: -- __context__['retcode'] = ret['retcode'] -+ ret = __salt__["cmd.run_all"](**cmd_kwargs) -+ log.debug("Ansible Playbook Return: %s", ret) -+ retdata = json.loads(ret["stdout"]) -+ if 'retcode' in ret: -+ __context__["retcode"] = retdata["retcode"] = ret["retcode"] - return retdata -diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py -index b42dc02938..d268e492e2 100644 ---- a/salt/states/ansiblegate.py -+++ b/salt/states/ansiblegate.py -@@ -120,9 +120,11 @@ def _changes(plays): - task_changes = {} - for task in play['tasks']: - host_changes = {} -- for host, data in six.iteritems(task['hosts']): -- if data['changed'] is True: -- host_changes[host] = data.get('diff', data.get('changes', {})) -+ for host, data in six.iteritems(task["hosts"]): -+ if data["changed"] is True: -+ host_changes[host] = data.get("diff", data.get("changes", {})) -+ elif 
any(x in data for x in ["failed", "skipped", "unreachable"]): -+ host_changes[host] = data.get("results", data.get("msg", {})) - if host_changes: - task_changes[task['task']['name']] = host_changes - if task_changes: -@@ -177,20 +179,33 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs= - if not isinstance(ansible_kwargs, dict): - log.debug('Setting ansible_kwargs to empty dict: %s', ansible_kwargs) - ansible_kwargs = {} -- checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True, diff=True, **ansible_kwargs) -- if all(not check['changed'] for check in six.itervalues(checks['stats'])): -- ret['comment'] = 'No changes to be made from playbook {0}'.format(name) -- ret['result'] = True -- elif __opts__['test']: -- ret['comment'] = 'Changes will be made from playbook {0}'.format(name) -- ret['result'] = None -- ret['changes'] = _changes(checks) -+ if __opts__["test"]: -+ checks = __salt__["ansible.playbooks"](name, rundir=rundir, check=True, diff=True, **ansible_kwargs) -+ if all(not check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(checks["stats"])): -+ ret["comment"] = "No changes to be made from playbook {0}".format(name) -+ ret["result"] = True -+ elif any(check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(checks["stats"])): -+ ret["comment"] = "Changes will be made from playbook {0}".format(name) -+ ret["result"] = None -+ ret["changes"] = _changes(checks) -+ else: -+ ret["comment"] = "There were some issues running the playbook {0}".format(name) -+ ret["result"] = False -+ ret["changes"] = _changes(checks) - else: -- results = __salt__['ansible.playbooks'](name, rundir=rundir, diff=True, **ansible_kwargs) -- ret['comment'] = 'Changes were made by playbook {0}'.format(name) -- ret['changes'] = _changes(results) -- ret['result'] = all( -- not check['failures'] and not check['unreachable'] -- for check in six.itervalues(checks['stats']) -- ) -+ results = __salt__["ansible.playbooks"](name, rundir=rundir, diff=True, **ansible_kwargs) -+ if all(not check["changed"] and not check["failures"] and not check["unreachable"] and not check["skipped"] for check in six.itervalues(results["stats"])): -+ ret["comment"] = "No changes to be made from playbook {0}".format(name) -+ ret["result"] = True -+ ret["changes"] = _changes(results) -+ else: -+ ret["changes"] = _changes(results) -+ ret["result"] = all( -+ not check["failures"] and not check["unreachable"] and not check["skipped"] -+ for check in six.itervalues(results["stats"]) -+ ) -+ if ret["result"]: -+ ret["comment"] = "Changes were made by playbook {0}".format(name) -+ else: -+ ret["comment"] = "There were some issues running the playbook {0}".format(name) - return ret -diff --git a/tests/unit/files/playbooks/failed_example.json b/tests/unit/files/playbooks/failed_example.json -new file mode 100644 -index 0000000000..9ee8ba25b7 ---- /dev/null -+++ b/tests/unit/files/playbooks/failed_example.json -@@ -0,0 +1,748 @@ -+{ -+ "custom_stats": {}, -+ "global_custom_stats": {}, -+ "plays": [ -+ { -+ "play": { -+ "duration": { -+ "end": "2020-08-14T11:55:33.889442Z", -+ "start": "2020-08-14T11:55:30.460145Z" -+ }, -+ "id": "5254001e-9fce-297d-21cd-000000000007", -+ "name": "py2hosts" -+ }, -+ "tasks": [ -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "_ansible_verbose_override": true, -+ "action": "gather_facts", -+ 
"ansible_facts": { -+ "ansible_all_ipv4_addresses": [ -+ "192.168.122.29" -+ ], -+ "ansible_all_ipv6_addresses": [ -+ "fe80::5054:ff:fe3e:4ce" -+ ], -+ "ansible_apparmor": { -+ "status": "disabled" -+ }, -+ "ansible_architecture": "x86_64", -+ "ansible_bios_date": "04/01/2014", -+ "ansible_bios_version": "rel-1.13.0-0-gf21b5a4-rebuilt.opensuse.org", -+ "ansible_cmdline": { -+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64", -+ "LANG": "en_US.UTF-8", -+ "console": "ttyS0,115200", -+ "crashkernel": "auto", -+ "quiet": true, -+ "rhgb": true, -+ "ro": true, -+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "ansible_date_time": { -+ "date": "2020-08-14", -+ "day": "14", -+ "epoch": "1597406131", -+ "hour": "13", -+ "iso8601": "2020-08-14T11:55:31Z", -+ "iso8601_basic": "20200814T135531991936", -+ "iso8601_basic_short": "20200814T135531", -+ "iso8601_micro": "2020-08-14T11:55:31.992035Z", -+ "minute": "55", -+ "month": "08", -+ "second": "31", -+ "time": "13:55:31", -+ "tz": "CEST", -+ "tz_offset": "+0200", -+ "weekday": "Friday", -+ "weekday_number": "5", -+ "weeknumber": "32", -+ "year": "2020" -+ }, -+ "ansible_default_ipv4": { -+ "address": "192.168.122.29", -+ "alias": "eth0", -+ "broadcast": "192.168.122.255", -+ "gateway": "192.168.122.1", -+ "interface": "eth0", -+ "macaddress": "52:54:00:3e:04:ce", -+ "mtu": 1500, -+ "netmask": "255.255.255.0", -+ "network": "192.168.122.0", -+ "type": "ether" -+ }, -+ "ansible_default_ipv6": {}, -+ "ansible_device_links": { -+ "ids": {}, -+ "labels": {}, -+ "masters": {}, -+ "uuids": { -+ "vda1": [ -+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ ], -+ "vda2": [ -+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ ], -+ "vda3": [ -+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ ], -+ "vda5": [ -+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ ] -+ } -+ }, -+ "ansible_devices": { -+ "vda": { -+ "holders": [], -+ "host": "", -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [] -+ }, -+ "model": null, -+ "partitions": { -+ "vda1": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ ] -+ }, -+ "sectors": "2097152", -+ "sectorsize": 512, -+ "size": "1.00 GB", -+ "start": "2048", -+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ }, -+ "vda2": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ ] -+ }, -+ "sectors": "4196352", -+ "sectorsize": 512, -+ "size": "2.00 GB", -+ "start": "2099200", -+ "uuid": "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ }, -+ "vda3": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ ] -+ }, -+ "sectors": "104857600", -+ "sectorsize": 512, -+ "size": "50.00 GB", -+ "start": "6295552", -+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "vda4": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [] -+ }, -+ "sectors": "2", -+ "sectorsize": 512, -+ "size": "1.00 KB", -+ "start": "111153152", -+ "uuid": null -+ }, -+ "vda5": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ ] -+ }, -+ "sectors": "308275200", -+ "sectorsize": 512, -+ "size": "147.00 GB", -+ "start": "111155200", -+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ } -+ }, -+ "removable": "0", -+ "rotational": "1", -+ "sas_address": null, -+ 
"sas_device_handle": null, -+ "scheduler_mode": "mq-deadline", -+ "sectors": "419430400", -+ "sectorsize": "512", -+ "size": "200.00 GB", -+ "support_discard": "0", -+ "vendor": "0x1af4", -+ "virtual": 1 -+ } -+ }, -+ "ansible_distribution": "CentOS", -+ "ansible_distribution_file_parsed": true, -+ "ansible_distribution_file_path": "/etc/redhat-release", -+ "ansible_distribution_file_variety": "RedHat", -+ "ansible_distribution_major_version": "7", -+ "ansible_distribution_release": "Core", -+ "ansible_distribution_version": "7.5", -+ "ansible_dns": { -+ "nameservers": [ -+ "192.168.122.1" -+ ] -+ }, -+ "ansible_domain": "tf.local", -+ "ansible_effective_group_id": 0, -+ "ansible_effective_user_id": 0, -+ "ansible_env": { -+ "HOME": "/root", -+ "LANG": "es_ES.utf8", -+ "LC_ADDRESS": "C", -+ "LC_COLLATE": "C", -+ "LC_CTYPE": "C", -+ "LC_IDENTIFICATION": "C", -+ "LC_MEASUREMENT": "C", -+ "LC_MESSAGES": "C", -+ "LC_MONETARY": "C", -+ "LC_NAME": "C", -+ "LC_NUMERIC": "C", -+ "LC_PAPER": "C", -+ "LC_TELEPHONE": "C", -+ "LC_TIME": "C", -+ "LESSOPEN": "||/usr/bin/lesspipe.sh %s", -+ "LOGNAME": "root", -+ "LS_COLORS": "rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:", -+ "MAIL": "/var/mail/root", -+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", -+ "PWD": "/root", -+ "SHELL": "/bin/bash", -+ "SHLVL": "2", -+ "SSH_CLIENT": "192.168.122.179 55766 22", -+ "SSH_CONNECTION": "192.168.122.179 55766 192.168.122.29 22", -+ "SSH_TTY": "/dev/pts/0", -+ "TERM": "xterm-256color", -+ "USER": "root", -+ "XDG_RUNTIME_DIR": "/run/user/0", -+ "XDG_SESSION_ID": "110", -+ "_": "/usr/bin/python" -+ }, -+ "ansible_eth0": { -+ "active": true, -+ "device": "eth0", -+ "features": { -+ "busy_poll": "off [fixed]", -+ "fcoe_mtu": "off [fixed]", -+ "generic_receive_offload": "on", -+ "generic_segmentation_offload": "on", -+ "highdma": "on [fixed]", -+ "hw_tc_offload": "off 
[fixed]", -+ "l2_fwd_offload": "off [fixed]", -+ "large_receive_offload": "off [fixed]", -+ "loopback": "off [fixed]", -+ "netns_local": "off [fixed]", -+ "ntuple_filters": "off [fixed]", -+ "receive_hashing": "off [fixed]", -+ "rx_all": "off [fixed]", -+ "rx_checksumming": "on [fixed]", -+ "rx_fcs": "off [fixed]", -+ "rx_udp_tunnel_port_offload": "off [fixed]", -+ "rx_vlan_filter": "on [fixed]", -+ "rx_vlan_offload": "off [fixed]", -+ "rx_vlan_stag_filter": "off [fixed]", -+ "rx_vlan_stag_hw_parse": "off [fixed]", -+ "scatter_gather": "on", -+ "tcp_segmentation_offload": "on", -+ "tx_checksum_fcoe_crc": "off [fixed]", -+ "tx_checksum_ip_generic": "on", -+ "tx_checksum_ipv4": "off [fixed]", -+ "tx_checksum_ipv6": "off [fixed]", -+ "tx_checksum_sctp": "off [fixed]", -+ "tx_checksumming": "on", -+ "tx_fcoe_segmentation": "off [fixed]", -+ "tx_gre_csum_segmentation": "off [fixed]", -+ "tx_gre_segmentation": "off [fixed]", -+ "tx_gso_partial": "off [fixed]", -+ "tx_gso_robust": "off [fixed]", -+ "tx_ipip_segmentation": "off [fixed]", -+ "tx_lockless": "off [fixed]", -+ "tx_nocache_copy": "off", -+ "tx_scatter_gather": "on", -+ "tx_scatter_gather_fraglist": "off [fixed]", -+ "tx_sctp_segmentation": "off [fixed]", -+ "tx_sit_segmentation": "off [fixed]", -+ "tx_tcp6_segmentation": "on", -+ "tx_tcp_ecn_segmentation": "on", -+ "tx_tcp_mangleid_segmentation": "off", -+ "tx_tcp_segmentation": "on", -+ "tx_udp_tnl_csum_segmentation": "off [fixed]", -+ "tx_udp_tnl_segmentation": "off [fixed]", -+ "tx_vlan_offload": "off [fixed]", -+ "tx_vlan_stag_hw_insert": "off [fixed]", -+ "udp_fragmentation_offload": "on", -+ "vlan_challenged": "off [fixed]" -+ }, -+ "hw_timestamp_filters": [], -+ "ipv4": { -+ "address": "192.168.122.29", -+ "broadcast": "192.168.122.255", -+ "netmask": "255.255.255.0", -+ "network": "192.168.122.0" -+ }, -+ "ipv6": [ -+ { -+ "address": "fe80::5054:ff:fe3e:4ce", -+ "prefix": "64", -+ "scope": "link" -+ } -+ ], -+ "macaddress": "52:54:00:3e:04:ce", -+ "module": "virtio_net", -+ "mtu": 1500, -+ "pciid": "virtio0", -+ "promisc": false, -+ "timestamping": [ -+ "rx_software", -+ "software" -+ ], -+ "type": "ether" -+ }, -+ "ansible_fibre_channel_wwn": [], -+ "ansible_fips": false, -+ "ansible_form_factor": "Other", -+ "ansible_fqdn": "centos7-host1.tf.local", -+ "ansible_hostname": "centos7-host1", -+ "ansible_hostnqn": "", -+ "ansible_interfaces": [ -+ "lo", -+ "eth0" -+ ], -+ "ansible_is_chroot": false, -+ "ansible_iscsi_iqn": "", -+ "ansible_kernel": "3.10.0-862.el7.x86_64", -+ "ansible_kernel_version": "#1 SMP Fri Apr 20 16:44:24 UTC 2018", -+ "ansible_lo": { -+ "active": true, -+ "device": "lo", -+ "features": { -+ "busy_poll": "off [fixed]", -+ "fcoe_mtu": "off [fixed]", -+ "generic_receive_offload": "on", -+ "generic_segmentation_offload": "on", -+ "highdma": "on [fixed]", -+ "hw_tc_offload": "off [fixed]", -+ "l2_fwd_offload": "off [fixed]", -+ "large_receive_offload": "off [fixed]", -+ "loopback": "on [fixed]", -+ "netns_local": "on [fixed]", -+ "ntuple_filters": "off [fixed]", -+ "receive_hashing": "off [fixed]", -+ "rx_all": "off [fixed]", -+ "rx_checksumming": "on [fixed]", -+ "rx_fcs": "off [fixed]", -+ "rx_udp_tunnel_port_offload": "off [fixed]", -+ "rx_vlan_filter": "off [fixed]", -+ "rx_vlan_offload": "off [fixed]", -+ "rx_vlan_stag_filter": "off [fixed]", -+ "rx_vlan_stag_hw_parse": "off [fixed]", -+ "scatter_gather": "on", -+ "tcp_segmentation_offload": "on", -+ "tx_checksum_fcoe_crc": "off [fixed]", -+ "tx_checksum_ip_generic": "on [fixed]", -+ "tx_checksum_ipv4": 
"off [fixed]", -+ "tx_checksum_ipv6": "off [fixed]", -+ "tx_checksum_sctp": "on [fixed]", -+ "tx_checksumming": "on", -+ "tx_fcoe_segmentation": "off [fixed]", -+ "tx_gre_csum_segmentation": "off [fixed]", -+ "tx_gre_segmentation": "off [fixed]", -+ "tx_gso_partial": "off [fixed]", -+ "tx_gso_robust": "off [fixed]", -+ "tx_ipip_segmentation": "off [fixed]", -+ "tx_lockless": "on [fixed]", -+ "tx_nocache_copy": "off [fixed]", -+ "tx_scatter_gather": "on [fixed]", -+ "tx_scatter_gather_fraglist": "on [fixed]", -+ "tx_sctp_segmentation": "on", -+ "tx_sit_segmentation": "off [fixed]", -+ "tx_tcp6_segmentation": "on", -+ "tx_tcp_ecn_segmentation": "on", -+ "tx_tcp_mangleid_segmentation": "on", -+ "tx_tcp_segmentation": "on", -+ "tx_udp_tnl_csum_segmentation": "off [fixed]", -+ "tx_udp_tnl_segmentation": "off [fixed]", -+ "tx_vlan_offload": "off [fixed]", -+ "tx_vlan_stag_hw_insert": "off [fixed]", -+ "udp_fragmentation_offload": "on", -+ "vlan_challenged": "on [fixed]" -+ }, -+ "hw_timestamp_filters": [], -+ "ipv4": { -+ "address": "127.0.0.1", -+ "broadcast": "host", -+ "netmask": "255.0.0.0", -+ "network": "127.0.0.0" -+ }, -+ "ipv6": [ -+ { -+ "address": "::1", -+ "prefix": "128", -+ "scope": "host" -+ } -+ ], -+ "mtu": 65536, -+ "promisc": false, -+ "timestamping": [ -+ "rx_software", -+ "software" -+ ], -+ "type": "loopback" -+ }, -+ "ansible_local": {}, -+ "ansible_lsb": {}, -+ "ansible_machine": "x86_64", -+ "ansible_machine_id": "d5f025e24919a00e864180785ebaa8c9", -+ "ansible_memfree_mb": 717, -+ "ansible_memory_mb": { -+ "nocache": { -+ "free": 893, -+ "used": 98 -+ }, -+ "real": { -+ "free": 717, -+ "total": 991, -+ "used": 274 -+ }, -+ "swap": { -+ "cached": 0, -+ "free": 2048, -+ "total": 2048, -+ "used": 0 -+ } -+ }, -+ "ansible_memtotal_mb": 991, -+ "ansible_mounts": [ -+ { -+ "block_available": 243103, -+ "block_size": 4096, -+ "block_total": 259584, -+ "block_used": 16481, -+ "device": "/dev/vda1", -+ "fstype": "xfs", -+ "inode_available": 523998, -+ "inode_total": 524288, -+ "inode_used": 290, -+ "mount": "/boot", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 995749888, -+ "size_total": 1063256064, -+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ }, -+ { -+ "block_available": 12902656, -+ "block_size": 4096, -+ "block_total": 13100800, -+ "block_used": 198144, -+ "device": "/dev/vda3", -+ "fstype": "xfs", -+ "inode_available": 26189994, -+ "inode_total": 26214400, -+ "inode_used": 24406, -+ "mount": "/", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 52849278976, -+ "size_total": 53660876800, -+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ { -+ "block_available": 38507349, -+ "block_size": 4096, -+ "block_total": 38515585, -+ "block_used": 8236, -+ "device": "/dev/vda5", -+ "fstype": "xfs", -+ "inode_available": 77068797, -+ "inode_total": 77068800, -+ "inode_used": 3, -+ "mount": "/home", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 157726101504, -+ "size_total": 157759836160, -+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ } -+ ], -+ "ansible_nodename": "centos7-host1", -+ "ansible_os_family": "RedHat", -+ "ansible_pkg_mgr": "yum", -+ "ansible_proc_cmdline": { -+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64", -+ "LANG": "en_US.UTF-8", -+ "console": "ttyS0,115200", -+ "crashkernel": "auto", -+ "quiet": true, -+ "rhgb": true, -+ "ro": true, -+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "ansible_processor": [ -+ "0", -+ "GenuineIntel", -+ "QEMU Virtual CPU version 
2.5+" -+ ], -+ "ansible_processor_cores": 1, -+ "ansible_processor_count": 1, -+ "ansible_processor_threads_per_core": 1, -+ "ansible_processor_vcpus": 1, -+ "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)", -+ "ansible_product_serial": "NA", -+ "ansible_product_uuid": "18FEBA4D-2060-45E8-87AF-AD6574F522CC", -+ "ansible_product_version": "pc-i440fx-4.2", -+ "ansible_python": { -+ "executable": "/usr/bin/python", -+ "has_sslcontext": true, -+ "type": "CPython", -+ "version": { -+ "major": 2, -+ "micro": 5, -+ "minor": 7, -+ "releaselevel": "final", -+ "serial": 0 -+ }, -+ "version_info": [ -+ 2, -+ 7, -+ 5, -+ "final", -+ 0 -+ ] -+ }, -+ "ansible_python_version": "2.7.5", -+ "ansible_real_group_id": 0, -+ "ansible_real_user_id": 0, -+ "ansible_selinux": { -+ "status": "disabled" -+ }, -+ "ansible_selinux_python_present": true, -+ "ansible_service_mgr": "systemd", -+ "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3bXHUHyjmlbxE6LCP2ohRTr0pTX7sq89g0yKvovFK1qhP1rsBvy2jW8wjo2P8mlBWhL7obRGl8B+i3cMxZdrc=", -+ "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIHv4wovK7u1Est8e1rMvQifupxLPpxtNEJIvKHq/iIVF", -+ "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDPW4spvldGYXFraJCWJAqkuyQQRogSL+aECRU0hAG+IwESq3ceVkUZrvMVnhxmVImcRGWLCP24wmiMC2G/sDMHfBIhQIc4ySvLLyVd20VIsQHWiODQsSZTKCWkIwNmWuUD/8FcIpHm4YKlzZdHRVPwx9oIkdzoxgGyGZ3em7QwhryPZ+GiK8P9dEE2xy2lfAMXCFEL6Eyw/WF1AS0KLZiKl5ct9aYedUZN1rWkWW1Kb9S+OsZ+qzjdZbU2EfQI8SnP8kkvKt1E/B1UnsfZ5R0nlsyIX6Bh8oCluqJrxXrsTBf/s4Pe76/Q7JH/QHp2Yw+sQb+l7wXhlNmDRTpqXDdR", -+ "ansible_swapfree_mb": 2048, -+ "ansible_swaptotal_mb": 2048, -+ "ansible_system": "Linux", -+ "ansible_system_capabilities": [ -+ "cap_chown", -+ "cap_dac_override", -+ "cap_dac_read_search", -+ "cap_fowner", -+ "cap_fsetid", -+ "cap_kill", -+ "cap_setgid", -+ "cap_setuid", -+ "cap_setpcap", -+ "cap_linux_immutable", -+ "cap_net_bind_service", -+ "cap_net_broadcast", -+ "cap_net_admin", -+ "cap_net_raw", -+ "cap_ipc_lock", -+ "cap_ipc_owner", -+ "cap_sys_module", -+ "cap_sys_rawio", -+ "cap_sys_chroot", -+ "cap_sys_ptrace", -+ "cap_sys_pacct", -+ "cap_sys_admin", -+ "cap_sys_boot", -+ "cap_sys_nice", -+ "cap_sys_resource", -+ "cap_sys_time", -+ "cap_sys_tty_config", -+ "cap_mknod", -+ "cap_lease", -+ "cap_audit_write", -+ "cap_audit_control", -+ "cap_setfcap", -+ "cap_mac_override", -+ "cap_mac_admin", -+ "cap_syslog", -+ "35", -+ "36+ep" -+ ], -+ "ansible_system_capabilities_enforced": "True", -+ "ansible_system_vendor": "QEMU", -+ "ansible_uptime_seconds": 178555, -+ "ansible_user_dir": "/root", -+ "ansible_user_gecos": "root", -+ "ansible_user_gid": 0, -+ "ansible_user_id": "root", -+ "ansible_user_shell": "/bin/bash", -+ "ansible_user_uid": 0, -+ "ansible_userspace_architecture": "x86_64", -+ "ansible_userspace_bits": "64", -+ "ansible_virtualization_role": "guest", -+ "ansible_virtualization_type": "kvm", -+ "discovered_interpreter_python": "/usr/bin/python", -+ "gather_subset": [ -+ "all" -+ ], -+ "module_setup": true -+ }, -+ "changed": false, -+ "deprecations": [], -+ "warnings": [] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:31.760375Z", -+ "start": "2020-08-14T11:55:30.470536Z" -+ }, -+ "id": "5254001e-9fce-297d-21cd-00000000000f", -+ "name": "Gathering Facts" -+ } -+ }, -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "action": "yum", -+ "changed": false, -+ "invocation": { -+ "module_args": { -+ "allow_downgrade": false, -+ "autoremove": false, -+ 
"bugfix": false, -+ "conf_file": null, -+ "disable_excludes": null, -+ "disable_gpg_check": false, -+ "disable_plugin": [], -+ "disablerepo": [], -+ "download_dir": null, -+ "download_only": false, -+ "enable_plugin": [], -+ "enablerepo": [], -+ "exclude": [], -+ "install_repoquery": true, -+ "install_weak_deps": true, -+ "installroot": "/", -+ "list": null, -+ "lock_timeout": 30, -+ "name": [ -+ "httpd" -+ ], -+ "releasever": null, -+ "security": false, -+ "skip_broken": false, -+ "state": "present", -+ "update_cache": false, -+ "update_only": false, -+ "use_backend": "auto", -+ "validate_certs": true -+ } -+ }, -+ "msg": "", -+ "rc": 0, -+ "results": [ -+ "httpd-2.4.6-93.el7.centos.x86_64 providing httpd is already installed" -+ ] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:32.952644Z", -+ "start": "2020-08-14T11:55:31.776073Z" -+ }, -+ "id": "5254001e-9fce-297d-21cd-000000000009", -+ "name": "yum" -+ } -+ }, -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "action": "yum", -+ "changed": false, -+ "failed": true, -+ "invocation": { -+ "module_args": { -+ "allow_downgrade": false, -+ "autoremove": false, -+ "bugfix": false, -+ "conf_file": null, -+ "disable_excludes": null, -+ "disable_gpg_check": false, -+ "disable_plugin": [], -+ "disablerepo": [], -+ "download_dir": null, -+ "download_only": false, -+ "enable_plugin": [], -+ "enablerepo": [], -+ "exclude": [], -+ "install_repoquery": true, -+ "install_weak_deps": true, -+ "installroot": "/", -+ "list": null, -+ "lock_timeout": 30, -+ "name": [ -+ "rsyndc" -+ ], -+ "releasever": null, -+ "security": false, -+ "skip_broken": false, -+ "state": "present", -+ "update_cache": false, -+ "update_only": false, -+ "use_backend": "auto", -+ "validate_certs": true -+ } -+ }, -+ "msg": "No package matching 'rsyndc' found available, installed or updated", -+ "rc": 126, -+ "results": [ -+ "No package matching 'rsyndc' found available, installed or updated" -+ ] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:33.889442Z", -+ "start": "2020-08-14T11:55:32.969762Z" -+ }, -+ "id": "5254001e-9fce-297d-21cd-00000000000a", -+ "name": "yum" -+ } -+ } -+ ] -+ } -+ ], -+ "stats": { -+ "centos7-host1.tf.local": { -+ "changed": 0, -+ "failures": 1, -+ "ignored": 0, -+ "ok": 2, -+ "rescued": 0, -+ "skipped": 0, -+ "unreachable": 0 -+ } -+ }, -+ "retcode": 2 -+} -diff --git a/tests/unit/files/playbooks/success_example.json b/tests/unit/files/playbooks/success_example.json -new file mode 100644 -index 0000000000..8a9f3ad868 ---- /dev/null -+++ b/tests/unit/files/playbooks/success_example.json -@@ -0,0 +1,803 @@ -+{ -+ "custom_stats": {}, -+ "global_custom_stats": {}, -+ "plays": [ -+ { -+ "play": { -+ "duration": { -+ "end": "2020-08-14T11:55:58.334076Z", -+ "start": "2020-08-14T11:55:54.295001Z" -+ }, -+ "id": "5254001e-9fce-f8b5-c66a-000000000007", -+ "name": "py2hosts" -+ }, -+ "tasks": [ -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "_ansible_verbose_override": true, -+ "action": "gather_facts", -+ "ansible_facts": { -+ "ansible_all_ipv4_addresses": [ -+ "192.168.122.29" -+ ], -+ "ansible_all_ipv6_addresses": [ -+ "fe80::5054:ff:fe3e:4ce" -+ ], -+ "ansible_apparmor": { -+ "status": "disabled" -+ }, -+ "ansible_architecture": "x86_64", -+ "ansible_bios_date": "04/01/2014", -+ "ansible_bios_version": "rel-1.13.0-0-gf21b5a4-rebuilt.opensuse.org", -+ "ansible_cmdline": { -+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64", -+ "LANG": "en_US.UTF-8", -+ 
"console": "ttyS0,115200", -+ "crashkernel": "auto", -+ "quiet": true, -+ "rhgb": true, -+ "ro": true, -+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "ansible_date_time": { -+ "date": "2020-08-14", -+ "day": "14", -+ "epoch": "1597406155", -+ "hour": "13", -+ "iso8601": "2020-08-14T11:55:55Z", -+ "iso8601_basic": "20200814T135555808955", -+ "iso8601_basic_short": "20200814T135555", -+ "iso8601_micro": "2020-08-14T11:55:55.809048Z", -+ "minute": "55", -+ "month": "08", -+ "second": "55", -+ "time": "13:55:55", -+ "tz": "CEST", -+ "tz_offset": "+0200", -+ "weekday": "Friday", -+ "weekday_number": "5", -+ "weeknumber": "32", -+ "year": "2020" -+ }, -+ "ansible_default_ipv4": { -+ "address": "192.168.122.29", -+ "alias": "eth0", -+ "broadcast": "192.168.122.255", -+ "gateway": "192.168.122.1", -+ "interface": "eth0", -+ "macaddress": "52:54:00:3e:04:ce", -+ "mtu": 1500, -+ "netmask": "255.255.255.0", -+ "network": "192.168.122.0", -+ "type": "ether" -+ }, -+ "ansible_default_ipv6": {}, -+ "ansible_device_links": { -+ "ids": {}, -+ "labels": {}, -+ "masters": {}, -+ "uuids": { -+ "vda1": [ -+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ ], -+ "vda2": [ -+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ ], -+ "vda3": [ -+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ ], -+ "vda5": [ -+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ ] -+ } -+ }, -+ "ansible_devices": { -+ "vda": { -+ "holders": [], -+ "host": "", -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [] -+ }, -+ "model": null, -+ "partitions": { -+ "vda1": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ ] -+ }, -+ "sectors": "2097152", -+ "sectorsize": 512, -+ "size": "1.00 GB", -+ "start": "2048", -+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ }, -+ "vda2": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ ] -+ }, -+ "sectors": "4196352", -+ "sectorsize": 512, -+ "size": "2.00 GB", -+ "start": "2099200", -+ "uuid": "5ec08dbf-55e4-4fb1-a866-7b0fedcb4a24" -+ }, -+ "vda3": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ ] -+ }, -+ "sectors": "104857600", -+ "sectorsize": 512, -+ "size": "50.00 GB", -+ "start": "6295552", -+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "vda4": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [] -+ }, -+ "sectors": "2", -+ "sectorsize": 512, -+ "size": "1.00 KB", -+ "start": "111153152", -+ "uuid": null -+ }, -+ "vda5": { -+ "holders": [], -+ "links": { -+ "ids": [], -+ "labels": [], -+ "masters": [], -+ "uuids": [ -+ "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ ] -+ }, -+ "sectors": "308275200", -+ "sectorsize": 512, -+ "size": "147.00 GB", -+ "start": "111155200", -+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ } -+ }, -+ "removable": "0", -+ "rotational": "1", -+ "sas_address": null, -+ "sas_device_handle": null, -+ "scheduler_mode": "mq-deadline", -+ "sectors": "419430400", -+ "sectorsize": "512", -+ "size": "200.00 GB", -+ "support_discard": "0", -+ "vendor": "0x1af4", -+ "virtual": 1 -+ } -+ }, -+ "ansible_distribution": "CentOS", -+ "ansible_distribution_file_parsed": true, -+ "ansible_distribution_file_path": "/etc/redhat-release", -+ "ansible_distribution_file_variety": "RedHat", -+ "ansible_distribution_major_version": "7", -+ 
"ansible_distribution_release": "Core", -+ "ansible_distribution_version": "7.5", -+ "ansible_dns": { -+ "nameservers": [ -+ "192.168.122.1" -+ ] -+ }, -+ "ansible_domain": "tf.local", -+ "ansible_effective_group_id": 0, -+ "ansible_effective_user_id": 0, -+ "ansible_env": { -+ "HOME": "/root", -+ "LANG": "es_ES.utf8", -+ "LC_ADDRESS": "C", -+ "LC_COLLATE": "C", -+ "LC_CTYPE": "C", -+ "LC_IDENTIFICATION": "C", -+ "LC_MEASUREMENT": "C", -+ "LC_MESSAGES": "C", -+ "LC_MONETARY": "C", -+ "LC_NAME": "C", -+ "LC_NUMERIC": "C", -+ "LC_PAPER": "C", -+ "LC_TELEPHONE": "C", -+ "LC_TIME": "C", -+ "LESSOPEN": "||/usr/bin/lesspipe.sh %s", -+ "LOGNAME": "root", -+ "LS_COLORS": "rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:", -+ "MAIL": "/var/mail/root", -+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", -+ "PWD": "/root", -+ "SHELL": "/bin/bash", -+ "SHLVL": "2", -+ "SSH_CLIENT": "192.168.122.179 55766 22", -+ "SSH_CONNECTION": "192.168.122.179 55766 192.168.122.29 22", -+ "SSH_TTY": "/dev/pts/0", -+ "TERM": "xterm-256color", -+ "USER": "root", -+ "XDG_RUNTIME_DIR": "/run/user/0", -+ "XDG_SESSION_ID": "110", -+ "_": "/usr/bin/python" -+ }, -+ "ansible_eth0": { -+ "active": true, -+ "device": "eth0", -+ "features": { -+ "busy_poll": "off [fixed]", -+ "fcoe_mtu": "off [fixed]", -+ "generic_receive_offload": "on", -+ "generic_segmentation_offload": "on", -+ "highdma": "on [fixed]", -+ "hw_tc_offload": "off [fixed]", -+ "l2_fwd_offload": "off [fixed]", -+ "large_receive_offload": "off [fixed]", -+ "loopback": "off [fixed]", -+ "netns_local": "off [fixed]", -+ "ntuple_filters": "off [fixed]", -+ "receive_hashing": "off [fixed]", -+ "rx_all": "off [fixed]", -+ "rx_checksumming": "on [fixed]", -+ "rx_fcs": "off [fixed]", -+ "rx_udp_tunnel_port_offload": "off [fixed]", -+ "rx_vlan_filter": "on [fixed]", -+ "rx_vlan_offload": "off [fixed]", -+ "rx_vlan_stag_filter": 
"off [fixed]", -+ "rx_vlan_stag_hw_parse": "off [fixed]", -+ "scatter_gather": "on", -+ "tcp_segmentation_offload": "on", -+ "tx_checksum_fcoe_crc": "off [fixed]", -+ "tx_checksum_ip_generic": "on", -+ "tx_checksum_ipv4": "off [fixed]", -+ "tx_checksum_ipv6": "off [fixed]", -+ "tx_checksum_sctp": "off [fixed]", -+ "tx_checksumming": "on", -+ "tx_fcoe_segmentation": "off [fixed]", -+ "tx_gre_csum_segmentation": "off [fixed]", -+ "tx_gre_segmentation": "off [fixed]", -+ "tx_gso_partial": "off [fixed]", -+ "tx_gso_robust": "off [fixed]", -+ "tx_ipip_segmentation": "off [fixed]", -+ "tx_lockless": "off [fixed]", -+ "tx_nocache_copy": "off", -+ "tx_scatter_gather": "on", -+ "tx_scatter_gather_fraglist": "off [fixed]", -+ "tx_sctp_segmentation": "off [fixed]", -+ "tx_sit_segmentation": "off [fixed]", -+ "tx_tcp6_segmentation": "on", -+ "tx_tcp_ecn_segmentation": "on", -+ "tx_tcp_mangleid_segmentation": "off", -+ "tx_tcp_segmentation": "on", -+ "tx_udp_tnl_csum_segmentation": "off [fixed]", -+ "tx_udp_tnl_segmentation": "off [fixed]", -+ "tx_vlan_offload": "off [fixed]", -+ "tx_vlan_stag_hw_insert": "off [fixed]", -+ "udp_fragmentation_offload": "on", -+ "vlan_challenged": "off [fixed]" -+ }, -+ "hw_timestamp_filters": [], -+ "ipv4": { -+ "address": "192.168.122.29", -+ "broadcast": "192.168.122.255", -+ "netmask": "255.255.255.0", -+ "network": "192.168.122.0" -+ }, -+ "ipv6": [ -+ { -+ "address": "fe80::5054:ff:fe3e:4ce", -+ "prefix": "64", -+ "scope": "link" -+ } -+ ], -+ "macaddress": "52:54:00:3e:04:ce", -+ "module": "virtio_net", -+ "mtu": 1500, -+ "pciid": "virtio0", -+ "promisc": false, -+ "timestamping": [ -+ "rx_software", -+ "software" -+ ], -+ "type": "ether" -+ }, -+ "ansible_fibre_channel_wwn": [], -+ "ansible_fips": false, -+ "ansible_form_factor": "Other", -+ "ansible_fqdn": "centos7-host1.tf.local", -+ "ansible_hostname": "centos7-host1", -+ "ansible_hostnqn": "", -+ "ansible_interfaces": [ -+ "lo", -+ "eth0" -+ ], -+ "ansible_is_chroot": false, -+ "ansible_iscsi_iqn": "", -+ "ansible_kernel": "3.10.0-862.el7.x86_64", -+ "ansible_kernel_version": "#1 SMP Fri Apr 20 16:44:24 UTC 2018", -+ "ansible_lo": { -+ "active": true, -+ "device": "lo", -+ "features": { -+ "busy_poll": "off [fixed]", -+ "fcoe_mtu": "off [fixed]", -+ "generic_receive_offload": "on", -+ "generic_segmentation_offload": "on", -+ "highdma": "on [fixed]", -+ "hw_tc_offload": "off [fixed]", -+ "l2_fwd_offload": "off [fixed]", -+ "large_receive_offload": "off [fixed]", -+ "loopback": "on [fixed]", -+ "netns_local": "on [fixed]", -+ "ntuple_filters": "off [fixed]", -+ "receive_hashing": "off [fixed]", -+ "rx_all": "off [fixed]", -+ "rx_checksumming": "on [fixed]", -+ "rx_fcs": "off [fixed]", -+ "rx_udp_tunnel_port_offload": "off [fixed]", -+ "rx_vlan_filter": "off [fixed]", -+ "rx_vlan_offload": "off [fixed]", -+ "rx_vlan_stag_filter": "off [fixed]", -+ "rx_vlan_stag_hw_parse": "off [fixed]", -+ "scatter_gather": "on", -+ "tcp_segmentation_offload": "on", -+ "tx_checksum_fcoe_crc": "off [fixed]", -+ "tx_checksum_ip_generic": "on [fixed]", -+ "tx_checksum_ipv4": "off [fixed]", -+ "tx_checksum_ipv6": "off [fixed]", -+ "tx_checksum_sctp": "on [fixed]", -+ "tx_checksumming": "on", -+ "tx_fcoe_segmentation": "off [fixed]", -+ "tx_gre_csum_segmentation": "off [fixed]", -+ "tx_gre_segmentation": "off [fixed]", -+ "tx_gso_partial": "off [fixed]", -+ "tx_gso_robust": "off [fixed]", -+ "tx_ipip_segmentation": "off [fixed]", -+ "tx_lockless": "on [fixed]", -+ "tx_nocache_copy": "off [fixed]", -+ "tx_scatter_gather": "on 
[fixed]", -+ "tx_scatter_gather_fraglist": "on [fixed]", -+ "tx_sctp_segmentation": "on", -+ "tx_sit_segmentation": "off [fixed]", -+ "tx_tcp6_segmentation": "on", -+ "tx_tcp_ecn_segmentation": "on", -+ "tx_tcp_mangleid_segmentation": "on", -+ "tx_tcp_segmentation": "on", -+ "tx_udp_tnl_csum_segmentation": "off [fixed]", -+ "tx_udp_tnl_segmentation": "off [fixed]", -+ "tx_vlan_offload": "off [fixed]", -+ "tx_vlan_stag_hw_insert": "off [fixed]", -+ "udp_fragmentation_offload": "on", -+ "vlan_challenged": "on [fixed]" -+ }, -+ "hw_timestamp_filters": [], -+ "ipv4": { -+ "address": "127.0.0.1", -+ "broadcast": "host", -+ "netmask": "255.0.0.0", -+ "network": "127.0.0.0" -+ }, -+ "ipv6": [ -+ { -+ "address": "::1", -+ "prefix": "128", -+ "scope": "host" -+ } -+ ], -+ "mtu": 65536, -+ "promisc": false, -+ "timestamping": [ -+ "rx_software", -+ "software" -+ ], -+ "type": "loopback" -+ }, -+ "ansible_local": {}, -+ "ansible_lsb": {}, -+ "ansible_machine": "x86_64", -+ "ansible_machine_id": "d5f025e24919a00e864180785ebaa8c9", -+ "ansible_memfree_mb": 717, -+ "ansible_memory_mb": { -+ "nocache": { -+ "free": 893, -+ "used": 98 -+ }, -+ "real": { -+ "free": 717, -+ "total": 991, -+ "used": 274 -+ }, -+ "swap": { -+ "cached": 0, -+ "free": 2048, -+ "total": 2048, -+ "used": 0 -+ } -+ }, -+ "ansible_memtotal_mb": 991, -+ "ansible_mounts": [ -+ { -+ "block_available": 243103, -+ "block_size": 4096, -+ "block_total": 259584, -+ "block_used": 16481, -+ "device": "/dev/vda1", -+ "fstype": "xfs", -+ "inode_available": 523998, -+ "inode_total": 524288, -+ "inode_used": 290, -+ "mount": "/boot", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 995749888, -+ "size_total": 1063256064, -+ "uuid": "81b5a934-1fbb-4d6f-a972-bc7c9eb48345" -+ }, -+ { -+ "block_available": 12902661, -+ "block_size": 4096, -+ "block_total": 13100800, -+ "block_used": 198139, -+ "device": "/dev/vda3", -+ "fstype": "xfs", -+ "inode_available": 26189994, -+ "inode_total": 26214400, -+ "inode_used": 24406, -+ "mount": "/", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 52849299456, -+ "size_total": 53660876800, -+ "uuid": "2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ { -+ "block_available": 38507349, -+ "block_size": 4096, -+ "block_total": 38515585, -+ "block_used": 8236, -+ "device": "/dev/vda5", -+ "fstype": "xfs", -+ "inode_available": 77068797, -+ "inode_total": 77068800, -+ "inode_used": 3, -+ "mount": "/home", -+ "options": "rw,relatime,attr2,inode64,noquota", -+ "size_available": 157726101504, -+ "size_total": 157759836160, -+ "uuid": "7f7965bf-54e8-43d4-a2f6-cb7f56a9a249" -+ } -+ ], -+ "ansible_nodename": "centos7-host1", -+ "ansible_os_family": "RedHat", -+ "ansible_pkg_mgr": "yum", -+ "ansible_proc_cmdline": { -+ "BOOT_IMAGE": "/vmlinuz-3.10.0-862.el7.x86_64", -+ "LANG": "en_US.UTF-8", -+ "console": "ttyS0,115200", -+ "crashkernel": "auto", -+ "quiet": true, -+ "rhgb": true, -+ "ro": true, -+ "root": "UUID=2b13ca03-1e1d-4f51-8929-4e7fef390e0c" -+ }, -+ "ansible_processor": [ -+ "0", -+ "GenuineIntel", -+ "QEMU Virtual CPU version 2.5+" -+ ], -+ "ansible_processor_cores": 1, -+ "ansible_processor_count": 1, -+ "ansible_processor_threads_per_core": 1, -+ "ansible_processor_vcpus": 1, -+ "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)", -+ "ansible_product_serial": "NA", -+ "ansible_product_uuid": "18FEBA4D-2060-45E8-87AF-AD6574F522CC", -+ "ansible_product_version": "pc-i440fx-4.2", -+ "ansible_python": { -+ "executable": "/usr/bin/python", -+ "has_sslcontext": true, -+ 
"type": "CPython", -+ "version": { -+ "major": 2, -+ "micro": 5, -+ "minor": 7, -+ "releaselevel": "final", -+ "serial": 0 -+ }, -+ "version_info": [ -+ 2, -+ 7, -+ 5, -+ "final", -+ 0 -+ ] -+ }, -+ "ansible_python_version": "2.7.5", -+ "ansible_real_group_id": 0, -+ "ansible_real_user_id": 0, -+ "ansible_selinux": { -+ "status": "disabled" -+ }, -+ "ansible_selinux_python_present": true, -+ "ansible_service_mgr": "systemd", -+ "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3bXHUHyjmlbxE6LCP2ohRTr0pTX7sq89g0yKvovFK1qhP1rsBvy2jW8wjo2P8mlBWhL7obRGl8B+i3cMxZdrc=", -+ "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIHv4wovK7u1Est8e1rMvQifupxLPpxtNEJIvKHq/iIVF", -+ "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDPW4spvldGYXFraJCWJAqkuyQQRogSL+aECRU0hAG+IwESq3ceVkUZrvMVnhxmVImcRGWLCP24wmiMC2G/sDMHfBIhQIc4ySvLLyVd20VIsQHWiODQsSZTKCWkIwNmWuUD/8FcIpHm4YKlzZdHRVPwx9oIkdzoxgGyGZ3em7QwhryPZ+GiK8P9dEE2xy2lfAMXCFEL6Eyw/WF1AS0KLZiKl5ct9aYedUZN1rWkWW1Kb9S+OsZ+qzjdZbU2EfQI8SnP8kkvKt1E/B1UnsfZ5R0nlsyIX6Bh8oCluqJrxXrsTBf/s4Pe76/Q7JH/QHp2Yw+sQb+l7wXhlNmDRTpqXDdR", -+ "ansible_swapfree_mb": 2048, -+ "ansible_swaptotal_mb": 2048, -+ "ansible_system": "Linux", -+ "ansible_system_capabilities": [ -+ "cap_chown", -+ "cap_dac_override", -+ "cap_dac_read_search", -+ "cap_fowner", -+ "cap_fsetid", -+ "cap_kill", -+ "cap_setgid", -+ "cap_setuid", -+ "cap_setpcap", -+ "cap_linux_immutable", -+ "cap_net_bind_service", -+ "cap_net_broadcast", -+ "cap_net_admin", -+ "cap_net_raw", -+ "cap_ipc_lock", -+ "cap_ipc_owner", -+ "cap_sys_module", -+ "cap_sys_rawio", -+ "cap_sys_chroot", -+ "cap_sys_ptrace", -+ "cap_sys_pacct", -+ "cap_sys_admin", -+ "cap_sys_boot", -+ "cap_sys_nice", -+ "cap_sys_resource", -+ "cap_sys_time", -+ "cap_sys_tty_config", -+ "cap_mknod", -+ "cap_lease", -+ "cap_audit_write", -+ "cap_audit_control", -+ "cap_setfcap", -+ "cap_mac_override", -+ "cap_mac_admin", -+ "cap_syslog", -+ "35", -+ "36+ep" -+ ], -+ "ansible_system_capabilities_enforced": "True", -+ "ansible_system_vendor": "QEMU", -+ "ansible_uptime_seconds": 178578, -+ "ansible_user_dir": "/root", -+ "ansible_user_gecos": "root", -+ "ansible_user_gid": 0, -+ "ansible_user_id": "root", -+ "ansible_user_shell": "/bin/bash", -+ "ansible_user_uid": 0, -+ "ansible_userspace_architecture": "x86_64", -+ "ansible_userspace_bits": "64", -+ "ansible_virtualization_role": "guest", -+ "ansible_virtualization_type": "kvm", -+ "discovered_interpreter_python": "/usr/bin/python", -+ "gather_subset": [ -+ "all" -+ ], -+ "module_setup": true -+ }, -+ "changed": false, -+ "deprecations": [], -+ "warnings": [] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:55.578128Z", -+ "start": "2020-08-14T11:55:54.313122Z" -+ }, -+ "id": "5254001e-9fce-f8b5-c66a-00000000000f", -+ "name": "Gathering Facts" -+ } -+ }, -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "action": "yum", -+ "changed": false, -+ "invocation": { -+ "module_args": { -+ "allow_downgrade": false, -+ "autoremove": false, -+ "bugfix": false, -+ "conf_file": null, -+ "disable_excludes": null, -+ "disable_gpg_check": false, -+ "disable_plugin": [], -+ "disablerepo": [], -+ "download_dir": null, -+ "download_only": false, -+ "enable_plugin": [], -+ "enablerepo": [], -+ "exclude": [], -+ "install_repoquery": true, -+ "install_weak_deps": true, -+ "installroot": "/", -+ "list": null, -+ "lock_timeout": 30, -+ "name": [ -+ "httpd" -+ ], -+ "releasever": null, -+ "security": false, 
-+ "skip_broken": false, -+ "state": "present", -+ "update_cache": false, -+ "update_only": false, -+ "use_backend": "auto", -+ "validate_certs": true -+ } -+ }, -+ "msg": "", -+ "rc": 0, -+ "results": [ -+ "httpd-2.4.6-93.el7.centos.x86_64 providing httpd is already installed" -+ ] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:56.737921Z", -+ "start": "2020-08-14T11:55:55.596293Z" -+ }, -+ "id": "5254001e-9fce-f8b5-c66a-000000000009", -+ "name": "yum" -+ } -+ }, -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "action": "yum", -+ "changed": false, -+ "invocation": { -+ "module_args": { -+ "allow_downgrade": false, -+ "autoremove": false, -+ "bugfix": false, -+ "conf_file": null, -+ "disable_excludes": null, -+ "disable_gpg_check": false, -+ "disable_plugin": [], -+ "disablerepo": [], -+ "download_dir": null, -+ "download_only": false, -+ "enable_plugin": [], -+ "enablerepo": [], -+ "exclude": [], -+ "install_repoquery": true, -+ "install_weak_deps": true, -+ "installroot": "/", -+ "list": null, -+ "lock_timeout": 30, -+ "name": [ -+ "rsync" -+ ], -+ "releasever": null, -+ "security": false, -+ "skip_broken": false, -+ "state": "present", -+ "update_cache": false, -+ "update_only": false, -+ "use_backend": "auto", -+ "validate_certs": true -+ } -+ }, -+ "msg": "", -+ "rc": 0, -+ "results": [ -+ "rsync-3.1.2-10.el7.x86_64 providing rsync is already installed" -+ ] -+ } -+ }, -+ "task": { -+ "duration": { -+ "end": "2020-08-14T11:55:57.609670Z", -+ "start": "2020-08-14T11:55:56.755620Z" -+ }, -+ "id": "5254001e-9fce-f8b5-c66a-00000000000a", -+ "name": "yum" -+ } -+ }, -+ { -+ "hosts": { -+ "centos7-host1.tf.local": { -+ "_ansible_no_log": false, -+ "action": "synchronize", -+ "changed": true, -+ "cmd": "/usr/bin/rsync --delay-updates -F --compress --delete-after --archive --rsh=/usr/bin/ssh -S none -i /etc/ansible/keys/mykey.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null --out-format=<>%i %n%L /root/myfiles/ centos7-host1.tf.local:/var/www/html/", -+ "invocation": { -+ "module_args": { -+ "_local_rsync_password": null, -+ "_local_rsync_path": "rsync", -+ "_substitute_controller": false, -+ "archive": true, -+ "checksum": false, -+ "compress": true, -+ "copy_links": false, -+ "delete": true, -+ "dest": "centos7-host1.tf.local:/var/www/html/", -+ "dest_port": null, -+ "dirs": false, -+ "existing_only": false, -+ "group": null, -+ "link_dest": null, -+ "links": null, -+ "mode": "push", -+ "owner": null, -+ "partial": false, -+ "perms": null, -+ "private_key": "/etc/ansible/keys/mykey.pem", -+ "recursive": null, -+ "rsync_opts": [], -+ "rsync_path": null, -+ "rsync_timeout": 0, -+ "set_remote_user": true, -+ "src": "/root/myfiles/", -+ "ssh_args": null, -+ "times": null, -+ "verify_host": false -+ } -+ }, -+ "msg": " Date: Mon, 17 Feb 2020 15:34:00 +1100 Subject: [PATCH] Apply patch from upstream to support Python 3.8 @@ -7,15 +7,12 @@ Apply saltstack/salt#56031 to support Python 3.8, which removed a deprecated module and changed some behaviour. Add a {Build,}Requires on python-distro, since it is now required. 
--- - pkg/suse/salt.spec | 2 ++ - salt/config/__init__.py | 4 +++- - salt/grains/core.py | 16 ++++++++-------- - salt/renderers/stateconf.py | 8 ++++---- - tests/unit/modules/test_virt.py | 2 +- - 5 files changed, 18 insertions(+), 14 deletions(-) + pkg/suse/salt.spec | 2 ++ + salt/renderers/stateconf.py | 49 ++++++++++++++++--------------------- + 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/pkg/suse/salt.spec b/pkg/suse/salt.spec -index e3e678af3b..0f6a9bc012 100644 +index a17d2381ce..0df9d6c283 100644 --- a/pkg/suse/salt.spec +++ b/pkg/suse/salt.spec @@ -62,6 +62,7 @@ BuildRequires: python-psutil @@ -34,95 +31,205 @@ index e3e678af3b..0f6a9bc012 100644 %if 0%{?suse_version} # requirements/opt.txt (not all) Recommends: python-MySQL-python -diff --git a/salt/config/__init__.py b/salt/config/__init__.py -index 0ebe1181dd..f484d94e7e 100644 ---- a/salt/config/__init__.py -+++ b/salt/config/__init__.py -@@ -3196,7 +3196,9 @@ def apply_cloud_providers_config(overrides, defaults=None): - # Merge provided extends - keep_looping = False - for alias, entries in six.iteritems(providers.copy()): -- for driver, details in six.iteritems(entries): -+ for driver in list(six.iterkeys(entries)): -+ # Don't use iteritems, because the values of the dictionary will be changed -+ details = entries[driver] - - if 'extends' not in details: - # Extends resolved or non existing, continue! -diff --git a/salt/grains/core.py b/salt/grains/core.py -index f410985198..358b66fdb0 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -40,20 +40,20 @@ except ImportError: - __proxyenabled__ = ['*'] - __FQDN__ = None - --# Extend the default list of supported distros. This will be used for the --# /etc/DISTRO-release checking that is part of linux_distribution() --from platform import _supported_dists --_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64', -- 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void') -- - # linux_distribution deprecated in py3.7 - try: - from platform import linux_distribution as _deprecated_linux_distribution - -+ # Extend the default list of supported distros. This will be used for the -+ # /etc/DISTRO-release checking that is part of linux_distribution() -+ from platform import _supported_dists -+ _supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64', -+ 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void') -+ - def linux_distribution(**kwargs): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") -- return _deprecated_linux_distribution(**kwargs) -+ return _deprecated_linux_distribution(supported_dists=_supported_dists, **kwargs) - except ImportError: - from distro import linux_distribution - -@@ -1976,7 +1976,7 @@ def os_data(): - ) - (osname, osrelease, oscodename) = \ - [x.strip('"').strip("'") for x in -- linux_distribution(supported_dists=_supported_dists)] -+ linux_distribution()] - # Try to assign these three names based on the lsb info, they tend to - # be more accurate than what python gets from /etc/DISTRO-release. 
- # It's worth noting that Ubuntu has patched their Python distribution diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py -index cfce9e6926..5c8a8322ed 100644 +index 298ae28338..f0527d51d7 100644 --- a/salt/renderers/stateconf.py +++ b/salt/renderers/stateconf.py -@@ -224,10 +224,10 @@ def render(input, saltenv='base', sls='', argline='', **kws): - tmplctx = STATE_CONF.copy() - if tmplctx: - prefix = sls + '::' -- for k in six.iterkeys(tmplctx): # iterate over a copy of keys -- if k.startswith(prefix): -- tmplctx[k[len(prefix):]] = tmplctx[k] -- del tmplctx[k] -+ tmplctx = { -+ k[len(prefix):] if k.startswith(prefix) else k: v -+ for k, v in six.iteritems(tmplctx) -+ } +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + """ + A flexible renderer that takes a templating engine and a data format + +@@ -26,8 +25,6 @@ A flexible renderer that takes a templating engine and a data format + # - apache: >= 0.1.0 + # + +-# Import python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import copy + import getopt +@@ -36,12 +33,9 @@ import os + import re + from itertools import chain + +-# Import salt libs + import salt.utils.files + import salt.utils.stringutils + from salt.exceptions import SaltRenderError +- +-# Import 3rd-party libs + from salt.ext import six + from salt.ext.six.moves import StringIO # pylint: disable=import-error + +@@ -135,7 +129,7 @@ def render(input, saltenv="base", sls="", argline="", **kws): + sid = has_names_decls(data) + if sid: + raise SaltRenderError( +- "'names' declaration(found in state id: {0}) is " ++ "'names' declaration(found in state id: {}) is " + "not supported with implicitly ordered states! You " + "should generate the states in a template for-loop " + "instead.".format(sid) +@@ -203,11 +197,11 @@ def render(input, saltenv="base", sls="", argline="", **kws): + name, rt_argline = (args[1] + " ").split(" ", 1) + render_template = renderers[name] # e.g., the mako renderer + except KeyError as err: +- raise SaltRenderError("Renderer: {0} is not available!".format(err)) ++ raise SaltRenderError("Renderer: {} is not available!".format(err)) + except IndexError: + raise INVALID_USAGE_ERROR + +- if isinstance(input, six.string_types): ++ if isinstance(input, str): + with salt.utils.files.fopen(input, "r") as ifile: + sls_templ = salt.utils.stringutils.to_unicode(ifile.read()) + else: # assume file-like +@@ -227,7 +221,7 @@ def render(input, saltenv="base", sls="", argline="", **kws): + prefix = sls + "::" + tmplctx = { + k[len(prefix) :] if k.startswith(prefix) else k: v +- for k, v in six.iteritems(tmplctx) ++ for k, v in tmplctx.items() + } else: tmplctx = {} +@@ -262,8 +256,8 @@ def rewrite_single_shorthand_state_decl(data): # pylint: disable=C0103 + state_id_decl: + state.func: [] + """ +- for sid, states in six.iteritems(data): +- if isinstance(states, six.string_types): ++ for sid, states in data.items(): ++ if isinstance(states, str): + data[sid] = {states: []} -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index 94372c6d72..d762dcc479 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -1256,7 +1256,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - -
- -- -+ - - - @@ -917,7 +847,7 @@ index 6e61544a1f..ca5e80d2d2 100644 domain_mock_boot = self.set_mock_vm("vm_with_boot_param", xml_boot) domain_mock_boot.OSType = MagicMock(return_value="hvm") define_mock_boot = MagicMock(return_value=True) -@@ -2697,6 +2787,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2694,6 +2786,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(setxml.find("os").find("loader"), None) self.assertEqual(setxml.find("os").find("nvram"), None) @@ -1137,32 +1067,18 @@ index 6e61544a1f..ca5e80d2d2 100644 """ Test virt._nic_profile with mixed dictionaries and lists as input. diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py -index f03159334b..1923ae5c0f 100644 +index 8fe892f607..1923ae5c0f 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py -@@ -1,21 +1,15 @@ - """ - :codeauthor: Jayesh Kariya - """ --# Import Python libs - - import shutil - import tempfile - --# Import Salt Libs +@@ -8,7 +8,6 @@ import tempfile import salt.states.virt as virt import salt.utils.files from salt.exceptions import CommandExecutionError, SaltInvocationError -- --# Import 3rd-party libs +-from salt.ext import six from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, mock_open, patch -- --# Import Salt Testing Libs from tests.support.runtests import RUNTIME_VARS - from tests.support.unit import TestCase - -@@ -351,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -346,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): install=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -1170,7 +1086,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -376,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -371,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): start=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -1178,7 +1094,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -489,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -484,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=False, @@ -1186,7 +1102,7 @@ index f03159334b..1923ae5c0f 100644 ) # Failed definition update case -@@ -559,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -554,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): install=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -1194,7 +1110,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -601,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -596,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): boot=None, test=True, boot_dev=None, @@ -1202,7 +1118,7 @@ index f03159334b..1923ae5c0f 100644 ) # No changes case -@@ -636,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -631,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): boot=None, test=True, boot_dev=None, @@ -1210,7 +1126,7 @@ index f03159334b..1923ae5c0f 100644 ) def test_running(self): -@@ -713,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -708,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): pub_key=None, priv_key=None, 
boot_dev=None, @@ -1218,7 +1134,7 @@ index f03159334b..1923ae5c0f 100644 connection=None, username=None, password=None, -@@ -775,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -770,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): pub_key="/path/to/key.pub", priv_key="/path/to/key", boot_dev="network hd", @@ -1226,7 +1142,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -800,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -795,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): pub_key="/path/to/key.pub", priv_key="/path/to/key", boot_dev="network hd", @@ -1234,7 +1150,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -945,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -940,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): boot=None, test=False, boot_dev=None, @@ -1242,7 +1158,7 @@ index f03159334b..1923ae5c0f 100644 ) # Failed definition update case -@@ -1018,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1013,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): install=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -1250,7 +1166,7 @@ index f03159334b..1923ae5c0f 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -1064,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1059,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): boot=None, test=True, boot_dev=None, @@ -1258,7 +1174,7 @@ index f03159334b..1923ae5c0f 100644 ) start_mock.assert_not_called() -@@ -1101,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1096,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): boot=None, test=True, boot_dev=None, @@ -1267,6 +1183,6 @@ index f03159334b..1923ae5c0f 100644 def test_stopped(self): -- -2.28.0 +2.29.2 diff --git a/backport-virt-patches-from-3001-256.patch b/backport-virt-patches-from-3001-256.patch index 0e6785a..4a32152 100644 --- a/backport-virt-patches-from-3001-256.patch +++ b/backport-virt-patches-from-3001-256.patch @@ -1,4 +1,4 @@ -From 5bd071081ccb8ae3938643831d2e5632712b48b7 Mon Sep 17 00:00:00 2001 +From 32559016ba2bd306a3a027a2191857f24258fc46 Mon Sep 17 00:00:00 2001 From: Cedric Bosdonnat Date: Mon, 7 Sep 2020 15:00:40 +0200 Subject: [PATCH] Backport virt patches from 3001+ (#256) @@ -411,26 +411,20 @@ Co-authored-by: xeacott Co-authored-by: Frode Gundersen Co-authored-by: Daniel A. 
Wozniak --- - changelog/56454.fixed | 1 + - changelog/57544.added | 1 + - changelog/58331.fixed | 1 + - salt/modules/virt.py | 442 ++++--- - salt/states/virt.py | 171 ++- - salt/templates/virt/libvirt_domain.jinja | 2 +- - salt/utils/data.py | 976 +++++++++------ - salt/utils/xmlutil.py | 251 +++- - tests/pytests/unit/utils/test_data.py | 57 + - tests/pytests/unit/utils/test_xmlutil.py | 169 +++ - tests/unit/modules/test_virt.py | 218 ++-- - tests/unit/states/test_virt.py | 98 +- - tests/unit/utils/test_data.py | 1399 ++++++++++++---------- - tests/unit/utils/test_xmlutil.py | 164 +-- - 14 files changed, 2588 insertions(+), 1362 deletions(-) + changelog/56454.fixed | 1 + + changelog/57544.added | 1 + + changelog/58331.fixed | 1 + + salt/modules/virt.py | 270 +++++++++++++---------- + salt/states/virt.py | 88 ++++++-- + salt/templates/virt/libvirt_domain.jinja | 29 +-- + salt/utils/xmlutil.py | 4 +- + tests/unit/modules/test_virt.py | 159 +++++++++---- + tests/unit/states/test_virt.py | 93 +++++++- + tests/unit/utils/test_data.py | 32 --- + 10 files changed, 441 insertions(+), 237 deletions(-) create mode 100644 changelog/56454.fixed create mode 100644 changelog/57544.added create mode 100644 changelog/58331.fixed - create mode 100644 tests/pytests/unit/utils/test_data.py - create mode 100644 tests/pytests/unit/utils/test_xmlutil.py diff --git a/changelog/56454.fixed b/changelog/56454.fixed new file mode 100644 @@ -454,41 +448,11 @@ index 0000000000..4b8f78dd53 @@ -0,0 +1 @@ +Leave boot parameters untouched if boot parameter is set to None in virt.update diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index a78c21e323..cd80fbe608 100644 +index fb27397baa..ec40f08359 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py -@@ -1,8 +1,11 @@ --# -*- coding: utf-8 -*- - """ - Work with virtual machines managed by libvirt - --:depends: libvirt Python module -+:depends: -+ * libvirt Python module -+ * libvirt client -+ * qemu-img -+ * grep - - Connection - ========== -@@ -73,7 +76,7 @@ The calls not using the libvirt connection setup are: - # of his in the virt func module have been used - - # Import python libs --from __future__ import absolute_import, print_function, unicode_literals -+ - import base64 - import copy - import datetime -@@ -89,23 +92,19 @@ from xml.etree import ElementTree - from xml.sax import saxutils - - # Import third party libs --import jinja2 +@@ -94,17 +94,13 @@ from xml.sax import saxutils import jinja2.exceptions - - # Import salt libs -+import salt.utils.data import salt.utils.files import salt.utils.json -import salt.utils.network @@ -505,72 +469,7 @@ index a78c21e323..cd80fbe608 100644 from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves.urllib.parse import urlparse, urlunparse from salt.utils.virt import check_remote, download_remote -@@ -227,8 +226,8 @@ def __get_conn(**kwargs): - ) - except Exception: # pylint: disable=broad-except - raise CommandExecutionError( -- "Sorry, {0} failed to open a connection to the hypervisor " -- "software at {1}".format(__grains__["fqdn"], conn_str) -+ "Sorry, {} failed to open a connection to the hypervisor " -+ "software at {}".format(__grains__["fqdn"], conn_str) - ) - return conn - -@@ -405,7 +404,7 @@ def _get_nics(dom): - # driver, source, and match can all have optional attributes - if re.match("(driver|source|address)", v_node.tag): - temp = {} -- for key, value in six.iteritems(v_node.attrib): -+ for key, value in v_node.attrib.items(): - temp[key] = 
value - nic[v_node.tag] = temp - # virtualport needs to be handled separately, to pick up the -@@ -413,7 +412,7 @@ def _get_nics(dom): - if v_node.tag == "virtualport": - temp = {} - temp["type"] = v_node.get("type") -- for key, value in six.iteritems(v_node.attrib): -+ for key, value in v_node.attrib.items(): - temp[key] = value - nic["virtualport"] = temp - if "mac" not in nic: -@@ -435,7 +434,7 @@ def _get_graphics(dom): - } - doc = ElementTree.fromstring(dom.XMLDesc(0)) - for g_node in doc.findall("devices/graphics"): -- for key, value in six.iteritems(g_node.attrib): -+ for key, value in g_node.attrib.items(): - out[key] = value - return out - -@@ -448,7 +447,7 @@ def _get_loader(dom): - doc = ElementTree.fromstring(dom.XMLDesc(0)) - for g_node in doc.findall("os/loader"): - out["path"] = g_node.text -- for key, value in six.iteritems(g_node.attrib): -+ for key, value in g_node.attrib.items(): - out[key] = value - return out - -@@ -503,7 +502,7 @@ def _get_disks(conn, dom): - qemu_target = source.get("protocol") - source_name = source.get("name") - if source_name: -- qemu_target = "{0}:{1}".format(qemu_target, source_name) -+ qemu_target = "{}:{}".format(qemu_target, source_name) - - # Reverse the magic for the rbd and gluster pools - if source.get("protocol") in ["rbd", "gluster"]: -@@ -633,7 +632,7 @@ def _get_target(target, ssh): - proto = "qemu" - if ssh: - proto += "+ssh" -- return " {0}://{1}/{2}".format(proto, target, "system") -+ return " {}://{}/{}".format(proto, target, "system") - - - def _gen_xml( -@@ -648,6 +647,7 @@ def _gen_xml( +@@ -647,6 +643,7 @@ def _gen_xml( arch, graphics=None, boot=None, @@ -578,18 +477,7 @@ index a78c21e323..cd80fbe608 100644 **kwargs ): """ -@@ -657,8 +657,8 @@ def _gen_xml( - context = { - "hypervisor": hypervisor, - "name": name, -- "cpu": six.text_type(cpu), -- "mem": six.text_type(mem), -+ "cpu": str(cpu), -+ "mem": str(mem), - } - if hypervisor in ["qemu", "kvm"]: - context["controller_model"] = False -@@ -681,15 +681,17 @@ def _gen_xml( +@@ -680,15 +677,17 @@ def _gen_xml( graphics = None context["graphics"] = graphics @@ -613,213 +501,7 @@ index a78c21e323..cd80fbe608 100644 if os_type == "xen": # Compute the Xen PV boot method if __grains__["os_family"] == "Suse": -@@ -720,7 +722,7 @@ def _gen_xml( - "target_dev": _get_disk_target(targets, len(diskp), prefix), - "disk_bus": disk["model"], - "format": disk.get("format", "raw"), -- "index": six.text_type(i), -+ "index": str(i), - } - targets.append(disk_context["target_dev"]) - if disk.get("source_file"): -@@ -825,8 +827,8 @@ def _gen_vol_xml( - "name": name, - "target": {"permissions": permissions, "nocow": nocow}, - "format": format, -- "size": six.text_type(size), -- "allocation": six.text_type(int(allocation) * 1024), -+ "size": str(size), -+ "allocation": str(int(allocation) * 1024), - "backingStore": backing_store, - } - fn_ = "libvirt_volume.jinja" -@@ -978,31 +980,29 @@ def _zfs_image_create( - """ - if not disk_image_name and not disk_size: - raise CommandExecutionError( -- "Unable to create new disk {0}, please specify" -+ "Unable to create new disk {}, please specify" - " the disk image name or disk size argument".format(disk_name) - ) - - if not pool: - raise CommandExecutionError( -- "Unable to create new disk {0}, please specify" -+ "Unable to create new disk {}, please specify" - " the disk pool name".format(disk_name) - ) - -- destination_fs = os.path.join(pool, "{0}.{1}".format(vm_name, disk_name)) -+ destination_fs = os.path.join(pool, "{}.{}".format(vm_name, disk_name)) 
- log.debug("Image destination will be %s", destination_fs) - - existing_disk = __salt__["zfs.list"](name=pool) - if "error" in existing_disk: - raise CommandExecutionError( -- "Unable to create new disk {0}. {1}".format( -+ "Unable to create new disk {}. {}".format( - destination_fs, existing_disk["error"] - ) - ) - elif destination_fs in existing_disk: - log.info( -- "ZFS filesystem {0} already exists. Skipping creation".format( -- destination_fs -- ) -+ "ZFS filesystem {} already exists. Skipping creation".format(destination_fs) - ) - blockdevice_path = os.path.join("/dev/zvol", pool, vm_name) - return blockdevice_path -@@ -1025,7 +1025,7 @@ def _zfs_image_create( - ) - - blockdevice_path = os.path.join( -- "/dev/zvol", pool, "{0}.{1}".format(vm_name, disk_name) -+ "/dev/zvol", pool, "{}.{}".format(vm_name, disk_name) - ) - log.debug("Image path will be %s", blockdevice_path) - return blockdevice_path -@@ -1042,7 +1042,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"): - - if not disk_size and not disk_image: - raise CommandExecutionError( -- "Unable to create new disk {0}, please specify" -+ "Unable to create new disk {}, please specify" - " disk size and/or disk image argument".format(disk["filename"]) - ) - -@@ -1066,7 +1066,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"): - if create_overlay and qcow2: - log.info("Cloning qcow2 image %s using copy on write", sfn) - __salt__["cmd.run"]( -- 'qemu-img create -f qcow2 -o backing_file="{0}" "{1}"'.format( -+ 'qemu-img create -f qcow2 -o backing_file="{}" "{}"'.format( - sfn, img_dest - ).split() - ) -@@ -1079,16 +1079,16 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"): - if disk_size and qcow2: - log.debug("Resize qcow2 image to %sM", disk_size) - __salt__["cmd.run"]( -- 'qemu-img resize "{0}" {1}M'.format(img_dest, disk_size) -+ 'qemu-img resize "{}" {}M'.format(img_dest, disk_size) - ) - - log.debug("Apply umask and remove exec bit") - mode = (0o0777 ^ mask) & 0o0666 - os.chmod(img_dest, mode) - -- except (IOError, OSError) as err: -+ except OSError as err: - raise CommandExecutionError( -- "Problem while copying image. {0} - {1}".format(disk_image, err) -+ "Problem while copying image. 
{} - {}".format(disk_image, err) - ) - - else: -@@ -1099,13 +1099,13 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"): - if disk_size: - log.debug("Create empty image with size %sM", disk_size) - __salt__["cmd.run"]( -- 'qemu-img create -f {0} "{1}" {2}M'.format( -+ 'qemu-img create -f {} "{}" {}M'.format( - disk.get("format", "qcow2"), img_dest, disk_size - ) - ) - else: - raise CommandExecutionError( -- "Unable to create new disk {0}," -+ "Unable to create new disk {}," - " please specify argument".format(img_dest) - ) - -@@ -1113,9 +1113,9 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"): - mode = (0o0777 ^ mask) & 0o0666 - os.chmod(img_dest, mode) - -- except (IOError, OSError) as err: -+ except OSError as err: - raise CommandExecutionError( -- "Problem while creating volume {0} - {1}".format(img_dest, err) -+ "Problem while creating volume {} - {}".format(img_dest, err) - ) - - return img_dest -@@ -1252,7 +1252,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name): - __salt__["config.get"]("virt:disk", {}).get(profile, default) - ) - -- # Transform the list to remove one level of dictionnary and add the name as a property -+ # Transform the list to remove one level of dictionary and add the name as a property - disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()] - - # Merge with the user-provided disks definitions -@@ -1274,7 +1274,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name): - disk["model"] = "ide" - - # Add the missing properties that have defaults -- for key, val in six.iteritems(overlay): -+ for key, val in overlay.items(): - if key not in disk: - disk[key] = val - -@@ -1296,7 +1296,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps): - Compute the disk file name and update it in the disk value. 
- """ - # Compute the filename without extension since it may not make sense for some pool types -- disk["filename"] = "{0}_{1}".format(vm_name, disk["name"]) -+ disk["filename"] = "{}_{}".format(vm_name, disk["name"]) - - # Compute the source file path - base_dir = disk.get("pool", None) -@@ -1311,7 +1311,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps): - # For path-based disks, keep the qcow2 default format - if not disk.get("format"): - disk["format"] = "qcow2" -- disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"]) -+ disk["filename"] = "{}.{}".format(disk["filename"], disk["format"]) - disk["source_file"] = os.path.join(base_dir, disk["filename"]) - else: - if "pool" not in disk: -@@ -1365,7 +1365,7 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps): - disk["format"] = volume_options.get("default_format", None) - - elif hypervisor == "bhyve" and vm_name: -- disk["filename"] = "{0}.{1}".format(vm_name, disk["name"]) -+ disk["filename"] = "{}.{}".format(vm_name, disk["name"]) - disk["source_file"] = os.path.join( - "/dev/zvol", base_dir or "", disk["filename"] - ) -@@ -1373,8 +1373,8 @@ def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps): - elif hypervisor in ["esxi", "vmware"]: - if not base_dir: - base_dir = __salt__["config.get"]("virt:storagepool", "[0] ") -- disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"]) -- disk["source_file"] = "{0}{1}".format(base_dir, disk["filename"]) -+ disk["filename"] = "{}.{}".format(disk["filename"], disk["format"]) -+ disk["source_file"] = "{}{}".format(base_dir, disk["filename"]) - - - def _complete_nics(interfaces, hypervisor): -@@ -1422,7 +1422,7 @@ def _complete_nics(interfaces, hypervisor): - """ - Apply the default overlay to attributes - """ -- for key, value in six.iteritems(overlays[hypervisor]): -+ for key, value in overlays[hypervisor].items(): - if key not in attributes or not attributes[key]: - attributes[key] = value - -@@ -1449,7 +1449,7 @@ def _nic_profile(profile_name, hypervisor): - """ - Append dictionary profile data to interfaces list - """ -- for interface_name, attributes in six.iteritems(profile_dict): -+ for interface_name, attributes in profile_dict.items(): - attributes["name"] = interface_name - interfaces.append(attributes) - -@@ -1522,17 +1522,24 @@ def _handle_remote_boot_params(orig_boot): +@@ -1519,17 +1518,24 @@ def _handle_remote_boot_params(orig_boot): new_boot = orig_boot.copy() keys = orig_boot.keys() cases = [ @@ -847,7 +529,7 @@ index a78c21e323..cd80fbe608 100644 if saltinst_dir is None: os.makedirs(CACHE_DIR) saltinst_dir = CACHE_DIR -@@ -1540,12 +1547,41 @@ def _handle_remote_boot_params(orig_boot): +@@ -1537,12 +1543,41 @@ def _handle_remote_boot_params(orig_boot): return new_boot else: raise SaltInvocationError( @@ -890,7 +572,7 @@ index a78c21e323..cd80fbe608 100644 def init( name, cpu, -@@ -1566,6 +1602,7 @@ def init( +@@ -1563,6 +1598,7 @@ def init( os_type=None, arch=None, boot=None, @@ -898,7 +580,7 @@ index a78c21e323..cd80fbe608 100644 **kwargs ): """ -@@ -1635,7 +1672,8 @@ def init( +@@ -1632,7 +1668,8 @@ def init( This is an optional parameter, all of the keys are optional within the dictionary. The structure of the dictionary is documented in :ref:`init-boot-def`. If a remote path is provided to kernel or initrd, salt will handle the downloading of the specified remote file and modify the XML accordingly. @@ -908,7 +590,7 @@ index a78c21e323..cd80fbe608 100644 .. 
versionadded:: 3000 -@@ -1649,6 +1687,12 @@ def init( +@@ -1646,6 +1683,12 @@ def init( 'nvram': '/usr/share/OVMF/OVMF_VARS.ms.fd' } @@ -921,7 +603,7 @@ index a78c21e323..cd80fbe608 100644 .. _init-boot-def: .. rubric:: Boot parameters definition -@@ -1674,6 +1718,11 @@ def init( +@@ -1671,6 +1714,11 @@ def init( .. versionadded:: sodium @@ -933,16 +615,7 @@ index a78c21e323..cd80fbe608 100644 .. _init-nic-def: .. rubric:: Network Interfaces Definitions -@@ -1797,7 +1846,7 @@ def init( - - .. rubric:: Graphics Definition - -- The graphics dictionnary can have the following properties: -+ The graphics dictionary can have the following properties: - - type - Graphics type. The possible values are ``none``, ``'spice'``, ``'vnc'`` and other values -@@ -1858,6 +1907,8 @@ def init( +@@ -1855,6 +1903,8 @@ def init( for x in y } ) @@ -951,27 +624,7 @@ index a78c21e323..cd80fbe608 100644 virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0] # esxi used to be a possible value for the hypervisor: map it to vmware since it's the same -@@ -1890,8 +1941,8 @@ def init( - else: - # assume libvirt manages disks for us - log.debug("Generating libvirt XML for %s", _disk) -- volume_name = "{0}/{1}".format(name, _disk["name"]) -- filename = "{0}.{1}".format(volume_name, _disk["format"]) -+ volume_name = "{}/{}".format(name, _disk["name"]) -+ filename = "{}.{}".format(volume_name, _disk["format"]) - vol_xml = _gen_vol_xml( - filename, _disk["size"], format=_disk["format"] - ) -@@ -1939,7 +1990,7 @@ def init( - else: - # Unknown hypervisor - raise SaltInvocationError( -- "Unsupported hypervisor when handling disk image: {0}".format( -+ "Unsupported hypervisor when handling disk image: {}".format( - virt_hypervisor - ) - ) -@@ -1965,8 +2016,10 @@ def init( +@@ -1962,8 +2012,10 @@ def init( arch, graphics, boot, @@ -982,7 +635,7 @@ index a78c21e323..cd80fbe608 100644 conn.defineXML(vm_xml) except libvirt.libvirtError as err: conn.close() -@@ -2192,6 +2245,7 @@ def update( +@@ -2189,6 +2241,7 @@ def update( live=True, boot=None, test=False, @@ -990,7 +643,7 @@ index a78c21e323..cd80fbe608 100644 **kwargs ): """ -@@ -2234,11 +2288,28 @@ def update( +@@ -2248,11 +2301,28 @@ def update( Refer to :ref:`init-boot-def` for the complete boot parameter description. @@ -1021,23 +674,8 @@ index a78c21e323..cd80fbe608 100644 :param test: run in dry-run mode if set to True .. 
versionadded:: sodium -@@ -2286,6 +2357,8 @@ def update( - - if boot is not None: - boot = _handle_remote_boot_params(boot) -+ if boot.get("efi", None) is not None: -+ need_update = _handle_efi_param(boot, desc) - - new_desc = ElementTree.fromstring( - _gen_xml( -@@ -2307,76 +2380,58 @@ def update( - # Update the cpu - cpu_node = desc.find("vcpu") - if cpu and int(cpu_node.text) != cpu: -- cpu_node.text = six.text_type(cpu) -- cpu_node.set("current", six.text_type(cpu)) -+ cpu_node.text = str(cpu) -+ cpu_node.set("current", str(cpu)) +@@ -2327,67 +2397,54 @@ def update( + cpu_node.set("current", str(cpu)) need_update = True - # Update the kernel boot parameters @@ -1062,31 +700,25 @@ index a78c21e323..cd80fbe608 100644 - parent_tag.remove(found_tag) - else: - found_tag.text = boot_tag_value +- +- # If the existing tag is loader or nvram, we need to update the corresponding attribute +- if found_tag.tag == "loader" and boot_tag_value is not None: +- found_tag.set("readonly", "yes") +- found_tag.set("type", "pflash") +- +- if found_tag.tag == "nvram" and boot_tag_value is not None: +- found_tag.set("template", found_tag.text) +- found_tag.text = None + def _set_loader(node, value): + salt.utils.xmlutil.set_node_text(node, value) + if value is not None: + node.set("readonly", "yes") + node.set("type", "pflash") -- # If the existing tag is loader or nvram, we need to update the corresponding attribute -- if found_tag.tag == "loader" and boot_tag_value is not None: -- found_tag.set("readonly", "yes") -- found_tag.set("type", "pflash") +- need_update = True + def _set_nvram(node, value): + node.set("template", value) -- if found_tag.tag == "nvram" and boot_tag_value is not None: -- found_tag.set("template", found_tag.text) -- found_tag.text = None -+ def _set_with_mib_unit(node, value): -+ node.text = str(value) -+ node.set("unit", "MiB") - -- need_update = True -- -- # Existing tag is not found, but value is not None -- elif found_tag is None and boot_tag_value is not None: -- - # Need to check for parent tag, and add it if it does not exist. - # Add a subelement and set the value to the new value, and then - # mark for update. 
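From this point the patch swaps the hand-rolled boot-tag loop in virt.update() for a declarative params_mapping handed to salt.utils.xmlutil.change_xml(desc, data, params_mapping), with small setters (_set_loader, _set_nvram, _set_with_mib_unit) for nodes that need attributes as well as text; the mapping itself lands in the next hunk. A toy, standalone rendition of the idea, using only the stdlib (the real helper additionally resolves nested "boot:kernel"-style data paths and handles value deletion, which this sketch does not):

    import xml.etree.ElementTree as ET

    def change_xml(doc, data, mapping):
        """Apply data onto doc per mapping; return True if anything changed."""
        need_update = False
        for param in mapping:
            key, xpath = param["path"], param["xpath"]
            if data.get(key) is None:
                continue
            node = doc.find(xpath)
            if node is None:
                # Create the missing node. Only one parent level is
                # handled here, unlike the real helper.
                parent, _, tag = xpath.rpartition("/")
                anchor = doc.find(parent) if parent else doc
                node = ET.SubElement(anchor, tag)
            value = str(data[key])
            if node.text != value:
                # Fall back to plain text assignment when no custom
                # setter (e.g. _set_loader) is given for this entry.
                setter = param.get("set", lambda n, v: setattr(n, "text", v))
                setter(node, value)
                need_update = True
        return need_update

    desc = ET.fromstring("<domain><vcpu>1</vcpu><os/></domain>")
    mapping = [
        {"path": "cpu", "xpath": "vcpu"},
        {"path": "kernel", "xpath": "os/kernel"},
    ]
    print(change_xml(desc, {"cpu": 2, "kernel": "/boot/vmlinuz"}, mapping))
    print(ET.tostring(desc).decode())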
@@ -1102,12 +734,13 @@ index a78c21e323..cd80fbe608 100644 - if child_tag.tag == "loader": - child_tag.set("readonly", "yes") - child_tag.set("type", "pflash") -- ++ def _set_with_mib_unit(node, value): ++ node.text = str(value) ++ node.set("unit", "MiB") + - if child_tag.tag == "nvram": - child_tag.set("template", child_tag.text) - child_tag.text = None -- -- need_update = True + # Update the kernel boot parameters + params_mapping = [ + {"path": "boot:kernel", "xpath": "os/kernel"}, @@ -1141,7 +774,7 @@ index a78c21e323..cd80fbe608 100644 - for mem_node_name in ["memory", "currentMemory"]: - mem_node = desc.find(mem_node_name) - if mem and int(mem_node.text) != mem * 1024: -- mem_node.text = six.text_type(mem) +- mem_node.text = str(mem) - mem_node.set("unit", "MiB") - need_update = True + data = {k: v for k, v in locals().items() if bool(v)} @@ -1153,18 +786,7 @@ index a78c21e323..cd80fbe608 100644 # Update the XML definition with the new disks and diff changes devices_node = desc.find("devices") -@@ -2395,8 +2450,8 @@ def update( - if func_locals.get(param, None) is not None - ]: - old = devices_node.findall(dev_type) -- new = new_desc.findall("devices/{0}".format(dev_type)) -- changes[dev_type] = globals()["_diff_{0}_lists".format(dev_type)](old, new) -+ new = new_desc.findall("devices/{}".format(dev_type)) -+ changes[dev_type] = globals()["_diff_{}_lists".format(dev_type)](old, new) - if changes[dev_type]["deleted"] or changes[dev_type]["new"]: - for item in old: - devices_node.remove(item) -@@ -2423,9 +2478,9 @@ def update( +@@ -2434,9 +2491,9 @@ def update( _disk_volume_create(conn, all_disks[idx]) if not test: @@ -1177,25 +799,7 @@ index a78c21e323..cd80fbe608 100644 status["definition"] = True except libvirt.libvirtError as err: conn.close() -@@ -2554,7 +2609,7 @@ def update( - except libvirt.libvirtError as err: - if "errors" not in status: - status["errors"] = [] -- status["errors"].append(six.text_type(err)) -+ status["errors"].append(str(err)) - - conn.close() - return status -@@ -2768,7 +2823,7 @@ def _node_info(conn): - info = { - "cpucores": raw[6], - "cpumhz": raw[3], -- "cpumodel": six.text_type(raw[0]), -+ "cpumodel": str(raw[0]), - "cpus": raw[2], - "cputhreads": raw[7], - "numanodes": raw[4], -@@ -3207,24 +3262,21 @@ def get_profiles(hypervisor=None, **kwargs): +@@ -3218,24 +3275,19 @@ def get_profiles(hypervisor=None, **kwargs): for x in y } ) @@ -1208,14 +812,13 @@ index a78c21e323..cd80fbe608 100644 + hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0] virtconf = __salt__["config.get"]("virt", {}) for typ in ["disk", "nic"]: -- _func = getattr(sys.modules[__name__], "_{0}_profile".format(typ)) -+ _func = getattr(sys.modules[__name__], "_{}_profile".format(typ)) - ret[typ] = { + _func = getattr(sys.modules[__name__], "_{}_profile".format(typ)) +- ret[typ] = { - "default": _func( - "default", hypervisor if hypervisor else default_hypervisor - ) -+ "default": _func("default", hypervisor) - } +- } ++ ret[typ] = {"default": _func("default", hypervisor)} if typ in virtconf: ret.setdefault(typ, {}) for prf in virtconf[typ]: @@ -1226,60 +829,7 @@ index a78c21e323..cd80fbe608 100644 return ret -@@ -3506,7 +3558,7 @@ def create_xml_path(path, **kwargs): - return create_xml_str( - salt.utils.stringutils.to_unicode(fp_.read()), **kwargs - ) -- except (OSError, IOError): -+ except OSError: - return False - - -@@ -3564,7 +3616,7 @@ def define_xml_path(path, **kwargs): - return define_xml_str( - salt.utils.stringutils.to_unicode(fp_.read()), **kwargs - ) -- except 
(OSError, IOError): -+ except OSError: - return False - - -@@ -3576,7 +3628,7 @@ def _define_vol_xml_str(conn, xml, pool=None): # pylint: disable=redefined-oute - poolname = ( - pool if pool else __salt__["config.get"]("virt:storagepool", default_pool) - ) -- pool = conn.storagePoolLookupByName(six.text_type(poolname)) -+ pool = conn.storagePoolLookupByName(str(poolname)) - ret = pool.createXML(xml, 0) is not None - return ret - -@@ -3660,7 +3712,7 @@ def define_vol_xml_path(path, pool=None, **kwargs): - return define_vol_xml_str( - salt.utils.stringutils.to_unicode(fp_.read()), pool=pool, **kwargs - ) -- except (OSError, IOError): -+ except OSError: - return False - - -@@ -3777,7 +3829,7 @@ def seed_non_shared_migrate(disks, force=False): - - salt '*' virt.seed_non_shared_migrate - """ -- for _, data in six.iteritems(disks): -+ for _, data in disks.items(): - fn_ = data["file"] - form = data["file format"] - size = data["virtual size"].split()[1][1:] -@@ -3921,14 +3973,14 @@ def purge(vm_, dirs=False, removables=False, **kwargs): - # TODO create solution for 'dataset is busy' - time.sleep(3) - fs_name = disks[disk]["file"][len("/dev/zvol/") :] -- log.info("Destroying VM ZFS volume {0}".format(fs_name)) -+ log.info("Destroying VM ZFS volume {}".format(fs_name)) - __salt__["zfs.destroy"](name=fs_name, force=True) - elif os.path.exists(disks[disk]["file"]): - os.remove(disks[disk]["file"]) +@@ -4043,7 +4095,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs): directories.add(os.path.dirname(disks[disk]["file"])) else: # We may have a volume to delete here @@ -1288,74 +838,7 @@ index a78c21e323..cd80fbe608 100644 if matcher: pool_name = matcher.group("pool") pool = None -@@ -3975,7 +4027,7 @@ def _is_kvm_hyper(): - with salt.utils.files.fopen("/proc/modules") as fp_: - if "kvm_" not in salt.utils.stringutils.to_unicode(fp_.read()): - return False -- except IOError: -+ except OSError: - # No /proc/modules? Are we on Windows? Or Solaris? - return False - return "libvirtd" in __salt__["cmd.run"](__grains__["ps"]) -@@ -3995,7 +4047,7 @@ def _is_xen_hyper(): - with salt.utils.files.fopen("/proc/modules") as fp_: - if "xen_" not in salt.utils.stringutils.to_unicode(fp_.read()): - return False -- except (OSError, IOError): -+ except OSError: - # No /proc/modules? Are we on Windows? Or Solaris? 
- return False - return "libvirtd" in __salt__["cmd.run"](__grains__["ps"]) -@@ -4110,7 +4162,7 @@ def vm_cputime(vm_=None, **kwargs): - cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus - return { - "cputime": int(raw[4]), -- "cputime_percent": int("{0:.0f}".format(cputime_percent)), -+ "cputime_percent": int("{:.0f}".format(cputime_percent)), - } - - info = {} -@@ -4180,7 +4232,7 @@ def vm_netstats(vm_=None, **kwargs): - "tx_errs": 0, - "tx_drop": 0, - } -- for attrs in six.itervalues(nics): -+ for attrs in nics.values(): - if "target" in attrs: - dev = attrs["target"] - stats = dom.interfaceStats(dev) -@@ -4508,7 +4560,7 @@ def revert_snapshot(name, vm_snapshot=None, cleanup=False, **kwargs): - conn.close() - raise CommandExecutionError( - snapshot -- and 'Snapshot "{0}" not found'.format(vm_snapshot) -+ and 'Snapshot "{}" not found'.format(vm_snapshot) - or "No more previous snapshots available" - ) - elif snap.isCurrent(): -@@ -5102,10 +5154,10 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs): - ] - - if not cpu_specs: -- raise ValueError("Model {0} not found in CPU map".format(cpu_model)) -+ raise ValueError("Model {} not found in CPU map".format(cpu_model)) - elif len(cpu_specs) > 1: - raise ValueError( -- "Multiple models {0} found in CPU map".format(cpu_model) -+ "Multiple models {} found in CPU map".format(cpu_model) - ) - - cpu_specs = cpu_specs[0] -@@ -5126,7 +5178,7 @@ def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs): - "vendor": cpu.find("vendor").text, - "features": [feature.get("name") for feature in cpu.findall("feature")], - } -- return cpu.toxml() -+ return ElementTree.tostring(cpu) - - - def network_define(name, bridge, forward, ipv4_config=None, ipv6_config=None, **kwargs): -@@ -5250,7 +5302,7 @@ def list_networks(**kwargs): +@@ -5431,7 +5483,7 @@ def list_networks(**kwargs): def network_info(name=None, **kwargs): """ @@ -1364,40 +847,7 @@ index a78c21e323..cd80fbe608 100644 :param name: virtual network name :param connection: libvirt connection URI, overriding defaults -@@ -5446,20 +5498,20 @@ def _parse_pools_caps(doc): - for option_kind in ["pool", "vol"]: - options = {} - default_format_node = pool.find( -- "{0}Options/defaultFormat".format(option_kind) -+ "{}Options/defaultFormat".format(option_kind) - ) - if default_format_node is not None: - options["default_format"] = default_format_node.get("type") - options_enums = { - enum.get("name"): [value.text for value in enum.findall("value")] -- for enum in pool.findall("{0}Options/enum".format(option_kind)) -+ for enum in pool.findall("{}Options/enum".format(option_kind)) - } - if options_enums: - options.update(options_enums) - if options: - if "options" not in pool_caps: - pool_caps["options"] = {} -- kind = option_kind if option_kind is not "vol" else "volume" -+ kind = option_kind if option_kind != "vol" else "volume" - pool_caps["options"][kind] = options - return pool_caps - -@@ -5695,7 +5747,7 @@ def pool_define( - keys. The path is the qualified name for iSCSI devices. - - Report to `this libvirt page `_ -- for more informations on the use of ``part_separator`` -+ for more information on the use of ``part_separator`` - :param source_dir: - Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``. 
- (Default: ``None``) -@@ -5847,15 +5899,19 @@ def _pool_set_secret( +@@ -6028,15 +6080,19 @@ def _pool_set_secret( if secret_type: # Get the previously defined secret if any secret = None @@ -1426,16 +876,7 @@ index a78c21e323..cd80fbe608 100644 # Create secret if needed if not secret: -@@ -5918,7 +5974,7 @@ def pool_update( - keys. The path is the qualified name for iSCSI devices. - - Report to `this libvirt page `_ -- for more informations on the use of ``part_separator`` -+ for more information on the use of ``part_separator`` - :param source_dir: - Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``. - (Default: ``None``) -@@ -6107,7 +6163,7 @@ def list_pools(**kwargs): +@@ -6288,7 +6344,7 @@ def list_pools(**kwargs): def pool_info(name=None, **kwargs): """ @@ -1444,30 +885,7 @@ index a78c21e323..cd80fbe608 100644 :param name: libvirt storage pool name :param connection: libvirt connection URI, overriding defaults -@@ -6283,6 +6339,22 @@ def pool_undefine(name, **kwargs): - conn = __get_conn(**kwargs) - try: - pool = conn.storagePoolLookupByName(name) -+ desc = ElementTree.fromstring(pool.XMLDesc()) -+ -+ # Is there a secret that we generated and would need to be removed? -+ # Don't remove the other secrets -+ auth_node = desc.find("source/auth") -+ if auth_node is not None: -+ auth_types = { -+ "ceph": libvirt.VIR_SECRET_USAGE_TYPE_CEPH, -+ "iscsi": libvirt.VIR_SECRET_USAGE_TYPE_ISCSI, -+ } -+ secret_type = auth_types[auth_node.get("type")] -+ secret_usage = auth_node.find("secret").get("usage") -+ if secret_type and "pool_{}".format(name) == secret_usage: -+ secret = conn.secretLookupByUsage(secret_type, secret_usage) -+ secret.undefine() -+ - return not bool(pool.undefine()) - finally: - conn.close() -@@ -6308,22 +6380,6 @@ def pool_delete(name, **kwargs): +@@ -6505,22 +6561,6 @@ def pool_delete(name, **kwargs): conn = __get_conn(**kwargs) try: pool = conn.storagePoolLookupByName(name) @@ -1490,55 +908,11 @@ index a78c21e323..cd80fbe608 100644 return not bool(pool.delete(libvirt.VIR_STORAGE_POOL_DELETE_NORMAL)) finally: conn.close() -@@ -6768,7 +6824,7 @@ def _volume_upload(conn, pool, volume, file, offset=0, length=0, sparse=False): - stream.abort() - if ret: - raise CommandExecutionError( -- "Failed to close file: {0}".format(err.strerror) -+ "Failed to close file: {}".format(err.strerror) - ) - if stream: - try: -@@ -6776,7 +6832,7 @@ def _volume_upload(conn, pool, volume, file, offset=0, length=0, sparse=False): - except libvirt.libvirtError as err: - if ret: - raise CommandExecutionError( -- "Failed to finish stream: {0}".format(err.get_error_message()) -+ "Failed to finish stream: {}".format(err.get_error_message()) - ) - return ret - diff --git a/salt/states/virt.py b/salt/states/virt.py -index fdef002293..3d99fd53c8 100644 +index cb15d57d8f..b45cf72ed3 100644 --- a/salt/states/virt.py +++ b/salt/states/virt.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Manage virt - =========== -@@ -13,9 +12,9 @@ for the generation and signing of certificates for systems running libvirt: - """ - - # Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - - import fnmatch -+import logging - import os - - # Import Salt libs -@@ -25,9 +24,6 @@ import salt.utils.stringutils - import salt.utils.versions - from salt.exceptions import CommandExecutionError, SaltInvocationError - --# Import 3rd-party libs --from salt.ext import six -- - try: - import libvirt # pylint: disable=import-error - -@@ -38,6 +34,8 @@ except 
ImportError: +@@ -33,6 +33,8 @@ except ImportError: __virtualname__ = "virt" @@ -1547,56 +921,7 @@ index fdef002293..3d99fd53c8 100644 def __virtual__(): """ -@@ -99,8 +97,8 @@ def keys(name, basepath="/etc/pki", **kwargs): - # rename them to something hopefully unique to avoid - # overriding anything existing - pillar_kwargs = {} -- for key, value in six.iteritems(kwargs): -- pillar_kwargs["ext_pillar_virt.{0}".format(key)] = value -+ for key, value in kwargs.items(): -+ pillar_kwargs["ext_pillar_virt.{}".format(key)] = value - - pillar = __salt__["pillar.ext"]({"libvirt": "_"}, pillar_kwargs) - paths = { -@@ -112,7 +110,7 @@ def keys(name, basepath="/etc/pki", **kwargs): - } - - for key in paths: -- p_key = "libvirt.{0}.pem".format(key) -+ p_key = "libvirt.{}.pem".format(key) - if p_key not in pillar: - continue - if not os.path.exists(os.path.dirname(paths[key])): -@@ -134,7 +132,7 @@ def keys(name, basepath="/etc/pki", **kwargs): - for key in ret["changes"]: - with salt.utils.files.fopen(paths[key], "w+") as fp_: - fp_.write( -- salt.utils.stringutils.to_str(pillar["libvirt.{0}.pem".format(key)]) -+ salt.utils.stringutils.to_str(pillar["libvirt.{}.pem".format(key)]) - ) - - ret["comment"] = "Updated libvirt certs and keys" -@@ -176,7 +174,7 @@ def _virt_call( - domain_state = __salt__["virt.vm_state"](targeted_domain) - action_needed = domain_state.get(targeted_domain) != state - if action_needed: -- response = __salt__["virt.{0}".format(function)]( -+ response = __salt__["virt.{}".format(function)]( - targeted_domain, - connection=connection, - username=username, -@@ -189,9 +187,7 @@ def _virt_call( - else: - noaction_domains.append(targeted_domain) - except libvirt.libvirtError as err: -- ignored_domains.append( -- {"domain": targeted_domain, "issue": six.text_type(err)} -- ) -+ ignored_domains.append({"domain": targeted_domain, "issue": str(err)}) - if not changed_domains: - ret["result"] = not ignored_domains and bool(targeted_domains) - ret["comment"] = "No changes had happened" -@@ -292,6 +288,7 @@ def defined( +@@ -285,6 +287,7 @@ def defined( arch=None, boot=None, update=True, @@ -1604,7 +929,7 @@ index fdef002293..3d99fd53c8 100644 ): """ Starts an existing guest, or defines and starts a new VM with specified arguments. -@@ -352,6 +349,14 @@ def defined( +@@ -345,6 +348,14 @@ def defined( .. deprecated:: sodium @@ -1619,7 +944,7 @@ index fdef002293..3d99fd53c8 100644 .. 
rubric:: Example States Make sure a virtual machine called ``domain_name`` is defined: -@@ -362,6 +367,7 @@ def defined( +@@ -355,6 +366,7 @@ def defined( virt.defined: - cpu: 2 - mem: 2048 @@ -1627,7 +952,7 @@ index fdef002293..3d99fd53c8 100644 - disk_profile: prod - disks: - name: system -@@ -414,17 +420,18 @@ def defined( +@@ -407,6 +419,7 @@ def defined( password=password, boot=boot, test=__opts__["test"], @@ -1635,37 +960,15 @@ index fdef002293..3d99fd53c8 100644 ) ret["changes"][name] = status if not status.get("definition"): -- ret["comment"] = "Domain {0} unchanged".format(name) -+ ret["comment"] = "Domain {} unchanged".format(name) - ret["result"] = True - elif status.get("errors"): - ret[ - "comment" -- ] = "Domain {0} updated with live update(s) failures".format(name) -+ ] = "Domain {} updated with live update(s) failures".format(name) - else: -- ret["comment"] = "Domain {0} updated".format(name) -+ ret["comment"] = "Domain {} updated".format(name) - else: - if not __opts__["test"]: - __salt__["virt.init"]( -@@ -448,12 +455,13 @@ def defined( +@@ -441,6 +454,7 @@ def defined( password=password, boot=boot, start=False, + boot_dev=boot_dev, ) ret["changes"][name] = {"definition": True} -- ret["comment"] = "Domain {0} defined".format(name) -+ ret["comment"] = "Domain {} defined".format(name) - except libvirt.libvirtError as err: - # Something bad happened when defining / updating the VM, report it -- ret["comment"] = six.text_type(err) -+ ret["comment"] = str(err) - ret["result"] = False - - return ret -@@ -480,6 +488,7 @@ def running( + ret["comment"] = "Domain {} defined".format(name) +@@ -473,6 +487,7 @@ def running( os_type=None, arch=None, boot=None, @@ -1673,7 +976,7 @@ index fdef002293..3d99fd53c8 100644 ): """ Starts an existing guest, or defines and starts a new VM with specified arguments. -@@ -591,6 +600,14 @@ def running( +@@ -584,6 +599,14 @@ def running( .. versionadded:: 3000 @@ -1688,15 +991,7 @@ index fdef002293..3d99fd53c8 100644 .. 
rubric:: Example States Make sure an already-defined virtual machine called ``domain_name`` is running: -@@ -609,6 +626,7 @@ def running( - - cpu: 2 - - mem: 2048 - - disk_profile: prod -+ - boot_dev: network hd - - disks: - - name: system - size: 8192 -@@ -657,6 +675,7 @@ def running( +@@ -651,6 +674,7 @@ def running( arch=arch, boot=boot, update=update, @@ -1704,91 +999,7 @@ index fdef002293..3d99fd53c8 100644 connection=connection, username=username, password=password, -@@ -681,11 +700,11 @@ def running( - ret["comment"] = comment - ret["changes"][name]["started"] = True - elif not changed: -- ret["comment"] = "Domain {0} exists and is running".format(name) -+ ret["comment"] = "Domain {} exists and is running".format(name) - - except libvirt.libvirtError as err: - # Something bad happened when starting / updating the VM, report it -- ret["comment"] = six.text_type(err) -+ ret["comment"] = str(err) - ret["result"] = False - - return ret -@@ -830,7 +849,7 @@ def reverted( - try: - domains = fnmatch.filter(__salt__["virt.list_domains"](), name) - if not domains: -- ret["comment"] = 'No domains found for criteria "{0}"'.format(name) -+ ret["comment"] = 'No domains found for criteria "{}"'.format(name) - else: - ignored_domains = list() - if len(domains) > 1: -@@ -848,9 +867,7 @@ def reverted( - } - except CommandExecutionError as err: - if len(domains) > 1: -- ignored_domains.append( -- {"domain": domain, "issue": six.text_type(err)} -- ) -+ ignored_domains.append({"domain": domain, "issue": str(err)}) - if len(domains) > 1: - if result: - ret["changes"]["reverted"].append(result) -@@ -860,7 +877,7 @@ def reverted( - - ret["result"] = len(domains) != len(ignored_domains) - if ret["result"]: -- ret["comment"] = "Domain{0} has been reverted".format( -+ ret["comment"] = "Domain{} has been reverted".format( - len(domains) > 1 and "s" or "" - ) - if ignored_domains: -@@ -868,9 +885,9 @@ def reverted( - if not ret["changes"]["reverted"]: - ret["changes"].pop("reverted") - except libvirt.libvirtError as err: -- ret["comment"] = six.text_type(err) -+ ret["comment"] = str(err) - except CommandExecutionError as err: -- ret["comment"] = six.text_type(err) -+ ret["comment"] = str(err) - - return ret - -@@ -955,7 +972,7 @@ def network_defined( - name, connection=connection, username=username, password=password - ) - if info and info[name]: -- ret["comment"] = "Network {0} exists".format(name) -+ ret["comment"] = "Network {} exists".format(name) - ret["result"] = True - else: - if not __opts__["test"]: -@@ -974,7 +991,7 @@ def network_defined( - password=password, - ) - ret["changes"][name] = "Network defined" -- ret["comment"] = "Network {0} defined".format(name) -+ ret["comment"] = "Network {} defined".format(name) - except libvirt.libvirtError as err: - ret["result"] = False - ret["comment"] = err.get_error_message() -@@ -1108,6 +1125,10 @@ def network_running( - return ret - - -+# Some of the libvirt storage drivers do not support the build action -+BUILDABLE_POOL_TYPES = {"disk", "fs", "netfs", "dir", "logical", "vstorage", "zfs"} -+ -+ - def pool_defined( - name, - ptype=None, -@@ -1222,25 +1243,35 @@ def pool_defined( +@@ -1218,14 +1242,24 @@ def pool_defined( action = "" if info[name]["state"] != "running": @@ -1821,21 +1032,7 @@ index fdef002293..3d99fd53c8 100644 action = ( "{}, autostart flag changed".format(action) - if needs_autostart - else action - ) -- ret["changes"][name] = "Pool updated{0}".format(action) -- ret["comment"] = "Pool {0} updated{1}".format(name, action) -+ 
ret["changes"][name] = "Pool updated{}".format(action) -+ ret["comment"] = "Pool {} updated{}".format(name, action) - - else: -- ret["comment"] = "Pool {0} unchanged".format(name) -+ ret["comment"] = "Pool {} unchanged".format(name) - ret["result"] = True - else: - needs_autostart = autostart -@@ -1265,15 +1296,28 @@ def pool_defined( +@@ -1261,9 +1295,22 @@ def pool_defined( password=password, ) @@ -1860,16 +1057,8 @@ index fdef002293..3d99fd53c8 100644 + ) if needs_autostart: ret["changes"][name] = "Pool defined, marked for autostart" -- ret["comment"] = "Pool {0} defined, marked for autostart".format(name) -+ ret["comment"] = "Pool {} defined, marked for autostart".format(name) - else: - ret["changes"][name] = "Pool defined" -- ret["comment"] = "Pool {0} defined".format(name) -+ ret["comment"] = "Pool {} defined".format(name) - - if needs_autostart: - if not __opts__["test"]: -@@ -1374,7 +1418,7 @@ def pool_running( + ret["comment"] = "Pool {} defined, marked for autostart".format(name) +@@ -1370,7 +1417,7 @@ def pool_running( is_running = info.get(name, {}).get("state", "stopped") == "running" if is_running: if updated: @@ -1878,7 +1067,7 @@ index fdef002293..3d99fd53c8 100644 if not __opts__["test"]: __salt__["virt.pool_stop"]( name, -@@ -1382,13 +1426,16 @@ def pool_running( +@@ -1378,13 +1425,16 @@ def pool_running( username=username, password=password, ) @@ -1902,2323 +1091,73 @@ index fdef002293..3d99fd53c8 100644 else: action = "already running" result = True -@@ -1402,16 +1449,16 @@ def pool_running( - password=password, - ) - -- comment = "Pool {0}".format(name) -+ comment = "Pool {}".format(name) - change = "Pool" - if name in ret["changes"]: -- comment = "{0},".format(ret["comment"]) -- change = "{0},".format(ret["changes"][name]) -+ comment = "{},".format(ret["comment"]) -+ change = "{},".format(ret["changes"][name]) - - if action != "already running": -- ret["changes"][name] = "{0} {1}".format(change, action) -+ ret["changes"][name] = "{} {}".format(change, action) - -- ret["comment"] = "{0} {1}".format(comment, action) -+ ret["comment"] = "{} {}".format(comment, action) - ret["result"] = result - - except libvirt.libvirtError as err: -@@ -1539,15 +1586,13 @@ def pool_deleted(name, purge=False, connection=None, username=None, password=Non - ret["result"] = None - - if unsupported: -- ret[ -- "comment" -- ] = 'Unsupported actions for pool of type "{0}": {1}'.format( -+ ret["comment"] = 'Unsupported actions for pool of type "{}": {}'.format( - info[name]["type"], ", ".join(unsupported) - ) - else: -- ret["comment"] = "Storage pool could not be found: {0}".format(name) -+ ret["comment"] = "Storage pool could not be found: {}".format(name) - except libvirt.libvirtError as err: -- ret["comment"] = "Failed deleting pool: {0}".format(err.get_error_message()) -+ ret["comment"] = "Failed deleting pool: {}".format(err.get_error_message()) - ret["result"] = False - - return ret diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja -index aac6283eb0..04a61ffa78 100644 +index 439ed83f7f..2a2f5e4141 100644 --- a/salt/templates/virt/libvirt_domain.jinja +++ b/salt/templates/virt/libvirt_domain.jinja -@@ -3,7 +3,7 @@ +@@ -2,32 +2,9 @@ + + {{ name }} {{ cpu }} - {{ mem }} - {{ mem }} -- +- {%- if mem.max %} +- {{ mem.max }} +- {%- endif %} +- {%- if mem.boot %} +- {{ mem.boot }} +- {%- endif %} +- {%- if mem.current %} +- {{ mem.current }} +- {%- endif %} +- {%- if mem %} +- +- {%- if 'hard_limit' in mem and mem.hard_limit %} +- {{ mem.hard_limit 
}} +- {%- endif %} +- {%- if 'soft_limit' in mem and mem.soft_limit %} +- {{ mem.soft_limit }} +- {%- endif %} +- {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %} +- {{ mem.swap_hard_limit }} +- {%- endif %} +- {%- if 'min_guarantee' in mem and mem.min_guarantee %} +- {{ mem.min_guarantee }} +- {%- endif %} +- +- {%- endif %} +- ++ {{ mem }} ++ {{ mem }} + {{ os_type }} {% if boot %} {% if 'kernel' in boot %} -diff --git a/salt/utils/data.py b/salt/utils/data.py -index 8f84c2ea42..1c4c22efb3 100644 ---- a/salt/utils/data.py -+++ b/salt/utils/data.py -@@ -1,22 +1,16 @@ - # -*- coding: utf-8 -*- --''' -+""" - Functions for manipulating, inspecting, or otherwise working with data types - and data structures. --''' -+""" - --from __future__ import absolute_import, print_function, unicode_literals - - # Import Python libs - import copy - import fnmatch -+import functools - import logging - import re --import functools -- --try: -- from collections.abc import Mapping, MutableMapping, Sequence --except ImportError: -- from collections import Mapping, MutableMapping, Sequence - - # Import Salt libs - import salt.utils.dictupdate -@@ -24,13 +18,22 @@ import salt.utils.stringutils - import salt.utils.yaml - from salt.defaults import DEFAULT_TARGET_DELIM - from salt.exceptions import SaltException --from salt.utils.decorators.jinja import jinja_filter --from salt.utils.odict import OrderedDict -+from salt.ext import six - - # Import 3rd-party libs --from salt.ext.six.moves import zip # pylint: disable=redefined-builtin --from salt.ext import six - from salt.ext.six.moves import range # pylint: disable=redefined-builtin -+from salt.ext.six.moves import zip # pylint: disable=redefined-builtin -+from salt.utils.decorators.jinja import jinja_filter -+from salt.utils.odict import OrderedDict -+ -+try: -+ from collections.abc import Mapping, MutableMapping, Sequence -+except ImportError: -+ # pylint: disable=no-name-in-module -+ from collections import Mapping, MutableMapping, Sequence -+ -+ # pylint: enable=no-name-in-module -+ - - try: - import jmespath -@@ -41,15 +44,16 @@ log = logging.getLogger(__name__) - - - class CaseInsensitiveDict(MutableMapping): -- ''' -+ """ - Inspired by requests' case-insensitive dict implementation, but works with - non-string keys as well. -- ''' -+ """ -+ - def __init__(self, init=None, **kwargs): -- ''' -+ """ - Force internal dict to be ordered to ensure a consistent iteration - order, irrespective of case. -- ''' -+ """ - self._data = OrderedDict() - self.update(init or {}, **kwargs) - -@@ -67,7 +71,7 @@ class CaseInsensitiveDict(MutableMapping): - return self._data[to_lowercase(key)][1] - - def __iter__(self): -- return (item[0] for item in six.itervalues(self._data)) -+ return (item[0] for item in self._data.values()) - - def __eq__(self, rval): - if not isinstance(rval, Mapping): -@@ -76,28 +80,28 @@ class CaseInsensitiveDict(MutableMapping): - return dict(self.items_lower()) == dict(CaseInsensitiveDict(rval).items_lower()) - - def __repr__(self): -- return repr(dict(six.iteritems(self))) -+ return repr(dict(self.items())) - - def items_lower(self): -- ''' -+ """ - Returns a generator iterating over keys and values, with the keys all - being lowercase. 
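
A minimal sketch of the case-insensitive lookups the CaseInsensitiveDict class above provides, assuming it is importable from salt.utils.data as patched here:

    from salt.utils.data import CaseInsensitiveDict

    cid = CaseInsensitiveDict({"Foo": 1})
    assert cid["foo"] == 1      # lookups lowercase the key first
    assert cid == {"FOO": 1}    # equality also compares lowercased keys
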
-- ''' -- return ((key, val[1]) for key, val in six.iteritems(self._data)) -+ """ -+ return ((key, val[1]) for key, val in self._data.items()) - - def copy(self): -- ''' -+ """ - Returns a copy of the object -- ''' -- return CaseInsensitiveDict(six.iteritems(self._data)) -+ """ -+ return CaseInsensitiveDict(self._data.items()) - - - def __change_case(data, attr, preserve_dict_class=False): -- ''' -+ """ - Calls data.attr() if data has an attribute/method called attr. - Processes data recursively if data is a Mapping or Sequence. - For Mapping, processes both keys and values. -- ''' -+ """ - try: - return getattr(data, attr)() - except AttributeError: -@@ -107,73 +111,120 @@ def __change_case(data, attr, preserve_dict_class=False): - - if isinstance(data, Mapping): - return (data_type if preserve_dict_class else dict)( -- (__change_case(key, attr, preserve_dict_class), -- __change_case(val, attr, preserve_dict_class)) -- for key, val in six.iteritems(data) -+ ( -+ __change_case(key, attr, preserve_dict_class), -+ __change_case(val, attr, preserve_dict_class), -+ ) -+ for key, val in data.items() - ) - if isinstance(data, Sequence): - return data_type( -- __change_case(item, attr, preserve_dict_class) for item in data) -+ __change_case(item, attr, preserve_dict_class) for item in data -+ ) - return data - - - def to_lowercase(data, preserve_dict_class=False): -- ''' -+ """ - Recursively changes everything in data to lowercase. -- ''' -- return __change_case(data, 'lower', preserve_dict_class) -+ """ -+ return __change_case(data, "lower", preserve_dict_class) - - - def to_uppercase(data, preserve_dict_class=False): -- ''' -+ """ - Recursively changes everything in data to uppercase. -- ''' -- return __change_case(data, 'upper', preserve_dict_class) -+ """ -+ return __change_case(data, "upper", preserve_dict_class) - - --@jinja_filter('compare_dicts') -+@jinja_filter("compare_dicts") - def compare_dicts(old=None, new=None): -- ''' -+ """ - Compare before and after results from various salt functions, returning a - dict describing the changes that were made. -- ''' -+ """ - ret = {} -- for key in set((new or {})).union((old or {})): -+ for key in set(new or {}).union(old or {}): - if key not in old: - # New key -- ret[key] = {'old': '', -- 'new': new[key]} -+ ret[key] = {"old": "", "new": new[key]} - elif key not in new: - # Key removed -- ret[key] = {'new': '', -- 'old': old[key]} -+ ret[key] = {"new": "", "old": old[key]} - elif new[key] != old[key]: - # Key modified -- ret[key] = {'old': old[key], -- 'new': new[key]} -+ ret[key] = {"old": old[key], "new": new[key]} - return ret - - --@jinja_filter('compare_lists') -+@jinja_filter("compare_lists") - def compare_lists(old=None, new=None): -- ''' -+ """ - Compare before and after results from various salt functions, returning a - dict describing the changes that were made -- ''' -+ """ - ret = {} - for item in new: - if item not in old: -- ret.setdefault('new', []).append(item) -+ ret.setdefault("new", []).append(item) - for item in old: - if item not in new: -- ret.setdefault('old', []).append(item) -+ ret.setdefault("old", []).append(item) - return ret - - --def decode(data, encoding=None, errors='strict', keep=False, -- normalize=False, preserve_dict_class=False, preserve_tuples=False, -- to_str=False): -- ''' -+def _remove_circular_refs(ob, _seen=None): -+ """ -+ Generic method to remove circular references from objects. 
-+ This has been taken from author Martijn Pieters -+ https://stackoverflow.com/questions/44777369/ -+ remove-circular-references-in-dicts-lists-tuples/44777477#44777477 -+ :param ob: dict, list, typle, set, and frozenset -+ Standard python object -+ :param object _seen: -+ Object that has circular reference -+ :returns: -+ Cleaned Python object -+ :rtype: -+ type(ob) -+ """ -+ if _seen is None: -+ _seen = set() -+ if id(ob) in _seen: -+ # Here we caught a circular reference. -+ # Alert user and cleanup to continue. -+ log.exception( -+ "Caught a circular reference in data structure below." -+ "Cleaning and continuing execution.\n%r\n", -+ ob, -+ ) -+ return None -+ _seen.add(id(ob)) -+ res = ob -+ if isinstance(ob, dict): -+ res = { -+ _remove_circular_refs(k, _seen): _remove_circular_refs(v, _seen) -+ for k, v in ob.items() -+ } -+ elif isinstance(ob, (list, tuple, set, frozenset)): -+ res = type(ob)(_remove_circular_refs(v, _seen) for v in ob) -+ # remove id again; only *nested* references count -+ _seen.remove(id(ob)) -+ return res -+ -+ -+def decode( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ normalize=False, -+ preserve_dict_class=False, -+ preserve_tuples=False, -+ to_str=False, -+): -+ """ - Generic function which will decode whichever type is passed, if necessary. - Optionally use to_str=True to ensure strings are str types and not unicode - on Python 2. -@@ -199,22 +250,55 @@ def decode(data, encoding=None, errors='strict', keep=False, - two strings above, in which "й" is represented as two code points (i.e. one - for the base character, and one for the breve mark). Normalizing allows for - a more reliable test case. -- ''' -- _decode_func = salt.utils.stringutils.to_unicode \ -- if not to_str \ -+ -+ """ -+ # Clean data object before decoding to avoid circular references -+ data = _remove_circular_refs(data) -+ -+ _decode_func = ( -+ salt.utils.stringutils.to_unicode -+ if not to_str - else salt.utils.stringutils.to_str -+ ) - if isinstance(data, Mapping): -- return decode_dict(data, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ return decode_dict( -+ data, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - if isinstance(data, list): -- return decode_list(data, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ return decode_list( -+ data, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - if isinstance(data, tuple): -- return decode_tuple(data, encoding, errors, keep, normalize, -- preserve_dict_class, to_str) \ -- if preserve_tuples \ -- else decode_list(data, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ return ( -+ decode_tuple( -+ data, encoding, errors, keep, normalize, preserve_dict_class, to_str -+ ) -+ if preserve_tuples -+ else decode_list( -+ data, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) -+ ) - try: - data = _decode_func(data, encoding, errors, normalize) - except TypeError: -@@ -228,25 +312,48 @@ def decode(data, encoding=None, errors='strict', keep=False, - return data - - --def decode_dict(data, encoding=None, errors='strict', keep=False, -- normalize=False, preserve_dict_class=False, -- preserve_tuples=False, to_str=False): -- ''' -+def decode_dict( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ normalize=False, -+ 
preserve_dict_class=False, -+ preserve_tuples=False, -+ to_str=False, -+): -+ """ - Decode all string values to Unicode. Optionally use to_str=True to ensure - strings are str types and not unicode on Python 2. -- ''' -- _decode_func = salt.utils.stringutils.to_unicode \ -- if not to_str \ -+ """ -+ # Clean data object before decoding to avoid circular references -+ data = _remove_circular_refs(data) -+ -+ _decode_func = ( -+ salt.utils.stringutils.to_unicode -+ if not to_str - else salt.utils.stringutils.to_str -+ ) - # Make sure we preserve OrderedDicts - ret = data.__class__() if preserve_dict_class else {} -- for key, value in six.iteritems(data): -+ for key, value in data.items(): - if isinstance(key, tuple): -- key = decode_tuple(key, encoding, errors, keep, normalize, -- preserve_dict_class, to_str) \ -- if preserve_tuples \ -- else decode_list(key, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ key = ( -+ decode_tuple( -+ key, encoding, errors, keep, normalize, preserve_dict_class, to_str -+ ) -+ if preserve_tuples -+ else decode_list( -+ key, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) -+ ) - else: - try: - key = _decode_func(key, encoding, errors, normalize) -@@ -260,17 +367,50 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, - raise - - if isinstance(value, list): -- value = decode_list(value, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ value = decode_list( -+ value, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - elif isinstance(value, tuple): -- value = decode_tuple(value, encoding, errors, keep, normalize, -- preserve_dict_class, to_str) \ -- if preserve_tuples \ -- else decode_list(value, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ value = ( -+ decode_tuple( -+ value, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ to_str, -+ ) -+ if preserve_tuples -+ else decode_list( -+ value, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) -+ ) - elif isinstance(value, Mapping): -- value = decode_dict(value, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ value = decode_dict( -+ value, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - else: - try: - value = _decode_func(value, encoding, errors, normalize) -@@ -287,30 +427,69 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, - return ret - - --def decode_list(data, encoding=None, errors='strict', keep=False, -- normalize=False, preserve_dict_class=False, -- preserve_tuples=False, to_str=False): -- ''' -+def decode_list( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ normalize=False, -+ preserve_dict_class=False, -+ preserve_tuples=False, -+ to_str=False, -+): -+ """ - Decode all string values to Unicode. Optionally use to_str=True to ensure - strings are str types and not unicode on Python 2. 
-- ''' -- _decode_func = salt.utils.stringutils.to_unicode \ -- if not to_str \ -+ """ -+ # Clean data object before decoding to avoid circular references -+ data = _remove_circular_refs(data) -+ -+ _decode_func = ( -+ salt.utils.stringutils.to_unicode -+ if not to_str - else salt.utils.stringutils.to_str -+ ) - ret = [] - for item in data: - if isinstance(item, list): -- item = decode_list(item, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ item = decode_list( -+ item, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - elif isinstance(item, tuple): -- item = decode_tuple(item, encoding, errors, keep, normalize, -- preserve_dict_class, to_str) \ -- if preserve_tuples \ -- else decode_list(item, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ item = ( -+ decode_tuple( -+ item, encoding, errors, keep, normalize, preserve_dict_class, to_str -+ ) -+ if preserve_tuples -+ else decode_list( -+ item, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) -+ ) - elif isinstance(item, Mapping): -- item = decode_dict(item, encoding, errors, keep, normalize, -- preserve_dict_class, preserve_tuples, to_str) -+ item = decode_dict( -+ item, -+ encoding, -+ errors, -+ keep, -+ normalize, -+ preserve_dict_class, -+ preserve_tuples, -+ to_str, -+ ) - else: - try: - item = _decode_func(item, encoding, errors, normalize) -@@ -327,21 +506,35 @@ def decode_list(data, encoding=None, errors='strict', keep=False, - return ret - - --def decode_tuple(data, encoding=None, errors='strict', keep=False, -- normalize=False, preserve_dict_class=False, to_str=False): -- ''' -+def decode_tuple( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ normalize=False, -+ preserve_dict_class=False, -+ to_str=False, -+): -+ """ - Decode all string values to Unicode. Optionally use to_str=True to ensure - strings are str types and not unicode on Python 2. -- ''' -+ """ - return tuple( -- decode_list(data, encoding, errors, keep, normalize, -- preserve_dict_class, True, to_str) -+ decode_list( -+ data, encoding, errors, keep, normalize, preserve_dict_class, True, to_str -+ ) - ) - - --def encode(data, encoding=None, errors='strict', keep=False, -- preserve_dict_class=False, preserve_tuples=False): -- ''' -+def encode( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ preserve_dict_class=False, -+ preserve_tuples=False, -+): -+ """ - Generic function which will encode whichever type is passed, if necessary - - If `strict` is True, and `keep` is False, and we fail to encode, a -@@ -349,18 +542,27 @@ def encode(data, encoding=None, errors='strict', keep=False, - original value to silently be returned in cases where encoding fails. This - can be useful for cases where the data passed to this function is likely to - contain binary blobs. 
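
A hedged sketch of the keep fallback and of the circular-reference cleanup noted in the comments above, assuming salt.utils.data is importable; the invalid byte sequence is a deliberate example:

    import salt.utils.data

    blob = {"ok": b"text", "raw": b"\xff\xfe"}       # \xff\xfe is not valid UTF-8
    out = salt.utils.data.decode(blob, encoding="utf-8", keep=True)
    # "ok" decodes to "text"; "raw" cannot be decoded and is kept as bytes.

    loop = {"name": "vm01"}
    loop["self"] = loop                              # a circular reference
    cleaned = salt.utils.data.decode(loop)
    # _remove_circular_refs cuts the cycle first: cleaned["self"] is None.
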
-- ''' -+ -+ """ -+ # Clean data object before encoding to avoid circular references -+ data = _remove_circular_refs(data) -+ - if isinstance(data, Mapping): -- return encode_dict(data, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ return encode_dict( -+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - if isinstance(data, list): -- return encode_list(data, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ return encode_list( -+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - if isinstance(data, tuple): -- return encode_tuple(data, encoding, errors, keep, preserve_dict_class) \ -- if preserve_tuples \ -- else encode_list(data, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ return ( -+ encode_tuple(data, encoding, errors, keep, preserve_dict_class) -+ if preserve_tuples -+ else encode_list( -+ data, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) -+ ) - try: - return salt.utils.stringutils.to_bytes(data, encoding, errors) - except TypeError: -@@ -374,20 +576,31 @@ def encode(data, encoding=None, errors='strict', keep=False, - return data - - --@jinja_filter('json_decode_dict') # Remove this for Aluminium --@jinja_filter('json_encode_dict') --def encode_dict(data, encoding=None, errors='strict', keep=False, -- preserve_dict_class=False, preserve_tuples=False): -- ''' -+@jinja_filter("json_decode_dict") # Remove this for Aluminium -+@jinja_filter("json_encode_dict") -+def encode_dict( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ preserve_dict_class=False, -+ preserve_tuples=False, -+): -+ """ - Encode all string values to bytes -- ''' -+ """ -+ # Clean data object before encoding to avoid circular references -+ data = _remove_circular_refs(data) - ret = data.__class__() if preserve_dict_class else {} -- for key, value in six.iteritems(data): -+ for key, value in data.items(): - if isinstance(key, tuple): -- key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \ -- if preserve_tuples \ -- else encode_list(key, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ key = ( -+ encode_tuple(key, encoding, errors, keep, preserve_dict_class) -+ if preserve_tuples -+ else encode_list( -+ key, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) -+ ) - else: - try: - key = salt.utils.stringutils.to_bytes(key, encoding, errors) -@@ -401,16 +614,21 @@ def encode_dict(data, encoding=None, errors='strict', keep=False, - raise - - if isinstance(value, list): -- value = encode_list(value, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ value = encode_list( -+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - elif isinstance(value, tuple): -- value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \ -- if preserve_tuples \ -- else encode_list(value, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ value = ( -+ encode_tuple(value, encoding, errors, keep, preserve_dict_class) -+ if preserve_tuples -+ else encode_list( -+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) -+ ) - elif isinstance(value, Mapping): -- value = encode_dict(value, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ value = encode_dict( -+ value, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - else: - try: - value = salt.utils.stringutils.to_bytes(value, encoding, errors) -@@ -427,26 +645,40 @@ def 
encode_dict(data, encoding=None, errors='strict', keep=False, - return ret - - --@jinja_filter('json_decode_list') # Remove this for Aluminium --@jinja_filter('json_encode_list') --def encode_list(data, encoding=None, errors='strict', keep=False, -- preserve_dict_class=False, preserve_tuples=False): -- ''' -+@jinja_filter("json_decode_list") # Remove this for Aluminium -+@jinja_filter("json_encode_list") -+def encode_list( -+ data, -+ encoding=None, -+ errors="strict", -+ keep=False, -+ preserve_dict_class=False, -+ preserve_tuples=False, -+): -+ """ - Encode all string values to bytes -- ''' -+ """ -+ # Clean data object before encoding to avoid circular references -+ data = _remove_circular_refs(data) -+ - ret = [] - for item in data: - if isinstance(item, list): -- item = encode_list(item, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ item = encode_list( -+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - elif isinstance(item, tuple): -- item = encode_tuple(item, encoding, errors, keep, preserve_dict_class) \ -- if preserve_tuples \ -- else encode_list(item, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ item = ( -+ encode_tuple(item, encoding, errors, keep, preserve_dict_class) -+ if preserve_tuples -+ else encode_list( -+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) -+ ) - elif isinstance(item, Mapping): -- item = encode_dict(item, encoding, errors, keep, -- preserve_dict_class, preserve_tuples) -+ item = encode_dict( -+ item, encoding, errors, keep, preserve_dict_class, preserve_tuples -+ ) - else: - try: - item = salt.utils.stringutils.to_bytes(item, encoding, errors) -@@ -463,42 +695,37 @@ def encode_list(data, encoding=None, errors='strict', keep=False, - return ret - - --def encode_tuple(data, encoding=None, errors='strict', keep=False, -- preserve_dict_class=False): -- ''' -+def encode_tuple( -+ data, encoding=None, errors="strict", keep=False, preserve_dict_class=False -+): -+ """ - Encode all string values to Unicode -- ''' -- return tuple( -- encode_list(data, encoding, errors, keep, preserve_dict_class, True)) -+ """ -+ return tuple(encode_list(data, encoding, errors, keep, preserve_dict_class, True)) - - --@jinja_filter('exactly_n_true') -+@jinja_filter("exactly_n_true") - def exactly_n(iterable, amount=1): -- ''' -+ """ - Tests that exactly N items in an iterable are "truthy" (neither None, - False, nor 0). -- ''' -+ """ - i = iter(iterable) - return all(any(i) for j in range(amount)) and not any(i) - - --@jinja_filter('exactly_one_true') -+@jinja_filter("exactly_one_true") - def exactly_one(iterable): -- ''' -+ """ - Check if only one item is not None, False, or 0 in an iterable. 
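
A short sketch of the truthiness-counting helpers above, assuming they are importable from salt.utils.data:

    from salt.utils.data import exactly_n, exactly_one

    exactly_n([1, 0, None, "x"], amount=2)  # True: exactly two truthy items
    exactly_one([0, "", "only"])            # True: a single truthy item
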
-- ''' -+ """ - return exactly_n(iterable) - - --def filter_by(lookup_dict, -- lookup, -- traverse, -- merge=None, -- default='default', -- base=None): -- ''' -+def filter_by(lookup_dict, lookup, traverse, merge=None, default="default", base=None): -+ """ - Common code to filter data structures like grains and pillar -- ''' -+ """ - ret = None - # Default value would be an empty list if lookup not found - val = traverse_dict_and_list(traverse, lookup, []) -@@ -507,10 +734,8 @@ def filter_by(lookup_dict, - # lookup_dict keys - for each in val if isinstance(val, list) else [val]: - for key in lookup_dict: -- test_key = key if isinstance(key, six.string_types) \ -- else six.text_type(key) -- test_each = each if isinstance(each, six.string_types) \ -- else six.text_type(each) -+ test_key = key if isinstance(key, str) else str(key) -+ test_each = each if isinstance(each, str) else str(each) - if fnmatch.fnmatchcase(test_each, test_key): - ret = lookup_dict[key] - break -@@ -528,14 +753,13 @@ def filter_by(lookup_dict, - elif isinstance(base_values, Mapping): - if not isinstance(ret, Mapping): - raise SaltException( -- 'filter_by default and look-up values must both be ' -- 'dictionaries.') -+ "filter_by default and look-up values must both be " "dictionaries." -+ ) - ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret) - - if merge: - if not isinstance(merge, Mapping): -- raise SaltException( -- 'filter_by merge argument must be a dictionary.') -+ raise SaltException("filter_by merge argument must be a dictionary.") - - if ret is None: - ret = merge -@@ -546,12 +770,12 @@ def filter_by(lookup_dict, - - - def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): -- ''' -+ """ - Traverse a dict using a colon-delimited (or otherwise delimited, using the - 'delimiter' param) target string. The target 'foo:bar:baz' will return - data['foo']['bar']['baz'] if this value exists, and will otherwise return - the dict in the default argument. -- ''' -+ """ - ptr = data - try: - for each in key.split(delimiter): -@@ -562,9 +786,9 @@ def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): - return ptr - - --@jinja_filter('traverse') -+@jinja_filter("traverse") - def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): -- ''' -+ """ - Traverse a dict or list using a colon-delimited (or otherwise delimited, - using the 'delimiter' param) target string. The target 'foo:bar:0' will - return data['foo']['bar'][0] if this value exists, and will otherwise -@@ -573,7 +797,7 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL - The target 'foo:bar:0' will return data['foo']['bar'][0] if data like - {'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}} - then return data['foo']['bar']['0'] -- ''' -+ """ - ptr = data - for each in key.split(delimiter): - if isinstance(ptr, list): -@@ -605,18 +829,17 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL - return ptr - - --def subdict_match(data, -- expr, -- delimiter=DEFAULT_TARGET_DELIM, -- regex_match=False, -- exact_match=False): -- ''' -+def subdict_match( -+ data, expr, delimiter=DEFAULT_TARGET_DELIM, regex_match=False, exact_match=False -+): -+ """ - Check for a match in a dictionary using a delimiter character to denote - levels of subdicts, and also allowing the delimiter character to be - matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and - data['foo']['bar'] == 'baz'. 
The latter would take priority over the - former, as more deeply-nested matches are tried first. -- ''' -+ """ -+ - def _match(target, pattern, regex_match=False, exact_match=False): - # The reason for using six.text_type first and _then_ using - # to_unicode as a fallback is because we want to eventually have -@@ -628,11 +851,11 @@ def subdict_match(data, - # begin with is that (by design) to_unicode will raise a TypeError if a - # non-string/bytestring/bytearray value is passed. - try: -- target = six.text_type(target).lower() -+ target = str(target).lower() - except UnicodeDecodeError: - target = salt.utils.stringutils.to_unicode(target).lower() - try: -- pattern = six.text_type(pattern).lower() -+ pattern = str(pattern).lower() - except UnicodeDecodeError: - pattern = salt.utils.stringutils.to_unicode(pattern).lower() - -@@ -640,48 +863,54 @@ def subdict_match(data, - try: - return re.match(pattern, target) - except Exception: # pylint: disable=broad-except -- log.error('Invalid regex \'%s\' in match', pattern) -+ log.error("Invalid regex '%s' in match", pattern) - return False - else: -- return target == pattern if exact_match \ -- else fnmatch.fnmatch(target, pattern) -+ return ( -+ target == pattern if exact_match else fnmatch.fnmatch(target, pattern) -+ ) - - def _dict_match(target, pattern, regex_match=False, exact_match=False): - ret = False -- wildcard = pattern.startswith('*:') -+ wildcard = pattern.startswith("*:") - if wildcard: - pattern = pattern[2:] - -- if pattern == '*': -+ if pattern == "*": - # We are just checking that the key exists - ret = True - if not ret and pattern in target: - # We might want to search for a key - ret = True -- if not ret and subdict_match(target, -- pattern, -- regex_match=regex_match, -- exact_match=exact_match): -+ if not ret and subdict_match( -+ target, pattern, regex_match=regex_match, exact_match=exact_match -+ ): - ret = True - if not ret and wildcard: - for key in target: - if isinstance(target[key], dict): -- if _dict_match(target[key], -- pattern, -- regex_match=regex_match, -- exact_match=exact_match): -+ if _dict_match( -+ target[key], -+ pattern, -+ regex_match=regex_match, -+ exact_match=exact_match, -+ ): - return True - elif isinstance(target[key], list): - for item in target[key]: -- if _match(item, -- pattern, -- regex_match=regex_match, -- exact_match=exact_match): -- return True -- elif _match(target[key], -+ if _match( -+ item, - pattern, - regex_match=regex_match, -- exact_match=exact_match): -+ exact_match=exact_match, -+ ): -+ return True -+ elif _match( -+ target[key], -+ pattern, -+ regex_match=regex_match, -+ exact_match=exact_match, -+ ): - return True - return ret - -@@ -695,7 +924,7 @@ def subdict_match(data, - # want to use are 3, 2, and 1, in that order. 
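    # A sketch of the search order described above, for the expression
    # 'foo:bar:baz' against {'foo': {'bar': 'baz'}}: the first pass tries
    # key 'foo:bar' with matchstr 'baz', the next pass key 'foo' with
    # matchstr 'bar:baz', so the most deeply nested match wins.
    #   subdict_match({'foo': {'bar': 'baz'}}, 'foo:bar:baz')  # True
    #   subdict_match({'foo': {'bar': 'baz'}}, 'foo:bar:ba*')  # True (fnmatch)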
- for idx in range(num_splits - 1, 0, -1): - key = delimiter.join(splits[:idx]) -- if key == '*': -+ if key == "*": - # We are matching on everything under the top level, so we need to - # treat the match as the entire data being passed in - matchstr = expr -@@ -703,54 +932,55 @@ def subdict_match(data, - else: - matchstr = delimiter.join(splits[idx:]) - match = traverse_dict_and_list(data, key, {}, delimiter=delimiter) -- log.debug("Attempting to match '%s' in '%s' using delimiter '%s'", -- matchstr, key, delimiter) -+ log.debug( -+ "Attempting to match '%s' in '%s' using delimiter '%s'", -+ matchstr, -+ key, -+ delimiter, -+ ) - if match == {}: - continue - if isinstance(match, dict): -- if _dict_match(match, -- matchstr, -- regex_match=regex_match, -- exact_match=exact_match): -+ if _dict_match( -+ match, matchstr, regex_match=regex_match, exact_match=exact_match -+ ): - return True - continue - if isinstance(match, (list, tuple)): - # We are matching a single component to a single list member - for member in match: - if isinstance(member, dict): -- if _dict_match(member, -- matchstr, -- regex_match=regex_match, -- exact_match=exact_match): -+ if _dict_match( -+ member, -+ matchstr, -+ regex_match=regex_match, -+ exact_match=exact_match, -+ ): - return True -- if _match(member, -- matchstr, -- regex_match=regex_match, -- exact_match=exact_match): -+ if _match( -+ member, matchstr, regex_match=regex_match, exact_match=exact_match -+ ): - return True - continue -- if _match(match, -- matchstr, -- regex_match=regex_match, -- exact_match=exact_match): -+ if _match(match, matchstr, regex_match=regex_match, exact_match=exact_match): - return True - return False - - --@jinja_filter('substring_in_list') -+@jinja_filter("substring_in_list") - def substr_in_list(string_to_search_for, list_to_search): -- ''' -+ """ - Return a boolean value that indicates whether or not a given - string is present in any of the strings which comprise a list -- ''' -+ """ - return any(string_to_search_for in s for s in list_to_search) - - - def is_dictlist(data): -- ''' -+ """ - Returns True if data is a list of one-element dicts (as found in many SLS - schemas), otherwise returns False -- ''' -+ """ - if isinstance(data, list): - for element in data: - if isinstance(element, dict): -@@ -762,16 +992,12 @@ def is_dictlist(data): - return False - - --def repack_dictlist(data, -- strict=False, -- recurse=False, -- key_cb=None, -- val_cb=None): -- ''' -+def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None): -+ """ - Takes a list of one-element dicts (as found in many SLS schemas) and - repacks into a single dictionary. 
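
A minimal sketch of the repacking, assuming the function is importable from salt.utils.data:

    from salt.utils.data import repack_dictlist

    repack_dictlist([{"user": "root"}, {"group": "wheel"}])
    # -> {'user': 'root', 'group': 'wheel'}
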
-- ''' -- if isinstance(data, six.string_types): -+ """ -+ if isinstance(data, str): - try: - data = salt.utils.yaml.safe_load(data) - except salt.utils.yaml.parser.ParserError as err: -@@ -783,7 +1009,7 @@ def repack_dictlist(data, - if val_cb is None: - val_cb = lambda x, y: y - -- valid_non_dict = (six.string_types, six.integer_types, float) -+ valid_non_dict = ((str,), (int,), float) - if isinstance(data, list): - for element in data: - if isinstance(element, valid_non_dict): -@@ -791,21 +1017,21 @@ def repack_dictlist(data, - if isinstance(element, dict): - if len(element) != 1: - log.error( -- 'Invalid input for repack_dictlist: key/value pairs ' -- 'must contain only one element (data passed: %s).', -- element -+ "Invalid input for repack_dictlist: key/value pairs " -+ "must contain only one element (data passed: %s).", -+ element, - ) - return {} - else: - log.error( -- 'Invalid input for repack_dictlist: element %s is ' -- 'not a string/dict/numeric value', element -+ "Invalid input for repack_dictlist: element %s is " -+ "not a string/dict/numeric value", -+ element, - ) - return {} - else: - log.error( -- 'Invalid input for repack_dictlist, data passed is not a list ' -- '(%s)', data -+ "Invalid input for repack_dictlist, data passed is not a list " "(%s)", data - ) - return {} - -@@ -821,8 +1047,8 @@ def repack_dictlist(data, - ret[key_cb(key)] = repack_dictlist(val, recurse=recurse) - elif strict: - log.error( -- 'Invalid input for repack_dictlist: nested dictlist ' -- 'found, but recurse is set to False' -+ "Invalid input for repack_dictlist: nested dictlist " -+ "found, but recurse is set to False" - ) - return {} - else: -@@ -832,17 +1058,17 @@ def repack_dictlist(data, - return ret - - --@jinja_filter('is_list') -+@jinja_filter("is_list") - def is_list(value): -- ''' -+ """ - Check if a variable is a list. -- ''' -+ """ - return isinstance(value, list) - - --@jinja_filter('is_iter') --def is_iter(thing, ignore=six.string_types): -- ''' -+@jinja_filter("is_iter") -+def is_iter(thing, ignore=(str,)): -+ """ - Test if an object is iterable, but not a string type. - - Test if an object is an iterator or is iterable itself. By default this -@@ -853,7 +1079,7 @@ def is_iter(thing, ignore=six.string_types): - dictionaries or named tuples. - - Based on https://bitbucket.org/petershinners/yter -- ''' -+ """ - if ignore and isinstance(thing, ignore): - return False - try: -@@ -863,9 +1089,9 @@ def is_iter(thing, ignore=six.string_types): - return False - - --@jinja_filter('sorted_ignorecase') -+@jinja_filter("sorted_ignorecase") - def sorted_ignorecase(to_sort): -- ''' -+ """ - Sort a list of strings ignoring case. - - >>> L = ['foo', 'Foo', 'bar', 'Bar'] -@@ -874,19 +1100,19 @@ def sorted_ignorecase(to_sort): - >>> sorted(L, key=lambda x: x.lower()) - ['bar', 'Bar', 'foo', 'Foo'] - >>> -- ''' -+ """ - return sorted(to_sort, key=lambda x: x.lower()) - - - def is_true(value=None): -- ''' -+ """ - Returns a boolean value representing the "truth" of the value passed. The - rules for what is a "True" value are: - - 1. Integer/float values greater than 0 - 2. The string values "True" and "true" - 3. 
Any object for which bool(obj) returns True -- ''' -+ """ - # First, try int/float conversion - try: - value = int(value) -@@ -898,26 +1124,26 @@ def is_true(value=None): - pass - - # Now check for truthiness -- if isinstance(value, (six.integer_types, float)): -+ if isinstance(value, ((int,), float)): - return value > 0 -- if isinstance(value, six.string_types): -- return six.text_type(value).lower() == 'true' -+ if isinstance(value, str): -+ return str(value).lower() == "true" - return bool(value) - - --@jinja_filter('mysql_to_dict') -+@jinja_filter("mysql_to_dict") - def mysql_to_dict(data, key): -- ''' -+ """ - Convert MySQL-style output to a python dictionary -- ''' -+ """ - ret = {} -- headers = [''] -+ headers = [""] - for line in data: - if not line: - continue -- if line.startswith('+'): -+ if line.startswith("+"): - continue -- comps = line.split('|') -+ comps = line.split("|") - for comp in range(len(comps)): - comps[comp] = comps[comp].strip() - if len(headers) > 1: -@@ -934,14 +1160,14 @@ def mysql_to_dict(data, key): - - - def simple_types_filter(data): -- ''' -+ """ - Convert the data list, dictionary into simple types, i.e., int, float, string, - bool, etc. -- ''' -+ """ - if data is None: - return data - -- simpletypes_keys = (six.string_types, six.text_type, six.integer_types, float, bool) -+ simpletypes_keys = ((str,), str, (int,), float, bool) - simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple]) - - if isinstance(data, (list, tuple)): -@@ -957,7 +1183,7 @@ def simple_types_filter(data): - - if isinstance(data, dict): - simpledict = {} -- for key, value in six.iteritems(data): -+ for key, value in data.items(): - if key is not None and not isinstance(key, simpletypes_keys): - key = repr(key) - if value is not None and isinstance(value, (dict, list, tuple)): -@@ -971,23 +1197,23 @@ def simple_types_filter(data): - - - def stringify(data): -- ''' -+ """ - Given an iterable, returns its items as a list, with any non-string items - converted to unicode strings. -- ''' -+ """ - ret = [] - for item in data: - if six.PY2 and isinstance(item, str): - item = salt.utils.stringutils.to_unicode(item) -- elif not isinstance(item, six.string_types): -- item = six.text_type(item) -+ elif not isinstance(item, str): -+ item = str(item) - ret.append(item) - return ret - - --@jinja_filter('json_query') -+@jinja_filter("json_query") - def json_query(data, expr): -- ''' -+ """ - Query data using JMESPath language (http://jmespath.org). - - Requires the https://github.com/jmespath/jmespath.py library. -@@ -1009,16 +1235,16 @@ def json_query(data, expr): - .. code-block:: text - - [80, 25, 22] -- ''' -+ """ - if jmespath is None: -- err = 'json_query requires jmespath module installed' -+ err = "json_query requires jmespath module installed" - log.error(err) - raise RuntimeError(err) - return jmespath.search(expr, data) - - - def _is_not_considered_falsey(value, ignore_types=()): -- ''' -+ """ - Helper function for filter_falsey to determine if something is not to be - considered falsey. - -@@ -1026,12 +1252,12 @@ def _is_not_considered_falsey(value, ignore_types=()): - :param list ignore_types: The types to ignore when considering the value. - - :return bool -- ''' -+ """ - return isinstance(value, bool) or type(value) in ignore_types or value - - - def filter_falsey(data, recurse_depth=None, ignore_types=()): -- ''' -+ """ - Helper function to remove items from an iterable with falsey value. - Removes ``None``, ``{}`` and ``[]``, 0, '' (but does not remove ``False``). 
- Recurses into sub-iterables if ``recurse`` is set to ``True``. -@@ -1045,37 +1271,42 @@ def filter_falsey(data, recurse_depth=None, ignore_types=()): - :return type(data) - - .. versionadded:: 3000 -- ''' -+ """ - filter_element = ( -- functools.partial(filter_falsey, -- recurse_depth=recurse_depth-1, -- ignore_types=ignore_types) -- if recurse_depth else lambda x: x -+ functools.partial( -+ filter_falsey, recurse_depth=recurse_depth - 1, ignore_types=ignore_types -+ ) -+ if recurse_depth -+ else lambda x: x - ) - - if isinstance(data, dict): -- processed_elements = [(key, filter_element(value)) for key, value in six.iteritems(data)] -- return type(data)([ -- (key, value) -- for key, value in processed_elements -- if _is_not_considered_falsey(value, ignore_types=ignore_types) -- ]) -+ processed_elements = [ -+ (key, filter_element(value)) for key, value in data.items() -+ ] -+ return type(data)( -+ [ -+ (key, value) -+ for key, value in processed_elements -+ if _is_not_considered_falsey(value, ignore_types=ignore_types) -+ ] -+ ) - if is_iter(data): - processed_elements = (filter_element(value) for value in data) -- return type(data)([ -- value for value in processed_elements -- if _is_not_considered_falsey(value, ignore_types=ignore_types) -- ]) -+ return type(data)( -+ [ -+ value -+ for value in processed_elements -+ if _is_not_considered_falsey(value, ignore_types=ignore_types) -+ ] -+ ) - return data - - - def recursive_diff( -- old, -- new, -- ignore_keys=None, -- ignore_order=False, -- ignore_missing_keys=False): -- ''' -+ old, new, ignore_keys=None, ignore_order=False, ignore_missing_keys=False -+): -+ """ - Performs a recursive diff on mappings and/or iterables and returns the result - in a {'old': values, 'new': values}-style. - Compares dicts and sets unordered (obviously), OrderedDicts and Lists ordered -@@ -1090,12 +1321,16 @@ def recursive_diff( - but missing in ``new``. Only works for regular dicts. - - :return dict: Returns dict with keys 'old' and 'new' containing the differences. 
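
Hedged sketches of the two helpers described above, assuming salt.utils.data is importable:

    from salt.utils.data import filter_falsey, recursive_diff

    filter_falsey({"a": 0, "b": False, "c": "", "d": "kept"})
    # -> {'b': False, 'd': 'kept'}  (False survives; 0 and '' are dropped)

    recursive_diff({"cpu": 2, "mem": 1024}, {"cpu": 4, "mem": 1024})
    # -> {'old': {'cpu': 2}, 'new': {'cpu': 4}}  (equal keys are omitted)
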
-- ''' -+ """ - ignore_keys = ignore_keys or [] - res = {} - ret_old = copy.deepcopy(old) - ret_new = copy.deepcopy(new) -- if isinstance(old, OrderedDict) and isinstance(new, OrderedDict) and not ignore_order: -+ if ( -+ isinstance(old, OrderedDict) -+ and isinstance(new, OrderedDict) -+ and not ignore_order -+ ): - append_old, append_new = [], [] - if len(old) != len(new): - min_length = min(len(old), len(new)) -@@ -1114,13 +1349,14 @@ def recursive_diff( - new[key_new], - ignore_keys=ignore_keys, - ignore_order=ignore_order, -- ignore_missing_keys=ignore_missing_keys) -+ ignore_missing_keys=ignore_missing_keys, -+ ) - if not res: # Equal - del ret_old[key_old] - del ret_new[key_new] - else: -- ret_old[key_old] = res['old'] -- ret_new[key_new] = res['new'] -+ ret_old[key_old] = res["old"] -+ ret_new[key_new] = res["new"] - else: - if key_old in ignore_keys: - del ret_old[key_old] -@@ -1131,7 +1367,7 @@ def recursive_diff( - ret_old[item] = old[item] - for item in append_new: - ret_new[item] = new[item] -- ret = {'old': ret_old, 'new': ret_new} if ret_old or ret_new else {} -+ ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {} - elif isinstance(old, Mapping) and isinstance(new, Mapping): - # Compare unordered - for key in set(list(old) + list(new)): -@@ -1146,16 +1382,17 @@ def recursive_diff( - new[key], - ignore_keys=ignore_keys, - ignore_order=ignore_order, -- ignore_missing_keys=ignore_missing_keys) -+ ignore_missing_keys=ignore_missing_keys, -+ ) - if not res: # Equal - del ret_old[key] - del ret_new[key] - else: -- ret_old[key] = res['old'] -- ret_new[key] = res['new'] -- ret = {'old': ret_old, 'new': ret_new} if ret_old or ret_new else {} -+ ret_old[key] = res["old"] -+ ret_new[key] = res["new"] -+ ret = {"old": ret_old, "new": ret_new} if ret_old or ret_new else {} - elif isinstance(old, set) and isinstance(new, set): -- ret = {'old': old - new, 'new': new - old} if old - new or new - old else {} -+ ret = {"old": old - new, "new": new - old} if old - new or new - old else {} - elif is_iter(old) and is_iter(new): - # Create a list so we can edit on an index-basis. - list_old = list(ret_old) -@@ -1168,7 +1405,8 @@ def recursive_diff( - item_new, - ignore_keys=ignore_keys, - ignore_order=ignore_order, -- ignore_missing_keys=ignore_missing_keys) -+ ignore_missing_keys=ignore_missing_keys, -+ ) - if not res: - list_old.remove(item_old) - list_new.remove(item_new) -@@ -1181,19 +1419,87 @@ def recursive_diff( - iter_new, - ignore_keys=ignore_keys, - ignore_order=ignore_order, -- ignore_missing_keys=ignore_missing_keys) -+ ignore_missing_keys=ignore_missing_keys, -+ ) - if not res: # Equal - remove_indices.append(index) - else: -- list_old[index] = res['old'] -- list_new[index] = res['new'] -+ list_old[index] = res["old"] -+ list_new[index] = res["new"] - for index in reversed(remove_indices): - list_old.pop(index) - list_new.pop(index) - # Instantiate a new whatever-it-was using the list as iterable source. - # This may not be the most optimized in way of speed and memory usage, - # but it will work for all iterable types. -- ret = {'old': type(old)(list_old), 'new': type(new)(list_new)} if list_old or list_new else {} -+ ret = ( -+ {"old": type(old)(list_old), "new": type(new)(list_new)} -+ if list_old or list_new -+ else {} -+ ) - else: -- ret = {} if old == new else {'old': ret_old, 'new': ret_new} -+ ret = {} if old == new else {"old": ret_old, "new": ret_new} - return ret -+ -+ -+def get_value(obj, path, default=None): -+ """ -+ Get the values for a given path. 
-+ -+ :param path: -+ keys of the properties in the tree separated by colons. -+ One segment in the path can be replaced by an id surrounded by curly braces. -+ This will match all items in a list of dictionary. -+ -+ :param default: -+ default value to return when no value is found -+ -+ :return: -+ a list of dictionaries, with at least the "value" key providing the actual value. -+ If a placeholder was used, the placeholder id will be a key providing the replacement for it. -+ Note that a value that wasn't found in the tree will be an empty list. -+ This ensures we can make the difference with a None value set by the user. -+ """ -+ res = [{"value": obj}] -+ if path: -+ key = path[: path.find(":")] if ":" in path else path -+ next_path = path[path.find(":") + 1 :] if ":" in path else None -+ -+ if key.startswith("{") and key.endswith("}"): -+ placeholder_name = key[1:-1] -+ # There will be multiple values to get here -+ items = [] -+ if obj is None: -+ return res -+ if isinstance(obj, dict): -+ items = obj.items() -+ elif isinstance(obj, list): -+ items = enumerate(obj) -+ -+ def _append_placeholder(value_dict, key): -+ value_dict[placeholder_name] = key -+ return value_dict -+ -+ values = [ -+ [ -+ _append_placeholder(item, key) -+ for item in get_value(val, next_path, default) -+ ] -+ for key, val in items -+ ] -+ -+ # flatten the list -+ values = [y for x in values for y in x] -+ return values -+ elif isinstance(obj, dict): -+ if key not in obj.keys(): -+ return [{"value": default}] -+ -+ value = obj.get(key) -+ if res is not None: -+ res = get_value(value, next_path, default) -+ else: -+ res = [{"value": value}] -+ else: -+ return [{"value": default if obj is not None else obj}] -+ return res diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py -index 6d8d74fd3f..2b9c7bf43f 100644 +index e5c8ad4eec..b9f047820b 100644 --- a/salt/utils/xmlutil.py +++ b/salt/utils/xmlutil.py -@@ -1,30 +1,34 @@ --# -*- coding: utf-8 -*- --''' -+""" - Various XML utilities --''' -+""" - - # Import Python libs --from __future__ import absolute_import, print_function, unicode_literals -+import re -+import string # pylint: disable=deprecated-module -+from xml.etree import ElementTree -+ -+# Import salt libs -+import salt.utils.data - - - def _conv_name(x): -- ''' -+ """ - If this XML tree has an xmlns attribute, then etree will add it - to the beginning of the tag, like: "{http://path}tag". -- ''' -- if '}' in x: -- comps = x.split('}') -+ """ -+ if "}" in x: -+ comps = x.split("}") - name = comps[1] - return name - return x - - - def _to_dict(xmltree): -- ''' -+ """ +@@ -25,7 +25,7 @@ def _to_dict(xmltree): + """ Converts an XML ElementTree to a dictionary that only contains items. This is the default behavior in version 2017.7. This will default to prevent - unexpected parsing issues on modules dependant on this. -- ''' -+ """ +- unexpected parsing issues on modules dependent on this. ++ unexpected parsing issues on modules dependant on this. + """ # If this object has no children, the for..loop below will return nothing # for it, so just return a single dict representing it. - if len(xmltree.getchildren()) < 1: -@@ -51,9 +55,9 @@ def _to_dict(xmltree): +@@ -298,7 +298,7 @@ def change_xml(doc, data, mapping): + if convert_fn: + new_value = convert_fn(new_value) - - def _to_full_dict(xmltree): -- ''' -+ """ - Returns the full XML dictionary including attributes. 
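
A sketch of the placeholder expansion performed by get_value above; the "id" placeholder name is arbitrary:

    from salt.utils.data import get_value

    data = {"disks": [{"name": "system"}, {"name": "data"}]}
    get_value(data, "disks:{id}:name")
    # -> [{'value': 'system', 'id': 0}, {'value': 'data', 'id': 1}]
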
-- '''
-+ """
- xmldict = {}
-
- for attrName, attrValue in xmltree.attrib.items():
-@@ -87,15 +91,234 @@
-
-
- def to_dict(xmltree, attr=False):
-- '''
-+ """
- Convert an XML tree into a dict. The tree that is passed in must be an
- ElementTree object.
- Args:
- xmltree: An ElementTree object.
- attr: If true, attributes will be parsed. If false, they will be ignored.
-
-- '''
-+ """
- if attr:
- return _to_full_dict(xmltree)
- else:
- return _to_dict(xmltree)
-+
-+
-+def get_xml_node(node, xpath):
-+ """
-+ Get an XML node using a path (super simple xpath showing complete node ancestry).
-+ This also creates the missing nodes.
-+
-+ The supported XPath can contain elements filtering using [@attr='value'].
-+
-+ Args:
-+ node: an Element object
-+ xpath: simple XPath to look for.
-+ """
-+ if not xpath.startswith("./"):
-+ xpath = "./{}".format(xpath)
-+ res = node.find(xpath)
-+ if res is None:
-+ parent_xpath = xpath[: xpath.rfind("/")]
-+ parent = node.find(parent_xpath)
-+ if parent is None:
-+ parent = get_xml_node(node, parent_xpath)
-+ segment = xpath[xpath.rfind("/") + 1 :]
-+ # We may have [] filter in the segment
-+ matcher = re.match(
-+ r"""(?P<tag>[^[]+)(?:\[@(?P<attr>\w+)=["'](?P<value>[^"']+)["']])?""",
-+ segment,
-+ )
-+ attrib = (
-+ {matcher.group("attr"): matcher.group("value")}
-+ if matcher.group("attr") and matcher.group("value")
-+ else {}
-+ )
-+ res = ElementTree.SubElement(parent, matcher.group("tag"), attrib)
-+ return res
-+
-+
-+def set_node_text(node, value):
-+ """
-+ Function to use in the ``set`` value in the :py:func:`change_xml` mapping items to set the text.
-+ This is the default.
-+
-+ :param node: the node to set the text to
-+ :param value: the value to set
-+ """
-+ node.text = str(value)
-+
-+
-+def clean_node(parent_map, node, ignored=None):
-+ """
-+ Remove the node from its parent if it has no attributes other than the ignored ones, no text, and no children.
-+ Recursively called up to the document root to ensure no empty node is left.
-+
-+ :param parent_map: dictionary mapping each node to its parent
-+ :param node: the node to clean
-+ :param ignored: a list of ignored attributes.
-+ """
-+ has_text = node.text is not None and node.text.strip()
-+ parent = parent_map.get(node)
-+ if (
-+ len(node.attrib.keys() - (ignored or [])) == 0
-+ and not list(node)
-+ and not has_text
-+ ):
-+ parent.remove(node)
-+ # Clean parent nodes if needed
-+ if parent is not None:
-+ clean_node(parent_map, parent, ignored)
-+
-+
-+def del_text(parent_map, node):
-+ """
-+ Function to use as ``del`` value in the :py:func:`change_xml` mapping items to remove the text.
-+ This is the default function.
-+ Calls :py:func:`clean_node` before returning.
-+ """
-+ parent = parent_map[node]
-+ parent.remove(node)
-+ clean_node(parent_map, node)
-+
-+
-+def del_attribute(attribute, ignored=None):
-+ """
-+ Helper returning a function to use as ``del`` value in the :py:func:`change_xml` mapping items to
-+ remove an attribute.
-+
-+ The generated function calls :py:func:`clean_node` before returning.
-+
-+ :param attribute: the name of the attribute to remove
-+ :param ignored: the list of attributes to ignore during the cleanup
-+
-+ :return: the function called by :py:func:`change_xml`.
-+ """
-+
-+ def _do_delete(parent_map, node):
-+ if attribute not in node.keys():
-+ return
-+ node.attrib.pop(attribute)
-+ clean_node(parent_map, node, ignored)
-+
-+ return _do_delete
-+
-+
-+def change_xml(doc, data, mapping):
-+ """
-+ Change an XML ElementTree document according to a list of mapping items.
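A quick sketch of the create-on-miss behaviour of get_xml_node defined above (standalone usage for illustration; assumes Salt is importable):

    from xml.etree import ElementTree

    import salt.utils.xmlutil as xmlutil

    doc = ElementTree.fromstring("<domain><name>test01</name></domain>")

    # Neither <cpu> nor <topology> exists yet: the whole ancestry is created,
    # and the [@attr='value'] filter becomes an attribute of the new node.
    node = xmlutil.get_xml_node(doc, "cpu/topology[@sockets='2']")
    assert node is doc.find("cpu/topology")
    assert node.get("sockets") == "2"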
-+
-+ :param doc: the ElementTree parsed XML document to modify
-+ :param data: the dictionary of values used to modify the XML.
-+ :param mapping: a list of items describing how to modify the XML document.
-+ Each item is a dictionary containing the following keys:
-+
-+ .. glossary::
-+ path
-+ the path to the value to set or remove in the ``data`` parameter.
-+ See :py:func:`salt.utils.data.get_value <salt.utils.data.get_value>` for the format
-+ of the value.
-+
-+ xpath
-+ Simplified XPath expression used to locate the change in the XML tree.
-+ See :py:func:`get_xml_node` documentation for details on the supported XPath syntax.
-+
-+ get
-+ function getting the value from the XML.
-+ Takes a single parameter for the XML node found by the XPath expression.
-+ Default returns the node text value.
-+ This may be used to return an attribute or to perform value transformation.
-+
-+ set
-+ function setting the value in the XML.
-+ Takes two parameters for the XML node and the value to set.
-+ Default is to set the text value.
-+
-+ del
-+ function deleting the value in the XML.
-+ Takes two parameters for the parent node and the node matched by the XPath.
-+ Default is to remove the text value.
-+ More cleanup may be performed, see the :py:func:`clean_node` function for details.
-+
-+ convert
-+ function modifying the user-provided value right before comparing it with the one from the XML.
-+ Takes the value as a single parameter.
-+ Default is to apply no conversion.
-+
-+ :return: ``True`` if the XML has been modified, ``False`` otherwise.
-+ """
-+ need_update = False
-+ for param in mapping:
-+ # Get the value from the function parameter using the path-like description
-+ # Using an empty list as a default value will cause values not provided by the user
-+ # to be left untouched, as opposed to explicit None unsetting the value
-+ values = salt.utils.data.get_value(data, param["path"], [])
-+ xpath = param["xpath"]
-+ # Prepend the xpath with ./ to handle the root more easily
-+ if not xpath.startswith("./"):
-+ xpath = "./{}".format(xpath)
-+
-+ placeholders = [
-+ s[1:-1]
-+ for s in param["path"].split(":")
-+ if s.startswith("{") and s.endswith("}")
-+ ]
-+
-+ ctx = {placeholder: "$$$" for placeholder in placeholders}
-+ all_nodes_xpath = string.Template(xpath).substitute(ctx)
-+ all_nodes_xpath = re.sub(
-+ r"""(?:=['"]\$\$\$["'])|(?:\[\$\$\$\])""", "", all_nodes_xpath
-+ )
-+
-+ # Store the nodes that are not removed for later cleanup
-+ kept_nodes = set()
-+
-+ for value_item in values:
-+ new_value = value_item["value"]
-+
-+ # Only handle simple type values. Use multiple entries or a custom get for dict or lists
-+ if isinstance(new_value, list) or isinstance(new_value, dict):
-+ continue
-+
-+ if new_value is not None:
-+ ctx = {
-+ placeholder: value_item.get(placeholder, "")
-+ for placeholder in placeholders
-+ }
-+ node_xpath = string.Template(xpath).substitute(ctx)
-+ node = get_xml_node(doc, node_xpath)
-+
-+ kept_nodes.add(node)
-+
-+ get_fn = param.get("get", lambda n: n.text)
-+ set_fn = param.get("set", set_node_text)
-+ current_value = get_fn(node)
-+
-+ # Do we need to apply some conversion to the user-provided value?
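Putting the hooks together, a minimal sketch of a change_xml call; it is a condensed version of the test_change_xml_text and test_change_xml_convert cases added further down, with an illustrative two-element document (assumes Salt is importable):

    import salt.utils.xmlutil as xml
    from xml.etree import ElementTree as ET

    doc = ET.fromstring("<domain><name>test01</name><memory>1024</memory></domain>")

    changed = xml.change_xml(
        doc,
        {"name": "test02", "mem": 2},
        [
            {"path": "name", "xpath": "name"},
            # convert turns the user value into the unit stored in the XML
            {"path": "mem", "xpath": "memory", "convert": lambda v: v * 1024},
        ],
    )
    assert changed
    assert doc.find("name").text == "test02"
    assert doc.find("memory").text == "2048"

Passing {"name": None} instead would route through the del hook (del_text by default) and remove the node entirely.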
-+ convert_fn = param.get("convert") -+ if convert_fn: -+ new_value = convert_fn(new_value) -+ +- if str(current_value) != str(new_value): + if current_value != new_value: -+ set_fn(node, new_value) -+ need_update = True -+ else: -+ nodes = doc.findall(all_nodes_xpath) -+ del_fn = param.get("del", del_text) -+ parent_map = {c: p for p in doc.iter() for c in p} -+ for node in nodes: -+ del_fn(parent_map, node) -+ need_update = True -+ -+ # Clean the left over XML elements if there were placeholders -+ if placeholders and values[0].get("value") != []: -+ all_nodes = set(doc.findall(all_nodes_xpath)) -+ to_remove = all_nodes - kept_nodes -+ del_fn = param.get("del", del_text) -+ parent_map = {c: p for p in doc.iter() for c in p} -+ for node in to_remove: -+ del_fn(parent_map, node) -+ need_update = True -+ -+ return need_update -diff --git a/tests/pytests/unit/utils/test_data.py b/tests/pytests/unit/utils/test_data.py -new file mode 100644 -index 0000000000..b3f0ba04ae ---- /dev/null -+++ b/tests/pytests/unit/utils/test_data.py -@@ -0,0 +1,57 @@ -+import salt.utils.data -+ -+ -+def test_get_value_simple_path(): -+ data = {"a": {"b": {"c": "foo"}}} -+ assert [{"value": "foo"}] == salt.utils.data.get_value(data, "a:b:c") -+ -+ -+def test_get_value_placeholder_dict(): -+ data = {"a": {"b": {"name": "foo"}, "c": {"name": "bar"}}} -+ assert [ -+ {"value": "foo", "id": "b"}, -+ {"value": "bar", "id": "c"}, -+ ] == salt.utils.data.get_value(data, "a:{id}:name") -+ -+ -+def test_get_value_placeholder_list(): -+ data = {"a": [{"name": "foo"}, {"name": "bar"}]} -+ assert [ -+ {"value": "foo", "id": 0}, -+ {"value": "bar", "id": 1}, -+ ] == salt.utils.data.get_value(data, "a:{id}:name") -+ -+ -+def test_get_value_nested_placeholder(): -+ data = { -+ "a": { -+ "b": {"b1": {"name": "foo1"}, "b2": {"name": "foo2"}}, -+ "c": {"c1": {"name": "bar"}}, -+ } -+ } -+ assert [ -+ {"value": "foo1", "id": "b", "sub": "b1"}, -+ {"value": "foo2", "id": "b", "sub": "b2"}, -+ {"value": "bar", "id": "c", "sub": "c1"}, -+ ] == salt.utils.data.get_value(data, "a:{id}:{sub}:name") -+ -+ -+def test_get_value_nested_notfound(): -+ data = {"a": {"b": {"c": "foo"}}} -+ assert [{"value": []}] == salt.utils.data.get_value(data, "a:b:d", []) -+ -+ -+def test_get_value_not_found(): -+ assert [{"value": []}] == salt.utils.data.get_value({}, "a", []) -+ -+ -+def test_get_value_none(): -+ assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a") -+ -+ -+def test_get_value_simple_type_path(): -+ assert [{"value": []}] == salt.utils.data.get_value({"a": 1024}, "a:b", []) -+ -+ -+def test_get_value_None_path(): -+ assert [{"value": None}] == salt.utils.data.get_value({"a": None}, "a:b", []) -diff --git a/tests/pytests/unit/utils/test_xmlutil.py b/tests/pytests/unit/utils/test_xmlutil.py -new file mode 100644 -index 0000000000..081cc64193 ---- /dev/null -+++ b/tests/pytests/unit/utils/test_xmlutil.py -@@ -0,0 +1,169 @@ -+import pytest -+import salt.utils.xmlutil as xml -+from salt._compat import ElementTree as ET -+ -+ -+@pytest.fixture -+def xml_doc(): -+ return ET.fromstring( -+ """ -+ -+ test01 -+ 1024 -+ -+ -+ -+ -+ -+ -+ -+ """ -+ ) -+ -+ -+def test_change_xml_text(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, {"name": "test02"}, [{"path": "name", "xpath": "name"}] -+ ) -+ assert ret -+ assert "test02" == xml_doc.find("name").text -+ -+ -+def test_change_xml_text_nochange(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, {"name": "test01"}, [{"path": "name", "xpath": "name"}] -+ ) -+ assert not ret -+ -+ -+def 
test_change_xml_text_notdefined(xml_doc): -+ ret = xml.change_xml(xml_doc, {}, [{"path": "name", "xpath": "name"}]) -+ assert not ret -+ -+ -+def test_change_xml_text_removed(xml_doc): -+ ret = xml.change_xml(xml_doc, {"name": None}, [{"path": "name", "xpath": "name"}]) -+ assert ret -+ assert xml_doc.find("name") is None -+ -+ -+def test_change_xml_text_add(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"vendor": "ACME"}}, -+ [{"path": "cpu:vendor", "xpath": "cpu/vendor"}], -+ ) -+ assert ret -+ assert "ACME" == xml_doc.find("cpu/vendor").text -+ -+ -+def test_change_xml_convert(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"mem": 2}, -+ [{"path": "mem", "xpath": "memory", "convert": lambda v: v * 1024}], -+ ) -+ assert ret -+ assert "2048" == xml_doc.find("memory").text -+ -+ -+def test_change_xml_attr(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"topology": {"cores": 4}}}, -+ [ -+ { -+ "path": "cpu:topology:cores", -+ "xpath": "cpu/topology", -+ "get": lambda n: int(n.get("cores")) if n.get("cores") else None, -+ "set": lambda n, v: n.set("cores", str(v)), -+ "del": xml.del_attribute("cores"), -+ } -+ ], -+ ) -+ assert ret -+ assert "4" == xml_doc.find("cpu/topology").get("cores") -+ -+ -+def test_change_xml_attr_unchanged(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"topology": {"sockets": 1}}}, -+ [ -+ { -+ "path": "cpu:topology:sockets", -+ "xpath": "cpu/topology", -+ "get": lambda n: int(n.get("sockets")) if n.get("sockets") else None, -+ "set": lambda n, v: n.set("sockets", str(v)), -+ "del": xml.del_attribute("sockets"), -+ } -+ ], -+ ) -+ assert not ret -+ -+ -+def test_change_xml_attr_remove(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"topology": {"sockets": None}}}, -+ [ -+ { -+ "path": "cpu:topology:sockets", -+ "xpath": "./cpu/topology", -+ "get": lambda n: int(n.get("sockets")) if n.get("sockets") else None, -+ "set": lambda n, v: n.set("sockets", str(v)), -+ "del": xml.del_attribute("sockets"), -+ } -+ ], -+ ) -+ assert ret -+ assert xml_doc.find("cpu") is None -+ -+ -+def test_change_xml_not_simple_value(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"topology": {"sockets": None}}}, -+ [{"path": "cpu", "xpath": "vcpu", "get": lambda n: int(n.text)}], -+ ) -+ assert not ret -+ -+ -+def test_change_xml_template(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"vcpus": {2: {"enabled": True}, 4: {"enabled": False}}}}, -+ [ -+ { -+ "path": "cpu:vcpus:{id}:enabled", -+ "xpath": "vcpus/vcpu[@id='$id']", -+ "convert": lambda v: "yes" if v else "no", -+ "get": lambda n: n.get("enabled"), -+ "set": lambda n, v: n.set("enabled", v), -+ "del": xml.del_attribute("enabled", ["id"]), -+ }, -+ ], -+ ) -+ assert ret -+ assert xml_doc.find("vcpus/vcpu[@id='1']") is None -+ assert "yes" == xml_doc.find("vcpus/vcpu[@id='2']").get("enabled") -+ assert "no" == xml_doc.find("vcpus/vcpu[@id='4']").get("enabled") -+ -+ -+def test_change_xml_template_remove(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"cpu": {"vcpus": None}}, -+ [ -+ { -+ "path": "cpu:vcpus:{id}:enabled", -+ "xpath": "vcpus/vcpu[@id='$id']", -+ "convert": lambda v: "yes" if v else "no", -+ "get": lambda n: n.get("enabled"), -+ "set": lambda n, v: n.set("enabled", v), -+ "del": xml.del_attribute("enabled", ["id"]), -+ }, -+ ], -+ ) -+ assert ret -+ assert xml_doc.find("vcpus") is None + set_fn(node, new_value) + need_update = True + else: diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index d3988464f6..5ec8de77e7 
100644 +index f53b4a85c1..4775fec31f 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - virt execution module unit tests - """ -@@ -6,7 +5,6 @@ virt execution module unit tests - # pylint: disable=3rd-party-module-not-gated - - # Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import datetime - import os -@@ -23,9 +21,6 @@ import salt.utils.yaml - from salt._compat import ElementTree as ET - from salt.exceptions import CommandExecutionError, SaltInvocationError - --# Import third party libs --from salt.ext import six -- - # pylint: disable=import-error - from salt.ext.six.moves import range # pylint: disable=redefined-builtin - -@@ -136,7 +131,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "model": "virtio", - "filename": "myvm_system.qcow2", - "image": "/path/to/image", -- "source_file": "{0}{1}myvm_system.qcow2".format(root_dir, os.sep), -+ "source_file": "{}{}myvm_system.qcow2".format(root_dir, os.sep), - }, - { - "name": "data", -@@ -145,7 +140,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "format": "raw", - "model": "virtio", - "filename": "myvm_data.raw", -- "source_file": "{0}{1}myvm_data.raw".format(root_dir, os.sep), -+ "source_file": "{}{}myvm_data.raw".format(root_dir, os.sep), - }, - ], - disks, -@@ -582,8 +577,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertIsNone(root.get("type")) - self.assertEqual(root.find("name").text, "vmname/system.vmdk") - self.assertEqual(root.find("capacity").attrib["unit"], "KiB") -- self.assertEqual(root.find("capacity").text, six.text_type(8192 * 1024)) -- self.assertEqual(root.find("allocation").text, six.text_type(0)) -+ self.assertEqual(root.find("capacity").text, str(8192 * 1024)) -+ self.assertEqual(root.find("allocation").text, str(0)) - self.assertEqual(root.find("target/format").get("type"), "vmdk") - self.assertIsNone(root.find("target/permissions")) - self.assertIsNone(root.find("target/nocow")) -@@ -615,9 +610,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertIsNone(root.find("target/path")) - self.assertEqual(root.find("target/format").get("type"), "qcow2") - self.assertEqual(root.find("capacity").attrib["unit"], "KiB") -- self.assertEqual(root.find("capacity").text, six.text_type(8192 * 1024)) -+ self.assertEqual(root.find("capacity").text, str(8192 * 1024)) - self.assertEqual(root.find("capacity").attrib["unit"], "KiB") -- self.assertEqual(root.find("allocation").text, six.text_type(4096 * 1024)) -+ self.assertEqual(root.find("allocation").text, str(4096 * 1024)) - self.assertEqual(root.find("target/permissions/mode").text, "0775") - self.assertEqual(root.find("target/permissions/owner").text, "123") - self.assertEqual(root.find("target/permissions/group").text, "456") -@@ -638,7 +633,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - root = ET.fromstring(xml_data) - self.assertEqual(root.attrib["type"], "kvm") - self.assertEqual(root.find("vcpu").text, "1") -- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024)) -+ self.assertEqual(root.find("memory").text, str(512 * 1024)) - self.assertEqual(root.find("memory").attrib["unit"], "KiB") - - disks = root.findall(".//disk") -@@ -671,7 +666,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - root = ET.fromstring(xml_data) - self.assertEqual(root.attrib["type"], "vmware") - self.assertEqual(root.find("vcpu").text, "1") -- 
self.assertEqual(root.find("memory").text, six.text_type(512 * 1024)) -+ self.assertEqual(root.find("memory").text, str(512 * 1024)) - self.assertEqual(root.find("memory").attrib["unit"], "KiB") - - disks = root.findall(".//disk") -@@ -714,7 +709,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - root = ET.fromstring(xml_data) - self.assertEqual(root.attrib["type"], "xen") - self.assertEqual(root.find("vcpu").text, "1") -- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024)) -+ self.assertEqual(root.find("memory").text, str(512 * 1024)) - self.assertEqual(root.find("memory").attrib["unit"], "KiB") - self.assertEqual( - root.find(".//kernel").text, "/usr/lib/grub2/x86_64-xen/grub.xen" -@@ -768,7 +763,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - root = ET.fromstring(xml_data) - self.assertEqual(root.attrib["type"], "vmware") - self.assertEqual(root.find("vcpu").text, "1") -- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024)) -+ self.assertEqual(root.find("memory").text, str(512 * 1024)) - self.assertEqual(root.find("memory").attrib["unit"], "KiB") - self.assertTrue(len(root.findall(".//disk")) == 2) - self.assertTrue(len(root.findall(".//interface")) == 2) -@@ -801,7 +796,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - root = ET.fromstring(xml_data) - self.assertEqual(root.attrib["type"], "kvm") - self.assertEqual(root.find("vcpu").text, "1") -- self.assertEqual(root.find("memory").text, six.text_type(512 * 1024)) -+ self.assertEqual(root.find("memory").text, str(512 * 1024)) - self.assertEqual(root.find("memory").attrib["unit"], "KiB") - disks = root.findall(".//disk") - self.assertTrue(len(disks) == 2) -@@ -1635,7 +1630,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertIsNone(definition.find("./devices/disk[2]/source")) - self.assertEqual( - mock_run.call_args[0][0], -- 'qemu-img create -f qcow2 "{0}" 10240M'.format(expected_disk_path), -+ 'qemu-img create -f qcow2 "{}" 10240M'.format(expected_disk_path), - ) - self.assertEqual(mock_chmod.call_args[0][0], expected_disk_path) - -@@ -1729,11 +1724,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - 1 - - hvm -+ - - - - -- -+ - - - -@@ -1850,17 +1846,36 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1843,17 +1843,36 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/", } @@ -4263,7 +1202,7 @@ index d3988464f6..5ec8de77e7 100644 self.assertEqual( { "definition": True, -@@ -1884,6 +1899,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1877,6 +1896,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): "console=ttyS0 ks=http://example.com/f8-i386/os/", ) @@ -4275,7 +1214,7 @@ index d3988464f6..5ec8de77e7 100644 self.assertEqual( { "definition": True, -@@ -1903,9 +1923,28 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1896,9 +1920,28 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): "/usr/share/OVMF/OVMF_VARS.ms.fd", ) @@ -4304,16 +1243,7 @@ index d3988464f6..5ec8de77e7 100644 # Update memory case setmem_mock = MagicMock(return_value=0) domain_mock.setMemoryFlags = setmem_mock -@@ -1955,7 +1994,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - ) # pylint: disable=no-member - self.assertEqual( - mock_run.call_args[0][0], -- 'qemu-img create -f qcow2 "{0}" 2048M'.format(added_disk_path), -+ 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path), - ) - self.assertEqual(mock_chmod.call_args[0][0], added_disk_path) - 
self.assertListEqual( -@@ -2397,6 +2436,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2390,6 +2433,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): ], ) @@ -4357,7 +1287,7 @@ index d3988464f6..5ec8de77e7 100644 def test_update_existing_boot_params(self): """ Test virt.update() with existing boot parameters. -@@ -2537,6 +2613,18 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2530,6 +2610,18 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(setxml.find("os").find("initrd"), None) self.assertEqual(setxml.find("os").find("cmdline"), None) @@ -4376,49 +1306,7 @@ index d3988464f6..5ec8de77e7 100644 self.assertEqual( { "definition": True, -@@ -2582,7 +2670,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - salt.modules.config.__opts__, mock_config # pylint: disable=no-member - ): - -- for name in six.iterkeys(mock_config["virt"]["nic"]): -+ for name in mock_config["virt"]["nic"].keys(): - profile = salt.modules.virt._nic_profile(name, "kvm") - self.assertEqual(len(profile), 2) - -@@ -3592,8 +3680,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"] - ) - self.assertEqual( -- set(["qemu", "kvm"]), -- set([domainCaps["domain"] for domainCaps in caps["domains"]]), -+ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]}, - ) - - def test_network_tag(self): -@@ -3694,9 +3781,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - for i in range(2): - net_mock = MagicMock() - -- net_mock.name.return_value = "net{0}".format(i) -+ net_mock.name.return_value = "net{}".format(i) - net_mock.UUIDString.return_value = "some-uuid" -- net_mock.bridgeName.return_value = "br{0}".format(i) -+ net_mock.bridgeName.return_value = "br{}".format(i) - net_mock.autostart.return_value = True - net_mock.isActive.return_value = False - net_mock.isPersistent.return_value = True -@@ -4156,8 +4243,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - pool_mocks = [] - for i in range(2): - pool_mock = MagicMock() -- pool_mock.name.return_value = "pool{0}".format(i) -- pool_mock.UUIDString.return_value = "some-uuid-{0}".format(i) -+ pool_mock.name.return_value = "pool{}".format(i) -+ pool_mock.UUIDString.return_value = "some-uuid-{}".format(i) - pool_mock.info.return_value = [0, 1234, 5678, 123] - pool_mock.autostart.return_value = True - pool_mock.isPersistent.return_value = True -@@ -4257,7 +4344,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4248,7 +4340,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): """ mock_pool = MagicMock() mock_pool.delete = MagicMock(return_value=0) @@ -4426,7 +1314,7 @@ index d3988464f6..5ec8de77e7 100644 self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool) res = virt.pool_delete("test-pool") -@@ -4271,12 +4357,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4262,12 +4353,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL ) @@ -4442,7 +1330,7 @@ index d3988464f6..5ec8de77e7 100644 mock_pool.XMLDesc.return_value = """ test-ses -@@ -4293,16 +4379,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4284,16 +4375,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): mock_undefine = MagicMock(return_value=0) self.mock_conn.secretLookupByUsage.return_value.undefine = mock_undefine @@ -4461,7 +1349,7 @@ index d3988464f6..5ec8de77e7 100644 
self.mock_conn.secretLookupByUsage.assert_called_once_with( self.mock_libvirt.VIR_SECRET_USAGE_TYPE_CEPH, "pool_test-ses" -@@ -4571,24 +4652,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4562,24 +4648,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): """ @@ -4486,7 +1374,7 @@ index d3988464f6..5ec8de77e7 100644 mock_secret = MagicMock() self.mock_conn.secretLookupByUUIDString = MagicMock(return_value=mock_secret) -@@ -4609,6 +4672,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -4600,6 +4668,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): self.mock_conn.storagePoolDefineXML.assert_not_called() mock_secret.setValue.assert_called_once_with(b"secret") @@ -4510,80 +1398,11 @@ index d3988464f6..5ec8de77e7 100644 def test_pool_update_password_create(self): """ Test the pool_update function, where the password only is changed -@@ -4695,11 +4775,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - for idx, disk in enumerate(vms_disks): - vm = MagicMock() - # pylint: disable=no-member -- vm.name.return_value = "vm{0}".format(idx) -+ vm.name.return_value = "vm{}".format(idx) - vm.XMLDesc.return_value = """ - -- vm{0} -- {1} -+ vm{} -+ {} - - """.format( - idx, disk -@@ -4760,7 +4840,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - # pylint: disable=no-member - mock_volume.name.return_value = vol_data["name"] - mock_volume.key.return_value = vol_data["key"] -- mock_volume.path.return_value = "/path/to/{0}.qcow2".format( -+ mock_volume.path.return_value = "/path/to/{}.qcow2".format( - vol_data["name"] - ) - if vol_data["info"]: -@@ -4769,7 +4849,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - """ - - -- {0} -+ {} - - """.format( - vol_data["backingStore"] -@@ -5234,7 +5314,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - - def create_mock_vm(idx): - mock_vm = MagicMock() -- mock_vm.name.return_value = "vm{0}".format(idx) -+ mock_vm.name.return_value = "vm{}".format(idx) - return mock_vm - - mock_vms = [create_mock_vm(idx) for idx in range(3)] diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py -index c76f8a5fc0..f03159334b 100644 +index 6d38829870..8fe892f607 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py -@@ -1,9 +1,7 @@ --# -*- coding: utf-8 -*- - """ - :codeauthor: Jayesh Kariya - """ - # Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - - import shutil - import tempfile -@@ -14,7 +12,6 @@ import salt.utils.files - from salt.exceptions import CommandExecutionError, SaltInvocationError - - # Import 3rd-party libs --from salt.ext import six - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.mock import MagicMock, mock_open, patch - -@@ -37,7 +34,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors - """ - Fake function return error message - """ -- return six.text_type(self) -+ return str(self) - - - class LibvirtTestCase(TestCase, LoaderModuleMockMixin): -@@ -341,6 +338,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -333,6 +333,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): "myvm", cpu=2, mem=2048, @@ -4591,7 +1410,7 @@ index c76f8a5fc0..f03159334b 100644 os_type="linux", arch="i686", vm_type="qemu", -@@ -363,6 +361,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -355,6 +356,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): "myvm", cpu=2, mem=2048, @@ -4599,7 +1418,7 @@ index c76f8a5fc0..f03159334b 
100644 os_type="linux", arch="i686", disk="prod", -@@ -471,10 +470,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -463,10 +465,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): "comment": "Domain myvm updated with live update(s) failures", } ) @@ -4614,7 +1433,7 @@ index c76f8a5fc0..f03159334b 100644 mem=None, disk_profile=None, disks=None, -@@ -598,6 +600,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -590,6 +595,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=True, @@ -4622,7 +1441,7 @@ index c76f8a5fc0..f03159334b 100644 ) # No changes case -@@ -632,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -624,6 +630,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=True, @@ -4630,7 +1449,7 @@ index c76f8a5fc0..f03159334b 100644 ) def test_running(self): -@@ -708,6 +712,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -700,6 +707,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): install=True, pub_key=None, priv_key=None, @@ -4638,7 +1457,7 @@ index c76f8a5fc0..f03159334b 100644 connection=None, username=None, password=None, -@@ -769,6 +774,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -761,6 +769,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): install=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -4646,7 +1465,7 @@ index c76f8a5fc0..f03159334b 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -793,6 +799,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -785,6 +794,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): start=False, pub_key="/path/to/key.pub", priv_key="/path/to/key", @@ -4654,7 +1473,7 @@ index c76f8a5fc0..f03159334b 100644 connection="someconnection", username="libvirtuser", password="supersecret", -@@ -937,6 +944,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -929,6 +939,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=False, @@ -4662,7 +1481,7 @@ index c76f8a5fc0..f03159334b 100644 ) # Failed definition update case -@@ -1055,6 +1063,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1047,6 +1058,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=True, @@ -4670,7 +1489,7 @@ index c76f8a5fc0..f03159334b 100644 ) start_mock.assert_not_called() -@@ -1091,6 +1100,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1083,6 +1095,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password=None, boot=None, test=True, @@ -4678,7 +1497,7 @@ index c76f8a5fc0..f03159334b 100644 ) def test_stopped(self): -@@ -1978,6 +1988,72 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1970,6 +1983,72 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): password="secret", ) @@ -4751,7 +1570,7 @@ index c76f8a5fc0..f03159334b 100644 mocks["update"] = MagicMock(return_value=False) for mock in mocks: mocks[mock].reset_mock() -@@ -2027,6 +2103,9 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2019,6 +2098,9 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): for mock in mocks: mocks[mock].reset_mock() mocks["update"] = MagicMock(return_value=True) @@ -4761,7 +1580,7 @@ index c76f8a5fc0..f03159334b 100644 with patch.dict( virt.__salt__, { # pylint: disable=no-member -@@ -2130,6 +2209,7 @@ class LibvirtTestCase(TestCase, 
LoaderModuleMockMixin): +@@ -2122,6 +2204,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): ), ret, ) @@ -4769,7 +1588,7 @@ index c76f8a5fc0..f03159334b 100644 mocks["update"].assert_called_with( "mypool", ptype="logical", -@@ -2477,8 +2557,8 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2469,8 +2552,8 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): ): ret.update( { @@ -4780,7 +1599,7 @@ index c76f8a5fc0..f03159334b 100644 "result": True, } ) -@@ -2504,9 +2584,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2496,9 +2579,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): mocks["start"].assert_called_with( "mypool", connection=None, username=None, password=None ) @@ -4792,2321 +1611,49 @@ index c76f8a5fc0..f03159334b 100644 "mypool", ptype="logical", diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py -index 8fa352321c..8a6956d442 100644 +index 9206979284..aff7384232 100644 --- a/tests/unit/utils/test_data.py +++ b/tests/unit/utils/test_data.py -@@ -1,38 +1,38 @@ --# -*- coding: utf-8 -*- --''' -+""" - Tests for salt.utils.data --''' -+""" +@@ -220,38 +220,6 @@ class DataTestCase(TestCase): + ), + ) - # Import Python libs --from __future__ import absolute_import, print_function, unicode_literals -+ - import logging - - # Import Salt libs - import salt.utils.data - import salt.utils.stringutils --from salt.utils.odict import OrderedDict --from tests.support.unit import TestCase, LOREM_IPSUM --from tests.support.mock import patch - - # Import 3rd party libs --from salt.ext.six.moves import builtins # pylint: disable=import-error,redefined-builtin --from salt.ext import six -+from salt.ext.six.moves import ( # pylint: disable=import-error,redefined-builtin -+ builtins, -+) -+from salt.utils.odict import OrderedDict -+from tests.support.mock import patch -+from tests.support.unit import LOREM_IPSUM, TestCase - - log = logging.getLogger(__name__) --_b = lambda x: x.encode('utf-8') -+_b = lambda x: x.encode("utf-8") - _s = lambda x: salt.utils.stringutils.to_str(x, normalize=True) - # Some randomized data that will not decode --BYTES = b'1\x814\x10' -+BYTES = b"1\x814\x10" - - # This is an example of a unicode string with й constructed using two separate - # code points. Do not modify it. 
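For context on the EGGS constant defined just below: it spells "яйца" with й encoded as two code points (и plus a combining breve), and decode(normalize=True) is expected to collapse it via NFC composition, as the tests in this file assert. A tiny sketch (assumes Salt is importable):

    import unicodedata

    import salt.utils.data

    EGGS = "\u044f\u0438\u0306\u0446\u0430"  # 'яйца', with й as и + U+0306

    assert unicodedata.normalize("NFC", EGGS) == "яйца"
    assert salt.utils.data.decode(EGGS, normalize=True) == "яйца"
    assert salt.utils.data.decode(EGGS, normalize=False) == EGGS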
--EGGS = '\u044f\u0438\u0306\u0446\u0430' -+EGGS = "\u044f\u0438\u0306\u0446\u0430" - - - class DataTestCase(TestCase): - test_data = [ -- 'unicode_str', -- _b('питон'), -+ "unicode_str", -+ _b("питон"), - 123, - 456.789, - True, -@@ -40,71 +40,79 @@ class DataTestCase(TestCase): - None, - EGGS, - BYTES, -- [123, 456.789, _b('спам'), True, False, None, EGGS, BYTES], -- (987, 654.321, _b('яйца'), EGGS, None, (True, EGGS, BYTES)), -- {_b('str_key'): _b('str_val'), -- None: True, -- 123: 456.789, -- EGGS: BYTES, -- _b('subdict'): {'unicode_key': EGGS, -- _b('tuple'): (123, 'hello', _b('world'), True, EGGS, BYTES), -- _b('list'): [456, _b('спам'), False, EGGS, BYTES]}}, -- OrderedDict([(_b('foo'), 'bar'), (123, 456), (EGGS, BYTES)]) -+ [123, 456.789, _b("спам"), True, False, None, EGGS, BYTES], -+ (987, 654.321, _b("яйца"), EGGS, None, (True, EGGS, BYTES)), -+ { -+ _b("str_key"): _b("str_val"), -+ None: True, -+ 123: 456.789, -+ EGGS: BYTES, -+ _b("subdict"): { -+ "unicode_key": EGGS, -+ _b("tuple"): (123, "hello", _b("world"), True, EGGS, BYTES), -+ _b("list"): [456, _b("спам"), False, EGGS, BYTES], -+ }, -+ }, -+ OrderedDict([(_b("foo"), "bar"), (123, 456), (EGGS, BYTES)]), - ] - - def test_sorted_ignorecase(self): -- test_list = ['foo', 'Foo', 'bar', 'Bar'] -- expected_list = ['bar', 'Bar', 'foo', 'Foo'] +- # Traverse and match integer key in a nested dict +- # https://github.com/saltstack/salt/issues/56444 - self.assertEqual( -- salt.utils.data.sorted_ignorecase(test_list), expected_list) -+ test_list = ["foo", "Foo", "bar", "Bar"] -+ expected_list = ["bar", "Bar", "foo", "Foo"] -+ self.assertEqual(salt.utils.data.sorted_ignorecase(test_list), expected_list) - - def test_mysql_to_dict(self): -- test_mysql_output = ['+----+------+-----------+------+---------+------+-------+------------------+', -- '| Id | User | Host | db | Command | Time | State | Info |', -- '+----+------+-----------+------+---------+------+-------+------------------+', -- '| 7 | root | localhost | NULL | Query | 0 | init | show processlist |', -- '+----+------+-----------+------+---------+------+-------+------------------+'] -+ test_mysql_output = [ -+ "+----+------+-----------+------+---------+------+-------+------------------+", -+ "| Id | User | Host | db | Command | Time | State | Info |", -+ "+----+------+-----------+------+---------+------+-------+------------------+", -+ "| 7 | root | localhost | NULL | Query | 0 | init | show processlist |", -+ "+----+------+-----------+------+---------+------+-------+------------------+", -+ ] - -- ret = salt.utils.data.mysql_to_dict(test_mysql_output, 'Info') -+ ret = salt.utils.data.mysql_to_dict(test_mysql_output, "Info") - expected_dict = { -- 'show processlist': {'Info': 'show processlist', 'db': 'NULL', 'State': 'init', 'Host': 'localhost', -- 'Command': 'Query', 'User': 'root', 'Time': 0, 'Id': 7}} -+ "show processlist": { -+ "Info": "show processlist", -+ "db": "NULL", -+ "State": "init", -+ "Host": "localhost", -+ "Command": "Query", -+ "User": "root", -+ "Time": 0, -+ "Id": 7, -+ } -+ } - - self.assertDictEqual(ret, expected_dict) - - def test_subdict_match(self): -- test_two_level_dict = {'foo': {'bar': 'baz'}} -- test_two_level_comb_dict = {'foo': {'bar': 'baz:woz'}} -+ test_two_level_dict = {"foo": {"bar": "baz"}} -+ test_two_level_comb_dict = {"foo": {"bar": "baz:woz"}} - test_two_level_dict_and_list = { -- 'abc': ['def', 'ghi', {'lorem': {'ipsum': [{'dolor': 'sit'}]}}], -+ "abc": ["def", "ghi", {"lorem": {"ipsum": [{"dolor": "sit"}]}}], - } -- 
test_three_level_dict = {'a': {'b': {'c': 'v'}}} -+ test_three_level_dict = {"a": {"b": {"c": "v"}}} - - self.assertTrue( -- salt.utils.data.subdict_match( -- test_two_level_dict, 'foo:bar:baz' -- ) -+ salt.utils.data.subdict_match(test_two_level_dict, "foo:bar:baz") - ) - # In test_two_level_comb_dict, 'foo:bar' corresponds to 'baz:woz', not - # 'baz'. This match should return False. - self.assertFalse( -- salt.utils.data.subdict_match( -- test_two_level_comb_dict, 'foo:bar:baz' -- ) -+ salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz") - ) - # This tests matching with the delimiter in the value part (in other - # words, that the path 'foo:bar' corresponds to the string 'baz:woz'). - self.assertTrue( -- salt.utils.data.subdict_match( -- test_two_level_comb_dict, 'foo:bar:baz:woz' -- ) -+ salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz:woz") - ) - # This would match if test_two_level_comb_dict['foo']['bar'] was equal - # to 'baz:woz:wiz', or if there was more deep nesting. But it does not, - # so this should return False. - self.assertFalse( - salt.utils.data.subdict_match( -- test_two_level_comb_dict, 'foo:bar:baz:woz:wiz' -+ test_two_level_comb_dict, "foo:bar:baz:woz:wiz" - ) - ) - # This tests for cases when a key path corresponds to a list. The -@@ -115,189 +123,171 @@ class DataTestCase(TestCase): - # salt.utils.traverse_list_and_dict() so this particular assertion is a - # sanity check. - self.assertTrue( -- salt.utils.data.subdict_match( -- test_two_level_dict_and_list, 'abc:ghi' -- ) -+ salt.utils.data.subdict_match(test_two_level_dict_and_list, "abc:ghi") - ) - # This tests the use case of a dict embedded in a list, embedded in a - # list, embedded in a dict. This is a rather absurd case, but it - # confirms that match recursion works properly. - self.assertTrue( - salt.utils.data.subdict_match( -- test_two_level_dict_and_list, 'abc:lorem:ipsum:dolor:sit' -+ test_two_level_dict_and_list, "abc:lorem:ipsum:dolor:sit" - ) - ) - # Test four level dict match for reference -- self.assertTrue( -- salt.utils.data.subdict_match( -- test_three_level_dict, 'a:b:c:v' -- ) +- "it worked", +- salt.utils.data.traverse_dict_and_list( +- {"foo": {1234: "it worked"}}, "foo:1234", "it didn't work", +- ), - ) -+ self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:b:c:v")) - # Test regression in 2015.8 where 'a:c:v' would match 'a:b:c:v' -- self.assertFalse( -- salt.utils.data.subdict_match( -- test_three_level_dict, 'a:c:v' -- ) +- # Make sure that we properly return the default value when the initial +- # attempt fails and YAML-loading the target key doesn't change its +- # value. 
+- self.assertEqual( +- "default", +- salt.utils.data.traverse_dict_and_list( +- {"foo": {"baz": "didn't work"}}, "foo:bar", "default", +- ), - ) -+ self.assertFalse(salt.utils.data.subdict_match(test_three_level_dict, "a:c:v")) - # Test wildcard match -- self.assertTrue( -- salt.utils.data.subdict_match( -- test_three_level_dict, 'a:*:c:v' -- ) -- ) -+ self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:*:c:v")) - - def test_subdict_match_with_wildcards(self): -- ''' -+ """ - Tests subdict matching when wildcards are used in the expression -- ''' -- data = { -- 'a': { -- 'b': { -- 'ç': 'd', -- 'é': ['eff', 'gee', '8ch'], -- 'ĩ': {'j': 'k'} -- } -- } +- +- def test_issue_39709(self): +- test_two_level_dict_and_list = { +- "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}] - } -- assert salt.utils.data.subdict_match(data, '*:*:*:*') -- assert salt.utils.data.subdict_match(data, 'a:*:*:*') -- assert salt.utils.data.subdict_match(data, 'a:b:*:*') -- assert salt.utils.data.subdict_match(data, 'a:b:ç:*') -- assert salt.utils.data.subdict_match(data, 'a:b:*:d') -- assert salt.utils.data.subdict_match(data, 'a:*:ç:d') -- assert salt.utils.data.subdict_match(data, '*:b:ç:d') -- assert salt.utils.data.subdict_match(data, '*:*:ç:d') -- assert salt.utils.data.subdict_match(data, '*:*:*:d') -- assert salt.utils.data.subdict_match(data, 'a:*:*:d') -- assert salt.utils.data.subdict_match(data, 'a:b:*:ef*') -- assert salt.utils.data.subdict_match(data, 'a:b:*:g*') -- assert salt.utils.data.subdict_match(data, 'a:b:*:j:*') -- assert salt.utils.data.subdict_match(data, 'a:b:*:j:k') -- assert salt.utils.data.subdict_match(data, 'a:b:*:*:k') -- assert salt.utils.data.subdict_match(data, 'a:b:*:*:*') -+ """ -+ data = {"a": {"b": {"ç": "d", "é": ["eff", "gee", "8ch"], "ĩ": {"j": "k"}}}} -+ assert salt.utils.data.subdict_match(data, "*:*:*:*") -+ assert salt.utils.data.subdict_match(data, "a:*:*:*") -+ assert salt.utils.data.subdict_match(data, "a:b:*:*") -+ assert salt.utils.data.subdict_match(data, "a:b:ç:*") -+ assert salt.utils.data.subdict_match(data, "a:b:*:d") -+ assert salt.utils.data.subdict_match(data, "a:*:ç:d") -+ assert salt.utils.data.subdict_match(data, "*:b:ç:d") -+ assert salt.utils.data.subdict_match(data, "*:*:ç:d") -+ assert salt.utils.data.subdict_match(data, "*:*:*:d") -+ assert salt.utils.data.subdict_match(data, "a:*:*:d") -+ assert salt.utils.data.subdict_match(data, "a:b:*:ef*") -+ assert salt.utils.data.subdict_match(data, "a:b:*:g*") -+ assert salt.utils.data.subdict_match(data, "a:b:*:j:*") -+ assert salt.utils.data.subdict_match(data, "a:b:*:j:k") -+ assert salt.utils.data.subdict_match(data, "a:b:*:*:k") -+ assert salt.utils.data.subdict_match(data, "a:b:*:*:*") - - def test_traverse_dict(self): -- test_two_level_dict = {'foo': {'bar': 'baz'}} -+ test_two_level_dict = {"foo": {"bar": "baz"}} - - self.assertDictEqual( -- {'not_found': 'nope'}, -+ {"not_found": "nope"}, - salt.utils.data.traverse_dict( -- test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'} -- ) -+ test_two_level_dict, "foo:bar:baz", {"not_found": "nope"} -+ ), - ) - self.assertEqual( -- 'baz', -+ "baz", - salt.utils.data.traverse_dict( -- test_two_level_dict, 'foo:bar', {'not_found': 'not_found'} -- ) -+ test_two_level_dict, "foo:bar", {"not_found": "not_found"} -+ ), - ) - - def test_traverse_dict_and_list(self): -- test_two_level_dict = {'foo': {'bar': 'baz'}} -+ test_two_level_dict = {"foo": {"bar": "baz"}} - test_two_level_dict_and_list = { -- 'foo': ['bar', 'baz', 
{'lorem': {'ipsum': [{'dolor': 'sit'}]}}] -+ "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}] - } - - # Check traversing too far: salt.utils.data.traverse_dict_and_list() returns - # the value corresponding to a given key path, and baz is a value - # corresponding to the key path foo:bar. - self.assertDictEqual( -- {'not_found': 'nope'}, -+ {"not_found": "nope"}, - salt.utils.data.traverse_dict_and_list( -- test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'} -- ) -+ test_two_level_dict, "foo:bar:baz", {"not_found": "nope"} -+ ), - ) - # Now check to ensure that foo:bar corresponds to baz - self.assertEqual( -- 'baz', -+ "baz", - salt.utils.data.traverse_dict_and_list( -- test_two_level_dict, 'foo:bar', {'not_found': 'not_found'} -- ) -+ test_two_level_dict, "foo:bar", {"not_found": "not_found"} -+ ), - ) - # Check traversing too far - self.assertDictEqual( -- {'not_found': 'nope'}, -+ {"not_found": "nope"}, - salt.utils.data.traverse_dict_and_list( -- test_two_level_dict_and_list, 'foo:bar', {'not_found': 'nope'} -- ) -+ test_two_level_dict_and_list, "foo:bar", {"not_found": "nope"} -+ ), - ) - # Check index 1 (2nd element) of list corresponding to path 'foo' - self.assertEqual( -- 'baz', -+ "baz", - salt.utils.data.traverse_dict_and_list( -- test_two_level_dict_and_list, 'foo:1', {'not_found': 'not_found'} -- ) -+ test_two_level_dict_and_list, "foo:1", {"not_found": "not_found"} -+ ), - ) - # Traverse a couple times into dicts embedded in lists - self.assertEqual( -- 'sit', -+ "sit", - salt.utils.data.traverse_dict_and_list( - test_two_level_dict_and_list, -- 'foo:lorem:ipsum:dolor', -- {'not_found': 'not_found'} -- ) -+ "foo:lorem:ipsum:dolor", -+ {"not_found": "not_found"}, -+ ), - ) - - def test_compare_dicts(self): -- ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'bar'}) -+ ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"}) - self.assertEqual(ret, {}) - -- ret = salt.utils.data.compare_dicts(old={'foo': 'bar'}, new={'foo': 'woz'}) -- expected_ret = {'foo': {'new': 'woz', 'old': 'bar'}} -+ ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "woz"}) -+ expected_ret = {"foo": {"new": "woz", "old": "bar"}} - self.assertDictEqual(ret, expected_ret) - - def test_compare_lists_no_change(self): -- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'], -- new=[1, 2, 3, 'a', 'b', 'c']) -+ ret = salt.utils.data.compare_lists( -+ old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3, "a", "b", "c"] -+ ) - expected = {} - self.assertDictEqual(ret, expected) - - def test_compare_lists_changes(self): -- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'], -- new=[1, 2, 4, 'x', 'y', 'z']) -- expected = {'new': [4, 'x', 'y', 'z'], 'old': [3, 'a', 'b', 'c']} -+ ret = salt.utils.data.compare_lists( -+ old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 4, "x", "y", "z"] -+ ) -+ expected = {"new": [4, "x", "y", "z"], "old": [3, "a", "b", "c"]} - self.assertDictEqual(ret, expected) - - def test_compare_lists_changes_new(self): -- ret = salt.utils.data.compare_lists(old=[1, 2, 3], -- new=[1, 2, 3, 'x', 'y', 'z']) -- expected = {'new': ['x', 'y', 'z']} -+ ret = salt.utils.data.compare_lists(old=[1, 2, 3], new=[1, 2, 3, "x", "y", "z"]) -+ expected = {"new": ["x", "y", "z"]} - self.assertDictEqual(ret, expected) - - def test_compare_lists_changes_old(self): -- ret = salt.utils.data.compare_lists(old=[1, 2, 3, 'a', 'b', 'c'], -- new=[1, 2, 3]) -- expected = {'old': ['a', 'b', 'c']} -+ ret = 
salt.utils.data.compare_lists(old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3]) -+ expected = {"old": ["a", "b", "c"]} - self.assertDictEqual(ret, expected) - - def test_decode(self): -- ''' -+ """ - Companion to test_decode_to_str, they should both be kept up-to-date - with one another. - - NOTE: This uses the lambda "_b" defined above in the global scope, - which encodes a string to a bytestring, assuming utf-8. -- ''' -+ """ - expected = [ -- 'unicode_str', -- 'питон', -+ "unicode_str", -+ "питон", - 123, - 456.789, - True, - False, - None, -- 'яйца', -+ "яйца", - BYTES, -- [123, 456.789, 'спам', True, False, None, 'яйца', BYTES], -- (987, 654.321, 'яйца', 'яйца', None, (True, 'яйца', BYTES)), -- {'str_key': 'str_val', -- None: True, -- 123: 456.789, -- 'яйца': BYTES, -- 'subdict': {'unicode_key': 'яйца', -- 'tuple': (123, 'hello', 'world', True, 'яйца', BYTES), -- 'list': [456, 'спам', False, 'яйца', BYTES]}}, -- OrderedDict([('foo', 'bar'), (123, 456), ('яйца', BYTES)]) -+ [123, 456.789, "спам", True, False, None, "яйца", BYTES], -+ (987, 654.321, "яйца", "яйца", None, (True, "яйца", BYTES)), -+ { -+ "str_key": "str_val", -+ None: True, -+ 123: 456.789, -+ "яйца": BYTES, -+ "subdict": { -+ "unicode_key": "яйца", -+ "tuple": (123, "hello", "world", True, "яйца", BYTES), -+ "list": [456, "спам", False, "яйца", BYTES], -+ }, -+ }, -+ OrderedDict([("foo", "bar"), (123, 456), ("яйца", BYTES)]), - ] - - ret = salt.utils.data.decode( -@@ -305,7 +295,8 @@ class DataTestCase(TestCase): - keep=True, - normalize=True, - preserve_dict_class=True, -- preserve_tuples=True) -+ preserve_tuples=True, -+ ) - self.assertEqual(ret, expected) - - # The binary data in the data structure should fail to decode, even -@@ -317,74 +308,100 @@ class DataTestCase(TestCase): - keep=False, - normalize=True, - preserve_dict_class=True, -- preserve_tuples=True) -+ preserve_tuples=True, -+ ) - - # Now munge the expected data so that we get what we would expect if we - # disable preservation of dict class and tuples -- expected[10] = [987, 654.321, 'яйца', 'яйца', None, [True, 'яйца', BYTES]] -- expected[11]['subdict']['tuple'] = [123, 'hello', 'world', True, 'яйца', BYTES] -- expected[12] = {'foo': 'bar', 123: 456, 'яйца': BYTES} -+ expected[10] = [987, 654.321, "яйца", "яйца", None, [True, "яйца", BYTES]] -+ expected[11]["subdict"]["tuple"] = [123, "hello", "world", True, "яйца", BYTES] -+ expected[12] = {"foo": "bar", 123: 456, "яйца": BYTES} - - ret = salt.utils.data.decode( - self.test_data, - keep=True, - normalize=True, - preserve_dict_class=False, -- preserve_tuples=False) -+ preserve_tuples=False, -+ ) - self.assertEqual(ret, expected) - - # Now test single non-string, non-data-structure items, these should - # return the same value when passed to this function - for item in (123, 4.56, True, False, None): -- log.debug('Testing decode of %s', item) -+ log.debug("Testing decode of %s", item) - self.assertEqual(salt.utils.data.decode(item), item) - - # Test single strings (not in a data structure) -- self.assertEqual(salt.utils.data.decode('foo'), 'foo') -- self.assertEqual(salt.utils.data.decode(_b('bar')), 'bar') -- self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), 'яйца') -+ self.assertEqual(salt.utils.data.decode("foo"), "foo") -+ self.assertEqual(salt.utils.data.decode(_b("bar")), "bar") -+ self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), "яйца") - self.assertEqual(salt.utils.data.decode(EGGS, normalize=False), EGGS) - - # Test binary blob - 
self.assertEqual(salt.utils.data.decode(BYTES, keep=True), BYTES) -- self.assertRaises( -- UnicodeDecodeError, -- salt.utils.data.decode, -- BYTES, -- keep=False) -+ self.assertRaises(UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False) -+ -+ def test_circular_refs_dicts(self): -+ test_dict = {"key": "value", "type": "test1"} -+ test_dict["self"] = test_dict -+ ret = salt.utils.data._remove_circular_refs(ob=test_dict) -+ self.assertDictEqual(ret, {"key": "value", "type": "test1", "self": None}) -+ -+ def test_circular_refs_lists(self): -+ test_list = { -+ "foo": [], -+ } -+ test_list["foo"].append((test_list,)) -+ ret = salt.utils.data._remove_circular_refs(ob=test_list) -+ self.assertDictEqual(ret, {"foo": [(None,)]}) -+ -+ def test_circular_refs_tuple(self): -+ test_dup = {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1} -+ ret = salt.utils.data._remove_circular_refs(ob=test_dup) -+ self.assertDictEqual( -+ ret, {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1} -+ ) - - def test_decode_to_str(self): -- ''' -+ """ - Companion to test_decode, they should both be kept up-to-date with one - another. - - NOTE: This uses the lambda "_s" defined above in the global scope, - which converts the string/bytestring to a str type. -- ''' -+ """ - expected = [ -- _s('unicode_str'), -- _s('питон'), -+ _s("unicode_str"), -+ _s("питон"), - 123, - 456.789, - True, - False, - None, -- _s('яйца'), -+ _s("яйца"), - BYTES, -- [123, 456.789, _s('спам'), True, False, None, _s('яйца'), BYTES], -- (987, 654.321, _s('яйца'), _s('яйца'), None, (True, _s('яйца'), BYTES)), -+ [123, 456.789, _s("спам"), True, False, None, _s("яйца"), BYTES], -+ (987, 654.321, _s("яйца"), _s("яйца"), None, (True, _s("яйца"), BYTES)), - { -- _s('str_key'): _s('str_val'), -+ _s("str_key"): _s("str_val"), - None: True, - 123: 456.789, -- _s('яйца'): BYTES, -- _s('subdict'): { -- _s('unicode_key'): _s('яйца'), -- _s('tuple'): (123, _s('hello'), _s('world'), True, _s('яйца'), BYTES), -- _s('list'): [456, _s('спам'), False, _s('яйца'), BYTES] -- } -+ _s("яйца"): BYTES, -+ _s("subdict"): { -+ _s("unicode_key"): _s("яйца"), -+ _s("tuple"): ( -+ 123, -+ _s("hello"), -+ _s("world"), -+ True, -+ _s("яйца"), -+ BYTES, -+ ), -+ _s("list"): [456, _s("спам"), False, _s("яйца"), BYTES], -+ }, - }, -- OrderedDict([(_s('foo'), _s('bar')), (123, 456), (_s('яйца'), BYTES)]) -+ OrderedDict([(_s("foo"), _s("bar")), (123, 456), (_s("яйца"), BYTES)]), - ] - - ret = salt.utils.data.decode( -@@ -393,27 +410,42 @@ class DataTestCase(TestCase): - normalize=True, - preserve_dict_class=True, - preserve_tuples=True, -- to_str=True) -+ to_str=True, -+ ) - self.assertEqual(ret, expected) - -- if six.PY3: -- # The binary data in the data structure should fail to decode, even -- # using the fallback, and raise an exception. -- self.assertRaises( -- UnicodeDecodeError, -- salt.utils.data.decode, -- self.test_data, -- keep=False, -- normalize=True, -- preserve_dict_class=True, -- preserve_tuples=True, -- to_str=True) -+ # The binary data in the data structure should fail to decode, even -+ # using the fallback, and raise an exception. 
-+ self.assertRaises( -+ UnicodeDecodeError, -+ salt.utils.data.decode, -+ self.test_data, -+ keep=False, -+ normalize=True, -+ preserve_dict_class=True, -+ preserve_tuples=True, -+ to_str=True, -+ ) - - # Now munge the expected data so that we get what we would expect if we - # disable preservation of dict class and tuples -- expected[10] = [987, 654.321, _s('яйца'), _s('яйца'), None, [True, _s('яйца'), BYTES]] -- expected[11][_s('subdict')][_s('tuple')] = [123, _s('hello'), _s('world'), True, _s('яйца'), BYTES] -- expected[12] = {_s('foo'): _s('bar'), 123: 456, _s('яйца'): BYTES} -+ expected[10] = [ -+ 987, -+ 654.321, -+ _s("яйца"), -+ _s("яйца"), -+ None, -+ [True, _s("яйца"), BYTES], -+ ] -+ expected[11][_s("subdict")][_s("tuple")] = [ -+ 123, -+ _s("hello"), -+ _s("world"), -+ True, -+ _s("яйца"), -+ BYTES, -+ ] -+ expected[12] = {_s("foo"): _s("bar"), 123: 456, _s("яйца"): BYTES} - - ret = salt.utils.data.decode( - self.test_data, -@@ -421,47 +453,41 @@ class DataTestCase(TestCase): - normalize=True, - preserve_dict_class=False, - preserve_tuples=False, -- to_str=True) -+ to_str=True, -+ ) - self.assertEqual(ret, expected) - - # Now test single non-string, non-data-structure items, these should - # return the same value when passed to this function - for item in (123, 4.56, True, False, None): -- log.debug('Testing decode of %s', item) -+ log.debug("Testing decode of %s", item) - self.assertEqual(salt.utils.data.decode(item, to_str=True), item) - - # Test single strings (not in a data structure) -- self.assertEqual(salt.utils.data.decode('foo', to_str=True), _s('foo')) -- self.assertEqual(salt.utils.data.decode(_b('bar'), to_str=True), _s('bar')) -+ self.assertEqual(salt.utils.data.decode("foo", to_str=True), _s("foo")) -+ self.assertEqual(salt.utils.data.decode(_b("bar"), to_str=True), _s("bar")) - - # Test binary blob +- - self.assertEqual( -- salt.utils.data.decode(BYTES, keep=True, to_str=True), -- BYTES -+ self.assertEqual(salt.utils.data.decode(BYTES, keep=True, to_str=True), BYTES) -+ self.assertRaises( -+ UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False, to_str=True, - ) -- if six.PY3: -- self.assertRaises( -- UnicodeDecodeError, -- salt.utils.data.decode, -- BYTES, -- keep=False, -- to_str=True) - - def test_decode_fallback(self): -- ''' -+ """ - Test fallback to utf-8 -- ''' -- with patch.object(builtins, '__salt_system_encoding__', 'ascii'): -- self.assertEqual(salt.utils.data.decode(_b('яйца')), 'яйца') -+ """ -+ with patch.object(builtins, "__salt_system_encoding__", "ascii"): -+ self.assertEqual(salt.utils.data.decode(_b("яйца")), "яйца") - - def test_encode(self): -- ''' -+ """ - NOTE: This uses the lambda "_b" defined above in the global scope, - which encodes a string to a bytestring, assuming utf-8. 
-- ''' -+ """ - expected = [ -- _b('unicode_str'), -- _b('питон'), -+ _b("unicode_str"), -+ _b("питон"), - 123, - 456.789, - True, -@@ -469,67 +495,71 @@ class DataTestCase(TestCase): - None, - _b(EGGS), - BYTES, -- [123, 456.789, _b('спам'), True, False, None, _b(EGGS), BYTES], -- (987, 654.321, _b('яйца'), _b(EGGS), None, (True, _b(EGGS), BYTES)), -+ [123, 456.789, _b("спам"), True, False, None, _b(EGGS), BYTES], -+ (987, 654.321, _b("яйца"), _b(EGGS), None, (True, _b(EGGS), BYTES)), - { -- _b('str_key'): _b('str_val'), -+ _b("str_key"): _b("str_val"), - None: True, - 123: 456.789, - _b(EGGS): BYTES, -- _b('subdict'): { -- _b('unicode_key'): _b(EGGS), -- _b('tuple'): (123, _b('hello'), _b('world'), True, _b(EGGS), BYTES), -- _b('list'): [456, _b('спам'), False, _b(EGGS), BYTES] -- } -+ _b("subdict"): { -+ _b("unicode_key"): _b(EGGS), -+ _b("tuple"): (123, _b("hello"), _b("world"), True, _b(EGGS), BYTES), -+ _b("list"): [456, _b("спам"), False, _b(EGGS), BYTES], -+ }, - }, -- OrderedDict([(_b('foo'), _b('bar')), (123, 456), (_b(EGGS), BYTES)]) -+ OrderedDict([(_b("foo"), _b("bar")), (123, 456), (_b(EGGS), BYTES)]), - ] - - # Both keep=True and keep=False should work because the BYTES data is - # already bytes. - ret = salt.utils.data.encode( -- self.test_data, -- keep=True, -- preserve_dict_class=True, -- preserve_tuples=True) -+ self.test_data, keep=True, preserve_dict_class=True, preserve_tuples=True -+ ) - self.assertEqual(ret, expected) - ret = salt.utils.data.encode( -- self.test_data, -- keep=False, -- preserve_dict_class=True, -- preserve_tuples=True) -+ self.test_data, keep=False, preserve_dict_class=True, preserve_tuples=True -+ ) - self.assertEqual(ret, expected) - - # Now munge the expected data so that we get what we would expect if we - # disable preservation of dict class and tuples -- expected[10] = [987, 654.321, _b('яйца'), _b(EGGS), None, [True, _b(EGGS), BYTES]] -- expected[11][_b('subdict')][_b('tuple')] = [ -- 123, _b('hello'), _b('world'), True, _b(EGGS), BYTES -+ expected[10] = [ -+ 987, -+ 654.321, -+ _b("яйца"), -+ _b(EGGS), -+ None, -+ [True, _b(EGGS), BYTES], - ] -- expected[12] = {_b('foo'): _b('bar'), 123: 456, _b(EGGS): BYTES} -+ expected[11][_b("subdict")][_b("tuple")] = [ -+ 123, -+ _b("hello"), -+ _b("world"), -+ True, -+ _b(EGGS), -+ BYTES, -+ ] -+ expected[12] = {_b("foo"): _b("bar"), 123: 456, _b(EGGS): BYTES} - - ret = salt.utils.data.encode( -- self.test_data, -- keep=True, -- preserve_dict_class=False, -- preserve_tuples=False) -+ self.test_data, keep=True, preserve_dict_class=False, preserve_tuples=False -+ ) - self.assertEqual(ret, expected) - ret = salt.utils.data.encode( -- self.test_data, -- keep=False, -- preserve_dict_class=False, -- preserve_tuples=False) -+ self.test_data, keep=False, preserve_dict_class=False, preserve_tuples=False -+ ) - self.assertEqual(ret, expected) - - # Now test single non-string, non-data-structure items, these should - # return the same value when passed to this function - for item in (123, 4.56, True, False, None): -- log.debug('Testing encode of %s', item) -+ log.debug("Testing encode of %s", item) - self.assertEqual(salt.utils.data.encode(item), item) - - # Test single strings (not in a data structure) -- self.assertEqual(salt.utils.data.encode('foo'), _b('foo')) -- self.assertEqual(salt.utils.data.encode(_b('bar')), _b('bar')) -+ self.assertEqual(salt.utils.data.encode("foo"), _b("foo")) -+ self.assertEqual(salt.utils.data.encode(_b("bar")), _b("bar")) - - # Test binary blob, nothing should happen even when 
keep=False since - # the data is already bytes -@@ -537,41 +567,43 @@ class DataTestCase(TestCase): - self.assertEqual(salt.utils.data.encode(BYTES, keep=False), BYTES) - - def test_encode_keep(self): -- ''' -+ """ - Whereas we tested the keep argument in test_decode, it is much easier - to do a more comprehensive test of keep in its own function where we - can force the encoding. -- ''' -- unicode_str = 'питон' -- encoding = 'ascii' -+ """ -+ unicode_str = "питон" -+ encoding = "ascii" - - # Test single string - self.assertEqual( -- salt.utils.data.encode(unicode_str, encoding, keep=True), -- unicode_str) -+ salt.utils.data.encode(unicode_str, encoding, keep=True), unicode_str -+ ) - self.assertRaises( - UnicodeEncodeError, - salt.utils.data.encode, - unicode_str, - encoding, -- keep=False) -+ keep=False, -+ ) - - data = [ - unicode_str, -- [b'foo', [unicode_str], {b'key': unicode_str}, (unicode_str,)], -- {b'list': [b'foo', unicode_str], -- b'dict': {b'key': unicode_str}, -- b'tuple': (b'foo', unicode_str)}, -- ([b'foo', unicode_str], {b'key': unicode_str}, (unicode_str,)) -+ [b"foo", [unicode_str], {b"key": unicode_str}, (unicode_str,)], -+ { -+ b"list": [b"foo", unicode_str], -+ b"dict": {b"key": unicode_str}, -+ b"tuple": (b"foo", unicode_str), -+ }, -+ ([b"foo", unicode_str], {b"key": unicode_str}, (unicode_str,)), - ] - - # Since everything was a bytestring aside from the bogus data, the - # return data should be identical. We don't need to test recursive - # decoding, that has already been tested in test_encode. - self.assertEqual( -- salt.utils.data.encode(data, encoding, -- keep=True, preserve_tuples=True), -- data -+ salt.utils.data.encode(data, encoding, keep=True, preserve_tuples=True), -+ data, - ) - self.assertRaises( - UnicodeEncodeError, -@@ -579,13 +611,15 @@ class DataTestCase(TestCase): - data, - encoding, - keep=False, -- preserve_tuples=True) -+ preserve_tuples=True, -+ ) - - for index, _ in enumerate(data): - self.assertEqual( -- salt.utils.data.encode(data[index], encoding, -- keep=True, preserve_tuples=True), -- data[index] -+ salt.utils.data.encode( -+ data[index], encoding, keep=True, preserve_tuples=True -+ ), -+ data[index], - ) - self.assertRaises( - UnicodeEncodeError, -@@ -593,31 +627,36 @@ class DataTestCase(TestCase): - data[index], - encoding, - keep=False, -- preserve_tuples=True) -+ preserve_tuples=True, -+ ) - - def test_encode_fallback(self): -- ''' -+ """ - Test fallback to utf-8 -- ''' -- with patch.object(builtins, '__salt_system_encoding__', 'ascii'): -- self.assertEqual(salt.utils.data.encode('яйца'), _b('яйца')) -- with patch.object(builtins, '__salt_system_encoding__', 'CP1252'): -- self.assertEqual(salt.utils.data.encode('Ψ'), _b('Ψ')) -+ """ -+ with patch.object(builtins, "__salt_system_encoding__", "ascii"): -+ self.assertEqual(salt.utils.data.encode("яйца"), _b("яйца")) -+ with patch.object(builtins, "__salt_system_encoding__", "CP1252"): -+ self.assertEqual(salt.utils.data.encode("Ψ"), _b("Ψ")) - - def test_repack_dict(self): -- list_of_one_element_dicts = [{'dict_key_1': 'dict_val_1'}, -- {'dict_key_2': 'dict_val_2'}, -- {'dict_key_3': 'dict_val_3'}] -- expected_ret = {'dict_key_1': 'dict_val_1', -- 'dict_key_2': 'dict_val_2', -- 'dict_key_3': 'dict_val_3'} -+ list_of_one_element_dicts = [ -+ {"dict_key_1": "dict_val_1"}, -+ {"dict_key_2": "dict_val_2"}, -+ {"dict_key_3": "dict_val_3"}, -+ ] -+ expected_ret = { -+ "dict_key_1": "dict_val_1", -+ "dict_key_2": "dict_val_2", -+ "dict_key_3": "dict_val_3", -+ } - ret = 
salt.utils.data.repack_dictlist(list_of_one_element_dicts) - self.assertDictEqual(ret, expected_ret) - - # Try with yaml -- yaml_key_val_pair = '- key1: val1' -+ yaml_key_val_pair = "- key1: val1" - ret = salt.utils.data.repack_dictlist(yaml_key_val_pair) -- self.assertDictEqual(ret, {'key1': 'val1'}) -+ self.assertDictEqual(ret, {"key1": "val1"}) - - # Make sure we handle non-yaml junk data - ret = salt.utils.data.repack_dictlist(LOREM_IPSUM) -@@ -626,43 +665,47 @@ class DataTestCase(TestCase): - def test_stringify(self): - self.assertRaises(TypeError, salt.utils.data.stringify, 9) - self.assertEqual( -- salt.utils.data.stringify(['one', 'two', str('three'), 4, 5]), # future lint: disable=blacklisted-function -- ['one', 'two', 'three', '4', '5'] -+ salt.utils.data.stringify( -+ ["one", "two", "three", 4, 5] -+ ), # future lint: disable=blacklisted-function -+ ["one", "two", "three", "4", "5"], - ) - - def test_json_query(self): - # Raises exception if jmespath module is not found -- with patch('salt.utils.data.jmespath', None): -+ with patch("salt.utils.data.jmespath", None): - self.assertRaisesRegex( -- RuntimeError, 'requires jmespath', -- salt.utils.data.json_query, {}, '@' -+ RuntimeError, "requires jmespath", salt.utils.data.json_query, {}, "@" - ) - - # Test search - user_groups = { -- 'user1': {'groups': ['group1', 'group2', 'group3']}, -- 'user2': {'groups': ['group1', 'group2']}, -- 'user3': {'groups': ['group3']}, -+ "user1": {"groups": ["group1", "group2", "group3"]}, -+ "user2": {"groups": ["group1", "group2"]}, -+ "user3": {"groups": ["group3"]}, - } -- expression = '*.groups[0]' -- primary_groups = ['group1', 'group1', 'group3'] -+ expression = "*.groups[0]" -+ primary_groups = ["group1", "group1", "group3"] - self.assertEqual( -- sorted(salt.utils.data.json_query(user_groups, expression)), -- primary_groups -+ sorted(salt.utils.data.json_query(user_groups, expression)), primary_groups - ) - - - class FilterFalseyTestCase(TestCase): -- ''' -+ """ - Test suite for salt.utils.data.filter_falsey -- ''' -+ """ - - def test_nop(self): -- ''' -+ """ - Test cases where nothing will be done. 
-- ''' -+ """ - # Test with dictionary without recursion -- old_dict = {'foo': 'bar', 'bar': {'baz': {'qux': 'quux'}}, 'baz': ['qux', {'foo': 'bar'}]} -+ old_dict = { -+ "foo": "bar", -+ "bar": {"baz": {"qux": "quux"}}, -+ "baz": ["qux", {"foo": "bar"}], -+ } - new_dict = salt.utils.data.filter_falsey(old_dict) - self.assertEqual(old_dict, new_dict) - # Check returned type equality -@@ -671,23 +714,25 @@ class FilterFalseyTestCase(TestCase): - new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3) - self.assertEqual(old_dict, new_dict) - # Test with list -- old_list = ['foo', 'bar'] -+ old_list = ["foo", "bar"] - new_list = salt.utils.data.filter_falsey(old_list) - self.assertEqual(old_list, new_list) - # Check returned type equality - self.assertIs(type(old_list), type(new_list)) - # Test with set -- old_set = set(['foo', 'bar']) -+ old_set = {"foo", "bar"} - new_set = salt.utils.data.filter_falsey(old_set) - self.assertEqual(old_set, new_set) - # Check returned type equality - self.assertIs(type(old_set), type(new_set)) - # Test with OrderedDict -- old_dict = OrderedDict([ -- ('foo', 'bar'), -- ('bar', OrderedDict([('qux', 'quux')])), -- ('baz', ['qux', OrderedDict([('foo', 'bar')])]) -- ]) -+ old_dict = OrderedDict( -+ [ -+ ("foo", "bar"), -+ ("bar", OrderedDict([("qux", "quux")])), -+ ("baz", ["qux", OrderedDict([("foo", "bar")])]), -+ ] -+ ) - new_dict = salt.utils.data.filter_falsey(old_dict) - self.assertEqual(old_dict, new_dict) - self.assertIs(type(old_dict), type(new_dict)) -@@ -696,8 +741,8 @@ class FilterFalseyTestCase(TestCase): - new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type(0)]) - self.assertEqual(old_list, new_list) - # Test excluding str (or unicode) (or both) -- old_list = [''] -- new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type('')]) -+ old_list = [""] -+ new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type("")]) - self.assertEqual(old_list, new_list) - # Test excluding list - old_list = [[]] -@@ -709,185 +754,264 @@ class FilterFalseyTestCase(TestCase): - self.assertEqual(old_list, new_list) - - def test_filter_dict_no_recurse(self): -- ''' -+ """ - Test filtering a dictionary without recursing. - This will only filter out key-values where the values are falsey. -- ''' -- old_dict = {'foo': None, -- 'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}}, -- 'baz': ['qux'], -- 'qux': {}, -- 'quux': []} -+ """ -+ old_dict = { -+ "foo": None, -+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}}, -+ "baz": ["qux"], -+ "qux": {}, -+ "quux": [], -+ } - new_dict = salt.utils.data.filter_falsey(old_dict) -- expect_dict = {'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}}, 'baz': ['qux']} -+ expect_dict = { -+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}}, -+ "baz": ["qux"], -+ } - self.assertEqual(expect_dict, new_dict) - self.assertIs(type(expect_dict), type(new_dict)) - - def test_filter_dict_recurse(self): -- ''' -+ """ - Test filtering a dictionary with recursing. - This will filter out any key-values where the values are falsey or when - the values *become* falsey after filtering their contents (in case they - are lists or dicts). 
-- ''' -- old_dict = {'foo': None, -- 'bar': {'baz': {'qux': None, 'quux': '', 'foo': []}}, -- 'baz': ['qux'], -- 'qux': {}, -- 'quux': []} -+ """ -+ old_dict = { -+ "foo": None, -+ "bar": {"baz": {"qux": None, "quux": "", "foo": []}}, -+ "baz": ["qux"], -+ "qux": {}, -+ "quux": [], -+ } - new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3) -- expect_dict = {'baz': ['qux']} -+ expect_dict = {"baz": ["qux"]} - self.assertEqual(expect_dict, new_dict) - self.assertIs(type(expect_dict), type(new_dict)) - - def test_filter_list_no_recurse(self): -- ''' -+ """ - Test filtering a list without recursing. - This will only filter out items which are falsey. -- ''' -- old_list = ['foo', None, [], {}, 0, ''] -+ """ -+ old_list = ["foo", None, [], {}, 0, ""] - new_list = salt.utils.data.filter_falsey(old_list) -- expect_list = ['foo'] -+ expect_list = ["foo"] - self.assertEqual(expect_list, new_list) - self.assertIs(type(expect_list), type(new_list)) - # Ensure nested values are *not* filtered out. - old_list = [ -- 'foo', -- ['foo'], -- ['foo', None], -- {'foo': 0}, -- {'foo': 'bar', 'baz': []}, -- [{'foo': ''}], -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], - ] - new_list = salt.utils.data.filter_falsey(old_list) - self.assertEqual(old_list, new_list) - self.assertIs(type(old_list), type(new_list)) - - def test_filter_list_recurse(self): -- ''' -+ """ - Test filtering a list with recursing. - This will filter out any items which are falsey, or which become falsey - after filtering their contents (in case they are lists or dicts). -- ''' -+ """ - old_list = [ -- 'foo', -- ['foo'], -- ['foo', None], -- {'foo': 0}, -- {'foo': 'bar', 'baz': []}, -- [{'foo': ''}] -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], - ] - new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3) -- expect_list = ['foo', ['foo'], ['foo'], {'foo': 'bar'}] -+ expect_list = ["foo", ["foo"], ["foo"], {"foo": "bar"}] - self.assertEqual(expect_list, new_list) - self.assertIs(type(expect_list), type(new_list)) - - def test_filter_set_no_recurse(self): -- ''' -+ """ - Test filtering a set without recursing. - Note that a set cannot contain unhashable types, so recursion is not possible. -- ''' -- old_set = set([ -- 'foo', -- None, -- 0, -- '', -- ]) -+ """ -+ old_set = {"foo", None, 0, ""} - new_set = salt.utils.data.filter_falsey(old_set) -- expect_set = set(['foo']) -+ expect_set = {"foo"} - self.assertEqual(expect_set, new_set) - self.assertIs(type(expect_set), type(new_set)) - - def test_filter_ordereddict_no_recurse(self): -- ''' -+ """ - Test filtering an OrderedDict without recursing. 
-- ''' -- old_dict = OrderedDict([ -- ('foo', None), -- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])), -- ('baz', ['qux']), -- ('qux', {}), -- ('quux', []) -- ]) -+ """ -+ old_dict = OrderedDict( -+ [ -+ ("foo", None), -+ ( -+ "bar", -+ OrderedDict( -+ [ -+ ( -+ "baz", -+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]), -+ ) -+ ] -+ ), -+ ), -+ ("baz", ["qux"]), -+ ("qux", {}), -+ ("quux", []), -+ ] -+ ) - new_dict = salt.utils.data.filter_falsey(old_dict) -- expect_dict = OrderedDict([ -- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])), -- ('baz', ['qux']), -- ]) -+ expect_dict = OrderedDict( -+ [ -+ ( -+ "bar", -+ OrderedDict( -+ [ -+ ( -+ "baz", -+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]), -+ ) -+ ] -+ ), -+ ), -+ ("baz", ["qux"]), -+ ] -+ ) - self.assertEqual(expect_dict, new_dict) - self.assertIs(type(expect_dict), type(new_dict)) - - def test_filter_ordereddict_recurse(self): -- ''' -+ """ - Test filtering an OrderedDict with recursing. -- ''' -- old_dict = OrderedDict([ -- ('foo', None), -- ('bar', OrderedDict([('baz', OrderedDict([('qux', None), ('quux', ''), ('foo', [])]))])), -- ('baz', ['qux']), -- ('qux', {}), -- ('quux', []) -- ]) -+ """ -+ old_dict = OrderedDict( -+ [ -+ ("foo", None), -+ ( -+ "bar", -+ OrderedDict( -+ [ -+ ( -+ "baz", -+ OrderedDict([("qux", None), ("quux", ""), ("foo", [])]), -+ ) -+ ] -+ ), -+ ), -+ ("baz", ["qux"]), -+ ("qux", {}), -+ ("quux", []), -+ ] -+ ) - new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3) -- expect_dict = OrderedDict([ -- ('baz', ['qux']), -- ]) -+ expect_dict = OrderedDict([("baz", ["qux"])]) - self.assertEqual(expect_dict, new_dict) - self.assertIs(type(expect_dict), type(new_dict)) - - def test_filter_list_recurse_limit(self): -- ''' -+ """ - Test filtering a list with recursing, but with a limited depth. - Note that the top-level is always processed, so a recursion depth of 2 - means that two *additional* levels are processed. -- ''' -+ """ - old_list = [None, [None, [None, [None]]]] - new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=2) - self.assertEqual([[[[None]]]], new_list) - - def test_filter_dict_recurse_limit(self): -- ''' -+ """ - Test filtering a dict with recursing, but with a limited depth. - Note that the top-level is always processed, so a recursion depth of 2 - means that two *additional* levels are processed. -- ''' -- old_dict = {'one': None, -- 'foo': {'two': None, 'bar': {'three': None, 'baz': {'four': None}}}} -+ """ -+ old_dict = { -+ "one": None, -+ "foo": {"two": None, "bar": {"three": None, "baz": {"four": None}}}, -+ } - new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=2) -- self.assertEqual({'foo': {'bar': {'baz': {'four': None}}}}, new_dict) -+ self.assertEqual({"foo": {"bar": {"baz": {"four": None}}}}, new_dict) - - def test_filter_exclude_types(self): -- ''' -+ """ - Test filtering a list recursively, but also ignoring (i.e. not filtering) - out certain types that can be falsey. 
-- ''' -+ """ - # Ignore int, unicode -- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]] -- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type(0), type('')]) -- self.assertEqual(['foo', ['foo'], ['foo'], {'foo': 0}, {'foo': 'bar'}, [{'foo': ''}]], new_list) -+ old_list = [ -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], -+ ] -+ new_list = salt.utils.data.filter_falsey( -+ old_list, recurse_depth=3, ignore_types=[type(0), type("")] -+ ) -+ self.assertEqual( -+ ["foo", ["foo"], ["foo"], {"foo": 0}, {"foo": "bar"}, [{"foo": ""}]], -+ new_list, -+ ) - # Ignore list -- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]] -- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type([])]) -- self.assertEqual(['foo', ['foo'], ['foo'], {'foo': 'bar', 'baz': []}, []], new_list) -+ old_list = [ -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], -+ ] -+ new_list = salt.utils.data.filter_falsey( -+ old_list, recurse_depth=3, ignore_types=[type([])] -+ ) -+ self.assertEqual( -+ ["foo", ["foo"], ["foo"], {"foo": "bar", "baz": []}, []], new_list -+ ) - # Ignore dict -- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]] -- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type({})]) -- self.assertEqual(['foo', ['foo'], ['foo'], {}, {'foo': 'bar'}, [{}]], new_list) -+ old_list = [ -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], -+ ] -+ new_list = salt.utils.data.filter_falsey( -+ old_list, recurse_depth=3, ignore_types=[type({})] -+ ) -+ self.assertEqual(["foo", ["foo"], ["foo"], {}, {"foo": "bar"}, [{}]], new_list) - # Ignore NoneType -- old_list = ['foo', ['foo'], ['foo', None], {'foo': 0}, {'foo': 'bar', 'baz': []}, [{'foo': ''}]] -- new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3, ignore_types=[type(None)]) -- self.assertEqual(['foo', ['foo'], ['foo', None], {'foo': 'bar'}], new_list) -+ old_list = [ -+ "foo", -+ ["foo"], -+ ["foo", None], -+ {"foo": 0}, -+ {"foo": "bar", "baz": []}, -+ [{"foo": ""}], -+ ] -+ new_list = salt.utils.data.filter_falsey( -+ old_list, recurse_depth=3, ignore_types=[type(None)] -+ ) -+ self.assertEqual(["foo", ["foo"], ["foo", None], {"foo": "bar"}], new_list) - - - class FilterRecursiveDiff(TestCase): -- ''' -+ """ - Test suite for salt.utils.data.recursive_diff -- ''' -+ """ - - def test_list_equality(self): -- ''' -+ """ - Test cases where equal lists are compared. -- ''' -+ """ - test_list = [0, 1, 2] - self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list)) - -@@ -895,392 +1019,455 @@ class FilterRecursiveDiff(TestCase): - self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list)) - - def test_dict_equality(self): -- ''' -+ """ - Test cases where equal dicts are compared. -- ''' -- test_dict = {'foo': 'bar', 'bar': {'baz': {'qux': 'quux'}}, 'frop': 0} -+ """ -+ test_dict = {"foo": "bar", "bar": {"baz": {"qux": "quux"}}, "frop": 0} - self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict)) - - def test_ordereddict_equality(self): -- ''' -+ """ - Test cases where equal OrderedDicts are compared. 
-- ''' -- test_dict = OrderedDict([ -- ('foo', 'bar'), -- ('bar', OrderedDict([('baz', OrderedDict([('qux', 'quux')]))])), -- ('frop', 0)]) -+ """ -+ test_dict = OrderedDict( -+ [ -+ ("foo", "bar"), -+ ("bar", OrderedDict([("baz", OrderedDict([("qux", "quux")]))])), -+ ("frop", 0), -+ ] -+ ) - self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict)) - - def test_mixed_equality(self): -- ''' -+ """ - Test cases where mixed nested lists and dicts are compared. -- ''' -+ """ - test_data = { -- 'foo': 'bar', -- 'baz': [0, 1, 2], -- 'bar': {'baz': [{'qux': 'quux'}, {'froop', 0}]} -+ "foo": "bar", -+ "baz": [0, 1, 2], -+ "bar": {"baz": [{"qux": "quux"}, {"froop", 0}]}, - } - self.assertEqual({}, salt.utils.data.recursive_diff(test_data, test_data)) - - def test_set_equality(self): -- ''' -+ """ - Test cases where equal sets are compared. -- ''' -- test_set = set([0, 1, 2, 3, 'foo']) -+ """ -+ test_set = {0, 1, 2, 3, "foo"} - self.assertEqual({}, salt.utils.data.recursive_diff(test_set, test_set)) - - # This is a bit of an oddity, as python seems to sort the sets in memory - # so both sets end up with the same ordering (0..3). -- set_one = set([0, 1, 2, 3]) -- set_two = set([3, 2, 1, 0]) -+ set_one = {0, 1, 2, 3} -+ set_two = {3, 2, 1, 0} - self.assertEqual({}, salt.utils.data.recursive_diff(set_one, set_two)) - - def test_tuple_equality(self): -- ''' -+ """ - Test cases where equal tuples are compared. -- ''' -- test_tuple = (0, 1, 2, 3, 'foo') -+ """ -+ test_tuple = (0, 1, 2, 3, "foo") - self.assertEqual({}, salt.utils.data.recursive_diff(test_tuple, test_tuple)) - - def test_list_inequality(self): -- ''' -+ """ - Test cases where two inequal lists are compared. -- ''' -+ """ - list_one = [0, 1, 2] -- list_two = ['foo', 'bar', 'baz'] -- expected_result = {'old': list_one, 'new': list_two} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two)) -- expected_result = {'new': list_one, 'old': list_two} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one)) +- "sit", +- salt.utils.data.traverse_dict_and_list( +- test_two_level_dict_and_list, +- ["foo", "lorem", "ipsum", "dolor"], +- {"not_found": "not_found"}, +- ), +- ) - -- list_one = [0, 'foo', 1, 'bar'] -- list_two = [1, 'foo', 1, 'qux'] -- expected_result = {'old': [0, 'bar'], 'new': [1, 'qux']} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two)) -- expected_result = {'new': [0, 'bar'], 'old': [1, 'qux']} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one)) -+ list_two = ["foo", "bar", "baz"] -+ expected_result = {"old": list_one, "new": list_two} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_one, list_two) -+ ) -+ expected_result = {"new": list_one, "old": list_two} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_two, list_one) -+ ) -+ -+ list_one = [0, "foo", 1, "bar"] -+ list_two = [1, "foo", 1, "qux"] -+ expected_result = {"old": [0, "bar"], "new": [1, "qux"]} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_one, list_two) -+ ) -+ expected_result = {"new": [0, "bar"], "old": [1, "qux"]} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_two, list_one) -+ ) - - list_one = [0, 1, [2, 3]] -- list_two = [0, 1, ['foo', 'bar']] -- expected_result = {'old': [[2, 3]], 'new': [['foo', 'bar']]} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two)) 
-- expected_result = {'new': [[2, 3]], 'old': [['foo', 'bar']]} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one)) -+ list_two = [0, 1, ["foo", "bar"]] -+ expected_result = {"old": [[2, 3]], "new": [["foo", "bar"]]} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_one, list_two) -+ ) -+ expected_result = {"new": [[2, 3]], "old": [["foo", "bar"]]} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_two, list_one) -+ ) - - def test_dict_inequality(self): -- ''' -+ """ - Test cases where two inequal dicts are compared. -- ''' -- dict_one = {'foo': 1, 'bar': 2, 'baz': 3} -- dict_two = {'foo': 2, 1: 'bar', 'baz': 3} -- expected_result = {'old': {'foo': 1, 'bar': 2}, 'new': {'foo': 2, 1: 'bar'}} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)) -- expected_result = {'new': {'foo': 1, 'bar': 2}, 'old': {'foo': 2, 1: 'bar'}} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)) -- -- dict_one = {'foo': {'bar': {'baz': 1}}} -- dict_two = {'foo': {'qux': {'baz': 1}}} -- expected_result = {'old': dict_one, 'new': dict_two} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)) -- expected_result = {'new': dict_one, 'old': dict_two} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)) -+ """ -+ dict_one = {"foo": 1, "bar": 2, "baz": 3} -+ dict_two = {"foo": 2, 1: "bar", "baz": 3} -+ expected_result = {"old": {"foo": 1, "bar": 2}, "new": {"foo": 2, 1: "bar"}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two) -+ ) -+ expected_result = {"new": {"foo": 1, "bar": 2}, "old": {"foo": 2, 1: "bar"}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one) -+ ) -+ -+ dict_one = {"foo": {"bar": {"baz": 1}}} -+ dict_two = {"foo": {"qux": {"baz": 1}}} -+ expected_result = {"old": dict_one, "new": dict_two} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two) -+ ) -+ expected_result = {"new": dict_one, "old": dict_two} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one) -+ ) - - def test_ordereddict_inequality(self): -- ''' -+ """ - Test cases where two inequal OrderedDicts are compared. -- ''' -- odict_one = OrderedDict([('foo', 'bar'), ('bar', 'baz')]) -- odict_two = OrderedDict([('bar', 'baz'), ('foo', 'bar')]) -- expected_result = {'old': odict_one, 'new': odict_two} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(odict_one, odict_two)) -+ """ -+ odict_one = OrderedDict([("foo", "bar"), ("bar", "baz")]) -+ odict_two = OrderedDict([("bar", "baz"), ("foo", "bar")]) -+ expected_result = {"old": odict_one, "new": odict_two} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(odict_one, odict_two) -+ ) - - def test_set_inequality(self): -- ''' -+ """ - Test cases where two inequal sets are compared. - Tricky as the sets are compared zipped, so shuffled sets of equal values - are considered different. 
-- ''' -- set_one = set([0, 1, 2, 4]) -- set_two = set([0, 1, 3, 4]) -- expected_result = {'old': set([2]), 'new': set([3])} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(set_one, set_two)) -- expected_result = {'new': set([2]), 'old': set([3])} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(set_two, set_one)) -+ """ -+ set_one = {0, 1, 2, 4} -+ set_two = {0, 1, 3, 4} -+ expected_result = {"old": {2}, "new": {3}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(set_one, set_two) -+ ) -+ expected_result = {"new": {2}, "old": {3}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(set_two, set_one) -+ ) - - # It is unknown how different python versions will store sets in memory. - # Python 2.7 seems to sort it (i.e. set_one below becomes {0, 1, 'foo', 'bar'} - # However Python 3.6.8 stores it differently each run. - # So just test for "not equal" here. -- set_one = set([0, 'foo', 1, 'bar']) -- set_two = set(['foo', 1, 'bar', 2]) -+ set_one = {0, "foo", 1, "bar"} -+ set_two = {"foo", 1, "bar", 2} - expected_result = {} -- self.assertNotEqual(expected_result, salt.utils.data.recursive_diff(set_one, set_two)) -+ self.assertNotEqual( -+ expected_result, salt.utils.data.recursive_diff(set_one, set_two) -+ ) - - def test_mixed_inequality(self): -- ''' -+ """ - Test cases where two mixed dicts/iterables that are different are compared. -- ''' -- dict_one = {'foo': [1, 2, 3]} -- dict_two = {'foo': [3, 2, 1]} -- expected_result = {'old': {'foo': [1, 3]}, 'new': {'foo': [3, 1]}} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)) -- expected_result = {'new': {'foo': [1, 3]}, 'old': {'foo': [3, 1]}} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)) -- -- list_one = [1, 2, {'foo': ['bar', {'foo': 1, 'bar': 2}]}] -- list_two = [3, 4, {'foo': ['qux', {'foo': 1, 'bar': 2}]}] -- expected_result = {'old': [1, 2, {'foo': ['bar']}], 'new': [3, 4, {'foo': ['qux']}]} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_one, list_two)) -- expected_result = {'new': [1, 2, {'foo': ['bar']}], 'old': [3, 4, {'foo': ['qux']}]} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(list_two, list_one)) -- -- mixed_one = {'foo': set([0, 1, 2]), 'bar': [0, 1, 2]} -- mixed_two = {'foo': set([1, 2, 3]), 'bar': [1, 2, 3]} -+ """ -+ dict_one = {"foo": [1, 2, 3]} -+ dict_two = {"foo": [3, 2, 1]} -+ expected_result = {"old": {"foo": [1, 3]}, "new": {"foo": [3, 1]}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two) -+ ) -+ expected_result = {"new": {"foo": [1, 3]}, "old": {"foo": [3, 1]}} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(dict_two, dict_one) -+ ) -+ -+ list_one = [1, 2, {"foo": ["bar", {"foo": 1, "bar": 2}]}] -+ list_two = [3, 4, {"foo": ["qux", {"foo": 1, "bar": 2}]}] - expected_result = { -- 'old': {'foo': set([0]), 'bar': [0, 1, 2]}, -- 'new': {'foo': set([3]), 'bar': [1, 2, 3]} -+ "old": [1, 2, {"foo": ["bar"]}], -+ "new": [3, 4, {"foo": ["qux"]}], - } -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)) -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_one, list_two) -+ ) -+ expected_result = { -+ "new": [1, 2, {"foo": ["bar"]}], -+ "old": [3, 4, {"foo": ["qux"]}], -+ } -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(list_two, list_one) -+ ) -+ -+ mixed_one = 
{"foo": {0, 1, 2}, "bar": [0, 1, 2]} -+ mixed_two = {"foo": {1, 2, 3}, "bar": [1, 2, 3]} - expected_result = { -- 'new': {'foo': set([0]), 'bar': [0, 1, 2]}, -- 'old': {'foo': set([3]), 'bar': [1, 2, 3]} -+ "old": {"foo": {0}, "bar": [0, 1, 2]}, -+ "new": {"foo": {3}, "bar": [1, 2, 3]}, - } -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)) -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two) -+ ) -+ expected_result = { -+ "new": {"foo": {0}, "bar": [0, 1, 2]}, -+ "old": {"foo": {3}, "bar": [1, 2, 3]}, -+ } -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one) -+ ) - - def test_tuple_inequality(self): -- ''' -+ """ - Test cases where two tuples that are different are compared. -- ''' -+ """ - tuple_one = (1, 2, 3) - tuple_two = (3, 2, 1) -- expected_result = {'old': (1, 3), 'new': (3, 1)} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two)) -+ expected_result = {"old": (1, 3), "new": (3, 1)} -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two) -+ ) - - def test_list_vs_set(self): -- ''' -+ """ - Test case comparing a list with a set, will be compared unordered. -- ''' -+ """ - mixed_one = [1, 2, 3] -- mixed_two = set([3, 2, 1]) -+ mixed_two = {3, 2, 1} - expected_result = {} -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)) -- self.assertEqual(expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)) -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two) -+ ) -+ self.assertEqual( -+ expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one) -+ ) - - def test_dict_vs_ordereddict(self): -- ''' -+ """ - Test case comparing a dict with an ordereddict, will be compared unordered. -- ''' -- test_dict = {'foo': 'bar', 'bar': 'baz'} -- test_odict = OrderedDict([('foo', 'bar'), ('bar', 'baz')]) -+ """ -+ test_dict = {"foo": "bar", "bar": "baz"} -+ test_odict = OrderedDict([("foo", "bar"), ("bar", "baz")]) - self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict)) - self.assertEqual({}, salt.utils.data.recursive_diff(test_odict, test_dict)) - -- test_odict2 = OrderedDict([('bar', 'baz'), ('foo', 'bar')]) -+ test_odict2 = OrderedDict([("bar", "baz"), ("foo", "bar")]) - self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict2)) - self.assertEqual({}, salt.utils.data.recursive_diff(test_odict2, test_dict)) - - def test_list_ignore_ignored(self): -- ''' -+ """ - Test case comparing two lists with ignore-list supplied (which is not used - when comparing lists). -- ''' -+ """ - list_one = [1, 2, 3] - list_two = [3, 2, 1] -- expected_result = {'old': [1, 3], 'new': [3, 1]} -+ expected_result = {"old": [1, 3], "new": [3, 1]} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(list_one, list_two, ignore_keys=[1, 3]) -+ salt.utils.data.recursive_diff(list_one, list_two, ignore_keys=[1, 3]), - ) - - def test_dict_ignore(self): -- ''' -+ """ - Test case comparing two dicts with ignore-list supplied. 
-- ''' -- dict_one = {'foo': 1, 'bar': 2, 'baz': 3} -- dict_two = {'foo': 3, 'bar': 2, 'baz': 1} -- expected_result = {'old': {'baz': 3}, 'new': {'baz': 1}} -+ """ -+ dict_one = {"foo": 1, "bar": 2, "baz": 3} -+ dict_two = {"foo": 3, "bar": 2, "baz": 1} -+ expected_result = {"old": {"baz": 3}, "new": {"baz": 1}} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=['foo']) -+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]), - ) - - def test_ordereddict_ignore(self): -- ''' -+ """ - Test case comparing two OrderedDicts with ignore-list supplied. -- ''' -- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)]) -- odict_two = OrderedDict([('baz', 1), ('bar', 2), ('foo', 3)]) -+ """ -+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)]) -+ odict_two = OrderedDict([("baz", 1), ("bar", 2), ("foo", 3)]) - # The key 'foo' will be ignored, which means the key from the other OrderedDict - # will always be considered "different" since OrderedDicts are compared ordered. -- expected_result = {'old': OrderedDict([('baz', 3)]), 'new': OrderedDict([('baz', 1)])} -+ expected_result = { -+ "old": OrderedDict([("baz", 3)]), -+ "new": OrderedDict([("baz", 1)]), -+ } - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_keys=['foo']) -+ salt.utils.data.recursive_diff(odict_one, odict_two, ignore_keys=["foo"]), - ) - - def test_dict_vs_ordereddict_ignore(self): -- ''' -+ """ - Test case comparing a dict with an OrderedDict with ignore-list supplied. -- ''' -- dict_one = {'foo': 1, 'bar': 2, 'baz': 3} -- odict_two = OrderedDict([('foo', 3), ('bar', 2), ('baz', 1)]) -- expected_result = {'old': {'baz': 3}, 'new': OrderedDict([('baz', 1)])} -+ """ -+ dict_one = {"foo": 1, "bar": 2, "baz": 3} -+ odict_two = OrderedDict([("foo", 3), ("bar", 2), ("baz", 1)]) -+ expected_result = {"old": {"baz": 3}, "new": OrderedDict([("baz", 1)])} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, odict_two, ignore_keys=['foo']) -+ salt.utils.data.recursive_diff(dict_one, odict_two, ignore_keys=["foo"]), - ) - - def test_mixed_nested_ignore(self): -- ''' -+ """ - Test case comparing mixed, nested items with ignore-list supplied. -- ''' -- dict_one = {'foo': [1], 'bar': {'foo': 1, 'bar': 2}, 'baz': 3} -- dict_two = {'foo': [2], 'bar': {'foo': 3, 'bar': 2}, 'baz': 1} -- expected_result = {'old': {'baz': 3}, 'new': {'baz': 1}} -+ """ -+ dict_one = {"foo": [1], "bar": {"foo": 1, "bar": 2}, "baz": 3} -+ dict_two = {"foo": [2], "bar": {"foo": 3, "bar": 2}, "baz": 1} -+ expected_result = {"old": {"baz": 3}, "new": {"baz": 1}} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=['foo']) -+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]), - ) - - def test_ordered_dict_unequal_length(self): -- ''' -+ """ - Test case comparing two OrderedDicts of unequal length. 
-- ''' -- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)]) -- odict_two = OrderedDict([('foo', 1), ('bar', 2)]) -- expected_result = {'old': OrderedDict([('baz', 3)]), 'new': {}} -+ """ -+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)]) -+ odict_two = OrderedDict([("foo", 1), ("bar", 2)]) -+ expected_result = {"old": OrderedDict([("baz", 3)]), "new": {}} - self.assertEqual( -- expected_result, -- salt.utils.data.recursive_diff(odict_one, odict_two) -+ expected_result, salt.utils.data.recursive_diff(odict_one, odict_two) - ) - - def test_list_unequal_length(self): -- ''' -+ """ - Test case comparing two lists of unequal length. -- ''' -+ """ - list_one = [1, 2, 3] - list_two = [1, 2, 3, 4] -- expected_result = {'old': [], 'new': [4]} -+ expected_result = {"old": [], "new": [4]} - self.assertEqual( -- expected_result, -- salt.utils.data.recursive_diff(list_one, list_two) -+ expected_result, salt.utils.data.recursive_diff(list_one, list_two) - ) - - def test_set_unequal_length(self): -- ''' -+ """ - Test case comparing two sets of unequal length. - This does not do anything special, as it is unordered. -- ''' -- set_one = set([1, 2, 3]) -- set_two = set([4, 3, 2, 1]) -- expected_result = {'old': set([]), 'new': set([4])} -+ """ -+ set_one = {1, 2, 3} -+ set_two = {4, 3, 2, 1} -+ expected_result = {"old": set(), "new": {4}} - self.assertEqual( -- expected_result, -- salt.utils.data.recursive_diff(set_one, set_two) -+ expected_result, salt.utils.data.recursive_diff(set_one, set_two) - ) - - def test_tuple_unequal_length(self): -- ''' -+ """ - Test case comparing two tuples of unequal length. - This should be the same as comparing two ordered lists. -- ''' -+ """ - tuple_one = (1, 2, 3) - tuple_two = (1, 2, 3, 4) -- expected_result = {'old': (), 'new': (4,)} -+ expected_result = {"old": (), "new": (4,)} - self.assertEqual( -- expected_result, -- salt.utils.data.recursive_diff(tuple_one, tuple_two) -+ expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two) - ) - - def test_list_unordered(self): -- ''' -+ """ - Test case comparing two lists unordered. -- ''' -+ """ - list_one = [1, 2, 3, 4] - list_two = [4, 3, 2] -- expected_result = {'old': [1], 'new': []} -+ expected_result = {"old": [1], "new": []} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(list_one, list_two, ignore_order=True) -+ salt.utils.data.recursive_diff(list_one, list_two, ignore_order=True), - ) - - def test_mixed_nested_unordered(self): -- ''' -+ """ - Test case comparing nested dicts/lists unordered. 
-- ''' -- dict_one = {'foo': {'bar': [1, 2, 3]}, 'bar': [{'foo': 4}, 0]} -- dict_two = {'foo': {'bar': [3, 2, 1]}, 'bar': [0, {'foo': 4}]} -+ """ -+ dict_one = {"foo": {"bar": [1, 2, 3]}, "bar": [{"foo": 4}, 0]} -+ dict_two = {"foo": {"bar": [3, 2, 1]}, "bar": [0, {"foo": 4}]} - expected_result = {} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_order=True) -+ salt.utils.data.recursive_diff(dict_one, dict_two, ignore_order=True), - ) - expected_result = { -- 'old': {'foo': {'bar': [1, 3]}, 'bar': [{'foo': 4}, 0]}, -- 'new': {'foo': {'bar': [3, 1]}, 'bar': [0, {'foo': 4}]}, -+ "old": {"foo": {"bar": [1, 3]}, "bar": [{"foo": 4}, 0]}, -+ "new": {"foo": {"bar": [3, 1]}, "bar": [0, {"foo": 4}]}, - } - self.assertEqual( -- expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two) -+ expected_result, salt.utils.data.recursive_diff(dict_one, dict_two) - ) - - def test_ordered_dict_unordered(self): -- ''' -+ """ - Test case comparing OrderedDicts unordered. -- ''' -- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)]) -- odict_two = OrderedDict([('baz', 3), ('bar', 2), ('foo', 1)]) -+ """ -+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)]) -+ odict_two = OrderedDict([("baz", 3), ("bar", 2), ("foo", 1)]) - expected_result = {} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_order=True) -+ salt.utils.data.recursive_diff(odict_one, odict_two, ignore_order=True), - ) - - def test_ignore_missing_keys_dict(self): -- ''' -+ """ - Test case ignoring missing keys on a comparison of dicts. -- ''' -- dict_one = {'foo': 1, 'bar': 2, 'baz': 3} -- dict_two = {'bar': 3} -- expected_result = {'old': {'bar': 2}, 'new': {'bar': 3}} -+ """ -+ dict_one = {"foo": 1, "bar": 2, "baz": 3} -+ dict_two = {"bar": 3} -+ expected_result = {"old": {"bar": 2}, "new": {"bar": 3}} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True) -+ salt.utils.data.recursive_diff( -+ dict_one, dict_two, ignore_missing_keys=True -+ ), - ) - - def test_ignore_missing_keys_ordered_dict(self): -- ''' -+ """ - Test case not ignoring missing keys on a comparison of OrderedDicts. -- ''' -- odict_one = OrderedDict([('foo', 1), ('bar', 2), ('baz', 3)]) -- odict_two = OrderedDict([('bar', 3)]) -- expected_result = {'old': odict_one, 'new': odict_two} -+ """ -+ odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)]) -+ odict_two = OrderedDict([("bar", 3)]) -+ expected_result = {"old": odict_one, "new": odict_two} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(odict_one, odict_two, ignore_missing_keys=True) -+ salt.utils.data.recursive_diff( -+ odict_one, odict_two, ignore_missing_keys=True -+ ), - ) - - def test_ignore_missing_keys_recursive(self): -- ''' -+ """ - Test case ignoring missing keys on a comparison of nested dicts. 
-- ''' -- dict_one = {'foo': {'bar': 2, 'baz': 3}} -- dict_two = {'foo': {'baz': 3}} -+ """ -+ dict_one = {"foo": {"bar": 2, "baz": 3}} -+ dict_two = {"foo": {"baz": 3}} - expected_result = {} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True) -+ salt.utils.data.recursive_diff( -+ dict_one, dict_two, ignore_missing_keys=True -+ ), - ) - # Compare from dict-in-dict - dict_two = {} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True) -+ salt.utils.data.recursive_diff( -+ dict_one, dict_two, ignore_missing_keys=True -+ ), - ) - # Compare from dict-in-list -- dict_one = {'foo': ['bar', {'baz': 3}]} -- dict_two = {'foo': ['bar', {}]} -+ dict_one = {"foo": ["bar", {"baz": 3}]} -+ dict_two = {"foo": ["bar", {}]} - self.assertEqual( - expected_result, -- salt.utils.data.recursive_diff(dict_one, dict_two, ignore_missing_keys=True) -+ salt.utils.data.recursive_diff( -+ dict_one, dict_two, ignore_missing_keys=True -+ ), - ) -diff --git a/tests/unit/utils/test_xmlutil.py b/tests/unit/utils/test_xmlutil.py -index c04f39498e..cbf73861e5 100644 ---- a/tests/unit/utils/test_xmlutil.py -+++ b/tests/unit/utils/test_xmlutil.py -@@ -1,148 +1,170 @@ --# -*- coding: utf-8 -*- --''' -+""" - tests.unit.xmlutil_test - ~~~~~~~~~~~~~~~~~~~~ --''' --from __future__ import absolute_import, print_function, unicode_literals --# Import Salt Testing libs --from tests.support.unit import TestCase -+""" -+import salt.utils.xmlutil as xml - - # Import Salt libs - from salt._compat import ElementTree as ET --import salt.utils.xmlutil as xml -+ -+# Import Salt Testing libs -+from tests.support.unit import TestCase - - - class XMLUtilTestCase(TestCase): -- ''' -+ """ - Tests that salt.utils.xmlutil properly parses XML data and returns as a properly formatted - dictionary. The default method of parsing will ignore attributes and return only the child - items. The full method will include parsing attributes. -- ''' -+ """ - - def setUp(self): - - # Populate our use cases for specific XML formats. 
- self.cases = { -- 'a': { -- 'xml': 'data', -- 'legacy': {'parent': 'data'}, -- 'full': 'data' -+ "a": { -+ "xml": "data", -+ "legacy": {"parent": "data"}, -+ "full": "data", - }, -- 'b': { -- 'xml': 'data', -- 'legacy': {'parent': 'data'}, -- 'full': {'parent': 'data', 'value': 'data'} -+ "b": { -+ "xml": 'data', -+ "legacy": {"parent": "data"}, -+ "full": {"parent": "data", "value": "data"}, - }, -- 'c': { -- 'xml': 'datadata' -- '', -- 'legacy': {'child': ['data', {'child': 'data'}, {'child': None}, {'child': None}]}, -- 'full': {'child': ['data', {'child': 'data', 'value': 'data'}, {'value': 'data'}, None]} -+ "c": { -+ "xml": 'datadata' -+ '', -+ "legacy": { -+ "child": [ -+ "data", -+ {"child": "data"}, -+ {"child": None}, -+ {"child": None}, -+ ] -+ }, -+ "full": { -+ "child": [ -+ "data", -+ {"child": "data", "value": "data"}, -+ {"value": "data"}, -+ None, -+ ] -+ }, - }, -- 'd': { -- 'xml': 'data', -- 'legacy': {'child': 'data'}, -- 'full': {'child': 'data', 'another': 'data', 'value': 'data'} -+ "d": { -+ "xml": 'data', -+ "legacy": {"child": "data"}, -+ "full": {"child": "data", "another": "data", "value": "data"}, - }, -- 'e': { -- 'xml': 'data', -- 'legacy': {'child': 'data'}, -- 'full': {'child': {'child': 'data', 'value': 'data'}, 'another': 'data', 'value': 'data'} -+ "e": { -+ "xml": 'data', -+ "legacy": {"child": "data"}, -+ "full": { -+ "child": {"child": "data", "value": "data"}, -+ "another": "data", -+ "value": "data", -+ }, - }, -- 'f': { -- 'xml': 'data' -- 'data', -- 'legacy': {'child': [{'sub-child': 'data'}, {'child': 'data'}]}, -- 'full': {'child': [{'sub-child': {'value': 'data', 'sub-child': 'data'}}, 'data']} -+ "f": { -+ "xml": 'data' -+ "data", -+ "legacy": {"child": [{"sub-child": "data"}, {"child": "data"}]}, -+ "full": { -+ "child": [ -+ {"sub-child": {"value": "data", "sub-child": "data"}}, -+ "data", -+ ] -+ }, - }, - } - - def test_xml_case_a(self): -- xmldata = ET.fromstring(self.cases['a']['xml']) -+ xmldata = ET.fromstring(self.cases["a"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['a']['legacy']) -+ self.assertEqual(defaultdict, self.cases["a"]["legacy"]) - - def test_xml_case_a_legacy(self): -- xmldata = ET.fromstring(self.cases['a']['xml']) -+ xmldata = ET.fromstring(self.cases["a"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['a']['legacy']) -+ self.assertEqual(defaultdict, self.cases["a"]["legacy"]) - - def test_xml_case_a_full(self): -- xmldata = ET.fromstring(self.cases['a']['xml']) -+ xmldata = ET.fromstring(self.cases["a"]["xml"]) - defaultdict = xml.to_dict(xmldata, True) -- self.assertEqual(defaultdict, self.cases['a']['full']) -+ self.assertEqual(defaultdict, self.cases["a"]["full"]) - - def test_xml_case_b(self): -- xmldata = ET.fromstring(self.cases['b']['xml']) -+ xmldata = ET.fromstring(self.cases["b"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['b']['legacy']) -+ self.assertEqual(defaultdict, self.cases["b"]["legacy"]) - - def test_xml_case_b_legacy(self): -- xmldata = ET.fromstring(self.cases['b']['xml']) -+ xmldata = ET.fromstring(self.cases["b"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['b']['legacy']) -+ self.assertEqual(defaultdict, self.cases["b"]["legacy"]) - - def test_xml_case_b_full(self): -- xmldata = ET.fromstring(self.cases['b']['xml']) -+ xmldata = ET.fromstring(self.cases["b"]["xml"]) - defaultdict = xml.to_dict(xmldata, 
True) -- self.assertEqual(defaultdict, self.cases['b']['full']) -+ self.assertEqual(defaultdict, self.cases["b"]["full"]) - - def test_xml_case_c(self): -- xmldata = ET.fromstring(self.cases['c']['xml']) -+ xmldata = ET.fromstring(self.cases["c"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['c']['legacy']) -+ self.assertEqual(defaultdict, self.cases["c"]["legacy"]) - - def test_xml_case_c_legacy(self): -- xmldata = ET.fromstring(self.cases['c']['xml']) -+ xmldata = ET.fromstring(self.cases["c"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['c']['legacy']) -+ self.assertEqual(defaultdict, self.cases["c"]["legacy"]) - - def test_xml_case_c_full(self): -- xmldata = ET.fromstring(self.cases['c']['xml']) -+ xmldata = ET.fromstring(self.cases["c"]["xml"]) - defaultdict = xml.to_dict(xmldata, True) -- self.assertEqual(defaultdict, self.cases['c']['full']) -+ self.assertEqual(defaultdict, self.cases["c"]["full"]) - - def test_xml_case_d(self): -- xmldata = ET.fromstring(self.cases['d']['xml']) -+ xmldata = ET.fromstring(self.cases["d"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['d']['legacy']) -+ self.assertEqual(defaultdict, self.cases["d"]["legacy"]) - - def test_xml_case_d_legacy(self): -- xmldata = ET.fromstring(self.cases['d']['xml']) -+ xmldata = ET.fromstring(self.cases["d"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['d']['legacy']) -+ self.assertEqual(defaultdict, self.cases["d"]["legacy"]) - - def test_xml_case_d_full(self): -- xmldata = ET.fromstring(self.cases['d']['xml']) -+ xmldata = ET.fromstring(self.cases["d"]["xml"]) - defaultdict = xml.to_dict(xmldata, True) -- self.assertEqual(defaultdict, self.cases['d']['full']) -+ self.assertEqual(defaultdict, self.cases["d"]["full"]) - - def test_xml_case_e(self): -- xmldata = ET.fromstring(self.cases['e']['xml']) -+ xmldata = ET.fromstring(self.cases["e"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['e']['legacy']) -+ self.assertEqual(defaultdict, self.cases["e"]["legacy"]) - - def test_xml_case_e_legacy(self): -- xmldata = ET.fromstring(self.cases['e']['xml']) -+ xmldata = ET.fromstring(self.cases["e"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['e']['legacy']) -+ self.assertEqual(defaultdict, self.cases["e"]["legacy"]) - - def test_xml_case_e_full(self): -- xmldata = ET.fromstring(self.cases['e']['xml']) -+ xmldata = ET.fromstring(self.cases["e"]["xml"]) - defaultdict = xml.to_dict(xmldata, True) -- self.assertEqual(defaultdict, self.cases['e']['full']) -+ self.assertEqual(defaultdict, self.cases["e"]["full"]) - - def test_xml_case_f(self): -- xmldata = ET.fromstring(self.cases['f']['xml']) -+ xmldata = ET.fromstring(self.cases["f"]["xml"]) - defaultdict = xml.to_dict(xmldata) -- self.assertEqual(defaultdict, self.cases['f']['legacy']) -+ self.assertEqual(defaultdict, self.cases["f"]["legacy"]) - - def test_xml_case_f_legacy(self): -- xmldata = ET.fromstring(self.cases['f']['xml']) -+ xmldata = ET.fromstring(self.cases["f"]["xml"]) - defaultdict = xml.to_dict(xmldata, False) -- self.assertEqual(defaultdict, self.cases['f']['legacy']) -+ self.assertEqual(defaultdict, self.cases["f"]["legacy"]) - - def test_xml_case_f_full(self): -- xmldata = ET.fromstring(self.cases['f']['xml']) -+ xmldata = ET.fromstring(self.cases["f"]["xml"]) - defaultdict = 
xml.to_dict(xmldata, True) -- self.assertEqual(defaultdict, self.cases['f']['full']) -+ self.assertEqual(defaultdict, self.cases["f"]["full"]) + def test_compare_dicts(self): + ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"}) + self.assertEqual(ret, {}) -- -2.28.0 +2.29.2 diff --git a/batch-async-catch-exceptions-and-safety-unregister-a.patch b/batch-async-catch-exceptions-and-safety-unregister-a.patch index c87e6bc..8b84c50 100644 --- a/batch-async-catch-exceptions-and-safety-unregister-a.patch +++ b/batch-async-catch-exceptions-and-safety-unregister-a.patch @@ -1,42 +1,34 @@ -From c5edf396ffd66b6ac1479aa01367aae3eff7683d Mon Sep 17 00:00:00 2001 +From 1606379714f4776e2b529fb1d45891266985c896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Fri, 28 Feb 2020 15:11:53 +0000 -Subject: [PATCH] Batch Async: Catch exceptions and safety unregister and - close instances +Subject: [PATCH] Batch Async: Catch exceptions and safety unregister + and close instances --- - salt/cli/batch_async.py | 156 +++++++++++++++++++++++----------------- - 1 file changed, 89 insertions(+), 67 deletions(-) + salt/cli/batch_async.py | 160 ++++++++++++++++++++++++---------------- + 1 file changed, 96 insertions(+), 64 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index da069b64bd..b8f272ed67 100644 +index 1e2ac5b0d3..3dc04826d1 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -13,7 +13,6 @@ import salt.client - - # pylint: enable=import-error,no-name-in-module,redefined-builtin - import logging --import fnmatch - - log = logging.getLogger(__name__) - -@@ -104,22 +103,25 @@ class BatchAsync(object): +@@ -107,22 +107,25 @@ class BatchAsync: def __event_handler(self, raw): if not self.event: return - mtag, data = self.event.unpack(raw, self.event.serial) - for (pattern, op) in self.patterns: - if mtag.startswith(pattern[:-1]): -- minion = data['id'] -- if op == 'ping_return': +- minion = data["id"] +- if op == "ping_return": - self.minions.add(minion) - if self.targeted_minions == self.minions: - self.event.io_loop.spawn_callback(self.start_batch) -- elif op == 'find_job_return': +- elif op == "find_job_return": - if data.get("return", None): - self.find_job_returned.add(minion) -- elif op == 'batch_run': +- elif op == "batch_run": - if minion in self.active: - self.active.remove(minion) - self.done_minions.add(minion) @@ -45,25 +37,25 @@ index da069b64bd..b8f272ed67 100644 + mtag, data = self.event.unpack(raw, self.event.serial) + for (pattern, op) in self.patterns: + if mtag.startswith(pattern[:-1]): -+ minion = data['id'] -+ if op == 'ping_return': ++ minion = data["id"] ++ if op == "ping_return": + self.minions.add(minion) + if self.targeted_minions == self.minions: + self.event.io_loop.spawn_callback(self.start_batch) -+ elif op == 'find_job_return': ++ elif op == "find_job_return": + if data.get("return", None): + self.find_job_returned.add(minion) -+ elif op == 'batch_run': ++ elif op == "batch_run": + if minion in self.active: + self.active.remove(minion) + self.done_minions.add(minion) + self.event.io_loop.spawn_callback(self.schedule_next) + except Exception as ex: -+ log.error("Exception occured while processing event: {}".format(ex)) ++ log.error("Exception occured while processing event: {}".format(ex)) def _get_next(self): - to_run = self.minions.difference( -@@ -146,54 +148,59 @@ class BatchAsync(object): + to_run = ( +@@ -154,53 +157,67 @@ class BatchAsync: if timedout_minions: 
self.schedule_next() @@ -74,112 +66,118 @@ index da069b64bd..b8f272ed67 100644 @tornado.gen.coroutine def find_job(self, minions): -- not_done = minions.difference(self.done_minions).difference(self.timedout_minions) +- not_done = minions.difference(self.done_minions).difference( +- self.timedout_minions +- ) - - if not_done: - jid = self.jid_gen() -- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) +- find_job_return_pattern = "salt/job/{}/ret/*".format(jid) - self.patterns.add((find_job_return_pattern, "find_job_return")) -- self.event.subscribe(find_job_return_pattern, match_type='glob') +- self.event.subscribe(find_job_return_pattern, match_type="glob") - - ret = yield self.local.run_job_async( - not_done, -- 'saltutil.find_job', +- "saltutil.find_job", - [self.batch_jid], -- 'list', -- gather_job_timeout=self.opts['gather_job_timeout'], +- "list", +- gather_job_timeout=self.opts["gather_job_timeout"], - jid=jid, -- **self.eauth) -- yield tornado.gen.sleep(self.opts['gather_job_timeout']) -- self.event.io_loop.spawn_callback( -- self.check_find_job, -- not_done, -- jid) +- **self.eauth + if self.event: -+ not_done = minions.difference(self.done_minions).difference(self.timedout_minions) ++ not_done = minions.difference(self.done_minions).difference( ++ self.timedout_minions + ) +- yield tornado.gen.sleep(self.opts["gather_job_timeout"]) +- self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid) + try: + if not_done: + jid = self.jid_gen() -+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) ++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) + self.patterns.add((find_job_return_pattern, "find_job_return")) -+ self.event.subscribe(find_job_return_pattern, match_type='glob') ++ self.event.subscribe(find_job_return_pattern, match_type="glob") + ret = yield self.local.run_job_async( + not_done, -+ 'saltutil.find_job', ++ "saltutil.find_job", + [self.batch_jid], -+ 'list', -+ gather_job_timeout=self.opts['gather_job_timeout'], ++ "list", ++ gather_job_timeout=self.opts["gather_job_timeout"], + jid=jid, -+ **self.eauth) -+ yield tornado.gen.sleep(self.opts['gather_job_timeout']) ++ **self.eauth ++ ) ++ yield tornado.gen.sleep(self.opts["gather_job_timeout"]) + if self.event: + self.event.io_loop.spawn_callback( -+ self.check_find_job, -+ not_done, -+ jid) ++ self.check_find_job, not_done, jid ++ ) + except Exception as ex: -+ log.error("Exception occured handling batch async: {}. Aborting execution.".format(ex)) ++ log.error( ++ "Exception occured handling batch async: {}. 
Aborting execution.".format( ++ ex ++ ) ++ ) + self.close_safe() @tornado.gen.coroutine def start(self): - self.__set_event_handler() - ping_return = yield self.local.run_job_async( -- self.opts['tgt'], -- 'test.ping', +- self.opts["tgt"], +- "test.ping", - [], -- self.opts.get( -- 'selected_target_option', -- self.opts.get('tgt_type', 'glob') -- ), -- gather_job_timeout=self.opts['gather_job_timeout'], +- self.opts.get("selected_target_option", self.opts.get("tgt_type", "glob")), +- gather_job_timeout=self.opts["gather_job_timeout"], - jid=self.ping_jid, - metadata=self.metadata, -- **self.eauth) -- self.targeted_minions = set(ping_return['minions']) -- #start batching even if not all minions respond to ping -- yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout']) +- **self.eauth +- ) +- self.targeted_minions = set(ping_return["minions"]) +- # start batching even if not all minions respond to ping +- yield tornado.gen.sleep( +- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] +- ) - self.event.io_loop.spawn_callback(self.start_batch) -- + if self.event: + self.__set_event_handler() + ping_return = yield self.local.run_job_async( -+ self.opts['tgt'], -+ 'test.ping', ++ self.opts["tgt"], ++ "test.ping", + [], + self.opts.get( -+ 'selected_target_option', -+ self.opts.get('tgt_type', 'glob') ++ "selected_target_option", self.opts.get("tgt_type", "glob") + ), -+ gather_job_timeout=self.opts['gather_job_timeout'], ++ gather_job_timeout=self.opts["gather_job_timeout"], + jid=self.ping_jid, + metadata=self.metadata, -+ **self.eauth) -+ self.targeted_minions = set(ping_return['minions']) -+ #start batching even if not all minions respond to ping -+ yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout']) ++ **self.eauth ++ ) ++ self.targeted_minions = set(ping_return["minions"]) ++ # start batching even if not all minions respond to ping ++ yield tornado.gen.sleep( ++ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] ++ ) + if self.event: + self.event.io_loop.spawn_callback(self.start_batch) @tornado.gen.coroutine def start_batch(self): -@@ -206,7 +213,8 @@ class BatchAsync(object): - "metadata": self.metadata - } - ret = self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid)) +@@ -215,7 +232,8 @@ class BatchAsync: + ret = self.event.fire_event( + data, "salt/batch/{}/start".format(self.batch_jid) + ) - self.event.io_loop.spawn_callback(self.run_next) + if self.event: + self.event.io_loop.spawn_callback(self.run_next) @tornado.gen.coroutine def end_batch(self): -@@ -221,11 +229,21 @@ class BatchAsync(object): - "metadata": self.metadata +@@ -232,11 +250,21 @@ class BatchAsync: + "metadata": self.metadata, } - self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) + self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) - for (pattern, label) in self.patterns: - if label in ["ping_return", "batch_run"]: -- self.event.unsubscribe(pattern, match_type='glob') +- self.event.unsubscribe(pattern, match_type="glob") - del self - gc.collect() + @@ -189,18 +187,18 @@ index da069b64bd..b8f272ed67 100644 + self.close_safe() + + def close_safe(self): -+ for (pattern, label) in self.patterns: -+ self.event.unsubscribe(pattern, match_type='glob') -+ self.event.remove_event_handler(self.__event_handler) -+ self.event = None -+ self.local = None -+ self.ioloop = None -+ del self -+ gc.collect() ++ for (pattern, label) in self.patterns: ++ 
self.event.unsubscribe(pattern, match_type="glob") ++ self.event.remove_event_handler(self.__event_handler) ++ self.event = None ++ self.local = None ++ self.ioloop = None ++ del self ++ gc.collect() @tornado.gen.coroutine def schedule_next(self): -@@ -233,7 +251,8 @@ class BatchAsync(object): +@@ -244,7 +272,8 @@ class BatchAsync: self.scheduled = True # call later so that we maybe gather more returns yield tornado.gen.sleep(self.batch_delay) @@ -210,10 +208,10 @@ index da069b64bd..b8f272ed67 100644 @tornado.gen.coroutine def run_next(self): -@@ -254,17 +273,20 @@ class BatchAsync(object): - metadata=self.metadata) +@@ -266,17 +295,20 @@ class BatchAsync: + ) - yield tornado.gen.sleep(self.opts['timeout']) + yield tornado.gen.sleep(self.opts["timeout"]) - self.event.io_loop.spawn_callback(self.find_job, set(next_batch)) + + # The batch can be done already at this point, which means no self.event @@ -235,6 +233,6 @@ index da069b64bd..b8f272ed67 100644 self.ioloop = None gc.collect() -- -2.23.0 +2.29.2 diff --git a/batch.py-avoid-exception-when-minion-does-not-respon.patch b/batch.py-avoid-exception-when-minion-does-not-respon.patch index c7e4ea0..6471863 100644 --- a/batch.py-avoid-exception-when-minion-does-not-respon.patch +++ b/batch.py-avoid-exception-when-minion-does-not-respon.patch @@ -1,9 +1,9 @@ -From bbd2e622f7e165a6e16fd5edf5f4596764748208 Mon Sep 17 00:00:00 2001 +From 03f0aa44f6963e09a92dd3ea2090ef9ee463cb94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 5 Jun 2019 15:15:04 +0100 -Subject: [PATCH] batch.py: avoid exception when minion does not respond - (bsc#1135507) +Subject: [PATCH] batch.py: avoid exception when minion does not + respond (bsc#1135507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -18,26 +18,29 @@ bsc#1135507 Signed-off-by: José Guilherme Vanz --- - salt/cli/batch.py | 5 +++++ - 1 file changed, 5 insertions(+) + salt/cli/batch.py | 8 ++++++++ + 1 file changed, 8 insertions(+) diff --git a/salt/cli/batch.py b/salt/cli/batch.py -index 67f03c8a45..10fc81a5f4 100644 +index 2bc5444aef..6285a45434 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py -@@ -318,6 +318,11 @@ class Batch(object): - if self.opts.get('failhard') and data['retcode'] > 0: +@@ -348,6 +348,14 @@ class Batch: + if self.opts.get("failhard") and data["retcode"] > 0: failhard = True + # avoid an exception if the minion does not respond. + if data.get("failed") is True: -+ log.debug('Minion %s failed to respond: data=%s', minion, data) -+ data = {'ret': 'Minion did not return. [Failed]', 'retcode': salt.defaults.exitcodes.EX_GENERIC} ++ log.debug("Minion %s failed to respond: data=%s", minion, data) ++ data = { ++ "ret": "Minion did not return. 
[Failed]", ++ "retcode": salt.defaults.exitcodes.EX_GENERIC, ++ } + - if self.opts.get('raw'): + if self.opts.get("raw"): ret[minion] = data yield data -- -2.16.4 +2.29.2 diff --git a/batch_async-avoid-using-fnmatch-to-match-event-217.patch b/batch_async-avoid-using-fnmatch-to-match-event-217.patch index b26a2d6..d97e8cf 100644 --- a/batch_async-avoid-using-fnmatch-to-match-event-217.patch +++ b/batch_async-avoid-using-fnmatch-to-match-event-217.patch @@ -1,26 +1,27 @@ -From bd20cd2655a1141fe9ea892e974e40988c3fb83c Mon Sep 17 00:00:00 2001 +From 31fedcb3173f73fbffc3b053bc64c94a7b608118 Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Mon, 2 Mar 2020 11:23:59 +0100 -Subject: [PATCH] batch_async: avoid using fnmatch to match event (#217) +Subject: [PATCH] batch_async: avoid using fnmatch to match event + (#217) --- salt/cli/batch_async.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index c4545e3ebc..da069b64bd 100644 +index 8d2601e636..1e2ac5b0d3 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -106,7 +106,7 @@ class BatchAsync(object): +@@ -109,7 +109,7 @@ class BatchAsync: return mtag, data = self.event.unpack(raw, self.event.serial) for (pattern, op) in self.patterns: - if fnmatch.fnmatch(mtag, pattern): + if mtag.startswith(pattern[:-1]): - minion = data['id'] - if op == 'ping_return': + minion = data["id"] + if op == "ping_return": self.minions.add(minion) -- -2.23.0 +2.29.2 diff --git a/bsc-1176024-fix-file-directory-user-and-group-owners.patch b/bsc-1176024-fix-file-directory-user-and-group-owners.patch index d4c5bda..a6f6811 100644 --- a/bsc-1176024-fix-file-directory-user-and-group-owners.patch +++ b/bsc-1176024-fix-file-directory-user-and-group-owners.patch @@ -1,4 +1,4 @@ -From 8973063f6ad24fd5b3788292aa8cc341221d7fb5 Mon Sep 17 00:00:00 2001 +From 60b8f6cdaab10a12973a074678608b86a34e23b7 Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Tue, 6 Oct 2020 12:36:41 +0300 Subject: [PATCH] bsc#1176024: Fix file/directory user and group @@ -12,22 +12,22 @@ Subject: [PATCH] bsc#1176024: Fix file/directory user and group Co-authored-by: Victor Zhestkov --- - salt/modules/file.py | 18 +++++++++--------- - salt/states/file.py | 4 ++-- - 2 files changed, 11 insertions(+), 11 deletions(-) + salt/modules/file.py | 26 +++++++++++++++++--------- + salt/states/file.py | 12 ++++++++++-- + 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py -index b5b70e2d4c..0b516aff05 100644 +index 989a7ad92d..b830b390d3 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py -@@ -256,7 +256,7 @@ def group_to_gid(group): +@@ -252,7 +252,7 @@ def group_to_gid(group): try: if isinstance(group, int): return group - return grp.getgrnam(group).gr_gid + return grp.getgrnam(salt.utils.stringutils.to_str(group)).gr_gid except KeyError: - return '' + return "" @@ -344,7 +344,7 @@ def user_to_uid(user): try: @@ -36,77 +36,91 @@ index b5b70e2d4c..0b516aff05 100644 - return pwd.getpwnam(user).pw_uid + return pwd.getpwnam(salt.utils.stringutils.to_str(user)).pw_uid except KeyError: - return '' + return "" -@@ -4574,7 +4574,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) - if (salt.utils.platform.is_windows() and - user_to_uid(user) != user_to_uid(perms['luser']) - ) or ( -- not salt.utils.platform.is_windows() and user != perms['luser'] -+ not salt.utils.platform.is_windows() and 
salt.utils.stringutils.to_str(user) != perms['luser'] - ): - perms['cuser'] = user +@@ -4977,7 +4977,10 @@ def check_perms( + if ( + salt.utils.platform.is_windows() + and user_to_uid(user) != user_to_uid(perms["luser"]) +- ) or (not salt.utils.platform.is_windows() and user != perms["luser"]): ++ ) or ( ++ not salt.utils.platform.is_windows() ++ and salt.utils.stringutils.to_str(user) != perms["luser"] ++ ): + perms["cuser"] = user -@@ -4584,7 +4584,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) - if (salt.utils.platform.is_windows() and - group_to_gid(group) != group_to_gid(perms['lgroup']) - ) or ( -- not salt.utils.platform.is_windows() and group != perms['lgroup'] -+ not salt.utils.platform.is_windows() and salt.utils.stringutils.to_str(group) != perms['lgroup'] - ): - perms['cgroup'] = group + if group: +@@ -4986,7 +4989,10 @@ def check_perms( + if ( + salt.utils.platform.is_windows() + and group_to_gid(group) != group_to_gid(perms["lgroup"]) +- ) or (not salt.utils.platform.is_windows() and group != perms["lgroup"]): ++ ) or ( ++ not salt.utils.platform.is_windows() ++ and salt.utils.stringutils.to_str(group) != perms["lgroup"] ++ ): + perms["cgroup"] = group -@@ -4615,7 +4615,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) - user != '' - ) or ( - not salt.utils.platform.is_windows() and -- user != get_user(name, follow_symlinks=follow_symlinks) and -+ salt.utils.stringutils.to_str(user) != get_user(name, follow_symlinks=follow_symlinks) and - user != '' + if "cuser" in perms or "cgroup" in perms: +@@ -5017,7 +5023,8 @@ def check_perms( + and user != "" + ) or ( + not salt.utils.platform.is_windows() +- and user != get_user(name, follow_symlinks=follow_symlinks) ++ and salt.utils.stringutils.to_str(user) ++ != get_user(name, follow_symlinks=follow_symlinks) + and user != "" ): - if __opts__['test'] is True: -@@ -4633,10 +4633,10 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) - if (salt.utils.platform.is_windows() and - group_to_gid(group) != group_to_gid( - get_group(name, follow_symlinks=follow_symlinks)) and -- user != '') or ( -+ group != '') or ( - not salt.utils.platform.is_windows() and -- group != get_group(name, follow_symlinks=follow_symlinks) and -- user != '' -+ salt.utils.stringutils.to_str(group) != get_group(name, follow_symlinks=follow_symlinks) and -+ group != '' + if __opts__["test"] is True: +@@ -5035,18 +5042,19 @@ def check_perms( + salt.utils.platform.is_windows() + and group_to_gid(group) + != group_to_gid(get_group(name, follow_symlinks=follow_symlinks)) +- and user != "" ++ and group != "" + ) or ( + not salt.utils.platform.is_windows() +- and group != get_group(name, follow_symlinks=follow_symlinks) +- and user != "" ++ and salt.utils.stringutils.to_str(group) ++ != get_group(name, follow_symlinks=follow_symlinks) ++ and group != "" ): - if __opts__['test'] is True: - ret['changes']['group'] = group -@@ -4644,7 +4644,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) - ret['result'] = False - ret['comment'].append('Failed to change group to {0}' - .format(group)) -- elif 'cgroup' in perms and user != '': -+ elif 'cgroup' in perms and group != '': - ret['changes']['group'] = group + if __opts__["test"] is True: + ret["changes"]["group"] = group + else: + ret["result"] = False + ret["comment"].append("Failed to change group to {}".format(group)) +- elif "cgroup" in perms and user != "": ++ elif "cgroup" in perms 
and group != "": + ret["changes"]["group"] = group # Mode changes if needed diff --git a/salt/states/file.py b/salt/states/file.py -index 0e925bb2ed..f21e0d12fc 100644 +index 9e24e389d8..89c70eb454 100644 --- a/salt/states/file.py +++ b/salt/states/file.py -@@ -960,11 +960,11 @@ def _check_dir_meta(name, - changes['directory'] = 'new' +@@ -989,9 +989,17 @@ def _check_dir_meta(name, user, group, mode, follow_symlinks=False): + if not stats: + changes["directory"] = "new" return changes - if (user is not None -- and user != stats['user'] -+ and salt.utils.stringutils.to_str(user) != stats['user'] - and user != stats.get('uid')): - changes['user'] = user - if (group is not None -- and group != stats['group'] -+ and salt.utils.stringutils.to_str(group) != stats['group'] - and group != stats.get('gid')): - changes['group'] = group +- if user is not None and user != stats["user"] and user != stats.get("uid"): ++ if ( ++ user is not None ++ and salt.utils.stringutils.to_str(user) != stats["user"] ++ and user != stats.get("uid") ++ ): + changes["user"] = user +- if group is not None and group != stats["group"] and group != stats.get("gid"): ++ if ( ++ group is not None ++ and salt.utils.stringutils.to_str(group) != stats["group"] ++ and group != stats.get("gid") ++ ): + changes["group"] = group # Normalize the dir mode + smode = salt.utils.files.normalize_mode(stats["mode"]) -- -2.28.0 +2.29.2 diff --git a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch b/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch index 9ac3008..eff0ef5 100644 --- a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch +++ b/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch @@ -1,4 +1,4 @@ -From 07f5a1d984b5a86c24620503f5e373ea0f11484a Mon Sep 17 00:00:00 2001 +From d9362f10681a2dfdb057939eee1ffae3a35d4a8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Fri, 12 Apr 2019 16:47:03 +0100 @@ -7,54 +7,81 @@ Subject: [PATCH] Calculate FQDNs in parallel to avoid blockings Fix pylint issue --- - salt/grains/core.py | 31 ++++++++++++++++++++++++++----- - 1 file changed, 26 insertions(+), 5 deletions(-) + salt/grains/core.py | 55 +++++++++++++++++++++++++++++++++------------ + 1 file changed, 41 insertions(+), 14 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 309e4c9c4a..4600f055dd 100644 +index 006878f806..883e3ebe09 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -20,12 +20,15 @@ import platform - import logging - import locale +@@ -20,8 +20,10 @@ import socket + import sys + import time import uuid -+import time ++import warnings import zlib from errno import EACCES, EPERM - import datetime - import warnings - import time - +from multiprocessing.dummy import Pool as ThreadPool + + import distro + import salt.exceptions +@@ -44,6 +46,14 @@ import salt.utils.versions + from salt.ext.six.moves import range + from salt.utils.network import _get_interfaces + ++# pylint: disable=import-error ++try: ++ import dateutil.tz + - # pylint: disable=import-error - try: - import dateutil.tz -@@ -2275,13 +2278,10 @@ def fqdns(): ++ _DATEUTIL_TZ = True ++except ImportError: ++ _DATEUTIL_TZ = False ++ + + # rewrite distro.linux_distribution to allow best=True kwarg in version(), needed to get the minor version numbers in CentOS + def _linux_distribution(): +@@ -2402,22 +2412,12 @@ def fqdns(): grains = {} fqdns = set() -- addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces()) -- 
addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces())) -- err_message = 'Exception during resolving address: %s' +- addresses = salt.utils.network.ip_addrs( +- include_loopback=False, interface_data=_get_interfaces() +- ) +- addresses.extend( +- salt.utils.network.ip_addrs6( +- include_loopback=False, interface_data=_get_interfaces() +- ) +- ) +- err_message = "Exception during resolving address: %s" - for ip in addresses: + def _lookup_fqdn(ip): try: name, aliaslist, addresslist = socket.gethostbyaddr(ip) -- fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]) -+ return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] +- fqdns.update( +- [socket.getfqdn(name)] +- + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] +- ) ++ return [socket.getfqdn(name)] + [ ++ als for als in aliaslist if salt.utils.network.is_fqdn(als) ++ ] except socket.herror as err: if err.errno in (0, HOST_NOT_FOUND, NO_DATA): # No FQDN for this IP address, so we don't need to know this all the time. -@@ -2291,6 +2291,27 @@ def fqdns(): - except (socket.error, socket.gaierror, socket.timeout) as err: +@@ -2427,6 +2427,33 @@ def fqdns(): + except (OSError, socket.gaierror, socket.timeout) as err: log.error(err_message, ip, err) + start = time.time() + -+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces()) -+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces())) -+ err_message = 'Exception during resolving address: %s' ++ addresses = salt.utils.network.ip_addrs( ++ include_loopback=False, interface_data=_get_interfaces() ++ ) ++ addresses.extend( ++ salt.utils.network.ip_addrs6( ++ include_loopback=False, interface_data=_get_interfaces() ++ ) ++ ) ++ err_message = "Exception during resolving address: %s" + + # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. 
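The comment above introduces the thread-pool fan-out that the rest of this hunk wires up. As a minimal standalone sketch of the same pattern (the sample IPs and the resolve() helper are illustrative, not taken from the patch):

    # Sketch: run blocking reverse lookups concurrently in a thread pool.
    import socket
    from multiprocessing.dummy import Pool as ThreadPool

    def resolve(ip):
        # socket.gethostbyaddr() blocks until the resolver answers or times
        # out, so one lookup per worker bounds the total wait to roughly the
        # slowest address instead of the sum over all addresses.
        try:
            name, aliases, _ = socket.gethostbyaddr(ip)
            return [socket.getfqdn(name)] + aliases
        except OSError:  # covers socket.herror and socket.gaierror
            return []

    pool = ThreadPool(8)  # eight workers, mirroring the hunk
    results = pool.map(resolve, ["192.0.2.1", "198.51.100.7"])  # example IPs
    pool.close()
    pool.join()
    fqdns = {name for group in results for name in group}

multiprocessing.dummy keeps the multiprocessing Pool API but backs it with threads, which is enough here because the GIL is released while the resolver call blocks.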
+ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing @@ -69,12 +96,12 @@ index 309e4c9c4a..4600f055dd 100644 + fqdns.update(item) + + elapsed = time.time() - start -+ log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) ++ log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed)) + return {"fqdns": sorted(list(fqdns))} -- -2.16.4 +2.29.2 diff --git a/changed-imports-to-vendored-tornado.patch b/changed-imports-to-vendored-tornado.patch index 0226156..403546a 100644 --- a/changed-imports-to-vendored-tornado.patch +++ b/changed-imports-to-vendored-tornado.patch @@ -1,38 +1,36 @@ -From 0cf1a655aa9353b22ae011e492a33aa52d780f83 Mon Sep 17 00:00:00 2001 +From 5db9ccdb4f557cdbff670b18c45e55124e29c57c Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Tue, 10 Mar 2020 14:02:17 +0100 Subject: [PATCH] Changed imports to vendored Tornado --- - salt/cli/batch_async.py | 26 ++++++++++++------------ + salt/cli/batch_async.py | 25 ++++++++++++----------- salt/master.py | 2 +- - salt/transport/ipc.py | 4 ++-- tests/unit/cli/test_batch_async.py | 32 +++++++++++++++--------------- - 4 files changed, 32 insertions(+), 32 deletions(-) + 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index b8f272ed67..08eeb34f1c 100644 +index 3dc04826d1..09aa85258b 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -6,7 +6,7 @@ Execute a job on the targeted minions by using a moving window of fixed size `ba - # Import python libs - from __future__ import absolute_import, print_function, unicode_literals - import gc --import tornado -+import salt.ext.tornado +@@ -8,6 +8,7 @@ import gc + import logging - # Import salt libs import salt.client -@@ -50,7 +50,7 @@ class BatchAsync(object): - } - ''' ++import salt.ext.tornado + import tornado + from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum + +@@ -46,7 +47,7 @@ class BatchAsync: + """ + def __init__(self, parent_opts, jid_gen, clear_load): - ioloop = tornado.ioloop.IOLoop.current() + ioloop = salt.ext.tornado.ioloop.IOLoop.current() - self.local = salt.client.get_local_client(parent_opts['conf_file'], io_loop=ioloop) - if 'gather_job_timeout' in clear_load['kwargs']: - clear_load['gather_job_timeout'] = clear_load['kwargs'].pop('gather_job_timeout') -@@ -152,7 +152,7 @@ class BatchAsync(object): + self.local = salt.client.get_local_client( + parent_opts["conf_file"], io_loop=ioloop + ) +@@ -161,7 +162,7 @@ class BatchAsync: self.find_job_returned = self.find_job_returned.difference(running) self.event.io_loop.spawn_callback(self.find_job, running) @@ -40,18 +38,18 @@ index b8f272ed67..08eeb34f1c 100644 + @salt.ext.tornado.gen.coroutine def find_job(self, minions): if self.event: - not_done = minions.difference(self.done_minions).difference(self.timedout_minions) -@@ -170,7 +170,7 @@ class BatchAsync(object): - gather_job_timeout=self.opts['gather_job_timeout'], + not_done = minions.difference(self.done_minions).difference( +@@ -182,7 +183,7 @@ class BatchAsync: jid=jid, - **self.eauth) -- yield tornado.gen.sleep(self.opts['gather_job_timeout']) -+ yield salt.ext.tornado.gen.sleep(self.opts['gather_job_timeout']) + **self.eauth + ) +- yield tornado.gen.sleep(self.opts["gather_job_timeout"]) ++ yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"]) if self.event: self.event.io_loop.spawn_callback( - self.check_find_job, -@@ -180,7 +180,7 @@ class BatchAsync(object): - 
log.error("Exception occured handling batch async: {}. Aborting execution.".format(ex)) + self.check_find_job, not_done, jid +@@ -195,7 +196,7 @@ class BatchAsync: + ) self.close_safe() - @tornado.gen.coroutine @@ -59,12 +57,14 @@ index b8f272ed67..08eeb34f1c 100644 def start(self): if self.event: self.__set_event_handler() -@@ -198,11 +198,11 @@ class BatchAsync(object): - **self.eauth) - self.targeted_minions = set(ping_return['minions']) - #start batching even if not all minions respond to ping -- yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout']) -+ yield salt.ext.tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout']) +@@ -213,13 +214,13 @@ class BatchAsync: + ) + self.targeted_minions = set(ping_return["minions"]) + # start batching even if not all minions respond to ping +- yield tornado.gen.sleep( ++ yield salt.ext.tornado.gen.sleep( + self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] + ) if self.event: self.event.io_loop.spawn_callback(self.start_batch) @@ -73,16 +73,16 @@ index b8f272ed67..08eeb34f1c 100644 def start_batch(self): if not self.initialized: self.batch_size = get_bnum(self.opts, self.minions, True) -@@ -216,7 +216,7 @@ class BatchAsync(object): +@@ -235,7 +236,7 @@ class BatchAsync: if self.event: self.event.io_loop.spawn_callback(self.run_next) - @tornado.gen.coroutine + @salt.ext.tornado.gen.coroutine def end_batch(self): - left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions)) - if not left and not self.ended: -@@ -232,7 +232,7 @@ class BatchAsync(object): + left = self.minions.symmetric_difference( + self.done_minions.union(self.timedout_minions) +@@ -253,7 +254,7 @@ class BatchAsync: # release to the IOLoop to allow the event to be published # before closing batch async execution @@ -91,9 +91,9 @@ index b8f272ed67..08eeb34f1c 100644 self.close_safe() def close_safe(self): -@@ -245,16 +245,16 @@ class BatchAsync(object): - del self - gc.collect() +@@ -266,16 +267,16 @@ class BatchAsync: + del self + gc.collect() - @tornado.gen.coroutine + @salt.ext.tornado.gen.coroutine @@ -111,66 +111,44 @@ index b8f272ed67..08eeb34f1c 100644 def run_next(self): self.scheduled = False next_batch = self._get_next() -@@ -272,7 +272,7 @@ class BatchAsync(object): - jid=self.batch_jid, - metadata=self.metadata) +@@ -294,7 +295,7 @@ class BatchAsync: + metadata=self.metadata, + ) -- yield tornado.gen.sleep(self.opts['timeout']) -+ yield salt.ext.tornado.gen.sleep(self.opts['timeout']) +- yield tornado.gen.sleep(self.opts["timeout"]) ++ yield salt.ext.tornado.gen.sleep(self.opts["timeout"]) # The batch can be done already at this point, which means no self.event if self.event: diff --git a/salt/master.py b/salt/master.py -index 3abf7ae60b..3a9d12999d 100644 +index 7a99af357a..ab85c7f5c6 100644 --- a/salt/master.py +++ b/salt/master.py -@@ -2049,7 +2049,7 @@ class ClearFuncs(object): +@@ -2237,7 +2237,7 @@ class ClearFuncs(TransportMethods): functools.partial(self._prep_jid, clear_load, {}), - batch_load + batch_load, ) - ioloop = tornado.ioloop.IOLoop.current() + ioloop = salt.ext.tornado.ioloop.IOLoop.current() ioloop.add_callback(batch.start) return { -diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py -index d2b295a633..33ee3d4182 100644 ---- a/salt/transport/ipc.py -+++ b/salt/transport/ipc.py -@@ -697,7 +697,7 @@ class IPCMessageSubscriber(IPCClient): - for callback in self.callbacks: - self.io_loop.spawn_callback(callback, raw) - -- 
@tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def read_async(self): - ''' - Asynchronously read messages and invoke a callback when they are ready. -@@ -712,7 +712,7 @@ class IPCMessageSubscriber(IPCClient): - yield salt.ext.tornado.gen.sleep(1) - except Exception as exc: # pylint: disable=broad-except - log.error('Exception occurred while Subscriber connecting: %s', exc) -- yield tornado.gen.sleep(1) -+ yield salt.ext.tornado.gen.sleep(1) - yield self._read(None, self.__run_callbacks) - - def close(self): diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index e1ce60859b..635dc689a8 100644 +index dcee9a87bd..82a712b15b 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -5,8 +5,8 @@ from __future__ import absolute_import - # Import Salt Libs - from salt.cli.batch_async import BatchAsync - +@@ -1,8 +1,8 @@ -import tornado --from tornado.testing import AsyncTestCase +import salt.ext.tornado + from salt.cli.batch_async import BatchAsync +from salt.ext.tornado.testing import AsyncTestCase - from tests.support.unit import skipIf, TestCase - from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON + from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch + from tests.support.unit import TestCase, skipIf +-from tornado.testing import AsyncTestCase -@@ -59,10 +59,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + + @skipIf(NO_MOCK, NO_MOCK_REASON) +@@ -52,10 +52,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.batch.start_batch() self.assertEqual(self.batch.batch_size, 2) @@ -180,12 +158,12 @@ index e1ce60859b..635dc689a8 100644 self.batch.event = MagicMock() - future = tornado.gen.Future() + future = salt.ext.tornado.gen.Future() - future.set_result({'minions': ['foo', 'bar']}) + future.set_result({"minions": ["foo", "bar"]}) self.batch.local.run_job_async.return_value = future ret = self.batch.start() -@@ -78,10 +78,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -71,10 +71,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): # assert targeted_minions == all minions matched by tgt - self.assertEqual(self.batch.targeted_minions, set(['foo', 'bar'])) + self.assertEqual(self.batch.targeted_minions, {"foo", "bar"}) - @tornado.testing.gen_test + @salt.ext.tornado.testing.gen_test @@ -193,11 +171,11 @@ index e1ce60859b..635dc689a8 100644 self.batch.event = MagicMock() - future = tornado.gen.Future() + future = salt.ext.tornado.gen.Future() - future.set_result({'minions': ['foo', 'bar']}) + future.set_result({"minions": ["foo", "bar"]}) self.batch.local.run_job_async.return_value = future self.batch.batch_presence_ping_timeout = None -@@ -109,7 +109,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ) +@@ -103,7 +103,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + ), ) - @tornado.testing.gen_test @@ -205,26 +183,26 @@ index e1ce60859b..635dc689a8 100644 def test_start_batch_calls_next(self): self.batch.run_next = MagicMock(return_value=MagicMock()) self.batch.event = MagicMock() -@@ -165,14 +165,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual( - len(event.remove_event_handler.mock_calls), 1) +@@ -160,14 +160,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual(len(event.unsubscribe.mock_calls), 2) + self.assertEqual(len(event.remove_event_handler.mock_calls), 1) - @tornado.testing.gen_test + @salt.ext.tornado.testing.gen_test def test_batch_next(self): self.batch.event = 
MagicMock() - self.batch.opts['fun'] = 'my.fun' - self.batch.opts['arg'] = [] - self.batch._get_next = MagicMock(return_value={'foo', 'bar'}) + self.batch.opts["fun"] = "my.fun" + self.batch.opts["arg"] = [] + self.batch._get_next = MagicMock(return_value={"foo", "bar"}) self.batch.batch_size = 2 - future = tornado.gen.Future() + future = salt.ext.tornado.gen.Future() - future.set_result({'minions': ['foo', 'bar']}) + future.set_result({"minions": ["foo", "bar"]}) self.batch.local.run_job_async.return_value = future self.batch.run_next() -@@ -284,38 +284,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -290,38 +290,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.batch._BatchAsync__event_handler(MagicMock()) - self.assertEqual(self.batch.find_job_returned, {'foo'}) + self.assertEqual(self.batch.find_job_returned, {"foo"}) - @tornado.testing.gen_test + @salt.ext.tornado.testing.gen_test @@ -242,33 +220,33 @@ index e1ce60859b..635dc689a8 100644 + future = salt.ext.tornado.gen.Future() future.set_result({}) self.batch.local.run_job_async.return_value = future - self.batch.minions = set(['foo', 'bar']) + self.batch.minions = {"foo", "bar"} self.batch.jid_gen = MagicMock(return_value="1234") - tornado.gen.sleep = MagicMock(return_value=future) + salt.ext.tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({'foo', 'bar'}) + self.batch.find_job({"foo", "bar"}) self.assertEqual( self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.check_find_job, {'foo', 'bar'}, "1234") + (self.batch.check_find_job, {"foo", "bar"}, "1234"), ) - @tornado.testing.gen_test + @salt.ext.tornado.testing.gen_test def test_batch_find_job_with_done_minions(self): - self.batch.done_minions = {'bar'} + self.batch.done_minions = {"bar"} self.batch.event = MagicMock() - future = tornado.gen.Future() + future = salt.ext.tornado.gen.Future() future.set_result({}) self.batch.local.run_job_async.return_value = future - self.batch.minions = set(['foo', 'bar']) + self.batch.minions = {"foo", "bar"} self.batch.jid_gen = MagicMock(return_value="1234") - tornado.gen.sleep = MagicMock(return_value=future) + salt.ext.tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({'foo', 'bar'}) + self.batch.find_job({"foo", "bar"}) self.assertEqual( self.batch.event.io_loop.spawn_callback.call_args[0], -- -2.23.0 +2.29.2 diff --git a/debian-info_installed-compatibility-50453.patch b/debian-info_installed-compatibility-50453.patch index dbb7665..7c45e35 100644 --- a/debian-info_installed-compatibility-50453.patch +++ b/debian-info_installed-compatibility-50453.patch @@ -1,4 +1,4 @@ -From 068eecfba4b2a14b334ff17a295d4005d17491f3 Mon Sep 17 00:00:00 2001 +From 36f4465d22f8cdf05be20ba72756757f5725e509 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 20 Nov 2018 16:06:31 +0100 Subject: [PATCH] Debian info_installed compatibility (#50453) @@ -49,17 +49,17 @@ Adjust architecture getter according to the lowpkg info Fix wrong Git merge: missing function signature --- - salt/modules/aptpkg.py | 20 ++++- - salt/modules/dpkg_lowpkg.py | 93 +++++++++++++++++--- - tests/unit/modules/test_aptpkg.py | 153 +++++++++++++++++++++------------ - tests/unit/modules/test_dpkg_lowpkg.py | 127 ++++++++++++++------------- - 4 files changed, 263 insertions(+), 130 deletions(-) + salt/modules/aptpkg.py | 24 ++- + salt/modules/dpkg_lowpkg.py | 110 ++++++++++-- + tests/unit/modules/test_aptpkg.py | 235 ++++++++++++++++++------- + tests/unit/modules/test_dpkg_lowpkg.py | 189 
+++++++++++--------- + 4 files changed, 396 insertions(+), 162 deletions(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 8f4d95a195..4ec9158476 100644 +index 70e173806a..bf90d0614f 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -2825,6 +2825,15 @@ def info_installed(*names, **kwargs): +@@ -2902,6 +2902,15 @@ def info_installed(*names, **kwargs): .. versionadded:: 2016.11.3 @@ -75,24 +75,28 @@ index 8f4d95a195..4ec9158476 100644 CLI example: .. code-block:: bash -@@ -2835,11 +2844,15 @@ def info_installed(*names, **kwargs): - ''' +@@ -2912,11 +2921,19 @@ def info_installed(*names, **kwargs): + """ kwargs = salt.utils.args.clean_kwargs(**kwargs) - failhard = kwargs.pop('failhard', True) -+ kwargs.pop('errors', None) # Only for compatibility with RPM -+ attr = kwargs.pop('attr', None) # Package attributes to return -+ all_versions = kwargs.pop('all_versions', False) # This is for backward compatible structure only + failhard = kwargs.pop("failhard", True) ++ kwargs.pop("errors", None) # Only for compatibility with RPM ++ attr = kwargs.pop("attr", None) # Package attributes to return ++ all_versions = kwargs.pop( ++ "all_versions", False ++ ) # This is for backward compatible structure only + if kwargs: salt.utils.args.invalid_kwargs(kwargs) ret = dict() -- for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard).items(): -+ for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard, attr=attr).items(): +- for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items(): ++ for pkg_name, pkg_nfo in __salt__["lowpkg.info"]( ++ *names, failhard=failhard, attr=attr ++ ).items(): t_nfo = dict() - if pkg_nfo.get('status', 'ii')[1] != 'i': - continue # return only packages that are really installed -@@ -2860,7 +2873,10 @@ def info_installed(*names, **kwargs): + if pkg_nfo.get("status", "ii")[1] != "i": + continue # return only packages that are really installed +@@ -2937,7 +2954,10 @@ def info_installed(*names, **kwargs): else: t_nfo[key] = value @@ -105,38 +109,58 @@ index 8f4d95a195..4ec9158476 100644 return ret diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py -index 4ac8efd2f2..b78e844830 100644 +index d569e04995..7447637774 100644 --- a/salt/modules/dpkg_lowpkg.py +++ b/salt/modules/dpkg_lowpkg.py -@@ -252,6 +252,38 @@ def file_dict(*packages): - return {'errors': errors, 'packages': ret} +@@ -2,13 +2,11 @@ + Support for DEB packages + """ + +-# Import python libs + import datetime + import logging + import os + import re + +-# Import salt libs + import salt.utils.args + import salt.utils.data + import salt.utils.files +@@ -236,6 +234,44 @@ def file_dict(*packages, **kwargs): + return {"errors": errors, "packages": ret} +def _get_pkg_build_time(name): -+ ''' ++ """ + Get package build time, if possible. + + :param name: + :return: -+ ''' ++ """ + iso_time = iso_time_t = None -+ changelog_dir = os.path.join('/usr/share/doc', name) ++ changelog_dir = os.path.join("/usr/share/doc", name) + if os.path.exists(changelog_dir): + for fname in os.listdir(changelog_dir): + try: + iso_time_t = int(os.path.getmtime(os.path.join(changelog_dir, fname))) -+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z' ++ iso_time = ( ++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z" ++ ) + break + except OSError: + pass + + # Packager doesn't care about Debian standards, therefore Plan B: brute-force it. 
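The comment above marks the fallback path: when no changelog under /usr/share/doc/<name> yields a timestamp, the function scans the package's file list from dpkg-query -L for anything changelog-like and reuses its mtime. A minimal standalone sketch of the shared mtime-to-ISO-8601 step (the path below is an example, not taken from the patch):

    # Sketch: approximate a package build date from a changelog file's mtime.
    import datetime
    import os

    build_date = build_date_t = None
    for path in ["/usr/share/doc/wget/changelog.Debian.gz"]:  # example path
        if "changelog" in path.lower() and os.path.exists(path):
            build_date_t = int(os.path.getmtime(path))  # Unix ticks
            build_date = (
                datetime.datetime.utcfromtimestamp(build_date_t).isoformat() + "Z"
            )
            break

The trailing "Z" marks the timestamp as UTC, matching the install_date values this module already reports.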
+ if not iso_time: -+ for pkg_f_path in __salt__['cmd.run']('dpkg-query -L {}'.format(name)).splitlines(): -+ if 'changelog' in pkg_f_path.lower() and os.path.exists(pkg_f_path): ++ for pkg_f_path in __salt__["cmd.run"]( ++ "dpkg-query -L {}".format(name) ++ ).splitlines(): ++ if "changelog" in pkg_f_path.lower() and os.path.exists(pkg_f_path): + try: + iso_time_t = int(os.path.getmtime(pkg_f_path)) -+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z' ++ iso_time = ( ++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z" ++ ) + break + except OSError: + pass @@ -145,67 +169,76 @@ index 4ac8efd2f2..b78e844830 100644 + + def _get_pkg_info(*packages, **kwargs): - ''' + """ Return list of package information. If 'packages' parameter is empty, -@@ -274,7 +306,7 @@ def _get_pkg_info(*packages, **kwargs): - ret = [] - cmd = "dpkg-query -W -f='package:" + bin_var + "\\n" \ - "revision:${binary:Revision}\\n" \ -- "architecture:${Architecture}\\n" \ -+ "arch:${Architecture}\\n" \ - "maintainer:${Maintainer}\\n" \ - "summary:${Summary}\\n" \ - "source:${source:Package}\\n" \ -@@ -308,9 +340,14 @@ def _get_pkg_info(*packages, **kwargs): +@@ -259,7 +295,7 @@ def _get_pkg_info(*packages, **kwargs): + cmd = ( + "dpkg-query -W -f='package:" + bin_var + "\\n" + "revision:${binary:Revision}\\n" +- "architecture:${Architecture}\\n" ++ "arch:${Architecture}\\n" + "maintainer:${Maintainer}\\n" + "summary:${Summary}\\n" + "source:${source:Package}\\n" +@@ -298,9 +334,16 @@ def _get_pkg_info(*packages, **kwargs): key, value = pkg_info_line.split(":", 1) if value: pkg_data[key] = value -- install_date = _get_pkg_install_time(pkg_data.get('package')) +- install_date = _get_pkg_install_time(pkg_data.get("package")) - if install_date: -- pkg_data['install_date'] = install_date -+ install_date, install_date_t = _get_pkg_install_time(pkg_data.get('package'), pkg_data.get('arch')) +- pkg_data["install_date"] = install_date ++ install_date, install_date_t = _get_pkg_install_time( ++ pkg_data.get("package"), pkg_data.get("arch") ++ ) + if install_date: -+ pkg_data['install_date'] = install_date -+ pkg_data['install_date_time_t'] = install_date_t # Unix ticks -+ build_date, build_date_t = _get_pkg_build_time(pkg_data.get('package')) ++ pkg_data["install_date"] = install_date ++ pkg_data["install_date_time_t"] = install_date_t # Unix ticks ++ build_date, build_date_t = _get_pkg_build_time(pkg_data.get("package")) + if build_date: -+ pkg_data['build_date'] = build_date -+ pkg_data['build_date_time_t'] = build_date_t - pkg_data['description'] = pkg_descr.split(":", 1)[-1] ++ pkg_data["build_date"] = build_date ++ pkg_data["build_date_time_t"] = build_date_t + pkg_data["description"] = pkg_descr.split(":", 1)[-1] ret.append(pkg_data) -@@ -336,19 +373,32 @@ def _get_pkg_license(pkg): +@@ -326,24 +369,34 @@ def _get_pkg_license(pkg): return ", ".join(sorted(licenses)) -def _get_pkg_install_time(pkg): +def _get_pkg_install_time(pkg, arch): - ''' + """ Return package install time, based on the /var/lib/dpkg/info/.list :return: - ''' + """ - iso_time = None + iso_time = iso_time_t = None -+ loc_root = '/var/lib/dpkg/info' ++ loc_root = "/var/lib/dpkg/info" if pkg is not None: -- location = "/var/lib/dpkg/info/{0}.list".format(pkg) +- location = "/var/lib/dpkg/info/{}.list".format(pkg) - if os.path.exists(location): -- iso_time = datetime.datetime.utcfromtimestamp(int(os.path.getmtime(location))).isoformat() + "Z" +- iso_time = ( +- datetime.datetime.utcfromtimestamp( +- 
int(os.path.getmtime(location)) +- ).isoformat() +- + "Z" +- ) + locations = [] -+ if arch is not None and arch != 'all': -+ locations.append(os.path.join(loc_root, '{0}:{1}.list'.format(pkg, arch))) -+ -+ locations.append(os.path.join(loc_root, '{0}.list'.format(pkg))) ++ if arch is not None and arch != "all": ++ locations.append(os.path.join(loc_root, "{}:{}.list".format(pkg, arch))) + +- return iso_time ++ locations.append(os.path.join(loc_root, "{}.list".format(pkg))) + for location in locations: + try: + iso_time_t = int(os.path.getmtime(location)) -+ iso_time = datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + 'Z' ++ iso_time = ( ++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z" ++ ) + break + except OSError: + pass - -- return iso_time ++ + if iso_time is None: + log.debug('Unable to get package installation time for package "%s".', pkg) + @@ -213,7 +246,7 @@ index 4ac8efd2f2..b78e844830 100644 def _get_pkg_ds_avail(): -@@ -398,6 +448,15 @@ def info(*packages, **kwargs): +@@ -393,6 +446,15 @@ def info(*packages, **kwargs): .. versionadded:: 2016.11.3 @@ -229,25 +262,25 @@ index 4ac8efd2f2..b78e844830 100644 CLI example: .. code-block:: bash -@@ -412,6 +471,10 @@ def info(*packages, **kwargs): +@@ -407,6 +469,10 @@ def info(*packages, **kwargs): kwargs = salt.utils.args.clean_kwargs(**kwargs) - failhard = kwargs.pop('failhard', True) -+ attr = kwargs.pop('attr', None) or None + failhard = kwargs.pop("failhard", True) ++ attr = kwargs.pop("attr", None) or None + if attr: -+ attr = attr.split(',') ++ attr = attr.split(",") + if kwargs: salt.utils.args.invalid_kwargs(kwargs) -@@ -431,6 +494,14 @@ def info(*packages, **kwargs): - lic = _get_pkg_license(pkg['package']) +@@ -434,6 +500,14 @@ def info(*packages, **kwargs): + lic = _get_pkg_license(pkg["package"]) if lic: - pkg['license'] = lic -- ret[pkg['package']] = pkg + pkg["license"] = lic +- ret[pkg["package"]] = pkg + + # Remove keys that aren't in attrs -+ pkg_name = pkg['package'] ++ pkg_name = pkg["package"] + if attr: + for k in list(pkg.keys())[:]: + if k not in attr: @@ -257,400 +290,558 @@ index 4ac8efd2f2..b78e844830 100644 return ret diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index e1b6602df5..10e960f090 100644 +index a7b7a34166..77d8b84896 100644 --- a/tests/unit/modules/test_aptpkg.py +++ b/tests/unit/modules/test_aptpkg.py -@@ -20,6 +20,8 @@ from tests.support.mock import Mock, MagicMock, patch - from salt.ext import six - from salt.exceptions import CommandExecutionError, SaltInvocationError +@@ -13,6 +13,7 @@ import textwrap + import pytest import salt.modules.aptpkg as aptpkg -+import pytest -+import textwrap - - try: - import pytest -@@ -166,51 +168,39 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): + from salt.exceptions import CommandExecutionError, SaltInvocationError ++from salt.ext import six + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, Mock, call, patch + from tests.support.unit import TestCase, skipIf +@@ -182,49 +183,54 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - return {aptpkg: {}} + return {aptpkg: {"__grains__": {}}} -+ @patch('salt.modules.aptpkg.__salt__', -+ {'pkg_resource.version': MagicMock(return_value=LOWPKG_INFO['wget']['version'])}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ { ++ "pkg_resource.version": MagicMock( ++ return_value=LOWPKG_INFO["wget"]["version"] ++ ) ++ }, ++ ) def test_version(self): - ''' + """ 
Test - Returns a string representing the package version or an empty string if not installed. - ''' -- version = LOWPKG_INFO['wget']['version'] + """ +- version = LOWPKG_INFO["wget"]["version"] - mock = MagicMock(return_value=version) -- with patch.dict(aptpkg.__salt__, {'pkg_resource.version': mock}): -- self.assertEqual(aptpkg.version(*['wget']), version) -+ assert aptpkg.version(*['wget']) == aptpkg.__salt__['pkg_resource.version']() +- with patch.dict(aptpkg.__salt__, {"pkg_resource.version": mock}): +- self.assertEqual(aptpkg.version(*["wget"]), version) ++ assert aptpkg.version(*["wget"]) == aptpkg.__salt__["pkg_resource.version"]() -+ @patch('salt.modules.aptpkg.latest_version', MagicMock(return_value='')) ++ @patch("salt.modules.aptpkg.latest_version", MagicMock(return_value="")) def test_upgrade_available(self): - ''' + """ Test - Check whether or not an upgrade is available for a given package. - ''' -- with patch('salt.modules.aptpkg.latest_version', -- MagicMock(return_value='')): -- self.assertFalse(aptpkg.upgrade_available('wget')) -+ assert not aptpkg.upgrade_available('wget') + """ +- with patch("salt.modules.aptpkg.latest_version", MagicMock(return_value="")): +- self.assertFalse(aptpkg.upgrade_available("wget")) ++ assert not aptpkg.upgrade_available("wget") -+ @patch('salt.modules.aptpkg.get_repo_keys', MagicMock(return_value=REPO_KEYS)) -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': 'OK'})}) ++ @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})}, ++ ) def test_add_repo_key(self): - ''' + """ Test - Add a repo key. - ''' -- with patch('salt.modules.aptpkg.get_repo_keys', -- MagicMock(return_value=REPO_KEYS)): -- mock = MagicMock(return_value={ -- 'retcode': 0, -- 'stdout': 'OK' -- }) -- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}): -- self.assertTrue(aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com', -- keyid='FBB75451')) -+ assert aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com', keyid='FBB75451') + """ +- with patch( +- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS) +- ): +- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"}) +- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): +- self.assertTrue( +- aptpkg.add_repo_key( +- keyserver="keyserver.ubuntu.com", keyid="FBB75451" +- ) +- ) ++ assert aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com", keyid="FBB75451") -+ @patch('salt.modules.aptpkg.get_repo_keys', MagicMock(return_value=REPO_KEYS)) -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': 'OK'})}) ++ @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})}, ++ ) def test_add_repo_key_failed(self): - ''' + """ Test - Add a repo key using incomplete input data. 
- ''' -- with patch('salt.modules.aptpkg.get_repo_keys', -- MagicMock(return_value=REPO_KEYS)): -- kwargs = {'keyserver': 'keyserver.ubuntu.com'} -- mock = MagicMock(return_value={ -- 'retcode': 0, -- 'stdout': 'OK' -- }) -- with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}): + """ +- with patch( +- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS) +- ): +- kwargs = {"keyserver": "keyserver.ubuntu.com"} +- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"}) +- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): - self.assertRaises(SaltInvocationError, aptpkg.add_repo_key, **kwargs) + with pytest.raises(SaltInvocationError) as ex: -+ aptpkg.add_repo_key(keyserver='keyserver.ubuntu.com') -+ assert ' No keyid or keyid too short for keyserver: keyserver.ubuntu.com' in str(ex) ++ aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com") ++ assert ( ++ " No keyid or keyid too short for keyserver: keyserver.ubuntu.com" ++ in str(ex) ++ ) def test_get_repo_keys(self): - ''' -@@ -223,35 +213,31 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock}): + """ +@@ -234,35 +240,48 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): + with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): self.assertEqual(aptpkg.get_repo_keys(), REPO_KEYS) -+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.file_dict': MagicMock(return_value=LOWPKG_FILES)}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"lowpkg.file_dict": MagicMock(return_value=LOWPKG_FILES)}, ++ ) def test_file_dict(self): - ''' + """ Test - List the files that belong to a package, grouped by package. - ''' + """ - mock = MagicMock(return_value=LOWPKG_FILES) -- with patch.dict(aptpkg.__salt__, {'lowpkg.file_dict': mock}): -- self.assertEqual(aptpkg.file_dict('wget'), LOWPKG_FILES) -+ assert aptpkg.file_dict('wget') == LOWPKG_FILES +- with patch.dict(aptpkg.__salt__, {"lowpkg.file_dict": mock}): +- self.assertEqual(aptpkg.file_dict("wget"), LOWPKG_FILES) ++ assert aptpkg.file_dict("wget") == LOWPKG_FILES -+ @patch('salt.modules.aptpkg.__salt__', { -+ 'lowpkg.file_list': MagicMock(return_value={'errors': LOWPKG_FILES['errors'], -+ 'files': LOWPKG_FILES['packages']['wget']})}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ { ++ "lowpkg.file_list": MagicMock( ++ return_value={ ++ "errors": LOWPKG_FILES["errors"], ++ "files": LOWPKG_FILES["packages"]["wget"], ++ } ++ ) ++ }, ++ ) def test_file_list(self): - ''' + """ - Test - List the files that belong to a package. 
+ Test 'file_list' function, which is just an alias to the lowpkg 'file_list' + - ''' + """ - files = { -- 'errors': LOWPKG_FILES['errors'], -- 'files': LOWPKG_FILES['packages']['wget'], +- "errors": LOWPKG_FILES["errors"], +- "files": LOWPKG_FILES["packages"]["wget"], - } - mock = MagicMock(return_value=files) -- with patch.dict(aptpkg.__salt__, {'lowpkg.file_list': mock}): -- self.assertEqual(aptpkg.file_list('wget'), files) -+ assert aptpkg.file_list('wget') == aptpkg.__salt__['lowpkg.file_list']() +- with patch.dict(aptpkg.__salt__, {"lowpkg.file_list": mock}): +- self.assertEqual(aptpkg.file_list("wget"), files) ++ assert aptpkg.file_list("wget") == aptpkg.__salt__["lowpkg.file_list"]() -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_stdout': MagicMock(return_value='wget\t\t\t\t\t\tinstall')}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"cmd.run_stdout": MagicMock(return_value="wget\t\t\t\t\t\tinstall")}, ++ ) def test_get_selections(self): - ''' + """ Test - View package state from the dpkg database. - ''' -- selections = {'install': ['wget']} -- mock = MagicMock(return_value='wget\t\t\t\t\t\tinstall') -- with patch.dict(aptpkg.__salt__, {'cmd.run_stdout': mock}): -- self.assertEqual(aptpkg.get_selections('wget'), selections) -+ assert aptpkg.get_selections('wget') == {'install': ['wget']} + """ +- selections = {"install": ["wget"]} +- mock = MagicMock(return_value="wget\t\t\t\t\t\tinstall") +- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}): +- self.assertEqual(aptpkg.get_selections("wget"), selections) ++ assert aptpkg.get_selections("wget") == {"install": ["wget"]} -+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, ++ ) def test_info_installed(self): - ''' + """ Test - Return the information of the named package(s) installed on the system. -@@ -267,21 +253,72 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - if installed['wget'].get(names[name], False): - installed['wget'][name] = installed['wget'].pop(names[name]) +@@ -274,21 +293,101 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): + if installed["wget"].get(names[name], False): + installed["wget"][name] = installed["wget"].pop(names[name]) - mock = MagicMock(return_value=LOWPKG_INFO) -- with patch.dict(aptpkg.__salt__, {'lowpkg.info': mock}): -- del installed['wget']['status'] -- self.assertEqual(aptpkg.info_installed('wget'), installed) +- with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}): +- del installed["wget"]["status"] +- self.assertEqual(aptpkg.info_installed("wget"), installed) - self.assertEqual(len(aptpkg.info_installed()), 1) -+ assert aptpkg.info_installed('wget') == installed -+ -+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) ++ assert aptpkg.info_installed("wget") == installed + ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, ++ ) + def test_info_installed_attr(self): -+ ''' ++ """ + Test info_installed 'attr'. + This doesn't test 'attr' behaviour per se, since the underlying function is in dpkg. + The test should simply not raise exceptions for invalid parameter. 
+ + :return: -+ ''' -+ ret = aptpkg.info_installed('emacs', attr='foo,bar') ++ """ ++ ret = aptpkg.info_installed("emacs", attr="foo,bar") + assert isinstance(ret, dict) -+ assert 'wget' in ret -+ assert isinstance(ret['wget'], dict) ++ assert "wget" in ret ++ assert isinstance(ret["wget"], dict) + -+ wget_pkg = ret['wget'] -+ expected_pkg = {'url': 'http://www.gnu.org/software/wget/', -+ 'packager': 'Ubuntu Developers ', 'name': 'wget', -+ 'install_date': '2016-08-30T22:20:15Z', 'description': 'retrieves files from the web', -+ 'version': '1.15-1ubuntu1.14.04.2', 'architecture': 'amd64', 'group': 'web', 'source': 'wget'} ++ wget_pkg = ret["wget"] ++ expected_pkg = { ++ "url": "http://www.gnu.org/software/wget/", ++ "packager": "Ubuntu Developers ", ++ "name": "wget", ++ "install_date": "2016-08-30T22:20:15Z", ++ "description": "retrieves files from the web", ++ "version": "1.15-1ubuntu1.14.04.2", ++ "architecture": "amd64", ++ "group": "web", ++ "source": "wget", ++ } + for k in wget_pkg: + assert k in expected_pkg + assert wget_pkg[k] == expected_pkg[k] + -+ @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, ++ ) + def test_info_installed_all_versions(self): -+ ''' ++ """ + Test info_installed 'all_versions'. + Since Debian won't return same name packages with the different names, + this should just return different structure, backward compatible with + the RPM equivalents. + + :return: -+ ''' ++ """ + print() -+ ret = aptpkg.info_installed('emacs', all_versions=True) ++ ret = aptpkg.info_installed("emacs", all_versions=True) + assert isinstance(ret, dict) -+ assert 'wget' in ret -+ assert isinstance(ret['wget'], list) ++ assert "wget" in ret ++ assert isinstance(ret["wget"], list) + -+ pkgs = ret['wget'] ++ pkgs = ret["wget"] + + assert len(pkgs) == 1 + assert isinstance(pkgs[0], dict) + + wget_pkg = pkgs[0] -+ expected_pkg = {'url': 'http://www.gnu.org/software/wget/', -+ 'packager': 'Ubuntu Developers ', 'name': 'wget', -+ 'install_date': '2016-08-30T22:20:15Z', 'description': 'retrieves files from the web', -+ 'version': '1.15-1ubuntu1.14.04.2', 'architecture': 'amd64', 'group': 'web', 'source': 'wget'} ++ expected_pkg = { ++ "url": "http://www.gnu.org/software/wget/", ++ "packager": "Ubuntu Developers ", ++ "name": "wget", ++ "install_date": "2016-08-30T22:20:15Z", ++ "description": "retrieves files from the web", ++ "version": "1.15-1ubuntu1.14.04.2", ++ "architecture": "amd64", ++ "group": "web", ++ "source": "wget", ++ } + for k in wget_pkg: + assert k in expected_pkg + assert wget_pkg[k] == expected_pkg[k] - -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_stdout': MagicMock(return_value='wget: /usr/bin/wget')}) ++ ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ {"cmd.run_stdout": MagicMock(return_value="wget: /usr/bin/wget")}, ++ ) def test_owner(self): - ''' + """ Test - Return the name of the package that owns the file. 
- ''' -- paths = ['/usr/bin/wget'] -- mock = MagicMock(return_value='wget: /usr/bin/wget') -- with patch.dict(aptpkg.__salt__, {'cmd.run_stdout': mock}): -- self.assertEqual(aptpkg.owner(*paths), 'wget') -+ assert aptpkg.owner('/usr/bin/wget') == 'wget' + """ +- paths = ["/usr/bin/wget"] +- mock = MagicMock(return_value="wget: /usr/bin/wget") +- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}): +- self.assertEqual(aptpkg.owner(*paths), "wget") ++ assert aptpkg.owner("/usr/bin/wget") == "wget" -+ @patch('salt.utils.pkg.clear_rtag', MagicMock()) -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, -+ 'stdout': APT_Q_UPDATE}), -+ 'config.get': MagicMock(return_value=False)}) ++ @patch("salt.utils.pkg.clear_rtag", MagicMock()) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ { ++ "cmd.run_all": MagicMock( ++ return_value={"retcode": 0, "stdout": APT_Q_UPDATE} ++ ), ++ "config.get": MagicMock(return_value=False), ++ }, ++ ) def test_refresh_db(self): - ''' + """ Test - Updates the APT database to latest packages based upon repositories. -@@ -301,6 +338,10 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(aptpkg.__salt__, {'cmd.run_all': mock, 'config.get': MagicMock(return_value=False)}): +@@ -308,6 +407,16 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): + ): self.assertEqual(aptpkg.refresh_db(), refresh_db) -+ @patch('salt.utils.pkg.clear_rtag', MagicMock()) -+ @patch('salt.modules.aptpkg.__salt__', {'cmd.run_all': MagicMock(return_value={'retcode': 0, -+ 'stdout': APT_Q_UPDATE_ERROR}), -+ 'config.get': MagicMock(return_value=False)}) ++ @patch("salt.utils.pkg.clear_rtag", MagicMock()) ++ @patch( ++ "salt.modules.aptpkg.__salt__", ++ { ++ "cmd.run_all": MagicMock( ++ return_value={"retcode": 0, "stdout": APT_Q_UPDATE_ERROR} ++ ), ++ "config.get": MagicMock(return_value=False), ++ }, ++ ) def test_refresh_db_failed(self): - ''' + """ Test - Update the APT database using unreachable repositories. -@@ -332,22 +373,24 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): +@@ -340,29 +449,33 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): assert aptpkg.autoremove(list_only=True) == [] assert aptpkg.autoremove(list_only=True, purge=True) == [] -+ @patch('salt.modules.aptpkg._uninstall', MagicMock(return_value=UNINSTALL)) +- def test_install(self): +- """ +- Test - Install packages. +- """ +- with patch("salt.modules.aptpkg.install", MagicMock(return_value=INSTALL)): +- self.assertEqual(aptpkg.install(name="tmux"), INSTALL) +- kwargs = {"force_conf_new": True} +- self.assertEqual(aptpkg.install(name="tmux", **kwargs), INSTALL) +- ++ @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)) def test_remove(self): - ''' + """ Test - Remove packages. - ''' -- with patch('salt.modules.aptpkg._uninstall', -- MagicMock(return_value=UNINSTALL)): -- self.assertEqual(aptpkg.remove(name='tmux'), UNINSTALL) -+ assert aptpkg.remove(name='tmux') == UNINSTALL + """ +- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)): +- self.assertEqual(aptpkg.remove(name="tmux"), UNINSTALL) ++ assert aptpkg.remove(name="tmux") == UNINSTALL -+ @patch('salt.modules.aptpkg._uninstall', MagicMock(return_value=UNINSTALL)) ++ @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)) def test_purge(self): - ''' + """ Test - Remove packages along with all configuration files. 
- ''' -- with patch('salt.modules.aptpkg._uninstall', -- MagicMock(return_value=UNINSTALL)): -- self.assertEqual(aptpkg.purge(name='tmux'), UNINSTALL) -+ assert aptpkg.purge(name='tmux') == UNINSTALL - -+ @patch('salt.utils.pkg.clear_rtag', MagicMock()) -+ @patch('salt.modules.aptpkg.list_pkgs', MagicMock(return_value=UNINSTALL)) -+ @patch.multiple(aptpkg, **{'__salt__': {'config.get': MagicMock(return_value=True), -+ 'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': UPGRADE})}}) + """ +- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)): +- self.assertEqual(aptpkg.purge(name="tmux"), UNINSTALL) +- ++ assert aptpkg.purge(name="tmux") == UNINSTALL ++ ++ @patch("salt.utils.pkg.clear_rtag", MagicMock()) ++ @patch("salt.modules.aptpkg.list_pkgs", MagicMock(return_value=UNINSTALL)) ++ @patch.multiple( ++ aptpkg, ++ **{ ++ "__salt__": { ++ "config.get": MagicMock(return_value=True), ++ "cmd.run_all": MagicMock( ++ return_value={"retcode": 0, "stdout": UPGRADE} ++ ), ++ } ++ } ++ ) def test_upgrade(self): - ''' + """ Test - Upgrades all packages. diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py -index 6c07a75417..a0b3346f9d 100644 +index 071c0f0742..160bbcd5b1 100644 --- a/tests/unit/modules/test_dpkg_lowpkg.py +++ b/tests/unit/modules/test_dpkg_lowpkg.py -@@ -23,6 +23,30 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - ''' - Test cases for salt.modules.dpkg - ''' +@@ -1,18 +1,12 @@ +-# -*- coding: utf-8 -*- + """ + :codeauthor: Jayesh Kariya + """ + +-# Import Python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import logging + import os + +-# Import Salt Libs + import salt.modules.dpkg_lowpkg as dpkg +- +-# Import Salt Testing Libs + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, patch + from tests.support.unit import TestCase +@@ -65,6 +59,51 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): + package = cmd[2] + return DPKG_L_OUTPUT[package] + + dselect_pkg = { -+ 'emacs': {'priority': 'optional', 'filename': 'pool/main/e/emacs-defaults/emacs_46.1_all.deb', -+ 'description': 'GNU Emacs editor (metapackage)', 'md5sum': '766eb2cee55ba0122dac64c4cea04445', -+ 'sha256': 'd172289b9a1608820eddad85c7ffc15f346a6e755c3120de0f64739c4bbc44ce', -+ 'description-md5': '21fb7da111336097a2378959f6d6e6a8', -+ 'bugs': 'https://bugs.launchpad.net/springfield/+filebug', -+ 'depends': 'emacs24 | emacs24-lucid | emacs24-nox', 'origin': 'Simpsons', 'version': '46.1', -+ 'task': 'ubuntu-usb, edubuntu-usb', 'original-maintainer': 'Homer Simpson ', -+ 'package': 'emacs', 'architecture': 'all', 'size': '1692', -+ 'sha1': '9271bcec53c1f7373902b1e594d9fc0359616407', 'source': 'emacs-defaults', -+ 'maintainer': 'Simpsons Developers ', 'supported': '9m', -+ 'section': 'editors', 'installed-size': '25'} ++ "emacs": { ++ "priority": "optional", ++ "filename": "pool/main/e/emacs-defaults/emacs_46.1_all.deb", ++ "description": "GNU Emacs editor (metapackage)", ++ "md5sum": "766eb2cee55ba0122dac64c4cea04445", ++ "sha256": "d172289b9a1608820eddad85c7ffc15f346a6e755c3120de0f64739c4bbc44ce", ++ "description-md5": "21fb7da111336097a2378959f6d6e6a8", ++ "bugs": "https://bugs.launchpad.net/springfield/+filebug", ++ "depends": "emacs24 | emacs24-lucid | emacs24-nox", ++ "origin": "Simpsons", ++ "version": "46.1", ++ "task": "ubuntu-usb, edubuntu-usb", ++ "original-maintainer": "Homer Simpson ", ++ "package": "emacs", ++ "architecture": "all", ++ 
"size": "1692", ++ "sha1": "9271bcec53c1f7373902b1e594d9fc0359616407", ++ "source": "emacs-defaults", ++ "maintainer": "Simpsons Developers ", ++ "supported": "9m", ++ "section": "editors", ++ "installed-size": "25", ++ } + } + + pkgs_info = [ -+ {'version': '46.1', 'arch': 'all', 'build_date': '2014-08-07T16:51:48Z', 'install_date_time_t': 1481745778, -+ 'section': 'editors', 'description': 'GNU Emacs editor (metapackage)\n GNU Emacs is the extensible ' -+ 'self-documenting text editor.\n This is a metapackage that will always ' -+ 'depend on the latest\n recommended Emacs release.\n', -+ 'package': 'emacs', 'source': 'emacs-defaults', -+ 'maintainer': 'Simpsons Developers ', -+ 'build_date_time_t': 1407430308, 'installed_size': '25', 'install_date': '2016-12-14T20:02:58Z'} ++ { ++ "version": "46.1", ++ "arch": "all", ++ "build_date": "2014-08-07T16:51:48Z", ++ "install_date_time_t": 1481745778, ++ "section": "editors", ++ "description": "GNU Emacs editor (metapackage)\n GNU Emacs is the extensible " ++ "self-documenting text editor.\n This is a metapackage that will always " ++ "depend on the latest\n recommended Emacs release.\n", ++ "package": "emacs", ++ "source": "emacs-defaults", ++ "maintainer": "Simpsons Developers ", ++ "build_date_time_t": 1407430308, ++ "installed_size": "25", ++ "install_date": "2016-12-14T20:02:58Z", ++ } + ] + def setup_loader_modules(self): return {dpkg: {}} -@@ -101,68 +125,47 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}): - self.assertEqual(dpkg.file_dict('httpd'), 'Error: error') +@@ -269,83 +308,71 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): + dpkg.bin_pkg_info("package.deb")["name"], "package_name" + ) -+ @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -+ @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -+ @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3')) ++ @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) ++ @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) ++ @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) def test_info(self): - ''' + """ - Test package info + Test info + :return: - ''' -- mock = MagicMock(return_value={'retcode': 0, -- 'stderr': '', -- 'stdout': -- os.linesep.join([ -- 'package:bash', -- 'revision:', -- 'architecture:amd64', -- 'maintainer:Ubuntu Developers ', -- 'summary:', -- 'source:bash', -- 'version:4.4.18-2ubuntu1', -- 'section:shells', -- 'installed_size:1588', -- 'size:', -- 'MD5:', -- 'SHA1:', -- 'SHA256:', -- 'origin:', -- 'homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html', -- 'status:ii ', -- '======', -- 'description:GNU Bourne Again SHell', -- ' Bash is an sh-compatible command language interpreter that executes', -- ' commands read from the standard input or from a file. 
Bash also', -- ' incorporates useful features from the Korn and C shells (ksh and csh).', -- ' .', -- ' Bash is ultimately intended to be a conformant implementation of the', -- ' IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).', -- ' .', -- ' The Programmable Completion Code, by Ian Macdonald, is now found in', -- ' the bash-completion package.', -- '------' -- ])}) -- -- with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}), \ -- patch.dict(dpkg.__grains__, {'os': 'Ubuntu', 'osrelease_info': (18, 4)}), \ -- patch('salt.utils.path.which', MagicMock(return_value=False)), \ -- patch('os.path.exists', MagicMock(return_value=False)),\ -- patch('os.path.getmtime', MagicMock(return_value=1560199259.0)): -- self.assertDictEqual(dpkg.info('bash'), -- {'bash': {'architecture': 'amd64', -- 'description': os.linesep.join([ -- 'GNU Bourne Again SHell', -- ' Bash is an sh-compatible command language interpreter that executes', -- ' commands read from the standard input or from a file. Bash also', -- ' incorporates useful features from the Korn and C shells (ksh and csh).', -- ' .', -- ' Bash is ultimately intended to be a conformant implementation of the', -- ' IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).', -- ' .', -- ' The Programmable Completion Code, by Ian Macdonald, is now found in', -- ' the bash-completion package.' + os.linesep -- ]), -- 'homepage': 'http://tiswww.case.edu/php/chet/bash/bashtop.html', -- 'maintainer': 'Ubuntu Developers ' -- '', -- 'package': 'bash', -- 'section': 'shells', -- 'source': 'bash', -- 'status': 'ii', -- 'version': '4.4.18-2ubuntu1'}}) -+ ret = dpkg.info('emacs') + """ +- mock = MagicMock( +- return_value={ +- "retcode": 0, +- "stderr": "", +- "stdout": os.linesep.join( +- [ +- "package:bash", +- "revision:", +- "architecture:amd64", +- "maintainer:Ubuntu Developers ", +- "summary:", +- "source:bash", +- "version:4.4.18-2ubuntu1", +- "section:shells", +- "installed_size:1588", +- "size:", +- "MD5:", +- "SHA1:", +- "SHA256:", +- "origin:", +- "homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html", +- "status:ii ", +- "======", +- "description:GNU Bourne Again SHell", +- " Bash is an sh-compatible command language interpreter that executes", +- " commands read from the standard input or from a file. 
Bash also", +- " incorporates useful features from the Korn and C shells (ksh and csh).", +- " .", +- " Bash is ultimately intended to be a conformant implementation of the", +- " IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).", +- " .", +- " The Programmable Completion Code, by Ian Macdonald, is now found in", +- " the bash-completion package.", +- "------", +- ] +- ), +- } ++ ret = dpkg.info("emacs") + + assert isinstance(ret, dict) + assert len(ret.keys()) == 1 -+ assert 'emacs' in ret ++ assert "emacs" in ret + -+ pkg_data = ret['emacs'] ++ pkg_data = ret["emacs"] + + assert isinstance(pkg_data, dict) -+ for pkg_section in ['section', 'architecture', 'original-maintainer', 'maintainer', 'package', 'installed-size', -+ 'build_date_time_t', 'sha256', 'origin', 'build_date', 'size', 'source', 'version', -+ 'install_date_time_t', 'license', 'priority', 'description', 'md5sum', 'supported', -+ 'filename', 'sha1', 'install_date', 'arch']: ++ for pkg_section in [ ++ "section", ++ "architecture", ++ "original-maintainer", ++ "maintainer", ++ "package", ++ "installed-size", ++ "build_date_time_t", ++ "sha256", ++ "origin", ++ "build_date", ++ "size", ++ "source", ++ "version", ++ "install_date_time_t", ++ "license", ++ "priority", ++ "description", ++ "md5sum", ++ "supported", ++ "filename", ++ "sha1", ++ "install_date", ++ "arch", ++ ]: + assert pkg_section in pkg_data + -+ assert pkg_data['section'] == 'editors' -+ assert pkg_data['maintainer'] == 'Simpsons Developers ' -+ assert pkg_data['license'] == 'BSD v3' -+ -+ @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -+ @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -+ @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3')) ++ assert pkg_data["section"] == "editors" ++ assert ( ++ pkg_data["maintainer"] ++ == "Simpsons Developers " + ) ++ assert pkg_data["license"] == "BSD v3" + +- with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}), patch.dict( +- dpkg.__grains__, {"os": "Ubuntu", "osrelease_info": (18, 4)} +- ), patch("salt.utils.path.which", MagicMock(return_value=False)), patch( +- "os.path.exists", MagicMock(return_value=False) +- ), patch( +- "os.path.getmtime", MagicMock(return_value=1560199259.0) +- ): +- self.assertDictEqual( +- dpkg.info("bash"), +- { +- "bash": { +- "architecture": "amd64", +- "description": os.linesep.join( +- [ +- "GNU Bourne Again SHell", +- " Bash is an sh-compatible command language interpreter that executes", +- " commands read from the standard input or from a file. Bash also", +- " incorporates useful features from the Korn and C shells (ksh and csh).", +- " .", +- " Bash is ultimately intended to be a conformant implementation of the", +- " IEEE POSIX Shell and Tools specification (IEEE Working Group 1003.2).", +- " .", +- " The Programmable Completion Code, by Ian Macdonald, is now found in", +- " the bash-completion package." 
+ os.linesep, +- ] +- ), +- "homepage": "http://tiswww.case.edu/php/chet/bash/bashtop.html", +- "maintainer": "Ubuntu Developers " +- "", +- "package": "bash", +- "section": "shells", +- "source": "bash", +- "status": "ii", +- "version": "4.4.18-2ubuntu1", +- } +- }, +- ) ++ @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) ++ @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) ++ @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) + def test_info_attr(self): -+ ''' ++ """ + Test info with 'attr' parameter + :return: -+ ''' -+ ret = dpkg.info('emacs', attr='arch,license,version') ++ """ ++ ret = dpkg.info("emacs", attr="arch,license,version") + assert isinstance(ret, dict) -+ assert 'emacs' in ret -+ for attr in ['arch', 'license', 'version']: -+ assert attr in ret['emacs'] ++ assert "emacs" in ret ++ for attr in ["arch", "license", "version"]: ++ assert attr in ret["emacs"] + -+ assert ret['emacs']['arch'] == 'all' -+ assert ret['emacs']['license'] == 'BSD v3' -+ assert ret['emacs']['version'] == '46.1' ++ assert ret["emacs"]["arch"] == "all" ++ assert ret["emacs"]["license"] == "BSD v3" ++ assert ret["emacs"]["version"] == "46.1" -- -2.16.4 +2.29.2 diff --git a/decide-if-the-source-should-be-actually-skipped.patch b/decide-if-the-source-should-be-actually-skipped.patch deleted file mode 100644 index 3f625c4..0000000 --- a/decide-if-the-source-should-be-actually-skipped.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 615a8f8dfa8ef12eeb4c387e48309cc466b8597d Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Tue, 4 Dec 2018 16:39:08 +0100 -Subject: [PATCH] Decide if the source should be actually skipped - ---- - salt/modules/aptpkg.py | 23 ++++++++++++++++++++++- - 1 file changed, 22 insertions(+), 1 deletion(-) - -diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 4ec9158476..3b0d8423db 100644 ---- a/salt/modules/aptpkg.py -+++ b/salt/modules/aptpkg.py -@@ -1620,6 +1620,27 @@ def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import - return ret - - -+def _skip_source(source): -+ ''' -+ Decide to skip source or not. 
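For context: the helper being removed here kept "invalid" APT source lines whose only offence was carrying bracketed options. A minimal sketch of that behaviour against python-apt (the sample line is hypothetical; aptsources ships with python-apt):

    from aptsources import sourceslist

    # Older python-apt flags option-carrying deb lines as invalid even
    # though they describe a perfectly usable repository; _skip_source
    # above answered False ("do not skip") for exactly this shape.
    entry = sourceslist.SourceEntry(
        "deb [arch=amd64] http://example.com/repo stable main")
    print(entry.invalid, entry.type, entry.uri)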
-+ -+ :param source: -+ :return: -+ ''' -+ if source.invalid: -+ if source.uri and source.type and source.type in ("deb", "deb-src", "rpm", "rpm-src"): -+ pieces = source.mysplit(source.line) -+ if pieces[1].strip()[0] == "[": -+ options = pieces.pop(1).strip("[]").split() -+ if len(options) > 0: -+ log.debug("Source %s will be included although is marked invalid", source.uri) -+ return False -+ return True -+ else: -+ return True -+ return False -+ -+ - def list_repos(): - ''' - Lists all repos in the sources.list (and sources.lists.d) files -@@ -1635,7 +1656,7 @@ def list_repos(): - repos = {} - sources = sourceslist.SourcesList() - for source in sources.list: -- if source.invalid: -+ if _skip_source(source): - continue - repo = {} - repo['file'] = source.file --- -2.16.4 - - diff --git a/do-not-break-repo-files-with-multiple-line-values-on.patch b/do-not-break-repo-files-with-multiple-line-values-on.patch index cf5bd69..5db42ff 100644 --- a/do-not-break-repo-files-with-multiple-line-values-on.patch +++ b/do-not-break-repo-files-with-multiple-line-values-on.patch @@ -1,4 +1,4 @@ -From f81a5b92d691c1d511a814f9344104dd37466bc3 Mon Sep 17 00:00:00 2001 +From e986ed8fc0d5da74374d9ded82e10c16fc984ca8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 29 May 2019 11:03:16 +0100 @@ -6,42 +6,45 @@ Subject: [PATCH] Do not break repo files with multiple line values on yumpkg (bsc#1135360) --- - tests/integration/modules/test_pkg.py | 48 +++++++++++++++++++++++++++++++++++ - 1 file changed, 48 insertions(+) + tests/integration/modules/test_pkg.py | 51 +++++++++++++++++++++++++++ + 1 file changed, 51 insertions(+) diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py -index e8374db2c0..61748f9477 100644 +index 7a720523da..e32013800d 100644 --- a/tests/integration/modules/test_pkg.py +++ b/tests/integration/modules/test_pkg.py -@@ -182,6 +182,54 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): +@@ -194,6 +194,57 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): if repo is not None: - self.run_function('pkg.del_repo', [repo]) + self.run_function("pkg.del_repo", [repo]) + def test_mod_del_repo_multiline_values(self): -+ ''' ++ """ + test modifying and deleting a software repository defined with multiline values -+ ''' -+ os_grain = self.run_function('grains.item', ['os'])['os'] ++ """ ++ os_grain = self.run_function("grains.item", ["os"])["os"] + repo = None + try: -+ if os_grain in ['CentOS', 'RedHat', 'SUSE']: -+ my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/' -+ expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/' -+ major_release = int( -+ self.run_function( -+ 'grains.item', -+ ['osmajorrelease'] -+ )['osmajorrelease'] ++ if os_grain in ["CentOS", "RedHat", "SUSE"]: ++ my_baseurl = ( ++ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" + ) -+ repo = 'fakerepo' -+ name = 'Fake repo for RHEL/CentOS/SUSE' ++ expected_get_repo_baseurl = ( ++ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" ++ ) ++ major_release = int( ++ self.run_function("grains.item", ["osmajorrelease"])[ ++ "osmajorrelease" ++ ] ++ ) ++ repo = "fakerepo" ++ name = "Fake repo for RHEL/CentOS/SUSE" + baseurl = my_baseurl -+ gpgkey = 'https://my.fake.repo/foo/bar/MY-GPG-KEY.pub' -+ failovermethod = 'priority' ++ gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub" ++ failovermethod = "priority" + gpgcheck = 1 + enabled = 1 + ret = 
self.run_function( -+ 'pkg.mod_repo', ++ "pkg.mod_repo", + [repo], + name=name, + baseurl=baseurl, @@ -55,20 +58,20 @@ index e8374db2c0..61748f9477 100644 + self.assertNotEqual(ret, {}) + repo_info = ret[next(iter(ret))] + self.assertIn(repo, repo_info) -+ self.assertEqual(repo_info[repo]['baseurl'], my_baseurl) -+ ret = self.run_function('pkg.get_repo', [repo]) -+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl) -+ self.run_function('pkg.mod_repo', [repo]) -+ ret = self.run_function('pkg.get_repo', [repo]) -+ self.assertEqual(ret['baseurl'], expected_get_repo_baseurl) ++ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl) ++ ret = self.run_function("pkg.get_repo", [repo]) ++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) ++ self.run_function("pkg.mod_repo", [repo]) ++ ret = self.run_function("pkg.get_repo", [repo]) ++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) + finally: + if repo is not None: -+ self.run_function('pkg.del_repo', [repo]) ++ self.run_function("pkg.del_repo", [repo]) + - @requires_salt_modules('pkg.owner') + @requires_salt_modules("pkg.owner") def test_owner(self): - ''' + """ -- -2.16.4 +2.29.2 diff --git a/do-not-crash-when-there-are-ipv6-established-connect.patch b/do-not-crash-when-there-are-ipv6-established-connect.patch index 5c10d80..2af9dca 100644 --- a/do-not-crash-when-there-are-ipv6-established-connect.patch +++ b/do-not-crash-when-there-are-ipv6-established-connect.patch @@ -1,4 +1,4 @@ -From bfee3a7c47786bb860663de97fca26725101f1d0 Mon Sep 17 00:00:00 2001 +From 998136ffd4c8442e0c3a7030af3d8196abec6be1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Tue, 7 May 2019 15:33:51 +0100 @@ -11,24 +11,24 @@ Add unit test for '_netlink_tool_remote_on' 1 file changed, 5 insertions(+) diff --git a/salt/utils/network.py b/salt/utils/network.py -index 2ae2e213b7..307cab885f 100644 +index dd7fceb91a..d253ded3ab 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -1442,8 +1442,13 @@ def _netlink_tool_remote_on(port, which_end): - elif 'ESTAB' not in line: +@@ -1623,8 +1623,13 @@ def _netlink_tool_remote_on(port, which_end): + elif "ESTAB" not in line: continue chunks = line.split() -+ local_host, local_port = chunks[3].rsplit(':', 1) - remote_host, remote_port = chunks[4].rsplit(':', 1) ++ local_host, local_port = chunks[3].rsplit(":", 1) + remote_host, remote_port = chunks[4].rsplit(":", 1) -+ if which_end == 'remote_port' and int(remote_port) != port: ++ if which_end == "remote_port" and int(remote_port) != port: + continue -+ if which_end == 'local_port' and int(local_port) != port: ++ if which_end == "local_port" and int(local_port) != port: + continue remotes.add(remote_host.strip("[]")) if valid is False: -- -2.23.0 +2.29.2 diff --git a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch index 2c00607..f4925d1 100644 --- a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch +++ b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch @@ -1,4 +1,4 @@ -From 3d5d89428ca333caa2c2259f679f8fffd7110ba6 Mon Sep 17 00:00:00 2001 +From 57f9da0bd7727c46eab866941fee46a3eaf8c8ea Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 21 Sep 2018 17:31:39 +0200 Subject: [PATCH] Do not load pip state if there is no 3rd party @@ -6,40 +6,355 @@ Subject: [PATCH] Do not load pip state if there is no 3rd party Safe import 3rd party dependency --- - salt/modules/pip.py | 12 ++++++++++-- - 1 file changed, 10 
insertions(+), 2 deletions(-) + salt/modules/pip.py | 93 ++++++++++++++++++++++++--------------------- + 1 file changed, 50 insertions(+), 43 deletions(-) diff --git a/salt/modules/pip.py b/salt/modules/pip.py -index 0a0773a8f4..f19593ed1a 100644 +index f7c101f6e4..742e0dd48a 100644 --- a/salt/modules/pip.py +++ b/salt/modules/pip.py -@@ -82,7 +82,10 @@ from __future__ import absolute_import, print_function, unicode_literals - # Import python libs +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + r""" + Install Python packages with pip to either the system or a virtualenv + +@@ -77,9 +76,7 @@ of the 2015.5 branch: + The issue is described here: https://github.com/saltstack/salt/issues/46163 + + """ +-from __future__ import absolute_import, print_function, unicode_literals + +-# Import python libs import logging import os --import pkg_resources + import re +@@ -89,7 +86,6 @@ import tempfile + + import pkg_resources # pylint: disable=3rd-party-module-not-gated + +-# Import Salt libs + import salt.utils.data + import salt.utils.files + import salt.utils.json +@@ -101,6 +97,12 @@ import salt.utils.versions + from salt.exceptions import CommandExecutionError, CommandNotFoundError + from salt.ext import six + +try: + import pkg_resources +except ImportError: + pkg_resources = None - import re - import shutil - import sys -@@ -121,7 +124,12 @@ def __virtual__(): ++ ++ + # This needs to be named logger so we don't shadow it in pip.install + logger = logging.getLogger(__name__) # pylint: disable=C0103 + +@@ -118,7 +120,12 @@ def __virtual__(): entire filesystem. If it's not installed in a conventional location, the user is required to provide the location of pip each time it is used. - ''' -- return 'pip' + """ +- return "pip" + if pkg_resources is None: + ret = False, 'Package dependency "pkg_resource" is missing' + else: -+ ret = 'pip' ++ ret = "pip" + + return ret def _pip_bin_env(cwd, bin_env): +@@ -140,7 +147,7 @@ def _clear_context(bin_env=None): + """ + contextkey = "pip.version" + if bin_env is not None: +- contextkey = "{0}.{1}".format(contextkey, bin_env) ++ contextkey = "{}.{}".format(contextkey, bin_env) + __context__.pop(contextkey, None) + + +@@ -196,7 +203,7 @@ def _get_pip_bin(bin_env): + bin_path, + ) + raise CommandNotFoundError( +- "Could not find a pip binary in virtualenv {0}".format(bin_env) ++ "Could not find a pip binary in virtualenv {}".format(bin_env) + ) + + # bin_env is the python or pip binary +@@ -209,11 +216,11 @@ def _get_pip_bin(bin_env): + return [os.path.normpath(bin_env)] + + raise CommandExecutionError( +- "Could not find a pip binary within {0}".format(bin_env) ++ "Could not find a pip binary within {}".format(bin_env) + ) + else: + raise CommandNotFoundError( +- "Access denied to {0}, could not find a pip binary".format(bin_env) ++ "Access denied to {}, could not find a pip binary".format(bin_env) + ) + + +@@ -283,7 +290,7 @@ def _resolve_requirements_chain(requirements): + + chain = [] + +- if isinstance(requirements, six.string_types): ++ if isinstance(requirements, str): + requirements = [requirements] + + for req_file in requirements: +@@ -300,7 +307,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user): + cleanup_requirements = [] + + if requirements is not None: +- if isinstance(requirements, six.string_types): ++ if isinstance(requirements, str): + requirements = [r.strip() for r in requirements.split(",")] + elif not isinstance(requirements, list): + raise TypeError("requirements must be a string or list") +@@ -314,7 +321,7 @@ def 
_process_requirements(requirements, cmd, cwd, saltenv, user): + if not cached_requirements: + ret = { + "result": False, +- "comment": "pip requirements file '{0}' not found".format( ++ "comment": "pip requirements file '{}' not found".format( + requirement + ), + } +@@ -412,15 +419,15 @@ def _format_env_vars(env_vars): + ret = {} + if env_vars: + if isinstance(env_vars, dict): +- for key, val in six.iteritems(env_vars): +- if not isinstance(key, six.string_types): ++ for key, val in env_vars.items(): ++ if not isinstance(key, str): + key = str(key) # future lint: disable=blacklisted-function +- if not isinstance(val, six.string_types): ++ if not isinstance(val, str): + val = str(val) # future lint: disable=blacklisted-function + ret[key] = val + else: + raise CommandExecutionError( +- "env_vars {0} is not a dictionary".format(env_vars) ++ "env_vars {} is not a dictionary".format(env_vars) + ) + return ret + +@@ -762,9 +769,9 @@ def install( + + if log: + if os.path.isdir(log): +- raise IOError("'{0}' is a directory. Use --log path_to_file".format(log)) ++ raise OSError("'{}' is a directory. Use --log path_to_file".format(log)) + elif not os.access(log, os.W_OK): +- raise IOError("'{0}' is not writeable".format(log)) ++ raise OSError("'{}' is not writeable".format(log)) + + cmd.extend(["--log", log]) + +@@ -790,12 +797,12 @@ def install( + int(timeout) + except ValueError: + raise ValueError( +- "'{0}' is not a valid timeout, must be an integer".format(timeout) ++ "'{}' is not a valid timeout, must be an integer".format(timeout) + ) + cmd.extend(["--timeout", timeout]) + + if find_links: +- if isinstance(find_links, six.string_types): ++ if isinstance(find_links, str): + find_links = [l.strip() for l in find_links.split(",")] + + for link in find_links: +@@ -803,7 +810,7 @@ def install( + salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link) + ): + raise CommandExecutionError( +- "'{0}' is not a valid URL or path".format(link) ++ "'{}' is not a valid URL or path".format(link) + ) + cmd.extend(["--find-links", link]) + +@@ -815,13 +822,13 @@ def install( + + if index_url: + if not salt.utils.url.validate(index_url, VALID_PROTOS): +- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url)) ++ raise CommandExecutionError("'{}' is not a valid URL".format(index_url)) + cmd.extend(["--index-url", index_url]) + + if extra_index_url: + if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): + raise CommandExecutionError( +- "'{0}' is not a valid URL".format(extra_index_url) ++ "'{}' is not a valid URL".format(extra_index_url) + ) + cmd.extend(["--extra-index-url", extra_index_url]) + +@@ -836,13 +843,13 @@ def install( + " use index_url and/or extra_index_url instead" + ) + +- if isinstance(mirrors, six.string_types): ++ if isinstance(mirrors, str): + mirrors = [m.strip() for m in mirrors.split(",")] + + cmd.append("--use-mirrors") + for mirror in mirrors: + if not mirror.startswith("http://"): +- raise CommandExecutionError("'{0}' is not a valid URL".format(mirror)) ++ raise CommandExecutionError("'{}' is not a valid URL".format(mirror)) + cmd.extend(["--mirrors", mirror]) + + if disable_version_check: +@@ -883,7 +890,7 @@ def install( + if exists_action.lower() not in ("s", "i", "w", "b"): + raise CommandExecutionError( + "The exists_action pip option only supports the values " +- "s, i, w, and b. '{0}' is not valid.".format(exists_action) ++ "s, i, w, and b. 
'{}' is not valid.".format(exists_action) + ) + cmd.extend(["--exists-action", exists_action]) + +@@ -911,14 +918,14 @@ def install( + cmd.extend(["--cert", cert]) + + if global_options: +- if isinstance(global_options, six.string_types): ++ if isinstance(global_options, str): + global_options = [go.strip() for go in global_options.split(",")] + + for opt in global_options: + cmd.extend(["--global-option", opt]) + + if install_options: +- if isinstance(install_options, six.string_types): ++ if isinstance(install_options, str): + install_options = [io.strip() for io in install_options.split(",")] + + for opt in install_options: +@@ -929,7 +936,7 @@ def install( + try: + pkgs = [p.strip() for p in pkgs.split(",")] + except AttributeError: +- pkgs = [p.strip() for p in six.text_type(pkgs).split(",")] ++ pkgs = [p.strip() for p in str(pkgs).split(",")] + pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs)) + + # It's possible we replaced version-range commas with semicolons so +@@ -945,7 +952,7 @@ def install( + + if editable: + egg_match = re.compile(r"(?:#|#.*?&)egg=([^&]*)") +- if isinstance(editable, six.string_types): ++ if isinstance(editable, str): + editable = [e.strip() for e in editable.split(",")] + + for entry in editable: +@@ -964,14 +971,14 @@ def install( + cmd.append("--allow-all-external") + + if allow_external: +- if isinstance(allow_external, six.string_types): ++ if isinstance(allow_external, str): + allow_external = [p.strip() for p in allow_external.split(",")] + + for pkg in allow_external: + cmd.extend(["--allow-external", pkg]) + + if allow_unverified: +- if isinstance(allow_unverified, six.string_types): ++ if isinstance(allow_unverified, str): + allow_unverified = [p.strip() for p in allow_unverified.split(",")] + + for pkg in allow_unverified: +@@ -1106,8 +1113,8 @@ def uninstall( + try: + # TODO make this check if writeable + os.path.exists(log) +- except IOError: +- raise IOError("'{0}' is not writeable".format(log)) ++ except OSError: ++ raise OSError("'{}' is not writeable".format(log)) + + cmd.extend(["--log", log]) + +@@ -1133,12 +1140,12 @@ def uninstall( + int(timeout) + except ValueError: + raise ValueError( +- "'{0}' is not a valid timeout, must be an integer".format(timeout) ++ "'{}' is not a valid timeout, must be an integer".format(timeout) + ) + cmd.extend(["--timeout", timeout]) + + if pkgs: +- if isinstance(pkgs, six.string_types): ++ if isinstance(pkgs, str): + pkgs = [p.strip() for p in pkgs.split(",")] + if requirements: + for requirement in requirements: +@@ -1323,7 +1330,7 @@ def version(bin_env=None, cwd=None, user=None): + cwd = _pip_bin_env(cwd, bin_env) + contextkey = "pip.version" + if bin_env is not None: +- contextkey = "{0}.{1}".format(contextkey, bin_env) ++ contextkey = "{}.{}".format(contextkey, bin_env) + + if contextkey in __context__: + return __context__[contextkey] +@@ -1402,7 +1409,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None): + if match: + name, version_ = match.groups() + else: +- logger.error("Can't parse line '{0}'".format(line)) ++ logger.error("Can't parse line '{}'".format(line)) + continue + packages[name] = version_ + +@@ -1414,7 +1421,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None): + raise CommandExecutionError("Invalid JSON", info=result) + + for pkg in pkgs: +- packages[pkg["name"]] = "{0} [{1}]".format( ++ packages[pkg["name"]] = "{} [{}]".format( + pkg["latest_version"], pkg["latest_filetype"] + ) + +@@ -1602,17 +1609,17 @@ def list_all_versions( + """ + cwd = 
_pip_bin_env(cwd, bin_env) + cmd = _get_pip_bin(bin_env) +- cmd.extend(["install", "{0}==versions".format(pkg)]) ++ cmd.extend(["install", "{}==versions".format(pkg)]) + + if index_url: + if not salt.utils.url.validate(index_url, VALID_PROTOS): +- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url)) ++ raise CommandExecutionError("'{}' is not a valid URL".format(index_url)) + cmd.extend(["--index-url", index_url]) + + if extra_index_url: + if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): + raise CommandExecutionError( +- "'{0}' is not a valid URL".format(extra_index_url) ++ "'{}' is not a valid URL".format(extra_index_url) + ) + cmd.extend(["--extra-index-url", extra_index_url]) + +@@ -1632,7 +1639,7 @@ def list_all_versions( + if not include_rc: + filtered.append("rc") + if filtered: +- excludes = re.compile(r"^((?!{0}).)*$".format("|".join(filtered))) ++ excludes = re.compile(r"^((?!{}).)*$".format("|".join(filtered))) + else: + excludes = re.compile(r"") + -- -2.16.4 +2.29.2 diff --git a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch b/do-not-make-ansiblegate-to-crash-on-python3-minions.patch index 6725766..b3a8df1 100644 --- a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch +++ b/do-not-make-ansiblegate-to-crash-on-python3-minions.patch @@ -1,4 +1,4 @@ -From 235cca81be2f64ed3feb48ed42bfa3f9196bff39 Mon Sep 17 00:00:00 2001 +From 5d465a5b392efa1b4df7870161b32e0125efa4af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Fri, 28 Jun 2019 15:17:56 +0100 @@ -10,77 +10,315 @@ Move MockTimedProc implementation to tests.support.mock Add unit test for ansible caller --- - salt/modules/ansiblegate.py | 14 +++++++++--- - tests/support/mock.py | 31 +++++++++++++++++++++++++ - tests/unit/modules/test_ansiblegate.py | 41 ++++++++++++++++++++++++++++++++++ - tests/unit/modules/test_cmdmod.py | 35 ++--------------------------- - 4 files changed, 85 insertions(+), 36 deletions(-) + salt/modules/ansiblegate.py | 7 +- + tests/support/mock.py | 128 +++++++++------- + tests/unit/modules/test_ansiblegate.py | 201 +++++++++++++++++++++++++ + tests/unit/modules/test_cmdmod.py | 1 + + 4 files changed, 280 insertions(+), 57 deletions(-) + create mode 100644 tests/unit/modules/test_ansiblegate.py diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py -index 6b903c2b94..8e28fcafa3 100644 +index 0279a26017..5d4b986ec2 100644 --- a/salt/modules/ansiblegate.py +++ b/salt/modules/ansiblegate.py -@@ -147,6 +147,10 @@ class AnsibleModuleCaller(object): +@@ -160,6 +160,7 @@ class AnsibleModuleCaller: :param kwargs: keywords to the module :return: - ''' -+ if six.PY3: -+ python_exec = 'python3' -+ else: -+ python_exec = 'python' + """ ++ python_exec = "python3" module = self._resolver.load_module(module) - if not hasattr(module, 'main'): -@@ -162,9 +166,13 @@ class AnsibleModuleCaller(object): - ["echo", "{0}".format(js_args)], - stdout=subprocess.PIPE, timeout=self.timeout) + if not hasattr(module, "main"): +@@ -182,9 +183,9 @@ class AnsibleModuleCaller: + timeout=self.timeout, + ) proc_out.run() -+ if six.PY3: -+ proc_out_stdout = proc_out.stdout.decode() -+ else: -+ proc_out_stdout = proc_out.stdout +- proc_out_stdout = salt.utils.stringutils.to_str(proc_out.stdout) ++ proc_out_stdout = proc_out.stdout.decode() proc_exc = salt.utils.timed_subprocess.TimedProc( -- ['python', module.__file__], -- stdin=proc_out.stdout, stdout=subprocess.PIPE, timeout=self.timeout) +- [sys.executable, module.__file__], + 
[python_exec, module.__file__], -+ stdin=proc_out_stdout, stdout=subprocess.PIPE, timeout=self.timeout) - proc_exc.run() - - try: -@@ -263,7 +271,7 @@ def help(module=None, *args): - description = doc.get('description') or '' - del doc['description'] - ret['Description'] = description -- ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys() -+ ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = [i for i in doc.keys()] + stdin=proc_out_stdout, + stdout=subprocess.PIPE, + timeout=self.timeout, +@@ -298,7 +299,7 @@ def help(module=None, *args): + 'Available sections on module "{}"'.format( + module.__name__.replace("ansible.modules.", "") + ) +- ] = list(doc) ++ ] = [i for i in doc.keys()] else: for arg in args: info = doc.get(arg) diff --git a/tests/support/mock.py b/tests/support/mock.py -index 805a60377c..67ecb4838a 100644 +index 7ef02e0701..87d052c399 100644 --- a/tests/support/mock.py +++ b/tests/support/mock.py -@@ -461,6 +461,37 @@ class MockOpen(object): +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + """ + :codeauthor: Pedro Algarvio (pedro@algarvio.me) + +@@ -14,7 +13,6 @@ + """ + # pylint: disable=unused-import,function-redefined,blacklisted-module,blacklisted-external-module + +-from __future__ import absolute_import + + import collections + import copy +@@ -42,8 +40,6 @@ from mock import ( + patch, + sentinel, + ) +- +-# Import salt libs + from salt.ext import six + + # pylint: disable=no-name-in-module,no-member +@@ -57,7 +53,7 @@ if sys.version_info < (3, 6) and __mock_version < (2,): + raise ImportError("Please install mock>=2.0.0") + + +-class MockFH(object): ++class MockFH: + def __init__(self, filename, read_data, *args, **kwargs): + self.filename = filename + self.read_data = read_data +@@ -89,7 +85,7 @@ class MockFH(object): + """ + # Newline will always be a bytestring on PY2 because mock_open will have + # normalized it to one. +- newline = b"\n" if isinstance(read_data, six.binary_type) else "\n" ++ newline = b"\n" if isinstance(read_data, bytes) else "\n" + + read_data = [line + newline for line in read_data.split(newline)] + +@@ -103,8 +99,7 @@ class MockFH(object): + # newline that we added in the list comprehension. 
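A standalone illustration of the newline-preserving split used in this helper (plain Python; the sample data is made up):

    read_data = "one\ntwo\nthree"  # note: no trailing newline
    newline = "\n"
    lines = [line + newline for line in read_data.split(newline)]
    # split() dropped every separator, so the comprehension re-attached
    # one per element; the final element never had one, so trim it back.
    lines[-1] = lines[-1][:-1]
    assert lines == ["one\n", "two\n", "three"]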
+ read_data[-1] = read_data[-1][:-1] + +- for line in read_data: +- yield line ++ yield from read_data + + @property + def write_calls(self): +@@ -126,18 +121,18 @@ class MockFH(object): + def __check_read_data(self): + if not self.__read_data_ok: + if self.binary_mode: +- if not isinstance(self.read_data, six.binary_type): ++ if not isinstance(self.read_data, bytes): + raise TypeError( +- "{0} opened in binary mode, expected read_data to be " +- "bytes, not {1}".format( ++ "{} opened in binary mode, expected read_data to be " ++ "bytes, not {}".format( + self.filename, type(self.read_data).__name__ + ) + ) + else: + if not isinstance(self.read_data, str): + raise TypeError( +- "{0} opened in non-binary mode, expected read_data to " +- "be str, not {1}".format( ++ "{} opened in non-binary mode, expected read_data to " ++ "be str, not {}".format( + self.filename, type(self.read_data).__name__ + ) + ) +@@ -147,8 +142,8 @@ class MockFH(object): + def _read(self, size=0): + self.__check_read_data() + if not self.read_mode: +- raise IOError("File not open for reading") +- if not isinstance(size, six.integer_types) or size < 0: ++ raise OSError("File not open for reading") ++ if not isinstance(size, int) or size < 0: + raise TypeError("a positive integer is required") + + joined = self.empty_string.join(self.read_data_iter) +@@ -169,7 +164,7 @@ class MockFH(object): + # TODO: Implement "size" argument + self.__check_read_data() + if not self.read_mode: +- raise IOError("File not open for reading") ++ raise OSError("File not open for reading") + ret = list(self.read_data_iter) + self.__loc += sum(len(x) for x in ret) + return ret +@@ -178,7 +173,7 @@ class MockFH(object): + # TODO: Implement "size" argument + self.__check_read_data() + if not self.read_mode: +- raise IOError("File not open for reading") ++ raise OSError("File not open for reading") + try: + ret = next(self.read_data_iter) + self.__loc += len(ret) +@@ -189,7 +184,7 @@ class MockFH(object): + def __iter__(self): + self.__check_read_data() + if not self.read_mode: +- raise IOError("File not open for reading") ++ raise OSError("File not open for reading") + while True: + try: + ret = next(self.read_data_iter) +@@ -200,30 +195,22 @@ class MockFH(object): + + def _write(self, content): + if not self.write_mode: +- raise IOError("File not open for writing") +- if six.PY2: +- if isinstance(content, six.text_type): +- # encoding intentionally not specified to force a +- # UnicodeEncodeError when non-ascii unicode type is passed +- content.encode() +- else: +- content_type = type(content) +- if self.binary_mode and content_type is not bytes: +- raise TypeError( +- "a bytes-like object is required, not '{0}'".format( +- content_type.__name__ +- ) +- ) +- elif not self.binary_mode and content_type is not str: +- raise TypeError( +- "write() argument must be str, not {0}".format( +- content_type.__name__ +- ) ++ raise OSError("File not open for writing") ++ content_type = type(content) ++ if self.binary_mode and content_type is not bytes: ++ raise TypeError( ++ "a bytes-like object is required, not '{}'".format( ++ content_type.__name__ + ) ++ ) ++ elif not self.binary_mode and content_type is not str: ++ raise TypeError( ++ "write() argument must be str, not {}".format(content_type.__name__) ++ ) + + def _writelines(self, lines): + if not self.write_mode: +- raise IOError("File not open for writing") ++ raise OSError("File not open for writing") + for line in lines: + self._write(line) + +@@ -234,26 +221,24 @@ class MockFH(object): + pass + 
+ +-class MockCall(object): ++class MockCall: + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + def __repr__(self): + # future lint: disable=blacklisted-function +- ret = str("MockCall(") ++ ret = "MockCall(" + for arg in self.args: +- ret += repr(arg) + str(", ") ++ ret += repr(arg) + ", " + if not self.kwargs: + if self.args: + # Remove trailing ', ' + ret = ret[:-2] + else: +- for key, val in six.iteritems(self.kwargs): +- ret += str("{0}={1}").format( +- salt.utils.stringutils.to_str(key), repr(val) +- ) +- ret += str(")") ++ for key, val in self.kwargs.items(): ++ ret += "{}={}".format(salt.utils.stringutils.to_str(key), repr(val)) ++ ret += ")" + return ret + # future lint: enable=blacklisted-function + +@@ -264,7 +249,7 @@ class MockCall(object): + return self.args == other.args and self.kwargs == other.kwargs + + +-class MockOpen(object): ++class MockOpen: + r''' + This class can be used to mock the use of ``open()``. + +@@ -379,7 +364,7 @@ class MockOpen(object): + # .__class__() used here to preserve the dict class in the event that + # an OrderedDict was used. + new_read_data = read_data.__class__() +- for key, val in six.iteritems(read_data): ++ for key, val in read_data.items(): + try: + val = salt.utils.data.decode(val, to_str=True) + except TypeError: +@@ -424,7 +409,7 @@ class MockOpen(object): + except IndexError: + # We've run out of file contents, abort! + raise RuntimeError( +- "File matching expression '{0}' opened more times than " ++ "File matching expression '{}' opened more times than " + "expected".format(matched_pattern) + ) + +@@ -443,7 +428,7 @@ class MockOpen(object): + except KeyError: + # No matching glob in read_data, treat this as a file that does + # not exist and raise the appropriate exception. +- raise IOError(errno.ENOENT, "No such file or directory", name) ++ raise OSError(errno.ENOENT, "No such file or directory", name) + + def write_calls(self, path=None): + """ +@@ -451,7 +436,7 @@ class MockOpen(object): + the results to files matching a given pattern. + """ + ret = [] +- for filename, handles in six.iteritems(self.filehandles): ++ for filename, handles in self.filehandles.items(): + if path is None or fnmatch.fnmatch(filename, path): + for fh_ in handles: + ret.extend(fh_.write_calls) +@@ -463,19 +448,54 @@ class MockOpen(object): + narrow the results to files matching a given pattern. 
+ """ + ret = [] +- for filename, handles in six.iteritems(self.filehandles): ++ for filename, handles in self.filehandles.items(): + if path is None or fnmatch.fnmatch(filename, path): + for fh_ in handles: ret.extend(fh_.writelines_calls) return ret -+class MockTimedProc(object): -+ ''' + +-class MockTimedProc(object): ++class MockTimedProc: ++ """ + Class used as a stand-in for salt.utils.timed_subprocess.TimedProc -+ ''' -+ class _Process(object): -+ ''' ++ """ ++ ++ class _Process: ++ """ + Used to provide a dummy "process" attribute -+ ''' ++ """ ++ + def __init__(self, returncode=0, pid=12345): + self.returncode = returncode + self.pid = pid + + def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345): + if stdout is not None and not isinstance(stdout, bytes): -+ raise TypeError('Must pass stdout to MockTimedProc as bytes') ++ raise TypeError("Must pass stdout to MockTimedProc as bytes") + if stderr is not None and not isinstance(stderr, bytes): -+ raise TypeError('Must pass stderr to MockTimedProc as bytes') ++ raise TypeError("Must pass stderr to MockTimedProc as bytes") + self._stdout = stdout + self._stderr = stderr + self.process = self._Process(returncode=returncode, pid=pid) @@ -95,124 +333,238 @@ index 805a60377c..67ecb4838a 100644 + @property + def stderr(self): + return self._stderr ++ ++ ++class MockTimedProc: + """ + Class used as a stand-in for salt.utils.timed_subprocess.TimedProc + """ - # reimplement mock_open to support multiple filehandles - mock_open = MockOpen +- class _Process(object): ++ class _Process: + """ + Used to provide a dummy "process" attribute + """ diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py -index 5613a0e79b..b7b43efda4 100644 ---- a/tests/unit/modules/test_ansiblegate.py +new file mode 100644 +index 0000000000..61aad44b5c +--- /dev/null +++ b/tests/unit/modules/test_ansiblegate.py -@@ -29,11 +29,13 @@ from tests.support.unit import TestCase, skipIf - from tests.support.mock import ( - patch, - MagicMock, -+ MockTimedProc, - ) - - import salt.modules.ansiblegate as ansible - import salt.utils.platform - from salt.exceptions import LoaderError +@@ -0,0 +1,201 @@ ++# ++# Author: Bo Maryniuk ++# ++# Copyright 2017 SUSE LLC ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. 
++ ++import os ++ ++import salt.modules.ansiblegate as ansible ++import salt.utils.platform ++from salt.exceptions import LoaderError +from salt.ext import six - - - @skipIf(NO_PYTEST, False) -@@ -134,3 +136,42 @@ description: - ''' - with patch('salt.modules.ansiblegate.ansible', None): - assert ansible.__virtual__() == 'ansible' ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import MagicMock, MockTimedProc, patch ++from tests.support.unit import TestCase, skipIf ++ ++try: ++ import pytest ++except ImportError as import_error: ++ pytest = None ++NO_PYTEST = not bool(pytest) ++ ++ ++@skipIf(NO_PYTEST, False) ++@skipIf(salt.utils.platform.is_windows(), "Not supported on Windows") ++class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): ++ def setUp(self): ++ self.resolver = ansible.AnsibleModuleResolver({}) ++ self.resolver._modules_map = { ++ "one.two.three": os.sep + os.path.join("one", "two", "three.py"), ++ "four.five.six": os.sep + os.path.join("four", "five", "six.py"), ++ "three.six.one": os.sep + os.path.join("three", "six", "one.py"), ++ } ++ ++ def tearDown(self): ++ self.resolver = None ++ ++ def setup_loader_modules(self): ++ return {ansible: {}} ++ ++ def test_ansible_module_help(self): ++ """ ++ Test help extraction from the module ++ :return: ++ """ ++ ++ class Module: ++ """ ++ An ansible module mock. ++ """ ++ ++ __name__ = "foo" ++ DOCUMENTATION = """ ++--- ++one: ++ text here ++--- ++two: ++ text here ++description: ++ describe the second part ++ """ ++ ++ with patch.object(ansible, "_resolver", self.resolver), patch.object( ++ ansible._resolver, "load_module", MagicMock(return_value=Module()) ++ ): ++ ret = ansible.help("dummy") ++ assert sorted( ++ ret.get('Available sections on module "{}"'.format(Module().__name__)) ++ ) == ["one", "two"] ++ assert ret.get("Description") == "describe the second part" ++ ++ def test_module_resolver_modlist(self): ++ """ ++ Test Ansible resolver modules list. ++ :return: ++ """ ++ assert self.resolver.get_modules_list() == [ ++ "four.five.six", ++ "one.two.three", ++ "three.six.one", ++ ] ++ for ptr in ["five", "fi", "ve"]: ++ assert self.resolver.get_modules_list(ptr) == ["four.five.six"] ++ for ptr in ["si", "ix", "six"]: ++ assert self.resolver.get_modules_list(ptr) == [ ++ "four.five.six", ++ "three.six.one", ++ ] ++ assert self.resolver.get_modules_list("one") == [ ++ "one.two.three", ++ "three.six.one", ++ ] ++ assert self.resolver.get_modules_list("one.two") == ["one.two.three"] ++ assert self.resolver.get_modules_list("four") == ["four.five.six"] ++ ++ def test_resolver_module_loader_failure(self): ++ """ ++ Test Ansible module loader. ++ :return: ++ """ ++ mod = "four.five.six" ++ with pytest.raises(ImportError) as import_error: ++ self.resolver.load_module(mod) ++ ++ mod = "i.even.do.not.exist.at.all" ++ with pytest.raises(LoaderError) as loader_error: ++ self.resolver.load_module(mod) ++ ++ def test_resolver_module_loader(self): ++ """ ++ Test Ansible module loader. ++ :return: ++ """ ++ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch( ++ "salt.modules.ansiblegate.importlib.import_module", lambda x: x ++ ): ++ assert ( ++ self.resolver.load_module("four.five.six") ++ == "ansible.modules.four.five.six" ++ ) ++ ++ def test_resolver_module_loader_import_failure(self): ++ """ ++ Test Ansible module loader failure. 
++ :return: ++ """ ++ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch( ++ "salt.modules.ansiblegate.importlib.import_module", lambda x: x ++ ): ++ with pytest.raises(LoaderError) as loader_error: ++ self.resolver.load_module("something.strange") ++ ++ def test_virtual_function(self): ++ """ ++ Test Ansible module __virtual__ when ansible is not installed on the minion. ++ :return: ++ """ ++ with patch("salt.modules.ansiblegate.ansible", None): ++ assert ansible.__virtual__() == "ansible" + + def test_ansible_module_call(self): -+ ''' ++ """ + Test Ansible module call from ansible gate module + + :return: -+ ''' ++ """ + -+ class Module(object): -+ ''' ++ class Module: ++ """ + An ansible module mock. -+ ''' -+ __name__ = 'one.two.three' -+ __file__ = 'foofile' ++ """ ++ ++ __name__ = "one.two.three" ++ __file__ = "foofile" + + def main(): + pass + + ANSIBLE_MODULE_ARGS = '{"ANSIBLE_MODULE_ARGS": ["arg_1", {"kwarg1": "foobar"}]}' + -+ proc = MagicMock(side_effect=[ -+ MockTimedProc( -+ stdout=ANSIBLE_MODULE_ARGS.encode(), -+ stderr=None), -+ MockTimedProc(stdout='{"completed": true}'.encode(), stderr=None) -+ ]) ++ proc = MagicMock( ++ side_effect=[ ++ MockTimedProc(stdout=ANSIBLE_MODULE_ARGS.encode(), stderr=None), ++ MockTimedProc(stdout=b'{"completed": true}', stderr=None), ++ ] ++ ) + -+ with patch.object(ansible, '_resolver', self.resolver), \ -+ patch.object(ansible._resolver, 'load_module', MagicMock(return_value=Module())): ++ with patch.object(ansible, "_resolver", self.resolver), patch.object( ++ ansible._resolver, "load_module", MagicMock(return_value=Module()) ++ ): + _ansible_module_caller = ansible.AnsibleModuleCaller(ansible._resolver) -+ with patch('salt.utils.timed_subprocess.TimedProc', proc): -+ ret = _ansible_module_caller.call("one.two.three", "arg_1", kwarg1="foobar") -+ if six.PY3: -+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}'], stdout=-1, timeout=1200) -+ proc.assert_any_call(['python3', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200) -+ else: -+ proc.assert_any_call(['echo', '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1", "kwarg1": "foobar"}}'], stdout=-1, timeout=1200) -+ proc.assert_any_call(['python', 'foofile'], stdin=ANSIBLE_MODULE_ARGS, stdout=-1, timeout=1200) ++ with patch("salt.utils.timed_subprocess.TimedProc", proc): ++ ret = _ansible_module_caller.call( ++ "one.two.three", "arg_1", kwarg1="foobar" ++ ) ++ proc.assert_any_call( ++ [ ++ "echo", ++ '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}', ++ ], ++ stdout=-1, ++ timeout=1200, ++ ) ++ proc.assert_any_call( ++ ["python3", "foofile"], ++ stdin=ANSIBLE_MODULE_ARGS, ++ stdout=-1, ++ timeout=1200, ++ ) + assert ret == {"completed": True, "timeout": 1200} diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py -index 8170a56b4e..f8fba59294 100644 +index 15b97f8568..f3348bc379 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py -@@ -26,6 +26,7 @@ from tests.support.helpers import TstSuiteLoggingHandler - from tests.support.mock import ( - mock_open, - Mock, -+ MockTimedProc, - MagicMock, - patch - ) -@@ -36,39 +37,7 @@ MOCK_SHELL_FILE = '# List of acceptable shells\n' \ - '/bin/bash\n' +@@ -24,6 +24,7 @@ DEFAULT_SHELL = "foo/bar" + MOCK_SHELL_FILE = "# List of acceptable shells\n" "\n" "/bin/bash\n" --class MockTimedProc(object): -- ''' -- Class used as a stand-in for salt.utils.timed_subprocess.TimedProc -- ''' -- class 
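Taken together, the caller change in this patch boils down to a two-step pipeline; roughly (a sketch — the module path and JSON payload are hypothetical):

    import subprocess
    import salt.utils.timed_subprocess

    js_args = '{"ANSIBLE_MODULE_ARGS": {"_raw_params": "arg_1"}}'
    proc_out = salt.utils.timed_subprocess.TimedProc(
        ["echo", js_args], stdout=subprocess.PIPE, timeout=1200)
    proc_out.run()
    # On Python 3 minions stdout arrives as bytes, hence the decode
    # before handing it to the module process on stdin.
    proc_exc = salt.utils.timed_subprocess.TimedProc(
        ["python3", "/path/to/ansible_module.py"],  # hypothetical path
        stdin=proc_out.stdout.decode(), stdout=subprocess.PIPE, timeout=1200)
    proc_exc.run()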
_Process(object): -- ''' -- Used to provide a dummy "process" attribute -- ''' -- def __init__(self, returncode=0, pid=12345): -- self.returncode = returncode -- self.pid = pid -- -- def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345): -- if stdout is not None and not isinstance(stdout, bytes): -- raise TypeError('Must pass stdout to MockTimedProc as bytes') -- if stderr is not None and not isinstance(stderr, bytes): -- raise TypeError('Must pass stderr to MockTimedProc as bytes') -- self._stdout = stdout -- self._stderr = stderr -- self.process = self._Process(returncode=returncode, pid=pid) -- -- def run(self): -- pass -- -- @property -- def stdout(self): -- return self._stdout -- -- @property -- def stderr(self): -- return self._stderr -- -- +@skipIf(NO_MOCK, NO_MOCK_REASON) class CMDMODTestCase(TestCase, LoaderModuleMockMixin): - ''' + """ Unit tests for the salt.modules.cmdmod module -- -2.16.4 +2.29.2 diff --git a/do-not-raise-streamclosederror-traceback-but-only-lo.patch b/do-not-raise-streamclosederror-traceback-but-only-lo.patch index bf180aa..b9cba9d 100644 --- a/do-not-raise-streamclosederror-traceback-but-only-lo.patch +++ b/do-not-raise-streamclosederror-traceback-but-only-lo.patch @@ -1,4 +1,4 @@ -From b651c2cd8b719a72e66b63afd9061739624763e1 Mon Sep 17 00:00:00 2001 +From 81d0105b0c0464c375070ffbc863a020a67e7965 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 26 Aug 2020 10:24:58 +0100 @@ -10,18 +10,18 @@ Subject: [PATCH] Do not raise StreamClosedError traceback but only log 1 file changed, 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py -index 33ee3d4182..624eca5a9c 100644 +index f411907da2..5ff0956dde 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py -@@ -667,7 +667,6 @@ class IPCMessageSubscriber(IPCClient): +@@ -688,7 +688,6 @@ class IPCMessageSubscriber(IPCClient): except StreamClosedError as exc: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) + log.trace("Subscriber disconnected from IPC %s", self.socket_path) self._read_stream_future = None - exc_to_raise = exc except Exception as exc: # pylint: disable=broad-except - log.error('Exception occurred in Subscriber while handling stream: %s', exc) + log.error("Exception occurred in Subscriber while handling stream: %s", exc) self._read_stream_future = None -- -2.28.0 +2.29.2 diff --git a/do-not-report-patches-as-installed-when-not-all-the-.patch b/do-not-report-patches-as-installed-when-not-all-the-.patch deleted file mode 100644 index aaa0af0..0000000 --- a/do-not-report-patches-as-installed-when-not-all-the-.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 7e9adda8dfd53050756d0ac0cf64570b76ce7365 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 13 Mar 2019 16:14:07 +0000 -Subject: [PATCH] Do not report patches as installed when not all the - related packages are installed (bsc#1128061) - -Co-authored-by: Mihai Dinca ---- - salt/modules/yumpkg.py | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index b1257d0de0..3ddf989511 100644 ---- a/salt/modules/yumpkg.py -+++ b/salt/modules/yumpkg.py -@@ -3220,7 +3220,11 @@ def _get_patches(installed_only=False): - for line in salt.utils.itertools.split(ret, os.linesep): - inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)', - line).groups() -+<<<<<<< HEAD - if advisory_id not in patches: -+======= -+ if not advisory_id in patches: 
-+>>>>>>> Do not report patches as installed when not all the related packages are installed (bsc#1128061) - patches[advisory_id] = { - 'installed': True if inst == 'i' else False, - 'summary': [pkg] --- -2.16.4 - - diff --git a/don-t-call-zypper-with-more-than-one-no-refresh.patch b/don-t-call-zypper-with-more-than-one-no-refresh.patch index 0929110..819bd79 100644 --- a/don-t-call-zypper-with-more-than-one-no-refresh.patch +++ b/don-t-call-zypper-with-more-than-one-no-refresh.patch @@ -1,4 +1,4 @@ -From c1f5e6332bf025394b81868bf1edc6ae44944a7c Mon Sep 17 00:00:00 2001 +From 421988aea296ced1f8c63cfa4b517b25eedfb00c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 29 Jan 2019 09:44:03 +0100 Subject: [PATCH] Don't call zypper with more than one --no-refresh @@ -11,32 +11,32 @@ passed twice. Make sure we won't hit this. 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 04a6a6872d..37428cf67c 100644 +index 6fa6e3e0a1..dfaaf420a1 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -282,7 +282,7 @@ class _Zypper(object): +@@ -300,7 +300,7 @@ class _Zypper: self.__called = True if self.__xml: - self.__cmd.append('--xmlout') + self.__cmd.append("--xmlout") - if not self.__refresh: -+ if not self.__refresh and '--no-refresh' not in args: - self.__cmd.append('--no-refresh') - - self.__cmd.extend(args) ++ if not self.__refresh and "--no-refresh" not in args: + self.__cmd.append("--no-refresh") + if self.__root: + self.__cmd.extend(["--root", self.__root]) diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index b3162f10cd..956902eab3 100644 +index 7bff7065c6..b07f9a3af7 100644 --- a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -135,7 +135,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(zypper.__zypper__.call('foo'), stdout_xml_snippet) +@@ -136,7 +136,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + self.assertEqual(zypper.__zypper__.call("foo"), stdout_xml_snippet) self.assertEqual(len(sniffer.calls), 1) -- zypper.__zypper__.call('bar') -+ zypper.__zypper__.call('--no-refresh', 'bar') +- zypper.__zypper__.call("bar") ++ zypper.__zypper__.call("--no-refresh", "bar") self.assertEqual(len(sniffer.calls), 2) - self.assertEqual(sniffer.calls[0]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'foo']) - self.assertEqual(sniffer.calls[1]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'bar']) + self.assertEqual( + sniffer.calls[0]["args"][0], -- -2.16.4 +2.29.2 diff --git a/drop-wrong-mock-from-chroot-unit-test.patch b/drop-wrong-mock-from-chroot-unit-test.patch index 7f56f14..e9eb834 100644 --- a/drop-wrong-mock-from-chroot-unit-test.patch +++ b/drop-wrong-mock-from-chroot-unit-test.patch @@ -1,4 +1,4 @@ -From e2c3b1cb72b796fe12f94af64baa2e64cbe5db0b Mon Sep 17 00:00:00 2001 +From 3dc61b426cee5c40976ee25a0357fd07244a630b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Tue, 13 Oct 2020 12:02:00 +0100 @@ -9,11 +9,11 @@ Subject: [PATCH] Drop wrong mock from chroot unit test 1 file changed, 1 deletion(-) diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py -index 62808ed680..045d56c5b0 100644 +index 196e3ad27f..a0f3f8e6af 100644 --- a/tests/unit/modules/test_chroot.py +++ b/tests/unit/modules/test_chroot.py -@@ -83,7 +83,6 @@ class ChrootTestCase(TestCase, 
LoaderModuleMockMixin): - self.assertTrue(chroot.create('/chroot')) +@@ -71,7 +71,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin): + self.assertTrue(chroot.create("/chroot")) makedirs.assert_called() - @patch("salt.modules.chroot.exist") @@ -21,6 +21,6 @@ index 62808ed680..045d56c5b0 100644 def test_in_chroot(self, fopen): """ -- -2.28.0 +2.29.2 diff --git a/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch b/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch new file mode 100644 index 0000000..fd02a36 --- /dev/null +++ b/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch @@ -0,0 +1,99 @@ +From 79ae019ac7515614c6fbc620e66575f015bc447d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= + +Date: Tue, 5 Jan 2021 09:34:45 +0000 +Subject: [PATCH] Drop wrong virt capabilities code after rebasing + patches + +--- + salt/modules/virt.py | 66 -------------------------------------------- + 1 file changed, 66 deletions(-) + +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index e3960a5a90..786bfa1e58 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -143,7 +143,6 @@ import salt.utils.xmlutil as xmlutil + import salt.utils.yaml + from salt._compat import ElementTree, ipaddress, saxutils + from salt.exceptions import CommandExecutionError, SaltInvocationError +-from salt.ext import six + from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin + from salt.ext.six.moves.urllib.parse import urlparse, urlunparse + from salt.utils.virt import check_remote, download_remote +@@ -5416,71 +5415,6 @@ def _parse_domain_caps(caps): + return result + + +-def _parse_domain_caps(caps): +- """ +- Parse the XML document of domain capabilities into a structure. +- """ +- result = { +- "emulator": caps.find("path").text if caps.find("path") is not None else None, +- "domain": caps.find("domain").text if caps.find("domain") is not None else None, +- "machine": caps.find("machine").text +- if caps.find("machine") is not None +- else None, +- "arch": caps.find("arch").text if caps.find("arch") is not None else None, +- } +- +- +-def all_capabilities(**kwargs): +- """ +- Return the host and domain capabilities in a single call. +- +- .. versionadded:: 3001 +- +- :param connection: libvirt connection URI, overriding defaults +- :param username: username to connect with, overriding defaults +- :param password: password to connect with, overriding defaults +- +- CLI Example: +- +- .. 
code-block:: bash +- +- salt '*' virt.all_capabilities +- +- """ +- conn = __get_conn(**kwargs) +- try: +- host_caps = ElementTree.fromstring(conn.getCapabilities()) +- domains = [ +- [ +- (guest.get("arch", {}).get("name", None), key) +- for key in guest.get("arch", {}).get("domains", {}).keys() +- ] +- for guest in [ +- _parse_caps_guest(guest) for guest in host_caps.findall("guest") +- ] +- ] +- flattened = [pair for item in (x for x in domains) for pair in item] +- result = { +- "host": { +- "host": _parse_caps_host(host_caps.find("host")), +- "guests": [ +- _parse_caps_guest(guest) for guest in host_caps.findall("guest") +- ], +- }, +- "domains": [ +- _parse_domain_caps( +- ElementTree.fromstring( +- conn.getDomainCapabilities(None, arch, None, domain) +- ) +- ) +- for (arch, domain) in flattened +- ], +- } +- return result +- finally: +- conn.close() +- +- + def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): + """ + Return the domain capabilities given an emulator, architecture, machine or virtualization type. +-- +2.29.2 + + diff --git a/early-feature-support-config.patch b/early-feature-support-config.patch index db7592a..297c074 100644 --- a/early-feature-support-config.patch +++ b/early-feature-support-config.patch @@ -1,4 +1,4 @@ -From 33a85b16a4740f3dd803fd0e47e26819afeecdd7 Mon Sep 17 00:00:00 2001 +From 550db5157741b0a252bfc684f3496a7fd6d674ad Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 10 Jul 2018 12:06:33 +0200 Subject: [PATCH] early feature: support-config @@ -291,12 +291,12 @@ Remove development stub. Ughh... Removed blacklist of pkg_resources --- - salt/cli/support/__init__.py | 65 +++ - salt/cli/support/collector.py | 495 ++++++++++++++++++++++ - salt/cli/support/console.py | 165 ++++++++ - salt/cli/support/intfunc.py | 42 ++ - salt/cli/support/localrunner.py | 34 ++ - salt/cli/support/profiles/default.yml | 71 ++++ + salt/cli/support/__init__.py | 76 +++ + salt/cli/support/collector.py | 538 +++++++++++++++++++++ + salt/cli/support/console.py | 184 +++++++ + salt/cli/support/intfunc.py | 40 ++ + salt/cli/support/localrunner.py | 33 ++ + salt/cli/support/profiles/default.yml | 71 +++ salt/cli/support/profiles/jobs-active.yml | 3 + salt/cli/support/profiles/jobs-last.yml | 3 + salt/cli/support/profiles/jobs-trace.yml | 7 + @@ -304,12 +304,12 @@ Removed blacklist of pkg_resources salt/cli/support/profiles/postgres.yml | 11 + salt/cli/support/profiles/salt.yml | 9 + salt/cli/support/profiles/users.yml | 22 + - salt/scripts.py | 14 + - salt/utils/parsers.py | 65 +++ + salt/scripts.py | 15 + + salt/utils/parsers.py | 114 +++++ scripts/salt-support | 11 + setup.py | 2 + - tests/unit/cli/test_support.py | 477 +++++++++++++++++++++ - 18 files changed, 1523 insertions(+) + tests/unit/cli/test_support.py | 553 ++++++++++++++++++++++ + 18 files changed, 1719 insertions(+) create mode 100644 salt/cli/support/__init__.py create mode 100644 salt/cli/support/collector.py create mode 100644 salt/cli/support/console.py @@ -328,168 +328,172 @@ Removed blacklist of pkg_resources diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py new file mode 100644 -index 0000000000000000000000000000000000000000..6a98a2d65656c0ad89d921b6842067a7399eab2c +index 0000000000..4fdf44186f --- /dev/null +++ b/salt/cli/support/__init__.py -@@ -0,0 +1,65 @@ -+# coding=utf-8 -+''' +@@ -0,0 +1,76 @@ ++""" +Get default scenario of the support. 
-+''' -+from __future__ import print_function, unicode_literals, absolute_import -+import yaml -+import os -+import salt.exceptions -+import jinja2 ++""" +import logging ++import os ++ ++import jinja2 ++import salt.exceptions ++import yaml + +log = logging.getLogger(__name__) + + +def _render_profile(path, caller, runner): -+ ''' ++ """ + Render profile as Jinja2. + :param path: + :return: -+ ''' -+ env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False) -+ return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip() ++ """ ++ env = jinja2.Environment( ++ loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False ++ ) ++ return ( ++ env.get_template(os.path.basename(path)) ++ .render(salt=caller, runners=runner) ++ .strip() ++ ) + + +def get_profile(profile, caller, runner): -+ ''' ++ """ + Get profile. + + :param profile: + :return: -+ ''' -+ profiles = profile.split(',') ++ """ ++ profiles = profile.split(",") + data = {} + for profile in profiles: + if os.path.basename(profile) == profile: -+ profile = profile.split('.')[0] # Trim extension if someone added it -+ profile_path = os.path.join(os.path.dirname(__file__), 'profiles', profile + '.yml') ++ profile = profile.split(".")[0] # Trim extension if someone added it ++ profile_path = os.path.join( ++ os.path.dirname(__file__), "profiles", profile + ".yml" ++ ) + else: + profile_path = profile + if os.path.exists(profile_path): + try: + rendered_template = _render_profile(profile_path, caller, runner) -+ log.trace('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template)) ++ log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) + data.update(yaml.load(rendered_template)) + except Exception as ex: + log.debug(ex, exc_info=True) -+ raise salt.exceptions.SaltException('Rendering profile failed: {}'.format(ex)) ++ raise salt.exceptions.SaltException( ++ "Rendering profile failed: {}".format(ex) ++ ) + else: -+ raise salt.exceptions.SaltException('Profile "{}" is not found.'.format(profile)) ++ raise salt.exceptions.SaltException( ++ 'Profile "{}" is not found.'.format(profile) ++ ) + + return data + + +def get_profiles(config): -+ ''' ++ """ + Get available profiles. 
+ + :return: -+ ''' ++ """ + profiles = [] -+ for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), 'profiles')): -+ if profile_name.endswith('.yml'): -+ profiles.append(profile_name.split('.')[0]) ++ for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), "profiles")): ++ if profile_name.endswith(".yml"): ++ profiles.append(profile_name.split(".")[0]) + + return sorted(profiles) diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py new file mode 100644 -index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc45441ff259fb +index 0000000000..bfbf491f5b --- /dev/null +++ b/salt/cli/support/collector.py -@@ -0,0 +1,495 @@ -+# coding=utf-8 -+from __future__ import absolute_import, print_function, unicode_literals -+import os -+import sys +@@ -0,0 +1,538 @@ ++import builtins as exceptions +import copy -+import yaml +import json +import logging ++import os ++import sys +import tarfile +import time -+import salt.ext.six as six -+ -+if six.PY2: -+ import exceptions -+else: -+ import builtins as exceptions -+ from io import IOBase as file -+ +from io import BytesIO ++from io import IOBase as file + -+import salt.utils.stringutils -+import salt.utils.parsers -+import salt.utils.verify -+import salt.utils.platform -+import salt.utils.process -+import salt.exceptions -+import salt.defaults.exitcodes +import salt.cli.caller +import salt.cli.support +import salt.cli.support.console +import salt.cli.support.intfunc +import salt.cli.support.localrunner ++import salt.defaults.exitcodes ++import salt.exceptions ++import salt.ext.six as six +import salt.output.table_out +import salt.runner +import salt.utils.files -+ ++import salt.utils.parsers ++import salt.utils.platform ++import salt.utils.process ++import salt.utils.stringutils ++import salt.utils.verify ++import yaml + +salt.output.table_out.__opts__ = {} +log = logging.getLogger(__name__) + + -+class SupportDataCollector(object): -+ ''' ++class SupportDataCollector: ++ """ + Data collector. It behaves just like another outputter, + except it grabs the data to the archive files. -+ ''' ++ """ ++ + def __init__(self, name, output): -+ ''' ++ """ + constructor of the data collector + :param name: + :param path: + :param format: -+ ''' ++ """ + self.archive_path = name + self.__default_outputter = output + self.__format = format + self.__arch = None + self.__current_section = None + self.__current_section_name = None -+ self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot') ++ self.__default_root = time.strftime("%Y.%m.%d-%H.%M.%S-snapshot") + self.out = salt.cli.support.console.MessagesOutput() + + def open(self): -+ ''' ++ """ + Opens archive. + :return: -+ ''' ++ """ + if self.__arch is not None: -+ raise salt.exceptions.SaltException('Archive already opened.') -+ self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w') ++ raise salt.exceptions.SaltException("Archive already opened.") ++ self.__arch = tarfile.TarFile.bz2open(self.archive_path, "w") + + def close(self): -+ ''' ++ """ + Closes the archive. 
+ :return: -+ ''' ++ """ + if self.__arch is None: -+ raise salt.exceptions.SaltException('Archive already closed') ++ raise salt.exceptions.SaltException("Archive already closed") + self._flush_content() + self.__arch.close() + self.__arch = None + + def _flush_content(self): -+ ''' ++ """ + Flush content to the archive + :return: -+ ''' ++ """ + if self.__current_section is not None: + buff = BytesIO() + buff._dirty = False @@ -499,51 +503,59 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + self.out.put(ret_data.name, indent=4) + self.__arch.add(ret_data.name, arcname=ret_data.name) + else: -+ buff.write(salt.utils.stringutils.to_bytes(title + '\n')) -+ buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n')) ++ buff.write(salt.utils.stringutils.to_bytes(title + "\n")) ++ buff.write( ++ salt.utils.stringutils.to_bytes(("-" * len(title)) + "\n\n") ++ ) + buff.write(salt.utils.stringutils.to_bytes(ret_data)) -+ buff.write(salt.utils.stringutils.to_bytes('\n\n\n')) ++ buff.write(salt.utils.stringutils.to_bytes("\n\n\n")) + buff._dirty = True + if buff._dirty: + buff.seek(0) -+ tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name)) -+ if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older ++ tar_info = tarfile.TarInfo( ++ name="{}/{}".format( ++ self.__default_root, self.__current_section_name ++ ) ++ ) ++ if not hasattr(buff, "getbuffer"): # Py2's BytesIO is older + buff.getbuffer = buff.getvalue + tar_info.size = len(buff.getbuffer()) + self.__arch.addfile(tarinfo=tar_info, fileobj=buff) + + def add(self, name): -+ ''' ++ """ + Start a new section. + :param name: + :return: -+ ''' ++ """ + if self.__current_section: + self._flush_content() + self.discard_current(name) + + def discard_current(self, name=None): -+ ''' ++ """ + Discard current section + :return: -+ ''' ++ """ + self.__current_section = [] + self.__current_section_name = name + + def write(self, title, data, output=None): -+ ''' ++ """ + Add a data to the current opened section. + :return: -+ ''' ++ """ + if not isinstance(data, (dict, list, tuple)): -+ data = {'raw-content': str(data)} ++ data = {"raw-content": str(data)} + output = output or self.__default_outputter + -+ if output != 'null': ++ if output != "null": + try: -+ if isinstance(data, dict) and 'return' in data: -+ data = data['return'] -+ content = salt.output.try_printout(data, output, {'extension_modules': '', 'color': False}) ++ if isinstance(data, dict) and "return" in data: ++ data = data["return"] ++ content = salt.output.try_printout( ++ data, output, {"extension_modules": "", "color": False} ++ ) + except Exception: # Fall-back to just raw YAML + content = None + else: @@ -551,20 +563,20 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + + if content is None: + data = json.loads(json.dumps(data)) -+ if isinstance(data, dict) and data.get('return'): -+ data = data.get('return') ++ if isinstance(data, dict) and data.get("return"): ++ data = data.get("return") + content = yaml.safe_dump(data, default_flow_style=False, indent=4) + + self.__current_section.append({title: content}) + + def link(self, title, path): -+ ''' ++ """ + Add a static file on the file system. + + :param title: + :param path: + :return: -+ ''' ++ """ + # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. 
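+        # A string path is opened below and the open handle itself is stored in
+        # the current section; _flush_content() later uses the handle's .name
+        # attribute to add the file to the tar archive.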
+ # pylint: disable=W8470 + if not isinstance(path, file): @@ -574,188 +586,204 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + + +class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): -+ ''' ++ """ + Class to run Salt Support subsystem. -+ ''' -+ RUNNER_TYPE = 'run' -+ CALL_TYPE = 'call' ++ """ ++ ++ RUNNER_TYPE = "run" ++ CALL_TYPE = "call" + + def _setup_fun_config(self, fun_conf): -+ ''' ++ """ + Setup function configuration. + + :param conf: + :return: -+ ''' ++ """ + conf = copy.deepcopy(self.config) -+ conf['file_client'] = 'local' -+ conf['fun'] = '' -+ conf['arg'] = [] -+ conf['kwarg'] = {} -+ conf['cache_jobs'] = False -+ conf['print_metadata'] = False ++ conf["file_client"] = "local" ++ conf["fun"] = "" ++ conf["arg"] = [] ++ conf["kwarg"] = {} ++ conf["cache_jobs"] = False ++ conf["print_metadata"] = False + conf.update(fun_conf) -+ conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix ++ conf["fun"] = conf["fun"].split(":")[-1] # Discard typing prefix + + return conf + + def _get_runner(self, conf): -+ ''' ++ """ + Get & setup runner. + + :param conf: + :return: -+ ''' ++ """ + conf = self._setup_fun_config(copy.deepcopy(conf)) -+ if not getattr(self, '_runner', None): ++ if not getattr(self, "_runner", None): + self._runner = salt.cli.support.localrunner.LocalRunner(conf) + else: + self._runner.opts = conf + return self._runner + + def _get_caller(self, conf): -+ ''' ++ """ + Get & setup caller from the factory. + + :param conf: + :return: -+ ''' ++ """ + conf = self._setup_fun_config(copy.deepcopy(conf)) -+ if not getattr(self, '_caller', None): ++ if not getattr(self, "_caller", None): + self._caller = salt.cli.caller.Caller.factory(conf) + else: + self._caller.opts = conf + return self._caller + + def _local_call(self, call_conf): -+ ''' ++ """ + Execute local call -+ ''' ++ """ + try: + ret = self._get_caller(call_conf).call() + except SystemExit: -+ ret = 'Data is not available at this moment' ++ ret = "Data is not available at this moment" + self.out.error(ret) + except Exception as ex: -+ ret = 'Unhandled exception occurred: {}'.format(ex) ++ ret = "Unhandled exception occurred: {}".format(ex) + log.debug(ex, exc_info=True) + self.out.error(ret) + + return ret + + def _local_run(self, run_conf): -+ ''' ++ """ + Execute local runner + + :param run_conf: + :return: -+ ''' ++ """ + try: + ret = self._get_runner(run_conf).run() + except SystemExit: -+ ret = 'Runner is not available at this moment' ++ ret = "Runner is not available at this moment" + self.out.error(ret) + except Exception as ex: -+ ret = 'Unhandled exception occurred: {}'.format(ex) ++ ret = "Unhandled exception occurred: {}".format(ex) + log.debug(ex, exc_info=True) + + return ret + + def _internal_function_call(self, call_conf): -+ ''' ++ """ + Call internal function. 
+ + :param call_conf: + :return: -+ ''' ++ """ ++ + def stub(*args, **kwargs): -+ message = 'Function {} is not available'.format(call_conf['fun']) ++ message = "Function {} is not available".format(call_conf["fun"]) + self.out.error(message) -+ log.debug('Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format(**call_conf)) ++ log.debug( ++ 'Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format( ++ **call_conf ++ ) ++ ) + return message + -+ return getattr(salt.cli.support.intfunc, -+ call_conf['fun'], stub)(self.collector, -+ *call_conf['arg'], -+ **call_conf['kwargs']) ++ return getattr(salt.cli.support.intfunc, call_conf["fun"], stub)( ++ self.collector, *call_conf["arg"], **call_conf["kwargs"] ++ ) + + def _get_action(self, action_meta): -+ ''' ++ """ + Parse action and turn into a calling point. + :param action_meta: + :return: -+ ''' ++ """ + conf = { -+ 'fun': list(action_meta.keys())[0], -+ 'arg': [], -+ 'kwargs': {}, ++ "fun": list(action_meta.keys())[0], ++ "arg": [], ++ "kwargs": {}, + } -+ if not len(conf['fun'].split('.')) - 1: -+ conf['salt.int.intfunc'] = True ++ if not len(conf["fun"].split(".")) - 1: ++ conf["salt.int.intfunc"] = True + -+ action_meta = action_meta[conf['fun']] -+ info = action_meta.get('info', 'Action for {}'.format(conf['fun'])) -+ for arg in action_meta.get('args') or []: ++ action_meta = action_meta[conf["fun"]] ++ info = action_meta.get("info", "Action for {}".format(conf["fun"])) ++ for arg in action_meta.get("args") or []: + if not isinstance(arg, dict): -+ conf['arg'].append(arg) ++ conf["arg"].append(arg) + else: -+ conf['kwargs'].update(arg) ++ conf["kwargs"].update(arg) + -+ return info, action_meta.get('output'), conf ++ return info, action_meta.get("output"), conf + + def collect_internal_data(self): -+ ''' ++ """ + Dumps current running pillars, configuration etc. + :return: -+ ''' -+ section = 'configuration' ++ """ ++ section = "configuration" + self.out.put(section) + self.collector.add(section) -+ self.out.put('Saving config', indent=2) -+ self.collector.write('General Configuration', self.config) -+ self.out.put('Saving pillars', indent=2) -+ self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'})) ++ self.out.put("Saving config", indent=2) ++ self.collector.write("General Configuration", self.config) ++ self.out.put("Saving pillars", indent=2) ++ self.collector.write( ++ "Active Pillars", self._local_call({"fun": "pillar.items"}) ++ ) + -+ section = 'highstate' ++ section = "highstate" + self.out.put(section) + self.collector.add(section) -+ self.out.put('Saving highstate', indent=2) -+ self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'})) ++ self.out.put("Saving highstate", indent=2) ++ self.collector.write( ++ "Rendered highstate", self._local_call({"fun": "state.show_highstate"}) ++ ) + + def _extract_return(self, data): -+ ''' ++ """ + Extracts return data from the results. + + :param data: + :return: -+ ''' ++ """ + if isinstance(data, dict): -+ data = data.get('return', data) ++ data = data.get("return", data) + + return data + + def collect_local_data(self): -+ ''' ++ """ + Collects master system data. 
+ :return: -+ ''' ++ """ ++ + def call(func, *args, **kwargs): -+ ''' ++ """ + Call wrapper for templates + :param func: + :return: -+ ''' -+ return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs})) ++ """ ++ return self._extract_return( ++ self._local_call({"fun": func, "arg": args, "kwarg": kwargs}) ++ ) + + def run(func, *args, **kwargs): -+ ''' ++ """ + Runner wrapper for templates + :param func: + :return: -+ ''' -+ return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs})) ++ """ ++ return self._extract_return( ++ self._local_run({"fun": func, "arg": args, "kwarg": kwargs}) ++ ) + -+ scenario = salt.cli.support.get_profile(self.config['support_profile'], call, run) ++ scenario = salt.cli.support.get_profile( ++ self.config["support_profile"], call, run ++ ) + for category_name in scenario: + self.out.put(category_name) + self.collector.add(category_name) @@ -763,70 +791,89 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + if not action: + continue + action_name = next(iter(action)) -+ if not isinstance(action[action_name], six.string_types): ++ if not isinstance(action[action_name], str): + info, output, conf = self._get_action(action) -+ action_type = self._get_action_type(action) # run: for runners ++ action_type = self._get_action_type( ++ action ++ ) # run: for runners + if action_type == self.RUNNER_TYPE: -+ self.out.put('Running {}'.format(info.lower()), indent=2) ++ self.out.put("Running {}".format(info.lower()), indent=2) + self.collector.write(info, self._local_run(conf), output=output) + elif action_type == self.CALL_TYPE: -+ if not conf.get('salt.int.intfunc'): -+ self.out.put('Collecting {}'.format(info.lower()), indent=2) -+ self.collector.write(info, self._local_call(conf), output=output) ++ if not conf.get("salt.int.intfunc"): ++ self.out.put("Collecting {}".format(info.lower()), indent=2) ++ self.collector.write( ++ info, self._local_call(conf), output=output ++ ) + else: + self.collector.discard_current() + self._internal_function_call(conf) + else: -+ self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action)) ++ self.out.error( ++ 'Unknown action type "{}" for action: {}'.format( ++ action_type, action ++ ) ++ ) + else: + # TODO: This needs to be moved then to the utils. + # But the code is not yet there (other PRs) -+ self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2) ++ self.out.msg( ++ "\n".join(salt.cli.support.console.wrap(action[action_name])), ++ ident=2, ++ ) + + def _get_action_type(self, action): -+ ''' ++ """ + Get action type. + :param action: + :return: -+ ''' -+ action_name = next(iter(action or {'': None})) -+ if ':' not in action_name: -+ action_name = '{}:{}'.format(self.CALL_TYPE, action_name) ++ """ ++ action_name = next(iter(action or {"": None})) ++ if ":" not in action_name: ++ action_name = "{}:{}".format(self.CALL_TYPE, action_name) + -+ return action_name.split(':')[0] or None ++ return action_name.split(":")[0] or None + + def collect_targets_data(self): -+ ''' ++ """ + Collects minion targets data + :return: -+ ''' ++ """ + # TODO: remote collector? 
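+        # Only master-side data is collected for now; minion-side collection is
+        # not implemented yet (see the TODO above).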
+ + def _cleanup(self): -+ ''' ++ """ + Cleanup if crash/exception + :return: -+ ''' -+ if (hasattr(self, 'config') -+ and self.config.get('support_archive') -+ and os.path.exists(self.config['support_archive'])): -+ self.out.warning('Terminated earlier, cleaning up') -+ os.unlink(self.config['support_archive']) ++ """ ++ if ( ++ hasattr(self, "config") ++ and self.config.get("support_archive") ++ and os.path.exists(self.config["support_archive"]) ++ ): ++ self.out.warning("Terminated earlier, cleaning up") ++ os.unlink(self.config["support_archive"]) + + def _check_existing_archive(self): -+ ''' ++ """ + Check if archive exists or not. If exists and --force was not specified, + bail out. Otherwise remove it and move on. + + :return: -+ ''' -+ if os.path.exists(self.config['support_archive']): -+ if self.config['support_archive_force_overwrite']: -+ self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive'])) -+ os.unlink(self.config['support_archive']) ++ """ ++ if os.path.exists(self.config["support_archive"]): ++ if self.config["support_archive_force_overwrite"]: ++ self.out.warning( ++ "Overwriting existing archive: {}".format( ++ self.config["support_archive"] ++ ) ++ ) ++ os.unlink(self.config["support_archive"]) + ret = True + else: -+ self.out.warning('File {} already exists.'.format(self.config['support_archive'])) ++ self.out.warning( ++ "File {} already exists.".format(self.config["support_archive"]) ++ ) + ret = False + else: + ret = True @@ -848,32 +895,36 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + exit_code = salt.defaults.exitcodes.EX_GENERIC + self.out.error(ex) + else: -+ if self.config['log_level'] not in ('quiet', ): ++ if self.config["log_level"] not in ("quiet",): + self.setup_logfile_logger() + salt.utils.verify.verify_log(self.config) + salt.cli.support.log = log # Pass update logger so trace is available + -+ if self.config['support_profile_list']: -+ self.out.put('List of available profiles:') -+ for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)): -+ msg_template = ' {}. '.format(idx + 1) + '{}' ++ if self.config["support_profile_list"]: ++ self.out.put("List of available profiles:") ++ for idx, profile in enumerate( ++ salt.cli.support.get_profiles(self.config) ++ ): ++ msg_template = " {}. ".format(idx + 1) + "{}" + self.out.highlight(msg_template, profile) + exit_code = salt.defaults.exitcodes.EX_OK -+ elif self.config['support_show_units']: -+ self.out.put('List of available units:') ++ elif self.config["support_show_units"]: ++ self.out.put("List of available units:") + for idx, unit in enumerate(self.find_existing_configs(None)): -+ msg_template = ' {}. '.format(idx + 1) + '{}' ++ msg_template = " {}. 
".format(idx + 1) + "{}" + self.out.highlight(msg_template, unit) + exit_code = salt.defaults.exitcodes.EX_OK + else: -+ if not self.config['support_profile']: ++ if not self.config["support_profile"]: + self.print_help() + raise SystemExit() + + if self._check_existing_archive(): + try: -+ self.collector = SupportDataCollector(self.config['support_archive'], -+ output=self.config['support_output_format']) ++ self.collector = SupportDataCollector( ++ self.config["support_archive"], ++ output=self.config["support_output_format"], ++ ) + except Exception as ex: + self.out.error(ex) + exit_code = salt.defaults.exitcodes.EX_GENERIC @@ -887,8 +938,11 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + self.collector.close() + + archive_path = self.collector.archive_path -+ self.out.highlight('\nSupport data has been written to "{}" file.\n', -+ archive_path, _main='YELLOW') ++ self.out.highlight( ++ '\nSupport data has been written to "{}" file.\n', ++ archive_path, ++ _main="YELLOW", ++ ) + except Exception as ex: + self.out.error(ex) + log.debug(ex, exc_info=True) @@ -900,61 +954,64 @@ index 0000000000000000000000000000000000000000..478d07e13bf264b5d0caacd487cc4544 + sys.exit(exit_code) diff --git a/salt/cli/support/console.py b/salt/cli/support/console.py new file mode 100644 -index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7bd3720a9e +index 0000000000..266b645479 --- /dev/null +++ b/salt/cli/support/console.py -@@ -0,0 +1,165 @@ -+# coding=utf-8 -+''' +@@ -0,0 +1,184 @@ ++""" +Collection of tools to report messages to console. + +NOTE: This is subject to incorporate other formatting bits + from all around everywhere and then to be moved to utils. -+''' ++""" + -+from __future__ import absolute_import, print_function, unicode_literals + -+import sys +import os -+import salt.utils.color ++import sys +import textwrap + ++import salt.utils.color + -+class IndentOutput(object): -+ ''' ++ ++class IndentOutput: ++ """ + Paint different indends in different output. -+ ''' ++ """ ++ + def __init__(self, conf=None, device=sys.stdout): + if conf is None: -+ conf = {0: 'CYAN', 2: 'GREEN', 4: 'LIGHT_BLUE', 6: 'BLUE'} ++ conf = {0: "CYAN", 2: "GREEN", 4: "LIGHT_BLUE", 6: "BLUE"} + self._colors_conf = conf + self._device = device + self._colors = salt.utils.color.get_colors() -+ self._default_color = 'GREEN' -+ self._default_hl_color = 'LIGHT_GREEN' ++ self._default_color = "GREEN" ++ self._default_hl_color = "LIGHT_GREEN" + + def put(self, message, indent=0): -+ ''' ++ """ + Print message with an indent. + + :param message: + :param indent: + :return: -+ ''' -+ color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color)) ++ """ ++ color = self._colors_conf.get( ++ indent + indent % 2, self._colors_conf.get(0, self._default_color) ++ ) + -+ for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]: ++ for chunk in [" " * indent, self._colors[color], message, self._colors["ENDC"]]: + self._device.write(str(chunk)) + self._device.write(os.linesep) + self._device.flush() + + +class MessagesOutput(IndentOutput): -+ ''' ++ """ + Messages output to the CLI. -+ ''' -+ def msg(self, message, title=None, title_color=None, color='BLUE', ident=0): -+ ''' ++ """ ++ ++ def msg(self, message, title=None, title_color=None, color="BLUE", ident=0): ++ """ + Hint message. 
+ + :param message: @@ -963,7 +1020,7 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b + :param color: + :param ident: + :return: -+ ''' ++ """ + if title and not title_color: + title_color = color + if title_color and not title: @@ -972,49 +1029,55 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b + self.__colored_output(title, message, title_color, color, ident=ident) + + def info(self, message, ident=0): -+ ''' ++ """ + Write an info message to the CLI. + + :param message: + :param ident: + :return: -+ ''' -+ self.__colored_output('Info', message, 'GREEN', 'LIGHT_GREEN', ident=ident) ++ """ ++ self.__colored_output("Info", message, "GREEN", "LIGHT_GREEN", ident=ident) + + def warning(self, message, ident=0): -+ ''' ++ """ + Write a warning message to the CLI. + + :param message: + :param ident: + :return: -+ ''' -+ self.__colored_output('Warning', message, 'YELLOW', 'LIGHT_YELLOW', ident=ident) ++ """ ++ self.__colored_output("Warning", message, "YELLOW", "LIGHT_YELLOW", ident=ident) + + def error(self, message, ident=0): -+ ''' ++ """ + Write an error message to the CLI. + + :param message: + :param ident + :return: -+ ''' -+ self.__colored_output('Error', message, 'RED', 'LIGHT_RED', ident=ident) ++ """ ++ self.__colored_output("Error", message, "RED", "LIGHT_RED", ident=ident) + + def __colored_output(self, title, message, title_color, message_color, ident=0): -+ if title and not title.endswith(':'): ++ if title and not title.endswith(":"): + _linesep = title.endswith(os.linesep) -+ title = '{}:{}'.format(title.strip(), _linesep and os.linesep or ' ') ++ title = "{}:{}".format(title.strip(), _linesep and os.linesep or " ") + -+ for chunk in [title_color and self._colors[title_color] or None, ' ' * ident, -+ title, self._colors[message_color], message, self._colors['ENDC']]: ++ for chunk in [ ++ title_color and self._colors[title_color] or None, ++ " " * ident, ++ title, ++ self._colors[message_color], ++ message, ++ self._colors["ENDC"], ++ ]: + if chunk: + self._device.write(str(chunk)) + self._device.write(os.linesep) + self._device.flush() + + def highlight(self, message, *values, **colors): -+ ''' ++ """ + Highlighter works the way that message parameter is a template, + the "values" is a list of arguments going one after another as values there. + And so the "colors" should designate either highlight color or alternate for each. 
@@ -1038,31 +1101,41 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b + :param formatted: + :param colors: + :return: -+ ''' ++ """ + -+ m_color = colors.get('_main', self._default_color) -+ h_color = colors.get('_highlight', self._default_hl_color) ++ m_color = colors.get("_main", self._default_color) ++ h_color = colors.get("_highlight", self._default_hl_color) + + _values = [] + for value in values: -+ _values.append('{p}{c}{r}'.format(p=self._colors[colors.get(value, h_color)], -+ c=value, r=self._colors[m_color])) -+ self._device.write('{s}{m}{e}'.format(s=self._colors[m_color], -+ m=message.format(*_values), e=self._colors['ENDC'])) ++ _values.append( ++ "{p}{c}{r}".format( ++ p=self._colors[colors.get(value, h_color)], ++ c=value, ++ r=self._colors[m_color], ++ ) ++ ) ++ self._device.write( ++ "{s}{m}{e}".format( ++ s=self._colors[m_color], ++ m=message.format(*_values), ++ e=self._colors["ENDC"], ++ ) ++ ) + self._device.write(os.linesep) + self._device.flush() + + +def wrap(txt, width=80, ident=0): -+ ''' ++ """ + Wrap text to the required dimensions and clean it up, prepare for display. + + :param txt: + :param width: + :return: -+ ''' -+ ident = ' ' * ident -+ txt = (txt or '').replace(os.linesep, ' ').strip() ++ """ ++ ident = " " * ident ++ txt = (txt or "").replace(os.linesep, " ").strip() + + wrapper = textwrap.TextWrapper() + wrapper.fix_sentence_endings = False @@ -1071,35 +1144,33 @@ index 0000000000000000000000000000000000000000..fb6992d657e21a6b933e81135d812f7b + return wrapper.wrap(txt) diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py new file mode 100644 -index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23856187c6 +index 0000000000..d3d8f83cb8 --- /dev/null +++ b/salt/cli/support/intfunc.py -@@ -0,0 +1,42 @@ -+# coding=utf-8 -+''' +@@ -0,0 +1,40 @@ ++""" +Internal functions. -+''' ++""" +# Maybe this needs to be a modules in a future? + -+from __future__ import absolute_import, print_function, unicode_literals +import os -+from salt.cli.support.console import MessagesOutput -+import salt.utils.files + ++import salt.utils.files ++from salt.cli.support.console import MessagesOutput + +out = MessagesOutput() + + +def filetree(collector, path): -+ ''' ++ """ + Add all files in the tree. If the "path" is a file, + only that file will be added. + + :param path: File or directory + :return: -+ ''' ++ """ + if not path: -+ out.error('Path not defined', ident=2) ++ out.error("Path not defined", ident=2) + else: + # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. 
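+        # fopen() returns an open handle which is passed on to collector.link(),
+        # so the file contents are pulled into the archive when the current
+        # section is flushed.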
+ # pylint: disable=W8470 @@ -1107,7 +1178,7 @@ index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23 + filename = os.path.basename(path) + try: + file_ref = salt.utils.files.fopen(path) # pylint: disable=W -+ out.put('Add {}'.format(filename), indent=2) ++ out.put("Add {}".format(filename), indent=2) + collector.add(filename) + collector.link(title=path, path=file_ref) + except Exception as err: @@ -1119,47 +1190,46 @@ index 0000000000000000000000000000000000000000..2727cd6394c364e35c99403b75e7fd23 + filetree(collector, fname) diff --git a/salt/cli/support/localrunner.py b/salt/cli/support/localrunner.py new file mode 100644 -index 0000000000000000000000000000000000000000..26deb883bccc98079201d26deba01008cca72921 +index 0000000000..ad10eda0b0 --- /dev/null +++ b/salt/cli/support/localrunner.py -@@ -0,0 +1,34 @@ -+# coding=utf-8 -+''' +@@ -0,0 +1,33 @@ ++""" +Local Runner -+''' ++""" ++ ++import logging + -+from __future__ import print_function, absolute_import, unicode_literals +import salt.runner +import salt.utils.platform +import salt.utils.process -+import logging + +log = logging.getLogger(__name__) + + +class LocalRunner(salt.runner.Runner): -+ ''' ++ """ + Runner class that changes its default behaviour. -+ ''' ++ """ + + def _proc_function(self, fun, low, user, tag, jid, daemonize=True): -+ ''' ++ """ + Same as original _proc_function in AsyncClientMixin, + except it calls "low" without firing a print event. -+ ''' ++ """ + if daemonize and not salt.utils.platform.is_windows(): + salt.log.setup.shutdown_multiprocessing_logging() + salt.utils.process.daemonize() + salt.log.setup.setup_multiprocessing_logging() + -+ low['__jid__'] = jid -+ low['__user__'] = user -+ low['__tag__'] = tag ++ low["__jid__"] = jid ++ low["__user__"] = user ++ low["__tag__"] = tag + + return self.low(fun, low, print_event=False, full_return=False) diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml new file mode 100644 -index 0000000000000000000000000000000000000000..01d9a261933333a6246e932556cdd88f3adf1f82 +index 0000000000..01d9a26193 --- /dev/null +++ b/salt/cli/support/profiles/default.yml @@ -0,0 +1,71 @@ @@ -1236,7 +1306,7 @@ index 0000000000000000000000000000000000000000..01d9a261933333a6246e932556cdd88f + diff --git a/salt/cli/support/profiles/jobs-active.yml b/salt/cli/support/profiles/jobs-active.yml new file mode 100644 -index 0000000000000000000000000000000000000000..508c54ece79087c98ba2dd4bd0ee265b09520296 +index 0000000000..508c54ece7 --- /dev/null +++ b/salt/cli/support/profiles/jobs-active.yml @@ -0,0 +1,3 @@ @@ -1245,7 +1315,7 @@ index 0000000000000000000000000000000000000000..508c54ece79087c98ba2dd4bd0ee265b + info: List of all actively running jobs diff --git a/salt/cli/support/profiles/jobs-last.yml b/salt/cli/support/profiles/jobs-last.yml new file mode 100644 -index 0000000000000000000000000000000000000000..e3b719f552d2288d15dc5af4d6c320e0386ed7d0 +index 0000000000..e3b719f552 --- /dev/null +++ b/salt/cli/support/profiles/jobs-last.yml @@ -0,0 +1,3 @@ @@ -1254,7 +1324,7 @@ index 0000000000000000000000000000000000000000..e3b719f552d2288d15dc5af4d6c320e0 + info: List all detectable jobs and associated functions diff --git a/salt/cli/support/profiles/jobs-trace.yml b/salt/cli/support/profiles/jobs-trace.yml new file mode 100644 -index 0000000000000000000000000000000000000000..00b28e0502e2e6dbd9fe3e990750cbc1f9a94a30 +index 0000000000..00b28e0502 --- /dev/null +++ b/salt/cli/support/profiles/jobs-trace.yml @@ -0,0 +1,7 @@ 
@@ -1267,7 +1337,7 @@ index 0000000000000000000000000000000000000000..00b28e0502e2e6dbd9fe3e990750cbc1 + {% endfor %} diff --git a/salt/cli/support/profiles/network.yml b/salt/cli/support/profiles/network.yml new file mode 100644 -index 0000000000000000000000000000000000000000..268f02e61fbe9b5f0870569a9343da6e778a7017 +index 0000000000..268f02e61f --- /dev/null +++ b/salt/cli/support/profiles/network.yml @@ -0,0 +1,27 @@ @@ -1300,7 +1370,7 @@ index 0000000000000000000000000000000000000000..268f02e61fbe9b5f0870569a9343da6e + info: ARP table diff --git a/salt/cli/support/profiles/postgres.yml b/salt/cli/support/profiles/postgres.yml new file mode 100644 -index 0000000000000000000000000000000000000000..2238752c7a90a09bddc9cd3cbf27acbbf2a85c1c +index 0000000000..2238752c7a --- /dev/null +++ b/salt/cli/support/profiles/postgres.yml @@ -0,0 +1,11 @@ @@ -1317,7 +1387,7 @@ index 0000000000000000000000000000000000000000..2238752c7a90a09bddc9cd3cbf27acbb + - /etc/postgresql diff --git a/salt/cli/support/profiles/salt.yml b/salt/cli/support/profiles/salt.yml new file mode 100644 -index 0000000000000000000000000000000000000000..4b18d9887002f9a9efdd6f54870db4b74384a19e +index 0000000000..4b18d98870 --- /dev/null +++ b/salt/cli/support/profiles/salt.yml @@ -0,0 +1,9 @@ @@ -1332,7 +1402,7 @@ index 0000000000000000000000000000000000000000..4b18d9887002f9a9efdd6f54870db4b7 + - {{salt('config.get', 'log_file')}} diff --git a/salt/cli/support/profiles/users.yml b/salt/cli/support/profiles/users.yml new file mode 100644 -index 0000000000000000000000000000000000000000..391acdb606d2ebb35ac7cff10844fffd84d96915 +index 0000000000..391acdb606 --- /dev/null +++ b/salt/cli/support/profiles/users.yml @@ -0,0 +1,22 @@ @@ -1359,40 +1429,41 @@ index 0000000000000000000000000000000000000000..391acdb606d2ebb35ac7cff10844fffd + info: List of all available groups + output: table diff --git a/salt/scripts.py b/salt/scripts.py -index 5e623a578e8363b51e10af247b325069741064d5..401ec2055303dd3b342110ddbab155e30d5b4e31 100644 +index 8f3cde8477..e5c248f011 100644 --- a/salt/scripts.py +++ b/salt/scripts.py -@@ -579,3 +579,17 @@ def salt_unity(): +@@ -592,3 +592,18 @@ def salt_unity(): sys.argv.pop(1) - s_fun = getattr(sys.modules[__name__], 'salt_{0}'.format(cmd)) + s_fun = getattr(sys.modules[__name__], "salt_{}".format(cmd)) s_fun() + + +def salt_support(): -+ ''' ++ """ + Run Salt Support that collects system data, logs etc for debug and support purposes. 
+ :return: -+ ''' ++ """ + + import salt.cli.support.collector -+ if '' in sys.path: -+ sys.path.remove('') ++ ++ if "" in sys.path: ++ sys.path.remove("") + client = salt.cli.support.collector.SaltSupport() + _install_signal_handlers(client) + client.run() diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py -index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa380495171 100644 +index 952f9aebc5..c1422a9556 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py -@@ -21,6 +21,7 @@ import getpass - import logging - import optparse - import traceback +@@ -17,6 +17,7 @@ import optparse + import os + import signal + import sys +import tempfile + import traceback + import types from functools import partial - - -@@ -34,6 +35,7 @@ import salt.utils.args +@@ -31,6 +32,7 @@ import salt.utils.args import salt.utils.data import salt.utils.files import salt.utils.jid @@ -1400,49 +1471,98 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3 import salt.utils.platform import salt.utils.process import salt.utils.stringutils -@@ -1913,6 +1915,69 @@ class SyndicOptionParser(six.with_metaclass(OptionParserMeta, - self.get_config_file_path('minion')) +@@ -2049,6 +2051,118 @@ class SyndicOptionParser( + return opts -+class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser, ConfigDirMixIn, -+ MergeConfigMixIn, LogLevelMixIn, TimeoutMixIn)): ++class SaltSupportOptionParser( ++ OptionParser, ++ ConfigDirMixIn, ++ MergeConfigMixIn, ++ LogLevelMixIn, ++ TimeoutMixIn, ++ metaclass=OptionParserMeta, ++): + default_timeout = 5 -+ description = 'Salt Support is a program to collect all support data: logs, system configuration etc.' -+ usage = '%prog [options] \'\' [arguments]' ++ description = "Salt Support is a program to collect all support data: logs, system configuration etc." ++ usage = "%prog [options] '' [arguments]" + # ConfigDirMixIn config filename attribute -+ _config_filename_ = 'master' ++ _config_filename_ = "master" + + # LogLevelMixIn attributes -+ _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level'] -+ _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file'] ++ _default_logging_level_ = config.DEFAULT_MASTER_OPTS["log_level"] ++ _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS["log_file"] + + def _mixin_setup(self): -+ self.add_option('-P', '--show-profiles', default=False, action='store_true', -+ dest='support_profile_list', help='Show available profiles') -+ self.add_option('-p', '--profile', default='', dest='support_profile', -+ help='Specify support profile or comma-separated profiles, e.g.: "salt,network"') -+ support_archive = '{t}/{h}-support.tar.bz2'.format(t=tempfile.gettempdir(), -+ h=salt.utils.network.get_fqhostname()) -+ self.add_option('-a', '--archive', default=support_archive, dest='support_archive', -+ help=('Specify name of the resulting support archive. 
' -+ 'Default is "{f}".'.format(f=support_archive))) -+ self.add_option('-u', '--unit', default='', dest='support_unit', -+ help='Specify examined unit (default "master").') -+ self.add_option('-U', '--show-units', default=False, action='store_true', dest='support_show_units', -+ help='Show available units') -+ self.add_option('-f', '--force', default=False, action='store_true', dest='support_archive_force_overwrite', -+ help='Force overwrite existing archive, if exists') -+ self.add_option('-o', '--out', default='null', dest='support_output_format', -+ help=('Set the default output using the specified outputter, ' -+ 'unless profile does not overrides this. Default: "yaml".')) ++ self.add_option( ++ "-P", ++ "--show-profiles", ++ default=False, ++ action="store_true", ++ dest="support_profile_list", ++ help="Show available profiles", ++ ) ++ self.add_option( ++ "-p", ++ "--profile", ++ default="", ++ dest="support_profile", ++ help='Specify support profile or comma-separated profiles, e.g.: "salt,network"', ++ ) ++ support_archive = "{t}/{h}-support.tar.bz2".format( ++ t=tempfile.gettempdir(), h=salt.utils.network.get_fqhostname() ++ ) ++ self.add_option( ++ "-a", ++ "--archive", ++ default=support_archive, ++ dest="support_archive", ++ help=( ++ "Specify name of the resulting support archive. " ++ 'Default is "{f}".'.format(f=support_archive) ++ ), ++ ) ++ self.add_option( ++ "-u", ++ "--unit", ++ default="", ++ dest="support_unit", ++ help='Specify examined unit (default "master").', ++ ) ++ self.add_option( ++ "-U", ++ "--show-units", ++ default=False, ++ action="store_true", ++ dest="support_show_units", ++ help="Show available units", ++ ) ++ self.add_option( ++ "-f", ++ "--force", ++ default=False, ++ action="store_true", ++ dest="support_archive_force_overwrite", ++ help="Force overwrite existing archive, if exists", ++ ) ++ self.add_option( ++ "-o", ++ "--out", ++ default="null", ++ dest="support_output_format", ++ help=( ++ "Set the default output using the specified outputter, " ++ 'unless profile does not overrides this. Default: "yaml".' ++ ), ++ ) + + def find_existing_configs(self, default): -+ ''' ++ """ + Find configuration files on the system. + :return: -+ ''' ++ """ + configs = [] -+ for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']: ++ for cfg in [default, self._config_filename_, "minion", "proxy", "cloud", "spm"]: + if not cfg: + continue + config_path = self.get_config_file_path(cfg) @@ -1450,15 +1570,15 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3 + configs.append(cfg) + + if default and default not in configs: -+ raise SystemExit('Unknown configuration unit: {}'.format(default)) ++ raise SystemExit("Unknown configuration unit: {}".format(default)) + + return configs + + def setup_config(self, cfg=None): -+ ''' ++ """ + Open suitable config file. 
+ :return: -+ ''' ++ """ + _opts, _args = optparse.OptionParser.parse_args(self) + configs = self.find_existing_configs(_opts.support_unit) + if cfg not in configs: @@ -1467,72 +1587,70 @@ index fb9b0987a65ce1607de63f6f83ce96f6987800aa..83dfe717f6a66305ad1c2505da5d3aa3 + return config.master_config(self.get_config_file_path(cfg)) + + - class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta, - OptionParser, - ConfigDirMixIn, + class SaltCMDOptionParser( + OptionParser, + ConfigDirMixIn, diff --git a/scripts/salt-support b/scripts/salt-support new file mode 100755 -index 0000000000000000000000000000000000000000..48ce141c673aa390174f88f0e18b857c561ab6f5 +index 0000000000..4e0e79f3ea --- /dev/null +++ b/scripts/salt-support @@ -0,0 +1,11 @@ +#!/usr/bin/env python -+''' ++""" +Salt support is to collect logs, +debug data and system information +for support purposes. -+''' ++""" + +from salt.scripts import salt_support + -+if __name__ == '__main__': ++if __name__ == "__main__": + salt_support() diff --git a/setup.py b/setup.py -index 788664e14e1e93ffe51e9ace4409b48e9b4afeaf..06374647df5e82a21fc39b08d41c596f0483ff0c 100755 +index 08c84344ea..39a66fefba 100755 --- a/setup.py +++ b/setup.py -@@ -1058,6 +1058,7 @@ class SaltDistribution(distutils.dist.Distribution): - 'scripts/salt-master', - 'scripts/salt-minion', - 'scripts/salt-proxy', -+ 'scripts/salt-support', - 'scripts/salt-ssh', - 'scripts/salt-syndic', - 'scripts/salt-unity', -@@ -1093,6 +1094,7 @@ class SaltDistribution(distutils.dist.Distribution): - 'salt-key = salt.scripts:salt_key', - 'salt-master = salt.scripts:salt_master', - 'salt-minion = salt.scripts:salt_minion', -+ 'salt-support = salt.scripts:salt_support', - 'salt-ssh = salt.scripts:salt_ssh', - 'salt-syndic = salt.scripts:salt_syndic', - 'salt-unity = salt.scripts:salt_unity', +@@ -1253,6 +1253,7 @@ class SaltDistribution(distutils.dist.Distribution): + "scripts/salt-master", + "scripts/salt-minion", + "scripts/salt-proxy", ++ "scripts/salt-support", + "scripts/salt-ssh", + "scripts/salt-syndic", + "scripts/salt-unity", +@@ -1299,6 +1300,7 @@ class SaltDistribution(distutils.dist.Distribution): + "salt-key = salt.scripts:salt_key", + "salt-master = salt.scripts:salt_master", + "salt-minion = salt.scripts:salt_minion", ++ "salt-support = salt.scripts:salt_support", + "salt-ssh = salt.scripts:salt_ssh", + "salt-syndic = salt.scripts:salt_syndic", + "salt-unity = salt.scripts:salt_unity", diff --git a/tests/unit/cli/test_support.py b/tests/unit/cli/test_support.py new file mode 100644 -index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c136438524008c7cd +index 0000000000..dc0e99bb3d --- /dev/null +++ b/tests/unit/cli/test_support.py -@@ -0,0 +1,477 @@ -+# -*- coding: utf-8 -*- -+''' +@@ -0,0 +1,553 @@ ++""" + :codeauthor: Bo Maryniuk -+''' ++""" + -+from __future__ import absolute_import, print_function, unicode_literals + -+from tests.support.unit import skipIf, TestCase -+from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON ++import os + ++import jinja2 ++import salt.cli.support.collector ++import salt.exceptions ++import salt.utils.files ++import yaml ++from salt.cli.support.collector import SaltSupport, SupportDataCollector +from salt.cli.support.console import IndentOutput -+from salt.cli.support.collector import SupportDataCollector, SaltSupport +from salt.utils.color import get_colors +from salt.utils.stringutils import to_bytes -+import salt.exceptions -+import salt.cli.support.collector -+import salt.utils.files -+import os 
-+import yaml -+import jinja2 ++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch ++from tests.support.unit import TestCase, skipIf + +try: + import pytest @@ -1540,239 +1658,268 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852 + pytest = None + + -+@skipIf(not bool(pytest), 'Pytest needs to be installed') ++@skipIf(not bool(pytest), "Pytest needs to be installed") +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltSupportIndentOutputTestCase(TestCase): -+ ''' ++ """ + Unit Tests for the salt-support indent output. -+ ''' ++ """ + + def setUp(self): -+ ''' ++ """ + Setup test + :return: -+ ''' ++ """ + -+ self.message = 'Stubborn processes on dumb terminal' ++ self.message = "Stubborn processes on dumb terminal" + self.device = MagicMock() + self.iout = IndentOutput(device=self.device) + self.colors = get_colors() + + def tearDown(self): -+ ''' ++ """ + Remove instances after test run + :return: -+ ''' ++ """ + del self.message + del self.device + del self.iout + del self.colors + + def test_standard_output(self): -+ ''' ++ """ + Test console standard output. -+ ''' ++ """ + self.iout.put(self.message) + assert self.device.write.called + assert self.device.write.call_count == 5 -+ for idx, data in enumerate(['', str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']): ++ for idx, data in enumerate( ++ ["", str(self.colors["CYAN"]), self.message, str(self.colors["ENDC"]), "\n"] ++ ): + assert self.device.write.call_args_list[idx][0][0] == data + + def test_indent_output(self): -+ ''' ++ """ + Test indent distance. + :return: -+ ''' ++ """ + self.iout.put(self.message, indent=10) -+ for idx, data in enumerate([' ' * 10, str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']): ++ for idx, data in enumerate( ++ [ ++ " " * 10, ++ str(self.colors["CYAN"]), ++ self.message, ++ str(self.colors["ENDC"]), ++ "\n", ++ ] ++ ): + assert self.device.write.call_args_list[idx][0][0] == data + + def test_color_config(self): -+ ''' ++ """ + Test color config changes on each ident. + :return: -+ ''' ++ """ + -+ conf = {0: 'MAGENTA', 2: 'RED', 4: 'WHITE', 6: 'YELLOW'} ++ conf = {0: "MAGENTA", 2: "RED", 4: "WHITE", 6: "YELLOW"} + self.iout = IndentOutput(conf=conf, device=self.device) + for indent in sorted(list(conf)): + self.iout.put(self.message, indent=indent) + + step = 1 + for ident_key in sorted(list(conf)): -+ assert str(self.device.write.call_args_list[step][0][0]) == str(self.colors[conf[ident_key]]) ++ assert str(self.device.write.call_args_list[step][0][0]) == str( ++ self.colors[conf[ident_key]] ++ ) + step += 5 + + -+@skipIf(not bool(pytest), 'Pytest needs to be installed') ++@skipIf(not bool(pytest), "Pytest needs to be installed") +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltSupportCollectorTestCase(TestCase): -+ ''' ++ """ + Collector tests. -+ ''' ++ """ ++ + def setUp(self): -+ ''' ++ """ + Setup the test case + :return: -+ ''' -+ self.archive_path = '/highway/to/hell' ++ """ ++ self.archive_path = "/highway/to/hell" + self.output_device = MagicMock() + self.collector = SupportDataCollector(self.archive_path, self.output_device) + + def tearDown(self): -+ ''' ++ """ + Tear down the test case elements + :return: -+ ''' ++ """ + del self.collector + del self.archive_path + del self.output_device + -+ @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock()) ++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock()) + def test_archive_open(self): -+ ''' ++ """ + Test archive is opened. 
+ + :return: -+ ''' ++ """ + self.collector.open() + assert self.collector.archive_path == self.archive_path + with pytest.raises(salt.exceptions.SaltException) as err: + self.collector.open() -+ assert 'Archive already opened' in str(err) ++ assert "Archive already opened" in str(err) + -+ @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock()) ++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock()) + def test_archive_close(self): -+ ''' ++ """ + Test archive is opened. + + :return: -+ ''' ++ """ + self.collector.open() + self.collector._flush_content = lambda: None + self.collector.close() + assert self.collector.archive_path == self.archive_path + with pytest.raises(salt.exceptions.SaltException) as err: + self.collector.close() -+ assert 'Archive already closed' in str(err) ++ assert "Archive already closed" in str(err) + + def test_archive_addwrite(self): -+ ''' ++ """ + Test add to the archive a section and write to it. + + :return: -+ ''' ++ """ + archive = MagicMock() -+ with patch('salt.cli.support.collector.tarfile.TarFile', archive): ++ with patch("salt.cli.support.collector.tarfile.TarFile", archive): + self.collector.open() -+ self.collector.add('foo') -+ self.collector.write(title='title', data='data', output='null') ++ self.collector.add("foo") ++ self.collector.write(title="title", data="data", output="null") + self.collector._flush_content() + -+ assert (archive.bz2open().addfile.call_args[1]['fileobj'].read() -+ == to_bytes('title\n-----\n\nraw-content: data\n\n\n\n')) ++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes( ++ "title\n-----\n\nraw-content: data\n\n\n\n" ++ ) + -+ @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null')) ++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null")) + def test_archive_addlink(self): -+ ''' ++ """ + Test add to the archive a section and link an external file or directory to it. + + :return: -+ ''' ++ """ + archive = MagicMock() -+ with patch('salt.cli.support.collector.tarfile.TarFile', archive): ++ with patch("salt.cli.support.collector.tarfile.TarFile", archive): + self.collector.open() -+ self.collector.add('foo') -+ self.collector.link(title='Backup Path', path='/path/to/backup.config') ++ self.collector.add("foo") ++ self.collector.link(title="Backup Path", path="/path/to/backup.config") + self.collector._flush_content() + + assert archive.bz2open().addfile.call_count == 1 -+ assert (archive.bz2open().addfile.call_args[1]['fileobj'].read() -+ == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n')) ++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes( ++ "Backup Path\n-----------\n\npath=/dev/null\n\n\n" ++ ) + -+ @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null')) ++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null")) + def test_archive_discard_section(self): -+ ''' ++ """ + Test discard a section from the archive. 
+ + :return: -+ ''' ++ """ + archive = MagicMock() -+ with patch('salt.cli.support.collector.tarfile.TarFile', archive): ++ with patch("salt.cli.support.collector.tarfile.TarFile", archive): + self.collector.open() -+ self.collector.add('solar-interference') -+ self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config') -+ self.collector.add('foo') -+ self.collector.link(title='Backup Path', path='/path/to/backup.config') ++ self.collector.add("solar-interference") ++ self.collector.link( ++ title="Thermal anomaly", path="/path/to/another/great.config" ++ ) ++ self.collector.add("foo") ++ self.collector.link(title="Backup Path", path="/path/to/backup.config") + self.collector._flush_content() + assert archive.bz2open().addfile.call_count == 2 -+ assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read() -+ == to_bytes('Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n')) ++ assert archive.bz2open().addfile.mock_calls[0][2][ ++ "fileobj" ++ ].read() == to_bytes( ++ "Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n" ++ ) + self.collector.close() + + archive = MagicMock() -+ with patch('salt.cli.support.collector.tarfile.TarFile', archive): ++ with patch("salt.cli.support.collector.tarfile.TarFile", archive): + self.collector.open() -+ self.collector.add('solar-interference') -+ self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config') ++ self.collector.add("solar-interference") ++ self.collector.link( ++ title="Thermal anomaly", path="/path/to/another/great.config" ++ ) + self.collector.discard_current() -+ self.collector.add('foo') -+ self.collector.link(title='Backup Path', path='/path/to/backup.config') ++ self.collector.add("foo") ++ self.collector.link(title="Backup Path", path="/path/to/backup.config") + self.collector._flush_content() + assert archive.bz2open().addfile.call_count == 2 -+ assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read() -+ == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n')) ++ assert archive.bz2open().addfile.mock_calls[0][2][ ++ "fileobj" ++ ].read() == to_bytes("Backup Path\n-----------\n\npath=/dev/null\n\n\n") + self.collector.close() + + -+@skipIf(not bool(pytest), 'Pytest needs to be installed') ++@skipIf(not bool(pytest), "Pytest needs to be installed") +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltSupportRunnerTestCase(TestCase): -+ ''' ++ """ + Test runner class. -+ ''' ++ """ + + def setUp(self): -+ ''' ++ """ + Set up test suite. + :return: -+ ''' -+ self.archive_path = '/dev/null' ++ """ ++ self.archive_path = "/dev/null" + self.output_device = MagicMock() + self.runner = SaltSupport() -+ self.runner.collector = SupportDataCollector(self.archive_path, self.output_device) ++ self.runner.collector = SupportDataCollector( ++ self.archive_path, self.output_device ++ ) + + def tearDown(self): -+ ''' ++ """ + Tear down. + + :return: -+ ''' ++ """ + del self.archive_path + del self.output_device + del self.runner + + def test_function_config(self): -+ ''' ++ """ + Test function config formation. 
+
+        :return:
-+        '''
++        """
+        self.runner.config = {}
-+        msg = 'Electromagnetic energy loss'
-+        assert self.runner._setup_fun_config({'description': msg}) == {'print_metadata': False,
-+                                                                       'file_client': 'local',
-+                                                                       'fun': '', 'kwarg': {},
-+                                                                       'description': msg,
-+                                                                       'cache_jobs': False, 'arg': []}
++        msg = "Electromagnetic energy loss"
++        assert self.runner._setup_fun_config({"description": msg}) == {
++            "print_metadata": False,
++            "file_client": "local",
++            "fun": "",
++            "kwarg": {},
++            "description": msg,
++            "cache_jobs": False,
++            "arg": [],
++        }
+
+    def test_local_caller(self):
-+        '''
++        """
+        Test local caller.
+
+        :return:
-+        '''
-+        msg = 'Because of network lag due to too many people playing deathmatch'
++        """
++        msg = "Because of network lag due to too many people playing deathmatch"
+        caller = MagicMock()
+        caller().call = MagicMock(return_value=msg)
+
@@ -1781,19 +1928,22 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+        assert self.runner._local_call({}) == msg
+
+        caller().call = MagicMock(side_effect=SystemExit)
-+        assert self.runner._local_call({}) == 'Data is not available at this moment'
++        assert self.runner._local_call({}) == "Data is not available at this moment"
+
+        err_msg = "The UPS doesn't have a battery backup."
+        caller().call = MagicMock(side_effect=Exception(err_msg))
-+        assert self.runner._local_call({}) == "Unhandled exception occurred: The UPS doesn't have a battery backup."
++        assert (
++            self.runner._local_call({})
++            == "Unhandled exception occurred: The UPS doesn't have a battery backup."
++        )
+
+    def test_local_runner(self):
-+        '''
++        """
+        Test local runner.
+
+        :return:
-+        '''
-+        msg = 'Big to little endian conversion error'
++        """
++        msg = "Big to little endian conversion error"
+        runner = MagicMock()
+        runner().run = MagicMock(return_value=msg)
+
@@ -1802,163 +1952,202 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852
+        assert self.runner._local_run({}) == msg
+
+        runner().run = MagicMock(side_effect=SystemExit)
-+        assert self.runner._local_run({}) == 'Runner is not available at this moment'
++        assert self.runner._local_run({}) == "Runner is not available at this moment"
+
-+        err_msg = 'Trojan horse ran out of hay'
++        err_msg = "Trojan horse ran out of hay"
+        runner().run = MagicMock(side_effect=Exception(err_msg))
-+        assert self.runner._local_run({}) == 'Unhandled exception occurred: Trojan horse ran out of hay'
++        assert (
++            self.runner._local_run({})
++            == "Unhandled exception occurred: Trojan horse ran out of hay"
++        )
+
-+    @patch('salt.cli.support.intfunc', MagicMock(spec=[]))
++    @patch("salt.cli.support.intfunc", MagicMock(spec=[]))
+    def test_internal_function_call_stub(self):
-+        '''
++        """
+        Test missing internal function call is handled accordingly.
+
+        :return:
-+        '''
++        """
+        self.runner.out = MagicMock()
-+        out = self.runner._internal_function_call({'fun': 'everythingisawesome',
-+                                                   'arg': [], 'kwargs': {}})
-+        assert out == 'Function everythingisawesome is not available'
++        out = self.runner._internal_function_call(
++            {"fun": "everythingisawesome", "arg": [], "kwargs": {}}
++        )
++        assert out == "Function everythingisawesome is not available"
+
+    def test_internal_function_call(self):
-+        '''
++        """
+        Test internal function call is handled accordingly.
+ + :return: -+ ''' -+ msg = 'Internet outage' ++ """ ++ msg = "Internet outage" + intfunc = MagicMock() + intfunc.everythingisawesome = MagicMock(return_value=msg) + self.runner.out = MagicMock() -+ with patch('salt.cli.support.intfunc', intfunc): -+ out = self.runner._internal_function_call({'fun': 'everythingisawesome', -+ 'arg': [], 'kwargs': {}}) ++ with patch("salt.cli.support.intfunc", intfunc): ++ out = self.runner._internal_function_call( ++ {"fun": "everythingisawesome", "arg": [], "kwargs": {}} ++ ) + assert out == msg + + def test_get_action(self): -+ ''' ++ """ + Test action meta gets parsed. + + :return: -+ ''' -+ action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}} -+ assert self.runner._get_action(action_meta) == ('List jobs filter', None, -+ {'fun': 'run:jobs.list_jobs_filter', 'kwargs': {}, 'arg': [1]}) -+ action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}} -+ assert self.runner._get_action(action_meta) == ('Information about "usbmux"', None, -+ {'fun': 'user.info', 'kwargs': {}, 'arg': ['usbmux']}) ++ """ ++ action_meta = { ++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]} ++ } ++ assert self.runner._get_action(action_meta) == ( ++ "List jobs filter", ++ None, ++ {"fun": "run:jobs.list_jobs_filter", "kwargs": {}, "arg": [1]}, ++ ) ++ action_meta = { ++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]} ++ } ++ assert self.runner._get_action(action_meta) == ( ++ 'Information about "usbmux"', ++ None, ++ {"fun": "user.info", "kwargs": {}, "arg": ["usbmux"]}, ++ ) + + def test_extract_return(self): -+ ''' ++ """ + Test extract return from the output. + + :return: -+ ''' -+ out = {'key': 'value'} ++ """ ++ out = {"key": "value"} + assert self.runner._extract_return(out) == out -+ assert self.runner._extract_return({'return': out}) == out ++ assert self.runner._extract_return({"return": out}) == out + + def test_get_action_type(self): -+ ''' ++ """ + Test action meta determines action type. + + :return: -+ ''' -+ action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}} -+ assert self.runner._get_action_type(action_meta) == 'run' ++ """ ++ action_meta = { ++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]} ++ } ++ assert self.runner._get_action_type(action_meta) == "run" + -+ action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}} -+ assert self.runner._get_action_type(action_meta) == 'call' ++ action_meta = { ++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]} ++ } ++ assert self.runner._get_action_type(action_meta) == "call" + -+ @patch('os.path.exists', MagicMock(return_value=True)) ++ @patch("os.path.exists", MagicMock(return_value=True)) + def test_cleanup(self): -+ ''' ++ """ + Test cleanup routine. 
+
+        :return:
-+        '''
-+        arch = '/tmp/killme.zip'
++        """
++        arch = "/tmp/killme.zip"
+        unlink = MagicMock()
-+        with patch('os.unlink', unlink):
-+            self.runner.config = {'support_archive': arch}
++        with patch("os.unlink", unlink):
++            self.runner.config = {"support_archive": arch}
+            self.runner.out = MagicMock()
+            self.runner._cleanup()
+
-+        assert self.runner.out.warning.call_args[0][0] == 'Terminated earlier, cleaning up'
++        assert (
++            self.runner.out.warning.call_args[0][0]
++            == "Terminated earlier, cleaning up"
++        )
+        unlink.assert_called_once_with(arch)
+
-+    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch("os.path.exists", MagicMock(return_value=True))
+    def test_check_existing_archive(self):
-+        '''
++        """
+        Test check existing archive.
+
+        :return:
-+        '''
-+        arch = '/tmp/endothermal-recalibration.zip'
++        """
++        arch = "/tmp/endothermal-recalibration.zip"
+        unlink = MagicMock()
-+        with patch('os.unlink', unlink), patch('os.path.exists', MagicMock(return_value=False)):
-+            self.runner.config = {'support_archive': '',
-+                                  'support_archive_force_overwrite': True}
++        with patch("os.unlink", unlink), patch(
++            "os.path.exists", MagicMock(return_value=False)
++        ):
++            self.runner.config = {
++                "support_archive": "",
++                "support_archive_force_overwrite": True,
++            }
+            self.runner.out = MagicMock()
+            assert self.runner._check_existing_archive()
+            assert self.runner.out.warning.call_count == 0
+
-+        with patch('os.unlink', unlink):
-+            self.runner.config = {'support_archive': arch,
-+                                  'support_archive_force_overwrite': False}
++        with patch("os.unlink", unlink):
++            self.runner.config = {
++                "support_archive": arch,
++                "support_archive_force_overwrite": False,
++            }
+            self.runner.out = MagicMock()
+            assert not self.runner._check_existing_archive()
-+            assert self.runner.out.warning.call_args[0][0] == 'File {} already exists.'.format(arch)
++            assert self.runner.out.warning.call_args[0][
++                0
++            ] == "File {} already exists.".format(arch)
+
-+        with patch('os.unlink', unlink):
-+            self.runner.config = {'support_archive': arch,
-+                                  'support_archive_force_overwrite': True}
++        with patch("os.unlink", unlink):
++            self.runner.config = {
++                "support_archive": arch,
++                "support_archive_force_overwrite": True,
++            }
+            self.runner.out = MagicMock()
+            assert self.runner._check_existing_archive()
-+            assert self.runner.out.warning.call_args[0][0] == 'Overwriting existing archive: {}'.format(arch)
++            assert self.runner.out.warning.call_args[0][
++                0
++            ] == "Overwriting existing archive: {}".format(arch)
+
+
-+@skipIf(not bool(pytest), 'Pytest needs to be installed')
++@skipIf(not bool(pytest), "Pytest needs to be installed")
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class ProfileIntegrityTestCase(TestCase):
-+    '''
++    """
+    Default profile integrity
-+    '''
++    """
++
+    def setUp(self):
-+        '''
++        """
+        Set up test suite.
+
+        :return:
-+        '''
++        """
+        self.profiles = {}
-+        profiles = os.path.join(os.path.dirname(salt.cli.support.collector.__file__), 'profiles')
++        profiles = os.path.join(
++            os.path.dirname(salt.cli.support.collector.__file__), "profiles"
++        )
+        for profile in os.listdir(profiles):
-+            self.profiles[profile.split('.')[0]] = os.path.join(profiles, profile)
++            self.profiles[profile.split(".")[0]] = os.path.join(profiles, profile)
+
+    def tearDown(self):
-+        '''
++        """
+        Tear down test suite.
+
+        :return:
-+        '''
++        """
+        del self.profiles
+
+    def _render_template_to_yaml(self, name, *args, **kwargs):
-+        '''
++        """
+        Get template reference for rendering.
+ :return: -+ ''' ++ """ + with salt.utils.files.fopen(self.profiles[name]) as t_fh: + template = t_fh.read() -+ return yaml.load(jinja2.Environment().from_string(template).render(*args, **kwargs)) ++ return yaml.load( ++ jinja2.Environment().from_string(template).render(*args, **kwargs) ++ ) + + def test_non_template_profiles_parseable(self): -+ ''' ++ """ + Test shipped default profile is YAML parse-able. + + :return: -+ ''' -+ for t_name in ['default', 'jobs-active', 'jobs-last', 'network', 'postgres']: ++ """ ++ for t_name in ["default", "jobs-active", "jobs-last", "network", "postgres"]: + with salt.utils.files.fopen(self.profiles[t_name]) as ref: + try: + yaml.load(ref) @@ -1968,29 +2157,36 @@ index 0000000000000000000000000000000000000000..85ea957d7921257d9ebfff3c13643852 + assert parsed + + def test_users_template_profile(self): -+ ''' ++ """ + Test users template profile. + + :return: -+ ''' -+ users_data = self._render_template_to_yaml('users', salt=MagicMock(return_value=['pokemon'])) -+ assert len(users_data['all-users']) == 5 -+ for user_data in users_data['all-users']: -+ for tgt in ['user.list_groups', 'shadow.info', 'cron.raw_cron']: ++ """ ++ users_data = self._render_template_to_yaml( ++ "users", salt=MagicMock(return_value=["pokemon"]) ++ ) ++ assert len(users_data["all-users"]) == 5 ++ for user_data in users_data["all-users"]: ++ for tgt in ["user.list_groups", "shadow.info", "cron.raw_cron"]: + if tgt in user_data: -+ assert user_data[tgt]['args'] == ['pokemon'] ++ assert user_data[tgt]["args"] == ["pokemon"] + + def test_jobs_trace_template_profile(self): -+ ''' ++ """ + Test jobs-trace template profile. + + :return: -+ ''' -+ jobs_trace = self._render_template_to_yaml('jobs-trace', runners=MagicMock(return_value=['0000'])) -+ assert len(jobs_trace['jobs-details']) == 1 -+ assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['info'] == 'Details on JID 0000' -+ assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['args'] == [0] ++ """ ++ jobs_trace = self._render_template_to_yaml( ++ "jobs-trace", runners=MagicMock(return_value=["0000"]) ++ ) ++ assert len(jobs_trace["jobs-details"]) == 1 ++ assert ( ++ jobs_trace["jobs-details"][0]["run:jobs.list_job"]["info"] ++ == "Details on JID 0000" ++ ) ++ assert jobs_trace["jobs-details"][0]["run:jobs.list_job"]["args"] == [0] -- -2.23.0 +2.29.2 diff --git a/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch index 9c17720..519ef13 100644 --- a/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch +++ b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch @@ -1,4 +1,4 @@ -From cc3bd759bc0e4cc3414ccc5a2928c593fa2eee04 Mon Sep 17 00:00:00 2001 +From fec7f65b4debede8cf0eef335182fce2206e200d Mon Sep 17 00:00:00 2001 From: Maximilian Meister Date: Thu, 3 May 2018 15:52:23 +0200 Subject: [PATCH] enable passing a unix_socket for mysql returners @@ -15,14 +15,19 @@ the refactor is done upstream Signed-off-by: Maximilian Meister --- - salt/returners/mysql.py | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) + salt/returners/mysql.py | 63 ++++++++++++++++++++--------------------- + 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py -index 69599ec36a..ff9d380843 100644 +index b7bb05164f..4aa8aeddfa 100644 --- a/salt/returners/mysql.py +++ b/salt/returners/mysql.py -@@ -18,6 +18,7 @@ config. 
These are the defaults: +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + """ + Return data to a mysql server + +@@ -18,6 +17,7 @@ config. These are the defaults: mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 @@ -30,7 +35,7 @@ index 69599ec36a..ff9d380843 100644 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. -@@ -43,6 +44,7 @@ optional. The following ssl options are simply for illustration purposes: +@@ -43,6 +43,7 @@ optional. The following ssl options are simply for illustration purposes: alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' @@ -38,37 +43,195 @@ index 69599ec36a..ff9d380843 100644 Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. -@@ -198,7 +200,8 @@ def _get_options(ret=None): - 'port': 3306, - 'ssl_ca': None, - 'ssl_cert': None, -- 'ssl_key': None} -+ 'ssl_key': None, -+ 'unix_socket': '/tmp/mysql.sock'} +@@ -138,22 +139,15 @@ To override individual configuration items, append --return_kwargs '{"key:": "va + salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' - attrs = {'host': 'host', - 'user': 'user', -@@ -207,7 +210,8 @@ def _get_options(ret=None): - 'port': 'port', - 'ssl_ca': 'ssl_ca', - 'ssl_cert': 'ssl_cert', -- 'ssl_key': 'ssl_key'} -+ 'ssl_key': 'ssl_key', -+ 'unix_socket': 'unix_socket'} + """ +-from __future__ import absolute_import, print_function, unicode_literals - _options = salt.returners.get_returner_options(__virtualname__, - ret, -@@ -261,7 +265,8 @@ def _get_serv(ret=None, commit=False): - passwd=_options.get('pass'), - db=_options.get('db'), - port=_options.get('port'), -- ssl=ssl_options) -+ ssl=ssl_options, -+ unix_socket=_options.get('unix_socket')) + import logging + import sys +- +-# Import python libs + from contextlib import contextmanager + + import salt.exceptions +- +-# Import salt libs + import salt.returners + import salt.utils.jid + import salt.utils.json +- +-# Import 3rd-party libs + from salt.ext import six + + # Let's not allow PyLint complain about string substitution +@@ -205,6 +199,7 @@ def _get_options(ret=None): + "ssl_ca": None, + "ssl_cert": None, + "ssl_key": None, ++ "unix_socket": "/tmp/mysql.sock", + } + + attrs = { +@@ -216,6 +211,7 @@ def _get_options(ret=None): + "ssl_ca": "ssl_ca", + "ssl_cert": "ssl_cert", + "ssl_key": "ssl_key", ++ "unix_socket": "unix_socket", + } + + _options = salt.returners.get_returner_options( +@@ -227,8 +223,8 @@ def _get_options(ret=None): + defaults=defaults, + ) + # post processing +- for k, v in six.iteritems(_options): +- if isinstance(v, six.string_types) and v.lower() == "none": ++ for k, v in _options.items(): ++ if isinstance(v, str) and v.lower() == "none": + # Ensure 'None' is rendered as None + _options[k] = None + if k == "port": +@@ -274,6 +270,7 @@ def _get_serv(ret=None, commit=False): + db=_options.get("db"), + port=_options.get("port"), + ssl=ssl_options, ++ unix_socket=_options.get("unix_socket"), + ) try: - __context__['mysql_returner_conn'] = conn +@@ -291,9 +288,9 @@ def _get_serv(ret=None, commit=False): + yield cursor + except MySQLdb.DatabaseError as err: + error = err.args +- sys.stderr.write(six.text_type(error)) ++ sys.stderr.write(str(error)) + cursor.execute("ROLLBACK") +- six.reraise(*sys.exc_info()) ++ raise + else: + if commit: 
+ cursor.execute("COMMIT") +@@ -515,8 +512,8 @@ def _purge_jobs(timestamp): + log.error( + "mysql returner archiver was unable to delete contents of table 'jids'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + try: + sql = "delete from `salt_returns` where alter_time < %s" +@@ -526,8 +523,8 @@ def _purge_jobs(timestamp): + log.error( + "mysql returner archiver was unable to delete contents of table 'salt_returns'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + try: + sql = "delete from `salt_events` where alter_time < %s" +@@ -537,8 +534,8 @@ def _purge_jobs(timestamp): + log.error( + "mysql returner archiver was unable to delete contents of table 'salt_events'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + return True + +@@ -556,7 +553,7 @@ def _archive_jobs(timestamp): + for table_name in source_tables: + try: + tmp_table_name = table_name + "_archive" +- sql = "create table if not exists {0} like {1}".format( ++ sql = "create table if not exists {} like {}".format( + tmp_table_name, table_name + ) + cur.execute(sql) +@@ -566,11 +563,11 @@ def _archive_jobs(timestamp): + log.error( + "mysql returner archiver was unable to create the archive tables." + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + try: +- sql = "insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)".format( ++ sql = "insert into `{}` select * from `{}` where jid in (select distinct jid from salt_returns where alter_time < %s)".format( + target_tables["jids"], "jids" + ) + cur.execute(sql, (timestamp,)) +@@ -579,14 +576,14 @@ def _archive_jobs(timestamp): + log.error( + "mysql returner archiver was unable to copy contents of table 'jids'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + except Exception as e: # pylint: disable=broad-except + log.error(e) + raise + + try: +- sql = "insert into `{0}` select * from `{1}` where alter_time < %s".format( ++ sql = "insert into `{}` select * from `{}` where alter_time < %s".format( + target_tables["salt_returns"], "salt_returns" + ) + cur.execute(sql, (timestamp,)) +@@ -595,11 +592,11 @@ def _archive_jobs(timestamp): + log.error( + "mysql returner archiver was unable to copy contents of table 'salt_returns'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + try: +- sql = "insert into `{0}` select * from `{1}` where alter_time < %s".format( ++ sql = "insert into `{}` select * from `{}` where alter_time < %s".format( + target_tables["salt_events"], "salt_events" + ) + cur.execute(sql, (timestamp,)) +@@ -608,8 +605,8 @@ def _archive_jobs(timestamp): + log.error( + "mysql returner archiver was unable to copy contents of table 'salt_events'" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) + + return 
_purge_jobs(timestamp) + +@@ -623,7 +620,7 @@ def clean_old_jobs(): + if __opts__.get("keep_jobs", False) and int(__opts__.get("keep_jobs", 0)) > 0: + try: + with _get_serv() as cur: +- sql = "select date_sub(now(), interval {0} hour) as stamp;".format( ++ sql = "select date_sub(now(), interval {} hour) as stamp;".format( + __opts__["keep_jobs"] + ) + cur.execute(sql) +@@ -638,5 +635,5 @@ def clean_old_jobs(): + log.error( + "Mysql returner was unable to get timestamp for purge/archive of jobs" + ) +- log.error(six.text_type(e)) +- raise salt.exceptions.SaltRunnerError(six.text_type(e)) ++ log.error(str(e)) ++ raise salt.exceptions.SaltRunnerError(str(e)) -- -2.16.4 +2.29.2 diff --git a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch index 000cd9d..fafe014 100644 --- a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch +++ b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch @@ -1,4 +1,4 @@ -From 173444cecc1e7b4867570f1f8764db1b7f82061e Mon Sep 17 00:00:00 2001 +From 1cea7d065d8da7c713af8136162c21187d5186f5 Mon Sep 17 00:00:00 2001 From: Cedric Bosdonnat Date: Wed, 14 Oct 2020 12:39:16 +0200 Subject: [PATCH] Ensure virt.update stop_on_reboot is updated with its @@ -14,22 +14,22 @@ this value. 2 files changed, 3 insertions(+) diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 87ab7ca12d..9bc7bc6093 100644 +index 8e2180608a..e3960a5a90 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py -@@ -2742,6 +2742,7 @@ def update( +@@ -2738,6 +2738,7 @@ def update( ] - data = {k: v for k, v in six.iteritems(locals()) if bool(v)} + data = {k: v for k, v in locals().items() if bool(v)} + data["stop_on_reboot"] = stop_on_reboot if boot_dev: data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} - need_update = salt.utils.xmlutil.change_xml( + need_update = ( diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index ca5e80d2d2..fbc03cf7a6 100644 +index fba821ea53..83152eda6e 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py -@@ -1778,6 +1778,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -1777,6 +1777,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): 1048576 1048576 1 @@ -37,7 +37,7 @@ index ca5e80d2d2..fbc03cf7a6 100644 hvm -@@ -2350,6 +2351,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): +@@ -2349,6 +2350,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): 1048576 1048576 1 @@ -46,6 +46,6 @@ index ca5e80d2d2..fbc03cf7a6 100644 hvm -- -2.28.0 +2.29.2 diff --git a/fall-back-to-pymysql.patch b/fall-back-to-pymysql.patch index 4f3c55b..e3e0e9f 100644 --- a/fall-back-to-pymysql.patch +++ b/fall-back-to-pymysql.patch @@ -1,4 +1,4 @@ -From f0098b4b9e5abaaca7bbc6c17f5a60bb2129dda5 Mon Sep 17 00:00:00 2001 +From 188a97fc20c3e24950b82dc6fcd0da878509cf7a Mon Sep 17 00:00:00 2001 From: Maximilian Meister Date: Thu, 5 Apr 2018 13:23:23 +0200 Subject: [PATCH] fall back to PyMySQL @@ -11,10 +11,10 @@ Signed-off-by: Maximilian Meister 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py -index 87e2361e28..e785e5219c 100644 +index fdfe35158a..385e4d92a3 100644 --- a/salt/modules/mysql.py +++ b/salt/modules/mysql.py -@@ -58,7 +58,7 @@ try: +@@ -55,7 +55,7 @@ try: import MySQLdb.cursors import MySQLdb.converters from MySQLdb.constants import FIELD_TYPE, FLAG @@ -23,7 +23,7 @@ index 87e2361e28..e785e5219c 100644 except ImportError: try: # 
MySQLdb import failed, try to import PyMySQL -@@ -68,7 +68,7 @@ except ImportError: +@@ -66,7 +66,7 @@ except ImportError: import MySQLdb.cursors import MySQLdb.converters from MySQLdb.constants import FIELD_TYPE, FLAG @@ -33,6 +33,6 @@ index 87e2361e28..e785e5219c 100644 MySQLdb = None -- -2.16.4 +2.29.2 diff --git a/fix-__mount_device-wrapper-254.patch b/fix-__mount_device-wrapper-254.patch index c374831..d0a191d 100644 --- a/fix-__mount_device-wrapper-254.patch +++ b/fix-__mount_device-wrapper-254.patch @@ -1,4 +1,4 @@ -From 7ad2d6067400f55dc7b70745216fab20620f35fd Mon Sep 17 00:00:00 2001 +From 1e00e2b72321b5312efb7b8b426a037c8db72b79 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Wed, 29 Jul 2020 16:11:47 +0200 Subject: [PATCH] Fix __mount_device wrapper (#254) @@ -17,9 +17,9 @@ Fix #58012 (cherry picked from commit 2089645e2478751dc795127cfd14d0385c2e0899) --- changelog/58012.fixed | 1 + - salt/states/btrfs.py | 6 +++--- + salt/states/btrfs.py | 4 ++-- tests/unit/states/test_btrfs.py | 27 +++++++++++++++++++++++++++ - 3 files changed, 31 insertions(+), 3 deletions(-) + 3 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 changelog/58012.fixed diff --git a/changelog/58012.fixed b/changelog/58012.fixed @@ -31,27 +31,25 @@ index 0000000000..13a1ef747d +Fix btrfs state decorator, that produces exceptions when creating subvolumes. \ No newline at end of file diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py -index af78c8ae00..d0d6095c46 100644 +index 1374bbffb4..62a3553758 100644 --- a/salt/states/btrfs.py +++ b/salt/states/btrfs.py -@@ -103,9 +103,9 @@ def __mount_device(action): - ''' +@@ -103,8 +103,8 @@ def __mount_device(action): + @functools.wraps(action) def wrapper(*args, **kwargs): -- name = kwargs['name'] -- device = kwargs['device'] -- use_default = kwargs.get('use_default', False) +- name = kwargs["name"] +- device = kwargs["device"] + name = kwargs.get("name", args[0] if args else None) + device = kwargs.get("device", args[1] if len(args) > 1 else None) -+ use_default = kwargs.get("use_default", False) + use_default = kwargs.get("use_default", False) ret = { - 'name': name, diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py -index c68f6279dc..c722630aef 100644 +index b8f70bccfe..dceb971aa1 100644 --- a/tests/unit/states/test_btrfs.py +++ b/tests/unit/states/test_btrfs.py -@@ -245,6 +245,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): +@@ -231,6 +231,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): mount.assert_called_once() umount.assert_called_once() @@ -82,10 +80,10 @@ index c68f6279dc..c722630aef 100644 + mount.assert_called_once() + umount.assert_called_once() + - @patch('salt.states.btrfs._umount') - @patch('salt.states.btrfs._mount') + @patch("salt.states.btrfs._umount") + @patch("salt.states.btrfs._mount") def test_subvolume_created_exists_test(self, mount, umount): -- -2.27.0 +2.29.2 diff --git a/fix-a-test-and-some-variable-names-229.patch b/fix-a-test-and-some-variable-names-229.patch index 7d20047..74670da 100644 --- a/fix-a-test-and-some-variable-names-229.patch +++ b/fix-a-test-and-some-variable-names-229.patch @@ -1,4 +1,4 @@ -From c1e66b9953c753dc9eff3652aef316e19c22deb4 Mon Sep 17 00:00:00 2001 +From daf29460408a5e0eb042b3c234c7e21a6b994cf1 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 12 May 2020 14:16:23 +0200 Subject: [PATCH] Fix a test and some variable names (#229) @@ -7,60 +7,23 @@ Subject: [PATCH] Fix a test and some variable names (#229) * Fix test_core tests for 
fqdns errors --- - salt/modules/network.py | 2 +- - tests/unit/grains/test_core.py | 24 +++++++++++++----------- - 2 files changed, 14 insertions(+), 12 deletions(-) + tests/unit/grains/test_core.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/salt/modules/network.py b/salt/modules/network.py -index 880f4f8d5f..9e11eb816e 100644 ---- a/salt/modules/network.py -+++ b/salt/modules/network.py -@@ -1946,4 +1946,4 @@ def fqdns(): - elapsed = time.time() - start - log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) - -- return {"fqdns": sorted(list(fqdns))} -\ No newline at end of file -+ return {"fqdns": sorted(list(fqdns))} diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 94e4199814..36aa49f232 100644 +index 196dbcf83d..918a9155cb 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -1122,20 +1122,22 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - - for errno in (0, core.HOST_NOT_FOUND, core.NO_DATA): - mock_log = MagicMock() -+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): -+ with patch.object(socket, 'gethostbyaddr', -+ side_effect=_gen_gethostbyaddr(errno)): -+ with patch('salt.modules.network.log', mock_log): -+ self.assertEqual(core.fqdns(), {'fqdns': []}) -+ mock_log.debug.assert_called() -+ mock_log.error.assert_not_called() -+ -+ mock_log = MagicMock() -+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): - with patch.object(socket, 'gethostbyaddr', -- side_effect=_gen_gethostbyaddr(errno)): -- with patch('salt.grains.core.log', mock_log): -+ side_effect=_gen_gethostbyaddr(-1)): -+ with patch('salt.modules.network.log', mock_log): - self.assertEqual(core.fqdns(), {'fqdns': []}) +@@ -1416,7 +1416,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with patch("salt.modules.network.log", mock_log): + self.assertEqual(core.fqdns(), {"fqdns": []}) mock_log.debug.assert_called_once() -- mock_log.error.assert_not_called() -- -- mock_log = MagicMock() -- with patch.object(socket, 'gethostbyaddr', -- side_effect=_gen_gethostbyaddr(-1)): -- with patch('salt.grains.core.log', mock_log): -- self.assertEqual(core.fqdns(), {'fqdns': []}) -- mock_log.debug.assert_not_called() -- mock_log.error.assert_called_once() +- mock_log.error.assert_called() + mock_log.error.assert_called_once() - @patch.object(salt.utils.platform, 'is_windows', MagicMock(return_value=False)) - @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8'])) + @patch.object(salt.utils.platform, "is_windows", MagicMock(return_value=False)) + @patch( -- -2.26.2 +2.29.2 diff --git a/fix-a-wrong-rebase-in-test_core.py-180.patch b/fix-a-wrong-rebase-in-test_core.py-180.patch index c795da1..309c53e 100644 --- a/fix-a-wrong-rebase-in-test_core.py-180.patch +++ b/fix-a-wrong-rebase-in-test_core.py-180.patch @@ -1,4 +1,4 @@ -From 67830ea17ae1e87a6bffca2a9542788c200d7dd9 Mon Sep 17 00:00:00 2001 +From 3d5f3cff6b43d7aba35063e970d016401bb82921 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Fri, 25 Oct 2019 15:43:16 +0200 Subject: [PATCH] Fix a wrong rebase in test_core.py (#180) @@ -17,89 +17,128 @@ This patch ignore this kind of issue during the grains creation. 
(cherry picked from commit bd0213bae00b737b24795bec3c030ebfe476e0d8) --- - salt/grains/core.py | 4 ++-- - tests/unit/grains/test_core.py | 45 ------------------------------------------ - 2 files changed, 2 insertions(+), 47 deletions(-) + salt/grains/core.py | 8 +++- + tests/unit/grains/test_core.py | 80 ---------------------------------- + 2 files changed, 6 insertions(+), 82 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 68c43482d3..20950988d9 100644 +index a2983e388b..5dff6ecfd4 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1000,7 +1000,7 @@ def _virtual(osdata): +@@ -1066,7 +1066,9 @@ def _virtual(osdata): except UnicodeDecodeError: # Some firmwares provide non-valid 'product_name' # files, ignore them - pass -+ log.debug('The content in /sys/devices/virtual/dmi/id/product_name is not valid') - except IOError: ++ log.debug( ++ "The content in /sys/devices/virtual/dmi/id/product_name is not valid" ++ ) + except OSError: pass - elif osdata['kernel'] == 'FreeBSD': -@@ -2568,7 +2568,7 @@ def _hw_data(osdata): + elif osdata["kernel"] == "FreeBSD": +@@ -2716,7 +2718,9 @@ def _hw_data(osdata): except UnicodeDecodeError: # Some firmwares provide non-valid 'product_name' # files, ignore them - pass -+ log.debug('The content in /sys/devices/virtual/dmi/id/product_name is not valid') - except (IOError, OSError) as err: ++ log.debug( ++ "The content in /sys/devices/virtual/dmi/id/product_name is not valid" ++ ) + except OSError as err: # PermissionError is new to Python 3, but corresponds to the EACESS and # EPERM error numbers. Use those instead here for PY2 compatibility. diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 33d6a9507f..7fa2436e58 100644 +index 0dc3423646..85d434dd9d 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -1560,51 +1560,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - assert all([x is not None for x in info]) - assert all([isinstance(x, int) for x in info]) +@@ -2047,86 +2047,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + result = core.path() + assert result == {"path": path, "systempath": comps}, result -- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') +- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") +- @patch("os.path.exists") +- @patch("salt.utils.platform.is_proxy") - def test_kernelparams_return(self): - expectations = [ -- ('BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64', -- {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-3.10.0-693.2.2.el7.x86_64')]}), -- ('root=/dev/mapper/centos_daemon-root', -- {'kernelparams': [('root', '/dev/mapper/centos_daemon-root')]}), -- ('rhgb quiet ro', -- {'kernelparams': [('rhgb', None), ('quiet', None), ('ro', None)]}), -- ('param="value1"', -- {'kernelparams': [('param', 'value1')]}), -- ('param="value1 value2 value3"', -- {'kernelparams': [('param', 'value1 value2 value3')]}), -- ('param="value1 value2 value3" LANG="pl" ro', -- {'kernelparams': [('param', 'value1 value2 value3'), ('LANG', 'pl'), ('ro', None)]}), -- ('ipv6.disable=1', -- {'kernelparams': [('ipv6.disable', '1')]}), -- ('param="value1:value2:value3"', -- {'kernelparams': [('param', 'value1:value2:value3')]}), -- ('param="value1,value2,value3"', -- {'kernelparams': [('param', 'value1,value2,value3')]}), -- ('param="value1" param="value2" param="value3"', -- {'kernelparams': [('param', 'value1'), ('param', 'value2'), ('param', 'value3')]}), +- ( +- 
"BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64", +- { +- "kernelparams": [ +- ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64") +- ] +- }, +- ), +- ( +- "root=/dev/mapper/centos_daemon-root", +- {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]}, +- ), +- ( +- "rhgb quiet ro", +- {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]}, +- ), +- ('param="value1"', {"kernelparams": [("param", "value1")]}), +- ( +- 'param="value1 value2 value3"', +- {"kernelparams": [("param", "value1 value2 value3")]}, +- ), +- ( +- 'param="value1 value2 value3" LANG="pl" ro', +- { +- "kernelparams": [ +- ("param", "value1 value2 value3"), +- ("LANG", "pl"), +- ("ro", None), +- ] +- }, +- ), +- ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}), +- ( +- 'param="value1:value2:value3"', +- {"kernelparams": [("param", "value1:value2:value3")]}, +- ), +- ( +- 'param="value1,value2,value3"', +- {"kernelparams": [("param", "value1,value2,value3")]}, +- ), +- ( +- 'param="value1" param="value2" param="value3"', +- { +- "kernelparams": [ +- ("param", "value1"), +- ("param", "value2"), +- ("param", "value3"), +- ] +- }, +- ), - ] - - for cmdline, expectation in expectations: -- with patch('salt.utils.files.fopen', mock_open(read_data=cmdline)): +- with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)): - self.assertEqual(core.kernelparams(), expectation) - -- @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') -- @patch('os.path.exists') -- @patch('salt.utils.platform.is_proxy') +- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") +- @patch("os.path.exists") +- @patch("salt.utils.platform.is_proxy") - def test__hw_data_linux_empty(self, is_proxy, exists): - is_proxy.return_value = False - exists.return_value = True -- with patch('salt.utils.files.fopen', mock_open(read_data='')): -- self.assertEqual(core._hw_data({'kernel': 'Linux'}), { -- 'biosreleasedate': '', -- 'biosversion': '', -- 'manufacturer': '', -- 'productname': '', -- 'serialnumber': '', -- 'uuid': '' -- }) +- with patch("salt.utils.files.fopen", mock_open(read_data="")): +- self.assertEqual( +- core._hw_data({"kernel": "Linux"}), +- { +- "biosreleasedate": "", +- "biosversion": "", +- "manufacturer": "", +- "productname": "", +- "serialnumber": "", +- "uuid": "", +- }, +- ) - - @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') - @skipIf(six.PY2, 'UnicodeDecodeError is throw in Python 3') - @patch('os.path.exists') + @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") + @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3") + @patch("os.path.exists") -- -2.16.4 +2.29.2 diff --git a/fix-aptpkg-systemd-call-bsc-1143301.patch b/fix-aptpkg-systemd-call-bsc-1143301.patch index 8ef7ab3..0890e7f 100644 --- a/fix-aptpkg-systemd-call-bsc-1143301.patch +++ b/fix-aptpkg-systemd-call-bsc-1143301.patch @@ -1,40 +1,41 @@ -From c2989e749f04aa8477130df649e550f5349a9a1f Mon Sep 17 00:00:00 2001 +From 5dadda6822323f409c99112244c2c809e58126e1 Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Wed, 31 Jul 2019 15:29:03 +0200 Subject: [PATCH] Fix aptpkg systemd call (bsc#1143301) --- salt/modules/aptpkg.py | 2 +- - tests/unit/modules/test_aptpkg.py | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) + tests/unit/modules/test_aptpkg.py | 3 +-- + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 13484c96bc..a5b039fc79 100644 +index bf90d0614f..c47ee852f4 100644 --- a/salt/modules/aptpkg.py 
+++ b/salt/modules/aptpkg.py -@@ -168,7 +168,7 @@ def _call_apt(args, scope=True, **kwargs): - ''' - cmd = [] - if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True): -- cmd.extend(['systemd-run', '--scope']) -+ cmd.extend(['systemd-run', '--scope', '--description "{0}"'.format(__name__)]) +@@ -160,7 +160,7 @@ def _call_apt(args, scope=True, **kwargs): + and salt.utils.systemd.has_scope(__context__) + and __salt__["config.get"]("systemd.scope", True) + ): +- cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)]) ++ cmd.extend(["systemd-run", "--scope", '--description "{}"'.format(__name__)]) cmd.extend(args) - params = {'output_loglevel': 'trace', + params = { diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index 10e960f090..88eed062c4 100644 +index 77d8b84896..c3769a7df1 100644 --- a/tests/unit/modules/test_aptpkg.py +++ b/tests/unit/modules/test_aptpkg.py -@@ -645,7 +645,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(aptpkg.__salt__, {'cmd.run_all': MagicMock(), 'config.get': MagicMock(return_value=True)}): - aptpkg._call_apt(['apt-get', 'purge', 'vim']) # pylint: disable=W0106 - aptpkg.__salt__['cmd.run_all'].assert_called_once_with( -- ['systemd-run', '--scope', 'apt-get', 'purge', 'vim'], env={}, -+ ['systemd-run', '--scope', '--description "salt.modules.aptpkg"', 'apt-get', 'purge', 'vim'], env={}, - output_loglevel='trace', python_shell=False) - - def test_call_apt_with_kwargs(self): +@@ -896,8 +896,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin): + [ + "systemd-run", + "--scope", +- "--description", +- '"salt.modules.aptpkg"', ++ '--description "salt.modules.aptpkg"', + "apt-get", + "purge", + "vim", -- -2.16.4 +2.29.2 diff --git a/fix-async-batch-multiple-done-events.patch b/fix-async-batch-multiple-done-events.patch index 633c571..d655138 100644 --- a/fix-async-batch-multiple-done-events.patch +++ b/fix-async-batch-multiple-done-events.patch @@ -1,55 +1,57 @@ -From 42d7e1de2c69d82447e73eab483e5d3c299d55f7 Mon Sep 17 00:00:00 2001 +From 85b8666b138cab170327f0217c799277371b2e80 Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Tue, 7 May 2019 12:24:35 +0200 Subject: [PATCH] Fix async-batch multiple done events --- - salt/cli/batch_async.py | 17 ++++++++++++----- + salt/cli/batch_async.py | 19 ++++++++++++------- tests/unit/cli/test_batch_async.py | 20 +++++++++++++------- - 2 files changed, 25 insertions(+), 12 deletions(-) + 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 9c20b2fc6e..8c8f481e34 100644 +index b0ab9d9f47..7225491228 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -84,6 +84,7 @@ class BatchAsync(object): - listen=True, +@@ -86,6 +86,7 @@ class BatchAsync: io_loop=ioloop, - keep_loop=True) + keep_loop=True, + ) + self.scheduled = False def __set_event_handler(self): - ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid) -@@ -116,8 +117,7 @@ class BatchAsync(object): + ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) +@@ -118,10 +119,7 @@ class BatchAsync: if minion in self.active: self.active.remove(minion) self.done_minions.add(minion) - # call later so that we maybe gather more returns -- self.event.io_loop.call_later(self.batch_delay, self.schedule_next) +- self.event.io_loop.call_later( +- self.batch_delay, self.schedule_next +- ) + self.schedule_next() def _get_next(self): - to_run = 
self.minions.difference( -@@ -137,7 +137,7 @@ class BatchAsync(object): - self.active = self.active.difference(self.timedout_minions) - running = batch_minions.difference(self.done_minions).difference(self.timedout_minions) + to_run = ( +@@ -146,7 +144,7 @@ class BatchAsync: + self.timedout_minions + ) if timedout_minions: - self.event.io_loop.call_later(self.batch_delay, self.schedule_next) + self.schedule_next() if running: self.event.io_loop.add_callback(self.find_job, running) -@@ -189,7 +189,7 @@ class BatchAsync(object): - "metadata": self.metadata +@@ -197,7 +195,7 @@ class BatchAsync: + "metadata": self.metadata, } - self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid)) + self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) - yield self.schedule_next() + yield self.run_next() def end_batch(self): - left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions)) -@@ -204,8 +204,14 @@ class BatchAsync(object): - self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) + left = self.minions.symmetric_difference( +@@ -214,8 +212,14 @@ class BatchAsync: + self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) self.event.remove_event_handler(self.__event_handler) - @tornado.gen.coroutine @@ -64,16 +66,16 @@ index 9c20b2fc6e..8c8f481e34 100644 next_batch = self._get_next() if next_batch: self.active = self.active.union(next_batch) -@@ -225,3 +231,4 @@ class BatchAsync(object): +@@ -238,3 +242,4 @@ class BatchAsync: self.active = self.active.difference(next_batch) else: self.end_batch() + self.scheduled = False diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index d519157d92..441f9c58b9 100644 +index d6a4bfcf60..66332a548a 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -111,14 +111,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -105,14 +105,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): @tornado.testing.gen_test def test_start_batch_calls_next(self): @@ -90,27 +92,27 @@ index d519157d92..441f9c58b9 100644 + self.assertEqual(len(self.batch.run_next.mock_calls), 1) def test_batch_fire_done_event(self): - self.batch.targeted_minions = {'foo', 'baz', 'bar'} -@@ -154,7 +154,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.batch.targeted_minions = {"foo", "baz", "bar"} +@@ -147,7 +147,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): future = tornado.gen.Future() - future.set_result({'minions': ['foo', 'bar']}) + future.set_result({"minions": ["foo", "bar"]}) self.batch.local.run_job_async.return_value = future - ret = self.batch.schedule_next().result() + ret = self.batch.run_next().result() self.assertEqual( self.batch.local.run_job_async.call_args[0], - ({'foo', 'bar'}, 'my.fun', [], 'list') -@@ -253,7 +253,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.done_minions, {'foo'}) + ({"foo", "bar"}, "my.fun", [], "list"), +@@ -250,7 +250,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual(self.batch.done_minions, {"foo"}) self.assertEqual( self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_delay, self.batch.schedule_next)) -+ (self.batch.batch_delay, self.batch.run_next)) +- (self.batch.batch_delay, self.batch.schedule_next), ++ (self.batch.batch_delay, self.batch.run_next), + ) def test_batch__event_handler_find_job_return(self): - self.batch.event = MagicMock( -@@ -263,10 +263,10 @@ class 
AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.find_job_returned, {'foo'}) +@@ -262,10 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual(self.batch.find_job_returned, {"foo"}) @tornado.testing.gen_test - def test_batch_schedule_next_end_batch_when_no_next(self): @@ -122,9 +124,9 @@ index d519157d92..441f9c58b9 100644 self.assertEqual(len(self.batch.end_batch.mock_calls), 1) @tornado.testing.gen_test -@@ -342,3 +342,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -345,3 +345,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.batch.event.io_loop.add_callback.call_args[0], - (self.batch.find_job, {'foo'}) + (self.batch.find_job, {"foo"}), ) + + def test_only_on_run_next_is_scheduled(self): @@ -133,6 +135,6 @@ index d519157d92..441f9c58b9 100644 + self.batch.schedule_next() + self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0) -- -2.16.4 +2.29.2 diff --git a/fix-async-batch-race-conditions.patch b/fix-async-batch-race-conditions.patch index 6a114d2..f7ced1f 100644 --- a/fix-async-batch-race-conditions.patch +++ b/fix-async-batch-race-conditions.patch @@ -1,19 +1,19 @@ -From dc001cb47fd88a8e8a1bd82a1457325822d1220b Mon Sep 17 00:00:00 2001 +From 4b3badeb52a9de10d6085ee3cc7598a827d1e68f Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Thu, 11 Apr 2019 15:57:59 +0200 Subject: [PATCH] Fix async batch race conditions Close batching when there is no next batch --- - salt/cli/batch_async.py | 80 +++++++++++++++++++------------------- - tests/unit/cli/test_batch_async.py | 35 +++++++---------- - 2 files changed, 54 insertions(+), 61 deletions(-) + salt/cli/batch_async.py | 96 +++++++++++++++--------------- + tests/unit/cli/test_batch_async.py | 38 +++++------- + 2 files changed, 62 insertions(+), 72 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 3160d46d8b..9c20b2fc6e 100644 +index 1557e5105b..b0ab9d9f47 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -37,14 +37,14 @@ class BatchAsync(object): +@@ -32,14 +32,14 @@ class BatchAsync: - tag: salt/batch//start - data: { "available_minions": self.minions, @@ -30,36 +30,38 @@ index 3160d46d8b..9c20b2fc6e 100644 "done_minions": self.done_minions, "timedout_minions": self.timedout_minions } -@@ -67,7 +67,7 @@ class BatchAsync(object): - self.eauth = batch_get_eauth(clear_load['kwargs']) - self.metadata = clear_load['kwargs'].get('metadata', {}) +@@ -68,7 +68,7 @@ class BatchAsync: + self.eauth = batch_get_eauth(clear_load["kwargs"]) + self.metadata = clear_load["kwargs"].get("metadata", {}) self.minions = set() - self.down_minions = set() + self.targeted_minions = set() self.timedout_minions = set() self.done_minions = set() self.active = set() -@@ -108,8 +108,7 @@ class BatchAsync(object): - minion = data['id'] - if op == 'ping_return': +@@ -110,8 +110,7 @@ class BatchAsync: + minion = data["id"] + if op == "ping_return": self.minions.add(minion) - self.down_minions.remove(minion) - if not self.down_minions: + if self.targeted_minions == self.minions: self.event.io_loop.spawn_callback(self.start_batch) - elif op == 'find_job_return': + elif op == "find_job_return": self.find_job_returned.add(minion) -@@ -120,9 +119,6 @@ class BatchAsync(object): - # call later so that we maybe gather more returns - self.event.io_loop.call_later(self.batch_delay, self.schedule_next) +@@ -124,11 +123,6 @@ class BatchAsync: + self.batch_delay, self.schedule_next + ) -- if self.initialized and self.done_minions == 
self.minions.difference(self.timedout_minions): +- if self.initialized and self.done_minions == self.minions.difference( +- self.timedout_minions +- ): - self.end_batch() - def _get_next(self): - to_run = self.minions.difference( - self.done_minions).difference( -@@ -135,16 +131,13 @@ class BatchAsync(object): + to_run = ( + self.minions.difference(self.done_minions) +@@ -142,20 +136,17 @@ class BatchAsync: return set(list(to_run)[:next_batch_size]) @tornado.gen.coroutine @@ -72,35 +74,42 @@ index 3160d46d8b..9c20b2fc6e 100644 - if minion in self.active: - self.active.remove(minion) - self.timedout_minions.add(minion) -- running = minions.difference(did_not_return).difference(self.done_minions).difference(self.timedout_minions) +- running = ( +- minions.difference(did_not_return) +- .difference(self.done_minions) +- .difference(self.timedout_minions) + def check_find_job(self, batch_minions): -+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions) ++ timedout_minions = batch_minions.difference(self.find_job_returned).difference( ++ self.done_minions + ) + self.timedout_minions = self.timedout_minions.union(timedout_minions) + self.active = self.active.difference(self.timedout_minions) -+ running = batch_minions.difference(self.done_minions).difference(self.timedout_minions) ++ running = batch_minions.difference(self.done_minions).difference( ++ self.timedout_minions ++ ) + if timedout_minions: + self.event.io_loop.call_later(self.batch_delay, self.schedule_next) if running: self.event.io_loop.add_callback(self.find_job, running) -@@ -183,7 +176,7 @@ class BatchAsync(object): - jid=self.ping_jid, +@@ -193,7 +184,7 @@ class BatchAsync: metadata=self.metadata, - **self.eauth) -- self.down_minions = set(ping_return['minions']) -+ self.targeted_minions = set(ping_return['minions']) + **self.eauth + ) +- self.down_minions = set(ping_return["minions"]) ++ self.targeted_minions = set(ping_return["minions"]) @tornado.gen.coroutine def start_batch(self): -@@ -192,36 +185,43 @@ class BatchAsync(object): +@@ -202,39 +193,48 @@ class BatchAsync: self.initialized = True data = { "available_minions": self.minions, - "down_minions": self.down_minions, + "down_minions": self.targeted_minions.difference(self.minions), - "metadata": self.metadata + "metadata": self.metadata, } - self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid)) + self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) yield self.schedule_next() def end_batch(self): @@ -109,20 +118,22 @@ index 3160d46d8b..9c20b2fc6e 100644 - "down_minions": self.down_minions, - "done_minions": self.done_minions, - "timedout_minions": self.timedout_minions, -- "metadata": self.metadata +- "metadata": self.metadata, - } -- self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) +- self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) - self.event.remove_event_handler(self.__event_handler) -+ left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions)) ++ left = self.minions.symmetric_difference( ++ self.done_minions.union(self.timedout_minions) ++ ) + if not left: + data = { + "available_minions": self.minions, + "down_minions": self.targeted_minions.difference(self.minions), + "done_minions": self.done_minions, + "timedout_minions": self.timedout_minions, -+ "metadata": self.metadata ++ "metadata": self.metadata, + } -+ self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) ++ 
self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) + self.event.remove_event_handler(self.__event_handler) @tornado.gen.coroutine @@ -131,116 +142,125 @@ index 3160d46d8b..9c20b2fc6e 100644 if next_batch: - yield self.local.run_job_async( - next_batch, -- self.opts['fun'], -- self.opts['arg'], -- 'list', -- raw=self.opts.get('raw', False), -- ret=self.opts.get('return', ''), -- gather_job_timeout=self.opts['gather_job_timeout'], +- self.opts["fun"], +- self.opts["arg"], +- "list", +- raw=self.opts.get("raw", False), +- ret=self.opts.get("return", ""), +- gather_job_timeout=self.opts["gather_job_timeout"], - jid=self.batch_jid, -- metadata=self.metadata) -- self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch)) +- metadata=self.metadata, +- ) +- self.event.io_loop.call_later( +- self.opts["timeout"], self.find_job, set(next_batch) +- ) self.active = self.active.union(next_batch) + try: + yield self.local.run_job_async( + next_batch, -+ self.opts['fun'], -+ self.opts['arg'], -+ 'list', -+ raw=self.opts.get('raw', False), -+ ret=self.opts.get('return', ''), -+ gather_job_timeout=self.opts['gather_job_timeout'], ++ self.opts["fun"], ++ self.opts["arg"], ++ "list", ++ raw=self.opts.get("raw", False), ++ ret=self.opts.get("return", ""), ++ gather_job_timeout=self.opts["gather_job_timeout"], + jid=self.batch_jid, -+ metadata=self.metadata) -+ self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch)) ++ metadata=self.metadata, ++ ) ++ self.event.io_loop.call_later( ++ self.opts["timeout"], self.find_job, set(next_batch) ++ ) + except Exception as ex: + self.active = self.active.difference(next_batch) + else: + self.end_batch() diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index f65b6a06c3..d519157d92 100644 +index 3f8626a2dd..d6a4bfcf60 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -75,8 +75,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.local.run_job_async.call_args[0], - ('*', 'test.ping', [], 'glob') +@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual( + self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob") ) - # assert down_minions == all minions matched by tgt -- self.assertEqual(self.batch.down_minions, set(['foo', 'bar'])) +- self.assertEqual(self.batch.down_minions, {"foo", "bar"}) + # assert targeted_minions == all minions matched by tgt -+ self.assertEqual(self.batch.targeted_minions, set(['foo', 'bar'])) ++ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"}) @tornado.testing.gen_test def test_batch_start_on_gather_job_timeout(self): -@@ -121,7 +121,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -115,7 +115,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.assertEqual(len(self.batch.schedule_next.mock_calls), 1) def test_batch_fire_done_event(self): -+ self.batch.targeted_minions = {'foo', 'baz', 'bar'} - self.batch.minions = set(['foo', 'bar']) -+ self.batch.done_minions = {'foo'} -+ self.batch.timedout_minions = {'bar'} ++ self.batch.targeted_minions = {"foo", "baz", "bar"} + self.batch.minions = {"foo", "bar"} ++ self.batch.done_minions = {"foo"} ++ self.batch.timedout_minions = {"bar"} self.batch.event = MagicMock() - self.batch.metadata = {'mykey': 'myvalue'} + self.batch.metadata = {"mykey": "myvalue"} self.batch.end_batch() -@@ -130,9 +133,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -124,9 
+127,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): ( { - 'available_minions': set(['foo', 'bar']), -- 'done_minions': set(), -- 'down_minions': set(), -- 'timedout_minions': set(), -+ 'done_minions': self.batch.done_minions, -+ 'down_minions': {'baz'}, -+ 'timedout_minions': self.batch.timedout_minions, - 'metadata': self.batch.metadata + "available_minions": {"foo", "bar"}, +- "done_minions": set(), +- "down_minions": set(), +- "timedout_minions": set(), ++ "done_minions": self.batch.done_minions, ++ "down_minions": {"baz"}, ++ "timedout_minions": self.batch.timedout_minions, + "metadata": self.batch.metadata, }, - "salt/batch/1235/done" -@@ -212,7 +215,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + "salt/batch/1235/done", +@@ -205,7 +208,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.assertEqual(self.batch._get_next(), set()) def test_batch__event_handler_ping_return(self): -- self.batch.down_minions = {'foo'} -+ self.batch.targeted_minions = {'foo'} +- self.batch.down_minions = {"foo"} ++ self.batch.targeted_minions = {"foo"} self.batch.event = MagicMock( - unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'}))) - self.batch.start() -@@ -222,7 +225,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) +@@ -216,7 +219,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.assertEqual(self.batch.done_minions, set()) def test_batch__event_handler_call_start_batch_when_all_pings_return(self): -- self.batch.down_minions = {'foo'} -+ self.batch.targeted_minions = {'foo'} +- self.batch.down_minions = {"foo"} ++ self.batch.targeted_minions = {"foo"} self.batch.event = MagicMock( - unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'}))) - self.batch.start() -@@ -232,7 +235,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - (self.batch.start_batch,)) + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) +@@ -228,7 +231,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + ) def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self): -- self.batch.down_minions = {'foo', 'bar'} -+ self.batch.targeted_minions = {'foo', 'bar'} +- self.batch.down_minions = {"foo", "bar"} ++ self.batch.targeted_minions = {"foo", "bar"} self.batch.event = MagicMock( - unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'}))) - self.batch.start() -@@ -260,20 +263,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.find_job_returned, {'foo'}) + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) +@@ -259,23 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual(self.batch.find_job_returned, {"foo"}) @tornado.testing.gen_test - def test_batch__event_handler_end_batch(self): - self.batch.event = MagicMock( -- unpack=MagicMock(return_value=('salt/job/not-my-jid/ret/foo', {'id': 'foo'}))) +- unpack=MagicMock( +- return_value=("salt/job/not-my-jid/ret/foo", {"id": "foo"}) +- ) +- ) - future = tornado.gen.Future() -- future.set_result({'minions': ['foo', 'bar', 'baz']}) +- future.set_result({"minions": ["foo", "bar", "baz"]}) - self.batch.local.run_job_async.return_value = future - self.batch.start() - self.batch.initialized = True -- self.assertEqual(self.batch.down_minions, {'foo', 'bar', 'baz'}) +- self.assertEqual(self.batch.down_minions, {"foo", "bar", "baz"}) + def 
test_batch_schedule_next_end_batch_when_no_next(self): self.batch.end_batch = MagicMock() -- self.batch.minions = {'foo', 'bar', 'baz'} -- self.batch.done_minions = {'foo', 'bar'} -- self.batch.timedout_minions = {'baz'} +- self.batch.minions = {"foo", "bar", "baz"} +- self.batch.done_minions = {"foo", "bar"} +- self.batch.timedout_minions = {"baz"} - self.batch._BatchAsync__event_handler(MagicMock()) + self.batch._get_next = MagicMock(return_value={}) + self.batch.schedule_next() @@ -248,6 +268,6 @@ index f65b6a06c3..d519157d92 100644 @tornado.testing.gen_test -- -2.16.4 +2.29.2 diff --git a/fix-batch_async-obsolete-test.patch b/fix-batch_async-obsolete-test.patch index d6e4544..86d5f8e 100644 --- a/fix-batch_async-obsolete-test.patch +++ b/fix-batch_async-obsolete-test.patch @@ -1,4 +1,4 @@ -From 49780d409630fe18293a077e767aabfd183ff823 Mon Sep 17 00:00:00 2001 +From 5a83801b7733f09c35a7ff0abb5aa32d4c857e4b Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Tue, 3 Dec 2019 11:22:42 +0100 Subject: [PATCH] Fix batch_async obsolete test @@ -8,26 +8,25 @@ Subject: [PATCH] Fix batch_async obsolete test 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index 12dfe543bc..f1d36a81fb 100644 +index c18b42be57..b04965268a 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -140,8 +140,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - "salt/batch/1235/done" - ) +@@ -134,7 +134,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + "salt/batch/1235/done", + ), ) +- self.assertEqual(len(self.batch.event.remove_event_handler.mock_calls), 1) + + def test_batch__del__(self): + batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) + event = MagicMock() + batch.event = event + batch.__del__() - self.assertEqual( -- len(self.batch.event.remove_event_handler.mock_calls), 1) -+ len(event.remove_event_handler.mock_calls), 1) ++ self.assertEqual(len(event.remove_event_handler.mock_calls), 1) @tornado.testing.gen_test def test_batch_next(self): -- -2.16.4 +2.29.2 diff --git a/fix-bsc-1065792.patch b/fix-bsc-1065792.patch index 9994949..67d82e1 100644 --- a/fix-bsc-1065792.patch +++ b/fix-bsc-1065792.patch @@ -1,25 +1,42 @@ -From 4acbe70851e3ef7a04fc5ad0dc9a2519f6989c66 Mon Sep 17 00:00:00 2001 +From 1b9a160f578cf446f5ae622a450d23022e7e3ca5 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Thu, 14 Dec 2017 16:21:40 +0100 Subject: [PATCH] Fix bsc#1065792 --- - salt/states/service.py | 1 + - 1 file changed, 1 insertion(+) + salt/states/service.py | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/salt/states/service.py b/salt/states/service.py -index de7718ea49..987e37cd42 100644 +index d19c245756..4ea36a78f6 100644 --- a/salt/states/service.py +++ b/salt/states/service.py -@@ -80,6 +80,7 @@ def __virtual__(): +@@ -56,16 +56,12 @@ set the reload value to True: + :ref:`Requisites ` documentation. 
+ + """ +-# Import Python libs + + import time + +-# Import Salt libs + import salt.utils.data + import salt.utils.platform + from salt.exceptions import CommandExecutionError +- +-# Import 3rd-party libs + from salt.utils.args import get_function_argspec as _argspec + from salt.utils.systemd import booted + +@@ -79,6 +75,7 @@ def __virtual__(): Only make these states available if a service provider has been detected or assigned for this minion - ''' + """ + __salt__._load_all() - if 'service.start' in __salt__: + if "service.start" in __salt__: return __virtualname__ else: -- -2.16.4 +2.29.2 diff --git a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch b/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch index 940eb44..c27fedf 100644 --- a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch +++ b/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch @@ -1,4 +1,4 @@ -From e7514afcba4f57c5cb8599f561fcefdcc3db7314 Mon Sep 17 00:00:00 2001 +From bc7acab857b952353a959339b06c79d851a9d879 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 16 Sep 2020 00:25:10 +0000 Subject: [PATCH] Fix CVE-2020-25592 and add tests (bsc#1178319) @@ -7,184 +7,17 @@ Properly validate eauth credentials and tokens on SSH calls made by Salt API (bsc#1178319) (bsc#1178362) (bsc#1178361) (CVE-2020-25592) (CVE-2020-17490) (CVE-2020-16846) --- - salt/client/ssh/shell.py | 26 ++- - salt/modules/tls.py | 18 +- - salt/netapi/__init__.py | 67 ++++++ - tests/integration/netapi/test_client.py | 296 +++++++++++++++++++++++- - 4 files changed, 388 insertions(+), 19 deletions(-) + salt/netapi/__init__.py | 43 +++++++++++++++++++++++++ + tests/integration/netapi/test_client.py | 13 ++++++-- + 2 files changed, 53 insertions(+), 3 deletions(-) -diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py -index bd55c514ee..27aba7b382 100644 ---- a/salt/client/ssh/shell.py -+++ b/salt/client/ssh/shell.py -@@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals - import re - import os - import sys -+import shlex - import time - import logging - import subprocess -@@ -43,10 +44,10 @@ def gen_key(path): - ''' - Generate a key for use with salt-ssh - ''' -- cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path) -+ cmd = ["ssh-keygen", "-P", '""', "-f", path, "-t", "rsa", "-q"] - if not os.path.isdir(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) -- subprocess.call(cmd, shell=True) -+ subprocess.call(cmd) - - - def gen_shell(opts, **kwargs): -@@ -289,8 +290,7 @@ class Shell(object): - ''' - try: - proc = salt.utils.nb_popen.NonBlockingPopen( -- cmd, -- shell=True, -+ self._split_cmd(cmd), - stderr=subprocess.PIPE, - stdout=subprocess.PIPE, - ) -@@ -369,6 +369,21 @@ class Shell(object): - - return self._run_cmd(cmd) - -+ def _split_cmd(self, cmd): -+ """ -+ Split a command string so that it is suitable to pass to Popen without -+ shell=True. This prevents shell injection attacks in the options passed -+ to ssh or some other command. -+ """ -+ try: -+ ssh_part, cmd_part = cmd.split("/bin/sh") -+ except ValueError: -+ cmd_lst = shlex.split(cmd) -+ else: -+ cmd_lst = shlex.split(ssh_part) -+ cmd_lst.append("/bin/sh {}".format(cmd_part)) -+ return cmd_lst -+ - def _run_cmd(self, cmd, key_accept=False, passwd_retries=3): - ''' - Execute a shell command via VT. 
This is blocking and assumes that ssh -@@ -378,8 +393,7 @@ class Shell(object): - return '', 'No command or passphrase', 245 - - term = salt.utils.vt.Terminal( -- cmd, -- shell=True, -+ self._split_cmd(cmd), - log_stdout=True, - log_stdout_level='trace', - log_stderr=True, -diff --git a/salt/modules/tls.py b/salt/modules/tls.py -index af845621a3..116b5fe379 100644 ---- a/salt/modules/tls.py -+++ b/salt/modules/tls.py -@@ -798,12 +798,13 @@ def create_ca(ca_name, - if old_key.strip() == keycontent.strip(): - write_key = False - else: -- log.info('Saving old CA ssl key in %s', bck) -- with salt.utils.files.fopen(bck, 'w') as bckf: -+ log.info('Saving old CA ssl key in {0}'.format(bck)) -+ fp = os.open(bck, os.O_CREAT | os.O_RDWR, 0o600) -+ with os.fdopen(fp, 'w') as bckf: - bckf.write(old_key) -- os.chmod(bck, 0o600) - if write_key: -- with salt.utils.files.fopen(ca_keyp, 'wb') as ca_key: -+ fp = os.open(ca_keyp, os.O_CREAT | os.O_RDWR, 0o600) -+ with os.fdopen(fp, 'wb') as ca_key: - ca_key.write(salt.utils.stringutils.to_bytes(keycontent)) - - with salt.utils.files.fopen(certp, 'wb') as ca_crt: -@@ -1115,9 +1116,9 @@ def create_csr(ca_name, - req.sign(key, salt.utils.stringutils.to_str(digest)) - - # Write private key and request -- with salt.utils.files.fopen('{0}/{1}.key'.format(csr_path, -- csr_filename), -- 'wb+') as priv_key: -+ priv_keyp = '{0}/{1}.key'.format(csr_path, csr_filename) -+ fp = os.open(priv_keyp, os.O_CREAT | os.O_RDWR, 0o600) -+ with os.fdopen(fp, 'wb+') as priv_key: - priv_key.write( - salt.utils.stringutils.to_bytes( - OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, -@@ -1266,7 +1267,8 @@ def create_self_signed_cert(tls_dir='tls', - priv_key_path = '{0}/{1}/certs/{2}.key'.format(cert_base_path(), - tls_dir, - cert_filename) -- with salt.utils.files.fopen(priv_key_path, 'wb+') as priv_key: -+ fp = os.open(priv_key_path, os.O_CREAT | os.O_RDWR, 0o600) -+ with os.fdopen(fp, 'wb+') as priv_key: - priv_key.write( - salt.utils.stringutils.to_bytes( - OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py -index 31a24bb420..4e5b6b093a 100644 +index dec19b37ef..cba1ec574f 100644 --- a/salt/netapi/__init__.py +++ b/salt/netapi/__init__.py -@@ -3,24 +3,36 @@ - Make api awesomeness - ''' - from __future__ import absolute_import, print_function, unicode_literals -+ -+import copy -+ - # Import Python libs - import inspect -+import logging - import os - - # Import Salt libs - import salt.log # pylint: disable=W0611 -+import salt.auth - import salt.client - import salt.config -+import salt.daemons.masterapi - import salt.runner - import salt.syspaths - import salt.wheel - import salt.utils.args - import salt.client.ssh.client - import salt.exceptions -+import salt.utils.args -+import salt.utils.minions -+import salt.wheel -+from salt.defaults import DEFAULT_TARGET_DELIM - - # Import third party libs - from salt.ext import six - -+log = logging.getLogger(__name__) -+ - - class NetapiClient(object): - ''' -@@ -34,6 +46,15 @@ class NetapiClient(object): - - def __init__(self, opts): - self.opts = opts -+ apiopts = copy.deepcopy(self.opts) -+ apiopts["enable_ssh_minions"] = True -+ apiopts["cachedir"] = os.path.join(opts["cachedir"], "saltapi") -+ if not os.path.exists(apiopts["cachedir"]): -+ os.makedirs(apiopts["cachedir"]) -+ self.resolver = salt.auth.Resolver(apiopts) -+ self.loadauth = salt.auth.LoadAuth(apiopts) -+ self.key = salt.daemons.masterapi.access_keys(apiopts) -+ self.ckminions = 
salt.utils.minions.CkMinions(apiopts) - - def _is_master_running(self): - ''' -@@ -55,6 +76,49 @@ class NetapiClient(object): - self.opts['sock_dir'], - ipc_file)) +@@ -109,6 +109,49 @@ class NetapiClient: + "Authorization error occurred." + ) + def _prep_auth_info(self, clear_load): + sensitive_load_keys = [] @@ -230,352 +63,58 @@ index 31a24bb420..4e5b6b093a 100644 + ) + def run(self, low): - ''' + """ Execute the specified function in the specified client by passing the -@@ -80,6 +144,9 @@ class NetapiClient(object): - raise salt.exceptions.EauthAuthenticationError( - 'Raw shell option not allowed.') - -+ if low['client'] == 'ssh': -+ self._authorize_ssh(low) -+ - l_fun = getattr(self, low['client']) - f_call = salt.utils.args.format_call(l_fun, low) - return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {})) diff --git a/tests/integration/netapi/test_client.py b/tests/integration/netapi/test_client.py -index 08030f31ec..b99bdfe313 100644 +index 70471d3148..9eb6e52920 100644 --- a/tests/integration/netapi/test_client.py +++ b/tests/integration/netapi/test_client.py -@@ -1,26 +1,30 @@ - # encoding: utf-8 -- - # Import Python libs - from __future__ import absolute_import, print_function, unicode_literals -+import copy - import logging - import os - import time - -+import salt.config -+import salt.netapi -+import salt.utils.files -+import salt.utils.platform -+import salt.utils.pycrypto -+ - # Import Salt Testing libs - from tests.support.paths import TMP_CONF_DIR, TMP +@@ -15,10 +15,12 @@ from tests.support.helpers import ( + SKIP_IF_NOT_RUNNING_PYTEST, + SaveRequestsPostHandler, + Webserver, ++ requires_sshd_server, + slowTest, + ) + from tests.support.mixins import AdaptedConfigurationTestCaseMixin + from tests.support.mock import patch ++from tests.support.paths import TMP, TMP_CONF_DIR from tests.support.runtests import RUNTIME_VARS from tests.support.unit import TestCase, skipIf - from tests.support.mock import patch --from tests.support.case import SSHCase -+from tests.support.case import ModuleCase, SSHCase -+from salt.exceptions import EauthAuthenticationError - from tests.support.helpers import ( - Webserver, - SaveRequestsPostHandler, - requires_sshd_server - ) --# Import Salt libs --import salt.config --import salt.netapi - - from salt.exceptions import ( - EauthAuthenticationError -@@ -174,6 +178,10 @@ class NetapiSSHClientTest(SSHCase): - ''' - opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, 'master')) +@@ -178,7 +180,12 @@ class NetapiSSHClientTest(SSHCase): + """ + opts = AdaptedConfigurationTestCaseMixin.get_config("client_config").copy() self.netapi = salt.netapi.NetapiClient(opts) -+ opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master")) -+ naopts = copy.deepcopy(opts) -+ naopts["ignore_host_keys"] = True -+ self.netapi = salt.netapi.NetapiClient(naopts) - - self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test') - self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR) -@@ -271,3 +279,281 @@ class NetapiSSHClientTest(SSHCase): - - self.assertEqual(ret, None) - self.assertFalse(os.path.exists('badfile.txt')) -+ -+ @staticmethod -+ def cleanup_file(path): -+ try: -+ os.remove(path) -+ except OSError: -+ pass -+ -+ @staticmethod -+ def cleanup_dir(path): -+ try: -+ salt.utils.files.rm_rf(path) -+ except OSError: -+ pass -+ -+ def test_shell_inject_ssh_priv(self): -+ """ -+ Verify CVE-2020-16846 for ssh_priv variable -+ """ -+ # ZDI-CAN-11143 -+ path = "/tmp/test-11143" -+ self.addCleanup(self.cleanup_file, path) -+ 
self.addCleanup(self.cleanup_file, "aaa") -+ self.addCleanup(self.cleanup_file, "aaa.pub") -+ self.addCleanup(self.cleanup_dir, "aaa|id>") -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "www.zerodayinitiative.com", -+ "ssh_priv": "aaa|id>{} #".format(path), -+ "fun": "test.ping", -+ "eauth": "auto", -+ "username": "saltdev_auto", -+ "password": "saltdev", -+ } -+ ret = self.netapi.run(low) -+ self.assertFalse(os.path.exists(path)) -+ -+ def test_shell_inject_tgt(self): -+ """ -+ Verify CVE-2020-16846 for tgt variable -+ """ -+ # ZDI-CAN-11167 -+ path = "/tmp/test-11167" -+ self.addCleanup(self.cleanup_file, path) -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "root|id>{} #@127.0.0.1".format(path), -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", -+ "rosters": "/", -+ "fun": "test.ping", -+ "eauth": "auto", -+ "username": "saltdev_auto", -+ "password": "saltdev", -+ } -+ ret = self.netapi.run(low) -+ self.assertFalse(os.path.exists(path)) -+ -+ def test_shell_inject_ssh_options(self): -+ """ -+ Verify CVE-2020-16846 for ssh_options -+ """ -+ # ZDI-CAN-11169 -+ path = "/tmp/test-11169" -+ self.addCleanup(self.cleanup_file, path) -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "127.0.0.1", -+ "renderer": "cheetah", -+ "fun": "test.ping", -+ "eauth": "auto", -+ "username": "saltdev_auto", -+ "password": "saltdev", -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", -+ "rosters": "/", -+ "ssh_options": ["|id>{} #".format(path), "lol"], -+ } -+ ret = self.netapi.run(low) -+ self.assertFalse(os.path.exists(path)) -+ -+ def test_shell_inject_ssh_port(self): -+ """ -+ Verify CVE-2020-16846 for ssh_port variable -+ """ -+ # ZDI-CAN-11172 -+ path = "/tmp/test-11172" -+ self.addCleanup(self.cleanup_file, path) -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "127.0.0.1", -+ "renderer": "cheetah", -+ "fun": "test.ping", -+ "eauth": "auto", -+ "username": "saltdev_auto", -+ "password": "saltdev", -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", -+ "rosters": "/", -+ "ssh_port": "hhhhh|id>{} #".format(path), -+ } -+ ret = self.netapi.run(low) -+ self.assertFalse(os.path.exists(path)) -+ -+ def test_shell_inject_remote_port_forwards(self): -+ """ -+ Verify CVE-2020-16846 for remote_port_forwards variable -+ """ -+ # ZDI-CAN-11173 -+ path = "/tmp/test-1173" -+ self.addCleanup(self.cleanup_file, path) -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "127.0.0.1", -+ "renderer": "cheetah", -+ "fun": "test.ping", -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", -+ "rosters": "/", -+ "ssh_remote_port_forwards": "hhhhh|id>{} #, lol".format(path), -+ "eauth": "auto", -+ "username": "saltdev_auto", -+ "password": "saltdev", -+ } -+ ret = self.netapi.run(low) -+ self.assertFalse(os.path.exists(path)) -+ -+ -+@requires_sshd_server -+class NetapiSSHClientAuthTest(SSHCase): -+ -+ USERA = "saltdev" -+ USERA_PWD = "saltdev" -+ -+ def setUp(self): -+ """ -+ Set up a NetapiClient instance -+ """ +- self.priv_file = os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key") + opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master")) + naopts = copy.deepcopy(opts) + naopts["ignore_host_keys"] = True + self.netapi = salt.netapi.NetapiClient(naopts) + + self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test") -+ self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR) -+ # Initialize salt-ssh -+ self.run_function("test.ping") -+ self.mod_case = ModuleCase() -+ try: -+ add_user = 
self.mod_case.run_function( -+ "user.add", [self.USERA], createhome=False -+ ) -+ self.assertTrue(add_user) -+ if salt.utils.platform.is_darwin(): -+ hashed_password = self.USERA_PWD -+ else: -+ hashed_password = salt.utils.pycrypto.gen_hash(password=self.USERA_PWD) -+ add_pwd = self.mod_case.run_function( -+ "shadow.set_password", [self.USERA, hashed_password], -+ ) -+ self.assertTrue(add_pwd) -+ except AssertionError: -+ self.mod_case.run_function("user.delete", [self.USERA], remove=True) -+ self.skipTest("Could not add user or password, skipping test") -+ -+ def tearDown(self): -+ del self.netapi -+ self.mod_case.run_function("user.delete", [self.USERA], remove=True) -+ -+ @classmethod -+ def setUpClass(cls): -+ cls.post_webserver = Webserver(handler=SaveRequestsPostHandler) -+ cls.post_webserver.start() -+ cls.post_web_root = cls.post_webserver.web_root -+ cls.post_web_handler = cls.post_webserver.handler -+ -+ @classmethod -+ def tearDownClass(cls): -+ cls.post_webserver.stop() -+ del cls.post_webserver -+ -+ def test_ssh_auth_bypass(self): -+ """ -+ CVE-2020-25592 - Bogus eauth raises exception. -+ """ -+ low = { -+ "roster": "cache", -+ "client": "ssh", -+ "tgt": "127.0.0.1", -+ "renderer": "cheetah", -+ "fun": "test.ping", + self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR) + self.roster_file = os.path.join(self.rosters, "roster") + +@@ -325,7 +332,7 @@ class NetapiSSHClientTest(SSHCase): + "roster": "cache", + "client": "ssh", + "tgt": "root|id>{} #@127.0.0.1".format(path), +- "roster_file": self.roster_file, + "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", -+ "rosters": "/", -+ "eauth": "xx", -+ } -+ with self.assertRaises(salt.exceptions.EauthAuthenticationError): -+ ret = self.netapi.run(low) -+ -+ def test_ssh_auth_valid(self): -+ """ -+ CVE-2020-25592 - Valid eauth works as expected. -+ """ -+ low = { -+ "client": "ssh", -+ "tgt": "localhost", -+ "fun": "test.ping", -+ "roster_file": "roster", -+ "rosters": [self.rosters], -+ "ssh_priv": self.priv_file, -+ "eauth": "pam", -+ "username": "saltdev", -+ "password": "saltdev", -+ } -+ ret = self.netapi.run(low) -+ assert "localhost" in ret -+ assert ret["localhost"]["return"] is True -+ -+ def test_ssh_auth_invalid(self): -+ """ -+ CVE-2020-25592 - Wrong password raises exception. -+ """ -+ low = { -+ "client": "ssh", -+ "tgt": "localhost", -+ "fun": "test.ping", -+ "roster_file": "roster", -+ "rosters": [self.rosters], -+ "ssh_priv": self.priv_file, -+ "eauth": "pam", -+ "username": "saltdev", -+ "password": "notvalidpassword", -+ } -+ with self.assertRaises(salt.exceptions.EauthAuthenticationError): -+ ret = self.netapi.run(low) -+ -+ def test_ssh_auth_invalid_acl(self): -+ """ -+ CVE-2020-25592 - Eauth ACL enforced. -+ """ -+ low = { -+ "client": "ssh", -+ "tgt": "localhost", -+ "fun": "at.at", -+ "args": ["12:05am", "echo foo"], -+ "roster_file": "roster", -+ "rosters": [self.rosters], -+ "ssh_priv": self.priv_file, -+ "eauth": "pam", -+ "username": "saltdev", -+ "password": "notvalidpassword", -+ } -+ with self.assertRaises(salt.exceptions.EauthAuthenticationError): -+ ret = self.netapi.run(low) -+ -+ def test_ssh_auth_token(self): -+ """ -+ CVE-2020-25592 - Eauth tokens work as expected. 
-+ """ -+ low = { -+ "eauth": "pam", -+ "username": "saltdev", -+ "password": "saltdev", -+ } -+ ret = self.netapi.loadauth.mk_token(low) -+ assert "token" in ret and ret["token"] -+ low = { -+ "client": "ssh", -+ "tgt": "localhost", -+ "fun": "test.ping", -+ "roster_file": "roster", -+ "rosters": [self.rosters], -+ "ssh_priv": self.priv_file, -+ "token": ret["token"], -+ } -+ ret = self.netapi.run(low) -+ assert "localhost" in ret -+ assert ret["localhost"]["return"] is True + "rosters": "/", + "fun": "test.ping", + "eauth": "auto", +@@ -355,7 +362,7 @@ class NetapiSSHClientTest(SSHCase): + "eauth": "auto", + "username": "saltdev_auto", + "password": "saltdev", +- "roster_file": self.roster_file, ++ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", + "rosters": "/", + "ssh_options": ["|id>{} #".format(path), "lol"], + } -- -2.28.0 +2.29.2 diff --git a/fix-failing-unit-tests-for-batch-async.patch b/fix-failing-unit-tests-for-batch-async.patch index 852eddb..c5246fe 100644 --- a/fix-failing-unit-tests-for-batch-async.patch +++ b/fix-failing-unit-tests-for-batch-async.patch @@ -1,4 +1,4 @@ -From e6f6b38c75027c4c4f6395117b734dce6fb7433e Mon Sep 17 00:00:00 2001 +From 3b96edd8d23c65c6788a9980114a7e1c220c9640 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Fri, 4 Oct 2019 15:00:50 +0100 @@ -6,49 +6,49 @@ Subject: [PATCH] Fix failing unit tests for batch async --- salt/cli/batch_async.py | 2 +- - tests/unit/cli/test_batch_async.py | 57 ++++++++++++++++++++++---------------- - 2 files changed, 34 insertions(+), 25 deletions(-) + tests/unit/cli/test_batch_async.py | 66 +++++++++++++++++------------- + 2 files changed, 39 insertions(+), 29 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index f9e736f804..6d0dca1da5 100644 +index 89405ba917..b2d04f9d4d 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -88,7 +88,7 @@ class BatchAsync(object): - io_loop=ioloop, - keep_loop=True) +@@ -91,7 +91,7 @@ class BatchAsync: + keep_loop=True, + ) self.scheduled = False - self.patterns = {} + self.patterns = set() def __set_event_handler(self): - ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid) + ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index 441f9c58b9..12dfe543bc 100644 +index 66332a548a..c18b42be57 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -61,8 +61,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): ret = self.batch.start() # assert start_batch is called later with batch_presence_ping_timeout as param self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_presence_ping_timeout, self.batch.start_batch)) +- (self.batch.batch_presence_ping_timeout, self.batch.start_batch), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.start_batch,)) ++ (self.batch.start_batch,), + ) # assert test.ping called self.assertEqual( - self.batch.local.run_job_async.call_args[0], -@@ -88,8 +88,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -81,8 +81,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): ret = self.batch.start() # assert start_batch is called later with gather_job_timeout as param self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts['gather_job_timeout'], 
self.batch.start_batch)) +- (self.batch.opts["gather_job_timeout"], self.batch.start_batch), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.start_batch,)) ++ (self.batch.start_batch,), + ) def test_batch_fire_start_event(self): - self.batch.minions = set(['foo', 'bar']) -@@ -113,12 +113,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -107,12 +107,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): def test_start_batch_calls_next(self): self.batch.run_next = MagicMock(return_value=MagicMock()) self.batch.event = MagicMock() @@ -59,127 +59,138 @@ index 441f9c58b9..12dfe543bc 100644 self.assertEqual(self.batch.initialized, True) - self.assertEqual(len(self.batch.run_next.mock_calls), 1) + self.assertEqual( -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.run_next,)) ++ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,) ++ ) def test_batch_fire_done_event(self): - self.batch.targeted_minions = {'foo', 'baz', 'bar'} -@@ -154,14 +153,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.batch.targeted_minions = {"foo", "baz", "bar"} +@@ -147,14 +146,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): future = tornado.gen.Future() - future.set_result({'minions': ['foo', 'bar']}) + future.set_result({"minions": ["foo", "bar"]}) self.batch.local.run_job_async.return_value = future - ret = self.batch.run_next().result() + self.batch.run_next() self.assertEqual( self.batch.local.run_job_async.call_args[0], - ({'foo', 'bar'}, 'my.fun', [], 'list') + ({"foo", "bar"}, "my.fun", [], "list"), ) self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts['timeout'], self.batch.find_job, {'foo', 'bar'}) +- (self.batch.opts["timeout"], self.batch.find_job, {"foo", "bar"}), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.find_job, {'foo', 'bar'}) ++ (self.batch.find_job, {"foo", "bar"}), ) - self.assertEqual(self.batch.active, {'bar', 'foo'}) + self.assertEqual(self.batch.active, {"bar", "foo"}) -@@ -252,13 +251,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -249,15 +248,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.assertEqual(self.batch.active, set()) - self.assertEqual(self.batch.done_minions, {'foo'}) + self.assertEqual(self.batch.done_minions, {"foo"}) self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_delay, self.batch.run_next)) +- (self.batch.batch_delay, self.batch.run_next), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.schedule_next,)) ++ (self.batch.schedule_next,), + ) def test_batch__event_handler_find_job_return(self): self.batch.event = MagicMock( -- unpack=MagicMock(return_value=('salt/job/1236/ret/foo', {'id': 'foo'}))) -+ unpack=MagicMock(return_value=('salt/job/1236/ret/foo', {'id': 'foo', 'return': 'deadbeaf'}))) +- unpack=MagicMock(return_value=("salt/job/1236/ret/foo", {"id": "foo"})) ++ unpack=MagicMock( ++ return_value=( ++ "salt/job/1236/ret/foo", ++ {"id": "foo", "return": "deadbeaf"}, ++ ) ++ ) + ) self.batch.start() -+ self.batch.patterns.add(('salt/job/1236/ret/*', 'find_job_return')) ++ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return")) self.batch._BatchAsync__event_handler(MagicMock()) - self.assertEqual(self.batch.find_job_returned, {'foo'}) + self.assertEqual(self.batch.find_job_returned, {"foo"}) -@@ -275,10 +275,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -274,14 +279,13 @@ class 
AsyncBatchTestCase(AsyncTestCase, TestCase): future = tornado.gen.Future() future.set_result({}) self.batch.local.run_job_async.return_value = future -+ self.batch.minions = set(['foo', 'bar']) ++ self.batch.minions = {"foo", "bar"} + self.batch.jid_gen = MagicMock(return_value="1234") + tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({'foo', 'bar'}) + self.batch.find_job({"foo", "bar"}) self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts['gather_job_timeout'], self.batch.check_find_job, {'foo', 'bar'}) +- ( +- self.batch.opts["gather_job_timeout"], +- self.batch.check_find_job, +- {"foo", "bar"}, +- ), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.check_find_job, {'foo', 'bar'}, "1234") ++ (self.batch.check_find_job, {"foo", "bar"}, "1234"), ) @tornado.testing.gen_test -@@ -288,17 +291,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -291,17 +295,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): future = tornado.gen.Future() future.set_result({}) self.batch.local.run_job_async.return_value = future -+ self.batch.minions = set(['foo', 'bar']) ++ self.batch.minions = {"foo", "bar"} + self.batch.jid_gen = MagicMock(return_value="1234") + tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({'foo', 'bar'}) + self.batch.find_job({"foo", "bar"}) self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts['gather_job_timeout'], self.batch.check_find_job, {'foo'}) +- (self.batch.opts["gather_job_timeout"], self.batch.check_find_job, {"foo"}), + self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.check_find_job, {'foo'}, "1234") ++ (self.batch.check_find_job, {"foo"}, "1234"), ) def test_batch_check_find_job_did_not_return(self): self.batch.event = MagicMock() - self.batch.active = {'foo'} + self.batch.active = {"foo"} self.batch.find_job_returned = set() -- self.batch.check_find_job({'foo'}) -+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') } -+ self.batch.check_find_job({'foo'}, jid="1234") +- self.batch.check_find_job({"foo"}) ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} ++ self.batch.check_find_job({"foo"}, jid="1234") self.assertEqual(self.batch.find_job_returned, set()) self.assertEqual(self.batch.active, set()) self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0) -@@ -306,9 +313,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -309,9 +317,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): def test_batch_check_find_job_did_return(self): self.batch.event = MagicMock() - self.batch.find_job_returned = {'foo'} -- self.batch.check_find_job({'foo'}) -+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') } -+ self.batch.check_find_job({'foo'}, jid="1234") + self.batch.find_job_returned = {"foo"} +- self.batch.check_find_job({"foo"}) ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} ++ self.batch.check_find_job({"foo"}, jid="1234") self.assertEqual( - self.batch.event.io_loop.add_callback.call_args[0], + self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.find_job, {'foo'}) + (self.batch.find_job, {"foo"}), ) -@@ -329,7 +337,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -332,7 +341,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): # both not yet done but only 'foo' responded to find_job - not_done = {'foo', 'bar'} + not_done = {"foo", "bar"} - 
self.batch.check_find_job(not_done) -+ self.batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return') } ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} + self.batch.check_find_job(not_done, jid="1234") # assert 'bar' removed from active - self.assertEqual(self.batch.active, {'foo'}) -@@ -339,7 +348,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.assertEqual(self.batch.active, {"foo"}) +@@ -342,7 +352,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): # assert 'find_job' schedueled again only for 'foo' self.assertEqual( - self.batch.event.io_loop.add_callback.call_args[0], + self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.find_job, {'foo'}) + (self.batch.find_job, {"foo"}), ) -@@ -347,4 +356,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): +@@ -350,4 +360,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): self.batch.event = MagicMock() self.batch.scheduled = True self.batch.schedule_next() - self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0) + self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0) -- -2.16.4 +2.29.2 diff --git a/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch b/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch deleted file mode 100644 index 9bde3a4..0000000 --- a/fix-for-bsc-1102248-psutil-is-broken-and-so-process-.patch +++ /dev/null @@ -1,738 +0,0 @@ -From c3d8ef9d1387ac3d69fbbd1f8042bf89ba87821a Mon Sep 17 00:00:00 2001 -From: Victor Zhestkov -Date: Tue, 13 Oct 2020 09:28:39 +0300 -Subject: [PATCH] Fix for bsc#1102248 - psutil is broken and so Process - is not working on Python 3 as it is implemented - ---- - salt/modules/ps.py | 268 ++++++++++++++++++++++++++------------------- - 1 file changed, 157 insertions(+), 111 deletions(-) - -diff --git a/salt/modules/ps.py b/salt/modules/ps.py -index bb37873f48..9925e29968 100644 ---- a/salt/modules/ps.py -+++ b/salt/modules/ps.py -@@ -1,31 +1,33 @@ - # -*- coding: utf-8 -*- --''' -+""" - A salt interface to psutil, a system and process library. - See http://code.google.com/p/psutil. - - :depends: - psutil Python module, version 0.3.0 or later - - python-utmp package (optional) --''' -+""" - - # Import python libs --from __future__ import absolute_import, unicode_literals, print_function --import time -+from __future__ import absolute_import, print_function, unicode_literals -+ - import datetime - import re -+import time - - # Import salt libs - import salt.utils.data --from salt.exceptions import SaltInvocationError, CommandExecutionError - - # Import third party libs - import salt.utils.decorators.path -+from salt.exceptions import CommandExecutionError, SaltInvocationError - from salt.ext import six -+ - # pylint: disable=import-error - try: - import salt.utils.psutil_compat as psutil - - HAS_PSUTIL = True -- PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) -+ PSUTIL2 = getattr(psutil, "version_info", ()) >= (2, 0) - except ImportError: - HAS_PSUTIL = False - # pylint: enable=import-error -@@ -33,7 +35,10 @@ except ImportError: - - def __virtual__(): - if not HAS_PSUTIL: -- return False, 'The ps module cannot be loaded: python module psutil not installed.' -+ return ( -+ False, -+ "The ps module cannot be loaded: python module psutil not installed.", -+ ) - - # Functions and attributes used in this execution module seem to have been - # added as of psutil 0.3.0, from an inspection of the source code. Only -@@ -44,15 +49,20 @@ def __virtual__(): - # as of Dec. 
2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). - if psutil.version_info >= (0, 3, 0): - return True -- return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) -+ return ( -+ False, -+ "The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0".format( -+ psutil.version_info -+ ), -+ ) - - - def _get_proc_cmdline(proc): -- ''' -+ """ - Returns the cmdline of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - try: - return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) - except (psutil.NoSuchProcess, psutil.AccessDenied): -@@ -60,23 +70,25 @@ def _get_proc_cmdline(proc): - - - def _get_proc_create_time(proc): -- ''' -+ """ - Returns the create_time of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - try: -- return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) -+ return salt.utils.data.decode( -+ proc.create_time() if PSUTIL2 else proc.create_time -+ ) - except (psutil.NoSuchProcess, psutil.AccessDenied): - return None - - - def _get_proc_name(proc): -- ''' -+ """ - Returns the name of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - try: - return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) - except (psutil.NoSuchProcess, psutil.AccessDenied): -@@ -84,11 +96,11 @@ def _get_proc_name(proc): - - - def _get_proc_status(proc): -- ''' -+ """ - Returns the status of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - try: - return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) - except (psutil.NoSuchProcess, psutil.AccessDenied): -@@ -96,11 +108,11 @@ def _get_proc_status(proc): - - - def _get_proc_username(proc): -- ''' -+ """ - Returns the username of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - try: - return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) - except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): -@@ -108,16 +120,16 @@ def _get_proc_username(proc): - - - def _get_proc_pid(proc): -- ''' -+ """ - Returns the pid of a Process instance. - - It's backward compatible with < 2.0 versions of psutil. -- ''' -+ """ - return proc.pid - - - def top(num_processes=5, interval=3): -- ''' -+ """ - Return a list of top CPU consuming processes during the interval. 
- num_processes = return the top N CPU consuming processes - interval = the number of seconds to sample CPU usage over -@@ -129,57 +141,63 @@ def top(num_processes=5, interval=3): - salt '*' ps.top - - salt '*' ps.top 5 10 -- ''' -+ """ - result = [] - start_usage = {} - for pid in psutil.pids(): - try: - process = psutil.Process(pid) -- user, system = process.cpu_times() -- except ValueError: -- user, system, _, _ = process.cpu_times() - except psutil.NoSuchProcess: - continue -+ else: -+ try: -+ user, system = process.cpu_times()[:2] -+ except psutil.ZombieProcess: -+ user = system = 0.0 - start_usage[process] = user + system - time.sleep(interval) - usage = set() - for process, start in six.iteritems(start_usage): - try: -- user, system = process.cpu_times() -- except ValueError: -- user, system, _, _ = process.cpu_times() -+ user, system = process.cpu_times()[:2] - except psutil.NoSuchProcess: - continue - now = user + system - diff = now - start - usage.add((diff, process)) - -- for idx, (diff, process) in enumerate(reversed(sorted(usage))): -- if num_processes and idx >= num_processes: -- break -- if len(_get_proc_cmdline(process)) == 0: -- cmdline = _get_proc_name(process) -- else: -- cmdline = _get_proc_cmdline(process) -- info = {'cmd': cmdline, -- 'user': _get_proc_username(process), -- 'status': _get_proc_status(process), -- 'pid': _get_proc_pid(process), -- 'create_time': _get_proc_create_time(process), -- 'cpu': {}, -- 'mem': {}, -+ for diff, process in sorted(usage, key=lambda x: x[0], reverse=True): -+ info = { -+ "cmd": _get_proc_cmdline(process) or _get_proc_name(process), -+ "user": _get_proc_username(process), -+ "status": _get_proc_status(process), -+ "pid": _get_proc_pid(process), -+ "create_time": _get_proc_create_time(process), -+ "cpu": {}, -+ "mem": {}, - } -- for key, value in six.iteritems(process.cpu_times()._asdict()): -- info['cpu'][key] = value -- for key, value in six.iteritems(process.memory_info()._asdict()): -- info['mem'][key] = value -+ try: -+ for key, value in six.iteritems(process.cpu_times()._asdict()): -+ info["cpu"][key] = value -+ for key, value in six.iteritems(process.memory_info()._asdict()): -+ info["mem"][key] = value -+ except psutil.NoSuchProcess: -+ # Process ended since psutil.pids() was run earlier in this -+ # function. Ignore this process and do not include this process in -+ # the return data. -+ continue -+ - result.append(info) - -+ # Stop gathering process info since we've reached the desired number -+ if len(result) >= num_processes: -+ break -+ - return result - - - def get_pid_list(): -- ''' -+ """ - Return a list of process ids (PIDs) for all running processes. - - CLI Example: -@@ -187,12 +205,12 @@ def get_pid_list(): - .. code-block:: bash - - salt '*' ps.get_pid_list -- ''' -+ """ - return psutil.pids() - - - def proc_info(pid, attrs=None): -- ''' -+ """ - Return a dictionary of information for a process id (PID). - - CLI Example: -@@ -209,7 +227,7 @@ def proc_info(pid, attrs=None): - Optional list of desired process attributes. The list of possible - attributes can be found here: - http://pythonhosted.org/psutil/#psutil.Process -- ''' -+ """ - try: - proc = psutil.Process(pid) - return proc.as_dict(attrs) -@@ -218,7 +236,7 @@ def proc_info(pid, attrs=None): - - - def kill_pid(pid, signal=15): -- ''' -+ """ - Kill a process by PID. - - .. code-block:: bash -@@ -239,7 +257,7 @@ def kill_pid(pid, signal=15): - .. 
code-block:: bash - - salt 'minion' ps.kill_pid 2000 signal=9 -- ''' -+ """ - try: - psutil.Process(pid).send_signal(signal) - return True -@@ -248,7 +266,7 @@ def kill_pid(pid, signal=15): - - - def pkill(pattern, user=None, signal=15, full=False): -- ''' -+ """ - Kill processes matching a pattern. - - .. code-block:: bash -@@ -283,12 +301,15 @@ def pkill(pattern, user=None, signal=15, full=False): - .. code-block:: bash - - salt '*' ps.pkill bash signal=9 user=tom -- ''' -+ """ - - killed = [] - for proc in psutil.process_iter(): -- name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ -+ name_match = ( -+ pattern in " ".join(_get_proc_cmdline(proc)) -+ if full - else pattern in _get_proc_name(proc) -+ ) - user_match = True if user is None else user == _get_proc_username(proc) - if name_match and user_match: - try: -@@ -299,11 +320,11 @@ def pkill(pattern, user=None, signal=15, full=False): - if not killed: - return None - else: -- return {'killed': killed} -+ return {"killed": killed} - - --def pgrep(pattern, user=None, full=False): -- ''' -+def pgrep(pattern, user=None, full=False, pattern_is_regex=False): -+ """ - Return the pids for processes matching a pattern. - - If full is true, the full command line is searched for a match, -@@ -323,6 +344,12 @@ def pgrep(pattern, user=None, full=False): - A boolean value indicating whether only the name of the command or - the full command line should be matched against the pattern. - -+ pattern_is_regex -+ This flag enables ps.pgrep to mirror the regex search functionality -+ found in the pgrep command line utility. -+ -+ .. versionadded:: 3001 -+ - **Examples:** - - Find all httpd processes on all 'www' minions: -@@ -336,20 +363,34 @@ def pgrep(pattern, user=None, full=False): - .. code-block:: bash - - salt '*' ps.pgrep bash user=tom -- ''' -+ """ - - procs = [] -+ -+ if pattern_is_regex: -+ pattern = re.compile(str(pattern)) -+ - for proc in psutil.process_iter(): -- name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ -- else pattern in _get_proc_name(proc) -+ if full: -+ process_line = " ".join(_get_proc_cmdline(proc)) -+ else: -+ process_line = _get_proc_name(proc) -+ -+ if pattern_is_regex: -+ name_match = re.search(pattern, process_line) -+ else: -+ name_match = pattern in process_line -+ - user_match = True if user is None else user == _get_proc_username(proc) -+ - if name_match and user_match: - procs.append(_get_proc_pid(proc)) -+ - return procs or None - - - def cpu_percent(interval=0.1, per_cpu=False): -- ''' -+ """ - Return the percent of time the CPU is busy. - - interval -@@ -363,7 +404,7 @@ def cpu_percent(interval=0.1, per_cpu=False): - .. code-block:: bash - - salt '*' ps.cpu_percent -- ''' -+ """ - if per_cpu: - result = list(psutil.cpu_percent(interval, True)) - else: -@@ -372,7 +413,7 @@ def cpu_percent(interval=0.1, per_cpu=False): - - - def cpu_times(per_cpu=False): -- ''' -+ """ - Return the percent of time the CPU spends in each state, - e.g. user, system, idle, nice, iowait, irq, softirq. - -@@ -385,7 +426,7 @@ def cpu_times(per_cpu=False): - .. code-block:: bash - - salt '*' ps.cpu_times -- ''' -+ """ - if per_cpu: - result = [dict(times._asdict()) for times in psutil.cpu_times(True)] - else: -@@ -394,7 +435,7 @@ def cpu_times(per_cpu=False): - - - def virtual_memory(): -- ''' -+ """ - .. versionadded:: 2014.7.0 - - Return a dict that describes statistics about system memory usage. -@@ -408,15 +449,15 @@ def virtual_memory(): - .. 
code-block:: bash - - salt '*' ps.virtual_memory -- ''' -+ """ - if psutil.version_info < (0, 6, 0): -- msg = 'virtual_memory is only available in psutil 0.6.0 or greater' -+ msg = "virtual_memory is only available in psutil 0.6.0 or greater" - raise CommandExecutionError(msg) - return dict(psutil.virtual_memory()._asdict()) - - - def swap_memory(): -- ''' -+ """ - .. versionadded:: 2014.7.0 - - Return a dict that describes swap memory statistics. -@@ -430,15 +471,15 @@ def swap_memory(): - .. code-block:: bash - - salt '*' ps.swap_memory -- ''' -+ """ - if psutil.version_info < (0, 6, 0): -- msg = 'swap_memory is only available in psutil 0.6.0 or greater' -+ msg = "swap_memory is only available in psutil 0.6.0 or greater" - raise CommandExecutionError(msg) - return dict(psutil.swap_memory()._asdict()) - - - def disk_partitions(all=False): -- ''' -+ """ - Return a list of disk partitions and their device, mount point, and - filesystem type. - -@@ -451,14 +492,13 @@ def disk_partitions(all=False): - .. code-block:: bash - - salt '*' ps.disk_partitions -- ''' -- result = [dict(partition._asdict()) for partition in -- psutil.disk_partitions(all)] -+ """ -+ result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] - return result - - - def disk_usage(path): -- ''' -+ """ - Given a path, return a dict listing the total available space as well as - the free space, and used space. - -@@ -467,12 +507,12 @@ def disk_usage(path): - .. code-block:: bash - - salt '*' ps.disk_usage /home -- ''' -+ """ - return dict(psutil.disk_usage(path)._asdict()) - - - def disk_partition_usage(all=False): -- ''' -+ """ - Return a list of disk partitions plus the mount point, filesystem and usage - statistics. - -@@ -481,15 +521,15 @@ def disk_partition_usage(all=False): - .. code-block:: bash - - salt '*' ps.disk_partition_usage -- ''' -+ """ - result = disk_partitions(all) - for partition in result: -- partition.update(disk_usage(partition['mountpoint'])) -+ partition.update(disk_usage(partition["mountpoint"])) - return result - - - def total_physical_memory(): -- ''' -+ """ - Return the total number of bytes of physical memory. - - CLI Example: -@@ -497,9 +537,9 @@ def total_physical_memory(): - .. code-block:: bash - - salt '*' ps.total_physical_memory -- ''' -+ """ - if psutil.version_info < (0, 6, 0): -- msg = 'virtual_memory is only available in psutil 0.6.0 or greater' -+ msg = "virtual_memory is only available in psutil 0.6.0 or greater" - raise CommandExecutionError(msg) - try: - return psutil.virtual_memory().total -@@ -510,7 +550,7 @@ def total_physical_memory(): - - - def num_cpus(): -- ''' -+ """ - Return the number of CPUs. - - CLI Example: -@@ -518,7 +558,7 @@ def num_cpus(): - .. code-block:: bash - - salt '*' ps.num_cpus -- ''' -+ """ - try: - return psutil.cpu_count() - except AttributeError: -@@ -528,7 +568,7 @@ def num_cpus(): - - - def boot_time(time_format=None): -- ''' -+ """ - Return the boot time in number of seconds since the epoch began. - - CLI Example: -@@ -545,7 +585,7 @@ def boot_time(time_format=None): - .. 
code-block:: bash - - salt '*' ps.boot_time -- ''' -+ """ - try: - b_time = int(psutil.boot_time()) - except AttributeError: -@@ -558,12 +598,12 @@ def boot_time(time_format=None): - try: - return b_time.strftime(time_format) - except TypeError as exc: -- raise SaltInvocationError('Invalid format string: {0}'.format(exc)) -+ raise SaltInvocationError("Invalid format string: {0}".format(exc)) - return b_time - - - def network_io_counters(interface=None): -- ''' -+ """ - Return network I/O statistics. - - CLI Example: -@@ -573,7 +613,7 @@ def network_io_counters(interface=None): - salt '*' ps.network_io_counters - - salt '*' ps.network_io_counters interface=eth0 -- ''' -+ """ - if not interface: - return dict(psutil.net_io_counters()._asdict()) - else: -@@ -585,7 +625,7 @@ def network_io_counters(interface=None): - - - def disk_io_counters(device=None): -- ''' -+ """ - Return disk I/O statistics. - - CLI Example: -@@ -595,7 +635,7 @@ def disk_io_counters(device=None): - salt '*' ps.disk_io_counters - - salt '*' ps.disk_io_counters device=sda1 -- ''' -+ """ - if not device: - return dict(psutil.disk_io_counters()._asdict()) - else: -@@ -607,7 +647,7 @@ def disk_io_counters(device=None): - - - def get_users(): -- ''' -+ """ - Return logged-in users. - - CLI Example: -@@ -615,7 +655,7 @@ def get_users(): - .. code-block:: bash - - salt '*' ps.get_users -- ''' -+ """ - try: - recs = psutil.users() - return [dict(x._asdict()) for x in recs] -@@ -634,14 +674,20 @@ def get_users(): - started = rec[8] - if isinstance(started, tuple): - started = started[0] -- result.append({'name': rec[4], 'terminal': rec[2], -- 'started': started, 'host': rec[5]}) -+ result.append( -+ { -+ "name": rec[4], -+ "terminal": rec[2], -+ "started": started, -+ "host": rec[5], -+ } -+ ) - except ImportError: - return False - - - def lsof(name): -- ''' -+ """ - Retrieve the lsof information of the given process name. - - CLI Example: -@@ -649,17 +695,17 @@ def lsof(name): - .. code-block:: bash - - salt '*' ps.lsof apache2 -- ''' -+ """ - sanitize_name = six.text_type(name) -- lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) -+ lsof_infos = __salt__["cmd.run"]("lsof -c " + sanitize_name) - ret = [] - ret.extend([sanitize_name, lsof_infos]) - return ret - - --@salt.utils.decorators.path.which('netstat') -+@salt.utils.decorators.path.which("netstat") - def netstat(name): -- ''' -+ """ - Retrieve the netstat information of the given process name. - - CLI Example: -@@ -667,9 +713,9 @@ def netstat(name): - .. code-block:: bash - - salt '*' ps.netstat apache2 -- ''' -+ """ - sanitize_name = six.text_type(name) -- netstat_infos = __salt__['cmd.run']("netstat -nap") -+ netstat_infos = __salt__["cmd.run"]("netstat -nap") - found_infos = [] - ret = [] - for info in netstat_infos.splitlines(): -@@ -679,9 +725,9 @@ def netstat(name): - return ret - - --@salt.utils.decorators.path.which('ss') -+@salt.utils.decorators.path.which("ss") - def ss(name): -- ''' -+ """ - Retrieve the ss information of the given process name. - - CLI Example: -@@ -692,9 +738,9 @@ def ss(name): - - .. versionadded:: 2016.11.6 - -- ''' -+ """ - sanitize_name = six.text_type(name) -- ss_infos = __salt__['cmd.run']("ss -neap") -+ ss_infos = __salt__["cmd.run"]("ss -neap") - found_infos = [] - ret = [] - for info in ss_infos.splitlines(): -@@ -705,7 +751,7 @@ def ss(name): - - - def psaux(name): -- ''' -+ """ - Retrieve information corresponding to a "ps aux" filtered - with the given pattern. 
It could be just a name or a regular - expression (using python search from "re" module). -@@ -715,11 +761,11 @@ def psaux(name): - .. code-block:: bash - - salt '*' ps.psaux www-data.+apache2 -- ''' -+ """ - sanitize_name = six.text_type(name) - pattern = re.compile(sanitize_name) - salt_exception_pattern = re.compile("salt.+ps.psaux.+") -- ps_aux = __salt__['cmd.run']("ps aux") -+ ps_aux = __salt__["cmd.run"]("ps aux") - found_infos = [] - ret = [] - nb_lines = 0 --- -2.29.1 - - diff --git a/fix-for-log-checking-in-x509-test.patch b/fix-for-log-checking-in-x509-test.patch index 567ba7b..53679d8 100644 --- a/fix-for-log-checking-in-x509-test.patch +++ b/fix-for-log-checking-in-x509-test.patch @@ -1,4 +1,4 @@ -From e0ca0d0d2a62f18e2712223e130af5faa8e0fe05 Mon Sep 17 00:00:00 2001 +From b4f54187ae7d231250f72244ffd874cc2c846150 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Thu, 28 Nov 2019 15:23:36 +0100 Subject: [PATCH] Fix for log checking in x509 test @@ -9,10 +9,10 @@ We are logging in debug and not in trace mode here. 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/modules/test_x509.py b/tests/unit/modules/test_x509.py -index 624a927bec..976af634c7 100644 +index 40aea12272..e7503395eb 100644 --- a/tests/unit/modules/test_x509.py +++ b/tests/unit/modules/test_x509.py -@@ -68,9 +68,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin): +@@ -127,9 +127,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin): subj = FakeSubject() x509._parse_subject(subj) @@ -23,9 +23,9 @@ index 624a927bec..976af634c7 100644 + assert x509.log.debug.call_args[0][1] == list(subj.nid.keys())[0] + assert isinstance(x509.log.debug.call_args[0][2], TypeError) - @skipIf(not HAS_M2CRYPTO, 'Skipping, M2Crypto is unavailble') + @skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable") def test_get_pem_entry(self): -- -2.16.4 +2.29.2 diff --git a/fix-for-return-value-ret-vs-return-in-batch-mode.patch b/fix-for-return-value-ret-vs-return-in-batch-mode.patch deleted file mode 100644 index 04e79d5..0000000 --- a/fix-for-return-value-ret-vs-return-in-batch-mode.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 0c988e1db59a255b2f707c4e626cec21ff06d7a3 Mon Sep 17 00:00:00 2001 -From: Jochen Breuer -Date: Thu, 9 Apr 2020 17:12:54 +0200 -Subject: [PATCH] Fix for return value ret vs return in batch mode - -The least intrusive fix for ret vs return in batch mode. 
---- - salt/cli/batch.py | 16 ++++++---- - tests/unit/cli/test_batch.py | 62 ++++++++++++++++++++++++++++++++++++ - 2 files changed, 71 insertions(+), 7 deletions(-) - -diff --git a/salt/cli/batch.py b/salt/cli/batch.py -index 10fc81a5f4..d5b8754ad7 100644 ---- a/salt/cli/batch.py -+++ b/salt/cli/batch.py -@@ -234,14 +234,16 @@ class Batch(object): - if not self.quiet: - salt.utils.stringutils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_))) - # create a new iterator for this batch of minions -+ return_value = self.opts.get("return", self.opts.get("ret", "")) - new_iter = self.local.cmd_iter_no_block( -- *args, -- raw=self.opts.get('raw', False), -- ret=self.opts.get('return', ''), -- show_jid=show_jid, -- verbose=show_verbose, -- gather_job_timeout=self.opts['gather_job_timeout'], -- **self.eauth) -+ *args, -+ raw=self.opts.get("raw", False), -+ ret=return_value, -+ show_jid=show_jid, -+ verbose=show_verbose, -+ gather_job_timeout=self.opts["gather_job_timeout"], -+ **self.eauth -+ ) - # add it to our iterators and to the minion_tracker - iters.append(new_iter) - minion_tracker[new_iter] = {} -diff --git a/tests/unit/cli/test_batch.py b/tests/unit/cli/test_batch.py -index acabbe51f5..d7411e8039 100644 ---- a/tests/unit/cli/test_batch.py -+++ b/tests/unit/cli/test_batch.py -@@ -72,3 +72,65 @@ class BatchTestCase(TestCase): - ''' - ret = Batch.get_bnum(self.batch) - self.assertEqual(ret, None) -+ -+ def test_return_value_in_run_for_ret(self): -+ """ -+ cmd_iter_no_block should have been called with a return no matter if -+ the return value was in ret or return. -+ """ -+ self.batch.opts = { -+ "batch": "100%", -+ "timeout": 5, -+ "fun": "test", -+ "arg": "foo", -+ "gather_job_timeout": 5, -+ "ret": "my_return", -+ } -+ self.batch.minions = ["foo", "bar", "baz"] -+ self.batch.local.cmd_iter_no_block = MagicMock(return_value=iter([])) -+ ret = Batch.run(self.batch) -+ # We need to fetch at least one object to trigger the relevant code path. -+ x = next(ret) -+ self.batch.local.cmd_iter_no_block.assert_called_with( -+ ["baz", "bar", "foo"], -+ "test", -+ "foo", -+ 5, -+ "list", -+ raw=False, -+ ret="my_return", -+ show_jid=False, -+ verbose=False, -+ gather_job_timeout=5, -+ ) -+ -+ def test_return_value_in_run_for_return(self): -+ """ -+ cmd_iter_no_block should have been called with a return no matter if -+ the return value was in ret or return. -+ """ -+ self.batch.opts = { -+ "batch": "100%", -+ "timeout": 5, -+ "fun": "test", -+ "arg": "foo", -+ "gather_job_timeout": 5, -+ "return": "my_return", -+ } -+ self.batch.minions = ["foo", "bar", "baz"] -+ self.batch.local.cmd_iter_no_block = MagicMock(return_value=iter([])) -+ ret = Batch.run(self.batch) -+ # We need to fetch at least one object to trigger the relevant code path. 
-+ x = next(ret) -+ self.batch.local.cmd_iter_no_block.assert_called_with( -+ ["baz", "bar", "foo"], -+ "test", -+ "foo", -+ 5, -+ "list", -+ raw=False, -+ ret="my_return", -+ show_jid=False, -+ verbose=False, -+ gather_job_timeout=5, -+ ) --- -2.26.1 - - diff --git a/fix-for-suse-expanded-support-detection.patch b/fix-for-suse-expanded-support-detection.patch index 8a72ce9..e9349fd 100644 --- a/fix-for-suse-expanded-support-detection.patch +++ b/fix-for-suse-expanded-support-detection.patch @@ -1,4 +1,4 @@ -From 16d656744d2e7d915757d6f2ae26b57ad8230b0b Mon Sep 17 00:00:00 2001 +From 369a732537937dd6865152a87f04777539b27fcd Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Thu, 6 Sep 2018 17:15:18 +0200 Subject: [PATCH] Fix for SUSE Expanded Support detection @@ -14,26 +14,26 @@ This change also adds a check for redhat-release and then marks the 1 file changed, 9 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 9b244def9c..2851809472 100644 +index 436c058eb6..00bd0565bf 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1892,6 +1892,15 @@ def os_data(): - log.trace('Parsing distrib info from /etc/centos-release') +@@ -1990,6 +1990,15 @@ def os_data(): + log.trace("Parsing distrib info from /etc/centos-release") # CentOS Linux - grains['lsb_distrib_id'] = 'CentOS' + grains["lsb_distrib_id"] = "CentOS" + # Maybe CentOS Linux; could also be SUSE Expanded Support. + # SUSE ES has both, centos-release and redhat-release. -+ if os.path.isfile('/etc/redhat-release'): -+ with salt.utils.files.fopen('/etc/redhat-release') as ifile: ++ if os.path.isfile("/etc/redhat-release"): ++ with salt.utils.files.fopen("/etc/redhat-release") as ifile: + for line in ifile: + if "red hat enterprise linux server" in line.lower(): + # This is a SUSE Expanded Support Rhel installation -+ grains['lsb_distrib_id'] = 'RedHat' ++ grains["lsb_distrib_id"] = "RedHat" + break - with salt.utils.files.fopen('/etc/centos-release') as ifile: + with salt.utils.files.fopen("/etc/centos-release") as ifile: for line in ifile: # Need to pull out the version and codename -- -2.16.4 +2.29.2 diff --git a/fix-for-temp-folder-definition-in-loader-unit-test.patch b/fix-for-temp-folder-definition-in-loader-unit-test.patch index 86fb7cd..548f3ff 100644 --- a/fix-for-temp-folder-definition-in-loader-unit-test.patch +++ b/fix-for-temp-folder-definition-in-loader-unit-test.patch @@ -1,4 +1,4 @@ -From dd01a0fc594f024eee2267bed2f698f5a6c729bf Mon Sep 17 00:00:00 2001 +From 33766e59bd53fac2c75e6ccfa1f363e2f7b1b65f Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 16 Mar 2020 15:25:42 +0100 Subject: [PATCH] Fix for temp folder definition in loader unit test @@ -8,13 +8,13 @@ Subject: [PATCH] Fix for temp folder definition in loader unit test 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py -index fe11cd0681..7e369f2c3b 100644 +index 863e2182b9..5b23ad83e3 100644 --- a/tests/unit/test_loader.py +++ b/tests/unit/test_loader.py -@@ -152,12 +152,12 @@ class LazyLoaderUtilsTest(TestCase): +@@ -240,12 +240,12 @@ class LazyLoaderUtilsTest(TestCase): def setUpClass(cls): cls.opts = salt.config.minion_config(None) - cls.opts['grains'] = salt.loader.grains(cls.opts) + cls.opts["grains"] = salt.loader.grains(cls.opts) - if not os.path.isdir(TMP): - os.makedirs(TMP) + if not os.path.isdir(RUNTIME_VARS.TMP): @@ -24,19 +24,19 @@ index fe11cd0681..7e369f2c3b 100644 # Setup the module - self.module_dir = tempfile.mkdtemp(dir=TMP) + self.module_dir = 
tempfile.mkdtemp(dir=RUNTIME_VARS.TMP) - self.module_file = os.path.join(self.module_dir, - '{}.py'.format(self.module_name)) - with salt.utils.files.fopen(self.module_file, 'w') as fh: -@@ -165,7 +165,7 @@ class LazyLoaderUtilsTest(TestCase): + self.module_file = os.path.join( + self.module_dir, "{}.py".format(self.module_name) + ) +@@ -254,7 +254,7 @@ class LazyLoaderUtilsTest(TestCase): fh.flush() os.fsync(fh.fileno()) - self.utils_dir = tempfile.mkdtemp(dir=TMP) + self.utils_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP) - self.utils_file = os.path.join(self.utils_dir, - '{}.py'.format(self.utils_name)) - with salt.utils.files.fopen(self.utils_file, 'w') as fh: + self.utils_file = os.path.join(self.utils_dir, "{}.py".format(self.utils_name)) + with salt.utils.files.fopen(self.utils_file, "w") as fh: + fh.write(salt.utils.stringutils.to_str(loader_template_utils)) -- -2.16.4 +2.29.2 diff --git a/fix-git_pillar-merging-across-multiple-__env__-repos.patch b/fix-git_pillar-merging-across-multiple-__env__-repos.patch index 9dca7fb..046b837 100644 --- a/fix-git_pillar-merging-across-multiple-__env__-repos.patch +++ b/fix-git_pillar-merging-across-multiple-__env__-repos.patch @@ -1,4 +1,4 @@ -From 900d63bc5e85496e16373025457561b405f2329f Mon Sep 17 00:00:00 2001 +From f5c9527aeee190a66a908037770c80a75e911d8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Tue, 6 Nov 2018 16:38:54 +0000 @@ -11,37 +11,42 @@ Test git ext_pillar across multiple repos using __env__ Remove unicode references --- - tests/integration/pillar/test_git_pillar.py | 45 +++++++++++++++++++++++++++++ - 1 file changed, 45 insertions(+) + tests/integration/pillar/test_git_pillar.py | 55 +++++++++++++++++++++ + 1 file changed, 55 insertions(+) diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py -index 2e549f3948..d417a7ebc3 100644 +index c0362127f6..979dfebb94 100644 --- a/tests/integration/pillar/test_git_pillar.py +++ b/tests/integration/pillar/test_git_pillar.py -@@ -1382,6 +1382,51 @@ class TestPygit2SSH(GitPillarSSHTestBase): - 'nested_dict': {'master': True}}} +@@ -1600,6 +1600,61 @@ class TestPygit2SSH(GitPillarSSHTestBase): + }, ) + +@skipIf(NO_MOCK, NO_MOCK_REASON) -+@skipIf(_windows_or_mac(), 'minion is windows or mac') ++@skipIf(_windows_or_mac(), "minion is windows or mac") +@skip_if_not_root -+@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER)) -+@skipIf(not HAS_NGINX, 'nginx not present') -+@skipIf(not HAS_VIRTUALENV, 'virtualenv not present') ++@skipIf( ++ not HAS_PYGIT2, ++ "pygit2 >= {} and libgit2 >= {} required".format(PYGIT2_MINVER, LIBGIT2_MINVER), ++) ++@skipIf(not HAS_NGINX, "nginx not present") ++@skipIf(not HAS_VIRTUALENV, "virtualenv not present") +class TestPygit2HTTP(GitPillarHTTPTestBase): -+ ''' ++ """ + Test git_pillar with pygit2 using SSH authentication -+ ''' ++ """ ++ + def test_single_source(self): -+ ''' ++ """ + Test with git_pillar_includes enabled and using "__env__" as the branch + name for the configured repositories. + The "gitinfo" repository contains top.sls file with a local reference + and also referencing external "nowhere.foo" which is provided by "webinfo" + repository mounted as "nowhere". 
-+ ''' -+ ret = self.get_pillar('''\ ++ """ ++ ret = self.get_pillar( ++ """\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 @@ -56,21 +61,26 @@ index 2e549f3948..d417a7ebc3 100644 + - __env__ {url}: + - name: webinfo + - mountpoint: nowhere -+ ''') ++ """ ++ ) + self.assertEqual( + ret, -+ {'branch': 'master', -+ 'motd': 'The force will be with you. Always.', -+ 'mylist': ['master'], -+ 'mydict': {'master': True, -+ 'nested_list': ['master'], -+ 'nested_dict': {'master': True}}} ++ { ++ "branch": "master", ++ "motd": "The force will be with you. Always.", ++ "mylist": ["master"], ++ "mydict": { ++ "master": True, ++ "nested_list": ["master"], ++ "nested_dict": {"master": True}, ++ }, ++ }, + ) + - @requires_system_grains - def test_root_parameter(self, grains): - ''' + @slowTest + def test_root_parameter(self): + """ -- -2.16.4 +2.29.2 diff --git a/fix-grains.test_core-unit-test-277.patch b/fix-grains.test_core-unit-test-277.patch index a65482a..8e9371b 100644 --- a/fix-grains.test_core-unit-test-277.patch +++ b/fix-grains.test_core-unit-test-277.patch @@ -1,4 +1,4 @@ -From 4998996a08db72a1b925b2c3f725c4fba4fe9622 Mon Sep 17 00:00:00 2001 +From e2ff2f339ce7938ecdadf867f285a559bc2431dd Mon Sep 17 00:00:00 2001 From: Dominik Gedon Date: Tue, 6 Oct 2020 14:00:55 +0200 Subject: [PATCH] Fix grains.test_core unit test (#277) @@ -6,41 +6,38 @@ Subject: [PATCH] Fix grains.test_core unit test (#277) This reverts 63b94ae and fixes the grains test_core unit test. The changes are aligned with upstream. --- - tests/unit/grains/test_core.py | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) + tests/unit/grains/test_core.py | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 36aa49f232..d3b6515d00 100644 +index 918a9155cb..15de4e363e 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -69,10 +69,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - def test_parse_etc_os_release(self, path_isfile_mock): - path_isfile_mock.side_effect = lambda x: x == "/usr/lib/os-release" - with salt.utils.files.fopen(os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")) as os_release_file: +@@ -60,11 +60,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with salt.utils.files.fopen( + os.path.join(OS_RELEASE_DIR, "ubuntu-17.10") + ) as os_release_file: - os_release_content = os_release_file.readlines() - with patch("salt.utils.files.fopen", mock_open()) as os_release_file: - os_release_file.return_value.__iter__.return_value = os_release_content -- os_release = core._parse_os_release(["/etc/os-release", "/usr/lib/os-release"]) + os_release_content = os_release_file.read() + with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)): -+ os_release = core._parse_os_release( + os_release = core._parse_os_release( +- ["/etc/os-release", "/usr/lib/os-release"] + "/etc/os-release", "/usr/lib/os-release" -+ ) - self.assertEqual(os_release, { - "NAME": "Ubuntu", - "VERSION": "17.10 (Artful Aardvark)", -@@ -134,7 +135,9 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - + ) + self.assertEqual( + os_release, +@@ -174,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): def test_missing_os_release(self): - with patch('salt.utils.files.fopen', mock_open(read_data={})): -- os_release = core._parse_os_release(['/etc/os-release', '/usr/lib/os-release']) -+ os_release = 
core._parse_os_release( + with patch("salt.utils.files.fopen", mock_open(read_data={})): + os_release = core._parse_os_release( +- ["/etc/os-release", "/usr/lib/os-release"] + "/etc/os-release", "/usr/lib/os-release" -+ ) + ) self.assertEqual(os_release, {}) - @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows') -- -2.28.0 +2.29.2 diff --git a/fix-ipv6-scope-bsc-1108557.patch b/fix-ipv6-scope-bsc-1108557.patch index b29edfb..634cc49 100644 --- a/fix-ipv6-scope-bsc-1108557.patch +++ b/fix-ipv6-scope-bsc-1108557.patch @@ -1,4 +1,4 @@ -From 2cb7515f83e2c358b84724e4eb581daa78012fdf Mon Sep 17 00:00:00 2001 +From 082bb6a25b2b025a5c7c6fdbf7dbcbe64a39da2c Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 28 Sep 2018 15:22:33 +0200 Subject: [PATCH] Fix IPv6 scope (bsc#1108557) @@ -69,14 +69,14 @@ Lintfix: W0611 Reverse skipping tests: if no ipaddress --- - salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/salt/_compat.py b/salt/_compat.py -index e999605d2c..965bb90da3 100644 +index 011eb8af9e..d9425523cf 100644 --- a/salt/_compat.py +++ b/salt/_compat.py -@@ -230,7 +230,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped): +@@ -242,7 +242,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped): self.hostmask = self.network.hostmask @@ -159,6 +159,6 @@ index e999605d2c..965bb90da3 100644 + ipaddress.ip_address = ip_address + ipaddress.ip_interface = ip_interface -- -2.16.4 +2.29.2 diff --git a/fix-issue-2068-test.patch b/fix-issue-2068-test.patch index d0a00ee..0c1eee7 100644 --- a/fix-issue-2068-test.patch +++ b/fix-issue-2068-test.patch @@ -1,4 +1,4 @@ -From bfdd7f946d56d799e89b33f7e3b72426732b0195 Mon Sep 17 00:00:00 2001 +From db77ad3e24daf3bc014dc3d85a49aa1bb33ae1ae Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Wed, 9 Jan 2019 16:08:19 +0100 Subject: [PATCH] Fix issue #2068 test @@ -13,19 +13,19 @@ Minor update: more correct is-dict check. 
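For context before the diffstat: the salt/state.py hunk that follows hardens high-data handling in two ways, skipping chunks that are not dicts and inserting "__call__" before the trailing order entry instead of re-appending the order after it. Below is a minimal, self-contained sketch of that same guard; it is not Salt code, and the sample "high" data and names are hypothetical.

# Sketch of the is-dict guard and the insert(-1, '__call__') fix.
import collections.abc  # the patch itself uses the older collections.Mapping

high = {
    "/etc/motd": {"file": [{"name": "/etc/motd"}, {"order": 10000}]},
    "bogus": "not-a-dict",  # previously made the loop blow up
}

for chunk, state in high.items():
    if not isinstance(state, collections.abc.Mapping):
        continue  # guard: ignore malformed chunks
    for state_ref, args in state.items():
        if not isinstance(args, list):
            continue
        # no function name present as a plain string -> default to '__call__'
        if not any(isinstance(argset, str) for argset in args):
            args.insert(-1, "__call__")  # keep the order entry last

print(high["/etc/motd"]["file"])
# -> [{'name': '/etc/motd'}, '__call__', {'order': 10000}]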
1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/state.py b/salt/state.py -index bc5277554e..2fa5f64ca5 100644 +index b1bce4e0cd..cc6db7e1b2 100644 --- a/salt/state.py +++ b/salt/state.py -@@ -25,6 +25,7 @@ import traceback - import re - import time - import random -+import collections +@@ -12,6 +12,7 @@ The data sent to the state calls is as follows: + """ - # Import salt libs - import salt.loader -@@ -2896,16 +2897,18 @@ class State(object): - ''' + ++import collections + import copy + import datetime + import fnmatch +@@ -3206,16 +3207,18 @@ class State: + """ for chunk in high: state = high[chunk] + if not isinstance(state, collections.Mapping): @@ -35,18 +35,18 @@ index bc5277554e..2fa5f64ca5 100644 + if not isinstance(state[state_ref], list): + continue for argset in state[state_ref]: - if isinstance(argset, six.string_types): + if isinstance(argset, str): needs_default = False break if needs_default: - order = state[state_ref].pop(-1) -- state[state_ref].append('__call__') +- state[state_ref].append("__call__") - state[state_ref].append(order) -+ state[state_ref].insert(-1, '__call__') ++ state[state_ref].insert(-1, "__call__") def call_high(self, high, orchestration_jid=None): - ''' + """ -- -2.16.4 +2.29.2 diff --git a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch b/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch index b41b086..4f71f90 100644 --- a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch +++ b/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch @@ -1,4 +1,4 @@ -From 77d53d9567b7aec045a8fffd29afcb76a8405caf Mon Sep 17 00:00:00 2001 +From 00c538383e463febba492e74577ae64be80d4d00 Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Mon, 16 Sep 2019 11:27:30 +0200 Subject: [PATCH] Fix memory leak produced by batch async find_jobs @@ -16,16 +16,16 @@ Multiple fixes: Co-authored-by: Pablo Suárez Hernández --- - salt/cli/batch_async.py | 60 ++++++++++++++++++++++++++++++++----------------- + salt/cli/batch_async.py | 59 ++++++++++++++++++++++++++++------------- salt/client/__init__.py | 1 + - salt/master.py | 1 - + salt/master.py | 2 -- 3 files changed, 41 insertions(+), 21 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 8c8f481e34..8a67331102 100644 +index 7225491228..388b709416 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -72,6 +72,7 @@ class BatchAsync(object): +@@ -73,6 +73,7 @@ class BatchAsync: self.done_minions = set() self.active = set() self.initialized = False @@ -33,55 +33,58 @@ index 8c8f481e34..8a67331102 100644 self.ping_jid = jid_gen() self.batch_jid = jid_gen() self.find_job_jid = jid_gen() -@@ -89,14 +90,11 @@ class BatchAsync(object): +@@ -91,14 +92,11 @@ class BatchAsync: def __set_event_handler(self): - ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid) - batch_return_pattern = 'salt/job/{0}/ret/*'.format(self.batch_jid) -- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(self.find_job_jid) - self.event.subscribe(ping_return_pattern, match_type='glob') - self.event.subscribe(batch_return_pattern, match_type='glob') -- self.event.subscribe(find_job_return_pattern, match_type='glob') + ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) + batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid) +- find_job_return_pattern = "salt/job/{}/ret/*".format(self.find_job_jid) + self.event.subscribe(ping_return_pattern, match_type="glob") + self.event.subscribe(batch_return_pattern, match_type="glob") +- 
self.event.subscribe(find_job_return_pattern, match_type="glob") - self.event.patterns = { + self.patterns = { - (ping_return_pattern, 'ping_return'), - (batch_return_pattern, 'batch_run'), -- (find_job_return_pattern, 'find_job_return') + (ping_return_pattern, "ping_return"), + (batch_return_pattern, "batch_run"), +- (find_job_return_pattern, "find_job_return"), } self.event.set_event_handler(self.__event_handler) -@@ -104,7 +102,7 @@ class BatchAsync(object): +@@ -106,7 +104,7 @@ class BatchAsync: if not self.event: return mtag, data = self.event.unpack(raw, self.event.serial) - for (pattern, op) in self.event.patterns: + for (pattern, op) in self.patterns: if fnmatch.fnmatch(mtag, pattern): - minion = data['id'] - if op == 'ping_return': -@@ -112,7 +110,8 @@ class BatchAsync(object): + minion = data["id"] + if op == "ping_return": +@@ -114,7 +112,8 @@ class BatchAsync: if self.targeted_minions == self.minions: self.event.io_loop.spawn_callback(self.start_batch) - elif op == 'find_job_return': + elif op == "find_job_return": - self.find_job_returned.add(minion) + if data.get("return", None): + self.find_job_returned.add(minion) - elif op == 'batch_run': + elif op == "batch_run": if minion in self.active: self.active.remove(minion) -@@ -131,31 +130,46 @@ class BatchAsync(object): +@@ -134,7 +133,11 @@ class BatchAsync: return set(list(to_run)[:next_batch_size]) @tornado.gen.coroutine - def check_find_job(self, batch_minions): + def check_find_job(self, batch_minions, jid): -+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) -+ self.event.unsubscribe(find_job_return_pattern, match_type='glob') ++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) ++ self.event.unsubscribe(find_job_return_pattern, match_type="glob") + self.patterns.remove((find_job_return_pattern, "find_job_return")) + - timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions) - self.timedout_minions = self.timedout_minions.union(timedout_minions) - self.active = self.active.difference(self.timedout_minions) - running = batch_minions.difference(self.done_minions).difference(self.timedout_minions) + timedout_minions = batch_minions.difference(self.find_job_returned).difference( + self.done_minions + ) +@@ -143,27 +146,39 @@ class BatchAsync: + running = batch_minions.difference(self.done_minions).difference( + self.timedout_minions + ) + if timedout_minions: self.schedule_next() @@ -95,56 +98,59 @@ index 8c8f481e34..8a67331102 100644 - not_done = minions.difference(self.done_minions) - ping_return = yield self.local.run_job_async( - not_done, -- 'saltutil.find_job', +- "saltutil.find_job", - [self.batch_jid], -- 'list', -- gather_job_timeout=self.opts['gather_job_timeout'], +- "list", +- gather_job_timeout=self.opts["gather_job_timeout"], - jid=self.find_job_jid, -- **self.eauth) +- **self.eauth +- ) - self.event.io_loop.call_later( -- self.opts['gather_job_timeout'], -- self.check_find_job, -- not_done) -+ not_done = minions.difference(self.done_minions).difference(self.timedout_minions) -+ +- self.opts["gather_job_timeout"], self.check_find_job, not_done ++ not_done = minions.difference(self.done_minions).difference( ++ self.timedout_minions + ) + + if not_done: + jid = self.jid_gen() -+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) ++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) + self.patterns.add((find_job_return_pattern, "find_job_return")) -+ self.event.subscribe(find_job_return_pattern, match_type='glob') ++ 
self.event.subscribe(find_job_return_pattern, match_type="glob") + + ret = yield self.local.run_job_async( + not_done, -+ 'saltutil.find_job', ++ "saltutil.find_job", + [self.batch_jid], -+ 'list', -+ gather_job_timeout=self.opts['gather_job_timeout'], ++ "list", ++ gather_job_timeout=self.opts["gather_job_timeout"], + jid=jid, -+ **self.eauth) ++ **self.eauth ++ ) + self.event.io_loop.call_later( -+ self.opts['gather_job_timeout'], -+ self.check_find_job, -+ not_done, -+ jid) - ++ self.opts["gather_job_timeout"], self.check_find_job, not_done, jid ++ ) ++ @tornado.gen.coroutine def start(self): -@@ -203,6 +217,9 @@ class BatchAsync(object): + self.__set_event_handler() +@@ -211,6 +226,9 @@ class BatchAsync: } - self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) + self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) self.event.remove_event_handler(self.__event_handler) + for (pattern, label) in self.patterns: + if label in ["ping_return", "batch_run"]: -+ self.event.unsubscribe(pattern, match_type='glob') ++ self.event.unsubscribe(pattern, match_type="glob") def schedule_next(self): if not self.scheduled: -@@ -226,9 +243,12 @@ class BatchAsync(object): - gather_job_timeout=self.opts['gather_job_timeout'], +@@ -235,11 +253,14 @@ class BatchAsync: jid=self.batch_jid, - metadata=self.metadata) + metadata=self.metadata, + ) + - self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch)) + self.event.io_loop.call_later( + self.opts["timeout"], self.find_job, set(next_batch) + ) except Exception as ex: + log.error("Error in scheduling next batch: %s", ex) self.active = self.active.difference(next_batch) @@ -153,30 +159,31 @@ index 8c8f481e34..8a67331102 100644 self.scheduled = False + yield diff --git a/salt/client/__init__.py b/salt/client/__init__.py -index 3bbc7f9de7..a48d79ef8d 100644 +index 1e9f11df4c..cc8fd4048d 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py -@@ -1622,6 +1622,7 @@ class LocalClient(object): - 'key': self.key, - 'tgt_type': tgt_type, - 'ret': ret, -+ 'timeout': timeout, - 'jid': jid} +@@ -1776,6 +1776,7 @@ class LocalClient: + "key": self.key, + "tgt_type": tgt_type, + "ret": ret, ++ "timeout": timeout, + "jid": jid, + } - # if kwargs are passed, pack them. 
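Before the salt/master.py hunk, a hedged illustration of the pattern the batch_async hunks above establish: each find_job run subscribes to an event pattern scoped to its own jid, and check_find_job() unsubscribes once the returns have been consumed, so handlers and buffered events for finished jobs no longer accumulate. The MiniEventBus below is a made-up stand-in, not Salt's real SaltEvent API.

import fnmatch

class MiniEventBus:
    def __init__(self):
        self.subscriptions = set()
        self.buffered = []  # events retained for active subscriptions

    def subscribe(self, pattern):
        self.subscriptions.add(pattern)

    def unsubscribe(self, pattern):
        self.subscriptions.discard(pattern)
        # drop buffered events nothing listens for anymore; this is
        # what releases the memory the old code kept holding
        self.buffered = [
            (tag, data)
            for (tag, data) in self.buffered
            if any(fnmatch.fnmatch(tag, p) for p in self.subscriptions)
        ]

    def fire(self, tag, data):
        if any(fnmatch.fnmatch(tag, p) for p in self.subscriptions):
            self.buffered.append((tag, data))

bus = MiniEventBus()
jid = "20190916112730000000"  # hypothetical find_job jid
pattern = "salt/job/{}/ret/*".format(jid)

bus.subscribe(pattern)  # find_job(): listen only for this jid's returns
bus.fire("salt/job/{}/ret/minion1".format(jid), {"return": True})
# ... gather_job_timeout elapses, check_find_job() runs ...
bus.unsubscribe(pattern)  # returns consumed; stop retaining them
assert not bus.buffered

Scoping the subscription to the per-run jid, rather than the single long-lived find_job_jid the old code registered once, is what lets each run clean up after itself.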
diff --git a/salt/master.py b/salt/master.py -index 5e2277ba76..3abf7ae60b 100644 +index b9bc1a7a67..7a99af357a 100644 --- a/salt/master.py +++ b/salt/master.py -@@ -2044,7 +2044,6 @@ class ClearFuncs(object): +@@ -2232,8 +2232,6 @@ class ClearFuncs(TransportMethods): def publish_batch(self, clear_load, minions, missing): batch_load = {} batch_load.update(clear_load) - import salt.cli.batch_async +- batch = salt.cli.batch_async.BatchAsync( self.local.opts, functools.partial(self._prep_jid, clear_load, {}), -- -2.16.4 +2.29.2 diff --git a/fix-novendorchange-option-284.patch b/fix-novendorchange-option-284.patch index 1733896..5c3178a 100644 --- a/fix-novendorchange-option-284.patch +++ b/fix-novendorchange-option-284.patch @@ -1,4 +1,4 @@ -From f69c1178de003866af412e61e0146597974eec0d Mon Sep 17 00:00:00 2001 +From 4123cf7b9428af1442f4aa0a54489e5c0deb4aaa Mon Sep 17 00:00:00 2001 From: Martin Seidl Date: Tue, 27 Oct 2020 16:12:29 +0100 Subject: [PATCH] Fix novendorchange option (#284) @@ -7,39 +7,43 @@ Subject: [PATCH] Fix novendorchange option (#284) * refactor handling of novendorchange and fix tests --- - salt/modules/zypperpkg.py | 19 ++-- - tests/unit/modules/test_zypperpkg.py | 150 ++++++++++++++++++++++++--- - 2 files changed, 148 insertions(+), 21 deletions(-) + salt/modules/zypperpkg.py | 21 +++--- + tests/unit/modules/test_zypperpkg.py | 100 ++++++++++++++++++++++++++- + 2 files changed, 108 insertions(+), 13 deletions(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index ad11da4ad1..d84a6af6e0 100644 +index 5369a0342e..d06c265202 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -1617,7 +1617,7 @@ def upgrade(refresh=True, - dryrun=False, - dist_upgrade=False, - fromrepo=None, -- novendorchange=False, -+ novendorchange=True, - skip_verify=False, - no_recommends=False, - root=None, -@@ -1701,13 +1701,18 @@ def upgrade(refresh=True, - log.info('Targeting repos: %s', fromrepo) +@@ -1707,7 +1707,7 @@ def upgrade( + dryrun=False, + dist_upgrade=False, + fromrepo=None, +- novendorchange=False, ++ novendorchange=True, + skip_verify=False, + no_recommends=False, + root=None, +@@ -1794,19 +1794,18 @@ def upgrade( + log.info("Targeting repos: %s", fromrepo) if dist_upgrade: - if novendorchange: - # TODO: Grains validation should be moved to Zypper class -- if __grains__['osrelease_info'][0] > 11: -- cmd_update.append('--no-allow-vendor-change') -- log.info('Disabling vendor changes') +- if __grains__["osrelease_info"][0] > 11: + # TODO: Grains validation should be moved to Zypper class + if __grains__["osrelease_info"][0] > 11: + if novendorchange: -+ cmd_update.append("--no-allow-vendor-change") -+ log.info("Disabling vendor changes") + cmd_update.append("--no-allow-vendor-change") + log.info("Disabling vendor changes") else: -- log.warning('Disabling vendor changes is not supported on this Zypper version') +- log.warning( +- "Disabling vendor changes is not supported on this Zypper version" +- ) +- +- if no_recommends: +- cmd_update.append("--no-recommends") +- log.info("Disabling recommendations") + cmd_update.append("--allow-vendor-change") + log.info("Enabling vendor changes") + else: @@ -48,121 +52,26 @@ index ad11da4ad1..d84a6af6e0 100644 + ) if no_recommends: - cmd_update.append('--no-recommends') + cmd_update.append("--no-recommends") diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index a3d20f66d5..8cc84485b5 100644 +index a60b1546c6..eaa4d9a76a 100644 --- 
a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -480,7 +480,11 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}])): +@@ -642,7 +642,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): ret = zypper.upgrade(dist_upgrade=True) self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) -- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses') -+ zypper_mock.assert_any_call( + zypper_mock.assert_any_call( +- "dist-upgrade", "--auto-agree-with-licenses" + "dist-upgrade", + "--auto-agree-with-licenses", + "--no-allow-vendor-change", -+ ) + ) - with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])): - ret = zypper.upgrade(dist_upgrade=True, dryrun=True) -@@ -488,25 +492,138 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', - '--dry-run', '--debug-solver') + with patch( +@@ -660,6 +662,76 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + "--debug-solver", + ) -- with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])): -- ret = zypper.upgrade(dist_upgrade=True, dryrun=True, -- fromrepo=["Dummy", "Dummy2"], novendorchange=True) -- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run', -- '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change') -- zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run', -- '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change', -- '--debug-solver') -- - with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])): - ret = zypper.upgrade(dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False) - zypper_mock.assert_any_call('update', '--auto-agree-with-licenses', '--repo', "Dummy", '--repo', 'Dummy2') - - with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}])): - ret = zypper.upgrade(dist_upgrade=True, fromrepo=["Dummy", "Dummy2"], novendorchange=True) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--no-allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--no-allow-vendor-change", -+ ) -+ -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ dryrun=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=False, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ "--debug-solver", -+ ) -+ -+ -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ dryrun=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=True, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", 
-+ "Dummy2", -+ "--no-allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--no-allow-vendor-change", -+ "--debug-solver", -+ ) -+ + with patch( + "salt.modules.zypperpkg.list_pkgs", + MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), @@ -188,9 +97,57 @@ index a3d20f66d5..8cc84485b5 100644 + fromrepo=["Dummy", "Dummy2"], + novendorchange=True, + ) - self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) - zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--from', "Dummy", - '--from', 'Dummy2', '--no-allow-vendor-change') ++ zypper_mock.assert_any_call( ++ "dist-upgrade", ++ "--auto-agree-with-licenses", ++ "--dry-run", ++ "--no-allow-vendor-change", ++ ) ++ zypper_mock.assert_any_call( ++ "dist-upgrade", ++ "--auto-agree-with-licenses", ++ "--dry-run", ++ "--no-allow-vendor-change", ++ ) ++ ++ with patch( ++ "salt.modules.zypperpkg.list_pkgs", ++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), ++ ): ++ ret = zypper.upgrade( ++ dist_upgrade=True, ++ dryrun=True, ++ fromrepo=["Dummy", "Dummy2"], ++ novendorchange=False, ++ ) ++ zypper_mock.assert_any_call( ++ "dist-upgrade", ++ "--auto-agree-with-licenses", ++ "--dry-run", ++ "--from", ++ "Dummy", ++ "--from", ++ "Dummy2", ++ "--allow-vendor-change", ++ ) ++ zypper_mock.assert_any_call( ++ "dist-upgrade", ++ "--auto-agree-with-licenses", ++ "--dry-run", ++ "--from", ++ "Dummy", ++ "--from", ++ "Dummy2", ++ "--allow-vendor-change", ++ "--debug-solver", ++ ) ++ + with patch( + "salt.modules.zypperpkg.list_pkgs", + MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), +@@ -728,6 +800,26 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + "--no-allow-vendor-change", + ) + with patch( + "salt.modules.zypperpkg.list_pkgs", @@ -211,30 +168,24 @@ index a3d20f66d5..8cc84485b5 100644 + "Dummy2", + "--allow-vendor-change", + ) ++ def test_upgrade_kernel(self): - ''' + """ Test kernel package upgrade success. -@@ -558,10 +675,15 @@ Repository 'DUMMY' not found by its alias, number, or URI. - with patch('salt.modules.zypperpkg.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])): - with self.assertRaises(CommandExecutionError) as cmd_exc: - ret = zypper.upgrade(dist_upgrade=True, fromrepo=["DUMMY"]) -- self.assertEqual(cmd_exc.exception.info['changes'], {}) -- self.assertEqual(cmd_exc.exception.info['result']['stdout'], zypper_out) -- zypper_mock.noraise.call.assert_called_with('dist-upgrade', '--auto-agree-with-licenses', -- '--from', 'DUMMY') -+ self.assertEqual(cmd_exc.exception.info["changes"], {}) -+ self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out) -+ zypper_mock.noraise.call.assert_called_with( +@@ -815,7 +907,11 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
+ self.assertEqual(cmd_exc.exception.info["changes"], {}) + self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out) + zypper_mock.noraise.call.assert_called_with( +- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY" + "dist-upgrade", + "--auto-agree-with-licenses", + "--from", + "DUMMY", + "--no-allow-vendor-change", -+ ) + ) def test_upgrade_available(self): - ''' -- -2.28.0 +2.29.2 diff --git a/fix-the-removed-six.itermitems-and-six.-_type-262.patch b/fix-the-removed-six.itermitems-and-six.-_type-262.patch index 78cf626..9781d37 100644 --- a/fix-the-removed-six.itermitems-and-six.-_type-262.patch +++ b/fix-the-removed-six.itermitems-and-six.-_type-262.patch @@ -1,4 +1,4 @@ -From 5b6ac3bb81f24bbb8c39f80c71c490c339cce756 Mon Sep 17 00:00:00 2001 +From 01e2e60a5aba609d219b73f1018f12517a294a64 Mon Sep 17 00:00:00 2001 From: Cedric Bosdonnat Date: Tue, 15 Sep 2020 13:46:06 +0200 Subject: [PATCH] Fix the removed six.itermitems and six.*_type* (#262) @@ -11,29 +11,81 @@ on python 2.7. * fixup! Fix the removed six.itermitems and six.*_type* --- - salt/_compat.py | 1 + - salt/modules/virt.py | 57 +++++++++++----------- - salt/states/virt.py | 15 +++--- - salt/utils/data.py | 51 ++++++++++---------- - salt/utils/xmlutil.py | 5 +- + salt/_compat.py | 25 ++++++++++++++++--------- + salt/modules/virt.py | 11 ++++------- + salt/states/virt.py | 1 + + salt/utils/xmlutil.py | 3 ++- tests/unit/modules/test_virt.py | 2 +- - tests/unit/utils/test_data.py | 85 ++++++++++++++++++--------------- - 7 files changed, 115 insertions(+), 101 deletions(-) + 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/salt/_compat.py b/salt/_compat.py -index 965bb90da3..22daaa31a0 100644 +index d9425523cf..de100de3fa 100644 --- a/salt/_compat.py +++ b/salt/_compat.py -@@ -39,6 +39,7 @@ except Exception: # pylint: disable=broad-except - # True if we are running on Python 3. - PY3 = sys.version_info.major == 3 - +@@ -7,6 +7,7 @@ Salt compatibility code + import binascii + import logging + import sys +import xml.sax.saxutils as saxutils - if PY3: - import builtins + from salt.exceptions import SaltException + from salt.ext.six import binary_type, integer_types, string_types, text_type +@@ -261,21 +262,25 @@ def ip_address(address): + try: + return ipaddress.IPv4Address(address) + except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: +- log.debug('Error while parsing IPv4 address: %s', address) ++ log.debug("Error while parsing IPv4 address: %s", address) + log.debug(err) + + try: + return IPv6AddressScoped(address) + except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: +- log.debug('Error while parsing IPv6 address: %s', address) ++ log.debug("Error while parsing IPv6 address: %s", address) + log.debug(err) + + if isinstance(address, bytes): +- raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. ' +- 'Did you pass in a bytes (str in Python 2) instead ' +- 'of a unicode object?'.format(repr(address))) ++ raise ipaddress.AddressValueError( ++ "{} does not appear to be an IPv4 or IPv6 address. 
" ++ "Did you pass in a bytes (str in Python 2) instead " ++ "of a unicode object?".format(repr(address)) ++ ) + +- raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address))) ++ raise ValueError( ++ "{} does not appear to be an IPv4 or IPv6 address".format(repr(address)) ++ ) + + + def ip_interface(address): +@@ -302,16 +307,18 @@ def ip_interface(address): + try: + return ipaddress.IPv4Interface(address) + except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: +- log.debug('Error while getting IPv4 interface for address %s', address) ++ log.debug("Error while getting IPv4 interface for address %s", address) + log.debug(err) + + try: + return ipaddress.IPv6Interface(address) + except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: +- log.debug('Error while getting IPv6 interface for address %s', address) ++ log.debug("Error while getting IPv6 interface for address %s", address) + log.debug(err) + +- raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address)) ++ raise ValueError( ++ "{} does not appear to be an IPv4 or IPv6 interface".format(address) ++ ) + + + if ipaddress: diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index cd80fbe608..c07fabb406 100644 +index ec40f08359..c042738370 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -88,8 +88,6 @@ import string # pylint: disable=deprecated-module @@ -43,87 +95,20 @@ index cd80fbe608..c07fabb406 100644 -from xml.etree import ElementTree -from xml.sax import saxutils - # Import third party libs import jinja2.exceptions -@@ -104,7 +102,10 @@ import salt.utils.templates + import salt.utils.files +@@ -99,8 +97,9 @@ import salt.utils.stringutils + import salt.utils.templates import salt.utils.xmlutil as xmlutil import salt.utils.yaml - from salt._compat import ipaddress -+from salt._compat import ElementTree -+from salt._compat import saxutils +-from salt._compat import ipaddress ++from salt._compat import ElementTree, ipaddress, saxutils from salt.exceptions import CommandExecutionError, SaltInvocationError +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves.urllib.parse import urlparse, urlunparse from salt.utils.virt import check_remote, download_remote -@@ -657,8 +658,8 @@ def _gen_xml( - context = { - "hypervisor": hypervisor, - "name": name, -- "cpu": str(cpu), -- "mem": str(mem), -+ "cpu": six.text_type(cpu), -+ "mem": six.text_type(mem), - } - if hypervisor in ["qemu", "kvm"]: - context["controller_model"] = False -@@ -722,7 +723,7 @@ def _gen_xml( - "target_dev": _get_disk_target(targets, len(diskp), prefix), - "disk_bus": disk["model"], - "format": disk.get("format", "raw"), -- "index": str(i), -+ "index": six.text_type(i), - } - targets.append(disk_context["target_dev"]) - if disk.get("source_file"): -@@ -827,8 +828,8 @@ def _gen_vol_xml( - "name": name, - "target": {"permissions": permissions, "nocow": nocow}, - "format": format, -- "size": str(size), -- "allocation": str(int(allocation) * 1024), -+ "size": six.text_type(size), -+ "allocation": six.text_type(int(allocation) * 1024), - "backingStore": backing_store, - } - fn_ = "libvirt_volume.jinja" -@@ -1253,7 +1254,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name): - ) - - # Transform the list to remove one level of dictionary and add the name as a property -- disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()] -+ disklist = [dict(d, 
name=name) for disk in disklist for name, d in six.iteritems(disk)] - - # Merge with the user-provided disks definitions - if disks: -@@ -1274,7 +1275,7 @@ def _disk_profile(conn, profile, hypervisor, disks, vm_name): - disk["model"] = "ide" - - # Add the missing properties that have defaults -- for key, val in overlay.items(): -+ for key, val in six.iteritems(overlay): - if key not in disk: - disk[key] = val - -@@ -1422,7 +1423,7 @@ def _complete_nics(interfaces, hypervisor): - """ - Apply the default overlay to attributes - """ -- for key, value in overlays[hypervisor].items(): -+ for key, value in six.iteritems(overlays[hypervisor]): - if key not in attributes or not attributes[key]: - attributes[key] = value - -@@ -1449,7 +1450,7 @@ def _nic_profile(profile_name, hypervisor): - """ - Append dictionary profile data to interfaces list - """ -- for interface_name, attributes in profile_dict.items(): -+ for interface_name, attributes in six.iteritems(profile_dict): - attributes["name"] = interface_name - interfaces.append(attributes) - -@@ -1520,7 +1521,7 @@ def _handle_remote_boot_params(orig_boot): +@@ -1516,7 +1515,7 @@ def _handle_remote_boot_params(orig_boot): """ saltinst_dir = None new_boot = orig_boot.copy() @@ -132,139 +117,22 @@ index cd80fbe608..c07fabb406 100644 cases = [ {"efi"}, {"kernel", "initrd", "efi"}, -@@ -2380,8 +2381,8 @@ def update( - # Update the cpu - cpu_node = desc.find("vcpu") - if cpu and int(cpu_node.text) != cpu: -- cpu_node.text = str(cpu) -- cpu_node.set("current", str(cpu)) -+ cpu_node.text = six.text_type(cpu) -+ cpu_node.set("current", six.text_type(cpu)) - need_update = True +@@ -2559,9 +2558,7 @@ def update( - def _set_loader(node, value): -@@ -2394,7 +2395,7 @@ def update( - node.set("template", value) - - def _set_with_mib_unit(node, value): -- node.text = str(value) -+ node.text = six.text_type(value) - node.set("unit", "MiB") - - # Update the kernel boot parameters -@@ -2426,7 +2427,7 @@ def update( - }, - ] - -- data = {k: v for k, v in locals().items() if bool(v)} -+ data = {k: v for k, v in six.iteritems(locals()) if bool(v)} - if boot_dev: - data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} - need_update = need_update or salt.utils.xmlutil.change_xml( -@@ -2547,7 +2548,7 @@ def update( # Attaching device if source_file: - ElementTree.SubElement( +- ElementTree.SubElement( - updated_disk, "source", attrib={"file": source_file} -+ updated_disk, "source", file=source_file - ) +- ) ++ ElementTree.SubElement(updated_disk, "source", file=source_file) changes["disk"]["new"] = new_disks -@@ -2609,7 +2610,7 @@ def update( - except libvirt.libvirtError as err: - if "errors" not in status: - status["errors"] = [] -- status["errors"].append(str(err)) -+ status["errors"].append(six.text_type(err)) - conn.close() - return status -@@ -2823,7 +2824,7 @@ def _node_info(conn): - info = { - "cpucores": raw[6], - "cpumhz": raw[3], -- "cpumodel": str(raw[0]), -+ "cpumodel": six.text_type(raw[0]), - "cpus": raw[2], - "cputhreads": raw[7], - "numanodes": raw[4], -@@ -3628,7 +3629,7 @@ def _define_vol_xml_str(conn, xml, pool=None): # pylint: disable=redefined-oute - poolname = ( - pool if pool else __salt__["config.get"]("virt:storagepool", default_pool) - ) -- pool = conn.storagePoolLookupByName(str(poolname)) -+ pool = conn.storagePoolLookupByName(six.text_type(poolname)) - ret = pool.createXML(xml, 0) is not None - return ret - -@@ -3829,7 +3830,7 @@ def seed_non_shared_migrate(disks, force=False): - - salt '*' virt.seed_non_shared_migrate - 
""" -- for _, data in disks.items(): -+ for _, data in six.iteritems(disks): - fn_ = data["file"] - form = data["file format"] - size = data["virtual size"].split()[1][1:] -@@ -4852,7 +4853,7 @@ def capabilities(**kwargs): - try: - caps = _capabilities(conn) - except libvirt.libvirtError as err: -- raise CommandExecutionError(str(err)) -+ raise CommandExecutionError(six.text_type(err)) - finally: - conn.close() - return caps -@@ -5352,7 +5353,7 @@ def network_info(name=None, **kwargs): - for net in nets - } - except libvirt.libvirtError as err: -- log.debug("Silenced libvirt error: %s", str(err)) -+ log.debug("Silenced libvirt error: %s", six.text_type(err)) - finally: - conn.close() - return result -@@ -6214,7 +6215,7 @@ def pool_info(name=None, **kwargs): - ] - result = {pool.name(): _pool_extract_infos(pool) for pool in pools} - except libvirt.libvirtError as err: -- log.debug("Silenced libvirt error: %s", str(err)) -+ log.debug("Silenced libvirt error: %s", six.text_type(err)) - finally: - conn.close() - return result -@@ -6591,12 +6592,12 @@ def volume_infos(pool=None, volume=None, **kwargs): - if vol.path(): - as_backing_store = { - path -- for (path, all_paths) in backing_stores.items() -+ for (path, all_paths) in six.iteritems(backing_stores) - if vol.path() in all_paths - } - used_by = [ - vm_name -- for (vm_name, vm_disks) in disks.items() -+ for (vm_name, vm_disks) in six.iteritems(disks) - if vm_disks & as_backing_store or vol.path() in vm_disks - ] - -@@ -6625,9 +6626,9 @@ def volume_infos(pool=None, volume=None, **kwargs): - } - for pool_obj in pools - } -- return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes} -+ return {pool_name: volumes for (pool_name, volumes) in six.iteritems(vols) if volumes} - except libvirt.libvirtError as err: -- log.debug("Silenced libvirt error: %s", str(err)) -+ log.debug("Silenced libvirt error: %s", six.text_type(err)) - finally: - conn.close() - return result diff --git a/salt/states/virt.py b/salt/states/virt.py -index 3d99fd53c8..1a0c889d58 100644 +index b45cf72ed3..df7ebb63e6 100644 --- a/salt/states/virt.py +++ b/salt/states/virt.py -@@ -23,6 +23,7 @@ import salt.utils.files +@@ -22,6 +22,7 @@ import salt.utils.files import salt.utils.stringutils import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -272,288 +140,19 @@ index 3d99fd53c8..1a0c889d58 100644 try: import libvirt # pylint: disable=import-error -@@ -97,7 +98,7 @@ def keys(name, basepath="/etc/pki", **kwargs): - # rename them to something hopefully unique to avoid - # overriding anything existing - pillar_kwargs = {} -- for key, value in kwargs.items(): -+ for key, value in six.iteritems(kwargs): - pillar_kwargs["ext_pillar_virt.{}".format(key)] = value - - pillar = __salt__["pillar.ext"]({"libvirt": "_"}, pillar_kwargs) -@@ -187,7 +188,7 @@ def _virt_call( - else: - noaction_domains.append(targeted_domain) - except libvirt.libvirtError as err: -- ignored_domains.append({"domain": targeted_domain, "issue": str(err)}) -+ ignored_domains.append({"domain": targeted_domain, "issue": six.text_type(err)}) - if not changed_domains: - ret["result"] = not ignored_domains and bool(targeted_domains) - ret["comment"] = "No changes had happened" -@@ -461,7 +462,7 @@ def defined( - ret["comment"] = "Domain {} defined".format(name) - except libvirt.libvirtError as err: - # Something bad happened when defining / updating the VM, report it -- ret["comment"] = str(err) -+ ret["comment"] = six.text_type(err) - ret["result"] = 
False - - return ret -@@ -704,7 +705,7 @@ def running( - - except libvirt.libvirtError as err: - # Something bad happened when starting / updating the VM, report it -- ret["comment"] = str(err) -+ ret["comment"] = six.text_type(err) - ret["result"] = False - - return ret -@@ -867,7 +868,7 @@ def reverted( - } - except CommandExecutionError as err: - if len(domains) > 1: -- ignored_domains.append({"domain": domain, "issue": str(err)}) -+ ignored_domains.append({"domain": domain, "issue": six.text_type(err)}) - if len(domains) > 1: - if result: - ret["changes"]["reverted"].append(result) -@@ -885,9 +886,9 @@ def reverted( - if not ret["changes"]["reverted"]: - ret["changes"].pop("reverted") - except libvirt.libvirtError as err: -- ret["comment"] = str(err) -+ ret["comment"] = six.text_type(err) - except CommandExecutionError as err: -- ret["comment"] = str(err) -+ ret["comment"] = six.text_type(err) - - return ret - -diff --git a/salt/utils/data.py b/salt/utils/data.py -index 1c4c22efb3..d98b56e06f 100644 ---- a/salt/utils/data.py -+++ b/salt/utils/data.py -@@ -4,6 +4,7 @@ Functions for manipulating, inspecting, or otherwise working with data types - and data structures. - """ - -+from __future__ import absolute_import, print_function, unicode_literals - - # Import Python libs - import copy -@@ -71,7 +72,7 @@ class CaseInsensitiveDict(MutableMapping): - return self._data[to_lowercase(key)][1] - - def __iter__(self): -- return (item[0] for item in self._data.values()) -+ return (item[0] for item in six.itervalues(self._data)) - - def __eq__(self, rval): - if not isinstance(rval, Mapping): -@@ -80,20 +81,20 @@ class CaseInsensitiveDict(MutableMapping): - return dict(self.items_lower()) == dict(CaseInsensitiveDict(rval).items_lower()) - - def __repr__(self): -- return repr(dict(self.items())) -+ return repr(dict(six.iteritems(self))) - - def items_lower(self): - """ - Returns a generator iterating over keys and values, with the keys all - being lowercase. - """ -- return ((key, val[1]) for key, val in self._data.items()) -+ return ((key, val[1]) for key, val in six.iteritems(self._data)) - - def copy(self): - """ - Returns a copy of the object - """ -- return CaseInsensitiveDict(self._data.items()) -+ return CaseInsensitiveDict(six.iteritems(self._data)) - - - def __change_case(data, attr, preserve_dict_class=False): -@@ -115,7 +116,7 @@ def __change_case(data, attr, preserve_dict_class=False): - __change_case(key, attr, preserve_dict_class), - __change_case(val, attr, preserve_dict_class), - ) -- for key, val in data.items() -+ for key, val in six.iteritems(data) - ) - if isinstance(data, Sequence): - return data_type( -@@ -145,7 +146,7 @@ def compare_dicts(old=None, new=None): - dict describing the changes that were made. 
- """ - ret = {} -- for key in set(new or {}).union(old or {}): -+ for key in set((new or {})).union((old or {})): - if key not in old: - # New key - ret[key] = {"old": "", "new": new[key]} -@@ -205,7 +206,7 @@ def _remove_circular_refs(ob, _seen=None): - if isinstance(ob, dict): - res = { - _remove_circular_refs(k, _seen): _remove_circular_refs(v, _seen) -- for k, v in ob.items() -+ for k, v in six.iteritems(ob) - } - elif isinstance(ob, (list, tuple, set, frozenset)): - res = type(ob)(_remove_circular_refs(v, _seen) for v in ob) -@@ -336,7 +337,7 @@ def decode_dict( - ) - # Make sure we preserve OrderedDicts - ret = data.__class__() if preserve_dict_class else {} -- for key, value in data.items(): -+ for key, value in six.iteritems(data): - if isinstance(key, tuple): - key = ( - decode_tuple( -@@ -592,7 +593,7 @@ def encode_dict( - # Clean data object before encoding to avoid circular references - data = _remove_circular_refs(data) - ret = data.__class__() if preserve_dict_class else {} -- for key, value in data.items(): -+ for key, value in six.iteritems(data): - if isinstance(key, tuple): - key = ( - encode_tuple(key, encoding, errors, keep, preserve_dict_class) -@@ -734,8 +735,8 @@ def filter_by(lookup_dict, lookup, traverse, merge=None, default="default", base - # lookup_dict keys - for each in val if isinstance(val, list) else [val]: - for key in lookup_dict: -- test_key = key if isinstance(key, str) else str(key) -- test_each = each if isinstance(each, str) else str(each) -+ test_key = key if isinstance(key, six.string_types) else six.text_type(key) -+ test_each = each if isinstance(each, six.string_types) else six.text_type(each) - if fnmatch.fnmatchcase(test_each, test_key): - ret = lookup_dict[key] - break -@@ -851,11 +852,11 @@ def subdict_match( - # begin with is that (by design) to_unicode will raise a TypeError if a - # non-string/bytestring/bytearray value is passed. - try: -- target = str(target).lower() -+ target = six.text_type(target).lower() - except UnicodeDecodeError: - target = salt.utils.stringutils.to_unicode(target).lower() - try: -- pattern = str(pattern).lower() -+ pattern = six.text_type(pattern).lower() - except UnicodeDecodeError: - pattern = salt.utils.stringutils.to_unicode(pattern).lower() - -@@ -997,7 +998,7 @@ def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None) - Takes a list of one-element dicts (as found in many SLS schemas) and - repacks into a single dictionary. - """ -- if isinstance(data, str): -+ if isinstance(data, six.string_types): - try: - data = salt.utils.yaml.safe_load(data) - except salt.utils.yaml.parser.ParserError as err: -@@ -1009,7 +1010,7 @@ def repack_dictlist(data, strict=False, recurse=False, key_cb=None, val_cb=None) - if val_cb is None: - val_cb = lambda x, y: y - -- valid_non_dict = ((str,), (int,), float) -+ valid_non_dict = (six.string_types, six.integer_types, float) - if isinstance(data, list): - for element in data: - if isinstance(element, valid_non_dict): -@@ -1067,7 +1068,7 @@ def is_list(value): - - - @jinja_filter("is_iter") --def is_iter(thing, ignore=(str,)): -+def is_iter(thing, ignore=six.string_types): - """ - Test if an object is iterable, but not a string type. 
- -@@ -1124,10 +1125,10 @@ def is_true(value=None): - pass - - # Now check for truthiness -- if isinstance(value, ((int,), float)): -+ if isinstance(value, (six.integer_types, float)): - return value > 0 -- if isinstance(value, str): -- return str(value).lower() == "true" -+ if isinstance(value, six.string_types): -+ return six.text_type(value).lower() == "true" - return bool(value) - - -@@ -1167,7 +1168,7 @@ def simple_types_filter(data): - if data is None: - return data - -- simpletypes_keys = ((str,), str, (int,), float, bool) -+ simpletypes_keys = (six.string_types, six.text_type, six.integer_types, float, bool) - simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple]) - - if isinstance(data, (list, tuple)): -@@ -1183,7 +1184,7 @@ def simple_types_filter(data): - - if isinstance(data, dict): - simpledict = {} -- for key, value in data.items(): -+ for key, value in six.iteritems(data): - if key is not None and not isinstance(key, simpletypes_keys): - key = repr(key) - if value is not None and isinstance(value, (dict, list, tuple)): -@@ -1205,8 +1206,8 @@ def stringify(data): - for item in data: - if six.PY2 and isinstance(item, str): - item = salt.utils.stringutils.to_unicode(item) -- elif not isinstance(item, str): -- item = str(item) -+ elif not isinstance(item, six.string_types): -+ item = six.text_type(item) - ret.append(item) - return ret - -@@ -1282,7 +1283,7 @@ def filter_falsey(data, recurse_depth=None, ignore_types=()): - - if isinstance(data, dict): - processed_elements = [ -- (key, filter_element(value)) for key, value in data.items() -+ (key, filter_element(value)) for key, value in six.iteritems(data) - ] - return type(data)( - [ -@@ -1472,7 +1473,7 @@ def get_value(obj, path, default=None): - if obj is None: - return res - if isinstance(obj, dict): -- items = obj.items() -+ items = six.iteritems(obj) - elif isinstance(obj, list): - items = enumerate(obj) - diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py -index 2b9c7bf43f..68191bc528 100644 +index b9f047820b..111ca155d4 100644 --- a/salt/utils/xmlutil.py +++ b/salt/utils/xmlutil.py -@@ -9,6 +9,7 @@ from xml.etree import ElementTree +@@ -7,6 +7,7 @@ import string # pylint: disable=deprecated-module + from xml.etree import ElementTree - # Import salt libs import salt.utils.data +from salt.ext import six def _conv_name(x): -@@ -147,7 +148,7 @@ def set_node_text(node, value): - :param node: the node to set the text to - :param value: the value to set - """ -- node.text = str(value) -+ node.text = six.text_type(value) - - - def clean_node(parent_map, node, ignored=None): -@@ -162,7 +163,7 @@ def clean_node(parent_map, node, ignored=None): +@@ -160,7 +161,7 @@ def clean_node(parent_map, node, ignored=None): has_text = node.text is not None and node.text.strip() parent = parent_map.get(node) if ( @@ -563,10 +162,10 @@ index 2b9c7bf43f..68191bc528 100644 and not has_text ): diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index 5ec8de77e7..27c4b9d1b0 100644 +index 4775fec31f..4a4c0395a7 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py -@@ -48,7 +48,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors +@@ -45,7 +45,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors """ def __init__(self, msg): @@ -575,202 +174,7 @@ index 5ec8de77e7..27c4b9d1b0 100644 self.msg = msg def get_error_message(self): -diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py -index 8a6956d442..fb4a8cc3c2 100644 
---- a/tests/unit/utils/test_data.py -+++ b/tests/unit/utils/test_data.py -@@ -1,14 +1,17 @@ -+# -*- coding: utf-8 -*- - """ - Tests for salt.utils.data - """ - - # Import Python libs -+from __future__ import absolute_import, print_function, unicode_literals - - import logging - - # Import Salt libs - import salt.utils.data - import salt.utils.stringutils -+from salt.ext import six - - # Import 3rd party libs - from salt.ext.six.moves import ( # pylint: disable=import-error,redefined-builtin -@@ -414,18 +417,19 @@ class DataTestCase(TestCase): - ) - self.assertEqual(ret, expected) - -- # The binary data in the data structure should fail to decode, even -- # using the fallback, and raise an exception. -- self.assertRaises( -- UnicodeDecodeError, -- salt.utils.data.decode, -- self.test_data, -- keep=False, -- normalize=True, -- preserve_dict_class=True, -- preserve_tuples=True, -- to_str=True, -- ) -+ if six.PY3: -+ # The binary data in the data structure should fail to decode, even -+ # using the fallback, and raise an exception. -+ self.assertRaises( -+ UnicodeDecodeError, -+ salt.utils.data.decode, -+ self.test_data, -+ keep=False, -+ normalize=True, -+ preserve_dict_class=True, -+ preserve_tuples=True, -+ to_str=True, -+ ) - - # Now munge the expected data so that we get what we would expect if we - # disable preservation of dict class and tuples -@@ -469,9 +473,14 @@ class DataTestCase(TestCase): - - # Test binary blob - self.assertEqual(salt.utils.data.decode(BYTES, keep=True, to_str=True), BYTES) -- self.assertRaises( -- UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False, to_str=True, -- ) -+ if six.PY3: -+ self.assertRaises( -+ UnicodeDecodeError, -+ salt.utils.data.decode, -+ BYTES, -+ keep=False, -+ to_str=True, -+ ) - - def test_decode_fallback(self): - """ -@@ -666,7 +675,7 @@ class DataTestCase(TestCase): - self.assertRaises(TypeError, salt.utils.data.stringify, 9) - self.assertEqual( - salt.utils.data.stringify( -- ["one", "two", "three", 4, 5] -+ ["one", "two", str("three"), 4, 5] - ), # future lint: disable=blacklisted-function - ["one", "two", "three", "4", "5"], - ) -@@ -720,7 +729,7 @@ class FilterFalseyTestCase(TestCase): - # Check returned type equality - self.assertIs(type(old_list), type(new_list)) - # Test with set -- old_set = {"foo", "bar"} -+ old_set = set(["foo", "bar"]) - new_set = salt.utils.data.filter_falsey(old_set) - self.assertEqual(old_set, new_set) - # Check returned type equality -@@ -839,9 +848,9 @@ class FilterFalseyTestCase(TestCase): - Test filtering a set without recursing. - Note that a set cannot contain unhashable types, so recursion is not possible. - """ -- old_set = {"foo", None, 0, ""} -+ old_set = set(["foo", None, 0, ""]) - new_set = salt.utils.data.filter_falsey(old_set) -- expect_set = {"foo"} -+ expect_set = set(["foo"]) - self.assertEqual(expect_set, new_set) - self.assertIs(type(expect_set), type(new_set)) - -@@ -1053,13 +1062,13 @@ class FilterRecursiveDiff(TestCase): - """ - Test cases where equal sets are compared. - """ -- test_set = {0, 1, 2, 3, "foo"} -+ test_set = set([0, 1, 2, 3, "foo"]) - self.assertEqual({}, salt.utils.data.recursive_diff(test_set, test_set)) - - # This is a bit of an oddity, as python seems to sort the sets in memory - # so both sets end up with the same ordering (0..3). 
-- set_one = {0, 1, 2, 3} -- set_two = {3, 2, 1, 0} -+ set_one = set([0, 1, 2, 3]) -+ set_two = set([3, 2, 1, 0]) - self.assertEqual({}, salt.utils.data.recursive_diff(set_one, set_two)) - - def test_tuple_equality(self): -@@ -1149,13 +1158,13 @@ class FilterRecursiveDiff(TestCase): - Tricky as the sets are compared zipped, so shuffled sets of equal values - are considered different. - """ -- set_one = {0, 1, 2, 4} -- set_two = {0, 1, 3, 4} -- expected_result = {"old": {2}, "new": {3}} -+ set_one = set([0, 1, 2, 4]) -+ set_two = set([0, 1, 3, 4]) -+ expected_result = {"old": set([2]), "new": set([3])} - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(set_one, set_two) - ) -- expected_result = {"new": {2}, "old": {3}} -+ expected_result = {"new": set([2]), "old": set([3])} - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(set_two, set_one) - ) -@@ -1164,8 +1173,8 @@ class FilterRecursiveDiff(TestCase): - # Python 2.7 seems to sort it (i.e. set_one below becomes {0, 1, 'foo', 'bar'} - # However Python 3.6.8 stores it differently each run. - # So just test for "not equal" here. -- set_one = {0, "foo", 1, "bar"} -- set_two = {"foo", 1, "bar", 2} -+ set_one = set([0, "foo", 1, "bar"]) -+ set_two = set(["foo", 1, "bar", 2]) - expected_result = {} - self.assertNotEqual( - expected_result, salt.utils.data.recursive_diff(set_one, set_two) -@@ -1203,18 +1212,18 @@ class FilterRecursiveDiff(TestCase): - expected_result, salt.utils.data.recursive_diff(list_two, list_one) - ) - -- mixed_one = {"foo": {0, 1, 2}, "bar": [0, 1, 2]} -- mixed_two = {"foo": {1, 2, 3}, "bar": [1, 2, 3]} -+ mixed_one = {"foo": set([0, 1, 2]), "bar": [0, 1, 2]} -+ mixed_two = {"foo": set([1, 2, 3]), "bar": [1, 2, 3]} - expected_result = { -- "old": {"foo": {0}, "bar": [0, 1, 2]}, -- "new": {"foo": {3}, "bar": [1, 2, 3]}, -+ "old": {"foo": set([0]), "bar": [0, 1, 2]}, -+ "new": {"foo": set([3]), "bar": [1, 2, 3]}, - } - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two) - ) - expected_result = { -- "new": {"foo": {0}, "bar": [0, 1, 2]}, -- "old": {"foo": {3}, "bar": [1, 2, 3]}, -+ "new": {"foo": set([0]), "bar": [0, 1, 2]}, -+ "old": {"foo": set([3]), "bar": [1, 2, 3]}, - } - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one) -@@ -1236,7 +1245,7 @@ class FilterRecursiveDiff(TestCase): - Test case comparing a list with a set, will be compared unordered. - """ - mixed_one = [1, 2, 3] -- mixed_two = {3, 2, 1} -+ mixed_two = set([3, 2, 1]) - expected_result = {} - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two) -@@ -1351,9 +1360,9 @@ class FilterRecursiveDiff(TestCase): - Test case comparing two sets of unequal length. - This does not do anything special, as it is unordered. 
- """ -- set_one = {1, 2, 3} -- set_two = {4, 3, 2, 1} -- expected_result = {"old": set(), "new": {4}} -+ set_one = set([1, 2, 3]) -+ set_two = set([4, 3, 2, 1]) -+ expected_result = {"old": set([]), "new": set([4])} - self.assertEqual( - expected_result, salt.utils.data.recursive_diff(set_one, set_two) - ) -- -2.28.0 +2.29.2 diff --git a/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch b/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch deleted file mode 100644 index 083a27e..0000000 --- a/fix-typo-on-msgpack-version-when-sanitizing-msgpack-.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 5a2c7671be0fcdf03050049ac4a1bbf4929abf1e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 27 Mar 2020 15:58:40 +0000 -Subject: [PATCH] Fix typo on msgpack version when sanitizing msgpack - kwargs (bsc#1167437) - ---- - salt/utils/msgpack.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/salt/utils/msgpack.py b/salt/utils/msgpack.py -index 1d02aa96ba8b659eb4038f00563c9cfc31a568e5..4b5a256513a524a33d7d42773644567a0970a46b 100644 ---- a/salt/utils/msgpack.py -+++ b/salt/utils/msgpack.py -@@ -61,7 +61,7 @@ def _sanitize_msgpack_kwargs(kwargs): - assert isinstance(kwargs, dict) - if version < (0, 6, 0) and kwargs.pop('strict_map_key', None) is not None: - log.info('removing unsupported `strict_map_key` argument from msgpack call') -- if version < (0, 5, 5) and kwargs.pop('raw', None) is not None: -+ if version < (0, 5, 2) and kwargs.pop('raw', None) is not None: - log.info('removing unsupported `raw` argument from msgpack call') - if version < (0, 4, 0) and kwargs.pop('use_bin_type', None) is not None: - log.info('removing unsupported `use_bin_type` argument from msgpack call') --- -2.23.0 - - diff --git a/fix-unit-test-for-grains-core.patch b/fix-unit-test-for-grains-core.patch index 1d78938..95364fd 100644 --- a/fix-unit-test-for-grains-core.patch +++ b/fix-unit-test-for-grains-core.patch @@ -1,42 +1,41 @@ -From 6bb7b6c4a530abb7e831449545a35ee5ede49dcb Mon Sep 17 00:00:00 2001 +From 192bac1ae2f20b098384264c8802034a340cd124 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Thu, 11 Oct 2018 16:20:40 +0200 Subject: [PATCH] Fix unit test for grains core --- - tests/unit/grains/test_core.py | 11 +++++------ - 1 file changed, 5 insertions(+), 6 deletions(-) + tests/unit/grains/test_core.py | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index b31f5dcddd..c40595eb3f 100644 +index 34aaa4f5bc..7dbf34deac 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -68,11 +68,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - def test_parse_etc_os_release(self, path_isfile_mock): - path_isfile_mock.side_effect = lambda x: x == "/usr/lib/os-release" - with salt.utils.files.fopen(os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")) as os_release_file: +@@ -59,10 +59,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with salt.utils.files.fopen( + os.path.join(OS_RELEASE_DIR, "ubuntu-17.10") + ) as os_release_file: - os_release_content = os_release_file.read() - with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)): -- os_release = core._parse_os_release( -- '/etc/os-release', -- '/usr/lib/os-release') + os_release_content = os_release_file.readlines() + with patch("salt.utils.files.fopen", mock_open()) as os_release_file: + os_release_file.return_value.__iter__.return_value 
= os_release_content -+ os_release = core._parse_os_release(["/etc/os-release", "/usr/lib/os-release"]) - self.assertEqual(os_release, { - "NAME": "Ubuntu", - "VERSION": "17.10 (Artful Aardvark)", -@@ -134,7 +133,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - + os_release = core._parse_os_release( +- "/etc/os-release", "/usr/lib/os-release" ++ ["/etc/os-release", "/usr/lib/os-release"] + ) + self.assertEqual( + os_release, +@@ -172,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): def test_missing_os_release(self): - with patch('salt.utils.files.fopen', mock_open(read_data={})): -- os_release = core._parse_os_release('/etc/os-release', '/usr/lib/os-release') -+ os_release = core._parse_os_release(['/etc/os-release', '/usr/lib/os-release']) + with patch("salt.utils.files.fopen", mock_open(read_data={})): + os_release = core._parse_os_release( +- "/etc/os-release", "/usr/lib/os-release" ++ ["/etc/os-release", "/usr/lib/os-release"] + ) self.assertEqual(os_release, {}) - @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows') -- -2.16.4 +2.29.2 diff --git a/fix-unit-tests-for-batch-async-after-refactor.patch b/fix-unit-tests-for-batch-async-after-refactor.patch index 1a58e4a..c3d6b36 100644 --- a/fix-unit-tests-for-batch-async-after-refactor.patch +++ b/fix-unit-tests-for-batch-async-after-refactor.patch @@ -1,21 +1,21 @@ -From e9f2af1256a52d58a7c8e6dd0122eb6d5cc47dd3 Mon Sep 17 00:00:00 2001 +From 09a871c197be4933475ee4582755d9b0cb5a700e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 4 Mar 2020 10:13:43 +0000 Subject: [PATCH] Fix unit tests for batch async after refactor --- - tests/unit/cli/test_batch_async.py | 18 +++++++++++++++++- - 1 file changed, 17 insertions(+), 1 deletion(-) + tests/unit/cli/test_batch_async.py | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index f1d36a81fb..e1ce60859b 100644 +index b04965268a..dcee9a87bd 100644 --- a/tests/unit/cli/test_batch_async.py +++ b/tests/unit/cli/test_batch_async.py -@@ -126,9 +126,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.timedout_minions = {'bar'} +@@ -120,9 +120,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + self.batch.timedout_minions = {"bar"} self.batch.event = MagicMock() - self.batch.metadata = {'mykey': 'myvalue'} + self.batch.metadata = {"mykey": "myvalue"} + old_event = self.batch.event self.batch.end_batch() self.assertEqual( @@ -23,8 +23,8 @@ index f1d36a81fb..e1ce60859b 100644 + old_event.fire_event.call_args[0], ( { - 'available_minions': set(['foo', 'bar']), -@@ -146,6 +147,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): + "available_minions": {"foo", "bar"}, +@@ -140,6 +141,23 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): event = MagicMock() batch.event = event batch.__del__() @@ -36,17 +36,19 @@ index f1d36a81fb..e1ce60859b 100644 + batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) + event = MagicMock() + batch.event = event -+ batch.patterns = { ('salt/job/1234/ret/*', 'find_job_return'), ('salt/job/4321/ret/*', 'find_job_return') } ++ batch.patterns = { ++ ("salt/job/1234/ret/*", "find_job_return"), ++ ("salt/job/4321/ret/*", "find_job_return"), ++ } + batch.close_safe() + self.assertEqual(batch.local, None) + self.assertEqual(batch.event, None) + self.assertEqual(batch.ioloop, None) -+ self.assertEqual( -+ len(event.unsubscribe.mock_calls), 2) - 
self.assertEqual( - len(event.remove_event_handler.mock_calls), 1) ++ self.assertEqual(len(event.unsubscribe.mock_calls), 2) + self.assertEqual(len(event.remove_event_handler.mock_calls), 1) + @tornado.testing.gen_test -- -2.23.0 +2.29.2 diff --git a/fix-virt.update-with-cpu-defined-263.patch b/fix-virt.update-with-cpu-defined-263.patch index e61f0d8..450d941 100644 --- a/fix-virt.update-with-cpu-defined-263.patch +++ b/fix-virt.update-with-cpu-defined-263.patch @@ -1,4 +1,4 @@ -From 37800f008e46a7321bcd4b88b4858d3ea1fabcdf Mon Sep 17 00:00:00 2001 +From c05d571058b9520dbaf4aba3de001b1aefe8e2c2 Mon Sep 17 00:00:00 2001 From: Cedric Bosdonnat Date: Tue, 15 Sep 2020 16:03:30 +0200 Subject: [PATCH] Fix virt.update with cpu defined (#263) @@ -10,11 +10,11 @@ updated. 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index c07fabb406..4a8a55ced6 100644 +index c042738370..c1a73fcb7f 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py -@@ -2430,9 +2430,9 @@ def update( - data = {k: v for k, v in six.iteritems(locals()) if bool(v)} +@@ -2441,9 +2441,9 @@ def update( + data = {k: v for k, v in locals().items() if bool(v)} if boot_dev: data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} - need_update = need_update or salt.utils.xmlutil.change_xml( @@ -26,6 +26,6 @@ index c07fabb406..4a8a55ced6 100644 # Update the XML definition with the new disks and diff changes devices_node = desc.find("devices") -- -2.28.0 +2.29.2 diff --git a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch index d29f694..8670aa3 100644 --- a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch +++ b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch @@ -1,70 +1,79 @@ -From a8f0a15e4067ec278c8a2d690e3bf815523286ca Mon Sep 17 00:00:00 2001 +From f3ac041e34952a4b753e4afc9dc4b6adaa1d0ff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 12 Mar 2020 13:26:51 +0000 -Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test after - rebase +Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test + after rebase --- - tests/integration/modules/test_pkg.py | 56 +++------------------------ - 1 file changed, 6 insertions(+), 50 deletions(-) + tests/integration/modules/test_pkg.py | 63 ++++----------------------- + 1 file changed, 8 insertions(+), 55 deletions(-) diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py -index 6f3767bfbd272848277b877d1fe640caf8f349f6..0f4c5c9d459c56bb485408f943c1dee49c46cd21 100644 +index 3ece73074b..933755a9ec 100644 --- a/tests/integration/modules/test_pkg.py +++ b/tests/integration/modules/test_pkg.py -@@ -134,6 +134,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): - if repo is not None: - self.run_function('pkg.del_repo', [repo]) +@@ -143,6 +143,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): + self.run_function("pkg.del_repo", [repo]) + @slowTest + @destructiveTest -+ @requires_salt_modules('pkg.mod_repo', 'pkg.del_repo', 'pkg.get_repo') ++ @requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo") + @requires_network() + @requires_system_grains def test_mod_del_repo_multiline_values(self): - ''' + """ test modifying and deleting a software repository defined with multiline values -@@ -141,8 +145,9 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): - os_grain = self.run_function('grains.item', 
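[editor's sketch: illustrative only, not part of the patch series] The one-line virt.py fix above is a short-circuit subtlety: with `need_update = need_update or change_xml(...)`, any earlier detected update suppresses the XML rewrite entirely, because `or` never evaluates its right operand once the left is truthy, which is why the cpu setting was not updated. Reordering makes the side-effecting call unconditional::

    calls = []

    def change_xml():
        calls.append("called")
        return False

    need_update = True
    need_update = need_update or change_xml()  # change_xml never runs
    assert calls == []

    need_update = change_xml() or need_update  # always runs
    assert calls == ["called"]
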
['os'])['os'] +@@ -150,10 +154,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): + os_grain = self.run_function("grains.item", ["os"])["os"] repo = None try: -- if os_grain in ['CentOS', 'RedHat']: -+ if os_grain in ['CentOS', 'RedHat', 'SUSE']: - my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/' -+ expected_get_repo_baseurl_zypp = 'http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/' - expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/' - major_release = int( - self.run_function( -@@ -189,55 +194,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): +- if os_grain in ["CentOS", "RedHat"]: ++ if os_grain in ["CentOS", "RedHat", "SUSE"]: + my_baseurl = ( + "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" + ) ++ expected_get_repo_baseurl_zypp = ( ++ "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/" ++ ) + expected_get_repo_baseurl = ( + "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" + ) +@@ -207,60 +214,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): if repo is not None: - self.run_function('pkg.del_repo', [repo]) + self.run_function("pkg.del_repo", [repo]) - def test_mod_del_repo_multiline_values(self): -- ''' +- """ - test modifying and deleting a software repository defined with multiline values -- ''' -- os_grain = self.run_function('grains.item', ['os'])['os'] +- """ +- os_grain = self.run_function("grains.item", ["os"])["os"] - repo = None - try: -- if os_grain in ['CentOS', 'RedHat', 'SUSE']: -- my_baseurl = 'http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/' -- expected_get_repo_baseurl_zypp = 'http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/' -- expected_get_repo_baseurl = 'http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/' -- major_release = int( -- self.run_function( -- 'grains.item', -- ['osmajorrelease'] -- )['osmajorrelease'] +- if os_grain in ["CentOS", "RedHat", "SUSE"]: +- my_baseurl = ( +- "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" - ) -- repo = 'fakerepo' -- name = 'Fake repo for RHEL/CentOS/SUSE' +- expected_get_repo_baseurl_zypp = ( +- "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/" +- ) +- expected_get_repo_baseurl = ( +- "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" +- ) +- major_release = int( +- self.run_function("grains.item", ["osmajorrelease"])[ +- "osmajorrelease" +- ] +- ) +- repo = "fakerepo" +- name = "Fake repo for RHEL/CentOS/SUSE" - baseurl = my_baseurl -- gpgkey = 'https://my.fake.repo/foo/bar/MY-GPG-KEY.pub' -- failovermethod = 'priority' +- gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub" +- failovermethod = "priority" - gpgcheck = 1 - enabled = 1 - ret = self.run_function( -- 'pkg.mod_repo', +- "pkg.mod_repo", - [repo], - name=name, - baseurl=baseurl, @@ -78,20 +87,20 @@ index 6f3767bfbd272848277b877d1fe640caf8f349f6..0f4c5c9d459c56bb485408f943c1dee4 - self.assertNotEqual(ret, {}) - repo_info = ret[next(iter(ret))] - self.assertIn(repo, repo_info) -- self.assertEqual(repo_info[repo]['baseurl'], my_baseurl) -- ret = self.run_function('pkg.get_repo', [repo]) -- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl) -- self.run_function('pkg.mod_repo', [repo]) -- ret = self.run_function('pkg.get_repo', [repo]) -- self.assertEqual(ret['baseurl'], expected_get_repo_baseurl) +- self.assertEqual(repo_info[repo]["baseurl"], my_baseurl) +- ret = 
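[editor's sketch: illustrative only, not part of the patch series] The SUSE branch added above expects zypper to hand a multiline baseurl back percent-encoded: `%0A` for the embedded newline and `%20` for the leading space of the second URL. The exact escaping belongs to zypper itself; the string the test compares against can be reproduced with the stdlib::

    from urllib.parse import quote

    my_baseurl = "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
    encoded = quote(my_baseurl, safe="/:")
    assert encoded == (
        "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
    )
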
self.run_function("pkg.get_repo", [repo]) +- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) +- self.run_function("pkg.mod_repo", [repo]) +- ret = self.run_function("pkg.get_repo", [repo]) +- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) - finally: - if repo is not None: -- self.run_function('pkg.del_repo', [repo]) +- self.run_function("pkg.del_repo", [repo]) - - @requires_salt_modules('pkg.owner') + @requires_salt_modules("pkg.owner") def test_owner(self): - ''' + """ -- -2.23.0 +2.29.2 diff --git a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch b/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch index ffbbc22..abac036 100644 --- a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch +++ b/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch @@ -1,43 +1,53 @@ -From eb51734ad93b1fa0c6bc8fde861fdabfe3e0d6b0 Mon Sep 17 00:00:00 2001 +From 81f38c8cb16634b2c86b3e1e7c745870f90771d0 Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Thu, 13 Jun 2019 17:48:55 +0200 Subject: [PATCH] Fix zypper pkg.list_pkgs expectation and dpkg mocking --- - tests/unit/modules/test_dpkg_lowpkg.py | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) + tests/unit/modules/test_dpkg_lowpkg.py | 22 ++++++++++++++++------ + 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py -index a0b3346f9d..bc564f080a 100644 +index 160bbcd5b1..dadbc30dfa 100644 --- a/tests/unit/modules/test_dpkg_lowpkg.py +++ b/tests/unit/modules/test_dpkg_lowpkg.py -@@ -125,9 +125,9 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(dpkg.__salt__, {'cmd.run_all': mock}): - self.assertEqual(dpkg.file_dict('httpd'), 'Error: error') +@@ -308,9 +308,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): + dpkg.bin_pkg_info("package.deb")["name"], "package_name" + ) -- @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -- @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -- @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD v3')) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_license', MagicMock(return_value='BSD v3')) +- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) +- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) +- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) ++ @patch( ++ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail", ++ MagicMock(return_value=dselect_pkg), ++ ) ++ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) ++ @patch( ++ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3") ++ ) def test_info(self): - ''' + """ Test info -@@ -152,9 +152,9 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - assert pkg_data['maintainer'] == 'Simpsons Developers ' - assert pkg_data['license'] == 'BSD v3' +@@ -359,9 +364,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): + ) + assert pkg_data["license"] == "BSD v3" -- @patch('salt.modules.dpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -- @patch('salt.modules.dpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -- @patch('salt.modules.dpkg._get_pkg_license', MagicMock(return_value='BSD 
v3')) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_ds_avail', MagicMock(return_value=dselect_pkg)) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_info', MagicMock(return_value=pkgs_info)) -+ @patch('salt.modules.dpkg_lowpkg._get_pkg_license', MagicMock(return_value='BSD v3')) +- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) +- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) +- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) ++ @patch( ++ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail", ++ MagicMock(return_value=dselect_pkg), ++ ) ++ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) ++ @patch( ++ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3") ++ ) def test_info_attr(self): - ''' + """ Test info with 'attr' parameter -- -2.16.4 +2.29.2 diff --git a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch b/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch index 0cda954..766c8ec 100644 --- a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch +++ b/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch @@ -1,4 +1,4 @@ -From 0612549b3acfeb15e0b499b6f469d64062d6ae2d Mon Sep 17 00:00:00 2001 +From b9ba6875945e1ffafdeb862d8b2ac7fccd9cccf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Mon, 25 Jun 2018 13:06:40 +0100 @@ -14,17 +14,17 @@ Fix '_find_remove_targets' after aligning Zypper with pkg state 1 file changed, 21 deletions(-) diff --git a/salt/states/pkg.py b/salt/states/pkg.py -index c0fa2f6b69..a13d418400 100644 +index a1b2a122bb..f7327a33e3 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py -@@ -450,16 +450,6 @@ def _find_remove_targets(name=None, +@@ -477,16 +477,6 @@ def _find_remove_targets( - if __grains__['os'] == 'FreeBSD' and origin: - cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] -- elif __grains__['os_family'] == 'Suse': + if __grains__["os"] == "FreeBSD" and origin: + cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname] +- elif __grains__["os_family"] == "Suse": - # On SUSE systems. Zypper returns packages without "arch" in name - try: -- namepart, archpart = pkgname.rsplit('.', 1) +- namepart, archpart = pkgname.rsplit(".", 1) - except ValueError: - cver = cur_pkgs.get(pkgname, []) - else: @@ -34,14 +34,14 @@ index c0fa2f6b69..a13d418400 100644 else: cver = cur_pkgs.get(pkgname, []) -@@ -866,17 +856,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): - cver = new_pkgs.get(pkgname.split('%')[0]) - elif __grains__['os_family'] == 'Debian': - cver = new_pkgs.get(pkgname.split('=')[0]) -- elif __grains__['os_family'] == 'Suse': +@@ -930,17 +920,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None): + cver = new_pkgs.get(pkgname.split("%")[0]) + elif __grains__["os_family"] == "Debian": + cver = new_pkgs.get(pkgname.split("=")[0]) +- elif __grains__["os_family"] == "Suse": - # On SUSE systems. 
Zypper returns packages without "arch" in name - try: -- namepart, archpart = pkgname.rsplit('.', 1) +- namepart, archpart = pkgname.rsplit(".", 1) - except ValueError: - cver = new_pkgs.get(pkgname) - else: @@ -53,6 +53,6 @@ index c0fa2f6b69..a13d418400 100644 cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: -- -2.16.4 +2.29.2 diff --git a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch b/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch index 27b3e19..08a9c4b 100644 --- a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch +++ b/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch @@ -1,4 +1,4 @@ -From 3df8359421f60140fd335d95c3c06de0bfd6ac4f Mon Sep 17 00:00:00 2001 +From 17ad05e3cbb3718ca12cef20600be81aa5d42d33 Mon Sep 17 00:00:00 2001 From: tyl0re Date: Wed, 17 Jul 2019 10:13:09 +0200 Subject: [PATCH] Fixed Bug LVM has no Parttion Type. the Scipt Later @@ -11,23 +11,41 @@ Subject: [PATCH] Fixed Bug LVM has no Parttion Type. the Scipt Later So the check on not defined fs_type is missing --- - salt/modules/parted_partition.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + salt/modules/parted_partition.py | 19 ++++++++++++++++--- + 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py -index 9441fec49fd1833da590b3f65637e8e92b287d1c..7d08a7b315c990e7a87c9c77fd6550a6174b7160 100644 +index 015d4cbc29..bb34cd58b4 100644 --- a/salt/modules/parted_partition.py +++ b/salt/modules/parted_partition.py -@@ -515,7 +515,7 @@ def mkpartfs(device, part_type, fs_type, start, end): - 'Invalid part_type passed to partition.mkpartfs' - ) +@@ -552,10 +552,23 @@ def mkpartfs(device, part_type, fs_type=None, start=None, end=None): -- if not _is_fstype(fs_type): + .. 
code-block:: bash + +- salt '*' partition.mkpartfs /dev/sda primary fs_type=fat32 start=0 end=639 +- salt '*' partition.mkpartfs /dev/sda primary start=0 end=639 ++ salt '*' partition.mkpartfs /dev/sda logical ext2 440 670 + """ +- out = mkpart(device, part_type, fs_type, start, end) ++ _validate_device(device) ++ ++ if part_type not in {"primary", "logical", "extended"}: ++ raise CommandExecutionError("Invalid part_type passed to partition.mkpartfs") ++ + if fs_type and not _is_fstype(fs_type): - raise CommandExecutionError( - 'Invalid fs_type passed to partition.mkpartfs' - ) ++ raise CommandExecutionError("Invalid fs_type passed to partition.mkpartfs") ++ ++ _validate_partition_boundary(start) ++ _validate_partition_boundary(end) ++ ++ cmd = "parted -m -s -- {} mkpart {} {} {} {}".format( ++ device, part_type, fs_type, start, end ++ ) ++ out = __salt__["cmd.run"](cmd).splitlines() + return out + + -- -2.23.0 +2.29.2 diff --git a/fixes-cve-2018-15750-cve-2018-15751.patch b/fixes-cve-2018-15750-cve-2018-15751.patch index 771b633..9c8999a 100644 --- a/fixes-cve-2018-15750-cve-2018-15751.patch +++ b/fixes-cve-2018-15750-cve-2018-15751.patch @@ -1,4 +1,4 @@ -From 9ec54e8c1394ab678c6129d98f07c6eafd446399 Mon Sep 17 00:00:00 2001 +From 731a53bd241240e08c455a8cb3a59e4d65a6abb5 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 24 Aug 2018 10:35:55 -0500 Subject: [PATCH] Fixes: CVE-2018-15750, CVE-2018-15751 @@ -12,43 +12,47 @@ Handle Auth exceptions in run_job Update tornado test to correct authentication message --- salt/netapi/rest_cherrypy/app.py | 7 ------- - tests/integration/netapi/rest_tornado/test_app.py | 4 ++-- - 2 files changed, 2 insertions(+), 9 deletions(-) + tests/integration/netapi/rest_tornado/test_app.py | 8 ++++++-- + 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py -index fa1b540e5f..f8b500482b 100644 +index e7641ccbc5..5dfbadf759 100644 --- a/salt/netapi/rest_cherrypy/app.py +++ b/salt/netapi/rest_cherrypy/app.py -@@ -1176,13 +1176,6 @@ class LowDataAdapter(object): +@@ -1181,13 +1181,6 @@ class LowDataAdapter: except (TypeError, ValueError): - raise cherrypy.HTTPError(401, 'Invalid token') + raise cherrypy.HTTPError(401, "Invalid token") -- if 'token' in chunk: +- if "token" in chunk: - # Make sure that auth token is hex - try: -- int(chunk['token'], 16) +- int(chunk["token"], 16) - except (TypeError, ValueError): -- raise cherrypy.HTTPError(401, 'Invalid token') +- raise cherrypy.HTTPError(401, "Invalid token") - if client: - chunk['client'] = client + chunk["client"] = client diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py -index 10ec29f7fa..4102b5645a 100644 +index e3ad8820d3..4e5e741f1d 100644 --- a/tests/integration/netapi/rest_tornado/test_app.py +++ b/tests/integration/netapi/rest_tornado/test_app.py -@@ -282,8 +282,8 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase): - self.assertIn('jid', ret[0]) # the first 2 are regular returns - self.assertIn('jid', ret[1]) - self.assertIn('Failed to authenticate', ret[2]) # bad auth -- self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion'])) -- self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion'])) -+ self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion', 'localhost'])) -+ self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion', 'localhost'])) +@@ -326,8 +326,12 @@ class 
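[editor's sketch: illustrative only, not part of the patch series] The rewritten `mkpartfs` shown above validates each argument before shelling out, and, per the commit message, only checks `fs_type` when one was actually supplied, because LVM partitions carry no partition type. The guard pattern condensed, with the real validators reduced to stand-ins::

    VALID_PART_TYPES = {"primary", "logical", "extended"}
    VALID_FS_TYPES = {"ext2", "ext3", "ext4", "fat32", "xfs"}  # illustrative subset

    def mkpartfs_cmd(device, part_type, fs_type=None, start=None, end=None):
        if part_type not in VALID_PART_TYPES:
            raise ValueError("Invalid part_type passed to partition.mkpartfs")
        if fs_type and fs_type not in VALID_FS_TYPES:  # skipped entirely when None
            raise ValueError("Invalid fs_type passed to partition.mkpartfs")
        return "parted -m -s -- {} mkpart {} {} {} {}".format(
            device, part_type, fs_type, start, end
        )

    assert mkpartfs_cmd("/dev/sda", "logical", "ext2", 440, 670) == (
        "parted -m -s -- /dev/sda logical ext2 440 670"
    )
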
TestSaltAPIHandler(_SaltnadoIntegrationTestCase): + self.assertIn("jid", ret[0]) # the first 2 are regular returns + self.assertIn("jid", ret[1]) + self.assertIn("Failed to authenticate", ret[2]) # bad auth +- self.assertEqual(ret[0]["minions"], sorted(["minion", "sub_minion"])) +- self.assertEqual(ret[1]["minions"], sorted(["minion", "sub_minion"])) ++ self.assertEqual( ++ ret[0]["minions"], sorted(["minion", "sub_minion", "localhost"]) ++ ) ++ self.assertEqual( ++ ret[1]["minions"], sorted(["minion", "sub_minion", "localhost"]) ++ ) + @slowTest def test_simple_local_async_post_no_tgt(self): - low = [{'client': 'local_async', -- -2.16.4 +2.29.2 diff --git a/fixing-streamclosed-issue.patch b/fixing-streamclosed-issue.patch index 510f565..da2e635 100644 --- a/fixing-streamclosed-issue.patch +++ b/fixing-streamclosed-issue.patch @@ -1,4 +1,4 @@ -From 9a5f007a5baa4ba1d28b0e6708bac8b134e4891c Mon Sep 17 00:00:00 2001 +From 82d1cadff4fa6248a9d891a3c228fc415207d8d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mihai=20Dinc=C4=83?= Date: Tue, 26 Nov 2019 18:26:31 +0100 Subject: [PATCH] Fixing StreamClosed issue @@ -8,18 +8,18 @@ Subject: [PATCH] Fixing StreamClosed issue 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 754c257b36..c4545e3ebc 100644 +index f3d92b88f1..8d2601e636 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -221,7 +221,6 @@ class BatchAsync(object): - "metadata": self.metadata +@@ -232,7 +232,6 @@ class BatchAsync: + "metadata": self.metadata, } - self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid)) + self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) - self.event.remove_event_handler(self.__event_handler) for (pattern, label) in self.patterns: if label in ["ping_return", "batch_run"]: - self.event.unsubscribe(pattern, match_type='glob') -@@ -265,6 +264,7 @@ class BatchAsync(object): + self.event.unsubscribe(pattern, match_type="glob") +@@ -277,6 +276,7 @@ class BatchAsync: def __del__(self): self.local = None @@ -28,6 +28,6 @@ index 754c257b36..c4545e3ebc 100644 self.ioloop = None gc.collect() -- -2.16.4 +2.29.2 diff --git a/get-os_arch-also-without-rpm-package-installed.patch b/get-os_arch-also-without-rpm-package-installed.patch index b94bfed..decb600 100644 --- a/get-os_arch-also-without-rpm-package-installed.patch +++ b/get-os_arch-also-without-rpm-package-installed.patch @@ -1,4 +1,4 @@ -From 98f3bd70aaa145b88e8bd4b947b578435e2b1e57 Mon Sep 17 00:00:00 2001 +From e987664551debb9affce8ce5a70593ef0750dcd5 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Wed, 14 Nov 2018 17:36:23 +0100 Subject: [PATCH] Get os_arch also without RPM package installed @@ -17,29 +17,31 @@ Add UT for OS arch detection when no CPU arch or machine can be determined Remove unsupported testcase --- - tests/unit/utils/test_pkg.py | 48 ++++++++------------------------------------ - 1 file changed, 8 insertions(+), 40 deletions(-) + tests/unit/utils/test_pkg.py | 53 ++++++------------------------------ + 1 file changed, 8 insertions(+), 45 deletions(-) diff --git a/tests/unit/utils/test_pkg.py b/tests/unit/utils/test_pkg.py -index e8b19bef14..361e0bf92f 100644 +index b4a67b8e57..404b01b12b 100644 --- a/tests/unit/utils/test_pkg.py +++ b/tests/unit/utils/test_pkg.py -@@ -2,51 +2,19 @@ - - from __future__ import absolute_import, unicode_literals, print_function - --from tests.support.unit import TestCase --from tests.support.mock import MagicMock, patch -+from tests.support.unit 
import TestCase, skipIf -+from tests.support.mock import Mock, MagicMock, patch, NO_MOCK, NO_MOCK_REASON +@@ -1,53 +1,16 @@ +-# -*- coding: utf-8 -*- +- +-from __future__ import absolute_import, print_function, unicode_literals +- import salt.utils.pkg from salt.utils.pkg import rpm - +-from tests.support.mock import MagicMock, patch +-from tests.support.unit import TestCase - ++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch ++from tests.support.unit import TestCase, skipIf + -class PkgUtilsTestCase(TestCase): -- ''' +- """ - TestCase for salt.utils.pkg module -- ''' +- """ +- - test_parameters = [ - ("16.0.0.49153-0+f1", "", "16.0.0.49153-0+f1"), - ("> 15.0.0", ">", "15.0.0"), @@ -62,13 +64,13 @@ index e8b19bef14..361e0bf92f 100644 - ("<=>15.0.0", "<=>", "15.0.0"), - ("<>15.0.0", "<>", "15.0.0"), - ("=15.0.0", "=", "15.0.0"), -- ("", "", "") +- ("", "", ""), - ] - - def test_split_comparison(self): -- ''' +- """ - Tests salt.utils.pkg.split_comparison -- ''' +- """ - for test_parameter in self.test_parameters: - oper, verstr = salt.utils.pkg.split_comparison(test_parameter[0]) - self.assertEqual(test_parameter[1], oper) @@ -80,11 +82,11 @@ index e8b19bef14..361e0bf92f 100644 +@skipIf(NO_MOCK, NO_MOCK_REASON) -+@skipIf(pytest is None, 'PyTest is missing') ++@skipIf(pytest is None, "PyTest is missing") class PkgRPMTestCase(TestCase): - ''' + """ Test case for pkg.rpm utils -- -2.16.4 +2.29.2 diff --git a/grains-master-can-read-grains.patch b/grains-master-can-read-grains.patch index e6cae6b..0f91120 100644 --- a/grains-master-can-read-grains.patch +++ b/grains-master-can-read-grains.patch @@ -1,4 +1,4 @@ -From 0b6106815b708bc4cf25b4a02ebc8b7ebf299b39 Mon Sep 17 00:00:00 2001 +From d9618fed8ff241c6f127f08ec59fea9c8b8e12a6 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 27 Oct 2020 13:16:37 +0100 Subject: [PATCH] grains: master can read grains @@ -8,10 +8,10 @@ Subject: [PATCH] grains: master can read grains 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index 6a26aece77..f80061ff4e 100644 +index d25faac3b7..7729a5c0a5 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py -@@ -94,8 +94,14 @@ def __secure_boot(): +@@ -76,8 +76,14 @@ def __secure_boot(): enabled = False sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data") if len(sboot) == 1: @@ -29,6 +29,6 @@ index 6a26aece77..f80061ff4e 100644 -- -2.29.1 +2.29.2 diff --git a/html.tar.bz2 b/html.tar.bz2 index 2103847..d2f8ee6 100644 --- a/html.tar.bz2 +++ b/html.tar.bz2 @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2c1abe2851b8a9055a361fc2409477ac01ec4829f0588f3b58533cb5f1e4e89 -size 8775440 +oid sha256:7480c92d4197b02504c9a130a0268fd028eb0fd45d3c7a7075b8b78da85050ed +size 9943287 diff --git a/implement-network.fqdns-module-function-bsc-1134860-.patch b/implement-network.fqdns-module-function-bsc-1134860-.patch index 9feae9e..6492017 100644 --- a/implement-network.fqdns-module-function-bsc-1134860-.patch +++ b/implement-network.fqdns-module-function-bsc-1134860-.patch @@ -1,4 +1,4 @@ -From a11587a1209cd198f421fafdb43510b6d651f4b2 Mon Sep 17 00:00:00 2001 +From ac34a8d839f91285f4ced605250422a1ecf5cb55 Mon Sep 17 00:00:00 2001 From: EricS <54029547+ESiebigteroth@users.noreply.github.com> Date: Tue, 3 Sep 2019 11:22:53 +0200 Subject: [PATCH] Implement network.fqdns module function (bsc#1134860) @@ -9,70 +9,30 @@ Subject: [PATCH] Implement network.fqdns module function (bsc#1134860) * Reuse network.fqdns in 
grains.core.fqdns * Return empty list when fqdns grains is disabled - Co-authored-by: Eric Siebigteroth --- - salt/grains/core.py | 66 +++++------------------------------------- - salt/modules/network.py | 60 ++++++++++++++++++++++++++++++++++++++ - salt/utils/network.py | 12 ++++++++ - tests/unit/grains/test_core.py | 63 +++++++++++++++++++++++++++++++--------- - 4 files changed, 130 insertions(+), 71 deletions(-) + salt/grains/core.py | 58 +++------------------------------- + salt/modules/network.py | 12 +++---- + salt/utils/network.py | 2 +- + tests/unit/grains/test_core.py | 55 ++++++++++++-------------------- + 4 files changed, 31 insertions(+), 96 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 0f3ccd9b92..77ae99590f 100644 +index 5f18ba4a58..0dc1d97f97 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -26,8 +26,9 @@ from errno import EACCES, EPERM - import datetime +@@ -23,7 +23,6 @@ import uuid import warnings - import time -+import salt.modules.network - + import zlib + from errno import EACCES, EPERM -from multiprocessing.pool import ThreadPool -+from salt.utils.network import _get_interfaces - # pylint: disable=import-error - try: -@@ -84,6 +85,7 @@ __salt__ = { - 'cmd.run_all': salt.modules.cmdmod._run_all_quiet, - 'smbios.records': salt.modules.smbios.records, - 'smbios.get': salt.modules.smbios.get, -+ 'network.fqdns': salt.modules.network.fqdns, - } - log = logging.getLogger(__name__) - -@@ -107,7 +109,6 @@ HAS_UNAME = True - if not hasattr(os, 'uname'): - HAS_UNAME = False - --_INTERFACES = {} - - # Possible value for h_errno defined in netdb.h - HOST_NOT_FOUND = 1 -@@ -1553,17 +1554,6 @@ def _linux_bin_exists(binary): - return False - - --def _get_interfaces(): -- ''' -- Provide a dict of the connected interfaces and their ip addresses -- ''' -- -- global _INTERFACES -- if not _INTERFACES: -- _INTERFACES = salt.utils.network.interfaces() -- return _INTERFACES -- -- - def _parse_lsb_release(): - ret = {} - try: -@@ -2271,52 +2261,12 @@ def fqdns(): - ''' - Return all known FQDNs for the system by enumerating all interfaces and + import distro + import salt.exceptions +@@ -2406,59 +2405,10 @@ def fqdns(): then trying to reverse resolve them (excluding 'lo' interface). -+ To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file. - ''' + To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file. + """ - # Provides: - # fqdns - @@ -82,224 +42,220 @@ index 0f3ccd9b92..77ae99590f 100644 - def _lookup_fqdn(ip): - try: - name, aliaslist, addresslist = socket.gethostbyaddr(ip) -- return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] +- return [socket.getfqdn(name)] + [ +- als for als in aliaslist if salt.utils.network.is_fqdn(als) +- ] - except socket.herror as err: - if err.errno in (0, HOST_NOT_FOUND, NO_DATA): - # No FQDN for this IP address, so we don't need to know this all the time. 
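[editor's sketch: illustrative only, not part of the patch series] The `_lookup_fqdn` body being dropped from grains/core.py here (it lives on in `salt.modules.network.fqdns`) leans on the full `socket.gethostbyaddr` triple: the canonical name plus whichever aliases are themselves fully qualified. Stand-alone, with a simplified `is_fqdn` stand-in::

    import socket

    def is_fqdn(hostname):  # stand-in for salt.utils.network.is_fqdn
        return "." in hostname.rstrip(".")

    def lookup_fqdns(ip):
        try:
            name, aliaslist, _addresses = socket.gethostbyaddr(ip)
        except (socket.herror, socket.gaierror, OSError):
            return []
        return [socket.getfqdn(name)] + [a for a in aliaslist if is_fqdn(a)]

    # lookup_fqdns("127.0.0.1") usually yields ["localhost"], plus any dotted
    # aliases configured in /etc/hosts.
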
- log.debug("Unable to resolve address %s: %s", ip, err) - else: - log.error(err_message, ip, err) -- except (socket.error, socket.gaierror, socket.timeout) as err: +- except (OSError, socket.gaierror, socket.timeout) as err: - log.error(err_message, ip, err) - - start = time.time() - -- addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces()) -- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces())) -- err_message = 'Exception during resolving address: %s' +- addresses = salt.utils.network.ip_addrs( +- include_loopback=False, interface_data=_get_interfaces() +- ) +- addresses.extend( +- salt.utils.network.ip_addrs6( +- include_loopback=False, interface_data=_get_interfaces() +- ) +- ) +- err_message = "Exception during resolving address: %s" - - # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. - # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing - # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. - - try: -- pool = ThreadPool(8) -- results = pool.map(_lookup_fqdn, addresses) -- pool.close() -- pool.join() +- pool = ThreadPool(8) +- results = pool.map(_lookup_fqdn, addresses) +- pool.close() +- pool.join() - except Exception as exc: -- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) +- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) - - for item in results: - if item: - fqdns.update(item) - - elapsed = time.time() - start -- log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) +- log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed)) - - return {"fqdns": sorted(list(fqdns))} + opt = {"fqdns": []} -+ if __opts__.get('enable_fqdns_grains', True) == True: -+ opt = __salt__['network.fqdns']() ++ if __opts__.get("enable_fqdns_grains", True) == True: ++ opt = __salt__["network.fqdns"]() + return opt def ip_fqdn(): diff --git a/salt/modules/network.py b/salt/modules/network.py -index 38e2bc326e..880f4f8d5f 100644 +index 2e1410c288..59ed43bba6 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py -@@ -11,6 +11,10 @@ import logging - import re - import os - import socket -+import time -+ -+from multiprocessing.pool import ThreadPool -+ +@@ -2,7 +2,6 @@ + Module for gathering and managing network information + """ - # Import salt libs +-# Import python libs + import datetime + import hashlib + import logging +@@ -12,7 +11,6 @@ import socket + import time + from multiprocessing.pool import ThreadPool + +-# Import salt libs import salt.utils.decorators.path -@@ -1887,3 +1891,59 @@ def iphexval(ip): - a = ip.split('.') - hexval = ['%02X' % int(x) for x in a] # pylint: disable=E1321 - return ''.join(hexval) -+ -+ -+def fqdns(): -+ ''' -+ Return all known FQDNs for the system by enumerating all interfaces and -+ then trying to reverse resolve them (excluding 'lo' interface). 
-+ ''' -+ # Provides: -+ # fqdns -+ -+ # Possible value for h_errno defined in netdb.h -+ HOST_NOT_FOUND = 1 -+ NO_DATA = 4 -+ -+ grains = {} -+ fqdns = set() -+ -+ def _lookup_fqdn(ip): -+ try: + import salt.utils.functools + import salt.utils.network +@@ -20,8 +18,6 @@ import salt.utils.platform + import salt.utils.validate.net + from salt._compat import ipaddress + from salt.exceptions import CommandExecutionError +- +-# Import 3rd-party libs + from salt.ext.six.moves import range + + log = logging.getLogger(__name__) +@@ -2076,7 +2072,10 @@ def fqdns(): + + def _lookup_fqdn(ip): + try: +- return [socket.getfqdn(socket.gethostbyaddr(ip)[0])] + name, aliaslist, addresslist = socket.gethostbyaddr(ip) -+ return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] -+ except socket.herror as err: -+ if err.errno in (0, HOST_NOT_FOUND, NO_DATA): -+ # No FQDN for this IP address, so we don't need to know this all the time. -+ log.debug("Unable to resolve address %s: %s", ip, err) -+ else: -+ log.error(err_message, err) -+ except (socket.error, socket.gaierror, socket.timeout) as err: -+ log.error(err_message, err) -+ -+ start = time.time() -+ -+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=salt.utils.network._get_interfaces()) -+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=salt.utils.network._get_interfaces())) -+ err_message = 'Exception during resolving address: %s' -+ -+ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. -+ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing -+ # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. -+ -+ try: -+ pool = ThreadPool(8) -+ results = pool.map(_lookup_fqdn, addresses) -+ pool.close() -+ pool.join() ++ return [socket.getfqdn(name)] + [ ++ als for als in aliaslist if salt.utils.network.is_fqdn(als) ++ ] + except socket.herror as err: + if err.errno in (0, HOST_NOT_FOUND, NO_DATA): + # No FQDN for this IP address, so we don't need to know this all the time. +@@ -2102,13 +2101,12 @@ def fqdns(): + # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing + # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. + +- results = [] + try: + pool = ThreadPool(8) + results = pool.map(_lookup_fqdn, addresses) + pool.close() + pool.join() +- except Exception as exc: # pylint: disable=broad-except + except Exception as exc: -+ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) -+ -+ for item in results: -+ if item: -+ fqdns.update(item) -+ -+ elapsed = time.time() - start -+ log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) -+ -+ return {"fqdns": sorted(list(fqdns))} -\ No newline at end of file + log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) + + for item in results: diff --git a/salt/utils/network.py b/salt/utils/network.py -index 74536cc143..4cc8a05c4a 100644 +index d253ded3ab..25b2d06758 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -50,6 +50,18 @@ except (ImportError, OSError, AttributeError, TypeError): - pass +@@ -49,7 +49,7 @@ except (ImportError, OSError, AttributeError, TypeError): + _INTERFACES = {} -+_INTERFACES = {} -+def _get_interfaces(): #! 
function -+ ''' -+ Provide a dict of the connected interfaces and their ip addresses -+ ''' -+ -+ global _INTERFACES -+ if not _INTERFACES: -+ _INTERFACES = interfaces() -+ return _INTERFACES -+ -+ - def sanitize_host(host): - ''' - Sanitize host string. +-def _get_interfaces(): ++def _get_interfaces(): #! function + """ + Provide a dict of the connected interfaces and their ip addresses + """ diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index ac03b57226..60914204b0 100644 +index d760e57a54..a5ceeb8317 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -35,6 +35,7 @@ import salt.utils.path - import salt.modules.cmdmod - import salt.modules.smbios - import salt.grains.core as core -+import salt.modules.network - - # Import 3rd-party libs - from salt.ext import six -@@ -1029,6 +1030,40 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - with patch.object(salt.utils.dns, 'parse_resolv', MagicMock(return_value=resolv_mock)): +@@ -18,6 +18,7 @@ import salt.utils.network + import salt.utils.path + import salt.utils.platform + from salt._compat import ipaddress ++from salt.ext import six + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, Mock, mock_open, patch + from tests.support.unit import TestCase, skipIf +@@ -1293,14 +1294,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + ): assert core.dns() == ret -+ +- def test_enable_fqdns_false(self): + def test_enablefqdnsFalse(self): -+ ''' -+ tests enable_fqdns_grains is set to False -+ ''' -+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':False}): -+ assert core.fqdns() == {"fqdns": []} -+ -+ + """ + tests enable_fqdns_grains is set to False + """ + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": False}): + assert core.fqdns() == {"fqdns": []} + +- def test_enable_fqdns_true(self): + def test_enablefqdnsTrue(self): -+ ''' -+ testing that grains uses network.fqdns module -+ ''' -+ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}): -+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':True}): -+ assert core.fqdns() == 'my.fake.domain' -+ -+ + """ + testing that grains uses network.fqdns module + """ +@@ -1311,14 +1312,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": True}): + assert core.fqdns() == "my.fake.domain" + +- def test_enable_fqdns_none(self): + def test_enablefqdnsNone(self): -+ ''' -+ testing default fqdns grains is returned when enable_fqdns_grains is None -+ ''' -+ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':None}): -+ assert core.fqdns() == {"fqdns": []} -+ -+ + """ + testing default fqdns grains is returned when enable_fqdns_grains is None + """ + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": None}): + assert core.fqdns() == {"fqdns": []} + +- def test_enable_fqdns_without_patching(self): + def test_enablefqdnswithoutpaching(self): -+ ''' -+ testing fqdns grains is enabled by default -+ ''' -+ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}): -+ assert core.fqdns() == 'my.fake.domain' -+ -+ - @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') - @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8'])) - @patch('salt.utils.network.ip_addrs6', -@@ 
-1044,11 +1079,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']), - ('bluesniff.foo.bar', [], ['fe80::a8b2:93ff:dead:beef'])] - ret = {'fqdns': ['bluesniff.foo.bar', 'foo.bar.baz', 'rinzler.evil-corp.com']} -- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): + """ + testing fqdns grains is enabled by default + """ +@@ -1326,23 +1327,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + "salt.grains.core.__salt__", + {"network.fqdns": MagicMock(return_value="my.fake.domain")}, + ): +- # fqdns is disabled by default on Windows +- if salt.utils.platform.is_windows(): +- assert core.fqdns() == {"fqdns": []} +- else: +- assert core.fqdns() == "my.fake.domain" +- +- def test_enable_fqdns_false_is_proxy(self): +- """ +- testing fqdns grains is disabled by default for proxy minions +- """ +- with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True): +- with patch.dict( +- "salt.grains.core.__salt__", +- {"network.fqdns": MagicMock(return_value="my.fake.domain")}, +- ): +- # fqdns is disabled by default on proxy minions +- assert core.fqdns() == {"fqdns": []} ++ assert core.fqdns() == "my.fake.domain" + + @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") + @patch( +@@ -1367,11 +1352,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + ("bluesniff.foo.bar", [], ["fe80::a8b2:93ff:dead:beef"]), + ] + ret = {"fqdns": ["bluesniff.foo.bar", "foo.bar.baz", "rinzler.evil-corp.com"]} +- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): - fqdns = core.fqdns() - assert "fqdns" in fqdns -- assert len(fqdns['fqdns']) == len(ret['fqdns']) -- assert set(fqdns['fqdns']) == set(ret['fqdns']) -+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): -+ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): +- assert len(fqdns["fqdns"]) == len(ret["fqdns"]) +- assert set(fqdns["fqdns"]) == set(ret["fqdns"]) ++ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): ++ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): + fqdns = core.fqdns() + assert "fqdns" in fqdns -+ assert len(fqdns['fqdns']) == len(ret['fqdns']) -+ assert set(fqdns['fqdns']) == set(ret['fqdns']) ++ assert len(fqdns["fqdns"]) == len(ret["fqdns"]) ++ assert set(fqdns["fqdns"]) == set(ret["fqdns"]) - @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') - @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4'])) -@@ -1094,14 +1130,15 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ('rinzler.evil-corp.com', ["false-hostname", "badaliass"], ['5.6.7.8']), - ('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']), - ('bluesniff.foo.bar', ["alias.bluesniff.foo.bar"], ['fe80::a8b2:93ff:dead:beef'])] -- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): + @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") + @patch("salt.utils.network.ip_addrs", MagicMock(return_value=["1.2.3.4"])) +@@ -1437,14 +1423,15 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + ["fe80::a8b2:93ff:dead:beef"], + ), + ] +- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): - fqdns = core.fqdns() - assert "fqdns" in fqdns - for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: @@ -307,8 +263,8 @@ index ac03b57226..60914204b0 100644 - - for alias in ["throwmeaway", "false-hostname", 
"badaliass"]: - assert alias not in fqdns["fqdns"] -+ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): -+ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): ++ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): ++ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): + fqdns = core.fqdns() + assert "fqdns" in fqdns + for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: @@ -318,8 +274,8 @@ index ac03b57226..60914204b0 100644 + assert alias not in fqdns["fqdns"] def test_core_virtual(self): - ''' + """ -- -2.16.4 +2.29.2 diff --git a/improve-batch_async-to-release-consumed-memory-bsc-1.patch b/improve-batch_async-to-release-consumed-memory-bsc-1.patch index c12f42a..34cf8ee 100644 --- a/improve-batch_async-to-release-consumed-memory-bsc-1.patch +++ b/improve-batch_async-to-release-consumed-memory-bsc-1.patch @@ -1,4 +1,4 @@ -From 65e33acaf10fdd838c0cdf34ec93df3a2ed1f0d2 Mon Sep 17 00:00:00 2001 +From e53d50ce5fabf67eeb5344f7be9cccbb09d0179b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 26 Sep 2019 10:41:06 +0100 @@ -6,38 +6,39 @@ Subject: [PATCH] Improve batch_async to release consumed memory (bsc#1140912) --- - salt/cli/batch_async.py | 73 ++++++++++++++++++++++++++++++------------------- - 1 file changed, 45 insertions(+), 28 deletions(-) + salt/cli/batch_async.py | 89 ++++++++++++++++++++++++----------------- + 1 file changed, 52 insertions(+), 37 deletions(-) diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 8a67331102..2bb50459c8 100644 +index 388b709416..0a0b8f5f83 100644 --- a/salt/cli/batch_async.py +++ b/salt/cli/batch_async.py -@@ -5,6 +5,7 @@ Execute a job on the targeted minions by using a moving window of fixed size `ba +@@ -2,7 +2,7 @@ + Execute a job on the targeted minions by using a moving window of fixed size `batch`. 
+ """ - # Import python libs - from __future__ import absolute_import, print_function, unicode_literals +-import fnmatch +import gc - import tornado - # Import salt libs -@@ -77,6 +78,7 @@ class BatchAsync(object): + # pylint: enable=import-error,no-name-in-module,redefined-builtin + import logging +@@ -78,6 +78,7 @@ class BatchAsync: self.batch_jid = jid_gen() self.find_job_jid = jid_gen() self.find_job_returned = set() + self.ended = False self.event = salt.utils.event.get_event( - 'master', - self.opts['sock_dir'], -@@ -86,6 +88,7 @@ class BatchAsync(object): - io_loop=ioloop, - keep_loop=True) + "master", + self.opts["sock_dir"], +@@ -88,6 +89,7 @@ class BatchAsync: + keep_loop=True, + ) self.scheduled = False + self.patterns = {} def __set_event_handler(self): - ping_return_pattern = 'salt/job/{0}/ret/*'.format(self.ping_jid) -@@ -116,7 +119,7 @@ class BatchAsync(object): + ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) +@@ -118,7 +120,7 @@ class BatchAsync: if minion in self.active: self.active.remove(minion) self.done_minions.add(minion) @@ -45,29 +46,37 @@ index 8a67331102..2bb50459c8 100644 + self.event.io_loop.spawn_callback(self.schedule_next) def _get_next(self): - to_run = self.minions.difference( -@@ -129,23 +132,23 @@ class BatchAsync(object): + to_run = ( +@@ -132,27 +134,27 @@ class BatchAsync: ) return set(list(to_run)[:next_batch_size]) - @tornado.gen.coroutine def check_find_job(self, batch_minions, jid): -- find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) -- self.event.unsubscribe(find_job_return_pattern, match_type='glob') +- find_job_return_pattern = "salt/job/{}/ret/*".format(jid) +- self.event.unsubscribe(find_job_return_pattern, match_type="glob") - self.patterns.remove((find_job_return_pattern, "find_job_return")) + if self.event: -+ find_job_return_pattern = 'salt/job/{0}/ret/*'.format(jid) -+ self.event.unsubscribe(find_job_return_pattern, match_type='glob') ++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) ++ self.event.unsubscribe(find_job_return_pattern, match_type="glob") + self.patterns.remove((find_job_return_pattern, "find_job_return")) -- timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions) +- timedout_minions = batch_minions.difference(self.find_job_returned).difference( +- self.done_minions +- ) - self.timedout_minions = self.timedout_minions.union(timedout_minions) - self.active = self.active.difference(self.timedout_minions) -- running = batch_minions.difference(self.done_minions).difference(self.timedout_minions) -+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions) +- running = batch_minions.difference(self.done_minions).difference( +- self.timedout_minions +- ) ++ timedout_minions = batch_minions.difference( ++ self.find_job_returned ++ ).difference(self.done_minions) + self.timedout_minions = self.timedout_minions.union(timedout_minions) + self.active = self.active.difference(self.timedout_minions) -+ running = batch_minions.difference(self.done_minions).difference(self.timedout_minions) ++ running = batch_minions.difference(self.done_minions).difference( ++ self.timedout_minions ++ ) - if timedout_minions: - self.schedule_next() @@ -83,61 +92,65 @@ index 8a67331102..2bb50459c8 100644 @tornado.gen.coroutine def find_job(self, minions): -@@ -165,8 +168,8 @@ class BatchAsync(object): - gather_job_timeout=self.opts['gather_job_timeout'], +@@ -175,18 +177,12 @@ class BatchAsync: jid=jid, - **self.eauth) + **self.eauth + ) 
- self.event.io_loop.call_later( -- self.opts['gather_job_timeout'], -+ yield tornado.gen.sleep(self.opts['gather_job_timeout']) -+ self.event.io_loop.spawn_callback( - self.check_find_job, - not_done, - jid) -@@ -174,10 +177,6 @@ class BatchAsync(object): +- self.opts["gather_job_timeout"], self.check_find_job, not_done, jid +- ) ++ yield tornado.gen.sleep(self.opts["gather_job_timeout"]) ++ self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid) + @tornado.gen.coroutine def start(self): self.__set_event_handler() -- #start batching even if not all minions respond to ping +- # start batching even if not all minions respond to ping - self.event.io_loop.call_later( -- self.batch_presence_ping_timeout or self.opts['gather_job_timeout'], -- self.start_batch) +- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"], +- self.start_batch, +- ) ping_return = yield self.local.run_job_async( - self.opts['tgt'], - 'test.ping', -@@ -191,6 +190,10 @@ class BatchAsync(object): - metadata=self.metadata, - **self.eauth) - self.targeted_minions = set(ping_return['minions']) -+ #start batching even if not all minions respond to ping -+ yield tornado.gen.sleep(self.batch_presence_ping_timeout or self.opts['gather_job_timeout']) + self.opts["tgt"], + "test.ping", +@@ -198,6 +194,11 @@ class BatchAsync: + **self.eauth + ) + self.targeted_minions = set(ping_return["minions"]) ++ # start batching even if not all minions respond to ping ++ yield tornado.gen.sleep( ++ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] ++ ) + self.event.io_loop.spawn_callback(self.start_batch) -+ @tornado.gen.coroutine def start_batch(self): -@@ -202,12 +205,14 @@ class BatchAsync(object): +@@ -209,14 +210,18 @@ class BatchAsync: "down_minions": self.targeted_minions.difference(self.minions), - "metadata": self.metadata + "metadata": self.metadata, } -- self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid)) +- self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) - yield self.run_next() -+ ret = self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid)) ++ ret = self.event.fire_event( ++ data, "salt/batch/{}/start".format(self.batch_jid) ++ ) + self.event.io_loop.spawn_callback(self.run_next) + @tornado.gen.coroutine def end_batch(self): - left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions)) + left = self.minions.symmetric_difference( + self.done_minions.union(self.timedout_minions) + ) - if not left: + if not left and not self.ended: + self.ended = True data = { "available_minions": self.minions, "down_minions": self.targeted_minions.difference(self.minions), -@@ -220,20 +225,26 @@ class BatchAsync(object): +@@ -229,20 +234,26 @@ class BatchAsync: for (pattern, label) in self.patterns: if label in ["ping_return", "batch_run"]: - self.event.unsubscribe(pattern, match_type='glob') + self.event.unsubscribe(pattern, match_type="glob") + del self + gc.collect() + yield @@ -161,14 +174,16 @@ index 8a67331102..2bb50459c8 100644 - yield self.local.run_job_async( + ret = yield self.local.run_job_async( next_batch, - self.opts['fun'], - self.opts['arg'], -@@ -244,11 +255,17 @@ class BatchAsync(object): - jid=self.batch_jid, - metadata=self.metadata) + self.opts["fun"], + self.opts["arg"], +@@ -254,13 +265,17 @@ class BatchAsync: + metadata=self.metadata, + ) -- self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch)) -+ yield tornado.gen.sleep(self.opts['timeout']) +- 
self.event.io_loop.call_later( +- self.opts["timeout"], self.find_job, set(next_batch) +- ) ++ yield tornado.gen.sleep(self.opts["timeout"]) + self.event.io_loop.spawn_callback(self.find_job, set(next_batch)) except Exception as ex: log.error("Error in scheduling next batch: %s", ex) @@ -185,6 +200,6 @@ index 8a67331102..2bb50459c8 100644 + self.ioloop = None + gc.collect() -- -2.16.4 +2.29.2 diff --git a/include-aliases-in-the-fqdns-grains.patch b/include-aliases-in-the-fqdns-grains.patch index 7823d00..f030206 100644 --- a/include-aliases-in-the-fqdns-grains.patch +++ b/include-aliases-in-the-fqdns-grains.patch @@ -1,4 +1,4 @@ -From 512b189808ea0d7b333587689d7e7eb52d16b189 Mon Sep 17 00:00:00 2001 +From 3c956a1cf1de17c5c49f0856051cabe2ffb4d0f2 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 29 Jan 2019 11:11:38 +0100 Subject: [PATCH] Include aliases in the fqdns grains @@ -15,54 +15,116 @@ Add UT for fqdns aliases Leverage cached interfaces, if any. --- - salt/grains/core.py | 14 ++++++-------- - salt/utils/network.py | 12 ++++++++++++ - tests/unit/grains/test_core.py | 28 +++++++++++++++++++++++++--- - tests/unit/utils/test_network.py | 24 ++++++++++++++++++++++++ - 4 files changed, 67 insertions(+), 11 deletions(-) + salt/grains/core.py | 69 +++++++++++++++++++++----------- + salt/utils/network.py | 16 ++++++++ + tests/unit/grains/test_core.py | 45 ++++++++++++++++++--- + tests/unit/utils/test_network.py | 37 +++++++++++++++++ + 4 files changed, 138 insertions(+), 29 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 7b7e328520..309e4c9c4a 100644 +index bc3cf129cd..006878f806 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -2275,14 +2275,13 @@ def fqdns(): - grains = {} - fqdns = set() +@@ -1733,29 +1733,31 @@ def _parse_cpe_name(cpe): -- addresses = salt.utils.network.ip_addrs(include_loopback=False, -- interface_data=_INTERFACES) -- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, -- interface_data=_INTERFACES)) -- err_message = 'An exception occurred resolving address \'%s\': %s' -+ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces()) -+ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces())) -+ err_message = 'Exception during resolving address: %s' - for ip in addresses: - try: -- fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0])) + + def _parse_cpe_name(cpe): +- ''' ++ """ + Parse CPE_NAME data from the os-release + + Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe + + :param cpe: + :return: +- ''' ++ """ + part = { +- 'o': 'operating system', +- 'h': 'hardware', +- 'a': 'application', ++ "o": "operating system", ++ "h": "hardware", ++ "a": "application", + } + ret = {} +- cpe = (cpe or '').split(':') +- if len(cpe) > 4 and cpe[0] == 'cpe': +- if cpe[1].startswith('/'): # WFN to URI +- ret['vendor'], ret['product'], ret['version'] = cpe[2:5] +- ret['phase'] = cpe[5] if len(cpe) > 5 else None +- ret['part'] = part.get(cpe[1][1:]) +- elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string +- ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] +- ret['part'] = part.get(cpe[2]) ++ cpe = (cpe or "").split(":") ++ if len(cpe) > 4 and cpe[0] == "cpe": ++ if cpe[1].startswith("/"): # WFN to URI ++ ret["vendor"], ret["product"], ret["version"] = cpe[2:5] ++ ret["phase"] = cpe[5] if len(cpe) > 5 else None ++ ret["part"] = 
part.get(cpe[1][1:]) ++ elif len(cpe) == 13 and cpe[1] == "2.3": # WFN to a string ++ ret["vendor"], ret["product"], ret["version"], ret["phase"] = [ ++ x if x != "*" else None for x in cpe[3:7] ++ ] ++ ret["part"] = part.get(cpe[2]) + + return ret + +@@ -2396,15 +2398,36 @@ def fqdns(): + """ + # Provides: + # fqdns +- opt = {"fqdns": []} +- if __opts__.get( +- "enable_fqdns_grains", +- False +- if salt.utils.platform.is_windows() or salt.utils.platform.is_proxy() +- else True, +- ): +- opt = __salt__["network.fqdns"]() +- return opt ++ ++ grains = {} ++ fqdns = set() ++ ++ addresses = salt.utils.network.ip_addrs( ++ include_loopback=False, interface_data=_get_interfaces() ++ ) ++ addresses.extend( ++ salt.utils.network.ip_addrs6( ++ include_loopback=False, interface_data=_get_interfaces() ++ ) ++ ) ++ err_message = "Exception during resolving address: %s" ++ for ip in addresses: ++ try: + name, aliaslist, addresslist = socket.gethostbyaddr(ip) -+ fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]) - except socket.herror as err: - if err.errno in (0, HOST_NOT_FOUND, NO_DATA): - # No FQDN for this IP address, so we don't need to know this all the time. -@@ -2292,8 +2291,7 @@ def fqdns(): - except (socket.error, socket.gaierror, socket.timeout) as err: - log.error(err_message, ip, err) - -- grains['fqdns'] = sorted(list(fqdns)) -- return grains ++ fqdns.update( ++ [socket.getfqdn(name)] ++ + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] ++ ) ++ except socket.herror as err: ++ if err.errno in (0, HOST_NOT_FOUND, NO_DATA): ++ # No FQDN for this IP address, so we don't need to know this all the time. ++ log.debug("Unable to resolve address %s: %s", ip, err) ++ else: ++ log.error(err_message, ip, err) ++ except (OSError, socket.gaierror, socket.timeout) as err: ++ log.error(err_message, ip, err) ++ + return {"fqdns": sorted(list(fqdns))} def ip_fqdn(): diff --git a/salt/utils/network.py b/salt/utils/network.py -index 906d1cb3bc..2ae2e213b7 100644 +index b3e8db3886..dd7fceb91a 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -1958,3 +1958,15 @@ def parse_host_port(host_port): - raise ValueError('bad hostname: "{}"'.format(host)) - - return host, port +@@ -2208,3 +2208,19 @@ def filter_by_networks(values, networks): + raise ValueError("Do not know how to filter a {}".format(type(values))) + else: + return values + + +def is_fqdn(hostname): @@ -74,42 +136,63 @@ index 906d1cb3bc..2ae2e213b7 100644 + """ + + compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(? Date: Tue, 19 May 2020 10:34:35 +0200 Subject: [PATCH] info_installed works without status attr now @@ -8,39 +8,39 @@ detect if a package was installed or not. Now info_installed adds the 'status' for the 'lowpkg.info' request again. 
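
The heart of the fix is the attr handling in the hunk further down; in isolation it behaves as in this sketch, where the caller-supplied value "version,arch" is hypothetical:

    attr = "version,arch"
    if attr:
        # force 'status' into the requested attribute list so lowpkg.info
        # can still tell installed packages apart, even when the caller
        # did not ask for it
        attr_list = set(attr.split(","))
        attr_list.add("status")
        attr = ",".join(attr_list)
    print(attr)  # e.g. "arch,status,version"; order is unspecified (set)
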
--- salt/modules/aptpkg.py | 9 +++++++++ - tests/unit/modules/test_aptpkg.py | 17 +++++++++++++++++ - 2 files changed, 26 insertions(+) + tests/unit/modules/test_aptpkg.py | 20 ++++++++++++++++++++ + 2 files changed, 29 insertions(+) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 2835d32263..765d69aff2 100644 +index db0480b45d..e4a9872aad 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -2867,6 +2867,15 @@ def info_installed(*names, **kwargs): - failhard = kwargs.pop('failhard', True) - kwargs.pop('errors', None) # Only for compatibility with RPM - attr = kwargs.pop('attr', None) # Package attributes to return +@@ -2923,6 +2923,15 @@ def info_installed(*names, **kwargs): + failhard = kwargs.pop("failhard", True) + kwargs.pop("errors", None) # Only for compatibility with RPM + attr = kwargs.pop("attr", None) # Package attributes to return + + # status is needed to see if a package is installed. So we have to add it, + # even if it's excluded via attr parameter. Otherwise all packages are + # returned. + if attr: -+ attr_list = set(attr.split(',')) -+ attr_list.add('status') -+ attr = ','.join(attr_list) ++ attr_list = set(attr.split(",")) ++ attr_list.add("status") ++ attr = ",".join(attr_list) + - all_versions = kwargs.pop('all_versions', False) # This is for backward compatible structure only - - if kwargs: + all_versions = kwargs.pop( + "all_versions", False + ) # This is for backward compatible structure only diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index ba1d874e69..b0193aeaf7 100644 +index 3c9744e224..51dfce29eb 100644 --- a/tests/unit/modules/test_aptpkg.py +++ b/tests/unit/modules/test_aptpkg.py -@@ -257,6 +257,23 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(aptpkg.info_installed('wget'), installed) +@@ -297,6 +297,26 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): + self.assertEqual(aptpkg.info_installed("wget"), installed) self.assertEqual(len(aptpkg.info_installed()), 1) + def test_info_installed_attr_without_status(self): -+ ''' ++ """ + Test info_installed 'attr' for inclusion of 'status' attribute. + + Since info_installed should only return installed packages, we need to @@ -49,17 +49,20 @@ index ba1d874e69..b0193aeaf7 100644 + to check if the package is installed and would return everything. 
+ + :return: -+ ''' -+ with patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) as wget_lowpkg: -+ ret = aptpkg.info_installed('wget', attr='version') -+ calls = wget_lowpkg['lowpkg.info'].call_args_list.pop() -+ self.assertIn('status', calls.kwargs['attr']) -+ self.assertIn('version', calls.kwargs['attr']) ++ """ ++ with patch( ++ "salt.modules.aptpkg.__salt__", ++ {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, ++ ) as wget_lowpkg: ++ ret = aptpkg.info_installed("wget", attr="version") ++ calls = wget_lowpkg["lowpkg.info"].call_args_list.pop() ++ self.assertIn("status", calls.kwargs["attr"]) ++ self.assertIn("version", calls.kwargs["attr"]) + - @patch('salt.modules.aptpkg.__salt__', {'lowpkg.info': MagicMock(return_value=LOWPKG_INFO)}) - def test_info_installed_attr(self): - ''' + @patch( + "salt.modules.aptpkg.__salt__", + {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, -- -2.27.0 +2.29.2 diff --git a/integration-of-msi-authentication-with-azurearm-clou.patch b/integration-of-msi-authentication-with-azurearm-clou.patch index 9792079..2f8a78c 100644 --- a/integration-of-msi-authentication-with-azurearm-clou.patch +++ b/integration-of-msi-authentication-with-azurearm-clou.patch @@ -1,18 +1,23 @@ -From c750e854c637e405a788f91d5b9a7bd1a0a6edfd Mon Sep 17 00:00:00 2001 +From bb2070d4f4e8fbb5a963c521d61feb7419abdec1 Mon Sep 17 00:00:00 2001 From: ed lane Date: Thu, 30 Aug 2018 06:07:08 -0600 Subject: [PATCH] Integration of MSI authentication with azurearm cloud driver (#105) --- - salt/cloud/clouds/azurearm.py | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) + salt/cloud/clouds/azurearm.py | 98 +++++++++++++++-------------------- + 1 file changed, 43 insertions(+), 55 deletions(-) diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py -index 047fdac0a9..2c1fa04ae8 100644 +index 54fc7b497b..8b9254cecb 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py -@@ -58,6 +58,9 @@ The Azure ARM cloud module is used to control access to Microsoft Azure Resource +@@ -1,4 +1,3 @@ +-# -*- coding: utf-8 -*- + """ + Azure ARM Cloud Module + ====================== +@@ -61,6 +60,9 @@ The Azure ARM cloud module is used to control access to Microsoft Azure Resource virtual machine type will be "Windows". Only set this parameter on profiles which install Windows operating systems. 
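
A hypothetical, heavily trimmed sketch of what MSI-style authentication amounts to for a driver like this one; the real code builds several service clients, MSIAuthentication is part of the msrestazure dependency the driver already uses, and get_credentials/provider_config are invented names:

    from msrestazure.azure_active_directory import MSIAuthentication

    def get_credentials(provider_config):
        if provider_config.get("tenant") is not None:
            # a configured tenant keeps the Service Principal path
            # (client_id + secret, omitted in this sketch)
            raise NotImplementedError("service principal path omitted")
        # otherwise fall back to Managed Service Identity: the VM's local
        # identity endpoint hands out tokens, so no secrets are required
        return MSIAuthentication()
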
@@ -22,25 +27,361 @@ index 047fdac0a9..2c1fa04ae8 100644 Example ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/azure.conf`` configuration: -@@ -258,7 +261,8 @@ def get_configured_provider(): - provider = __is_provider_configured( - __opts__, - __active_provider_name__ or __virtualname__, -- ('subscription_id', 'username', 'password') -+ required_keys=('subscription_id', 'username', 'password'), -+ log_message=False +@@ -91,7 +93,6 @@ Example ``/etc/salt/cloud.providers`` or + + + # pylint: disable=wrong-import-position,wrong-import-order +-from __future__ import absolute_import, print_function, unicode_literals + + import importlib + import logging +@@ -121,7 +122,6 @@ from salt.exceptions import ( + # Salt libs + from salt.ext import six + +-# Import 3rd-party libs + HAS_LIBS = False + try: + import azure.mgmt.compute.models as compute_models +@@ -179,7 +179,7 @@ def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument ) - return provider -@@ -301,6 +305,7 @@ def get_conn(client_type): + for resource in provider_query.resource_types: +- if six.text_type(resource.resource_type) == kwargs["resource_type"]: ++ if str(resource.resource_type) == kwargs["resource_type"]: + resource_dict = resource.as_dict() + api_versions = resource_dict["api_versions"] + except CloudError as exc: +@@ -263,6 +263,7 @@ def get_conn(client_type): ) if tenant is not None: + # using Service Principle style authentication... client_id = config.get_cloud_config_value( - 'client_id', - get_configured_provider(), __opts__, search_global=False + "client_id", get_configured_provider(), __opts__, search_global=False + ) +@@ -319,7 +320,7 @@ def avail_locations(call=None): + ) + locations = [] + for resource in provider_query.resource_types: +- if six.text_type(resource.resource_type) == "virtualMachines": ++ if str(resource.resource_type) == "virtualMachines": + resource_dict = resource.as_dict() + locations = resource_dict["locations"] + for location in locations: +@@ -399,7 +400,7 @@ def avail_images(call=None): + results = pool.map_async(_get_publisher_images, publishers) + results.wait() + +- ret = {k: v for result in results.get() for k, v in six.iteritems(result)} ++ ret = {k: v for result in results.get() for k, v in result.items()} + + return ret + +@@ -529,7 +530,7 @@ def list_nodes_full(call=None): + results = pool.map_async(_get_node_info, nodes) + results.wait() + +- group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)} ++ group_ret = {k: v for result in results.get() for k, v in result.items()} + ret.update(group_ret) + + return ret +@@ -707,7 +708,7 @@ def create_network_interface(call=None, kwargs=None): + ) + + if kwargs.get("iface_name") is None: +- kwargs["iface_name"] = "{0}-iface0".format(vm_["name"]) ++ kwargs["iface_name"] = "{}-iface0".format(vm_["name"]) + + try: + subnet_obj = netconn.subnets.get( +@@ -717,7 +718,7 @@ def create_network_interface(call=None, kwargs=None): + ) + except CloudError as exc: + raise SaltCloudSystemExit( +- '{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format( ++ '{} (Resource Group: "{}", VNET: "{}", Subnet: "{}")'.format( + exc.message, + kwargs["network_resource_group"], + kwargs["network"], +@@ -740,11 +741,11 @@ def create_network_interface(call=None, kwargs=None): + ) + pool_ids.append({"id": lbbep_data.as_dict()["id"]}) + except CloudError as exc: +- log.error("There was a cloud error: %s", six.text_type(exc)) ++ log.error("There was a cloud error: %s", str(exc)) + except KeyError as 
exc: + log.error( + "There was an error getting the Backend Pool ID: %s", +- six.text_type(exc), ++ str(exc), + ) + ip_kwargs["load_balancer_backend_address_pools"] = pool_ids + +@@ -755,7 +756,7 @@ def create_network_interface(call=None, kwargs=None): + ip_kwargs["private_ip_allocation_method"] = IPAllocationMethod.dynamic + + if kwargs.get("allocate_public_ip") is True: +- pub_ip_name = "{0}-ip".format(kwargs["iface_name"]) ++ pub_ip_name = "{}-ip".format(kwargs["iface_name"]) + poller = netconn.public_ip_addresses.create_or_update( + resource_group_name=kwargs["resource_group"], + public_ip_address_name=pub_ip_name, +@@ -773,11 +774,11 @@ def create_network_interface(call=None, kwargs=None): + ) + if pub_ip_data.ip_address: # pylint: disable=no-member + ip_kwargs["public_ip_address"] = PublicIPAddress( +- id=six.text_type(pub_ip_data.id), # pylint: disable=no-member ++ id=str(pub_ip_data.id), # pylint: disable=no-member + ) + ip_configurations = [ + NetworkInterfaceIPConfiguration( +- name="{0}-ip".format(kwargs["iface_name"]), ++ name="{}-ip".format(kwargs["iface_name"]), + subnet=subnet_obj, + **ip_kwargs + ) +@@ -790,7 +791,7 @@ def create_network_interface(call=None, kwargs=None): + raise ValueError("Timed out waiting for public IP Address.") + time.sleep(5) + else: +- priv_ip_name = "{0}-ip".format(kwargs["iface_name"]) ++ priv_ip_name = "{}-ip".format(kwargs["iface_name"]) + ip_configurations = [ + NetworkInterfaceIPConfiguration( + name=priv_ip_name, subnet=subnet_obj, **ip_kwargs +@@ -900,7 +901,7 @@ def request_instance(vm_): + ) + vm_["iface_id"] = iface_data["id"] + +- disk_name = "{0}-vol0".format(vm_["name"]) ++ disk_name = "{}-vol0".format(vm_["name"]) + + vm_username = config.get_cloud_config_value( + "ssh_username", +@@ -922,8 +923,8 @@ def request_instance(vm_): + ssh_publickeyfile_contents = spkc_.read() + except Exception as exc: # pylint: disable=broad-except + raise SaltCloudConfigError( +- "Failed to read ssh publickey file '{0}': " +- "{1}".format(ssh_publickeyfile, exc.args[-1]) ++ "Failed to read ssh publickey file '{}': " ++ "{}".format(ssh_publickeyfile, exc.args[-1]) + ) + + disable_password_authentication = config.get_cloud_config_value( +@@ -941,7 +942,7 @@ def request_instance(vm_): + if not win_installer and ssh_publickeyfile_contents is not None: + sshpublickey = SshPublicKey( + key_data=ssh_publickeyfile_contents, +- path="/home/{0}/.ssh/authorized_keys".format(vm_username), ++ path="/home/{}/.ssh/authorized_keys".format(vm_username), + ) + sshconfiguration = SshConfiguration(public_keys=[sshpublickey],) + linuxconfiguration = LinuxConfiguration( +@@ -991,9 +992,9 @@ def request_instance(vm_): + availability_set = config.get_cloud_config_value( + "availability_set", vm_, __opts__, search_global=False, default=None + ) +- if availability_set is not None and isinstance(availability_set, six.string_types): ++ if availability_set is not None and isinstance(availability_set, str): + availability_set = { +- "id": "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}".format( ++ "id": "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}".format( + subscription_id, vm_["resource_group"], availability_set + ) + } +@@ -1004,7 +1005,7 @@ def request_instance(vm_): + + storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint + +- if isinstance(vm_.get("volumes"), six.string_types): ++ if isinstance(vm_.get("volumes"), str): + volumes = salt.utils.yaml.safe_load(vm_["volumes"]) + else: + volumes 
= vm_.get("volumes") +@@ -1018,16 +1019,14 @@ def request_instance(vm_): + lun = 0 + luns = [] + for volume in volumes: +- if isinstance(volume, six.string_types): ++ if isinstance(volume, str): + volume = {"name": volume} + + volume.setdefault( + "name", + volume.get( + "name", +- volume.get( +- "name", "{0}-datadisk{1}".format(vm_["name"], six.text_type(lun)) +- ), ++ volume.get("name", "{}-datadisk{}".format(vm_["name"], str(lun))), + ), + ) + +@@ -1050,7 +1049,7 @@ def request_instance(vm_): + del volume["media_link"] + elif volume.get("vhd") == "unmanaged": + volume["vhd"] = VirtualHardDisk( +- uri="https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd".format( ++ uri="https://{}.blob.{}/vhds/{}-datadisk{}.vhd".format( + vm_["storage_account"], + storage_endpoint_suffix, + vm_["name"], +@@ -1090,7 +1089,7 @@ def request_instance(vm_): + create_option=DiskCreateOptionTypes.from_image, + name=disk_name, + vhd=VirtualHardDisk( +- uri="https://{0}.blob.{1}/vhds/{2}.vhd".format( ++ uri="https://{}.blob.{}/vhds/{}.vhd".format( + vm_["storage_account"], storage_endpoint_suffix, disk_name, + ), + ), +@@ -1209,7 +1208,7 @@ def request_instance(vm_): + __utils__["cloud.fire_event"]( + "event", + "requesting instance", +- "salt/cloud/{0}/requesting".format(vm_["name"]), ++ "salt/cloud/{}/requesting".format(vm_["name"]), + args=__utils__["cloud.filter_event"]( + "requesting", vm_, ["name", "profile", "provider", "driver"] + ), +@@ -1260,7 +1259,7 @@ def create(vm_): + __utils__["cloud.fire_event"]( + "event", + "starting create", +- "salt/cloud/{0}/creating".format(vm_["name"]), ++ "salt/cloud/{}/creating".format(vm_["name"]), + args=__utils__["cloud.filter_event"]( + "creating", vm_, ["name", "profile", "provider", "driver"] + ), +@@ -1278,9 +1277,7 @@ def create(vm_): + vm_request = request_instance(vm_=vm_) + + if not vm_request or "error" in vm_request: +- err_message = "Error creating VM {0}! ({1})".format( +- vm_["name"], six.text_type(vm_request) +- ) ++ err_message = "Error creating VM {}! 
({})".format(vm_["name"], str(vm_request)) + log.error(err_message) + raise SaltCloudSystemExit(err_message) + +@@ -1322,7 +1319,7 @@ def create(vm_): + try: + log.warning(exc) + finally: +- raise SaltCloudSystemExit(six.text_type(exc)) ++ raise SaltCloudSystemExit(str(exc)) + + vm_["ssh_host"] = data + if not vm_.get("ssh_username"): +@@ -1341,7 +1338,7 @@ def create(vm_): + __utils__["cloud.fire_event"]( + "event", + "created instance", +- "salt/cloud/{0}/created".format(vm_["name"]), ++ "salt/cloud/{}/created".format(vm_["name"]), + args=__utils__["cloud.filter_event"]( + "created", vm_, ["name", "profile", "provider", "driver"] + ), +@@ -1548,9 +1545,7 @@ def _get_cloud_environment(): + cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD") + except (AttributeError, ImportError): + raise SaltCloudSystemExit( +- "The azure {0} cloud environment is not available.".format( +- cloud_environment +- ) ++ "The azure {} cloud environment is not available.".format(cloud_environment) + ) + + return cloud_env +@@ -1585,7 +1580,7 @@ def _get_block_blob_service(kwargs=None): + resource_group, storage_account + ) + storage_keys = {v.key_name: v.value for v in storage_keys.keys} +- storage_key = next(six.itervalues(storage_keys)) ++ storage_key = next(iter(storage_keys.values())) + + cloud_env = _get_cloud_environment() + +@@ -1620,7 +1615,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument + "server_encrypted": blob.properties.server_encrypted, + } + except Exception as exc: # pylint: disable=broad-except +- log.warning(six.text_type(exc)) ++ log.warning(str(exc)) + + return ret + +@@ -1655,9 +1650,7 @@ def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argum + compconn.disks.delete(kwargs["resource_group"], kwargs["blob"]) + except Exception as exc: # pylint: disable=broad-except + log.error( +- "Error deleting managed disk %s - %s", +- kwargs.get("blob"), +- six.text_type(exc), ++ "Error deleting managed disk %s - %s", kwargs.get("blob"), str(exc), + ) + return False + +@@ -1834,7 +1827,7 @@ def create_or_update_vmextension( + except CloudError as exc: + __utils__["azurearm.log_cloud_error"]( + "compute", +- "Error attempting to create the VM extension: {0}".format(exc.message), ++ "Error attempting to create the VM extension: {}".format(exc.message), + ) + ret = {"error": exc.message} + +@@ -1881,11 +1874,9 @@ def stop(name, call=None): + ret = {"error": exc.message} + if not ret: + __utils__["azurearm.log_cloud_error"]( +- "compute", "Unable to find virtual machine with name: {0}".format(name) ++ "compute", "Unable to find virtual machine with name: {}".format(name) + ) +- ret = { +- "error": "Unable to find virtual machine with name: {0}".format(name) +- } ++ ret = {"error": "Unable to find virtual machine with name: {}".format(name)} + else: + try: + instance = compconn.virtual_machines.deallocate( +@@ -1896,7 +1887,7 @@ def stop(name, call=None): + ret = vm_result.as_dict() + except CloudError as exc: + __utils__["azurearm.log_cloud_error"]( +- "compute", "Error attempting to stop {0}: {1}".format(name, exc.message) ++ "compute", "Error attempting to stop {}: {}".format(name, exc.message) + ) + ret = {"error": exc.message} + +@@ -1945,11 +1936,9 @@ def start(name, call=None): + ret = {"error": exc.message} + if not ret: + __utils__["azurearm.log_cloud_error"]( +- "compute", "Unable to find virtual machine with name: {0}".format(name) ++ "compute", "Unable to find virtual machine with name: {}".format(name) + ) +- ret 
= { +- "error": "Unable to find virtual machine with name: {0}".format(name) +- } ++ ret = {"error": "Unable to find virtual machine with name: {}".format(name)} + else: + try: + instance = compconn.virtual_machines.start( +@@ -1960,8 +1949,7 @@ def start(name, call=None): + ret = vm_result.as_dict() + except CloudError as exc: + __utils__["azurearm.log_cloud_error"]( +- "compute", +- "Error attempting to start {0}: {1}".format(name, exc.message), ++ "compute", "Error attempting to start {}: {}".format(name, exc.message), + ) + ret = {"error": exc.message} + -- -2.16.4 +2.29.2 diff --git a/invalidate-file-list-cache-when-cache-file-modified-.patch b/invalidate-file-list-cache-when-cache-file-modified-.patch index a4826a3..981febd 100644 --- a/invalidate-file-list-cache-when-cache-file-modified-.patch +++ b/invalidate-file-list-cache-when-cache-file-modified-.patch @@ -1,4 +1,4 @@ -From 1ca1bb7c01b1e589147c32b16eda719537ab5b62 Mon Sep 17 00:00:00 2001 +From c9268ec731371cdd7b2fc129ad111d9f73800752 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Tue, 22 Sep 2020 15:15:51 +0100 @@ -8,16 +8,16 @@ Subject: [PATCH] Invalidate file list cache when cache file modified Add test_future_file_list_cache_file_ignored unit test --- salt/fileserver/__init__.py | 2 +- - tests/unit/test_fileserver.py | 47 +++++++++++++++++++++++++++++++++-- - 2 files changed, 46 insertions(+), 3 deletions(-) + tests/unit/test_fileserver.py | 53 +++++++++++++++++++++++------------ + 2 files changed, 36 insertions(+), 19 deletions(-) diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py -index 919987e2fc..1b8de51bdc 100644 +index c8c417168f..b9e345d8c3 100644 --- a/salt/fileserver/__init__.py +++ b/salt/fileserver/__init__.py -@@ -142,7 +142,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock): - 'file=%s mtime=%s current_time=%s', - list_cache, current_time, file_mtime +@@ -132,7 +132,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock): + current_time, + file_mtime, ) - age = 0 + age = -1 @@ -25,46 +25,58 @@ index 919987e2fc..1b8de51bdc 100644 age = current_time - file_mtime else: diff --git a/tests/unit/test_fileserver.py b/tests/unit/test_fileserver.py -index d38e22c8e1..b92b32947b 100644 +index 0bf30ee5cc..a1087bf4b0 100644 --- a/tests/unit/test_fileserver.py +++ b/tests/unit/test_fileserver.py -@@ -6,11 +6,17 @@ - # Import Python libs - from __future__ import absolute_import, print_function, unicode_literals +@@ -1,14 +1,15 @@ +-# -*- coding: utf-8 -*- + """ + :codeauthor: Joao Mesquita + """ --# Import Salt Testing libs --from tests.support.unit import TestCase +-# Import Python libs +-from __future__ import absolute_import, print_function, unicode_literals + +-from salt import fileserver +import datetime +import os +import time +-# Import Salt Testing libs +import salt.utils.files - from salt import fileserver - -+# Import Salt Testing libs ++from salt import fileserver +from tests.support.helpers import with_tempdir -+from tests.support.unit import TestCase -+ + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.unit import TestCase - class MapDiffTestCase(TestCase): - def test_diff_with_diffent_keys(self): -@@ -28,3 +34,40 @@ class MapDiffTestCase(TestCase): - map1 = {'file1': 12345} - map2 = {'file1': 1234} +@@ -31,22 +32,38 @@ class MapDiffTestCase(TestCase): assert fileserver.diff_mtime_map(map1, map2) is True -+ -+ + + +-class VCSBackendWhitelistCase(TestCase, LoaderModuleMockMixin): +class 
VCSBackendWhitelistCase(TestCase): -+ def setup_loader_modules(self): -+ return {fileserver: {}} -+ + def setup_loader_modules(self): + return {fileserver: {}} + +- def test_whitelist(self): + @with_tempdir() + def test_future_file_list_cache_file_ignored(self, cachedir): -+ opts = { + opts = { +- "fileserver_backend": ["roots", "git", "hgfs", "svn"], + "fileserver_backend": ["roots"], + "cachedir": cachedir, -+ "extension_modules": "", -+ } + "extension_modules": "", + } +- fs = fileserver.Fileserver(opts) +- assert fs.servers.whitelist == [ +- "git", +- "gitfs", +- "hg", +- "hgfs", +- "svn", +- "svnfs", +- "roots", +- ], fs.servers.whitelist + + back_cachedir = os.path.join(cachedir, "file_lists/roots") + os.makedirs(os.path.join(back_cachedir)) @@ -90,6 +102,6 @@ index d38e22c8e1..b92b32947b 100644 + ret[1] is True + ), "Cache file list cache file is not refreshed when future modification time" -- -2.28.0 +2.29.2 diff --git a/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch index fc103b0..ebbc276 100644 --- a/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch +++ b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch @@ -1,4 +1,4 @@ -From 2b5903d2429607a3f46d648520e24c357a56aea6 Mon Sep 17 00:00:00 2001 +From a6e490d8cede6e66bb5f22f314e1ec4e898dfa3c Mon Sep 17 00:00:00 2001 From: Can Bulut Bayburt <1103552+cbbayburt@users.noreply.github.com> Date: Wed, 4 Dec 2019 15:59:46 +0100 Subject: [PATCH] Let salt-ssh use 'platform-python' binary in RHEL8 @@ -14,7 +14,7 @@ creating the sh shim. 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py -index 1373274739..d9e91b0f50 100644 +index 287d0b8c4c..ef9eb0c07e 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -147,7 +147,7 @@ elif [ "$SUDO" ] && [ -n "$SUDO_USER" ] @@ -27,6 +27,6 @@ index 1373274739..d9e91b0f50 100644 do if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));" -- -2.16.4 +2.29.2 diff --git a/loader-invalidate-the-import-cachefor-extra-modules.patch b/loader-invalidate-the-import-cachefor-extra-modules.patch deleted file mode 100644 index de72b97..0000000 --- a/loader-invalidate-the-import-cachefor-extra-modules.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 444e00c6601b878444923f573fdb5f000342be9a Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Thu, 12 Mar 2020 16:39:42 +0100 -Subject: [PATCH] loader: invalidate the import cachefor extra modules - -Because we are mangling with importlib, we can find from time to -time an invalidation issue with sys.path_importer_cache, that -requires the removal of FileFinder that remain None for the -extra_module_dirs - -(cherry picked from commit 0fb8e707a45d5caf40759e8b4943590d6fce5046) ---- - salt/loader.py | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/salt/loader.py b/salt/loader.py -index 742b2f8e22..5bd4773645 100644 ---- a/salt/loader.py -+++ b/salt/loader.py -@@ -1544,9 +1544,11 @@ class LazyLoader(salt.utils.lazy.LazyDict): - self._clean_module_dirs.append(directory) - - def __clean_sys_path(self): -+ invalidate_path_importer_cache = False - for directory in self._clean_module_dirs: - if directory in sys.path: - sys.path.remove(directory) -+ invalidate_path_importer_cache = True - self._clean_module_dirs = [] - - # Be sure that sys.path_importer_cache do not contains any -@@ -1554,6 +1556,16 @@ class 
LazyLoader(salt.utils.lazy.LazyDict): - if USE_IMPORTLIB: - importlib.invalidate_caches() - -+ # Because we are mangling with importlib, we can find from -+ # time to time an invalidation issue with -+ # sys.path_importer_cache, that requires the removal of -+ # FileFinder that remain None for the extra_module_dirs -+ if invalidate_path_importer_cache: -+ for directory in self.extra_module_dirs: -+ if directory in sys.path_importer_cache \ -+ and sys.path_importer_cache[directory] is None: -+ del sys.path_importer_cache[directory] -+ - def _load_module(self, name): - mod = None - fpath, suffix = self.file_mapping[name][:2] --- -2.16.4 - - diff --git a/loop-fix-variable-names-for-until_no_eval.patch b/loop-fix-variable-names-for-until_no_eval.patch index 4a7e1d6..03d3416 100644 --- a/loop-fix-variable-names-for-until_no_eval.patch +++ b/loop-fix-variable-names-for-until_no_eval.patch @@ -1,4 +1,4 @@ -From 2670f83fd1309fbf9fdc98f15f9a6e6a3ecc038d Mon Sep 17 00:00:00 2001 +From 239e897776b889105cfd6f54092100c86f52ce21 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 24 Mar 2020 17:46:23 +0100 Subject: [PATCH] loop: fix variable names for until_no_eval @@ -8,12 +8,12 @@ Subject: [PATCH] loop: fix variable names for until_no_eval 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/states/loop.py b/salt/states/loop.py -index 726c8c80165803f3b2d98bf7a197013c53f3ebc8..b631e6c8f62416c04b458a595dc31393987eb904 100644 +index de37b7d60c..533166c5dc 100644 --- a/salt/states/loop.py +++ b/salt/states/loop.py -@@ -185,10 +185,10 @@ def until_no_eval( - ''.format(name, expected)) - if ret['comment']: +@@ -182,10 +182,10 @@ def until_no_eval( + ) + if ret["comment"]: return ret - if not m_args: - m_args = [] @@ -27,6 +27,6 @@ index 726c8c80165803f3b2d98bf7a197013c53f3ebc8..b631e6c8f62416c04b458a595dc31393 if init_wait: time.sleep(init_wait) -- -2.23.0 +2.29.2 diff --git a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch b/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch index 3b1f943..fb57693 100644 --- a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch +++ b/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch @@ -1,7 +1,8 @@ -From c9538180f4dd8875ab57dfa3f51ff59608d2481b Mon Sep 17 00:00:00 2001 +From 6381be1a6e6d863f85dd33c82b9b949b552a7e49 Mon Sep 17 00:00:00 2001 From: Joachim Gleissner Date: Tue, 18 Sep 2018 15:07:13 +0200 -Subject: [PATCH] loosen azure sdk dependencies in azurearm cloud driver +Subject: [PATCH] loosen azure sdk dependencies in azurearm cloud + driver Remove dependency to azure-cli, which is not used at all. Use azure-storage-sdk as fallback if multiapi version is not available. 
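
The fallback added in the hunk below can be exercised standalone; this snippet degrades gracefully when neither SDK flavour is installed, and the HAS_STORAGE flag is an invented name for illustration:

    import pkgutil

    try:
        if pkgutil.find_loader("azure.multiapi"):
            # prefer the multiapi storage SDK when present; find_loader()
            # only probes for the module, it does not import it
            from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
        else:
            from azure.storage import CloudStorageAccount
        HAS_STORAGE = True
    except ImportError:
        HAS_STORAGE = False

    print(HAS_STORAGE)
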
@@ -12,22 +13,22 @@ remove unused import from azurearm driver 1 file changed, 6 insertions(+) diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py -index 2c1fa04ae8..d5757c6d28 100644 +index 8b9254cecb..0e92a56156 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py -@@ -104,6 +104,7 @@ import time - - # Salt libs - from salt.ext import six +@@ -98,6 +98,7 @@ import importlib + import logging + import os + import os.path +import pkgutil - import salt.cache - import salt.config as config - import salt.loader -@@ -126,6 +127,11 @@ try: - import azure.mgmt.network.models as network_models + import pprint + import string + import time +@@ -129,6 +130,11 @@ try: from azure.storage.blob.blockblobservice import BlockBlobService from msrestazure.azure_exceptions import CloudError -+ if pkgutil.find_loader('azure.multiapi'): + ++ if pkgutil.find_loader("azure.multiapi"): + # use multiapi version if available + from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount + else: @@ -36,6 +37,6 @@ index 2c1fa04ae8..d5757c6d28 100644 except ImportError: pass -- -2.16.4 +2.29.2 diff --git a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch index 99c22b9..1e05b8b 100644 --- a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch +++ b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch @@ -1,26 +1,34 @@ -From 93f69a227b7f8c3d4625c0699ab3923d4a0b3127 Mon Sep 17 00:00:00 2001 +From 7d507f8f5879a1de3e707fdb5cadd618a150123f Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 16 Nov 2018 10:54:12 +0100 Subject: [PATCH] Make aptpkg.list_repos compatible on enabled/disabled output --- - salt/modules/aptpkg.py | 1 + - 1 file changed, 1 insertion(+) + salt/modules/aptpkg.py | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index b5503f0b10..8f4d95a195 100644 +index 1e2866b47b..70e173806a 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -1641,6 +1641,7 @@ def list_repos(): - repo['file'] = source.file - repo['comps'] = getattr(source, 'comps', []) - repo['disabled'] = source.disabled -+ repo['enabled'] = not repo['disabled'] # This is for compatibility with the other modules - repo['dist'] = source.dist - repo['type'] = source.type - repo['uri'] = source.uri.rstrip('/') +@@ -1681,10 +1681,13 @@ def list_repos(**kwargs): + repo["file"] = source.file + repo["comps"] = getattr(source, "comps", []) + repo["disabled"] = source.disabled ++ repo["enabled"] = not repo[ ++ "disabled" ++ ] # This is for compatibility with the other modules + repo["dist"] = source.dist + repo["type"] = source.type +- repo["uri"] = source.uri +- repo["line"] = source.line.strip() ++ repo["uri"] = source.uri.rstrip("/") ++ repo["line"] = salt.utils.pkg.deb.strip_uri(source.line.strip()) + repo["architectures"] = getattr(source, "architectures", []) + repos.setdefault(source.uri, []).append(repo) + return repos -- -2.16.4 +2.29.2 diff --git a/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch b/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch deleted file mode 100644 index 277808a..0000000 --- a/make-lazyloader.__init__-call-to-_refresh_file_mappi.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 6af6a52165c70c3be7c8d339a3dd5e539f3c1772 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 23 Apr 2020 09:54:53 +0100 -Subject: [PATCH] Make LazyLoader.__init__ call 
to - _refresh_file_mapping thread-safe (bsc#1169604) - ---- - salt/loader.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/salt/loader.py b/salt/loader.py -index 5bd4773645c77a133701982e19d19739be00a38f..54dadb0b513dbaa4914b0d4b1d343dde709699ad 100644 ---- a/salt/loader.py -+++ b/salt/loader.py -@@ -1251,7 +1251,8 @@ class LazyLoader(salt.utils.lazy.LazyDict): - self.suffix_order.append(suffix) - - self._lock = threading.RLock() -- self._refresh_file_mapping() -+ with self._lock: -+ self._refresh_file_mapping() - - super(LazyLoader, self).__init__() # late init the lazy loader - # create all of the import namespaces --- -2.23.0 - - diff --git a/make-profiles-a-package.patch b/make-profiles-a-package.patch index 58f3855..e10a211 100644 --- a/make-profiles-a-package.patch +++ b/make-profiles-a-package.patch @@ -1,4 +1,4 @@ -From 2aeefa07ff52048e2db5c8c4ebb1cde6efe87cee Mon Sep 17 00:00:00 2001 +From 44dfbc906e4c19eef6c9cfe96c76a99e6077c7ec Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 8 Oct 2018 17:52:07 +0200 Subject: [PATCH] Make profiles a package. @@ -22,6 +22,6 @@ index 0000000000..b86aef30b8 +Profiles for salt-support. +''' -- -2.16.4 +2.29.2 diff --git a/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch b/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch deleted file mode 100644 index 7c0f22c..0000000 --- a/make-salt.ext.tornado.gen-to-use-salt.ext.backports_.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 023d1256106319d042233021c0f200bcdc0cd1f0 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 13 Mar 2020 13:01:57 +0000 -Subject: [PATCH] Make salt.ext.tornado.gen to use salt.ext.backports_abc - on Python 2 - ---- - salt/ext/tornado/gen.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/salt/ext/tornado/gen.py b/salt/ext/tornado/gen.py -index 6cb19730bf1ef3893a4626e9e144eac1c6fa9683..72f422ce28fa43132782a7a0d61b31acd32d138b 100644 ---- a/salt/ext/tornado/gen.py -+++ b/salt/ext/tornado/gen.py -@@ -115,13 +115,13 @@ try: - # py35+ - from collections.abc import Generator as GeneratorType # type: ignore - except ImportError: -- from backports_abc import Generator as GeneratorType # type: ignore -+ from salt.ext.backports_abc import Generator as GeneratorType # type: ignore - - try: - # py35+ - from inspect import isawaitable # type: ignore - except ImportError: -- from backports_abc import isawaitable -+ from salt.ext.backports_abc import isawaitable - except ImportError: - if 'APPENGINE_RUNTIME' not in os.environ: - raise --- -2.23.0 - - diff --git a/make-setup.py-script-to-not-require-setuptools-9.1.patch b/make-setup.py-script-to-not-require-setuptools-9.1.patch index 90e47a7..67bf87c 100644 --- a/make-setup.py-script-to-not-require-setuptools-9.1.patch +++ b/make-setup.py-script-to-not-require-setuptools-9.1.patch @@ -1,4 +1,4 @@ -From b73d7f73bebcde2936a55245471fbcb383778b6d Mon Sep 17 00:00:00 2001 +From 64c2735b64a074acc1ef05a82f9fcf342426f87e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 25 Mar 2020 13:09:52 +0000 @@ -9,17 +9,17 @@ Subject: [PATCH] Make setup.py script to not require setuptools > 9.1 1 file changed, 8 deletions(-) diff --git a/setup.py b/setup.py -index e852080e4b..033ccee8c3 100755 +index 39a66fefba..d9c3d6e303 100755 --- a/setup.py +++ b/setup.py -@@ -727,14 +727,6 @@ class Install(install): +@@ -805,14 +805,6 @@ class Install(install): install.finalize_options(self) def run(self): -- if 
LooseVersion(setuptools.__version__) < LooseVersion('9.1'): +- if LooseVersion(setuptools.__version__) < LooseVersion("9.1"): - sys.stderr.write( -- '\n\nInstalling Salt requires setuptools >= 9.1\n' -- 'Available setuptools version is {}\n\n'.format(setuptools.__version__) +- "\n\nInstalling Salt requires setuptools >= 9.1\n" +- "Available setuptools version is {}\n\n".format(setuptools.__version__) - ) - sys.stderr.flush() - sys.exit(1) @@ -28,6 +28,6 @@ index e852080e4b..033ccee8c3 100755 # _version.py in the build command self.distribution.running_salt_install = True -- -2.16.4 +2.29.2 diff --git a/move-server_id-deprecation-warning-to-reduce-log-spa.patch b/move-server_id-deprecation-warning-to-reduce-log-spa.patch index 4000a59..1053782 100644 --- a/move-server_id-deprecation-warning-to-reduce-log-spa.patch +++ b/move-server_id-deprecation-warning-to-reduce-log-spa.patch @@ -1,57 +1,61 @@ -From c375d1e25e8b5c77b6a8f89855f17df6e49db9f2 Mon Sep 17 00:00:00 2001 +From caffb14059c2d4ab186cb24918f4e53332f697af Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Fri, 14 Jun 2019 15:13:12 +0200 Subject: [PATCH] Move server_id deprecation warning to reduce log spamming (bsc#1135567) (bsc#1135732) --- - salt/grains/core.py | 4 ---- - salt/minion.py | 9 +++++++++ - 2 files changed, 9 insertions(+), 4 deletions(-) + salt/grains/core.py | 7 ------- + salt/minion.py | 10 ++++++++++ + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py -index b58c29dbc3..0f3ccd9b92 100644 +index d7d03c5e70..5f18ba4a58 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -2890,10 +2890,6 @@ def get_server_id(): - if bool(use_crc): - id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff +@@ -3066,13 +3066,6 @@ def get_server_id(): + & 0xFFFFFFFF + ) else: -- salt.utils.versions.warn_until('Sodium', 'This server_id is computed nor by Adler32 neither by CRC32. ' -- 'Please use "server_id_use_crc" option and define algorithm you' -- 'prefer (default "Adler32"). The server_id will be computed with' -- 'Adler32 by default.') +- salt.utils.versions.warn_until( +- "Sodium", +- "This server_id is computed nor by Adler32 neither by CRC32. " +- 'Please use "server_id_use_crc" option and define algorithm you' +- 'prefer (default "Adler32"). 
The server_id will be computed with' +- "Adler32 by default.", +- ) id_hash = _get_hash_by_shell() - server_id = {'server_id': id_hash} + server_id = {"server_id": id_hash} diff --git a/salt/minion.py b/salt/minion.py -index 457f485b0a..4730f68b87 100644 +index 4da665a130..4d271c6d08 100644 --- a/salt/minion.py +++ b/salt/minion.py -@@ -97,6 +97,7 @@ from salt.utils.odict import OrderedDict - from salt.utils.process import (default_signals, - SignalHandlingProcess, - ProcessManager) +@@ -82,6 +82,7 @@ from salt.utils.event import tagify + from salt.utils.network import parse_host_port + from salt.utils.odict import OrderedDict + from salt.utils.process import ProcessManager, SignalHandlingProcess, default_signals +from salt.utils.versions import warn_until - from salt.exceptions import ( - CommandExecutionError, - CommandNotFoundError, -@@ -1002,6 +1003,14 @@ class MinionManager(MinionBase): - if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): + from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq + + HAS_PSUTIL = False +@@ -1096,6 +1097,15 @@ class MinionManager(MinionBase): + ): masters = [masters] -+ if not self.opts.get('server_id_use_crc'): ++ if not self.opts.get("server_id_use_crc"): + warn_until( -+ 'Sodium', -+ 'This server_id is computed nor by Adler32 neither by CRC32. ' -+ 'Please use "server_id_use_crc" option and define algorithm you' -+ 'prefer (default "Adler32"). The server_id will be computed with' -+ 'Adler32 by default.') ++ "Sodium", ++ "This server_id is computed nor by Adler32 neither by CRC32. " ++ 'Please use "server_id_use_crc" option and define algorithm you' ++ 'prefer (default "Adler32"). The server_id will be computed with' ++ "Adler32 by default.", ++ ) + beacons_leader = True for master in masters: s_opts = copy.deepcopy(self.opts) -- -2.16.4 +2.29.2 diff --git a/msgpack-support-versions-1.0.0.patch b/msgpack-support-versions-1.0.0.patch deleted file mode 100644 index 4ed2394..0000000 --- a/msgpack-support-versions-1.0.0.patch +++ /dev/null @@ -1,72 +0,0 @@ -From ef23c1d53e99e19e5b03658aa62b67cfef9adce5 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Thu, 7 May 2020 12:40:55 +0200 -Subject: [PATCH] msgpack: support versions >= 1.0.0 - -A recent change in msgpack >= 1.0.0, update the default value for the -parameter `raw` to False. This change breaks Salt for those versions. - -This patch add the parameter `raw=True` to all the unpack operations, -restoring the old default. 
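
The behaviour change that motivated the deleted patch can be reproduced in a few lines, assuming msgpack >= 1.0.0 is installed:

    import msgpack

    packed = msgpack.packb({"key": "value"})
    # msgpack >= 1.0.0 decodes to str by default (raw=False)...
    print(msgpack.unpackb(packed))            # {'key': 'value'}
    # ...while raw=True restores the old bytes-returning default
    print(msgpack.unpackb(packed, raw=True))  # {b'key': b'value'}
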
- -Fix bsc#1171257 - -(cherry picked from commit 1b3939fb01fc3405d8d222f118617220aecee092) ---- - salt/utils/msgpack.py | 20 +++++++++++++++++--- - 1 file changed, 17 insertions(+), 3 deletions(-) - -diff --git a/salt/utils/msgpack.py b/salt/utils/msgpack.py -index 4b5a256513..027fe81a18 100644 ---- a/salt/utils/msgpack.py -+++ b/salt/utils/msgpack.py -@@ -69,12 +69,26 @@ def _sanitize_msgpack_kwargs(kwargs): - return kwargs - - -+def _sanitize_msgpack_unpack_kwargs(kwargs): -+ """ -+ Clean up msgpack keyword arguments for unpack operations, based on -+ the version -+ https://github.com/msgpack/msgpack-python/blob/master/ChangeLog.rst -+ """ -+ assert isinstance(kwargs, dict) -+ if version >= (1, 0, 0) and kwargs.get("raw", None) is None: -+ log.info("adding `raw=True` argument to msgpack call") -+ kwargs["raw"] = True -+ -+ return _sanitize_msgpack_kwargs(kwargs) -+ -+ - class Unpacker(msgpack.Unpacker): - ''' - Wraps the msgpack.Unpacker and removes non-relevant arguments - ''' - def __init__(self, *args, **kwargs): -- msgpack.Unpacker.__init__(self, *args, **_sanitize_msgpack_kwargs(kwargs)) -+ msgpack.Unpacker.__init__(self, *args, **_sanitize_msgpack_unpack_kwargs(kwargs)) - - - def pack(o, stream, **kwargs): -@@ -113,7 +127,7 @@ def unpack(stream, **kwargs): - By default, this function uses the msgpack module and falls back to - msgpack_pure, if the msgpack is not available. - ''' -- return msgpack.unpack(stream, **_sanitize_msgpack_kwargs(kwargs)) -+ return msgpack.unpack(stream, **_sanitize_msgpack_unpack_kwargs(kwargs)) - - - def unpackb(packed, **kwargs): -@@ -125,7 +139,7 @@ def unpackb(packed, **kwargs): - By default, this function uses the msgpack module and falls back to - msgpack_pure. - ''' -- return msgpack.unpackb(packed, **_sanitize_msgpack_kwargs(kwargs)) -+ return msgpack.unpackb(packed, **_sanitize_msgpack_unpack_kwargs(kwargs)) - - - # alias for compatibility to simplejson/marshal/pickle. --- -2.26.1 - - diff --git a/opensuse-3000-libvirt-engine-fixes-251.patch b/opensuse-3000-libvirt-engine-fixes-251.patch index e5ee199..f16cadc 100644 --- a/opensuse-3000-libvirt-engine-fixes-251.patch +++ b/opensuse-3000-libvirt-engine-fixes-251.patch @@ -1,4 +1,4 @@ -From 1f753894b1a5a3f17d1452a572a9a126fa526998 Mon Sep 17 00:00:00 2001 +From 78f2a450ea51a7e72eb0e712131e23fe2777a4ac Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Fri, 3 Jul 2020 14:43:53 +0200 Subject: [PATCH] Opensuse 3000 libvirt engine fixes (#251) @@ -21,10 +21,10 @@ at the engine start. Co-authored-by: Cédric Bosdonnat --- - changelog/57746.fixed | 1 + - salt/engines/libvirt_events.py | 685 ++++++++++++---------- - tests/unit/engines/test_libvirt_events.py | 187 +++--- - 3 files changed, 495 insertions(+), 378 deletions(-) + changelog/57746.fixed | 1 + + salt/engines/libvirt_events.py | 2 -- + tests/unit/engines/test_libvirt_events.py | 4 ---- + 3 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog/57746.fixed diff --git a/changelog/57746.fixed b/changelog/57746.fixed @@ -35,1306 +35,38 @@ index 0000000000..5102bb04e9 @@ -0,0 +1 @@ +Fix the registration of libvirt pool and nodedev events diff --git a/salt/engines/libvirt_events.py b/salt/engines/libvirt_events.py -index cdb5d1dfe8..96ba9efc91 100644 +index 45983fe9a0..c090b64a10 100644 --- a/salt/engines/libvirt_events.py +++ b/salt/engines/libvirt_events.py -@@ -1,6 +1,6 @@ - # -*- coding: utf-8 -*- - --''' -+""" - An engine that listens for libvirt events and resends them to the salt event bus. 
- - The minimal configuration is the following and will listen to all events on the -@@ -35,7 +35,7 @@ CALLBACK_DEFS constant. If the filters list contains ``all``, all - events will be relayed. - - Be aware that the list of events increases with libvirt versions, for example --network events have been added in libvirt 1.2.1. -+network events have been added in libvirt 1.2.1 and storage events in 2.0.0. - - Running the engine on non-root - ------------------------------ -@@ -63,7 +63,7 @@ A polkit rule like the following one will allow `salt` user to connect to libvir - :depends: libvirt 1.0.0+ python binding - +@@ -63,10 +63,8 @@ A polkit rule like the following one will allow `salt` user to connect to libvir .. versionadded:: 2019.2.0 --''' -+""" + """ - from __future__ import absolute_import, unicode_literals, print_function +- import logging -@@ -73,6 +73,7 @@ import salt.utils.event + +-# Import salt libs + import salt.utils.event # pylint: disable=no-name-in-module,import-error - from salt.ext.six.moves.urllib.parse import urlparse -+ - # pylint: enable=no-name-in-module,import-error - - log = logging.getLogger(__name__) -@@ -85,112 +86,125 @@ except ImportError: - - - def __virtual__(): -- ''' -+ """ - Only load if libvirt python binding is present -- ''' -+ """ - if libvirt is None: -- msg = 'libvirt module not found' -+ msg = "libvirt module not found" - elif libvirt.getVersion() < 1000000: -- msg = 'libvirt >= 1.0.0 required' -+ msg = "libvirt >= 1.0.0 required" - else: -- msg = '' -+ msg = "" - return not bool(msg), msg - - - REGISTER_FUNCTIONS = { -- 'domain': 'domainEventRegisterAny', -- 'network': 'networkEventRegisterAny', -- 'pool': 'storagePoolEventRegisterAny', -- 'nodedev': 'nodeDeviceEventRegisterAny', -- 'secret': 'secretEventRegisterAny' -+ "domain": "domainEventRegisterAny", -+ "network": "networkEventRegisterAny", -+ "pool": "storagePoolEventRegisterAny", -+ "nodedev": "nodeDeviceEventRegisterAny", -+ "secret": "secretEventRegisterAny", - } - - # Handle either BLOCK_JOB or BLOCK_JOB_2, but prefer the latter --if hasattr(libvirt, 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2'): -- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2' -+if hasattr(libvirt, "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2"): -+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2" - else: -- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB' -+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB" - - CALLBACK_DEFS = { -- 'domain': (('lifecycle', None), -- ('reboot', None), -- ('rtc_change', None), -- ('watchdog', None), -- ('graphics', None), -- ('io_error', 'VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON'), -- ('control_error', None), -- ('disk_change', None), -- ('tray_change', None), -- ('pmwakeup', None), -- ('pmsuspend', None), -- ('balloon_change', None), -- ('pmsuspend_disk', None), -- ('device_removed', None), -- ('block_job', BLOCK_JOB_ID), -- ('tunable', None), -- ('agent_lifecycle', None), -- ('device_added', None), -- ('migration_iteration', None), -- ('job_completed', None), -- ('device_removal_failed', None), -- ('metadata_change', None), -- ('block_threshold', None)), -- 'network': (('lifecycle', None),), -- 'pool': (('lifecycle', None), -- ('refresh', None)), -- 'nodedev': (('lifecycle', None), -- ('update', None)), -- 'secret': (('lifecycle', None), -- ('value_changed', None)) -+ "domain": ( -+ ("lifecycle", None), -+ ("reboot", None), -+ ("rtc_change", None), -+ ("watchdog", None), -+ ("graphics", None), -+ ("io_error", "VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON"), -+ ("control_error", None), -+ ("disk_change", None), -+ 
("tray_change", None), -+ ("pmwakeup", None), -+ ("pmsuspend", None), -+ ("balloon_change", None), -+ ("pmsuspend_disk", None), -+ ("device_removed", None), -+ ("block_job", BLOCK_JOB_ID), -+ ("tunable", None), -+ ("agent_lifecycle", None), -+ ("device_added", None), -+ ("migration_iteration", None), -+ ("job_completed", None), -+ ("device_removal_failed", None), -+ ("metadata_change", None), -+ ("block_threshold", None), -+ ), -+ "network": (("lifecycle", None),), -+ "pool": ( -+ ("lifecycle", "VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE"), -+ ("refresh", "VIR_STORAGE_POOL_EVENT_ID_REFRESH"), -+ ), -+ "nodedev": ( -+ ("lifecycle", "VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE"), -+ ("update", "VIR_NODE_DEVICE_EVENT_ID_UPDATE"), -+ ), -+ "secret": (("lifecycle", None), ("value_changed", None)), - } - - - def _compute_subprefix(attr): -- ''' -+ """ - Get the part before the first '_' or the end of attr including - the potential '_' -- ''' -- return ''.join((attr.split('_')[0], '_' if len(attr.split('_')) > 1 else '')) -+ """ -+ return "".join((attr.split("_")[0], "_" if len(attr.split("_")) > 1 else "")) - - - def _get_libvirt_enum_string(prefix, value): -- ''' -+ """ - Convert the libvirt enum integer value into a human readable string. - - :param prefix: start of the libvirt attribute to look for. - :param value: integer to convert to string -- ''' -- attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)] -+ """ -+ attributes = [ -+ attr[len(prefix) :] for attr in libvirt.__dict__ if attr.startswith(prefix) -+ ] - - # Filter out the values starting with a common base as they match another enum - prefixes = [_compute_subprefix(p) for p in attributes] - counts = {p: prefixes.count(p) for p in prefixes} -- sub_prefixes = [p for p, count in counts.items() if count > 1 or (p.endswith('_') and p[:-1] in prefixes)] -- filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes] -+ sub_prefixes = [ -+ p -+ for p, count in counts.items() -+ if count > 1 or (p.endswith("_") and p[:-1] in prefixes) -+ ] -+ filtered = [ -+ attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes -+ ] - - for candidate in filtered: -- if value == getattr(libvirt, ''.join((prefix, candidate))): -- name = candidate.lower().replace('_', ' ') -+ if value == getattr(libvirt, "".join((prefix, candidate))): -+ name = candidate.lower().replace("_", " ") - return name -- return 'unknown' -+ return "unknown" - - - def _get_domain_event_detail(event, detail): -- ''' -+ """ - Convert event and detail numeric values into a tuple of human readable strings -- ''' -- event_name = _get_libvirt_enum_string('VIR_DOMAIN_EVENT_', event) -- if event_name == 'unknown': -- return event_name, 'unknown' -+ """ -+ event_name = _get_libvirt_enum_string("VIR_DOMAIN_EVENT_", event) -+ if event_name == "unknown": -+ return event_name, "unknown" - -- prefix = 'VIR_DOMAIN_EVENT_{0}_'.format(event_name.upper()) -+ prefix = "VIR_DOMAIN_EVENT_{0}_".format(event_name.upper()) - detail_name = _get_libvirt_enum_string(prefix, detail) - - return event_name, detail_name - - - def _salt_send_event(opaque, conn, data): -- ''' -+ """ - Convenience function adding common data to the event and sending it - on the salt event bus. - -@@ -198,10 +212,10 @@ def _salt_send_event(opaque, conn, data): - This is a dict with 'prefix', 'object' and 'event' keys. 
- :param conn: libvirt connection - :param data: additional event data dict to send -- ''' -- tag_prefix = opaque['prefix'] -- object_type = opaque['object'] -- event_type = opaque['event'] -+ """ -+ tag_prefix = opaque["prefix"] -+ object_type = opaque["object"] -+ event_type = opaque["event"] - - # Prepare the connection URI to fit in the tag - # qemu+ssh://user@host:1234/system -> qemu+ssh/user@host:1234/system -@@ -209,30 +223,28 @@ def _salt_send_event(opaque, conn, data): - uri_tag = [uri.scheme] - if uri.netloc: - uri_tag.append(uri.netloc) -- path = uri.path.strip('/') -+ path = uri.path.strip("/") - if path: - uri_tag.append(path) - uri_str = "/".join(uri_tag) - - # Append some common data -- all_data = { -- 'uri': conn.getURI() -- } -+ all_data = {"uri": conn.getURI()} - all_data.update(data) - -- tag = '/'.join((tag_prefix, uri_str, object_type, event_type)) -+ tag = "/".join((tag_prefix, uri_str, object_type, event_type)) - - # Actually send the event in salt -- if __opts__.get('__role') == 'master': -- salt.utils.event.get_master_event( -- __opts__, -- __opts__['sock_dir']).fire_event(all_data, tag) -+ if __opts__.get("__role") == "master": -+ salt.utils.event.get_master_event(__opts__, __opts__["sock_dir"]).fire_event( -+ all_data, tag -+ ) - else: -- __salt__['event.send'](tag, all_data) -+ __salt__["event.send"](tag, all_data) - - - def _salt_send_domain_event(opaque, conn, domain, event, event_data): -- ''' -+ """ - Helper function send a salt event for a libvirt domain. - - :param opaque: the opaque data that is passed to the callback. -@@ -241,375 +253,428 @@ def _salt_send_domain_event(opaque, conn, domain, event, event_data): - :param domain: name of the domain related to the event - :param event: name of the event - :param event_data: additional event data dict to send -- ''' -+ """ - data = { -- 'domain': { -- 'name': domain.name(), -- 'id': domain.ID(), -- 'uuid': domain.UUIDString() -+ "domain": { -+ "name": domain.name(), -+ "id": domain.ID(), -+ "uuid": domain.UUIDString(), - }, -- 'event': event -+ "event": event, - } - data.update(event_data) - _salt_send_event(opaque, conn, data) - - - def _domain_event_lifecycle_cb(conn, domain, event, detail, opaque): -- ''' -+ """ - Domain lifecycle events handler -- ''' -+ """ - event_str, detail_str = _get_domain_event_detail(event, detail) - -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'event': event_str, -- 'detail': detail_str -- }) -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ {"event": event_str, "detail": detail_str}, -+ ) - - - def _domain_event_reboot_cb(conn, domain, opaque): -- ''' -+ """ - Domain reboot events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {}) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {}) - - - def _domain_event_rtc_change_cb(conn, domain, utcoffset, opaque): -- ''' -+ """ - Domain RTC change events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'utcoffset': utcoffset -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, conn, domain, opaque["event"], {"utcoffset": utcoffset} -+ ) - - - def _domain_event_watchdog_cb(conn, domain, action, opaque): -- ''' -+ """ - Domain watchdog events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action) -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ 
{"action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_WATCHDOG_", action)}, -+ ) - - - def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque): -- ''' -+ """ - Domain I/O Error events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'srcPath': srcpath, -- 'dev': devalias, -- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action), -- 'reason': reason -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "srcPath": srcpath, -+ "dev": devalias, -+ "action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_IO_ERROR_", action), -+ "reason": reason, -+ }, -+ ) - - --def _domain_event_graphics_cb(conn, domain, phase, local, remote, auth, subject, opaque): -- ''' -+def _domain_event_graphics_cb( -+ conn, domain, phase, local, remote, auth, subject, opaque -+): -+ """ - Domain graphics events handler -- ''' -- prefix = 'VIR_DOMAIN_EVENT_GRAPHICS_' -+ """ -+ prefix = "VIR_DOMAIN_EVENT_GRAPHICS_" - - def get_address(addr): -- ''' -+ """ - transform address structure into event data piece -- ''' -- return {'family': _get_libvirt_enum_string('{0}_ADDRESS_'.format(prefix), addr['family']), -- 'node': addr['node'], -- 'service': addr['service']} -+ """ -+ return { -+ "family": _get_libvirt_enum_string( -+ "{0}_ADDRESS_".format(prefix), addr["family"] -+ ), -+ "node": addr["node"], -+ "service": addr["service"], -+ } - -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'phase': _get_libvirt_enum_string(prefix, phase), -- 'local': get_address(local), -- 'remote': get_address(remote), -- 'authScheme': auth, -- 'subject': [{'type': item[0], 'name': item[1]} for item in subject] -- }) -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "phase": _get_libvirt_enum_string(prefix, phase), -+ "local": get_address(local), -+ "remote": get_address(remote), -+ "authScheme": auth, -+ "subject": [{"type": item[0], "name": item[1]} for item in subject], -+ }, -+ ) - - - def _domain_event_control_error_cb(conn, domain, opaque): -- ''' -+ """ - Domain control error events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {}) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {}) - - - def _domain_event_disk_change_cb(conn, domain, old_src, new_src, dev, reason, opaque): -- ''' -+ """ - Domain disk change events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'oldSrcPath': old_src, -- 'newSrcPath': new_src, -- 'dev': dev, -- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_DISK_', reason) -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "oldSrcPath": old_src, -+ "newSrcPath": new_src, -+ "dev": dev, -+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_DISK_", reason), -+ }, -+ ) - - - def _domain_event_tray_change_cb(conn, domain, dev, reason, opaque): -- ''' -+ """ - Domain tray change events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'dev': dev, -- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_TRAY_CHANGE_', reason) -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "dev": dev, -+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_TRAY_CHANGE_", reason), -+ }, -+ ) - - - def _domain_event_pmwakeup_cb(conn, domain, reason, opaque): -- ''' -+ """ - Domain wakeup events handler -- ''' -- 
_salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'reason': 'unknown' # currently unused -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused -+ ) - - - def _domain_event_pmsuspend_cb(conn, domain, reason, opaque): -- ''' -+ """ - Domain suspend events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'reason': 'unknown' # currently unused -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused -+ ) - - - def _domain_event_balloon_change_cb(conn, domain, actual, opaque): -- ''' -+ """ - Domain balloon change events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'actual': actual -- }) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"actual": actual}) - - - def _domain_event_pmsuspend_disk_cb(conn, domain, reason, opaque): -- ''' -+ """ - Domain disk suspend events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'reason': 'unknown' # currently unused -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused -+ ) - - - def _domain_event_block_job_cb(conn, domain, disk, job_type, status, opaque): -- ''' -+ """ - Domain block job events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'disk': disk, -- 'type': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_TYPE_', job_type), -- 'status': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_', status) -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "disk": disk, -+ "type": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_TYPE_", job_type), -+ "status": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_", status), -+ }, -+ ) - - - def _domain_event_device_removed_cb(conn, domain, dev, opaque): -- ''' -+ """ - Domain device removal events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'dev': dev -- }) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev}) - - - def _domain_event_tunable_cb(conn, domain, params, opaque): -- ''' -+ """ - Domain tunable events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'params': params -- }) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params}) - - - # pylint: disable=invalid-name - def _domain_event_agent_lifecycle_cb(conn, domain, state, reason, opaque): -- ''' -+ """ - Domain agent lifecycle events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'state': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_', state), -- 'reason': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_', reason) -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "state": _get_libvirt_enum_string( -+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_", state -+ ), -+ "reason": _get_libvirt_enum_string( -+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_", reason -+ ), -+ }, -+ ) - - - def _domain_event_device_added_cb(conn, domain, dev, opaque): -- ''' -+ """ - Domain device addition events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'dev': dev -- }) -+ """ -+ 
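A note on the recurring opaque argument: it is the dict handed over at registration time by _register_callback further down, so every handler can recover the object and event names it was registered for, and _salt_send_event folds them into the fired tag. Illustratively, assuming the default prefix from start():

    opaque = {
        "prefix": "salt/engines/libvirt_events",
        "object": "domain",
        "event": "lifecycle",
    }
    # The connection URI is flattened for the tag, e.g.
    #   qemu+ssh://user@host:1234/system -> qemu+ssh/user@host:1234/system
    # so the resulting tag reads something like:
    #   salt/engines/libvirt_events/qemu+ssh/user@host:1234/system/domain/lifecycle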
_salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev}) - - - # pylint: disable=invalid-name - def _domain_event_migration_iteration_cb(conn, domain, iteration, opaque): -- ''' -+ """ - Domain migration iteration events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'iteration': iteration -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, conn, domain, opaque["event"], {"iteration": iteration} -+ ) - - - def _domain_event_job_completed_cb(conn, domain, params, opaque): -- ''' -+ """ - Domain job completion events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'params': params -- }) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params}) - - - def _domain_event_device_removal_failed_cb(conn, domain, dev, opaque): -- ''' -+ """ - Domain device removal failure events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'dev': dev -- }) -+ """ -+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev}) - - - def _domain_event_metadata_change_cb(conn, domain, mtype, nsuri, opaque): -- ''' -+ """ - Domain metadata change events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'type': _get_libvirt_enum_string('VIR_DOMAIN_METADATA_', mtype), -- 'nsuri': nsuri -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ { -+ "type": _get_libvirt_enum_string("VIR_DOMAIN_METADATA_", mtype), -+ "nsuri": nsuri, -+ }, -+ ) - - --def _domain_event_block_threshold_cb(conn, domain, dev, path, threshold, excess, opaque): -- ''' -+def _domain_event_block_threshold_cb( -+ conn, domain, dev, path, threshold, excess, opaque -+): -+ """ - Domain block threshold events handler -- ''' -- _salt_send_domain_event(opaque, conn, domain, opaque['event'], { -- 'dev': dev, -- 'path': path, -- 'threshold': threshold, -- 'excess': excess -- }) -+ """ -+ _salt_send_domain_event( -+ opaque, -+ conn, -+ domain, -+ opaque["event"], -+ {"dev": dev, "path": path, "threshold": threshold, "excess": excess}, -+ ) - - - def _network_event_lifecycle_cb(conn, net, event, detail, opaque): -- ''' -+ """ - Network lifecycle events handler -- ''' -- -- _salt_send_event(opaque, conn, { -- 'network': { -- 'name': net.name(), -- 'uuid': net.UUIDString() -+ """ -+ -+ _salt_send_event( -+ opaque, -+ conn, -+ { -+ "network": {"name": net.name(), "uuid": net.UUIDString()}, -+ "event": _get_libvirt_enum_string("VIR_NETWORK_EVENT_", event), -+ "detail": "unknown", # currently unused - }, -- 'event': _get_libvirt_enum_string('VIR_NETWORK_EVENT_', event), -- 'detail': 'unknown' # currently unused -- }) -+ ) - - - def _pool_event_lifecycle_cb(conn, pool, event, detail, opaque): -- ''' -+ """ - Storage pool lifecycle events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'pool': { -- 'name': pool.name(), -- 'uuid': pool.UUIDString() -+ """ -+ _salt_send_event( -+ opaque, -+ conn, -+ { -+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()}, -+ "event": _get_libvirt_enum_string("VIR_STORAGE_POOL_EVENT_", event), -+ "detail": "unknown", # currently unused - }, -- 'event': _get_libvirt_enum_string('VIR_STORAGE_POOL_EVENT_', event), -- 'detail': 'unknown' # currently unused -- }) -+ ) - - - def _pool_event_refresh_cb(conn, pool, opaque): -- ''' -+ """ - Storage pool refresh events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'pool': { -- 'name': pool.name(), -- 'uuid': 
pool.UUIDString() -+ """ -+ _salt_send_event( -+ opaque, -+ conn, -+ { -+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()}, -+ "event": opaque["event"], - }, -- 'event': opaque['event'] -- }) -+ ) - - - def _nodedev_event_lifecycle_cb(conn, dev, event, detail, opaque): -- ''' -+ """ - Node device lifecycle events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'nodedev': { -- 'name': dev.name() -+ """ -+ _salt_send_event( -+ opaque, -+ conn, -+ { -+ "nodedev": {"name": dev.name()}, -+ "event": _get_libvirt_enum_string("VIR_NODE_DEVICE_EVENT_", event), -+ "detail": "unknown", # currently unused - }, -- 'event': _get_libvirt_enum_string('VIR_NODE_DEVICE_EVENT_', event), -- 'detail': 'unknown' # currently unused -- }) -+ ) - - - def _nodedev_event_update_cb(conn, dev, opaque): -- ''' -+ """ - Node device update events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'nodedev': { -- 'name': dev.name() -- }, -- 'event': opaque['event'] -- }) -+ """ -+ _salt_send_event( -+ opaque, conn, {"nodedev": {"name": dev.name()}, "event": opaque["event"]} -+ ) - - - def _secret_event_lifecycle_cb(conn, secret, event, detail, opaque): -- ''' -+ """ - Secret lifecycle events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'secret': { -- 'uuid': secret.UUIDString() -+ """ -+ _salt_send_event( -+ opaque, -+ conn, -+ { -+ "secret": {"uuid": secret.UUIDString()}, -+ "event": _get_libvirt_enum_string("VIR_SECRET_EVENT_", event), -+ "detail": "unknown", # currently unused - }, -- 'event': _get_libvirt_enum_string('VIR_SECRET_EVENT_', event), -- 'detail': 'unknown' # currently unused -- }) -+ ) - - - def _secret_event_value_changed_cb(conn, secret, opaque): -- ''' -+ """ - Secret value change events handler -- ''' -- _salt_send_event(opaque, conn, { -- 'secret': { -- 'uuid': secret.UUIDString() -- }, -- 'event': opaque['event'] -- }) -+ """ -+ _salt_send_event( -+ opaque, -+ conn, -+ {"secret": {"uuid": secret.UUIDString()}, "event": opaque["event"]}, -+ ) - - - def _cleanup(cnx): -- ''' -+ """ - Close the libvirt connection - - :param cnx: libvirt connection -- ''' -- log.debug('Closing libvirt connection: %s', cnx.getURI()) -+ """ -+ log.debug("Closing libvirt connection: %s", cnx.getURI()) - cnx.close() - - - def _callbacks_cleanup(cnx, callback_ids): -- ''' -+ """ - Unregister all the registered callbacks - - :param cnx: libvirt connection - :param callback_ids: dictionary mapping a libvirt object type to an ID list - of callbacks to deregister -- ''' -+ """ - for obj, ids in callback_ids.items(): - register_name = REGISTER_FUNCTIONS[obj] -- deregister_name = register_name.replace('Reg', 'Dereg') -+ deregister_name = register_name.replace("Reg", "Dereg") - deregister = getattr(cnx, deregister_name) - for callback_id in ids: - deregister(callback_id) - - - def _register_callback(cnx, tag_prefix, obj, event, real_id): -- ''' -+ """ - Helper function registering a callback - - :param cnx: libvirt connection -@@ -620,10 +685,10 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id): - :param real_id: the libvirt name of an alternative event id to use or None - - :rtype integer value needed to deregister the callback -- ''' -+ """ - libvirt_name = real_id - if real_id is None: -- libvirt_name = 'VIR_{0}_EVENT_ID_{1}'.format(obj, event).upper() -+ libvirt_name = "VIR_{0}_EVENT_ID_{1}".format(obj, event).upper() - - if not hasattr(libvirt, libvirt_name): - log.warning('Skipping "%s/%s" events: libvirt too old', obj, event) -@@ -633,34 +698,34 @@ def _register_callback(cnx, 
tag_prefix, obj, event, real_id): - callback_name = "_{0}_event_{1}_cb".format(obj, event) - callback = globals().get(callback_name, None) - if callback is None: -- log.error('Missing function %s in engine', callback_name) -+ log.error("Missing function %s in engine", callback_name) - return None - - register = getattr(cnx, REGISTER_FUNCTIONS[obj]) -- return register(None, libvirt_id, callback, -- {'prefix': tag_prefix, -- 'object': obj, -- 'event': event}) -+ return register( -+ None, -+ libvirt_id, -+ callback, -+ {"prefix": tag_prefix, "object": obj, "event": event}, -+ ) - - - def _append_callback_id(ids, obj, callback_id): -- ''' -+ """ - Helper function adding a callback ID to the IDs dict. - The callback ids dict maps an object to event callback ids. - - :param ids: dict of callback IDs to update - :param obj: one of the keys of REGISTER_FUNCTIONS - :param callback_id: the result of _register_callback -- ''' -+ """ - if obj not in ids: - ids[obj] = [] - ids[obj].append(callback_id) - - --def start(uri=None, -- tag_prefix='salt/engines/libvirt_events', -- filters=None): -- ''' -+def start(uri=None, tag_prefix="salt/engines/libvirt_events", filters=None): -+ """ - Listen to libvirt events and forward them to salt. - - :param uri: libvirt URI to listen on. -@@ -668,14 +733,14 @@ def start(uri=None, - :param tag_prefix: the begining of the salt event tag to use. - Defaults to 'salt/engines/libvirt_events' - :param filters: the list of event of listen on. Defaults to 'all' -- ''' -+ """ - if filters is None: -- filters = ['all'] -+ filters = ["all"] - try: - libvirt.virEventRegisterDefaultImpl() - - cnx = libvirt.openReadOnly(uri) -- log.debug('Opened libvirt uri: %s', cnx.getURI()) -+ log.debug("Opened libvirt uri: %s", cnx.getURI()) - - callback_ids = {} - all_filters = "all" in filters -@@ -683,17 +748,19 @@ def start(uri=None, - for obj, event_defs in CALLBACK_DEFS.items(): - for event, real_id in event_defs: - event_filter = "/".join((obj, event)) -- if event_filter not in filters and obj not in filters and not all_filters: -+ if ( -+ event_filter not in filters -+ and obj not in filters -+ and not all_filters -+ ): - continue -- registered_id = _register_callback(cnx, tag_prefix, -- obj, event, real_id) -+ registered_id = _register_callback(cnx, tag_prefix, obj, event, real_id) - if registered_id: - _append_callback_id(callback_ids, obj, registered_id) - - exit_loop = False - while not exit_loop: - exit_loop = libvirt.virEventRunDefaultImpl() < 0 -- log.debug('=== in the loop exit_loop %s ===', exit_loop) - - except Exception as err: # pylint: disable=broad-except - log.exception(err) diff --git a/tests/unit/engines/test_libvirt_events.py b/tests/unit/engines/test_libvirt_events.py -index d9143a320b..5f1488e422 100644 +index 63623c4d79..5cf80f0bc7 100644 --- a/tests/unit/engines/test_libvirt_events.py +++ b/tests/unit/engines/test_libvirt_events.py -@@ -1,16 +1,14 @@ - # -*- coding: utf-8 -*- --''' -+""" +@@ -1,12 +1,8 @@ + """ unit tests for the libvirt_events engine --''' -+""" - # Import Python libs - from __future__ import absolute_import, print_function, unicode_literals + """ +-# Import Python libs - # Import Salt Testing Libs - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.unit import TestCase --from tests.support.mock import ( -- MagicMock, -- patch) -+from tests.support.mock import MagicMock, patch - - # Import Salt Libs +-# Import Salt Libs import salt.engines.libvirt_events as libvirt_events -@@ -20,68 +18,78 @@ import 
salt.engines.libvirt_events as libvirt_events - - - class EngineLibvirtEventTestCase(TestCase, LoaderModuleMockMixin): -- ''' -+ """ - Test cases for salt.engine.libvirt_events -- ''' -+ """ - - def setup_loader_modules(self): -- patcher = patch('salt.engines.libvirt_events.libvirt') -+ patcher = patch("salt.engines.libvirt_events.libvirt") - self.mock_libvirt = patcher.start() - self.mock_libvirt.getVersion.return_value = 2000000 -- self.mock_libvirt.virEventRunDefaultImpl.return_value = -1 # Don't loop for ever -+ self.mock_libvirt.virEventRunDefaultImpl.return_value = ( -+ -1 -+ ) # Don't loop for ever - self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 - self.mock_libvirt.VIR_DOMAIN_EVENT_ID_REBOOT = 1 -+ self.mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE = 0 -+ self.mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_REFRESH = 1 -+ self.mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE = 0 -+ self.mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_UPDATE = 1 - self.addCleanup(patcher.stop) -- self.addCleanup(delattr, self, 'mock_libvirt') -+ self.addCleanup(delattr, self, "mock_libvirt") - return {libvirt_events: {}} - -- @patch('salt.engines.libvirt_events.libvirt', -- VIR_PREFIX_NONE=0, -- VIR_PREFIX_ONE=1, -- VIR_PREFIX_TWO=2, -- VIR_PREFIX_SUB_FOO=0, -- VIR_PREFIX_SUB_BAR=1, -- VIR_PREFIX_SUB_FOOBAR=2) -+ @patch( -+ "salt.engines.libvirt_events.libvirt", -+ VIR_PREFIX_NONE=0, -+ VIR_PREFIX_ONE=1, -+ VIR_PREFIX_TWO=2, -+ VIR_PREFIX_SUB_FOO=0, -+ VIR_PREFIX_SUB_BAR=1, -+ VIR_PREFIX_SUB_FOOBAR=2, -+ ) - def test_get_libvirt_enum_string_subprefix(self, libvirt_mock): -- ''' -+ """ - Make sure the libvirt enum value to string works reliably with - elements with a sub prefix, eg VIR_PREFIX_SUB_* in this case. -- ''' -+ """ - # Test case with a sub prefix - -- assert libvirt_events._get_libvirt_enum_string('VIR_PREFIX_', 2) == 'two' -+ assert libvirt_events._get_libvirt_enum_string("VIR_PREFIX_", 2) == "two" - -- @patch('salt.engines.libvirt_events.libvirt', -- VIR_PREFIX_FOO=0, -- VIR_PREFIX_BAR_FOO=1) -+ @patch( -+ "salt.engines.libvirt_events.libvirt", VIR_PREFIX_FOO=0, VIR_PREFIX_BAR_FOO=1 -+ ) - def test_get_libvirt_enum_string_underscores(self, libvirt_mock): -- ''' -+ """ - Make sure the libvirt enum value to string works reliably and items - with an underscore aren't confused with sub prefixes. 
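To see the mapping these tests pin down in isolation, a throwaway namespace can stand in for the patched libvirt module (reusing the simplified lookup sketched earlier):

    import types

    fake = types.SimpleNamespace(VIR_PREFIX_FOO=0, VIR_PREFIX_BAR_FOO=1)
    # VIR_PREFIX_BAR_FOO -> "bar foo": prefix stripped, lowercased,
    # remaining underscores rendered as spaces.
    assert _enum_to_string_sketch("VIR_PREFIX_", 1, fake) == "bar foo"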
-- ''' -- assert libvirt_events._get_libvirt_enum_string('VIR_PREFIX_', 1) == 'bar foo' - -- @patch('salt.engines.libvirt_events.libvirt', -- VIR_DOMAIN_EVENT_CRASHED_PANICKED=0, -- VIR_DOMAIN_EVENT_DEFINED=0, -- VIR_DOMAIN_EVENT_UNDEFINED=1, -- VIR_DOMAIN_EVENT_CRASHED=2, -- VIR_DOMAIN_EVENT_DEFINED_ADDED=0, -- VIR_DOMAIN_EVENT_DEFINED_UPDATED=1) -+ """ -+ assert libvirt_events._get_libvirt_enum_string("VIR_PREFIX_", 1) == "bar foo" -+ -+ @patch( -+ "salt.engines.libvirt_events.libvirt", -+ VIR_DOMAIN_EVENT_CRASHED_PANICKED=0, -+ VIR_DOMAIN_EVENT_DEFINED=0, -+ VIR_DOMAIN_EVENT_UNDEFINED=1, -+ VIR_DOMAIN_EVENT_CRASHED=2, -+ VIR_DOMAIN_EVENT_DEFINED_ADDED=0, -+ VIR_DOMAIN_EVENT_DEFINED_UPDATED=1, -+ ) - def test_get_domain_event_detail(self, mock_libvirt): -- ''' -+ """ - Test get_domain_event_detail function -- ''' -- assert libvirt_events._get_domain_event_detail(1, 2) == ('undefined', 'unknown') -- assert libvirt_events._get_domain_event_detail(0, 1) == ('defined', 'updated') -- assert libvirt_events._get_domain_event_detail(4, 2) == ('unknown', 'unknown') -+ """ -+ assert libvirt_events._get_domain_event_detail(1, 2) == ("undefined", "unknown") -+ assert libvirt_events._get_domain_event_detail(0, 1) == ("defined", "updated") -+ assert libvirt_events._get_domain_event_detail(4, 2) == ("unknown", "unknown") - -- @patch('salt.engines.libvirt_events.libvirt', VIR_NETWORK_EVENT_ID_LIFECYCLE=1000) -+ @patch("salt.engines.libvirt_events.libvirt", VIR_NETWORK_EVENT_ID_LIFECYCLE=1000) - def test_event_register(self, mock_libvirt): -- ''' -+ """ - Test that the libvirt_events engine actually registers events catch them and cleans - before leaving the place. -- ''' -+ """ - mock_cnx = MagicMock() - mock_libvirt.openReadOnly.return_value = mock_cnx - -@@ -90,71 +98,112 @@ class EngineLibvirtEventTestCase(TestCase, LoaderModuleMockMixin): - - mock_cnx.networkEventRegisterAny.return_value = 10000 - -- libvirt_events.start('test:///', 'test/prefix') -+ libvirt_events.start("test:///", "test/prefix") - - # Check that the connection has been opened -- mock_libvirt.openReadOnly.assert_called_once_with('test:///') -+ mock_libvirt.openReadOnly.assert_called_once_with("test:///") - - # Check that the connection has been closed - mock_cnx.close.assert_called_once() - - # Check events registration and deregistration - mock_cnx.domainEventRegisterAny.assert_any_call( -- None, mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, -- libvirt_events._domain_event_lifecycle_cb, -- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'}) -+ None, -+ mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, -+ libvirt_events._domain_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"}, -+ ) - mock_cnx.networkEventRegisterAny.assert_any_call( -- None, mock_libvirt.VIR_NETWORK_EVENT_ID_LIFECYCLE, -- libvirt_events._network_event_lifecycle_cb, -- {'prefix': 'test/prefix', 'object': 'network', 'event': 'lifecycle'}) -+ None, -+ mock_libvirt.VIR_NETWORK_EVENT_ID_LIFECYCLE, -+ libvirt_events._network_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "network", "event": "lifecycle"}, -+ ) -+ mock_cnx.storagePoolEventRegisterAny.assert_any_call( -+ None, -+ mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE, -+ libvirt_events._pool_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "pool", "event": "lifecycle"}, -+ ) -+ mock_cnx.storagePoolEventRegisterAny.assert_any_call( -+ None, -+ mock_libvirt.VIR_STORAGE_POOL_EVENT_ID_REFRESH, -+ libvirt_events._pool_event_refresh_cb, -+ 
{"prefix": "test/prefix", "object": "pool", "event": "refresh"}, -+ ) -+ mock_cnx.nodeDeviceEventRegisterAny.assert_any_call( -+ None, -+ mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_LIFECYCLE, -+ libvirt_events._nodedev_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "nodedev", "event": "lifecycle"}, -+ ) -+ mock_cnx.nodeDeviceEventRegisterAny.assert_any_call( -+ None, -+ mock_libvirt.VIR_NODE_DEVICE_EVENT_ID_UPDATE, -+ libvirt_events._nodedev_event_update_cb, -+ {"prefix": "test/prefix", "object": "nodedev", "event": "update"}, -+ ) - - # Check that the deregister events are called with the result of register - mock_cnx.networkEventDeregisterAny.assert_called_with( -- mock_cnx.networkEventRegisterAny.return_value) -+ mock_cnx.networkEventRegisterAny.return_value -+ ) - - # Check that the default 'all' filter actually worked -- counts = {obj: len(callback_def) for obj, callback_def in libvirt_events.CALLBACK_DEFS.items()} -+ counts = { -+ obj: len(callback_def) -+ for obj, callback_def in libvirt_events.CALLBACK_DEFS.items() -+ } - for obj, count in counts.items(): - register = libvirt_events.REGISTER_FUNCTIONS[obj] - assert getattr(mock_cnx, register).call_count == count - - def test_event_skipped(self): -- ''' -+ """ - Test that events are skipped if their ID isn't defined in the libvirt - module (older libvirt) -- ''' -- self.mock_libvirt.mock_add_spec([ -- 'openReadOnly', -- 'virEventRegisterDefaultImpl', -- 'virEventRunDefaultImpl', -- 'VIR_DOMAIN_EVENT_ID_LIFECYCLE'], spec_set=True) -- -- libvirt_events.start('test:///', 'test/prefix') -+ """ -+ self.mock_libvirt.mock_add_spec( -+ [ -+ "openReadOnly", -+ "virEventRegisterDefaultImpl", -+ "virEventRunDefaultImpl", -+ "VIR_DOMAIN_EVENT_ID_LIFECYCLE", -+ ], -+ spec_set=True, -+ ) -+ -+ libvirt_events.start("test:///", "test/prefix") - - # Check events registration and deregistration - mock_cnx = self.mock_libvirt.openReadOnly.return_value - - mock_cnx.domainEventRegisterAny.assert_any_call( -- None, self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, -- libvirt_events._domain_event_lifecycle_cb, -- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'}) -+ None, -+ self.mock_libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, -+ libvirt_events._domain_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"}, -+ ) - - # Network events should have been skipped - mock_cnx.networkEventRegisterAny.assert_not_called() - - def test_event_filtered(self): -- ''' -+ """ - Test that events are skipped if their ID isn't defined in the libvirt - module (older libvirt) -- ''' -- libvirt_events.start('test', 'test/prefix', 'domain/lifecycle') -+ """ -+ libvirt_events.start("test", "test/prefix", "domain/lifecycle") - - # Check events registration and deregistration - mock_cnx = self.mock_libvirt.openReadOnly.return_value - - mock_cnx.domainEventRegisterAny.assert_any_call( -- None, 0, libvirt_events._domain_event_lifecycle_cb, -- {'prefix': 'test/prefix', 'object': 'domain', 'event': 'lifecycle'}) -+ None, -+ 0, -+ libvirt_events._domain_event_lifecycle_cb, -+ {"prefix": "test/prefix", "object": "domain", "event": "lifecycle"}, -+ ) - - # Network events should have been filtered out - mock_cnx.networkEventRegisterAny.assert_not_called() +-# Import Salt Testing Libs + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, patch + from tests.support.unit import TestCase -- -2.27.0 +2.29.2 diff --git a/opensuse-3000-virt-defined-states-222.patch 
b/opensuse-3000-virt-defined-states-222.patch index 7721acf..afaa867 100644 --- a/opensuse-3000-virt-defined-states-222.patch +++ b/opensuse-3000-virt-defined-states-222.patch @@ -1,4 +1,4 @@ -From e5d42c6313ba051f22f83cbde3da9410fd7fc3b9 Mon Sep 17 00:00:00 2001 +From 8deed909147041f8befad8fee9d27bb81595ed23 Mon Sep 17 00:00:00 2001 From: Cedric Bosdonnat Date: Fri, 13 Mar 2020 16:38:08 +0100 Subject: [PATCH] openSUSE-3000 virt-defined-states (#222) @@ -44,756 +44,369 @@ virt.running state now may call virt.update with None mem and cpu parameters. This was not handled in _gen_xml(). Also add some more tests cases matching this for virt.update. --- - salt/modules/virt.py | 16 +- - salt/states/virt.py | 673 +++++++++++++++----- - tests/unit/modules/test_virt.py | 26 + - tests/unit/states/test_virt.py | 1346 ++++++++++++++++++++++++++++++++------- - 4 files changed, 1665 insertions(+), 396 deletions(-) + salt/modules/virt.py | 44 +- + salt/states/virt.py | 268 +++++++--- + tests/unit/modules/test_virt.py | 845 +----------------------------- + tests/unit/states/test_virt.py | 893 +++++++++++++++++++++++++++----- + 4 files changed, 971 insertions(+), 1079 deletions(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 3889238ecd..f0820e8825 100644 +index 362c2a68b5..7314bf1d6e 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py -@@ -1783,6 +1783,7 @@ def update(name, - graphics=None, - live=True, - boot=None, -+ test=False, - **kwargs): - ''' - Update the definition of an existing domain. -@@ -1835,6 +1836,10 @@ def update(name, +@@ -2579,7 +2579,6 @@ def update( + live=True, + boot=None, + test=False, +- boot_dev=None, + **kwargs + ): + """ +@@ -2653,17 +2652,9 @@ def update( .. versionadded:: 3000 -+ :param test: run in dry-run mode if set to True -+ +- :param boot_dev: +- Space separated list of devices to boot from sorted by decreasing priority. +- Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. +- +- By default, the value will ``"hd"``. +- +- .. versionadded:: 3002 +- + :param test: run in dry-run mode if set to True + +- .. versionadded:: 3001 + .. versionadded:: sodium -+ + :return: - Returns a dictionary indicating the status of what has been done. 
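The hunk context cuts the return description short; for orientation, a dry-run call would come back roughly like this (a hand-written illustration of the documented shape, not captured output):

    # __salt__["virt.update"]("my_vm", cpu=2, test=True) -> something like:
    {
        "definition": True,  # the XML would change; test=True skips defineXML
        "cpu": True,
        "mem": False,
        "disk": {"attached": [], "detached": []},
        "interface": {"attached": [], "detached": []},
        "errors": [],
    }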
It is structured in -@@ -1880,8 +1885,8 @@ def update(name, - boot = _handle_remote_boot_params(boot) +@@ -2713,7 +2704,6 @@ def update( - new_desc = ElementTree.fromstring(_gen_xml(name, -- cpu, -- mem, -+ cpu or 0, -+ mem or 0, - all_disks, - _get_merged_nics(hypervisor, nic_profile, interfaces), - hypervisor, -@@ -1973,11 +1978,12 @@ def update(name, - if changes['disk']: - for idx, item in enumerate(changes['disk']['sorted']): - source_file = all_disks[idx]['source_file'] -- if item in changes['disk']['new'] and source_file and not os.path.isfile(source_file): -+ if item in changes['disk']['new'] and source_file and not os.path.isfile(source_file) and not test: - _qemu_image_create(all_disks[idx]) + new_desc = ElementTree.fromstring( + _gen_xml( +- conn, + name, + cpu or 0, + mem or 0, +@@ -2879,26 +2869,22 @@ def update( + # Set the new definition + if need_update: + # Create missing disks if needed +- try: +- if changes["disk"]: +- for idx, item in enumerate(changes["disk"]["sorted"]): +- source_file = all_disks[idx].get("source_file") +- # We don't want to create image disks for cdrom devices +- if all_disks[idx].get("device", "disk") == "cdrom": +- continue +- if ( +- item in changes["disk"]["new"] +- and source_file +- and not os.path.isfile(source_file) +- ): +- _qemu_image_create(all_disks[idx]) +- elif item in changes["disk"]["new"] and not source_file: +- _disk_volume_create(conn, all_disks[idx]) ++ if changes["disk"]: ++ for idx, item in enumerate(changes["disk"]["sorted"]): ++ source_file = all_disks[idx]["source_file"] ++ if ( ++ item in changes["disk"]["new"] ++ and source_file ++ and not os.path.isfile(source_file) ++ and not test ++ ): ++ _qemu_image_create(all_disks[idx]) - try: -- conn.defineXML(salt.utils.stringutils.to_str(ElementTree.tostring(desc))) -+ if not test: -+ conn.defineXML(salt.utils.stringutils.to_str(ElementTree.tostring(desc))) - status['definition'] = True ++ try: + if not test: +- xml_desc = ElementTree.tostring(desc) +- log.debug("Update virtual machine definition: %s", xml_desc) +- conn.defineXML(salt.utils.stringutils.to_str(xml_desc)) ++ conn.defineXML( ++ salt.utils.stringutils.to_str(ElementTree.tostring(desc)) ++ ) + status["definition"] = True except libvirt.libvirtError as err: conn.close() -@@ -2010,7 +2016,7 @@ def update(name, - - for cmd in commands: - try: -- ret = getattr(domain, cmd['cmd'])(*cmd['args']) -+ ret = getattr(domain, cmd['cmd'])(*cmd['args']) if not test else 0 - device_type = cmd['device'] - if device_type in ['cpu', 'mem']: - status[device_type] = not bool(ret) diff --git a/salt/states/virt.py b/salt/states/virt.py -index 55a9ad2616..819776d707 100644 +index 200c79d35c..2394d0745e 100644 --- a/salt/states/virt.py +++ b/salt/states/virt.py -@@ -14,6 +14,7 @@ for the generation and signing of certificates for systems running libvirt: +@@ -12,6 +12,7 @@ for the generation and signing of certificates for systems running libvirt: + """ + - # Import Python libs - from __future__ import absolute_import, print_function, unicode_literals +import copy import fnmatch + import logging import os +@@ -285,37 +286,15 @@ def defined( + arch=None, + boot=None, + update=True, +- boot_dev=None, + ): + """ + Starts an existing guest, or defines and starts a new VM with specified arguments. 
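The state forwards the state-run test flag straight into the execution module (test=__opts__["test"] in the calls below), which short-circuits every mutating libvirt call the same way as the `if not test` guards in the hunks above. A minimal sketch of that pattern, with an invented helper name:

    def _maybe_apply(domain, cmd, args, test=False):
        # Dry run: report success (0, as libvirt does) without touching
        # the domain; otherwise dispatch the real libvirt call.
        if test:
            return 0
        return getattr(domain, cmd)(*args)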
-@@ -245,6 +246,187 @@ def powered_off(name, connection=None, username=None, password=None): - connection=connection, username=username, password=password) - - -+def defined(name, -+ cpu=None, -+ mem=None, -+ vm_type=None, -+ disk_profile=None, -+ disks=None, -+ nic_profile=None, -+ interfaces=None, -+ graphics=None, -+ seed=True, -+ install=True, -+ pub_key=None, -+ priv_key=None, -+ connection=None, -+ username=None, -+ password=None, -+ os_type=None, -+ arch=None, -+ boot=None, -+ update=True): -+ ''' -+ Starts an existing guest, or defines and starts a new VM with specified arguments. -+ +- .. versionadded:: 3001 + .. versionadded:: sodium -+ -+ :param name: name of the virtual machine to run -+ :param cpu: number of CPUs for the virtual machine to create + + :param name: name of the virtual machine to run + :param cpu: number of CPUs for the virtual machine to create +- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to +- contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, +- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The +- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. +- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be +- an integer. +- +- .. code-block:: python +- +- { +- 'boot': 1g, +- 'current': 1g, +- 'max': 1g, +- 'slots': 10, +- 'hard_limit': '1024' +- 'soft_limit': '512m' +- 'swap_hard_limit': '1g' +- 'min_guarantee': '512mib' +- } +- +- .. versionchanged:: 3002 +- + :param mem: amount of memory in MiB for the new virtual machine -+ :param vm_type: force virtual machine type for the new VM. The default value is taken from -+ the host capabilities. This could be useful for example to use ``'qemu'`` type instead -+ of the ``'kvm'`` one. -+ :param disk_profile: -+ Name of the disk profile to use for the new virtual machine -+ :param disks: -+ List of disk to create for the new virtual machine. -+ See :ref:`init-disk-def` for more details on the items on this list. -+ :param nic_profile: -+ Name of the network interfaces profile to use for the new virtual machine -+ :param interfaces: -+ List of network interfaces to create for the new virtual machine. -+ See :ref:`init-nic-def` for more details on the items on this list. -+ :param graphics: -+ Graphics device to create for the new virtual machine. -+ See :ref:`init-graphics-def` for more details on this dictionary -+ :param saltenv: -+ Fileserver environment (Default: ``'base'``). -+ See :mod:`cp module for more details ` -+ :param seed: ``True`` to seed the disk image. Only used when the ``image`` parameter is provided. -+ (Default: ``True``) -+ :param install: install salt minion if absent (Default: ``True``) -+ :param pub_key: public key to seed with (Default: ``None``) -+ :param priv_key: public key to seed with (Default: ``None``) -+ :param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``) -+ :param connection: libvirt connection URI, overriding defaults -+ :param username: username to connect with, overriding defaults -+ :param password: password to connect with, overriding defaults -+ :param os_type: -+ type of virtualization as found in the ``//os/type`` element of the libvirt definition. -+ The default value is taken from the host capabilities, with a preference for ``hvm``. 
-+ Only used when creating a new virtual machine. -+ :param arch: -+ architecture of the virtual machine. The default value is taken from the host capabilities, -+ but ``x86_64`` is prefed over ``i686``. Only used when creating a new virtual machine. -+ -+ :param boot: + :param vm_type: force virtual machine type for the new VM. The default value is taken from + the host capabilities. This could be useful for example to use ``'qemu'`` type instead + of the ``'kvm'`` one. +@@ -353,27 +332,23 @@ def defined( + but ``x86_64`` is prefed over ``i686``. Only used when creating a new virtual machine. + + :param boot: +- Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine. +- This is an optional parameter, all of the keys are optional within the dictionary. +- +- Refer to :ref:`init-boot-def` for the complete boot parameters description. + Specifies kernel for the virtual machine, as well as boot parameters + for the virtual machine. This is an optionl parameter, and all of the + keys are optional within the dictionary. If a remote path is provided + to kernel or initrd, salt will handle the downloading of the specified + remote fild, and will modify the XML accordingly. -+ + +- To update any boot parameters, specify the new path for each. To remove any boot parameters, +- pass a None object, for instance: 'kernel': ``None``. + .. code-block:: python -+ + +- .. versionadded:: 3000 + { + 'kernel': '/root/f8-i386-vmlinuz', + 'initrd': '/root/f8-i386-initrd', + 'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/' + } -+ -+ :param update: set to ``False`` to prevent updating a defined domain. (Default: ``True``) -+ + + :param update: set to ``False`` to prevent updating a defined domain. (Default: ``True``) + +- .. deprecated:: 3001 +- +- :param boot_dev: +- Space separated list of devices to boot from sorted by decreasing priority. +- Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. +- +- By default, the value will ``"hd"``. +- +- .. versionadded:: 3002 + .. deprecated:: sodium -+ -+ .. rubric:: Example States -+ -+ Make sure a virtual machine called ``domain_name`` is defined: -+ -+ .. 
code-block:: yaml -+ -+ domain_name: -+ virt.defined: -+ - cpu: 2 -+ - mem: 2048 -+ - disk_profile: prod -+ - disks: -+ - name: system -+ size: 8192 -+ overlay_image: True -+ pool: default -+ image: /path/to/image.qcow2 -+ - name: data -+ size: 16834 -+ - nic_profile: prod -+ - interfaces: -+ - name: eth0 -+ mac: 01:23:45:67:89:AB -+ - name: eth1 -+ type: network -+ source: admin -+ - graphics: -+ type: spice -+ listen: -+ type: address -+ address: 192.168.0.125 -+ -+ ''' -+ -+ ret = {'name': name, -+ 'changes': {}, -+ 'result': True if not __opts__['test'] else None, -+ 'comment': '' -+ } -+ -+ try: -+ if name in __salt__['virt.list_domains'](connection=connection, username=username, password=password): -+ status = {} -+ if update: -+ status = __salt__['virt.update'](name, -+ cpu=cpu, -+ mem=mem, -+ disk_profile=disk_profile, -+ disks=disks, -+ nic_profile=nic_profile, -+ interfaces=interfaces, -+ graphics=graphics, -+ live=True, -+ connection=connection, -+ username=username, -+ password=password, -+ boot=boot, -+ test=__opts__['test']) -+ ret['changes'][name] = status -+ if not status.get('definition'): -+ ret['comment'] = 'Domain {0} unchanged'.format(name) -+ ret['result'] = True -+ elif status.get('errors'): -+ ret['comment'] = 'Domain {0} updated with live update(s) failures'.format(name) -+ else: -+ ret['comment'] = 'Domain {0} updated'.format(name) -+ else: -+ if not __opts__['test']: -+ __salt__['virt.init'](name, -+ cpu=cpu, -+ mem=mem, -+ os_type=os_type, -+ arch=arch, -+ hypervisor=vm_type, -+ disk=disk_profile, -+ disks=disks, -+ nic=nic_profile, -+ interfaces=interfaces, -+ graphics=graphics, -+ seed=seed, -+ install=install, -+ pub_key=pub_key, -+ priv_key=priv_key, -+ connection=connection, -+ username=username, -+ password=password, -+ boot=boot, -+ start=False) -+ ret['changes'][name] = {'definition': True} -+ ret['comment'] = 'Domain {0} defined'.format(name) -+ except libvirt.libvirtError as err: -+ # Something bad happened when defining / updating the VM, report it -+ ret['comment'] = six.text_type(err) -+ ret['result'] = False -+ -+ return ret -+ -+ - def running(name, - cpu=None, - mem=None, -@@ -326,9 +508,10 @@ def running(name, - :param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``) - - .. versionadded:: 2019.2.0 -- :param update: set to ``True`` to update a defined module. (Default: ``False``) -+ :param update: set to ``True`` to update a defined domain. (Default: ``False``) + + .. rubric:: Example States + +@@ -385,7 +360,6 @@ def defined( + virt.defined: + - cpu: 2 + - mem: 2048 +- - boot_dev: network hd + - disk_profile: prod + - disks: + - name: system +@@ -438,7 +412,6 @@ def defined( + password=password, + boot=boot, + test=__opts__["test"], +- boot_dev=boot_dev, + ) + ret["changes"][name] = status + if not status.get("definition"): +@@ -473,7 +446,6 @@ def defined( + password=password, + boot=boot, + start=False, +- boot_dev=boot_dev, + ) + ret["changes"][name] = {"definition": True} + ret["comment"] = "Domain {} defined".format(name) +@@ -489,6 +461,7 @@ def running( + name, + cpu=None, + mem=None, ++ image=None, + vm_type=None, + disk_profile=None, + disks=None, +@@ -506,7 +479,6 @@ def running( + os_type=None, + arch=None, + boot=None, +- boot_dev=None, + ): + """ + Starts an existing guest, or defines and starts a new VM with specified arguments. +@@ -584,7 +556,7 @@ def running( + :param update: set to ``True`` to update a defined domain. (Default: ``False``) .. versionadded:: 2019.2.0 +- .. deprecated:: 3001 + .. 
deprecated:: sodium :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 -@@ -424,93 +607,74 @@ def running(name, - address: 192.168.0.125 +@@ -676,10 +648,32 @@ def running( - ''' -- -- ret = {'name': name, -- 'changes': {}, -- 'result': True, -- 'comment': '{0} is running'.format(name) -- } -- -- try: -+ merged_disks = disks + """ + merged_disks = disks + if image: -+ default_disks = [{'system': {}}] -+ disknames = ['system'] ++ default_disks = [{"system": {}}] ++ disknames = ["system"] + if disk_profile: + disklist = copy.deepcopy( -+ __salt__['config.get']('virt:disk', {}).get(disk_profile, default_disks)) ++ __salt__["config.get"]("virt:disk", {}).get(disk_profile, default_disks) ++ ) + disknames = disklist.keys() -+ disk = {'name': disknames[0], 'image': image} ++ disk = {"name": disknames[0], "image": image} + if merged_disks: -+ first_disk = [d for d in merged_disks if d.get('name') == disknames[0]] -+ if first_disk and 'image' not in first_disk[0]: -+ first_disk[0]['image'] = image ++ first_disk = [d for d in merged_disks if d.get("name") == disknames[0]] ++ if first_disk and "image" not in first_disk[0]: ++ first_disk[0]["image"] = image + else: + merged_disks.append(disk) + else: + merged_disks = [disk] + salt.utils.versions.warn_until( -+ 'Sodium', -+ '\'image\' parameter has been deprecated. Rather use the \'disks\' parameter ' -+ 'to override or define the image. \'image\' will be removed in {version}.' ++ "Sodium", ++ "'image' parameter has been deprecated. Rather use the 'disks' parameter " ++ "to override or define the image. 'image' will be removed in {version}.", + ) -+ -+ if not update: -+ salt.utils.versions.warn_until('Magnesium', -+ '\'update\' parameter has been deprecated. Future behavior will be the one of update=True' -+ 'It will be removed in {version}.') -+ ret = defined(name, -+ cpu=cpu, -+ mem=mem, -+ vm_type=vm_type, -+ disk_profile=disk_profile, -+ disks=merged_disks, -+ nic_profile=nic_profile, -+ interfaces=interfaces, -+ graphics=graphics, -+ seed=seed, -+ install=install, -+ pub_key=pub_key, -+ priv_key=priv_key, -+ os_type=os_type, -+ arch=arch, -+ boot=boot, -+ update=update, -+ connection=connection, -+ username=username, -+ password=password) -+ -+ result = True if not __opts__['test'] else None -+ if ret['result'] is None or ret['result']: -+ changed = ret['changes'][name].get('definition', False) - try: - domain_state = __salt__['virt.vm_state'](name) - if domain_state.get(name) != 'running': -- action_msg = 'started' -- if update: -- status = __salt__['virt.update'](name, -- cpu=cpu, -- mem=mem, -- disk_profile=disk_profile, -- disks=disks, -- nic_profile=nic_profile, -- interfaces=interfaces, -- graphics=graphics, -- live=False, -- connection=connection, -- username=username, -- password=password, -- boot=boot) -- if status['definition']: -- action_msg = 'updated and started' -- __salt__['virt.start'](name) -- ret['changes'][name] = 'Domain {0}'.format(action_msg) -- ret['comment'] = 'Domain {0} {1}'.format(name, action_msg) -- else: -- if update: -- status = __salt__['virt.update'](name, -- cpu=cpu, -- mem=mem, -- disk_profile=disk_profile, -- disks=disks, -- nic_profile=nic_profile, -- interfaces=interfaces, -- graphics=graphics, -- connection=connection, -- username=username, -- password=password, -- boot=boot) -- ret['changes'][name] = status -- if status.get('errors', None): -- ret['comment'] = 'Domain {0} updated, but some live update(s) failed'.format(name) -- elif not status['definition']: -- 
ret['comment'] = 'Domain {0} exists and is running'.format(name) -- else: -- ret['comment'] = 'Domain {0} updated, restart to fully apply the changes'.format(name) -- else: -- ret['comment'] = 'Domain {0} exists and is running'.format(name) -- except CommandExecutionError: -- if image: -- salt.utils.versions.warn_until( -- 'Sodium', -- '\'image\' parameter has been deprecated. Rather use the \'disks\' parameter ' -- 'to override or define the image. \'image\' will be removed in {version}.' -- ) -- __salt__['virt.init'](name, -- cpu=cpu, -- mem=mem, -- os_type=os_type, -- arch=arch, -- image=image, -- hypervisor=vm_type, -- disk=disk_profile, -- disks=disks, -- nic=nic_profile, -- interfaces=interfaces, -- graphics=graphics, -- seed=seed, -- install=install, -- pub_key=pub_key, -- priv_key=priv_key, -- connection=connection, -- username=username, -- password=password, -- boot=boot) -- ret['changes'][name] = 'Domain defined and started' -- ret['comment'] = 'Domain {0} defined and started'.format(name) -- except libvirt.libvirtError as err: -- # Something bad happened when starting / updating the VM, report it -- ret['comment'] = six.text_type(err) -- ret['result'] = False -+ if not __opts__['test']: -+ __salt__['virt.start'](name, connection=connection, username=username, password=password) -+ comment = 'Domain {} started'.format(name) -+ if not ret['comment'].endswith('unchanged'): -+ comment = '{} and started'.format(ret['comment']) -+ ret['comment'] = comment -+ ret['changes'][name]['started'] = True -+ elif not changed: -+ ret['comment'] = 'Domain {0} exists and is running'.format(name) -+ -+ except libvirt.libvirtError as err: -+ # Something bad happened when starting / updating the VM, report it -+ ret['comment'] = six.text_type(err) -+ ret['result'] = False - return ret + if not update: + salt.utils.versions.warn_until( +- "Aluminium", ++ "Magnesium", + "'update' parameter has been deprecated. Future behavior will be the one of update=True" + "It will be removed in {version}.", + ) +@@ -701,7 +695,6 @@ def running( + arch=arch, + boot=boot, + update=update, +- boot_dev=boot_dev, + connection=connection, + username=username, + password=password, +@@ -953,7 +946,7 @@ def network_defined( + :param username: username to connect with, overriding defaults + :param password: password to connect with, overriding defaults -@@ -670,6 +834,106 @@ def reverted(name, snapshot=None, cleanup=False): # pylint: disable=redefined-o - return ret - - -+def network_defined(name, -+ bridge, -+ forward, -+ vport=None, -+ tag=None, -+ ipv4_config=None, -+ ipv6_config=None, -+ autostart=True, -+ connection=None, -+ username=None, -+ password=None): -+ ''' -+ Defines a new network with specified arguments. -+ -+ :param bridge: Bridge name -+ :param forward: Forward mode(bridge, router, nat) -+ :param vport: Virtualport type (Default: ``'None'``) -+ :param tag: Vlan tag (Default: ``'None'``) -+ :param ipv4_config: -+ IPv4 network configuration. See the :py:func`virt.network_define -+ ` function corresponding parameter documentation -+ for more details on this dictionary. -+ (Default: None). -+ :param ipv6_config: -+ IPv6 network configuration. See the :py:func`virt.network_define -+ ` function corresponding parameter documentation -+ for more details on this dictionary. -+ (Default: None). 
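For a concrete picture of the two optional dictionaries, the shape consumed by virt.network_define looks about like this (mirroring the YAML sample further down in the docstring):

    ipv4_config = {
        "cidr": "192.168.42.0/24",
        "dhcp_ranges": [
            {"start": "192.168.42.10", "end": "192.168.42.25"},
            {"start": "192.168.42.100", "end": "192.168.42.150"},
        ],
    }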
-+ :param autostart: Network autostart (default ``'True'``) -+ :param connection: libvirt connection URI, overriding defaults -+ :param username: username to connect with, overriding defaults -+ :param password: password to connect with, overriding defaults -+ +- .. versionadded:: 3001 + .. versionadded:: sodium -+ -+ .. code-block:: yaml -+ -+ network_name: -+ virt.network_defined -+ -+ .. code-block:: yaml -+ -+ network_name: -+ virt.network_defined: -+ - bridge: main -+ - forward: bridge -+ - vport: openvswitch -+ - tag: 180 -+ - autostart: True -+ -+ .. code-block:: yaml -+ -+ network_name: -+ virt.network_defined: -+ - bridge: natted -+ - forward: nat -+ - ipv4_config: -+ cidr: 192.168.42.0/24 -+ dhcp_ranges: -+ - start: 192.168.42.10 -+ end: 192.168.42.25 -+ - start: 192.168.42.100 -+ end: 192.168.42.150 -+ - autostart: True -+ -+ ''' -+ ret = {'name': name, -+ 'changes': {}, -+ 'result': True if not __opts__['test'] else None, -+ 'comment': '' -+ } -+ -+ try: -+ info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password) -+ if info and info[name]: -+ ret['comment'] = 'Network {0} exists'.format(name) -+ ret['result'] = True -+ else: -+ if not __opts__['test']: -+ __salt__['virt.network_define'](name, -+ bridge, -+ forward, -+ vport=vport, -+ tag=tag, -+ ipv4_config=ipv4_config, -+ ipv6_config=ipv6_config, -+ autostart=autostart, -+ start=False, -+ connection=connection, -+ username=username, -+ password=password) -+ ret['changes'][name] = 'Network defined' -+ ret['comment'] = 'Network {0} defined'.format(name) -+ except libvirt.libvirtError as err: -+ ret['result'] = False -+ ret['comment'] = err.get_error_message() -+ -+ return ret -+ -+ - def network_running(name, - bridge, - forward, -@@ -715,13 +979,13 @@ def network_running(name, .. code-block:: yaml -- domain_name: -- virt.network_define -+ network_name: -+ virt.network_running +@@ -1170,7 +1163,7 @@ def pool_defined( + """ + Defines a new pool with specified arguments. - .. code-block:: yaml - - network_name: -- virt.network_define: -+ virt.network_running: - - bridge: main - - forward: bridge - - vport: openvswitch -@@ -731,7 +995,7 @@ def network_running(name, - .. code-block:: yaml - - network_name: -- virt.network_define: -+ virt.network_running: - - bridge: natted - - forward: nat - - ipv4_config: -@@ -744,44 +1008,46 @@ def network_running(name, - - autostart: True - - ''' -- ret = {'name': name, -- 'changes': {}, -- 'result': True, -- 'comment': '' -- } -- -- try: -- info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password) -- if info: -- if info[name]['active']: -- ret['comment'] = 'Network {0} exists and is running'.format(name) -+ ret = network_defined(name, -+ bridge, -+ forward, -+ vport=vport, -+ tag=tag, -+ ipv4_config=ipv4_config, -+ ipv6_config=ipv6_config, -+ autostart=autostart, -+ connection=connection, -+ username=username, -+ password=password) -+ -+ defined = name in ret['changes'] and ret['changes'][name].startswith('Network defined') -+ -+ result = True if not __opts__['test'] else None -+ if ret['result'] is None or ret['result']: -+ try: -+ info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password) -+ # In the corner case where test=True and the network wasn't defined -+ # we may not get the network in the info dict and that is normal. 
-+ if info.get(name, {}).get('active', False):
-+ ret['comment'] = '{} and is running'.format(ret['comment'])
- else:
-- __salt__['virt.network_start'](name, connection=connection, username=username, password=password)
-- ret['changes'][name] = 'Network started'
-- ret['comment'] = 'Network {0} started'.format(name)
-- else:
-- __salt__['virt.network_define'](name,
-- bridge,
-- forward,
-- vport=vport,
-- tag=tag,
-- ipv4_config=ipv4_config,
-- ipv6_config=ipv6_config,
-- autostart=autostart,
-- start=True,
-- connection=connection,
-- username=username,
-- password=password)
-- ret['changes'][name] = 'Network defined and started'
-- ret['comment'] = 'Network {0} defined and started'.format(name)
-- except libvirt.libvirtError as err:
-- ret['result'] = False
-- ret['comment'] = err.get_error_message()
-+ if not __opts__['test']:
-+ __salt__['virt.network_start'](name, connection=connection, username=username, password=password)
-+ change = 'Network started'
-+ if name in ret['changes']:
-+ change = '{} and started'.format(ret['changes'][name])
-+ ret['changes'][name] = change
-+ ret['comment'] = '{} and started'.format(ret['comment'])
-+ ret['result'] = result
-+
-+ except libvirt.libvirtError as err:
-+ ret['result'] = False
-+ ret['comment'] = err.get_error_message()
-
- return ret
-
-
--def pool_running(name,
-+def pool_defined(name,
- ptype=None,
- target=None,
- permissions=None,
-@@ -792,9 +1058,9 @@ def pool_running(name,
- username=None,
- password=None):
- '''
-- Defines and starts a new pool with specified arguments.
-
-- .. versionadded:: 2019.2.0
+- .. versionadded:: 3001
+ .. versionadded:: sodium

 :param ptype: libvirt pool type
 :param target: full path to the target device or folder. (Default: ``None``)
-@@ -816,12 +1082,7 @@ def pool_running(name,
- .. code-block:: yaml
+@@ -1269,24 +1262,14 @@ def pool_defined(

- pool_name:
-- virt.pool_define
--
-- .. code-block:: yaml
--
-- pool_name:
-- virt.pool_define:
-+ virt.pool_defined:
- - ptype: netfs
- - target: /mnt/cifs
- - permissions:
-@@ -884,29 +1145,19 @@ def pool_running(name,
- username=username,
- password=password)

+ action = ""
+ if info[name]["state"] != "running":
+- if ptype in BUILDABLE_POOL_TYPES:
+- if not __opts__["test"]:
+- # Storage pools build like disk or logical will fail if the disk or LV group
+- # was already existing. Since we can't easily figure that out, just log the
+- # possible libvirt error.
+- try:
+- __salt__["virt.pool_build"](
+- name,
+- connection=connection,
+- username=username,
+- password=password,
+- )
+- except libvirt.libvirtError as err:
+- log.warning(
+- "Failed to build libvirt storage pool: %s",
+- err.get_error_message(),
+- )
+- action = ", built"
++ if not __opts__["test"]:
++ __salt__["virt.pool_build"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ action = ", built"

-- action = "started"
-- if info[name]['state'] == 'running':
-- action = "restarted"
-+ action = ''
-+ if info[name]['state'] != 'running':
- if not __opts__['test']:
-- __salt__['virt.pool_stop'](name, connection=connection, username=username, password=password)
--
-- if not __opts__['test']:
-- __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
-- __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
-+ __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
-+ action = ', built'
+ action = (
+ "{}, autostart flag changed".format(action)
+@@ -1322,22 +1305,9 @@ def pool_defined(
+ password=password,
+ )

-- autostart_str = ', autostart flag changed' if needs_autostart else ''
-- ret['changes'][name] = 'Pool updated, built{0} and {1}'.format(autostart_str, action)
-- ret['comment'] = 'Pool {0} updated, built{1} and {2}'.format(name, autostart_str, action)
-+ action = '{}, autostart flag changed'.format(action) if needs_autostart else action
-+ ret['changes'][name] = 'Pool updated{0}'.format(action)
-+ ret['comment'] = 'Pool {0} updated{1}'.format(name, action)
-
- else:
-- if info[name]['state'] == 'running':
-- ret['comment'] = 'Pool {0} unchanged and is running'.format(name)
-- ret['result'] = True
-- else:
-- ret['changes'][name] = 'Pool started'
-- ret['comment'] = 'Pool {0} started'.format(name)
-- if not __opts__['test']:
-- __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
-+ ret['comment'] = 'Pool {0} unchanged'.format(name)
-+ ret['result'] = True
- else:
- needs_autostart = autostart
- if not __opts__['test']:
-@@ -932,17 +1183,12 @@ def pool_running(name,
- connection=connection,
- username=username,
- password=password)
--
-- __salt__['virt.pool_start'](name,
-- connection=connection,
-- username=username,
-- password=password)
+- if ptype in BUILDABLE_POOL_TYPES:
+- # Storage pools build like disk or logical will fail if the disk or LV group
+- # was already existing. Since we can't easily figure that out, just log the
+- # possible libvirt error.
+- try:
+- __salt__["virt.pool_build"](
+- name,
+- connection=connection,
+- username=username,
+- password=password,
+- )
+- except libvirt.libvirtError as err:
+- log.warning(
+- "Failed to build libvirt storage pool: %s",
+- err.get_error_message(),
+- )
++ __salt__["virt.pool_build"](
++ name, connection=connection, username=username, password=password
++ )
 if needs_autostart:
+ ret["changes"][name] = "Pool defined, marked for autostart"
+ ret["comment"] = "Pool {} defined, marked for autostart".format(name)
+@@ -1494,6 +1464,138 @@ def pool_running(
 return ret

-+def pool_running(name,
-+ ptype=None,
-+ target=None,
-+ permissions=None,
-+ source=None,
-+ transient=False,
-+ autostart=True,
-+ connection=None,
-+ username=None,
-+ password=None):
-+ '''
++def pool_running(
++ name,
++ ptype=None,
++ target=None,
++ permissions=None,
++ source=None,
++ transient=False,
++ autostart=True,
++ connection=None,
++ username=None,
++ password=None,
++):
++ """
+ Defines and starts a new pool with specified arguments.
+
+ .. versionadded:: 2019.2.0
@@ -838,1540 +451,2026 @@
index 55a9ad2616..819776d707 100644
+ format: cifs
+ - autostart: True
+
-+ '''
-+ ret = pool_defined(name,
-+ ptype=ptype,
-+ target=target,
-+ permissions=permissions,
-+ source=source,
-+ transient=transient,
-+ autostart=autostart,
-+ connection=connection,
-+ username=username,
-+ password=password)
++ """
++ ret = pool_defined(
++ name,
++ ptype=ptype,
++ target=target,
++ permissions=permissions,
++ source=source,
++ transient=transient,
++ autostart=autostart,
++ connection=connection,
++ username=username,
++ password=password,
++ )
-+ defined = name in ret['changes'] and ret['changes'][name].startswith('Pool defined')
-+ updated = name in ret['changes'] and ret['changes'][name].startswith('Pool updated')
++ defined = name in ret["changes"] and ret["changes"][name].startswith("Pool defined")
++ updated = name in ret["changes"] and ret["changes"][name].startswith("Pool updated")
+
-+ result = True if not __opts__['test'] else None
-+ if ret['result'] is None or ret['result']:
++ result = True if not __opts__["test"] else None
++ if ret["result"] is None or ret["result"]:
+ try:
-+ info = __salt__['virt.pool_info'](name, connection=connection, username=username, password=password)
-+ action = 'started'
++ info = __salt__["virt.pool_info"](
++ name, connection=connection, username=username, password=password
++ )
++ action = "started"
+ # In the corner case where test=True and the pool wasn't defined
+ # we may get not get our pool in the info dict and that is normal.
-+ is_running = info.get(name, {}).get('state', 'stopped') == 'running'
++ is_running = info.get(name, {}).get("state", "stopped") == "running"
+ if is_running:
+ if updated:
-+ action = 'built, restarted'
-+ if not __opts__['test']:
-+ __salt__['virt.pool_stop'](name, connection=connection, username=username, password=password)
-+ if not __opts__['test']:
-+ __salt__['virt.pool_build'](name, connection=connection, username=username, password=password)
++ action = "built, restarted"
++ if not __opts__["test"]:
++ __salt__["virt.pool_stop"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
++ if not __opts__["test"]:
++ __salt__["virt.pool_build"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
+ else:
-+ action = 'already running'
++ action = "already running"
+ result = True
+
+ if not is_running or updated or defined:
-+ if not __opts__['test']:
-+ __salt__['virt.pool_start'](name, connection=connection, username=username, password=password)
++ if not __opts__["test"]:
++ __salt__["virt.pool_start"](
++ name,
++ connection=connection,
++ username=username,
++ password=password,
++ )
+
-+ comment = 'Pool {0}'.format(name)
-+ change = 'Pool'
-+ if name in ret['changes']:
-+ comment = '{0},'.format(ret['comment'])
-+ change = '{0},'.format(ret['changes'][name])
++ comment = "Pool {}".format(name)
++ change = "Pool"
++ if name in ret["changes"]:
++ comment = "{},".format(ret["comment"])
++ change = "{},".format(ret["changes"][name])
+
-+ if action != 'already running':
-+ ret['changes'][name] = '{0} {1}'.format(change, action)
++ if action != "already running":
++ ret["changes"][name] = "{} {}".format(change, action)
+
-+ ret['comment'] = '{0} {1}'.format(comment, action)
-+ ret['result'] = result
++ ret["comment"] = "{} {}".format(comment, action)
++ ret["result"] = result
+
+ except libvirt.libvirtError as err:
-+ ret['comment'] = err.get_error_message()
-+ ret['result'] = False
++ ret["comment"] = err.get_error_message()
++ ret["result"] = False
+
+ return ret
+
+
- def pool_deleted(name,
- purge=False,
- connection=None,
+ def pool_deleted(name, purge=False, connection=None, username=None, password=None):
+ """
+ Deletes a virtual storage pool.
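The net effect of the state-module hunks above is that pool_running no longer duplicates the define/build logic: pool_defined owns definition, build and autostart, and pool_running merely wraps it and stops/builds/starts around the result. A condensed, self-contained sketch of that composition pattern, using toy stand-ins (the POOLS dict and the stub functions are illustrative only, not Salt's real __salt__/__opts__/libvirt plumbing):

    __opts__ = {"test": False}
    POOLS = {}  # toy hypervisor state: pool name -> {"state": ...}

    def pool_defined(name):
        # Stand-in for the real state: define the pool if it is missing.
        ret = {"name": name, "changes": {}, "result": True, "comment": ""}
        if name not in POOLS:
            POOLS[name] = {"state": "stopped"}
            ret["changes"][name] = "Pool defined"
            ret["comment"] = "Pool {} defined".format(name)
        return ret

    def pool_running(name):
        # Same composition as the hunk above: reuse pool_defined, then start.
        ret = pool_defined(name)
        if POOLS[name]["state"] != "running":
            if not __opts__["test"]:
                POOLS[name]["state"] = "running"
            change = ret["changes"].get(name, "Pool")
            ret["changes"][name] = "{} and started".format(change)
            ret["comment"] = "{} and started".format(
                ret["comment"] or "Pool {}".format(name)
            )
        return ret

    print(pool_running("mypool"))
    # -> changes: {'mypool': 'Pool defined and started'}, matching the
    #    message construction in the diff above

The design choice mirrors the diff: composing the two states keeps the "defined" and "running" comments additive ("Pool defined, marked for autostart and started"), instead of maintaining two divergent copies of the definition logic.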
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index d762dcc479..8690154662 100644
+index e9e73d7b5d..db6ba007b7 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
-@@ -1272,6 +1272,32 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
- define_mock = MagicMock(return_value=True)
- self.mock_conn.defineXML = define_mock
+@@ -1849,40 +1849,21 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ self.assertEqual(
+ {
+ "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm"),
++ virt.update("my vm"),
+ )

+- # mem + cpu case
+- define_mock.reset_mock()
+- domain_mock.setMemoryFlags.return_value = 0
+- domain_mock.setVcpusFlags.return_value = 0
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- "mem": True,
+- "cpu": True,
+- },
+- virt.update("my_vm", mem=2048, cpu=2),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual("2", setxml.find("vcpu").text)
+- self.assertEqual("2147483648", setxml.find("memory").text)
+- self.assertEqual(2048 * 1024, domain_mock.setMemoryFlags.call_args[0][0])
+-
+ # Same parameters passed than in default virt.defined state case
+ self.assertEqual(
+ {
+ "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+ virt.update(
+- "my_vm",
++ "my vm",
+ cpu=None,
+ mem=None,
+ disk_profile=None,
+@@ -1905,829 +1886,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+ {
+ "definition": True,
+ "cpu": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
++ "disk": {"attached": [], "detached": []},
+ "interface": {"attached": [], "detached": []},
+ },
+- virt.update("my_vm", cpu=2),
++ virt.update("my vm", cpu=2),
+ )
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ self.assertEqual(setxml.find("vcpu").text, "2")
+ self.assertEqual(setvcpus_mock.call_args[0][0], 2)
+
+- boot = {
+- "kernel": "/root/f8-i386-vmlinuz",
+- "initrd": "/root/f8-i386-initrd",
+- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- }
+-
+- # Update boot devices case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot_dev="cdrom network hd"),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- ["cdrom", "network", "hd"],
+- [node.get("dev") for node in setxml.findall("os/boot")],
+- )
+-
+- # Update unchanged boot devices case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot_dev="hd"),
+- )
+- define_mock.assert_not_called()
+-
+- # Update with boot parameter case
+- define_mock.reset_mock()
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot=boot),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
+- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
+- self.assertEqual(
+- setxml.find("os").find("cmdline").text,
+- "console=ttyS0 ks=http://example.com/f8-i386/os/",
+- )
+-
+- boot_uefi = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd",
+- }
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot=boot_uefi),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd"
+- )
+- self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
+- self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
+- self.assertEqual(
+- setxml.find("os").find("nvram").attrib["template"],
+- "/usr/share/OVMF/OVMF_VARS.ms.fd",
+- )
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", boot={"efi": True}),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi")
+-
+- invalid_boot = {
+- "loader": "/usr/share/OVMF/OVMF_CODE.fd",
+- "initrd": "/root/f8-i386-initrd",
+- }
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot=invalid_boot)
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", boot={"efi": "Not a boolean value"})
+-
+- # Update memtune parameter case
+- memtune = {
+- "soft_limit": "0.5g",
+- "hard_limit": "1024",
+- "swap_hard_limit": "2048m",
+- "min_guarantee": "1 g",
+- }
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=memtune),
+- )
+-
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(
+- setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3))
+- )
+- self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes")
+- self.assertEqual(
+- setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2)
+- )
+- self.assertEqual(
+- setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2)
+- )
+- self.assertEqual(
+- setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
+- )
+-
+- invalid_unit = {"soft_limit": "2HB"}
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", mem=invalid_unit)
+-
+- invalid_number = {
+- "soft_limit": "3.4.MB",
+- }
+-
+- with self.assertRaises(SaltInvocationError):
+- virt.update("my_vm", mem=invalid_number)
+-
+- # Update memory case
+- setmem_mock = MagicMock(return_value=0)
+- domain_mock.setMemoryFlags = setmem_mock
+-
+- self.assertEqual(
+- {
+- "definition": True,
+- "mem": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=2048),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("memory").text, str(2048 * 1024 ** 2))
+- self.assertEqual(setxml.find("memory").get("unit"), "bytes")
+- self.assertEqual(setmem_mock.call_args[0][0], 2048 * 1024)
+-
+- mem_dict = {"boot": "0.5g", "current": "2g", "max": "1g", "slots": 12}
+- self.assertEqual(
+- {
+- "definition": True,
+- "mem": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=mem_dict),
+- )
+-
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("memory").get("unit"), "bytes")
+- self.assertEqual(setxml.find("memory").text, str(int(0.5 * 1024 ** 3)))
+- self.assertEqual(setxml.find("maxMemory").text, str(1 * 1024 ** 3))
+- self.assertEqual(setxml.find("currentMemory").text, str(2 * 1024 ** 3))
+-
+- max_slot_reverse = {
+- "slots": "10",
+- "max": "3096m",
+- }
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=max_slot_reverse),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
+- self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
+-
+- # Update disks case
+- devattach_mock = MagicMock(return_value=0)
+- devdetach_mock = MagicMock(return_value=0)
+- domain_mock.attachDevice = devattach_mock
+- domain_mock.detachDevice = devdetach_mock
+- mock_chmod = MagicMock()
+- mock_run = MagicMock()
+- with patch.dict(
+- os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()}
+- ):  # pylint: disable=no-member
+- with patch.dict(
+- virt.__salt__, {"cmd.run": mock_run}
+- ):  # pylint: disable=no-member
+- ret = virt.update(
+- "my_vm",
+- disk_profile="default",
+- disks=[
+- {
+- "name": "cddrive",
+- "device": "cdrom",
+- "source_file": None,
+- "model": "ide",
+- },
+- {"name": "added", "size": 2048},
+- ],
+- )
+- added_disk_path = os.path.join(
+- virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2"
+- )  # pylint: disable=no-member
+- self.assertEqual(
+- mock_run.call_args[0][0],
+- 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path),
+- )
+- self.assertEqual(mock_chmod.call_args[0][0], added_disk_path)
+- self.assertListEqual(
+- [None, os.path.join(root_dir, "my_vm_added.qcow2")],
+- [
+- ET.fromstring(disk).find("source").get("file")
+- if str(disk).find("<source") > -1
+- else None
+- for disk in ret["disk"]["attached"]
+- ],
+- )
+-
+- self.assertListEqual(
+- ["my_vm_data", "libvirt-pool/my_vm_data2"],
+- [
+- ET.fromstring(disk).find("source").get("volume")
+- or ET.fromstring(disk).find("source").get("name")
+- for disk in ret["disk"]["detached"]
+- ],
+- )
+- self.assertEqual(devattach_mock.call_count, 2)
+- self.assertEqual(devdetach_mock.call_count, 2)
+-
+- # Update nics case
+- yaml_config = """
+- virt:
+- nic:
+- myprofile:
+- - network: default
+- name: eth0
+- """
+- mock_config = salt.utils.yaml.safe_load(yaml_config)
+- devattach_mock.reset_mock()
+- devdetach_mock.reset_mock()
+- with patch.dict(
+- salt.modules.config.__opts__, mock_config  # pylint: disable=no-member
+- ):
+- ret = virt.update(
+- "my_vm",
+- nic_profile="myprofile",
+- interfaces=[
+- {
+- "name": "eth0",
+- "type": "network",
+- "source": "default",
+- "mac": "52:54:00:39:02:b1",
+- },
+- {"name": "eth1", "type": "network", "source": "newnet"},
+- ],
+- )
+- self.assertEqual(
+- ["newnet"],
+- [
+- ET.fromstring(nic).find("source").get("network")
+- for nic in ret["interface"]["attached"]
+- ],
+- )
+- self.assertEqual(
+- ["oldnet"],
+- [
+- ET.fromstring(nic).find("source").get("network")
+- for nic in ret["interface"]["detached"]
+- ],
+- )
+- devattach_mock.assert_called_once()
+- devdetach_mock.assert_called_once()
+-
+- # Remove nics case
+- devattach_mock.reset_mock()
+- devdetach_mock.reset_mock()
+- ret = virt.update("my_vm", nic_profile=None, interfaces=[])
+- self.assertEqual([], ret["interface"]["attached"])
+- self.assertEqual(2, len(ret["interface"]["detached"]))
+- devattach_mock.assert_not_called()
+- devdetach_mock.assert_called()
+-
+- # Remove disks case (yeah, it surely is silly)
+- devattach_mock.reset_mock()
+- devdetach_mock.reset_mock()
+- ret = virt.update("my_vm", disk_profile=None, disks=[])
+- self.assertEqual([], ret["disk"]["attached"])
+- self.assertEqual(3, len(ret["disk"]["detached"]))
+- devattach_mock.assert_not_called()
+- devdetach_mock.assert_called()
+-
+- # Graphics change test case
+- self.assertEqual(
+- {
+- "definition": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", graphics={"type": "vnc"}),
+- )
+- setxml = ET.fromstring(define_mock.call_args[0][0])
+- self.assertEqual("vnc", setxml.find("devices/graphics").get("type"))
+-
+- # Update with no diff case
+- pool_mock = MagicMock()
+- default_pool_desc = ""
+- rbd_pool_desc = """
+- <pool type='rbd'>
+- <name>test-rbd</name>
+- <source>
+- <host name='ses2.tf.local'/>
+- <host name='ses3.tf.local' port='1234'/>
+- <name>libvirt-pool</name>
+- <auth type='ceph' username='libvirt'>
+- <secret usage='pool_test-rbd'/>
+- </auth>
+- </source>
+- </pool>
+- """
+- pool_mock.XMLDesc.side_effect = [
+- default_pool_desc,
+- rbd_pool_desc,
+- default_pool_desc,
+- rbd_pool_desc,
+- ]
+- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+- self.mock_conn.listStoragePools.return_value = ["test-rbd", "default"]
+- self.assertEqual(
+- {
+- "definition": False,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update(
+- "my_vm",
+- cpu=1,
+- mem=1024,
+- disk_profile="default",
+- disks=[
+- {"name": "data", "size": 2048, "pool": "default"},
+- {
+- "name": "data2",
+- "size": 4096,
+- "pool": "test-rbd",
+- "format": "raw",
+- },
+- ],
+- nic_profile="myprofile",
+- interfaces=[
+- {
+- "name": "eth0",
+- "type": "network",
+- "source": "default",
+- "mac": "52:54:00:39:02:b1",
+- },
+- {"name": "eth1", "type": "network", "source": "oldnet"},
+- ],
+- graphics={
+- "type": "spice",
+- "listen": {"type": "address", "address": "127.0.0.1"},
+- },
+- ),
+- )
+-
+- # Failed XML description update case
+- self.mock_conn.defineXML.side_effect = self.mock_libvirt.libvirtError(
+- "Test error"
+- )
+- setmem_mock.reset_mock()
+- with self.assertRaises(self.mock_libvirt.libvirtError):
+- virt.update("my_vm", mem=2048)
+-
+- # Failed single update failure case
+- self.mock_conn.defineXML = MagicMock(return_value=True)
+- setmem_mock.side_effect = self.mock_libvirt.libvirtError(
+- "Failed to live change memory"
+- )
+- self.assertEqual(
+- {
+- "definition": True,
+- "errors": ["Failed to live change memory"],
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", mem=2048),
+- )
+-
+- # Failed multiple updates failure case
+- self.assertEqual(
+- {
+- "definition": True,
+- "errors": ["Failed to live change memory"],
+- "cpu": True,
+- "disk": {"attached": [], "detached": [], "updated": []},
+- "interface": {"attached": [], "detached": []},
+- },
+- virt.update("my_vm", cpu=4, mem=2048),
+- )
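All of the removed assertions above follow a single mocking pattern: virt.update() is expected to hand the re-generated domain XML to the libvirt connection's defineXML(), so the tests swap that method for a MagicMock, then parse the captured argument and assert on the tree. A minimal standalone illustration of the same pattern (not Salt code; update_cpu is a toy stand-in for virt.update):

    import xml.etree.ElementTree as ET
    from unittest.mock import MagicMock

    # Capture whatever XML the code under test "defines".
    define_mock = MagicMock(return_value=True)

    def update_cpu(domain_xml, cpu):
        # Toy stand-in for virt.update(): bump <vcpu> and "define" the result.
        doc = ET.fromstring(domain_xml)
        doc.find("vcpu").text = str(cpu)
        define_mock(ET.tostring(doc, encoding="unicode"))
        return {"definition": True, "cpu": True}

    ret = update_cpu("<domain><name>my vm</name><vcpu>1</vcpu></domain>", 2)
    assert ret["definition"] is True
    # Same idiom as the tests above: parse define_mock.call_args[0][0].
    setxml = ET.fromstring(define_mock.call_args[0][0])
    assert setxml.find("vcpu").text == "2"

This is why the tests never need a running libvirt: every observable effect of virt.update() is funneled through mocked connection methods whose call arguments carry the generated XML.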
+- def test_update_backing_store(self):
+- """
+- Test updating a disk with a backing store
+- """
+- xml = """
+- <domain type='kvm' id='7'>
+- <name>my_vm</name>
+- <memory unit='KiB'>1048576</memory>
+- <currentMemory unit='KiB'>1048576</currentMemory>
+- <vcpu placement='auto'>1</vcpu>
+- <os>
+- <type arch='x86_64'>hvm</type>
+- </os>
+- <devices>
+- <disk type='volume' device='disk'>
+- <driver name='qemu' type='qcow2' cache='none' io='native'/>
+- <source pool='default' volume='my_vm_system' index='1'/>
+- <backingStore type='file' index='2'>
+- <format type='qcow2'/>
+- <source file='/path/to/base.qcow2'/>
+- <backingStore/>
+- </backingStore>
+- <target dev='vda' bus='virtio'/>
+- <alias name='virtio-disk0'/>
+- <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+- </disk>
+- </devices>
+- </domain>
+- """
+- domain_mock = self.set_mock_vm("my_vm", xml)
+- domain_mock.OSType.return_value = "hvm"
+- self.mock_conn.defineXML.return_value = True
+- updatedev_mock = MagicMock(return_value=0)
+- domain_mock.updateDeviceFlags = updatedev_mock
+- self.mock_conn.listStoragePools.return_value = ["default"]
+- self.mock_conn.storagePoolLookupByName.return_value.XMLDesc.return_value = (
+- "<pool type='dir'/>"
+- )
+-
+- ret = virt.update(
+- "my_vm",
+- disks=[
+- {
+- "name": "system",
+- "pool": "default",
+- "backing_store_path": "/path/to/base.qcow2",
+- "backing_store_format": "qcow2",
+- },
+- ],
+- )
+- self.assertFalse(ret["definition"])
+- self.assertFalse(ret["disk"]["attached"])
+- self.assertFalse(ret["disk"]["detached"])
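The backing-store test just removed hinges on virt.update() recognizing that the requested backing_store_path/backing_store_format already match the domain's <backingStore> element, and therefore reporting no change at all. A small sketch of that idempotence check, with the XML shape assumed from the fixture remnants above (illustrative only, not the function Salt actually uses):

    import xml.etree.ElementTree as ET

    disk_xml = """
    <disk type='file' device='disk'>
      <source file='/path/to/overlay.qcow2'/>
      <backingStore type='file'>
        <format type='qcow2'/>
        <source file='/path/to/base.qcow2'/>
      </backingStore>
      <target dev='vda' bus='virtio'/>
    </disk>
    """

    def backing_store_matches(disk, path, fmt):
        # Compare the existing <backingStore> against the desired values;
        # a match means the state can report "no change".
        node = ET.fromstring(disk).find("backingStore")
        if node is None:
            return False
        same_path = node.find("source").get("file") == path
        same_fmt = node.find("format").get("type") == fmt
        return same_path and same_fmt

    # No difference -> virt.update() should answer with definition: False.
    assert backing_store_matches(disk_xml, "/path/to/base.qcow2", "qcow2")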
+-
+- def test_update_removables(self):
+- """
+- Test attaching, detaching, changing removable devices
+- """
+- xml = """
+- <domain type='kvm' id='7'>
+- <name>my_vm</name>
+- <memory unit='KiB'>1048576</memory>
+- <currentMemory unit='KiB'>1048576</currentMemory>
+- <vcpu placement='auto'>1</vcpu>
+- <os>
+- <type arch='x86_64'>hvm</type>
+- </os>
+- <devices>
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- </devices>
+- </domain>
+- """
+- domain_mock = self.set_mock_vm("my_vm", xml)
+- domain_mock.OSType.return_value = "hvm"
+- self.mock_conn.defineXML.return_value = True
+- updatedev_mock = MagicMock(return_value=0)
+- domain_mock.updateDeviceFlags = updatedev_mock
+-
+- ret = virt.update(
+- "my_vm",
+- disks=[
+- {
+- "name": "dvd1",
+- "device": "cdrom",
+- "source_file": None,
+- "model": "ide",
+- },
+- {
+- "name": "dvd2",
+- "device": "cdrom",
+- "source_file": "/srv/dvd-image-4.iso",
+- "model": "ide",
+- },
+- {
+- "name": "dvd3",
+- "device": "cdrom",
+- "source_file": "/srv/dvd-image-2.iso",
+- "model": "ide",
+- },
+- {
+- "name": "dvd4",
+- "device": "cdrom",
+- "source_file": "/srv/dvd-image-5.iso",
+- "model": "ide",
+- },
+- {
+- "name": "dvd5",
+- "device": "cdrom",
+- "source_file": "/srv/dvd-image-6.iso",
+- "model": "ide",
+- },
+- ],
+- )
+-
+- self.assertTrue(ret["definition"])
+- self.assertFalse(ret["disk"]["attached"])
+- self.assertFalse(ret["disk"]["detached"])
+- self.assertEqual(
+- [
+- {
+- "type": "file",
+- "device": "cdrom",
+- "driver": {
+- "name": "qemu",
+- "type": "raw",
+- "cache": "none",
+- "io": "native",
+- },
+- "backingStore": None,
+- "target": {"dev": "hda", "bus": "ide"},
+- "readonly": None,
+- "alias": {"name": "ide0-0-0"},
+- "address": {
+- "type": "drive",
+- "controller": "0",
+- "bus": "0",
+- "target": "0",
+- "unit": "0",
+- },
+- },
+- {
+- "type": "file",
+- "device": "cdrom",
+- "driver": {
+- "name": "qemu",
+- "type": "raw",
+- "cache": "none",
+- "io": "native",
+- },
+- "target": {"dev": "hdb", "bus": "ide"},
+- "readonly": None,
+- "alias": {"name": "ide0-0-1"},
+- "address": {
+- "type": "drive",
+- "controller": "0",
+- "bus": "0",
+- "target": "0",
+- "unit": "1",
+- },
+- "source": {"file": "/srv/dvd-image-4.iso"},
+- },
+- {
+- "type": "file",
+- "device": "cdrom",
+- "driver": {
+- "name": "qemu",
+- "type": "raw",
+- "cache": "none",
+- "io": "native",
+- },
+- "backingStore": None,
+- "target": {"dev": "hdd", "bus": "ide"},
+- "readonly": None,
+- "alias": {"name": "ide0-0-3"},
+- "address": {
+- "type": "drive",
+- "controller": "0",
+- "bus": "0",
+- "target": "0",
+- "unit": "3",
+- },
+- "source": {"file": "/srv/dvd-image-5.iso"},
+- },
+- {
+- "type": "file",
+- "device": "cdrom",
+- "driver": {
+- "name": "qemu",
+- "type": "raw",
+- "cache": "none",
+- "io": "native",
+- },
+- "backingStore": None,
+- "target": {"dev": "hde", "bus": "ide"},
+- "readonly": None,
+- "source": {"file": "/srv/dvd-image-6.iso"},
+- },
+- ],
+- [
+- salt.utils.xmlutil.to_dict(ET.fromstring(disk), True)
+- for disk in ret["disk"]["updated"]
+- ],
+- )
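The removables assertions above compare XML device descriptions by first flattening them with salt.utils.xmlutil.to_dict(..., True), so deep-equality checks against plain dict literals stay readable. A simplified, self-contained analogue of that flattening (an assumption-laden sketch: the real helper handles more cases, such as text nodes, which this version ignores):

    import xml.etree.ElementTree as ET

    def to_dict(node, attrs=True):
        # Flatten an element into a dict: attributes become keys, children
        # recurse; an empty leaf (like <readonly/>) collapses to None.
        out = {}
        if attrs:
            out.update(node.attrib)
        for child in node:
            value = to_dict(child, attrs)
            out[child.tag] = value if (value or list(child)) else None
        return out

    xml = ("<disk type='file' device='cdrom'>"
           "<target dev='hda' bus='ide'/><readonly/></disk>")
    print(to_dict(ET.fromstring(xml)))
    # {'type': 'file', 'device': 'cdrom',
    #  'target': {'dev': 'hda', 'bus': 'ide'}, 'readonly': None}

Flattening to dicts is what lets the expected values above spell out "readonly": None or omit "backingStore" entirely instead of asserting on raw XML strings whose attribute order is unstable.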
+- """ +- root_dir = os.path.join(salt.syspaths.ROOT_DIR, "srv", "salt-images") +- xml_boot = """ +- +- vm +- 1048576 +- 1048576 +- 1 +- +- hvm +- /usr/lib/xen/boot/hvmloader +- +- +- """ +- domain_mock_boot = self.set_mock_vm("vm", xml_boot) +- domain_mock_boot.OSType = MagicMock(return_value="hvm") +- define_mock_boot = MagicMock(return_value=True) +- define_mock_boot.setVcpusFlags = MagicMock(return_value=0) +- self.mock_conn.defineXML = define_mock_boot +- self.assertEqual( +- { +- "cpu": False, +- "definition": True, +- "disk": {"attached": [], "detached": [], "updated": []}, +- "interface": {"attached": [], "detached": []}, +- }, +- virt.update("vm", cpu=2), +- ) +- setxml = ET.fromstring(define_mock_boot.call_args[0][0]) +- self.assertEqual(setxml.find("os").find("loader").attrib.get("type"), "rom") +- self.assertEqual( +- setxml.find("os").find("loader").text, "/usr/lib/xen/boot/hvmloader" +- ) +- +- def test_update_existing_boot_params(self): +- """ +- Test virt.update() with existing boot parameters. +- """ +- xml_boot = """ +- +- vm_with_boot_param +- 1048576 +- 1048576 +- 1 +- +- hvm +- /boot/oldkernel +- /boot/initrdold.img +- console=ttyS0 ks=http://example.com/old/os/ +- /usr/share/old/OVMF_CODE.fd +- /usr/share/old/OVMF_VARS.ms.fd +- +- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +-
+- +- +- +- +-