osc copypac from project:systemsmanagement:saltstack:testing package:salt revision:419

OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=190
This commit is contained in:
Pablo Suárez Hernández 2021-09-27 10:08:32 +00:00 committed by Git OBS Bridge
parent 5af0dfb7fe
commit d0e1a8d380
176 changed files with 12367 additions and 60196 deletions

View File

@ -1,48 +0,0 @@
From ffe924ef060a9b9540a4dcd117e045eaefa62513 Mon Sep 17 00:00:00 2001
From: Alexander Graul <mail@agraul.de>
Date: Tue, 9 Mar 2021 13:46:03 +0100
Subject: [PATCH] 3002: Set distro requirement to oldest supported
version (#327)
In the released Salt packages, python3-distro is taken from the
official repositories on supported operating systems. The oldest
supported python3-distro version is 1.0.1 in Ubuntu 18.04 universe and
Debian 9. FreeBSD is an exception and requires 1.3.0.
The mismatch between the version specified in requirements/base.txt and
what is actually used by the released packages can be confusing.
(cherry picked from commit 5c9c0ab9cdf2bf67bfdd259b53aa15297d1656ce)
(cherry picked from commit 0ff35358f79e9df8b06fb345fd79c1d22ed91179)
Co-authored-by: Pedro Algarvio <pedro@algarvio.me>
---
requirements/base.txt | 2 +-
requirements/static/pkg/freebsd.in | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/requirements/base.txt b/requirements/base.txt
index ffe4bc98f1..6af972bd1b 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -3,7 +3,7 @@ msgpack>=0.5,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0
-distro>=1.5
+distro>=1.0.1
# Requirements for Tornado 4.5.3 (vendored as salt.ext.tornado)
singledispatch==3.4.0.3; python_version < '3.4'
# Required by Tornado to handle threads stuff.
diff --git a/requirements/static/pkg/freebsd.in b/requirements/static/pkg/freebsd.in
index 879a378822..7cfa3dcce8 100644
--- a/requirements/static/pkg/freebsd.in
+++ b/requirements/static/pkg/freebsd.in
@@ -8,3 +8,4 @@ python-dateutil>=2.8.0
python-gnupg>=0.4.4
setproctitle>=1.1.10
timelib>=0.2.5
+distro>=1.3.0
--
2.30.1

File diff suppressed because it is too large Load Diff

View File

@ -1 +1 @@
71392e10750f7481475066788a23a39ad92d0c64
deacfe2304a0b9f34a9130b61bd69dea851962a7

View File

@ -3,7 +3,7 @@
<param name="url">https://github.com/openSUSE/salt-packaging.git</param>
<param name="subdir">salt</param>
<param name="filename">package</param>
<param name="revision">3002.2</param>
<param name="revision">3003.3</param>
<param name="scm">git</param>
</service>
<service name="extract_file" mode="disabled">
@ -12,8 +12,8 @@
</service>
<service name="download_url" mode="disabled">
<param name="host">codeload.github.com</param>
<param name="path">openSUSE/salt/tar.gz/v3002.2-suse</param>
<param name="filename">v3002.2.tar.gz</param>
<param name="path">openSUSE/salt/tar.gz/v3003.3-suse</param>
<param name="filename">v3003.3.tar.gz</param>
</service>
<service name="update_changelog" mode="disabled"></service>
</services>

View File

@ -1,223 +0,0 @@
From 828650500159fd7040d2fa76b2fc4d2b627f7065 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@gmail.com>
Date: Tue, 22 Oct 2019 11:02:33 +0200
Subject: [PATCH] Accumulated changes from Yomi (#167)
* core.py: ignore wrong product_name files
Some firmwares (like some NUC machines) do not provide valid
/sys/class/dmi/id/product_name strings. In those cases, a
UnicodeDecodeError exception happens.
This patch ignores this kind of issue during the grains creation.
(cherry picked from commit 2d57d2a6063488ad9329a083219e3826e945aa2d)
* zypperpkg: understand product type
(cherry picked from commit b865491b74679140f7a71c5ba50d482db47b600f)
---
salt/grains/core.py | 6 +--
salt/modules/zypperpkg.py | 22 ----------
tests/unit/grains/test_core.py | 64 +++++++++++++++++++++++++++-
tests/unit/modules/test_zypperpkg.py | 38 +++++++++++++++++
4 files changed, 103 insertions(+), 27 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 0dc1d97f97..a2983e388b 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1046,7 +1046,7 @@ def _virtual(osdata):
if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
try:
with salt.utils.files.fopen(
- "/sys/devices/virtual/dmi/id/product_name", "rb"
+ "/sys/devices/virtual/dmi/id/product_name", "r"
) as fhr:
output = salt.utils.stringutils.to_unicode(
fhr.read(), errors="replace"
@@ -1066,9 +1066,7 @@ def _virtual(osdata):
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
- log.debug(
- "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
- )
+ pass
except OSError:
pass
elif osdata["kernel"] == "FreeBSD":
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 2daec0f380..b5621174a4 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -958,28 +958,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
}
]
- for include in includes:
- if include in ("pattern", "patch"):
- if include == "pattern":
- pkgs = list_installed_patterns(root=root)
- elif include == "patch":
- pkgs = list_installed_patches(root=root)
- else:
- pkgs = []
- for pkg in pkgs:
- pkg_extended_name = "{}:{}".format(include, pkg)
- info = info_available(pkg_extended_name, refresh=False, root=root)
- _ret[pkg_extended_name] = [
- {
- "epoch": None,
- "version": info[pkg]["version"],
- "release": None,
- "arch": info[pkg]["arch"],
- "install_date": None,
- "install_date_time_t": None,
- }
- ]
-
__context__[contextkey] = _ret
return __salt__["pkg_resource.format_pkg_list"](
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index a5ceeb8317..0dc3423646 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -2047,13 +2047,74 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
result = core.path()
assert result == {"path": path, "systempath": comps}, result
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @patch("os.path.exists")
+ @patch("salt.utils.platform.is_proxy")
+ def test_kernelparams_return(self):
+ expectations = [
+ (
+ "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64",
+ {
+ "kernelparams": [
+ ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64")
+ ]
+ },
+ ),
+ (
+ "root=/dev/mapper/centos_daemon-root",
+ {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]},
+ ),
+ (
+ "rhgb quiet ro",
+ {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]},
+ ),
+ ('param="value1"', {"kernelparams": [("param", "value1")]}),
+ (
+ 'param="value1 value2 value3"',
+ {"kernelparams": [("param", "value1 value2 value3")]},
+ ),
+ (
+ 'param="value1 value2 value3" LANG="pl" ro',
+ {
+ "kernelparams": [
+ ("param", "value1 value2 value3"),
+ ("LANG", "pl"),
+ ("ro", None),
+ ]
+ },
+ ),
+ ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}),
+ (
+ 'param="value1:value2:value3"',
+ {"kernelparams": [("param", "value1:value2:value3")]},
+ ),
+ (
+ 'param="value1,value2,value3"',
+ {"kernelparams": [("param", "value1,value2,value3")]},
+ ),
+ (
+ 'param="value1" param="value2" param="value3"',
+ {
+ "kernelparams": [
+ ("param", "value1"),
+ ("param", "value2"),
+ ("param", "value3"),
+ ]
+ },
+ ),
+ ]
+
+ for cmdline, expectation in expectations:
+ with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)):
+ self.assertEqual(core.kernelparams(), expectation)
+
@skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
@patch("os.path.exists")
@patch("salt.utils.platform.is_proxy")
def test__hw_data_linux_empty(self, is_proxy, exists):
is_proxy.return_value = False
exists.return_value = True
- with patch("salt.utils.files.fopen", mock_open(read_data=b"")):
+ with patch("salt.utils.files.fopen", mock_open(read_data="")):
self.assertEqual(
core._hw_data({"kernel": "Linux"}),
{
@@ -2067,6 +2128,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
)
@skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3")
@patch("os.path.exists")
@patch("salt.utils.platform.is_proxy")
def test__hw_data_linux_unicode_error(self, is_proxy, exists):
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 5d4e7766b6..1b62122e0e 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -1424,6 +1424,44 @@ Repository 'DUMMY' not found by its alias, number, or URI.
ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
)
+ @patch("salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False))
+ @patch(
+ "salt.modules.zypperpkg.list_products",
+ MagicMock(return_value={"openSUSE": {"installed": False, "summary": "test"}}),
+ )
+ @patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(
+ side_effect=[{"product:openSUSE": "15.2"}, {"product:openSUSE": "15.3"}]
+ ),
+ )
+ def test_install_product_ok(self):
+ """
+ Test successfully product installation.
+ """
+ with patch.dict(
+ zypper.__salt__,
+ {
+ "pkg_resource.parse_targets": MagicMock(
+ return_value=(["product:openSUSE"], None)
+ )
+ },
+ ):
+ with patch(
+ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
+ ) as zypper_mock:
+ ret = zypper.install("product:openSUSE", includes=["product"])
+ zypper_mock.assert_called_once_with(
+ "--no-refresh",
+ "install",
+ "--auto-agree-with-licenses",
+ "--name",
+ "product:openSUSE",
+ )
+ self.assertDictEqual(
+ ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}}
+ )
+
def test_remove_purge(self):
"""
Test package removal
--
2.29.2

View File

@ -1,117 +0,0 @@
From 7d35fdba84b6e1b62a3abc71e518366a35efb662 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@gmail.com>
Date: Tue, 30 Jul 2019 11:23:12 +0200
Subject: [PATCH] Accumulated changes required for Yomi (#165)
* cmdmod: fix runas and group in run_chroot
The parameters runas and group for cmdmod.run() will change the effective
user and group before executing the command. But in a chroot environment it is
expected that the change happens inside the chroot, not outside, as the
user and groups are referring to objects that can only exist inside the
environment.
This patch adds the userspec parameter to the chroot command, to change
the user in the correct place.
(cherry picked from commit f0434aaeeee3ace4e3fc65c04e69984f08b2541e)
* chroot: add missing sys directory
(cherry picked from commit cdf74426bcad4e8bf329bf604c77ea83bfca8b2c)
* chroot: change variable name to root
(cherry picked from commit 7f68b65b1b0f9eec2a6b07b02714ead0121f0e4b)
* chroot: fix bug in safe_kwargs iteration
(cherry picked from commit 39da1c69ea2781bed6e9d8e6879b70d65fa5a5b0)
* test_cmdmod: fix test_run_cwd_in_combination_with_runas
(cherry picked from commit 42640ecf161caf64c61e9b02927882f92c850092)
* test_cmdmod: add test_run_chroot_runas test
(cherry picked from commit d900035089a22f6741d2095fd1f6694597041a88)
* freezer: do not fail if cache dir is present
(cherry picked from commit 25137c51e6d6e53e3099b6cddbf51d4cb2c53d8d)
* freezer: clean freeze YAML profile on restore
(cherry picked from commit 56b97c997257f12038399549dc987b7723ab225f)
* zypperpkg: fix pkg.list_pkgs cache
The cache from pkg.list_pkgs for the zypper installer is too aggressive.
Some parameters will deliver different package lists, like root and
includes. The current cache does not take those parameters into
consideration, so the next time that this function is called, the last
list of packages will be returned, without checking if the current
parameters match the old ones.
This patch creates a different cache key for each parameter combination,
so the cached data will be separated too.
(cherry picked from commit 9c54bb3e8c93ba21fc583bdefbcadbe53cbcd7b5)
---
salt/modules/zypperpkg.py | 1 -
tests/unit/modules/test_zypperpkg.py | 22 +++++++++++++++++++++-
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index c996935bff..b099f3e5d7 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -879,7 +879,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
# inclusion types are passed
contextkey = "pkg.list_pkgs_{}_{}".format(root, includes)
- # TODO(aplanas): this cached value depends on the parameters
if contextkey not in __context__:
ret = {}
cmd = ["rpm"]
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 032785395e..5d4e7766b6 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -912,7 +912,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
), patch.dict(
zypper.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
- pkg_resource.__salt__, {"pkg.parse_arch": zypper.parse_arch}
+ pkg_resource.__salt__,
+ {"pkg.parse_arch_from_name": zypper.parse_arch_from_name},
):
pkgs = zypper.list_pkgs(
attr=["epoch", "release", "arch", "install_date_time_t"]
@@ -1950,3 +1951,22 @@ pattern() = package-c"""
"package-a": {"installed": True, "summary": "description a",},
"package-b": {"installed": False, "summary": "description b",},
}
+
+ def test__clean_cache_empty(self):
+ """Test that an empty cached can be cleaned"""
+ context = {}
+ with patch.dict(zypper.__context__, context):
+ zypper._clean_cache()
+ assert context == {}
+
+ def test__clean_cache_filled(self):
+ """Test that a filled cached can be cleaned"""
+ context = {
+ "pkg.list_pkgs_/mnt_[]": None,
+ "pkg.list_pkgs_/mnt_[patterns]": None,
+ "pkg.list_provides": None,
+ "pkg.other_data": None,
+ }
+ with patch.dict(zypper.__context__, context):
+ zypper._clean_cache()
+ self.assertEqual(zypper.__context__, {"pkg.other_data": None})
--
2.29.2

View File

@ -1,45 +1,48 @@
From ec0d11ed66e8541a9ccaebc85aab4724013fb71f Mon Sep 17 00:00:00 2001
From 2e810cc876f7b7110326231de51d78ff5d12eae6 Mon Sep 17 00:00:00 2001
From: Pau Garcia Quiles <pau.garcia@suse.com>
Date: Tue, 13 Apr 2021 10:31:09 +0200
Subject: [PATCH] Add Alibaba Cloud Linux 2 by backporting upstream's
grain and discarding my own (#352)
---
salt/grains/core.py | 4 ++--
tests/unit/grains/test_core.py | 26 +++++++++++++++++---------
2 files changed, 19 insertions(+), 11 deletions(-)
salt/grains/core.py | 2 ++
tests/unit/grains/test_core.py | 28 ++++++++++++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 09f9d29788..2b965a2a8a 100644
index 19937f008e..bce8c95179 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1547,7 +1547,7 @@ _OS_NAME_MAP = {
"slesexpand": "RES",
@@ -1560,6 +1560,7 @@ _OS_NAME_MAP = {
"linuxmint": "Mint",
"neon": "KDE neon",
- "alibaba": "Alibaba Cloud (Aliyun)",
"pop": "Pop",
+ "alibabaclo": "Alinux",
}
# Map the 'os' grain to the 'os_family' grain
@@ -1622,7 +1622,7 @@ _OS_FAMILY_MAP = {
"AIX": "AIX",
@@ -1637,6 +1638,7 @@ _OS_FAMILY_MAP = {
"TurnKey": "Debian",
"Pop": "Debian",
"AstraLinuxCE": "Debian",
- "Alibaba Cloud (Aliyun)": "RedHat",
+ "Alinux": "RedHat",
}
# Matches any possible format:
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 6aa05abe40..8280d6de47 100644
index ac2d515bcd..fa06bb27ab 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -782,17 +782,25 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS
'''
_os_release_map = {
- '_linux_distribution': ('Alibaba Cloud Linux (Aliyun Linux)', '2.1903', 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)'),
@@ -846,6 +846,34 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation)
+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
+ def test_aliyunlinux2_os_grains(self):
+ '''
+ Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS
+ '''
+ _os_release_map = {
+ "os_release_file": {
+ "NAME": "Alibaba Cloud Linux (Aliyun Linux)",
+ "VERSION": "2.1903 LTS (Hunting Beagle)",
@ -49,16 +52,8 @@ index 6aa05abe40..8280d6de47 100644
+ "ANSI_COLOR": "0;31",
+ },
+ "_linux_distribution": ("alinux", "2.1903", "LTS"),
}
expectation = {
- 'os': 'Alibaba Cloud (Aliyun)',
- 'os_family': 'RedHat',
- 'oscodename': 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)',
- 'osfullname': 'Alibaba Cloud Linux (Aliyun Linux)',
- 'osrelease': '2.1903',
- 'osrelease_info': (2, 1903),
- 'osmajorrelease': 2,
- 'osfinger': 'Alibaba Cloud Linux (Aliyun Linux)-2',
+ }
+ expectation = {
+ "os": "Alinux",
+ "os_family": "RedHat",
+ "oscodename": "Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)",
@ -67,10 +62,13 @@ index 6aa05abe40..8280d6de47 100644
+ "osrelease_info": (2, 1903),
+ "osmajorrelease": 2,
+ "osfinger": "Alibaba Cloud Linux (Aliyun Linux)-2",
}
self._run_os_grains_tests(None, _os_release_map, expectation)
+ }
+ self._run_os_grains_tests(None, _os_release_map, expectation)
+
@skipIf(not salt.utils.platform.is_windows(), "System is not Windows")
def test_windows_platform_data(self):
"""
--
2.30.2
2.33.0

View File

@ -1,959 +0,0 @@
From 2e300c770c227cf394929b7d5d025d5c52f1ae2c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 14 May 2018 11:33:13 +0100
Subject: [PATCH] Add "all_versions" parameter to include all installed
version on rpm.info
Enable "all_versions" parameter for zypper.info_installed
Enable "all_versions" parameter for yumpkg.info_installed
Prevent adding failed packages when pkg name contains the arch (on SUSE)
Add 'all_versions' documentation for info_installed on yum/zypper modules
Add unit tests for info_installed with all_versions
Refactor: use dict.setdefault instead if-else statement
Allow removing only specific package versions with zypper and yum
---
salt/states/pkg.py | 285 +++++++++++++++++++++++----------------------
1 file changed, 146 insertions(+), 139 deletions(-)
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
index 51b5a06e8f..a1b2a122bb 100644
--- a/salt/states/pkg.py
+++ b/salt/states/pkg.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
Installation of packages using OS package managers such as yum or apt-get
=========================================================================
@@ -71,21 +70,16 @@ state module
used. This will be addressed in a future release of Salt.
"""
-# Import python libs
-from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
import logging
import os
import re
-# Import Salt libs
import salt.utils.pkg
import salt.utils.platform
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
-
-# Import 3rd-party libs
from salt.ext import six
from salt.modules.pkg_resource import _repack_pkgs
from salt.output import nested
@@ -323,7 +317,7 @@ def _find_download_targets(
"name": name,
"changes": {},
"result": True,
- "comment": "Version {0} of package '{1}' is already "
+ "comment": "Version {} of package '{}' is already "
"downloaded".format(version, name),
}
@@ -334,7 +328,7 @@ def _find_download_targets(
"name": name,
"changes": {},
"result": True,
- "comment": "Package {0} is already " "downloaded".format(name),
+ "comment": "Package {} is already " "downloaded".format(name),
}
version_spec = False
@@ -349,13 +343,13 @@ def _find_download_targets(
comments.append(
"The following package(s) were not found, and no "
"possible matches were found in the package db: "
- "{0}".format(", ".join(sorted(problems["no_suggest"])))
+ "{}".format(", ".join(sorted(problems["no_suggest"])))
)
if problems.get("suggest"):
- for pkgname, suggestions in six.iteritems(problems["suggest"]):
+ for pkgname, suggestions in problems["suggest"].items():
comments.append(
- "Package '{0}' not found (possible matches: "
- "{1})".format(pkgname, ", ".join(suggestions))
+ "Package '{}' not found (possible matches: "
+ "{})".format(pkgname, ", ".join(suggestions))
)
if comments:
if len(comments) > 1:
@@ -371,7 +365,7 @@ def _find_download_targets(
# Check current downloaded versions against specified versions
targets = {}
problems = []
- for pkgname, pkgver in six.iteritems(to_download):
+ for pkgname, pkgver in to_download.items():
cver = cur_pkgs.get(pkgname, {})
# Package not yet downloaded, so add to targets
if not cver:
@@ -401,7 +395,7 @@ def _find_download_targets(
if not targets:
# All specified packages are already downloaded
- msg = "All specified packages{0} are already downloaded".format(
+ msg = "All specified packages{} are already downloaded".format(
" (matching specified versions)" if version_spec else ""
)
return {"name": name, "changes": {}, "result": True, "comment": msg}
@@ -425,7 +419,7 @@ def _find_advisory_targets(name=None, advisory_ids=None, **kwargs):
"name": name,
"changes": {},
"result": True,
- "comment": "Advisory patch {0} is already " "installed".format(name),
+ "comment": "Advisory patch {} is already " "installed".format(name),
}
# Find out which advisory patches will be targeted in the call to pkg.install
@@ -477,12 +471,22 @@ def _find_remove_targets(
# Check current versions against specified versions
targets = []
problems = []
- for pkgname, pkgver in six.iteritems(to_remove):
+ for pkgname, pkgver in to_remove.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search("/", pkgname))
if __grains__["os"] == "FreeBSD" and origin:
- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == pkgname]
+ cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname]
+ elif __grains__["os_family"] == "Suse":
+ # On SUSE systems. Zypper returns packages without "arch" in name
+ try:
+ namepart, archpart = pkgname.rsplit(".", 1)
+ except ValueError:
+ cver = cur_pkgs.get(pkgname, [])
+ else:
+ if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
+ pkgname = namepart
+ cver = cur_pkgs.get(pkgname, [])
else:
cver = cur_pkgs.get(pkgname, [])
@@ -518,7 +522,7 @@ def _find_remove_targets(
if not targets:
# All specified packages are already absent
- msg = "All specified packages{0} are already absent".format(
+ msg = "All specified packages{} are already absent".format(
" (matching specified versions)" if version_spec else ""
)
return {"name": name, "changes": {}, "result": True, "comment": msg}
@@ -619,7 +623,7 @@ def _find_install_targets(
"name": name,
"changes": {},
"result": False,
- "comment": "Invalidly formatted '{0}' parameter. See "
+ "comment": "Invalidly formatted '{}' parameter. See "
"minion log.".format("pkgs" if pkgs else "sources"),
}
@@ -634,7 +638,7 @@ def _find_install_targets(
"name": name,
"changes": {},
"result": False,
- "comment": "Package {0} not found in the "
+ "comment": "Package {} not found in the "
"repository.".format(name),
}
if version is None:
@@ -656,7 +660,7 @@ def _find_install_targets(
origin = bool(re.search("/", name))
if __grains__["os"] == "FreeBSD" and origin:
- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == name]
+ cver = [k for k, v in cur_pkgs.items() if v["origin"] == name]
else:
cver = cur_pkgs.get(name, [])
@@ -667,7 +671,7 @@ def _find_install_targets(
"name": name,
"changes": {},
"result": True,
- "comment": "Version {0} of package '{1}' is already "
+ "comment": "Version {} of package '{}' is already "
"installed".format(version, name),
}
@@ -678,7 +682,7 @@ def _find_install_targets(
"name": name,
"changes": {},
"result": True,
- "comment": "Package {0} is already " "installed".format(name),
+ "comment": "Package {} is already " "installed".format(name),
}
version_spec = False
@@ -687,21 +691,19 @@ def _find_install_targets(
# enforced. Takes extra time. Disable for improved performance
if not skip_suggestions:
# Perform platform-specific pre-flight checks
- not_installed = dict(
- [
- (name, version)
- for name, version in desired.items()
- if not (
- name in cur_pkgs
- and (
- version is None
- or _fulfills_version_string(
- cur_pkgs[name], version, ignore_epoch=ignore_epoch
- )
+ not_installed = {
+ name: version
+ for name, version in desired.items()
+ if not (
+ name in cur_pkgs
+ and (
+ version is None
+ or _fulfills_version_string(
+ cur_pkgs[name], version, ignore_epoch=ignore_epoch
)
)
- ]
- )
+ )
+ }
if not_installed:
try:
problems = _preflight_check(not_installed, **kwargs)
@@ -713,13 +715,13 @@ def _find_install_targets(
comments.append(
"The following package(s) were not found, and no "
"possible matches were found in the package db: "
- "{0}".format(", ".join(sorted(problems["no_suggest"])))
+ "{}".format(", ".join(sorted(problems["no_suggest"])))
)
if problems.get("suggest"):
- for pkgname, suggestions in six.iteritems(problems["suggest"]):
+ for pkgname, suggestions in problems["suggest"].items():
comments.append(
- "Package '{0}' not found (possible matches: "
- "{1})".format(pkgname, ", ".join(suggestions))
+ "Package '{}' not found (possible matches: "
+ "{})".format(pkgname, ", ".join(suggestions))
)
if comments:
if len(comments) > 1:
@@ -733,9 +735,7 @@ def _find_install_targets(
# Resolve the latest package version for any packages with "latest" in the
# package version
- wants_latest = (
- [] if sources else [x for x, y in six.iteritems(desired) if y == "latest"]
- )
+ wants_latest = [] if sources else [x for x, y in desired.items() if y == "latest"]
if wants_latest:
resolved_latest = __salt__["pkg.latest_version"](
*wants_latest, refresh=refresh, **kwargs
@@ -766,7 +766,7 @@ def _find_install_targets(
problems = []
warnings = []
failed_verify = False
- for package_name, version_string in six.iteritems(desired):
+ for package_name, version_string in desired.items():
cver = cur_pkgs.get(package_name, [])
if resolve_capabilities and not cver and package_name in cur_prov:
cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])
@@ -795,12 +795,12 @@ def _find_install_targets(
problems.append(err.format(version_string, "file not found"))
continue
elif not os.path.exists(cached_path):
- problems.append("{0} does not exist on minion".format(version_string))
+ problems.append("{} does not exist on minion".format(version_string))
continue
source_info = __salt__["lowpkg.bin_pkg_info"](cached_path)
if source_info is None:
warnings.append(
- "Failed to parse metadata for {0}".format(version_string)
+ "Failed to parse metadata for {}".format(version_string)
)
continue
else:
@@ -923,13 +923,24 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None):
has_origin = "/" in pkgname
if __grains__["os"] == "FreeBSD" and has_origin:
- cver = [k for k, v in six.iteritems(new_pkgs) if v["origin"] == pkgname]
+ cver = [k for k, v in new_pkgs.items() if v["origin"] == pkgname]
elif __grains__["os"] == "MacOS" and has_origin:
cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split("/")[-1]))
elif __grains__["os"] == "OpenBSD":
cver = new_pkgs.get(pkgname.split("%")[0])
elif __grains__["os_family"] == "Debian":
cver = new_pkgs.get(pkgname.split("=")[0])
+ elif __grains__["os_family"] == "Suse":
+ # On SUSE systems. Zypper returns packages without "arch" in name
+ try:
+ namepart, archpart = pkgname.rsplit(".", 1)
+ except ValueError:
+ cver = new_pkgs.get(pkgname)
+ else:
+ if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
+ cver = new_pkgs.get(namepart)
+ else:
+ cver = new_pkgs.get(pkgname)
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
@@ -964,7 +975,7 @@ def _get_desired_pkg(name, desired):
oper = ""
else:
oper = "="
- return "{0}{1}{2}".format(name, oper, "" if not desired[name] else desired[name])
+ return "{}{}{}".format(name, oper, "" if not desired[name] else desired[name])
def _preflight_check(desired, fromrepo, **kwargs):
@@ -1709,8 +1720,8 @@ def installed(
"comment": "pkg.verify not implemented",
}
- if not isinstance(version, six.string_types) and version is not None:
- version = six.text_type(version)
+ if not isinstance(version, str) and version is not None:
+ version = str(version)
kwargs["allow_updates"] = allow_updates
@@ -1754,7 +1765,7 @@ def installed(
"name": name,
"changes": {},
"result": False,
- "comment": six.text_type(exc),
+ "comment": str(exc),
}
if "result" in hold_ret and not hold_ret["result"]:
@@ -1763,7 +1774,7 @@ def installed(
"changes": {},
"result": False,
"comment": "An error was encountered while "
- "holding/unholding package(s): {0}".format(hold_ret["comment"]),
+ "holding/unholding package(s): {}".format(hold_ret["comment"]),
}
else:
modified_hold = [
@@ -1779,16 +1790,16 @@ def installed(
]
for i in modified_hold:
- result["comment"] += ".\n{0}".format(i["comment"])
+ result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
result["changes"][i["name"]] = i["changes"]
for i in not_modified_hold:
- result["comment"] += ".\n{0}".format(i["comment"])
+ result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
for i in failed_hold:
- result["comment"] += ".\n{0}".format(i["comment"])
+ result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
return result
@@ -1805,8 +1816,8 @@ def installed(
# Remove any targets not returned by _find_install_targets
if pkgs:
- pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
- pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
+ pkgs = [dict([(x, y)]) for x, y in targets.items()]
+ pkgs.extend([dict([(x, y)]) for x, y in to_reinstall.items()])
elif sources:
oldsources = sources
sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets]
@@ -1823,12 +1834,12 @@ def installed(
summary = ", ".join([_get_desired_pkg(x, targets) for x in targets])
comment.append(
"The following packages would be "
- "installed/updated: {0}".format(summary)
+ "installed/updated: {}".format(summary)
)
if to_unpurge:
comment.append(
"The following packages would have their selection status "
- "changed from 'purge' to 'install': {0}".format(", ".join(to_unpurge))
+ "changed from 'purge' to 'install': {}".format(", ".join(to_unpurge))
)
if to_reinstall:
# Add a comment for each package in to_reinstall with its
@@ -1852,7 +1863,7 @@ def installed(
else:
pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
comment.append(
- "Package '{0}' would be reinstalled because the "
+ "Package '{}' would be reinstalled because the "
"following files have been altered:".format(pkgstr)
)
comment.append(_nested_output(altered_files[reinstall_pkg]))
@@ -1896,7 +1907,7 @@ def installed(
ret["changes"] = {}
ret["comment"] = (
"An error was encountered while installing "
- "package(s): {0}".format(exc)
+ "package(s): {}".format(exc)
)
if warnings:
ret.setdefault("warnings", []).extend(warnings)
@@ -1907,7 +1918,7 @@ def installed(
if isinstance(pkg_ret, dict):
changes["installed"].update(pkg_ret)
- elif isinstance(pkg_ret, six.string_types):
+ elif isinstance(pkg_ret, str):
comment.append(pkg_ret)
# Code below will be looking for a dictionary. If this is a string
# it means that there was an exception raised and that no packages
@@ -1921,7 +1932,7 @@ def installed(
action = "pkg.hold" if kwargs["hold"] else "pkg.unhold"
hold_ret = __salt__[action](name=name, pkgs=desired)
except (CommandExecutionError, SaltInvocationError) as exc:
- comment.append(six.text_type(exc))
+ comment.append(str(exc))
ret = {
"name": name,
"changes": changes,
@@ -1938,7 +1949,7 @@ def installed(
"changes": {},
"result": False,
"comment": "An error was encountered while "
- "holding/unholding package(s): {0}".format(hold_ret["comment"]),
+ "holding/unholding package(s): {}".format(hold_ret["comment"]),
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
@@ -1996,11 +2007,11 @@ def installed(
summary = ", ".join([_get_desired_pkg(x, desired) for x in modified])
if len(summary) < 20:
comment.append(
- "The following packages were installed/updated: " "{0}".format(summary)
+ "The following packages were installed/updated: " "{}".format(summary)
)
else:
comment.append(
- "{0} targeted package{1} {2} installed/updated.".format(
+ "{} targeted package{} {} installed/updated.".format(
len(modified),
"s" if len(modified) > 1 else "",
"were" if len(modified) > 1 else "was",
@@ -2014,14 +2025,14 @@ def installed(
comment.append(i["comment"])
if len(changes[change_name]["new"]) > 0:
changes[change_name]["new"] += "\n"
- changes[change_name]["new"] += "{0}".format(i["changes"]["new"])
+ changes[change_name]["new"] += "{}".format(i["changes"]["new"])
if len(changes[change_name]["old"]) > 0:
changes[change_name]["old"] += "\n"
- changes[change_name]["old"] += "{0}".format(i["changes"]["old"])
+ changes[change_name]["old"] += "{}".format(i["changes"]["old"])
else:
comment.append(i["comment"])
changes[change_name] = {}
- changes[change_name]["new"] = "{0}".format(i["changes"]["new"])
+ changes[change_name]["new"] = "{}".format(i["changes"]["new"])
# Any requested packages that were not targeted for install or reinstall
if not_modified:
@@ -2031,11 +2042,11 @@ def installed(
summary = ", ".join([_get_desired_pkg(x, desired) for x in not_modified])
if len(not_modified) <= 20:
comment.append(
- "The following packages were already installed: " "{0}".format(summary)
+ "The following packages were already installed: " "{}".format(summary)
)
else:
comment.append(
- "{0} targeted package{1} {2} already installed".format(
+ "{} targeted package{} {} already installed".format(
len(not_modified),
"s" if len(not_modified) > 1 else "",
"were" if len(not_modified) > 1 else "was",
@@ -2054,7 +2065,7 @@ def installed(
else:
summary = ", ".join([_get_desired_pkg(x, desired) for x in failed])
comment.insert(
- 0, "The following packages failed to " "install/update: {0}".format(summary)
+ 0, "The following packages failed to " "install/update: {}".format(summary)
)
result = False
@@ -2118,7 +2129,7 @@ def installed(
pkgstr = modified_pkg
else:
pkgstr = _get_desired_pkg(modified_pkg, desired)
- msg = "Package {0} was reinstalled.".format(pkgstr)
+ msg = "Package {} was reinstalled.".format(pkgstr)
if modified_pkg in altered_files:
msg += " The following files were remediated:"
comment.append(msg)
@@ -2133,7 +2144,7 @@ def installed(
pkgstr = failed_pkg
else:
pkgstr = _get_desired_pkg(failed_pkg, desired)
- msg = "Reinstall was not successful for package {0}.".format(pkgstr)
+ msg = "Reinstall was not successful for package {}.".format(pkgstr)
if failed_pkg in altered_files:
msg += " The following files could not be remediated:"
comment.append(msg)
@@ -2274,12 +2285,12 @@ def downloaded(
ret["result"] = False
ret[
"comment"
- ] = "An error was encountered while checking targets: " "{0}".format(targets)
+ ] = "An error was encountered while checking targets: " "{}".format(targets)
return ret
if __opts__["test"]:
summary = ", ".join(targets)
- ret["comment"] = "The following packages would be " "downloaded: {0}".format(
+ ret["comment"] = "The following packages would be " "downloaded: {}".format(
summary
)
return ret
@@ -2306,7 +2317,7 @@ def downloaded(
ret["changes"] = {}
ret["comment"] = (
"An error was encountered while downloading "
- "package(s): {0}".format(exc)
+ "package(s): {}".format(exc)
)
return ret
@@ -2316,13 +2327,13 @@ def downloaded(
if failed:
summary = ", ".join([_get_desired_pkg(x, targets) for x in failed])
ret["result"] = False
- ret["comment"] = "The following packages failed to " "download: {0}".format(
+ ret["comment"] = "The following packages failed to " "download: {}".format(
summary
)
if not ret["changes"] and not ret["comment"]:
ret["result"] = True
- ret["comment"] = "Packages downloaded: " "{0}".format(", ".join(targets))
+ ret["comment"] = "Packages downloaded: " "{}".format(", ".join(targets))
return ret
@@ -2382,14 +2393,14 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
ret["result"] = False
ret[
"comment"
- ] = "An error was encountered while checking targets: " "{0}".format(targets)
+ ] = "An error was encountered while checking targets: " "{}".format(targets)
return ret
if __opts__["test"]:
summary = ", ".join(targets)
ret[
"comment"
- ] = "The following advisory patches would be " "downloaded: {0}".format(summary)
+ ] = "The following advisory patches would be " "downloaded: {}".format(summary)
return ret
try:
@@ -2408,7 +2419,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
ret["changes"] = {}
ret["comment"] = (
"An error was encountered while downloading "
- "package(s): {0}".format(exc)
+ "package(s): {}".format(exc)
)
return ret
@@ -2417,7 +2428,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
ret["result"] = True
ret["comment"] = (
"Advisory patch is not needed or related packages "
- "are already {0}".format(status)
+ "are already {}".format(status)
)
return ret
@@ -2674,7 +2685,7 @@ def latest(
"changes": {},
"result": False,
"comment": "An error was encountered while checking the "
- "newest available version of package(s): {0}".format(exc),
+ "newest available version of package(s): {}".format(exc),
}
try:
@@ -2683,9 +2694,9 @@ def latest(
return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
# Repack the cur/avail data if only a single package is being checked
- if isinstance(cur, six.string_types):
+ if isinstance(cur, str):
cur = {desired_pkgs[0]: cur}
- if isinstance(avail, six.string_types):
+ if isinstance(avail, str):
avail = {desired_pkgs[0]: avail}
targets = {}
@@ -2695,7 +2706,7 @@ def latest(
# Package either a) is up-to-date, or b) does not exist
if not cur.get(pkg):
# Package does not exist
- msg = "No information found for '{0}'.".format(pkg)
+ msg = "No information found for '{}'.".format(pkg)
log.error(msg)
problems.append(msg)
elif (
@@ -2741,12 +2752,12 @@ def latest(
comments.append(
"The following packages are already up-to-date: "
+ ", ".join(
- ["{0} ({1})".format(x, cur[x]) for x in sorted(up_to_date)]
+ ["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)]
)
)
else:
comments.append(
- "{0} packages are already up-to-date".format(up_to_date_count)
+ "{} packages are already up-to-date".format(up_to_date_count)
)
return {
@@ -2784,7 +2795,7 @@ def latest(
"changes": {},
"result": False,
"comment": "An error was encountered while installing "
- "package(s): {0}".format(exc),
+ "package(s): {}".format(exc),
}
if changes:
@@ -2800,7 +2811,7 @@ def latest(
comments = []
if failed:
- msg = "The following packages failed to update: " "{0}".format(
+ msg = "The following packages failed to update: " "{}".format(
", ".join(sorted(failed))
)
comments.append(msg)
@@ -2808,19 +2819,17 @@ def latest(
msg = (
"The following packages were successfully "
"installed/upgraded: "
- "{0}".format(", ".join(sorted(successful)))
+ "{}".format(", ".join(sorted(successful)))
)
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = (
"The following packages were already up-to-date: "
- "{0}".format(", ".join(sorted(up_to_date)))
+ "{}".format(", ".join(sorted(up_to_date)))
)
else:
- msg = "{0} packages were already up-to-date ".format(
- len(up_to_date)
- )
+ msg = "{} packages were already up-to-date ".format(len(up_to_date))
comments.append(msg)
return {
@@ -2832,18 +2841,18 @@ def latest(
else:
if len(targets) > 10:
comment = (
- "{0} targeted packages failed to update. "
+ "{} targeted packages failed to update. "
"See debug log for details.".format(len(targets))
)
elif len(targets) > 1:
comment = (
"The following targeted packages failed to update. "
- "See debug log for details: ({0}).".format(
+ "See debug log for details: ({}).".format(
", ".join(sorted(targets))
)
)
else:
- comment = "Package {0} failed to " "update.".format(
+ comment = "Package {} failed to " "update.".format(
next(iter(list(targets.keys())))
)
if up_to_date:
@@ -2851,10 +2860,10 @@ def latest(
comment += (
" The following packages were already "
"up-to-date: "
- "{0}".format(", ".join(sorted(up_to_date)))
+ "{}".format(", ".join(sorted(up_to_date)))
)
else:
- comment += "{0} packages were already " "up-to-date".format(
+ comment += "{} packages were already " "up-to-date".format(
len(up_to_date)
)
@@ -2866,13 +2875,13 @@ def latest(
}
else:
if len(desired_pkgs) > 10:
- comment = "All {0} packages are up-to-date.".format(len(desired_pkgs))
+ comment = "All {} packages are up-to-date.".format(len(desired_pkgs))
elif len(desired_pkgs) > 1:
- comment = "All packages are up-to-date " "({0}).".format(
+ comment = "All packages are up-to-date " "({}).".format(
", ".join(sorted(desired_pkgs))
)
else:
- comment = "Package {0} is already " "up-to-date".format(desired_pkgs[0])
+ comment = "Package {} is already " "up-to-date".format(desired_pkgs[0])
return {"name": name, "changes": {}, "result": True, "comment": comment}
@@ -2894,8 +2903,7 @@ def _uninstall(
"name": name,
"changes": {},
"result": False,
- "comment": "Invalid action '{0}'. "
- "This is probably a bug.".format(action),
+ "comment": "Invalid action '{}'. " "This is probably a bug.".format(action),
}
try:
@@ -2908,7 +2916,7 @@ def _uninstall(
"changes": {},
"result": False,
"comment": "An error was encountered while parsing targets: "
- "{0}".format(exc),
+ "{}".format(exc),
}
targets = _find_remove_targets(
name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs
@@ -2921,7 +2929,7 @@ def _uninstall(
"changes": {},
"result": False,
"comment": "An error was encountered while checking targets: "
- "{0}".format(targets),
+ "{}".format(targets),
}
if action == "purge":
old_removed = __salt__["pkg.list_pkgs"](
@@ -2936,7 +2944,7 @@ def _uninstall(
"changes": {},
"result": True,
"comment": "None of the targeted packages are installed"
- "{0}".format(" or partially installed" if action == "purge" else ""),
+ "{}".format(" or partially installed" if action == "purge" else ""),
}
if __opts__["test"]:
@@ -2944,11 +2952,11 @@ def _uninstall(
"name": name,
"changes": {},
"result": None,
- "comment": "The following packages will be {0}d: "
- "{1}.".format(action, ", ".join(targets)),
+ "comment": "The following packages will be {}d: "
+ "{}.".format(action, ", ".join(targets)),
}
- changes = __salt__["pkg.{0}".format(action)](
+ changes = __salt__["pkg.{}".format(action)](
name, pkgs=pkgs, version=version, **kwargs
)
new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
@@ -2975,8 +2983,8 @@ def _uninstall(
"name": name,
"changes": changes,
"result": False,
- "comment": "The following packages failed to {0}: "
- "{1}.".format(action, ", ".join(failed)),
+ "comment": "The following packages failed to {}: "
+ "{}.".format(action, ", ".join(failed)),
}
comments = []
@@ -2984,14 +2992,13 @@ def _uninstall(
if not_installed:
comments.append(
"The following packages were not installed: "
- "{0}".format(", ".join(not_installed))
+ "{}".format(", ".join(not_installed))
)
comments.append(
- "The following packages were {0}d: "
- "{1}.".format(action, ", ".join(targets))
+ "The following packages were {}d: " "{}.".format(action, ", ".join(targets))
)
else:
- comments.append("All targeted packages were {0}d.".format(action))
+ comments.append("All targeted packages were {}d.".format(action))
return {
"name": name,
@@ -3089,7 +3096,7 @@ def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **
ret["changes"] = {}
ret[
"comment"
- ] = "An error was encountered while removing " "package(s): {0}".format(exc)
+ ] = "An error was encountered while removing " "package(s): {}".format(exc)
return ret
@@ -3181,7 +3188,7 @@ def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **k
ret["changes"] = {}
ret[
"comment"
- ] = "An error was encountered while purging " "package(s): {0}".format(exc)
+ ] = "An error was encountered while purging " "package(s): {}".format(exc)
return ret
@@ -3247,17 +3254,17 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
"new": pkgver,
"old": __salt__["pkg.version"](pkgname, **kwargs),
}
- for pkgname, pkgver in six.iteritems(packages)
+ for pkgname, pkgver in packages.items()
}
if isinstance(pkgs, list):
packages = [pkg for pkg in packages if pkg in pkgs]
expected = {
pkgname: pkgver
- for pkgname, pkgver in six.iteritems(expected)
+ for pkgname, pkgver in expected.items()
if pkgname in pkgs
}
except Exception as exc: # pylint: disable=broad-except
- ret["comment"] = six.text_type(exc)
+ ret["comment"] = str(exc)
return ret
else:
ret["comment"] = "refresh must be either True or False"
@@ -3284,16 +3291,16 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
ret["changes"] = {}
ret[
"comment"
- ] = "An error was encountered while updating " "packages: {0}".format(exc)
+ ] = "An error was encountered while updating " "packages: {}".format(exc)
return ret
# If a package list was provided, ensure those packages were updated
missing = []
if isinstance(pkgs, list):
- missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret["changes"]]
+ missing = [pkg for pkg in expected.keys() if pkg not in ret["changes"]]
if missing:
- ret["comment"] = "The following package(s) failed to update: {0}".format(
+ ret["comment"] = "The following package(s) failed to update: {}".format(
", ".join(missing)
)
ret["result"] = False
@@ -3362,8 +3369,8 @@ def group_installed(name, skip=None, include=None, **kwargs):
ret["comment"] = "skip must be formatted as a list"
return ret
for idx, item in enumerate(skip):
- if not isinstance(item, six.string_types):
- skip[idx] = six.text_type(item)
+ if not isinstance(item, str):
+ skip[idx] = str(item)
if include is None:
include = []
@@ -3372,15 +3379,15 @@ def group_installed(name, skip=None, include=None, **kwargs):
ret["comment"] = "include must be formatted as a list"
return ret
for idx, item in enumerate(include):
- if not isinstance(item, six.string_types):
- include[idx] = six.text_type(item)
+ if not isinstance(item, str):
+ include[idx] = str(item)
try:
diff = __salt__["pkg.group_diff"](name)
except CommandExecutionError as err:
ret["comment"] = (
"An error was encountered while installing/updating "
- "group '{0}': {1}.".format(name, err)
+ "group '{}': {}.".format(name, err)
)
return ret
@@ -3390,7 +3397,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
if invalid_skip:
ret[
"comment"
- ] = "The following mandatory packages cannot be skipped: {0}".format(
+ ] = "The following mandatory packages cannot be skipped: {}".format(
", ".join(invalid_skip)
)
return ret
@@ -3401,7 +3408,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
if not targets:
ret["result"] = True
- ret["comment"] = "Group '{0}' is already installed".format(name)
+ ret["comment"] = "Group '{}' is already installed".format(name)
return ret
partially_installed = (
@@ -3415,9 +3422,9 @@ def group_installed(name, skip=None, include=None, **kwargs):
if partially_installed:
ret[
"comment"
- ] = "Group '{0}' is partially installed and will be updated".format(name)
+ ] = "Group '{}' is partially installed and will be updated".format(name)
else:
- ret["comment"] = "Group '{0}' will be installed".format(name)
+ ret["comment"] = "Group '{}' will be installed".format(name)
return ret
try:
@@ -3432,19 +3439,19 @@ def group_installed(name, skip=None, include=None, **kwargs):
ret["changes"] = {}
ret["comment"] = (
"An error was encountered while "
- "installing/updating group '{0}': {1}".format(name, exc)
+ "installing/updating group '{}': {}".format(name, exc)
)
return ret
failed = [x for x in targets if x not in __salt__["pkg.list_pkgs"](**kwargs)]
if failed:
- ret["comment"] = "Failed to install the following packages: {0}".format(
+ ret["comment"] = "Failed to install the following packages: {}".format(
", ".join(failed)
)
return ret
ret["result"] = True
- ret["comment"] = "Group '{0}' was {1}".format(
+ ret["comment"] = "Group '{}' was {}".format(
name, "updated" if partially_installed else "installed"
)
return ret
@@ -3561,6 +3568,6 @@ def mod_watch(name, **kwargs):
return {
"name": name,
"changes": {},
- "comment": "pkg.{0} does not work with the watch requisite".format(sfun),
+ "comment": "pkg.{} does not work with the watch requisite".format(sfun),
"result": False,
}
--
2.29.2

View File

@ -1,131 +0,0 @@
From beec6f3945bda722bfe9c0aa606065f04c89bc62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?=
<juliogonzalez@users.noreply.github.com>
Date: Wed, 24 Mar 2021 14:12:34 +0100
Subject: [PATCH] Add AlmaLinux and Alibaba Cloud Linux to the OS
Family list (#341)
* Add AlmaLinux and Alibaba Cloud Linux to the OS Family list
* Fix some grains tests
---
salt/grains/core.py | 4 +++
tests/unit/grains/test_core.py | 51 +++++++++++++++++++++++++++++++++-
2 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 5634327623..09f9d29788 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1532,6 +1532,7 @@ _OS_NAME_MAP = {
"oracleserv": "OEL",
"cloudserve": "CloudLinux",
"cloudlinux": "CloudLinux",
+ "almalinux": "AlmaLinux",
"pidora": "Fedora",
"scientific": "ScientificLinux",
"synology": "Synology",
@@ -1546,6 +1547,7 @@ _OS_NAME_MAP = {
"slesexpand": "RES",
"linuxmint": "Mint",
"neon": "KDE neon",
+ "alibaba": "Alibaba Cloud (Aliyun)",
}
# Map the 'os' grain to the 'os_family' grain
@@ -1563,6 +1565,7 @@ _OS_FAMILY_MAP = {
"Scientific": "RedHat",
"Amazon": "RedHat",
"CloudLinux": "RedHat",
+ "AlmaLinux": "RedHat",
"OVS": "RedHat",
"OEL": "RedHat",
"XCP": "RedHat",
@@ -1619,6 +1622,7 @@ _OS_FAMILY_MAP = {
"AIX": "AIX",
"TurnKey": "Debian",
"AstraLinuxCE": "Debian",
+ "Alibaba Cloud (Aliyun)": "RedHat",
}
# Matches any possible format:
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 15de4e363e..6aa05abe40 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -678,6 +678,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests(None, _os_release_map, expectation)
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ def test_almalinux_8_os_grains(self):
+ """
+ Test if OS grains are parsed correctly in AlmaLinux 8
+ """
+ _os_release_map = {
+ "os_release_file": {
+ "NAME": "AlmaLinux",
+ "VERSION_ID": "8.3",
+ "PRETTY_NAME": "AlmaLinux 8",
+ "ID": "almalinux",
+ "ANSI_COLOR": "0;31",
+ "CPE_NAME": "cpe:/o:almalinux:almalinux:8.3",
+ },
+ "_linux_distribution": ("almaLinux", "8.3", ""),
+ }
+
+ expectation = {
+ "os": "AlmaLinux",
+ "os_family": "RedHat",
+ "oscodename": "AlmaLinux 8",
+ "osfullname": "AlmaLinux",
+ "osrelease": "8.3",
+ "osrelease_info": (8, 3,),
+ "osmajorrelease": 8,
+ "osfinger": "AlmaLinux-8",
+ }
+ self._run_os_grains_tests(None, _os_release_map, expectation)
+
def test_unicode_error(self):
raise_unicode_mock = MagicMock(
name="raise_unicode_error", side_effect=UnicodeError
@@ -733,7 +762,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel"
"""
_os_release_map = {
- "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"),
+ "_linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"),
}
expectation = {
"os": "AstraLinuxCE",
@@ -747,6 +776,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation)
+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
+ def test_aliyunlinux2_os_grains(self):
+ '''
+ Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS
+ '''
+ _os_release_map = {
+ '_linux_distribution': ('Alibaba Cloud Linux (Aliyun Linux)', '2.1903', 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)'),
+ }
+ expectation = {
+ 'os': 'Alibaba Cloud (Aliyun)',
+ 'os_family': 'RedHat',
+ 'oscodename': 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)',
+ 'osfullname': 'Alibaba Cloud Linux (Aliyun Linux)',
+ 'osrelease': '2.1903',
+ 'osrelease_info': (2, 1903),
+ 'osmajorrelease': 2,
+ 'osfinger': 'Alibaba Cloud Linux (Aliyun Linux)-2',
+ }
+ self._run_os_grains_tests(None, _os_release_map, expectation)
+
@skipIf(not salt.utils.platform.is_windows(), "System is not Windows")
def test_windows_platform_data(self):
"""
--
2.30.2

View File

@ -1,4 +1,4 @@
From d5569023c64a3fcec57a7aa6823ee94e8be91b3d Mon Sep 17 00:00:00 2001
From 30366101c20eefd2411482138edfa0ca0c8a3b06 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?=
<juliogonzalez@users.noreply.github.com>
Date: Wed, 12 Feb 2020 10:05:45 +0100
@ -11,24 +11,24 @@ Subject: [PATCH] Add Astra Linux Common Edition to the OS Family list
2 files changed, 21 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 5dff6ecfd4..5634327623 100644
index e007f40c92..19937f008e 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1618,6 +1618,7 @@ _OS_FAMILY_MAP = {
"Funtoo": "Gentoo",
@@ -1636,6 +1636,7 @@ _OS_FAMILY_MAP = {
"AIX": "AIX",
"TurnKey": "Debian",
"Pop": "Debian",
+ "AstraLinuxCE": "Debian",
}
# Matches any possible format:
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 85d434dd9d..196dbcf83d 100644
index 7173f04979..e8845e2dfa 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -728,6 +728,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
@@ -826,6 +826,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests("ubuntu-17.10", _os_release_map, expectation)
self._run_os_grains_tests("pop-20.10", _os_release_map, expectation)
+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
+ def test_astralinuxce_2_os_grains(self):
@ -36,7 +36,7 @@ index 85d434dd9d..196dbcf83d 100644
+ Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel"
+ """
+ _os_release_map = {
+ "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"),
+ "_linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"),
+ }
+ expectation = {
+ "os": "AstraLinuxCE",
@ -54,6 +54,6 @@ index 85d434dd9d..196dbcf83d 100644
def test_windows_platform_data(self):
"""
--
2.29.2
2.33.0

View File

@ -1,55 +0,0 @@
From 66f6c2540a151487b26c89a2bb66199d6c65c18d Mon Sep 17 00:00:00 2001
From: Marcelo Chiaradia <mchiaradia@suse.com>
Date: Thu, 4 Apr 2019 13:57:38 +0200
Subject: [PATCH] Add 'batch_presence_ping_timeout' and
'batch_presence_ping_gather_job_timeout' parameters for synchronous batching
---
salt/cli/batch.py | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/salt/cli/batch.py b/salt/cli/batch.py
index 527cffdeb7..2bc5444aef 100644
--- a/salt/cli/batch.py
+++ b/salt/cli/batch.py
@@ -77,6 +77,13 @@ def batch_get_opts(
if key not in opts:
opts[key] = val
+ opts["batch_presence_ping_timeout"] = kwargs.get(
+ "batch_presence_ping_timeout", opts["timeout"]
+ )
+ opts["batch_presence_ping_gather_job_timeout"] = kwargs.get(
+ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"]
+ )
+
return opts
@@ -115,7 +122,7 @@ class Batch:
self.opts["tgt"],
"test.ping",
[],
- self.opts["timeout"],
+ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]),
]
selected_target_option = self.opts.get("selected_target_option", None)
@@ -126,7 +133,12 @@ class Batch:
self.pub_kwargs["yield_pub_data"] = True
ping_gen = self.local.cmd_iter(
- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs
+ *args,
+ gather_job_timeout=self.opts.get(
+ "batch_presence_ping_gather_job_timeout",
+ self.opts["gather_job_timeout"],
+ ),
+ **self.pub_kwargs
)
# Broadcast to targets
--
2.29.2

View File

@ -1,73 +0,0 @@
From c845d56fdf1762586b1f210b1eb49193893d4312 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Tue, 9 Oct 2018 14:08:50 +0200
Subject: [PATCH] Add CPE_NAME for osversion* grain parsing (U#49946)
Remove unnecessary linebreak
Override VERSION_ID from os-release, if CPE_NAME is given
Add unit test for WFN format of CPE_NAME
Add unit test for v2.3 of CPE format
Add unit test for broken CPE_NAME
Prevent possible crash if CPE_NAME is wrongly written in the distro
Add part parsing
Keep CPE_NAME only for opensuse series
Remove linebreak
Expand unit test to verify part name
Fix proper part name in the string-bound CPE
---
salt/grains/core.py | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 5535584d1b..bc3cf129cd 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1732,6 +1732,34 @@ def _parse_cpe_name(cpe):
return ret
+def _parse_cpe_name(cpe):
+ '''
+ Parse CPE_NAME data from the os-release
+
+ Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
+
+ :param cpe:
+ :return:
+ '''
+ part = {
+ 'o': 'operating system',
+ 'h': 'hardware',
+ 'a': 'application',
+ }
+ ret = {}
+ cpe = (cpe or '').split(':')
+ if len(cpe) > 4 and cpe[0] == 'cpe':
+ if cpe[1].startswith('/'): # WFN to URI
+ ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
+ ret['phase'] = cpe[5] if len(cpe) > 5 else None
+ ret['part'] = part.get(cpe[1][1:])
+ elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
+ ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
+ ret['part'] = part.get(cpe[2])
+
+ return ret
+
+
def os_data():
"""
Return grains pertaining to the operating system
--
2.29.2

View File

@ -1,18 +1,19 @@
From 713ccfdc5c6733495d3ce7f26a8cfeddb8e9e9c4 Mon Sep 17 00:00:00 2001
From bdb48ed82c755407bc413fa445e057a6da5f1e87 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 21 Jun 2018 11:57:57 +0100
Subject: [PATCH] Add custom SUSE capabilities as Grains
Add new custom SUSE capability for saltutil state module
---
salt/grains/extra.py | 7 +++++++
1 file changed, 7 insertions(+)
salt/grains/extra.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
index 2fdbe6526a..ddc22293ea 100644
index 2fdbe6526a..0eec27e628 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
@@ -66,3 +66,10 @@ def config():
@@ -66,3 +66,11 @@ def config():
log.warning("Bad syntax in grains file! Skipping.")
return {}
return {}
@ -21,9 +22,10 @@ index 2fdbe6526a..ddc22293ea 100644
+def suse_backported_capabilities():
+ return {
+ '__suse_reserved_pkg_all_versions_support': True,
+ '__suse_reserved_pkg_patches_support': True
+ '__suse_reserved_pkg_patches_support': True,
+ '__suse_reserved_saltutil_states_support': True
+ }
--
2.29.2
2.33.0

View File

@ -1,179 +0,0 @@
From 355e1e29e8f3286eeb13bc2d05089c096c9e01e3 Mon Sep 17 00:00:00 2001
From: Alexander Graul <agraul@suse.com>
Date: Mon, 18 May 2020 16:39:27 +0200
Subject: [PATCH] Add docker logout (#237)
Docker logout works analog to login. It takes none, one or more registries as
arguments. If there are no arguments, all known (specified in pillar)
docker registries are logged out of. If arguments are present, they are
interpreted as a list of docker registries to log out of.
---
salt/modules/dockermod.py | 80 ++++++++++++++++++++++++++++
tests/unit/modules/test_dockermod.py | 59 ++++++++++++++++++++
2 files changed, 139 insertions(+)
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
index 934038c927..176b4db926 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
@@ -1586,6 +1586,86 @@ def logout(*registries):
return ret
+def logout(*registries):
+ """
+ .. versionadded:: 3001
+
+ Performs a ``docker logout`` to remove the saved authentication details for
+ one or more configured repositories.
+
+ Multiple registry URLs (matching those configured in Pillar) can be passed,
+ and Salt will attempt to logout of *just* those registries. If no registry
+ URLs are provided, Salt will attempt to logout of *all* configured
+ registries.
+
+ **RETURN DATA**
+
+ A dictionary containing the following keys:
+
+ - ``Results`` - A dictionary mapping registry URLs to the authentication
+ result. ``True`` means a successful logout, ``False`` means a failed
+ logout.
+ - ``Errors`` - A list of errors encountered during the course of this
+ function.
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt myminion docker.logout
+ salt myminion docker.logout hub
+ salt myminion docker.logout hub https://mydomain.tld/registry/
+ """
+ # NOTE: This function uses the "docker logout" CLI command to remove
+ # authentication information from config.json. docker-py does not support
+ # this usecase (see https://github.com/docker/docker-py/issues/1091)
+
+ # To logout of all known (to Salt) docker registries, they have to be collected first
+ registry_auth = __salt__["config.get"]("docker-registries", {})
+ ret = {"retcode": 0}
+ errors = ret.setdefault("Errors", [])
+ if not isinstance(registry_auth, dict):
+ errors.append("'docker-registries' Pillar value must be a dictionary")
+ registry_auth = {}
+ for reg_name, reg_conf in six.iteritems(
+ __salt__["config.option"]("*-docker-registries", wildcard=True)
+ ):
+ try:
+ registry_auth.update(reg_conf)
+ except TypeError:
+ errors.append(
+ "Docker registry '{0}' was not specified as a "
+ "dictionary".format(reg_name)
+ )
+
+ # If no registries passed, we will logout of all known registries
+ if not registries:
+ registries = list(registry_auth)
+
+ results = ret.setdefault("Results", {})
+ for registry in registries:
+ if registry not in registry_auth:
+ errors.append("No match found for registry '{0}'".format(registry))
+ continue
+ else:
+ cmd = ["docker", "logout"]
+ if registry.lower() != "hub":
+ cmd.append(registry)
+ log.debug("Attempting to logout of docker registry '%s'", registry)
+ logout_cmd = __salt__["cmd.run_all"](
+ cmd, python_shell=False, output_loglevel="quiet",
+ )
+ results[registry] = logout_cmd["retcode"] == 0
+ if not results[registry]:
+ if logout_cmd["stderr"]:
+ errors.append(logout_cmd["stderr"])
+ elif logout_cmd["stdout"]:
+ errors.append(logout_cmd["stdout"])
+ if errors:
+ ret["retcode"] = 1
+ return ret
+
+
# Functions for information gathering
def depends(name):
"""
diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py
index 34e2e9c610..48526acb71 100644
--- a/tests/unit/modules/test_dockermod.py
+++ b/tests/unit/modules/test_dockermod.py
@@ -199,6 +199,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
output_loglevel="quiet",
)
+ def test_logout_calls_docker_cli_logout_all(self):
+ client = Mock()
+ get_client_mock = MagicMock(return_value=client)
+ ref_out = {"stdout": "", "stderr": "", "retcode": 0}
+ registry_auth_data = {
+ "portus.example.com:5000": {
+ "username": "admin",
+ "password": "linux12345",
+ "email": "tux@example.com",
+ },
+ "portus2.example.com:5000": {
+ "username": "admin",
+ "password": "linux12345",
+ "email": "tux@example.com",
+ },
+ }
+
+ docker_mock = MagicMock(return_value=ref_out)
+ with patch.object(docker_mod, "_get_client", get_client_mock):
+ dunder_salt = {
+ "config.get": MagicMock(return_value=registry_auth_data),
+ "cmd.run_all": docker_mock,
+ "config.option": MagicMock(return_value={}),
+ }
+ with patch.dict(docker_mod.__salt__, dunder_salt):
+ ret = docker_mod.logout()
+ assert "retcode" in ret
+ assert ret["retcode"] == 0
+ assert docker_mock.call_count == 2
+
+ def test_logout_calls_docker_cli_logout_single(self):
+ client = Mock()
+ get_client_mock = MagicMock(return_value=client)
+ ref_out = {"stdout": "", "stderr": "", "retcode": 0}
+ registry_auth_data = {
+ "portus.example.com:5000": {
+ "username": "admin",
+ "password": "linux12345",
+ "email": "tux@example.com",
+ }
+ }
+ docker_mock = MagicMock(return_value=ref_out)
+ with patch.object(docker_mod, "_get_client", get_client_mock):
+ dunder_salt = {
+ "config.get": MagicMock(return_value=registry_auth_data),
+ "cmd.run_all": docker_mock,
+ "config.option": MagicMock(return_value={}),
+ }
+ with patch.dict(docker_mod.__salt__, dunder_salt):
+ ret = docker_mod.logout("portus.example.com:5000")
+ assert "retcode" in ret
+ assert ret["retcode"] == 0
+ docker_mock.assert_called_with(
+ ["docker", "logout", "portus.example.com:5000"],
+ python_shell=False,
+ output_loglevel="quiet",
+ )
+
+
def test_logout_calls_docker_cli_logout_all(self):
client = Mock()
get_client_mock = MagicMock(return_value=client)
--
2.29.2

View File

@ -1,469 +0,0 @@
From 6176ef8aa39626dcb450a1665231a796e9544342 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Thu, 6 Dec 2018 16:26:23 +0100
Subject: [PATCH] Add hold/unhold functions
Add unhold function
Add warnings
---
salt/modules/zypperpkg.py | 186 +++++++++++++++++++++++++++-----------
1 file changed, 131 insertions(+), 55 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 44bcbbf2f2..6fa6e3e0a1 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
Package support for openSUSE via the zypper package manager
@@ -12,8 +11,6 @@ Package support for openSUSE via the zypper package manager
"""
-# Import python libs
-from __future__ import absolute_import, print_function, unicode_literals
import datetime
import fnmatch
@@ -24,7 +21,6 @@ import time
from xml.dom import minidom as dom
from xml.parsers.expat import ExpatError
-# Import salt libs
import salt.utils.data
import salt.utils.environment
import salt.utils.event
@@ -35,9 +31,9 @@ import salt.utils.pkg
import salt.utils.pkg.rpm
import salt.utils.stringutils
import salt.utils.systemd
+import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
-# Import 3rd-party libs
# pylint: disable=import-error,redefined-builtin,no-name-in-module
from salt.ext import six
from salt.ext.six.moves import configparser
@@ -51,8 +47,8 @@ log = logging.getLogger(__name__)
HAS_ZYPP = False
ZYPP_HOME = "/etc/zypp"
-LOCKS = "{0}/locks".format(ZYPP_HOME)
-REPOS = "{0}/repos.d".format(ZYPP_HOME)
+LOCKS = "{}/locks".format(ZYPP_HOME)
+REPOS = "{}/repos.d".format(ZYPP_HOME)
DEFAULT_PRIORITY = 99
PKG_ARCH_SEPARATOR = "."
@@ -75,7 +71,7 @@ def __virtual__():
return __virtualname__
-class _Zypper(object):
+class _Zypper:
"""
Zypper parallel caller.
Validates the result and either raises an exception or reports an error.
@@ -339,7 +335,7 @@ class _Zypper(object):
attrs=["pid", "name", "cmdline", "create_time"],
)
data["cmdline"] = " ".join(data["cmdline"])
- data["info"] = "Blocking process created at {0}.".format(
+ data["info"] = "Blocking process created at {}.".format(
datetime.datetime.utcfromtimestamp(
data["create_time"]
).isoformat()
@@ -347,7 +343,7 @@ class _Zypper(object):
data["success"] = True
except Exception as err: # pylint: disable=broad-except
data = {
- "info": "Unable to retrieve information about blocking process: {0}".format(
+ "info": "Unable to retrieve information about blocking process: {}".format(
err.message
),
"success": False,
@@ -382,7 +378,7 @@ class _Zypper(object):
)
if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure:
raise CommandExecutionError(
- "Zypper command failure: {0}".format(self.error_msg)
+ "Zypper command failure: {}".format(self.error_msg)
)
return (
@@ -397,7 +393,7 @@ class _Zypper(object):
__zypper__ = _Zypper()
-class Wildcard(object):
+class Wildcard:
"""
.. versionadded:: 2017.7.0
@@ -439,7 +435,7 @@ class Wildcard(object):
for vrs in self._get_scope_versions(self._get_available_versions())
]
)
- return versions and "{0}{1}".format(self._op or "", versions[-1]) or None
+ return versions and "{}{}".format(self._op or "", versions[-1]) or None
def _get_available_versions(self):
"""
@@ -451,17 +447,15 @@ class Wildcard(object):
).getElementsByTagName("solvable")
if not solvables:
raise CommandExecutionError(
- "No packages found matching '{0}'".format(self.name)
+ "No packages found matching '{}'".format(self.name)
)
return sorted(
- set(
- [
- slv.getAttribute(self._attr_solvable_version)
- for slv in solvables
- if slv.getAttribute(self._attr_solvable_version)
- ]
- )
+ {
+ slv.getAttribute(self._attr_solvable_version)
+ for slv in solvables
+ if slv.getAttribute(self._attr_solvable_version)
+ }
)
def _get_scope_versions(self, pkg_versions):
@@ -489,7 +483,7 @@ class Wildcard(object):
self._op = version.replace(exact_version, "") or None
if self._op and self._op not in self.Z_OP:
raise CommandExecutionError(
- 'Zypper do not supports operator "{0}".'.format(self._op)
+ 'Zypper do not supports operator "{}".'.format(self._op)
)
self.version = exact_version
@@ -539,14 +533,11 @@ def list_upgrades(refresh=True, root=None, **kwargs):
cmd = ["list-updates"]
if "fromrepo" in kwargs:
repos = kwargs["fromrepo"]
- if isinstance(repos, six.string_types):
+ if isinstance(repos, str):
repos = [repos]
for repo in repos:
cmd.extend(
- [
- "--repo",
- repo if isinstance(repo, six.string_types) else six.text_type(repo),
- ]
+ ["--repo", repo if isinstance(repo, str) else str(repo),]
)
log.debug("Targeting repos: %s", repos)
for update_node in (
@@ -610,7 +601,7 @@ def info_installed(*names, **kwargs):
for _nfo in pkg_nfo:
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
- for key, value in six.iteritems(_nfo):
+ for key, value in _nfo.items():
if key == "source_rpm":
t_nfo["source"] = value
else:
@@ -1033,9 +1024,7 @@ def list_repo_pkgs(*args, **kwargs):
fromrepo = kwargs.pop("fromrepo", "") or ""
ret = {}
- targets = [
- arg if isinstance(arg, six.string_types) else six.text_type(arg) for arg in args
- ]
+ targets = [arg if isinstance(arg, str) else str(arg) for arg in args]
def _is_match(pkgname):
"""
@@ -1124,7 +1113,7 @@ def _get_repo_info(alias, repos_cfg=None, root=None):
try:
meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
meta["alias"] = alias
- for key, val in six.iteritems(meta):
+ for key, val in meta.items():
if val in ["0", "1"]:
meta[key] = int(meta[key]) == 1
elif val == "NONE":
@@ -1197,7 +1186,7 @@ def del_repo(repo, root=None):
"message": msg[0].childNodes[0].nodeValue,
}
- raise CommandExecutionError("Repository '{0}' not found.".format(repo))
+ raise CommandExecutionError("Repository '{}' not found.".format(repo))
def mod_repo(repo, **kwargs):
@@ -1252,13 +1241,13 @@ def mod_repo(repo, **kwargs):
url = kwargs.get("url", kwargs.get("mirrorlist", kwargs.get("baseurl")))
if not url:
raise CommandExecutionError(
- "Repository '{0}' not found, and neither 'baseurl' nor "
+ "Repository '{}' not found, and neither 'baseurl' nor "
"'mirrorlist' was specified".format(repo)
)
if not _urlparse(url).scheme:
raise CommandExecutionError(
- "Repository '{0}' not found and URL for baseurl/mirrorlist "
+ "Repository '{}' not found and URL for baseurl/mirrorlist "
"is malformed".format(repo)
)
@@ -1281,7 +1270,7 @@ def mod_repo(repo, **kwargs):
if new_url == base_url:
raise CommandExecutionError(
- "Repository '{0}' already exists as '{1}'.".format(repo, alias)
+ "Repository '{}' already exists as '{}'.".format(repo, alias)
)
# Add new repo
@@ -1291,7 +1280,7 @@ def mod_repo(repo, **kwargs):
repos_cfg = _get_configured_repos(root=root)
if repo not in repos_cfg.sections():
raise CommandExecutionError(
- "Failed add new repository '{0}' for unspecified reason. "
+ "Failed add new repository '{}' for unspecified reason. "
"Please check zypper logs.".format(repo)
)
added = True
@@ -1327,12 +1316,10 @@ def mod_repo(repo, **kwargs):
cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck")
if "priority" in kwargs:
- cmd_opt.append(
- "--priority={0}".format(kwargs.get("priority", DEFAULT_PRIORITY))
- )
+ cmd_opt.append("--priority={}".format(kwargs.get("priority", DEFAULT_PRIORITY)))
if "humanname" in kwargs:
- cmd_opt.append("--name='{0}'".format(kwargs.get("humanname")))
+ cmd_opt.append("--name='{}'".format(kwargs.get("humanname")))
if kwargs.get("gpgautoimport") is True:
global_cmd_opt.append("--gpg-auto-import-keys")
@@ -1589,7 +1576,7 @@ def install(
if pkg_type == "repository":
targets = []
- for param, version_num in six.iteritems(pkg_params):
+ for param, version_num in pkg_params.items():
if version_num is None:
log.debug("targeting package: %s", param)
targets.append(param)
@@ -1597,7 +1584,7 @@ def install(
prefix, verstr = salt.utils.pkg.split_comparison(version_num)
if not prefix:
prefix = "="
- target = "{0}{1}{2}".format(param, prefix, verstr)
+ target = "{}{}{}".format(param, prefix, verstr)
log.debug("targeting package: %s", target)
targets.append(target)
elif pkg_type == "advisory":
@@ -1606,7 +1593,7 @@ def install(
for advisory_id in pkg_params:
if advisory_id not in cur_patches:
raise CommandExecutionError(
- 'Advisory id "{0}" not found'.format(advisory_id)
+ 'Advisory id "{}" not found'.format(advisory_id)
)
else:
# If we add here the `patch:` prefix, the
@@ -1703,7 +1690,7 @@ def install(
if errors:
raise CommandExecutionError(
- "Problem encountered {0} package(s)".format(
+ "Problem encountered {} package(s)".format(
"downloading" if downloadonly else "installing"
),
info={"errors": errors, "changes": ret},
@@ -1797,7 +1784,7 @@ def upgrade(
cmd_update.append("--dry-run")
if fromrepo:
- if isinstance(fromrepo, six.string_types):
+ if isinstance(fromrepo, str):
fromrepo = [fromrepo]
for repo in fromrepo:
cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
@@ -2052,7 +2039,7 @@ def list_locks(root=None):
)
if lock.get("solvable_name"):
locks[lock.pop("solvable_name")] = lock
- except IOError:
+ except OSError:
pass
except Exception: # pylint: disable=broad-except
log.warning("Detected a problem when accessing {}".format(_locks))
@@ -2089,7 +2076,7 @@ def clean_locks(root=None):
return out
-def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
+def unhold(name=None, pkgs=None, **kwargs):
"""
Remove specified package lock.
@@ -2104,8 +2091,50 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume
salt '*' pkg.remove_lock <package1>,<package2>,<package3>
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
"""
+ ret = {}
+ if (not name and not pkgs) or (name and pkgs):
+ raise CommandExecutionError("Name or packages must be specified.")
+ elif name:
+ pkgs = [name]
+
+ locks = list_locks()
+ try:
+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
+ except MinionError as exc:
+ raise CommandExecutionError(exc)
+
+ removed = []
+ missing = []
+ for pkg in pkgs:
+ if locks.get(pkg):
+ removed.append(pkg)
+ ret[pkg]["comment"] = "Package {} is no longer held.".format(pkg)
+ else:
+ missing.append(pkg)
+ ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg)
+
+ if removed:
+ __zypper__.call("rl", *removed)
+
+ return ret
+
+
+def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+ """
+ Remove specified package lock.
+
+ CLI Example:
+
+ .. code-block:: bash
- locks = list_locks(root)
+ salt '*' pkg.remove_lock <package name>
+ salt '*' pkg.remove_lock <package1>,<package2>,<package3>
+ salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
+ """
+ salt.utils.versions.warn_until(
+ "Sodium", "This function is deprecated. Please use unhold() instead."
+ )
+ locks = list_locks()
try:
packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
except MinionError as exc:
@@ -2125,7 +2154,51 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume
return {"removed": len(removed), "not_found": missing}
-def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
+def hold(name=None, pkgs=None, **kwargs):
+ """
+ Add a package lock. Specify packages to lock by exact name.
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt '*' pkg.add_lock <package name>
+ salt '*' pkg.add_lock <package1>,<package2>,<package3>
+ salt '*' pkg.add_lock pkgs='["foo", "bar"]'
+
+ :param name:
+ :param pkgs:
+ :param kwargs:
+ :return:
+ """
+ ret = {}
+ if (not name and not pkgs) or (name and pkgs):
+ raise CommandExecutionError("Name or packages must be specified.")
+ elif name:
+ pkgs = [name]
+
+ locks = list_locks()
+ added = []
+ try:
+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys())
+ except MinionError as exc:
+ raise CommandExecutionError(exc)
+
+ for pkg in pkgs:
+ ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""}
+ if not locks.get(pkg):
+ added.append(pkg)
+ ret[pkg]["comment"] = "Package {} is now being held.".format(pkg)
+ else:
+ ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg)
+
+ if added:
+ __zypper__.call("al", *added)
+
+ return ret
+
+
+def add_lock(packages, **kwargs): # pylint: disable=unused-argument
"""
Add a package lock. Specify packages to lock by exact name.
@@ -2140,7 +2213,10 @@ def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.add_lock <package1>,<package2>,<package3>
salt '*' pkg.add_lock pkgs='["foo", "bar"]'
"""
- locks = list_locks(root)
+ salt.utils.versions.warn_until(
+ "Sodium", "This function is deprecated. Please use hold() instead."
+ )
+ locks = list_locks()
added = []
try:
packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys())
@@ -2495,7 +2571,7 @@ def search(criteria, refresh=False, **kwargs):
.getElementsByTagName("solvable")
)
if not solvables:
- raise CommandExecutionError("No packages found matching '{0}'".format(criteria))
+ raise CommandExecutionError("No packages found matching '{}'".format(criteria))
out = {}
for solvable in solvables:
@@ -2649,13 +2725,13 @@ def download(*packages, **kwargs):
if failed:
pkg_ret[
"_error"
- ] = "The following package(s) failed to download: {0}".format(
+ ] = "The following package(s) failed to download: {}".format(
", ".join(failed)
)
return pkg_ret
raise CommandExecutionError(
- "Unable to download packages: {0}".format(", ".join(packages))
+ "Unable to download packages: {}".format(", ".join(packages))
)
@@ -2726,7 +2802,7 @@ def diff(*paths, **kwargs):
if pkg_to_paths:
local_pkgs = __salt__["pkg.download"](*pkg_to_paths.keys(), **kwargs)
- for pkg, files in six.iteritems(pkg_to_paths):
+ for pkg, files in pkg_to_paths.items():
for path in files:
ret[path] = (
__salt__["lowpkg.diff"](local_pkgs[pkg]["path"], path)
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +1,20 @@
From e7723f081cc79088156a986cf940349fec7f00a3 Mon Sep 17 00:00:00 2001
From 03b40485102e88e217814ea4e08fb857ad16cbff Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Wed, 18 Aug 2021 15:05:42 +0300
Subject: [PATCH] Add missing aarch64 to rpm package architectures
(#405)
Subject: [PATCH] Add missing aarch64 to rpm package architectures (#405)
Required to prevent false negative results on using pkg.installed
with architecture specification in package name (ex. `bash.aarch64`)
---
salt/utils/pkg/rpm.py | 2 +-
tests/unit/modules/test_zypperpkg.py | 20 ++++++++++++++++++++
2 files changed, 21 insertions(+), 1 deletion(-)
tests/unit/modules/test_zypperpkg.py | 34 ++++++++++++++++++++++++++++
2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py
index d1b149ea0b..8b8ea2e4b1 100644
index 3e990cc05d..8203d2f989 100644
--- a/salt/utils/pkg/rpm.py
+++ b/salt/utils/pkg/rpm.py
@@ -33,7 +33,7 @@ ARCHES_ALPHA = (
@@ -30,7 +30,7 @@ ARCHES_ALPHA = (
"alphaev68",
"alphaev7",
)
@ -25,13 +24,27 @@ index d1b149ea0b..8b8ea2e4b1 100644
ARCHES = (
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 5c01bbbfbd..d6a6a6d852 100644
index 2d7e5f0858..20bf5eaaad 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -2477,3 +2477,23 @@ pattern() = package-c"""
with patch("salt.modules.zypperpkg.__zypper__", zypper_mock):
assert zypper.services_need_restart() == expected
zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss")
@@ -2475,3 +2475,37 @@ pattern() = package-c"""
with patch.dict(zypper.__salt__, salt_mock):
self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt"))
salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
+
+ def test_services_need_restart(self):
+ """
+ Test that zypper ps is used correctly to list services that need to
+ be restarted.
+ """
+ expected = ["salt-minion", "firewalld"]
+ zypper_output = "salt-minion\nfirewalld"
+ zypper_mock = Mock()
+ zypper_mock(root=None).nolock.call = Mock(return_value=zypper_output)
+
+ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock):
+ assert zypper.services_need_restart() == expected
+ zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss")
+
+ def test_normalize_name(self):
+ """
@ -53,6 +66,6 @@ index 5c01bbbfbd..d6a6a6d852 100644
+ result = zypper.normalize_name("foo.noarch")
+ assert result == "foo", result
--
2.32.0
2.33.0

View File

@ -1,116 +0,0 @@
From c5e5dc304e897f8c1664cce29fe9ee63d84f3ae6 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Fri, 12 Oct 2018 16:20:40 +0200
Subject: [PATCH] Add multi-file support and globbing to the filetree
(U#50018)
Add more possible logs
Support multiple files grabbing
Collect system logs and boot logs
Support globbing in filetree
---
salt/cli/support/intfunc.py | 49 ++++++++++++++++-----------
salt/cli/support/profiles/default.yml | 7 ++++
2 files changed, 37 insertions(+), 19 deletions(-)
diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py
index d3d8f83cb8..a9f76a6003 100644
--- a/salt/cli/support/intfunc.py
+++ b/salt/cli/support/intfunc.py
@@ -3,6 +3,7 @@ Internal functions.
"""
# Maybe this needs to be a modules in a future?
+import glob
import os
import salt.utils.files
@@ -11,7 +12,7 @@ from salt.cli.support.console import MessagesOutput
out = MessagesOutput()
-def filetree(collector, path):
+def filetree(collector, *paths):
"""
Add all files in the tree. If the "path" is a file,
only that file will be added.
@@ -19,22 +20,32 @@ def filetree(collector, path):
:param path: File or directory
:return:
"""
- if not path:
- out.error("Path not defined", ident=2)
- else:
- # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
- # pylint: disable=W8470
- if os.path.isfile(path):
- filename = os.path.basename(path)
- try:
- file_ref = salt.utils.files.fopen(path) # pylint: disable=W
- out.put("Add {}".format(filename), indent=2)
- collector.add(filename)
- collector.link(title=path, path=file_ref)
- except Exception as err:
- out.error(err, ident=4)
- # pylint: enable=W8470
+ _paths = []
+ # Unglob
+ for path in paths:
+ _paths += glob.glob(path)
+ for path in set(_paths):
+ if not path:
+ out.error("Path not defined", ident=2)
+ elif not os.path.exists(path):
+ out.warning("Path {} does not exists".format(path))
else:
- for fname in os.listdir(path):
- fname = os.path.join(path, fname)
- filetree(collector, fname)
+ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
+ # pylint: disable=W8470
+ if os.path.isfile(path):
+ filename = os.path.basename(path)
+ try:
+ file_ref = salt.utils.files.fopen(path) # pylint: disable=W
+ out.put("Add {}".format(filename), indent=2)
+ collector.add(filename)
+ collector.link(title=path, path=file_ref)
+ except Exception as err:
+ out.error(err, ident=4)
+ # pylint: enable=W8470
+ else:
+ try:
+ for fname in os.listdir(path):
+ fname = os.path.join(path, fname)
+ filetree(collector, [fname])
+ except Exception as err:
+ out.error(err, ident=4)
diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml
index 01d9a26193..3defb5eef3 100644
--- a/salt/cli/support/profiles/default.yml
+++ b/salt/cli/support/profiles/default.yml
@@ -62,10 +62,17 @@ general-health:
- ps.top:
info: Top CPU consuming processes
+boot_log:
+ - filetree:
+ info: Collect boot logs
+ args:
+ - /var/log/boot.*
+
system.log:
# This works on any file system object.
- filetree:
info: Add system log
args:
- /var/log/syslog
+ - /var/log/messages
--
2.29.2

View File

@ -1,27 +0,0 @@
From 70d13dcc62286d5195bbf28b53aae61616cc0f8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 26 Mar 2020 13:08:16 +0000
Subject: [PATCH] Add new custom SUSE capability for saltutil state
module
---
salt/grains/extra.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
index ddc22293ea..0eec27e628 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
@@ -71,5 +71,6 @@ def config():
def suse_backported_capabilities():
return {
'__suse_reserved_pkg_all_versions_support': True,
- '__suse_reserved_pkg_patches_support': True
+ '__suse_reserved_pkg_patches_support': True,
+ '__suse_reserved_saltutil_states_support': True
}
--
2.29.2

View File

@ -1,107 +0,0 @@
From cee4cc182b4740c912861c712dea7bc44eb70ffb Mon Sep 17 00:00:00 2001
From: Martin Seidl <mseidl@suse.de>
Date: Mon, 7 Dec 2020 01:10:51 +0100
Subject: [PATCH] add patch support for allow vendor change option with
zypper
---
salt/modules/zypperpkg.py | 46 +++++++++++++++++++++++++++------------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 6f22994bf0..4a5cb85e7c 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -35,7 +35,6 @@ import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
# pylint: disable=import-error,redefined-builtin,no-name-in-module
-from salt.ext import six
from salt.ext.six.moves import configparser
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
from salt.utils.versions import LooseVersion
@@ -1431,6 +1430,7 @@ def install(
no_recommends=False,
root=None,
inclusion_detection=False,
+ novendorchange=True,
**kwargs
):
"""
@@ -1478,6 +1478,10 @@ def install(
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+
+ novendorchange
+ Disallow vendor change
+
version
Can be either a version number, or the combination of a comparison
operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
@@ -1638,6 +1642,22 @@ def install(
cmd_install.append(
kwargs.get("resolve_capabilities") and "--capability" or "--name"
)
+ if novendorchange:
+ if __grains__["osrelease_info"][0] > 11:
+ cmd_install.append("--no-allow-vendor-change")
+ log.info("Disabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
+ else:
+ if __grains__["osrelease_info"][0] > 11:
+ cmd_install.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
if not refresh:
cmd_install.insert(0, "--no-refresh")
@@ -1649,7 +1669,6 @@ def install(
cmd_install.extend(fromrepoopt)
if no_recommends:
cmd_install.append("--no-recommends")
-
errors = []
# Split the targets into batches of 500 packages each, so that
@@ -1793,19 +1812,18 @@ def upgrade(
cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
log.info("Targeting repos: %s", fromrepo)
- if dist_upgrade:
- # TODO: Grains validation should be moved to Zypper class
- if __grains__["osrelease_info"][0] > 11:
- if novendorchange:
- cmd_update.append("--no-allow-vendor-change")
- log.info("Disabling vendor changes")
- else:
- cmd_update.append("--allow-vendor-change")
- log.info("Enabling vendor changes")
+ # TODO: Grains validation should be moved to Zypper class
+ if __grains__["osrelease_info"][0] > 11:
+ if novendorchange:
+ cmd_update.append("--no-allow-vendor-change")
+ log.info("Disabling vendor changes")
else:
- log.warning(
- "Enabling/Disabling vendor changes is not supported on this Zypper version"
- )
+ cmd_update.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
if no_recommends:
cmd_update.append("--no-recommends")
--
2.29.2

View File

@ -1,404 +0,0 @@
From c79f4a8619ff1275b2ec4400c1fb27d24c22a7eb Mon Sep 17 00:00:00 2001
From: Alexander Graul <mail@agraul.de>
Date: Tue, 8 Dec 2020 15:35:49 +0100
Subject: [PATCH] Add pkg.services_need_restart (#302)
* Add utils.systemd.pid_to_service function
This function translates a given PID to the systemd service name in case
the process belongs to a running service. It uses DBUS for the
translation if DBUS is available, falling back to parsing
``systemctl status -o json'' output.
* Add zypperpkg.services_need_restart
pkg.services_need_restart returns a list of system services that were
affected by package manager operations such as updates, downgrades or
reinstallations without having been restarted. This might cause issues,
e.g. in the case a shared object was loaded by a process and then
replaced by the package manager.
(cherry picked from commit b950fcdbd6cc8cb08e1413a0ed05e0ae21717cea)
* Add aptpkg.services_need_restart
pkg.services_need_restart returns a list of system services that were
affected by package manager operations such as updates, downgrades or
reinstallations without having been restarted. This might cause issues,
e.g. in the case a shared object was loaded by a process and then
replaced by the package manager.
Requires checkrestart, which is part of the debian-goodies package and
available from official Ubuntu and Debian repositories.
(cherry picked from commit b981f6ecb1a551b98c5cebab4975fc09c6a55a22)
* Add yumpkg.services_need_restart
pkg.services_need_restart returns a list of system services that were
affected by package manager operations such as updates, downgrades or
reinstallations without having been restarted. This might cause issues,
e.g. in the case a shared object was loaded by a process and then
replaced by the package manager.
Requires dnf with the needs-restarting plugin, which is part of
dnf-plugins-core and installed by default on RHEL/CentOS/Fedora.
Also requires systemd for the mapping between PIDs and systemd services.
(cherry picked from commit 5e2be1095729c9f73394e852b82749950957e6fb)
* Add changelog entry for issue #58261
(cherry picked from commit 148877ed8ff7a47132c1186274739e648f7acf1c)
* Simplify dnf needs-restarting output parsing
Co-authored-by: Wayne Werner <waynejwerner@gmail.com>
(cherry picked from commit beb5d60f3cc64b880ec25ca188f8a73f6ec493dd)
---
changelog/58261.added | 1 +
salt/modules/aptpkg.py | 42 ++++++++++++++++-
salt/modules/yumpkg.py | 36 +++++++++++++++
salt/modules/zypperpkg.py | 25 ++++++++++
salt/utils/systemd.py | 69 ++++++++++++++++++++++++++++
tests/unit/modules/test_aptpkg.py | 22 ++++++++-
tests/unit/modules/test_yumpkg.py | 32 ++++++++++++-
tests/unit/modules/test_zypperpkg.py | 14 ++++++
8 files changed, 238 insertions(+), 3 deletions(-)
create mode 100644 changelog/58261.added
diff --git a/changelog/58261.added b/changelog/58261.added
new file mode 100644
index 0000000000..537a43e80d
--- /dev/null
+++ b/changelog/58261.added
@@ -0,0 +1 @@
+Added ``pkg.services_need_restart`` which lists system services that should be restarted after package management operations.
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index 03e99af733..a0e0cc30c1 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -38,7 +38,12 @@ import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
import salt.utils.yaml
-from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+from salt.exceptions import (
+ CommandExecutionError,
+ CommandNotFoundError,
+ MinionError,
+ SaltInvocationError,
+)
from salt.modules.cmdmod import _parse_env
log = logging.getLogger(__name__)
@@ -3029,3 +3034,38 @@ def list_downloaded(root=None, **kwargs):
).isoformat(),
}
return ret
+
+
+def services_need_restart(**kwargs):
+ """
+ .. versionadded:: NEXT
+
+ List services that use files which have been changed by the
+ package manager. It might be needed to restart them.
+
+ Requires checkrestart from the debian-goodies package.
+
+ CLI Examples:
+
+ .. code-block:: bash
+
+ salt '*' pkg.services_need_restart
+ """
+ if not salt.utils.path.which_bin(["checkrestart"]):
+ raise CommandNotFoundError(
+ "'checkrestart' is needed. It is part of the 'debian-goodies' "
+ "package which can be installed from official repositories."
+ )
+
+ cmd = ["checkrestart", "--machine"]
+ services = set()
+
+ cr_output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
+ for line in cr_output.split("\n"):
+ if not line.startswith("SERVICE:"):
+ continue
+ end_of_name = line.find(",")
+ service = line[8:end_of_name] # skip "SERVICE:"
+ services.add(service)
+
+ return list(services)
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index dd843f985b..df174e737d 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -3434,3 +3434,39 @@ def del_repo_key(keyid, root=None, **kwargs):
"""
return __salt__["lowpkg.remove_gpg_key"](keyid, root)
+
+
+def services_need_restart(**kwargs):
+ """
+ .. versionadded:: NEXT
+
+ List services that use files which have been changed by the
+ package manager. It might be needed to restart them.
+
+ Requires systemd.
+
+ CLI Examples:
+
+ .. code-block:: bash
+
+ salt '*' pkg.services_need_restart
+ """
+ if _yum() != "dnf":
+ raise CommandExecutionError("dnf is required to list outdated services.")
+ if not salt.utils.systemd.booted(__context__):
+ raise CommandExecutionError("systemd is required to list outdated services.")
+
+ cmd = ["dnf", "--quiet", "needs-restarting"]
+ dnf_output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
+ if not dnf_output:
+ return []
+
+ services = set()
+ for line in dnf_output.split("\n"):
+ pid, has_delim, _ = line.partition(":")
+ if has_delim:
+ service = salt.utils.systemd.pid_to_service(pid.strip())
+ if service:
+ services.add(service)
+
+ return list(services)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 5e13c68708..6f22994bf0 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -3092,3 +3092,28 @@ def del_repo_key(keyid, root=None, **kwargs):
"""
return __salt__["lowpkg.remove_gpg_key"](keyid, root)
+
+
+def services_need_restart(root=None, **kwargs):
+ """
+ .. versionadded:: NEXT
+
+ List services that use files which have been changed by the
+ package manager. It might be needed to restart them.
+
+ root
+ operate on a different root directory.
+
+ CLI Examples:
+
+ .. code-block:: bash
+
+ salt '*' pkg.services_need_restart
+
+ """
+ cmd = ["ps", "-sss"]
+
+ zypper_output = __zypper__(root=root).nolock.call(*cmd)
+ services = zypper_output.split()
+
+ return services
diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py
index 4d902bc920..f42d0421f8 100644
--- a/salt/utils/systemd.py
+++ b/salt/utils/systemd.py
@@ -11,6 +11,12 @@ import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
+try:
+ import dbus
+except ImportError:
+ dbus = None
+
+
log = logging.getLogger(__name__)
@@ -114,3 +120,66 @@ def has_scope(context=None):
if _sd_version is None:
return False
return _sd_version >= 205
+
+
+def pid_to_service(pid):
+ """
+ Check if a PID belongs to a systemd service and return its name.
+ Return None if the PID does not belong to a service.
+
+ Uses DBUS if available.
+ """
+ if dbus:
+ return _pid_to_service_dbus(pid)
+ else:
+ return _pid_to_service_systemctl(pid)
+
+
+def _pid_to_service_systemctl(pid):
+ systemd_cmd = ["systemctl", "--output", "json", "status", str(pid)]
+ try:
+ systemd_output = subprocess.run(
+ systemd_cmd, check=True, text=True, capture_output=True
+ )
+ status_json = salt.utils.json.find_json(systemd_output.stdout)
+ except (ValueError, subprocess.CalledProcessError):
+ return None
+
+ name = status_json.get("_SYSTEMD_UNIT")
+ if name and name.endswith(".service"):
+ return _strip_suffix(name)
+ else:
+ return None
+
+
+def _pid_to_service_dbus(pid):
+ """
+ Use DBUS to check if a PID belongs to a running systemd service and return the service name if it does.
+ """
+ bus = dbus.SystemBus()
+ systemd_object = bus.get_object(
+ "org.freedesktop.systemd1", "/org/freedesktop/systemd1"
+ )
+ systemd = dbus.Interface(systemd_object, "org.freedesktop.systemd1.Manager")
+ try:
+ service_path = systemd.GetUnitByPID(pid)
+ service_object = bus.get_object("org.freedesktop.systemd1", service_path)
+ service_props = dbus.Interface(
+ service_object, "org.freedesktop.DBus.Properties"
+ )
+ service_name = service_props.Get("org.freedesktop.systemd1.Unit", "Id")
+ name = str(service_name)
+
+ if name and name.endswith(".service"):
+ return _strip_suffix(name)
+ else:
+ return None
+ except dbus.DBusException:
+ return None
+
+
+def _strip_suffix(service_name):
+ """
+ Strip ".service" suffix from a given service name.
+ """
+ return service_name[:-8]
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
index eb3f9e2da7..1d4d2f7fdc 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
@@ -13,7 +13,6 @@ import textwrap
import pytest
import salt.modules.aptpkg as aptpkg
from salt.exceptions import CommandExecutionError, SaltInvocationError
-from salt.ext import six
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, Mock, call, patch
from tests.support.unit import TestCase, skipIf
@@ -1001,3 +1000,24 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
# We should attempt to call the cmd 5 times
self.assertEqual(cmd_mock.call_count, 5)
cmd_mock.has_calls(expected_calls)
+
+ @patch("salt.utils.path.which_bin", Mock(return_value="/usr/sbin/checkrestart"))
+ def test_services_need_restart(self):
+ """
+ Test that checkrestart output is parsed correctly
+ """
+ cr_output = """
+PROCESSES: 24
+PROGRAMS: 17
+PACKAGES: 8
+SERVICE:rsyslog,385,/usr/sbin/rsyslogd
+SERVICE:cups-daemon,390,/usr/sbin/cupsd
+ """
+
+ with patch.dict(
+ aptpkg.__salt__, {"cmd.run_stdout": Mock(return_value=cr_output)}
+ ):
+ assert sorted(aptpkg.services_need_restart()) == [
+ "cups-daemon",
+ "rsyslog",
+ ]
diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
index e65a1f8b8b..b97e82d307 100644
--- a/tests/unit/modules/test_yumpkg.py
+++ b/tests/unit/modules/test_yumpkg.py
@@ -7,7 +7,7 @@ import salt.modules.yumpkg as yumpkg
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
from tests.support.mixins import LoaderModuleMockMixin
-from tests.support.mock import MagicMock, Mock, mock_open, patch
+from tests.support.mock import MagicMock, Mock, call, mock_open, patch
from tests.support.unit import TestCase, skipIf
try:
@@ -1745,3 +1745,33 @@ class YumUtilsTestCase(TestCase, LoaderModuleMockMixin):
python_shell=True,
username="Darth Vader",
)
+
+ @skipIf(not salt.utils.systemd.booted(), "Requires systemd")
+ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf"))
+ def test_services_need_restart(self):
+ """
+ Test that dnf needs-restarting output is parsed and
+ salt.utils.systemd.pid_to_service is called as expected.
+ """
+ expected = ["firewalld", "salt-minion"]
+
+ dnf_mock = Mock(
+ return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n"
+ )
+ systemd_mock = Mock(side_effect=["firewalld", "salt-minion"])
+ with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch(
+ "salt.utils.systemd.pid_to_service", systemd_mock
+ ):
+ assert sorted(yumpkg.services_need_restart()) == expected
+ systemd_mock.assert_has_calls([call("123"), call("456")])
+
+ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf"))
+ def test_services_need_restart_requires_systemd(self):
+ """Test that yumpkg.services_need_restart raises an error if systemd is unavailable."""
+ with patch("salt.utils.systemd.booted", Mock(return_value=False)):
+ pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
+
+ @patch("salt.modules.yumpkg._yum", Mock(return_value="yum"))
+ def test_services_need_restart_requires_dnf(self):
+ """Test that yumpkg.services_need_restart raises an error if DNF is unavailable."""
+ pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 018c1ffbca..9c4a224c55 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -2213,3 +2213,17 @@ pattern() = package-c"""
with patch.dict(zypper.__salt__, salt_mock):
self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt"))
salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
+
+ def test_services_need_restart(self):
+ """
+ Test that zypper ps is used correctly to list services that need to
+ be restarted.
+ """
+ expected = ["salt-minion", "firewalld"]
+ zypper_output = "salt-minion\nfirewalld"
+ zypper_mock = Mock()
+ zypper_mock(root=None).nolock.call = Mock(return_value=zypper_output)
+
+ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock):
+ assert zypper.services_need_restart() == expected
+ zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss")
--
2.29.2

View File

@ -1,463 +0,0 @@
From 99aa26e7ab4840cf38f54e7692d7d1eede3adeb4 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Mon, 12 Mar 2018 12:01:39 +0100
Subject: [PATCH] Add SaltSSH multi-version support across Python
interpeters.
Bugfix: crashes when OPTIONS.saltdir is a file
salt-ssh: allow server and client to run different python major version
Handle non-directory on the /tmp
Bugfix: prevent partial fileset removal in /tmp
salt-ssh: compare checksums to detect newly generated thin on the server
Reset time at thin unpack
Bugfix: get a proper option for CLI and opts of wiping the tmp
Add docstring to get_tops
Remove unnecessary noise in imports
Refactor get_tops collector
Add logging to the get_tops
Update call script
Remove pre-caution
Update log debug message for tops collector
Reset default compression, if unknown is passed
Refactor archive creation flow
Add external shell-callable function to collect tops
Simplify tops gathering, bugfix alternative to Py2
find working executable
Add basic shareable module classifier
Add proper error handler, unmuting exceptions during top collection
Use common shared directory for compatible libraries
fix searching for python versions
Flatten error message string
Bail-out immediately if <2.6 version detected
Simplify shell cmd to get the version on Python 2.x
Remove stub that was previously moved upfront
Lintfix: PEP8 ident
Add logging on the error, when Python-2 version cannot be detected properly
Generate salt-call source, based on conditions
Add logging on remove failure on thin.tgz archive
Add config-based external tops gatherer
Change signature to pass the extended configuration to the thin generator
Update docstring to the salt-call generator
Implement get namespaces inclusion to the salt-call script on the client machine
Use new signature of the get call
Implement namespace selector, based on the current Python interpreter version
Add deps as a list, instead of a map
Add debug logging
Implement packaging an alternative version
Update salt-call script so it swaps the namespace according to the configuration
Compress thin.zip if zlib is available
Fix a system exit error message
Move compression fall-back operation
Add debug logging prior to the thin archive removal
Flatten the archive extension choice
Lintfix: PEP8 an empty line required
Bugfix: ZFS modules (zfs, zpool) crashes on non-ZFS systems
Add unit test case for the Salt SSH parts
Add unit test for missing dependencies on get_ext_tops
Postpone inheritance implementation
Refactor unit test for get_ext_tops
Add unit test for get_ext_tops checks interpreter configuration
Check python interpreter lock version
Add unit test for get_ext_tops checks the python locked interepreter value
Bugfix: report into warning log module name, not its config
Add unit test for dependencies check python version lock (inherently)
Mock os.path.isfile function
Update warning logging information
Add unit test for get_ext_tops module configuration validation
Do not use list of dicts for namespaces, just dict for namespaces.
Add unit test for get_ext_tops config verification
Fix unit tests for the new config structure
Add unit test for thin.gte call
Add unit test for dependency path adding function
Add unit test for thin_path function
Add unit test for salt-call source generator
Add unit test for get_ext_namespaces on empty configuration
Add get_ext_namespaces for namespace extractions into a tuple for python version
Remove unused variable
Add unit test for getting namespace failure when python maj/min versions are not defined
Add unit test to add tops based on the current interpreter
Add unit test for get_tops with extra modules
Add unit test for shared object modules top addition
Add unit test for thin_sum hashing
Add unit test for min_sum hashing
Add unit test for gen_thin verify for 2.6 Python version is a minimum requirement
Fix gen_thin exception on Python 3
Use object attribute instead of indeces. Remove an empty line.
Add unit test for gen_thin compression type fallback
Move helper functions up by the class code
Update unit test doc
Add check for correct archiving mode is opened
Add unit test for gen_thin if control files are written correctly
Update docstring for fake version info constructor method
Add fake tarfile mock handler
Mock-out missing methods inside gen_thin
Move tarfile.open check to the end of the test
Add unit test for tree addition to the archive
Add shareable module to the gen_thin unit test
Fix docstring
Add unit test for an alternative version pack
Lintfix
Add documentation about updated Salt SSH features
Fix typo
Lintfix: PEP8 extra-line needed
Make the command more readable
Write all supported minimal python versions into a config file on the target machine
Get supported Python executable based on the config py-map
Add unit test for get_supported_py_config function typecheck
Add unit test for get_supported_py_config function base tops
Add unit test for get_supported_py_config function ext tops
Fix unit test for catching "supported-versions" was written down
Rephrase Salt SSH doc description
Re-phrase docstring for alternative Salt installation
require same major version while minor is allowed to be higher
Bugfix: remove minor version from the namespaced, version-specific directory
Fix unit tests for minor version removal of namespaced version-specific directory
Initialise the options directly to be structure-ready object.
Disable wiping if state is executed
Properly mock a tempfile object
Support Python 2.6 versions
Add digest collector for file trees etc
Bufix: recurse calls damages the configuration (reference problem)
Collect digest of the code
Get code checksum into the shim options
Get all the code content, not just Python sources
Bugfix: Python3 compat - string required instead of bytes
Lintfix: too many empty lines
Lintfix: blocked function used
Bugfix: key error master_tops_first
Fix unit tests for the checksum generator
Use code checksum to update thin archive on client's cache
Lintfix
Set master_top_first to False by default
---
doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++
salt/client/ssh/ssh_py_shim.py | 3 +
2 files changed, 181 insertions(+)
create mode 100644 doc/topics/releases/fluorine.rst
diff --git a/doc/topics/releases/fluorine.rst b/doc/topics/releases/fluorine.rst
new file mode 100644
index 0000000000..40c69e25cc
--- /dev/null
+++ b/doc/topics/releases/fluorine.rst
@@ -0,0 +1,178 @@
+:orphan:
+
+======================================
+Salt Release Notes - Codename Fluorine
+======================================
+
+
+Minion Startup Events
+---------------------
+
+When a minion starts up it sends a notification on the event bus with a tag
+that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
+the minion also sends a similar event with an event tag like this:
+`minion_start`. This duplication can cause a lot of clutter on the event bus
+when there are many minions. Set `enable_legacy_startup_events: False` in the
+minion config to ensure only the `salt/minion/<minion_id>/start` events are
+sent.
+
+The new :conf_minion:`enable_legacy_startup_events` minion config option
+defaults to ``True``, but will be set to default to ``False`` beginning with
+the Neon release of Salt.
+
+The Salt Syndic currently sends an old style `syndic_start` event as well. The
+syndic respects :conf_minion:`enable_legacy_startup_events` as well.
+
+
+Deprecations
+------------
+
+Module Deprecations
+===================
+
+The ``napalm_network`` module had the following changes:
+
+- Support for the ``template_path`` has been removed in the ``load_template``
+ function. This is because support for NAPALM native templates has been
+ dropped.
+
+The ``trafficserver`` module had the following changes:
+
+- Support for the ``match_var`` function was removed. Please use the
+ ``match_metric`` function instead.
+- Support for the ``read_var`` function was removed. Please use the
+ ``read_config`` function instead.
+- Support for the ``set_var`` function was removed. Please use the
+ ``set_config`` function instead.
+
+The ``win_update`` module has been removed. It has been replaced by ``win_wua``
+module.
+
+The ``win_wua`` module had the following changes:
+
+- Support for the ``download_update`` function has been removed. Please use the
+ ``download`` function instead.
+- Support for the ``download_updates`` function has been removed. Please use the
+ ``download`` function instead.
+- Support for the ``install_update`` function has been removed. Please use the
+ ``install`` function instead.
+- Support for the ``install_updates`` function has been removed. Please use the
+ ``install`` function instead.
+- Support for the ``list_update`` function has been removed. Please use the
+ ``get`` function instead.
+- Support for the ``list_updates`` function has been removed. Please use the
+ ``list`` function instead.
+
+Pillar Deprecations
+===================
+
+The ``vault`` pillar had the following changes:
+
+- Support for the ``profile`` argument was removed. Any options passed up until
+ and following the first ``path=`` are discarded.
+
+Roster Deprecations
+===================
+
+The ``cache`` roster had the following changes:
+
+- Support for ``roster_order`` as a list or tuple has been removed. As of the
+ ``Fluorine`` release, ``roster_order`` must be a dictionary.
+- The ``roster_order`` option now includes IPv6 in addition to IPv4 for the
+ ``private``, ``public``, ``global`` or ``local`` settings. The syntax for these
+ settings has changed to ``ipv4-*`` or ``ipv6-*``, respectively.
+
+State Deprecations
+==================
+
+The ``docker`` state has been removed. The following functions should be used
+instead.
+
+- The ``docker.running`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.running`` function instead.
+- The ``docker.stopped`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.stopped`` function instead.
+- The ``docker.absent`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.absent`` function instead.
+- The ``docker.absent`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.absent`` function instead.
+- The ``docker.network_present`` function was removed. Please update applicable
+ SLS files to use the ``docker_network.present`` function instead.
+- The ``docker.network_absent`` function was removed. Please update applicable
+ SLS files to use the ``docker_network.absent`` function instead.
+- The ``docker.image_present`` function was removed. Please update applicable SLS
+ files to use the ``docker_image.present`` function instead.
+- The ``docker.image_absent`` function was removed. Please update applicable SLS
+ files to use the ``docker_image.absent`` function instead.
+- The ``docker.volume_present`` function was removed. Please update applicable SLS
+ files to use the ``docker_volume.present`` function instead.
+- The ``docker.volume_absent`` function was removed. Please update applicable SLS
+ files to use the ``docker_volume.absent`` function instead.
+
+The ``docker_network`` state had the following changes:
+
+- Support for the ``driver`` option has been removed from the ``absent`` function.
+ This option had no functionality in ``docker_network.absent``.
+
+The ``git`` state had the following changes:
+
+- Support for the ``ref`` option in the ``detached`` state has been removed.
+ Please use the ``rev`` option instead.
+
+The ``k8s`` state has been removed. The following functions should be used
+instead:
+
+- The ``k8s.label_absent`` function was removed. Please update applicable SLS
+ files to use the ``kubernetes.node_label_absent`` function instead.
+- The ``k8s.label_present`` function was removed. Please updated applicable SLS
+ files to use the ``kubernetes.node_label_present`` function instead.
+- The ``k8s.label_folder_absent`` function was removed. Please update applicable
+ SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.
+
+The ``netconfig`` state had the following changes:
+
+- Support for the ``template_path`` option in the ``managed`` state has been
+ removed. This is because support for NAPALM native templates has been dropped.
+
+The ``trafficserver`` state had the following changes:
+
+- Support for the ``set_var`` function was removed. Please use the ``config``
+ function instead.
+
+The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.
+
+SaltSSH major updates
+=====================
+
+SaltSSH now works across different major Python versions. Python 2.7 ~ Python 3.x
+are now supported transparently. Requirement is, however, that the SaltMaster should
+have installed Salt, including all related dependencies for Python 2 and Python 3.
+Everything needs to be importable from the respective Python environment.
+
+SaltSSH can bundle up an arbitrary version of Salt. If there would be an old box for
+example, running an outdated and unsupported Python 2.6, it is still possible from
+a SaltMaster with Python 3.5 or newer to access it. This feature requires an additional
+configuration in /etc/salt/master as follows:
+
+
+.. code-block:: yaml
+
+ ssh_ext_alternatives:
+ 2016.3: # Namespace, can be actually anything.
+ py-version: [2, 6] # Constraint to specific interpreter version
+ path: /opt/2016.3/salt # Main Salt installation
+ dependencies: # List of dependencies and their installation paths
+ jinja2: /opt/jinja2
+ yaml: /opt/yaml
+ tornado: /opt/tornado
+ msgpack: /opt/msgpack
+ certifi: /opt/certifi
+ singledispatch: /opt/singledispatch.py
+ singledispatch_helpers: /opt/singledispatch_helpers.py
+ markupsafe: /opt/markupsafe
+ backports_abc: /opt/backports_abc.py
+
+It is also possible to use several alternative versions of Salt. You can for instance generate
+a minimal tarball using runners and include that. But this is only possible, when such specific
+Salt version is also available on the Master machine, although does not need to be directly
+installed together with the older Python interpreter.
diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
index c0ce0fd7de..5ddd282ed0 100644
--- a/salt/client/ssh/ssh_py_shim.py
+++ b/salt/client/ssh/ssh_py_shim.py
@@ -171,6 +171,9 @@ def unpack_thin(thin_path):
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
+ checksum_path = os.path.normpath(os.path.join(OPTIONS.saltdir, "thin_checksum"))
+ with open(checksum_path, "w") as chk:
+ chk.write(OPTIONS.checksum + "\n")
os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,145 +0,0 @@
From ca2ad86438293af6715a9890b168f159ff4d9b9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= <cbosdonnat@suse.com>
Date: Thu, 18 Oct 2018 13:32:59 +0200
Subject: [PATCH] Add virt.all_capabilities
In order to get all possible capabilities from a host, the user has to
call virt.capabilities, and then loop over the guests and domains
before calling virt.domain_capabilities for each of them.
This commit embeds all this logic to get them all in a single
virt.all_capabilities call.
---
salt/modules/virt.py | 73 +++++++++++++++++++++++++++++++--
tests/unit/modules/test_virt.py | 2 +-
2 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 313181c49e..362c2a68b5 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -5568,11 +5568,76 @@ def _parse_domain_caps(caps):
return result
+def _parse_domain_caps(caps):
+ """
+ Parse the XML document of domain capabilities into a structure.
+ """
+ result = {
+ "emulator": caps.find("path").text if caps.find("path") is not None else None,
+ "domain": caps.find("domain").text if caps.find("domain") is not None else None,
+ "machine": caps.find("machine").text
+ if caps.find("machine") is not None
+ else None,
+ "arch": caps.find("arch").text if caps.find("arch") is not None else None,
+ }
+
+
+def all_capabilities(**kwargs):
+ """
+ Return the host and domain capabilities in a single call.
+
+ .. versionadded:: 3001
+
+ :param connection: libvirt connection URI, overriding defaults
+ :param username: username to connect with, overriding defaults
+ :param password: password to connect with, overriding defaults
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt '*' virt.all_capabilities
+
+ """
+ conn = __get_conn(**kwargs)
+ try:
+ host_caps = ElementTree.fromstring(conn.getCapabilities())
+ domains = [
+ [
+ (guest.get("arch", {}).get("name", None), key)
+ for key in guest.get("arch", {}).get("domains", {}).keys()
+ ]
+ for guest in [
+ _parse_caps_guest(guest) for guest in host_caps.findall("guest")
+ ]
+ ]
+ flattened = [pair for item in (x for x in domains) for pair in item]
+ result = {
+ "host": {
+ "host": _parse_caps_host(host_caps.find("host")),
+ "guests": [
+ _parse_caps_guest(guest) for guest in host_caps.findall("guest")
+ ],
+ },
+ "domains": [
+ _parse_domain_caps(
+ ElementTree.fromstring(
+ conn.getDomainCapabilities(None, arch, None, domain)
+ )
+ )
+ for (arch, domain) in flattened
+ ],
+ }
+ return result
+ finally:
+ conn.close()
+
+
def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
"""
Return the domain capabilities given an emulator, architecture, machine or virtualization type.
- .. versionadded:: 2019.2.0
+ .. versionadded:: Fluorine
:param emulator: return the capabilities for the given emulator binary
:param arch: return the capabilities for the given CPU architecture
@@ -5611,7 +5676,7 @@ def all_capabilities(**kwargs):
"""
Return the host and domain capabilities in a single call.
- .. versionadded:: 3001
+ .. versionadded:: Neon
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
@@ -5625,6 +5690,7 @@ def all_capabilities(**kwargs):
"""
conn = __get_conn(**kwargs)
+ result = {}
try:
host_caps = ElementTree.fromstring(conn.getCapabilities())
domains = [
@@ -5653,10 +5719,11 @@ def all_capabilities(**kwargs):
for (arch, domain) in flattened
],
}
- return result
finally:
conn.close()
+ return result
+
def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs):
"""
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
index cce107c9e4..e9e73d7b5d 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
@@ -4063,7 +4063,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"]
)
self.assertEqual(
- {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]},
+ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]}
)
def test_network_tag(self):
--
2.29.2

View File

@ -1,4 +1,4 @@
From 34a913b0b54b55edf042dc899250e56ef0eaec77 Mon Sep 17 00:00:00 2001
From e3e55336b0d457cb55cd83236e9ac8e0dc671d2e Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Mon, 5 Jul 2021 18:57:26 +0300
Subject: [PATCH] Adding preliminary support for Rocky. (#59682) (#391)
@ -32,30 +32,30 @@ index 0000000000..93b4a3d1fc
@@ -0,0 +1 @@
+Rocky Linux has been added to the RedHat os_family.
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 2b965a2a8a..ace0e4bff9 100644
index bce8c95179..f79110124f 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1547,6 +1547,7 @@ _OS_NAME_MAP = {
"slesexpand": "RES",
@@ -1560,6 +1560,7 @@ _OS_NAME_MAP = {
"linuxmint": "Mint",
"neon": "KDE neon",
"pop": "Pop",
+ "rocky": "Rocky",
"alibabaclo": "Alinux",
}
@@ -1621,6 +1622,7 @@ _OS_FAMILY_MAP = {
"Funtoo": "Gentoo",
@@ -1637,6 +1638,7 @@ _OS_FAMILY_MAP = {
"AIX": "AIX",
"TurnKey": "Debian",
"Pop": "Debian",
+ "Rocky": "RedHat",
"AstraLinuxCE": "Debian",
"Alinux": "RedHat",
}
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 8280d6de47..61a6956e32 100644
index fa06bb27ab..fcc7586775 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -678,6 +678,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
@@ -708,6 +708,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
}
self._run_os_grains_tests(None, _os_release_map, expectation)
@ -92,6 +92,6 @@ index 8280d6de47..61a6956e32 100644
def test_almalinux_8_os_grains(self):
"""
--
2.32.0
2.33.0

View File

@ -1,4 +1,4 @@
From 12d67e0cfa54399f3a0b6ae0d4faa09793fa2b0f Mon Sep 17 00:00:00 2001
From 3beb3379dafe1adf9c1a43694f7b71938be3f583 Mon Sep 17 00:00:00 2001
From: Jochen Breuer <jbreuer@suse.de>
Date: Wed, 1 Apr 2020 16:13:23 +0200
Subject: [PATCH] Adds explicit type cast for port
@ -8,26 +8,25 @@ and a wrong set of remotes was returned.
The type casting to int solves this issue.
---
salt/utils/network.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
salt/utils/network.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/salt/utils/network.py b/salt/utils/network.py
index 25b2d06758..1705a5809d 100644
index 5fc9a34ca4..0dd20c5599 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
@@ -1626,9 +1626,9 @@ def _netlink_tool_remote_on(port, which_end):
local_host, local_port = chunks[3].rsplit(":", 1)
@@ -1703,6 +1703,10 @@ def _netlink_tool_remote_on(port, which_end):
chunks = line.split()
remote_host, remote_port = chunks[4].rsplit(":", 1)
- if which_end == "remote_port" and int(remote_port) != port:
+ if which_end == "remote_port" and int(remote_port) != int(port):
continue
- if which_end == "local_port" and int(local_port) != port:
+ continue
+ if which_end == "local_port" and int(local_port) != int(port):
continue
+ continue
remotes.add(remote_host.strip("[]"))
if valid is False:
--
2.29.2
2.33.0

View File

@ -1,28 +0,0 @@
From 5e8a9c9eaa18c53b259a3bb1da8df51f5382ed6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 22 Feb 2021 11:39:19 +0000
Subject: [PATCH] Allow extra_filerefs as sanitized kwargs for SSH
client
(cherry picked from commit 89f843398849633af52cceab2155e9cedf8ad3dd)
---
salt/client/ssh/client.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py
index 2cf42f53e7..3631c3bb67 100644
--- a/salt/client/ssh/client.py
+++ b/salt/client/ssh/client.py
@@ -60,6 +60,7 @@ class SSHClient:
("rosters", list),
("ignore_host_keys", bool),
("raw_shell", bool),
+ ("extra_filerefs", str),
]
sane_kwargs = {}
for name, kind in roster_vals:
--
2.30.1

View File

@ -1,29 +0,0 @@
From 125f973014b8d5ffa13ae7dd231043e39af75ea0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 3 Jul 2019 09:34:50 +0100
Subject: [PATCH] Allow passing kwargs to pkg.list_downloaded
(bsc#1140193)
Add unit test for pkg.list_downloaded with kwargs
---
salt/modules/zypperpkg.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 75cb5ce4a8..c996935bff 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -2754,7 +2754,7 @@ def download(*packages, **kwargs):
)
-def list_downloaded(root=None):
+def list_downloaded(root=None, **kwargs):
"""
.. versionadded:: 2017.7.0
--
2.29.2

View File

@ -1,292 +0,0 @@
From 33ad6876a04e800afc08748133dc568a5e362903 Mon Sep 17 00:00:00 2001
From: Martin Seidl <mseidl@suse.de>
Date: Wed, 17 Mar 2021 14:05:42 +0100
Subject: [PATCH] Allow vendor change option with zypper (#313)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* add patch support for allow vendor change option with zypper
* adjust unit tests vendor change refactor, dropping cli arg
* Fix pr issues
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
* Fix unit test for allow vendor change on upgrade
* Add unit test with unsupported zypper version
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
---
salt/modules/zypperpkg.py | 58 +++++++++++++---
tests/unit/modules/test_zypperpkg.py | 99 ++++++++++++++++++----------
2 files changed, 112 insertions(+), 45 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 6f22994bf0..b35792237c 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -105,6 +105,10 @@ class _Zypper:
ZYPPER_LOCK = "/var/run/zypp.pid"
TAG_RELEASED = "zypper/released"
TAG_BLOCKED = "zypper/blocked"
+ # Dist upgrade vendor change support (SLE12+)
+ dup_avc = False
+ # Install/Patch/Upgrade vendor change support (SLE15+)
+ inst_avc = False
def __init__(self):
"""
@@ -218,6 +222,21 @@ class _Zypper:
def pid(self):
return self.__call_result.get("pid", "")
+ def refresh_zypper_flags(self):
+ try:
+ zypp_version = version('zypper')
+ # zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade
+ if version_cmp(zypp_version, '1.11.34') >= 0:
+ # zypper version supports vendor change for dist upgrade
+ self.dup_avc = True
+ # zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading
+ if version_cmp(zypp_version, '1.14.8') >= 0:
+ self.inst_avc = True
+ else:
+ log.error("Failed to compare Zypper version")
+ except Exception as ex:
+ log.error("Unable to get Zypper version: {}".format(ex))
+
def _is_error(self):
"""
Is this is an error code?
@@ -1431,6 +1450,7 @@ def install(
no_recommends=False,
root=None,
inclusion_detection=False,
+ novendorchange=True,
**kwargs
):
"""
@@ -1478,6 +1498,9 @@ def install(
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+ novendorchange
+ Disallow vendor change
+
version
Can be either a version number, or the combination of a comparison
operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
@@ -1638,6 +1661,15 @@ def install(
cmd_install.append(
kwargs.get("resolve_capabilities") and "--capability" or "--name"
)
+ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+
+ if not novendorchange:
+ __zypper__(root=root).refresh_zypper_flags()
+ if __zypper__(root=root).inst_avc:
+ cmd_install.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
+ log.warning("Enabling/Disabling vendor changes is not supported on this Zypper version")
+
if not refresh:
cmd_install.insert(0, "--no-refresh")
@@ -1793,19 +1825,25 @@ def upgrade(
cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
log.info("Targeting repos: %s", fromrepo)
- if dist_upgrade:
- # TODO: Grains validation should be moved to Zypper class
- if __grains__["osrelease_info"][0] > 11:
- if novendorchange:
- cmd_update.append("--no-allow-vendor-change")
- log.info("Disabling vendor changes")
- else:
+ if not novendorchange:
+ __zypper__(root=root).refresh_zypper_flags()
+ if dist_upgrade:
+ if __zypper__(root=root).dup_avc:
cmd_update.append("--allow-vendor-change")
log.info("Enabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
else:
- log.warning(
- "Enabling/Disabling vendor changes is not supported on this Zypper version"
- )
+ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+
+ if __zypper__(root=root).inst_avc:
+ cmd_update.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
if no_recommends:
cmd_update.append("--no-recommends")
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 9c4a224c55..f32c382d7f 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -644,7 +644,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
- "--no-allow-vendor-change",
)
with patch(
@@ -691,46 +690,80 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"dist-upgrade",
"--auto-agree-with-licenses",
"--dry-run",
- "--no-allow-vendor-change",
)
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
"--dry-run",
- "--no-allow-vendor-change",
)
with patch(
"salt.modules.zypperpkg.list_pkgs",
- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])
):
- ret = zypper.upgrade(
- dist_upgrade=True,
- dryrun=True,
- fromrepo=["Dummy", "Dummy2"],
- novendorchange=False,
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--allow-vendor-change",
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--allow-vendor-change",
- "--debug-solver",
- )
+ with patch.dict(zypper.__salt__,
+ {'pkg_resource.version': MagicMock(return_value='1.15'),
+ 'lowpkg.version_cmp': MagicMock(return_value=1)}):
+ ret = zypper.upgrade(
+ dist_upgrade=True,
+ dryrun=True,
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=False,
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--allow-vendor-change",
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--allow-vendor-change",
+ "--debug-solver",
+ )
+
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])
+ ):
+ with patch.dict(zypper.__salt__,
+ {'pkg_resource.version': MagicMock(return_value='1.11'),
+ 'lowpkg.version_cmp': MagicMock(return_value=1)}):
+ ret = zypper.upgrade(
+ dist_upgrade=True,
+ dryrun=True,
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=False,
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--debug-solver",
+ )
with patch(
"salt.modules.zypperpkg.list_pkgs",
@@ -750,7 +783,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy",
"--from",
"Dummy2",
- "--no-allow-vendor-change",
)
zypper_mock.assert_any_call(
"dist-upgrade",
@@ -760,7 +792,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy",
"--from",
"Dummy2",
- "--no-allow-vendor-change",
"--debug-solver",
)
@@ -797,7 +828,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy",
"--from",
"Dummy2",
- "--no-allow-vendor-change",
)
with patch(
@@ -911,7 +941,6 @@ Repository 'DUMMY' not found by its alias, number, or URI.
"--auto-agree-with-licenses",
"--from",
"DUMMY",
- "--no-allow-vendor-change",
)
def test_upgrade_available(self):
--
2.30.1

View File

@ -1,11 +1,41 @@
From a6f8803f6374f646802a898e43bc772d05960d89 Mon Sep 17 00:00:00 2001
From 07d1b742f16799d3df9d7eeb04bbce5d814e519d Mon Sep 17 00:00:00 2001
From: Martin Seidl <mseidl@suse.de>
Date: Thu, 24 Jun 2021 10:08:06 +0200
Subject: [PATCH] Move vendor change logic to zypper class (#355)
Date: Tue, 27 Oct 2020 16:12:29 +0100
Subject: [PATCH] Allow vendor change option with zypper
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fix novendorchange option (#284)
* Fixed novendorchange handling in zypperpkg
* refactor handling of novendorchange and fix tests
add patch support for allow vendor change option with zypper
Revert "add patch support for allow vendor change option with zypper"
This reverts commit cee4cc182b4740c912861c712dea7bc44eb70ffb.
Allow vendor change option with zypper (#313)
* add patch support for allow vendor change option with zypper
* adjust unit tests vendor change refactor, dropping cli arg
* Fix pr issues
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
* Fix unit test for allow vendor change on upgrade
* Add unit test with unsupported zypper version
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Move vendor change logic to zypper class (#355)
* move vendor change logic to zypper class
* fix thing in zypperkg
@ -29,26 +59,24 @@ Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Co-authored-by: Jochen Breuer <jbreuer@suse.de>
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
---
salt/modules/zypperpkg.py | 110 +++---
tests/unit/modules/test_zypperpkg.py | 513 +++++++++++++++++++--------
2 files changed, 428 insertions(+), 195 deletions(-)
salt/modules/zypperpkg.py | 105 +++++--
tests/unit/modules/test_zypperpkg.py | 418 ++++++++++++++++++++++++---
2 files changed, 462 insertions(+), 61 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index b35792237c..e064e2cb4e 100644
index 1777bec031..7216e25b86 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -105,10 +105,6 @@ class _Zypper:
ZYPPER_LOCK = "/var/run/zypp.pid"
TAG_RELEASED = "zypper/released"
TAG_BLOCKED = "zypper/blocked"
- # Dist upgrade vendor change support (SLE12+)
- dup_avc = False
- # Install/Patch/Upgrade vendor change support (SLE15+)
- inst_avc = False
@@ -35,6 +35,8 @@ import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+
+# pylint: disable=import-error,redefined-builtin,no-name-in-module
from salt.utils.versions import LooseVersion
def __init__(self):
"""
@@ -138,6 +134,13 @@ class _Zypper:
log = logging.getLogger(__name__)
@@ -128,6 +130,13 @@ class _Zypper:
self.__systemd_scope = False
self.__root = None
@ -62,7 +90,7 @@ index b35792237c..e064e2cb4e 100644
# Call status
self.__called = False
@@ -182,6 +185,8 @@ class _Zypper:
@@ -172,6 +181,8 @@ class _Zypper:
self.__no_raise = True
elif item == "refreshable":
self.__refresh = True
@ -71,7 +99,7 @@ index b35792237c..e064e2cb4e 100644
elif item == "call":
return self.__call
else:
@@ -222,15 +227,27 @@ class _Zypper:
@@ -212,6 +223,33 @@ class _Zypper:
def pid(self):
return self.__call_result.get("pid", "")
@ -87,22 +115,25 @@ index b35792237c..e064e2cb4e 100644
+ )
+ return self
+
def refresh_zypper_flags(self):
try:
- zypp_version = version('zypper')
+ def refresh_zypper_flags(self):
+ try:
+ zypp_version = version("zypper")
# zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade
- if version_cmp(zypp_version, '1.11.34') >= 0:
+ # zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade
+ if version_cmp(zypp_version, "1.11.34") >= 0:
# zypper version supports vendor change for dist upgrade
self.dup_avc = True
# zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading
- if version_cmp(zypp_version, '1.14.8') >= 0:
+ # zypper version supports vendor change for dist upgrade
+ self.dup_avc = True
+ # zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading
+ if version_cmp(zypp_version, "1.14.8") >= 0:
self.inst_avc = True
else:
log.error("Failed to compare Zypper version")
@@ -351,6 +368,15 @@ class _Zypper:
+ self.inst_avc = True
+ else:
+ log.error("Failed to compare Zypper version")
+ except Exception as ex:
+ log.error("Unable to get Zypper version: {}".format(ex))
+
def _is_error(self):
"""
Is this is an error code?
@@ -326,6 +364,15 @@ class _Zypper:
if self.__systemd_scope:
cmd.extend(["systemd-run", "--scope"])
cmd.extend(self.__cmd)
@ -118,43 +149,38 @@ index b35792237c..e064e2cb4e 100644
log.debug("Calling Zypper: %s", " ".join(cmd))
self.__call_result = __salt__["cmd.run_all"](cmd, **kwargs)
if self._check_result():
@@ -1451,6 +1477,7 @@ def install(
@@ -1435,6 +1482,8 @@ def install(
no_recommends=False,
root=None,
inclusion_detection=False,
novendorchange=True,
+ novendorchange=True,
+ allowvendorchange=False,
**kwargs
):
"""
@@ -1499,7 +1526,11 @@ def install(
@@ -1482,6 +1531,13 @@ def install(
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
novendorchange
- Disallow vendor change
+ novendorchange
+ DEPRECATED(use allowvendorchange): If set to True, do not allow vendor changes. Default: True
+
+ allowvendorchange
+ If set to True, vendor change is allowed. Default: False
+ If both allowvendorchange and novendorchange are passed, only allowvendorchange is used.
+
version
Can be either a version number, or the combination of a comparison
@@ -1662,14 +1693,6 @@ def install(
operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
@@ -1647,6 +1703,7 @@ def install(
cmd_install.append(
kwargs.get("resolve_capabilities") and "--capability" or "--name"
)
# Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+
- if not novendorchange:
- __zypper__(root=root).refresh_zypper_flags()
- if __zypper__(root=root).inst_avc:
- cmd_install.append("--allow-vendor-change")
- log.info("Enabling vendor changes")
- else:
- log.warning("Enabling/Disabling vendor changes is not supported on this Zypper version")
-
+ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+
if not refresh:
cmd_install.insert(0, "--no-refresh")
@@ -1696,6 +1719,7 @@ def install(
@@ -1683,6 +1740,7 @@ def install(
systemd_scope=systemd_scope,
root=root,
)
@ -162,7 +188,7 @@ index b35792237c..e064e2cb4e 100644
.call(*cmd)
.splitlines()
):
@@ -1708,7 +1732,9 @@ def install(
@@ -1695,7 +1753,9 @@ def install(
while downgrades:
cmd = cmd_install + ["--force"] + downgrades[:500]
downgrades = downgrades[500:]
@ -173,15 +199,17 @@ index b35792237c..e064e2cb4e 100644
_clean_cache()
new = (
@@ -1740,6 +1766,7 @@ def upgrade(
@@ -1726,7 +1786,8 @@ def upgrade(
dryrun=False,
dist_upgrade=False,
fromrepo=None,
novendorchange=True,
- novendorchange=False,
+ novendorchange=True,
+ allowvendorchange=False,
skip_verify=False,
no_recommends=False,
root=None,
@@ -1778,7 +1805,11 @@ def upgrade(
@@ -1765,7 +1826,11 @@ def upgrade(
Specify a list of package repositories to upgrade from. Default: None
novendorchange
@ -194,28 +222,19 @@ index b35792237c..e064e2cb4e 100644
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
@@ -1825,40 +1856,21 @@ def upgrade(
@@ -1812,31 +1877,21 @@ def upgrade(
cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
log.info("Targeting repos: %s", fromrepo)
- if not novendorchange:
- __zypper__(root=root).refresh_zypper_flags()
- if dist_upgrade:
- if __zypper__(root=root).dup_avc:
- cmd_update.append("--allow-vendor-change")
- log.info("Enabling vendor changes")
- if novendorchange:
- # TODO: Grains validation should be moved to Zypper class
- if __grains__["osrelease_info"][0] > 11:
- cmd_update.append("--no-allow-vendor-change")
- log.info("Disabling vendor changes")
- else:
- log.warning(
- "Enabling/Disabling vendor changes is not supported on this Zypper version"
- )
- else:
- # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+
- if __zypper__(root=root).inst_avc:
- cmd_update.append("--allow-vendor-change")
- log.info("Enabling vendor changes")
- else:
- log.warning(
- "Enabling/Disabling vendor changes is not supported on this Zypper version"
- "Disabling vendor changes is not supported on this Zypper version"
- )
-
- if no_recommends:
@ -248,19 +267,10 @@ index b35792237c..e064e2cb4e 100644
new = list_pkgs(root=root)
ret = salt.utils.data.compare_dicts(old, new)
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index f32c382d7f..5c01bbbfbd 100644
index 0ba5595d65..78fe226914 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -14,7 +14,7 @@ from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
from salt.ext.six.moves import configparser
from tests.support.mixins import LoaderModuleMockMixin
-from tests.support.mock import MagicMock, Mock, call, patch
+from tests.support.mock import MagicMock, Mock, call, mock_open, patch
from tests.support.unit import TestCase
@@ -135,6 +135,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
@@ -137,6 +137,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
stdout_xml_snippet = '<?xml version="1.0"?><test foo="bar"/>'
sniffer = RunSniffer(stdout=stdout_xml_snippet)
@ -268,7 +278,7 @@ index f32c382d7f..5c01bbbfbd 100644
with patch.dict("salt.modules.zypperpkg.__salt__", {"cmd.run_all": sniffer}):
self.assertEqual(zypper.__zypper__.call("foo"), stdout_xml_snippet)
self.assertEqual(len(sniffer.calls), 1)
@@ -590,13 +591,373 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
@@ -592,13 +593,373 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
{"vim": "7.4.326-2.62", "fakepkg": ""},
)
@ -643,7 +653,7 @@ index f32c382d7f..5c01bbbfbd 100644
"salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
), patch(
"salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
@@ -635,17 +996,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
@@ -637,16 +998,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.1,1.2"}})
zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
@ -654,109 +664,52 @@ index f32c382d7f..5c01bbbfbd 100644
- ret = zypper.upgrade(dist_upgrade=True)
- self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "dist-upgrade", "--auto-agree-with-licenses"
- )
-
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
@@ -677,94 +1027,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy2",
@@ -662,6 +1013,22 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"--debug-solver",
)
- with patch(
- "salt.modules.zypperpkg.list_pkgs",
- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
- ):
- ret = zypper.upgrade(
- dist_upgrade=True,
- fromrepo=["Dummy", "Dummy2"],
- novendorchange=True,
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- )
-
- with patch(
- "salt.modules.zypperpkg.list_pkgs",
- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])
- ):
- with patch.dict(zypper.__salt__,
- {'pkg_resource.version': MagicMock(return_value='1.15'),
- 'lowpkg.version_cmp': MagicMock(return_value=1)}):
- ret = zypper.upgrade(
- dist_upgrade=True,
- dryrun=True,
- fromrepo=["Dummy", "Dummy2"],
- novendorchange=False,
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--allow-vendor-change",
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--allow-vendor-change",
- "--debug-solver",
- )
-
- with patch(
- "salt.modules.zypperpkg.list_pkgs",
- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])
- ):
- with patch.dict(zypper.__salt__,
- {'pkg_resource.version': MagicMock(return_value='1.11'),
- 'lowpkg.version_cmp': MagicMock(return_value=1)}):
- ret = zypper.upgrade(
- dist_upgrade=True,
- dryrun=True,
- fromrepo=["Dummy", "Dummy2"],
- novendorchange=False,
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- )
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--dry-run",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--debug-solver",
- )
-
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
+ ):
+ ret = zypper.upgrade(
+ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False
+ )
+ zypper_mock.assert_any_call(
+ "update",
+ "--auto-agree-with-licenses",
+ "--repo",
+ "Dummy",
+ "--repo",
+ "Dummy2",
+ )
+
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
@@ -811,52 +1073,13 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
@@ -680,7 +1047,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy",
"--from",
"Dummy2",
- "--no-allow-vendor-change",
)
zypper_mock.assert_any_call(
"dist-upgrade",
@@ -690,7 +1056,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy",
"--from",
"Dummy2",
- "--no-allow-vendor-change",
"--debug-solver",
)
@@ -710,33 +1075,13 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"Dummy2",
)
@ -777,26 +730,7 @@ index f32c382d7f..5c01bbbfbd 100644
- "Dummy",
- "--from",
- "Dummy2",
- )
-
- with patch(
- "salt.modules.zypperpkg.list_pkgs",
- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
- ):
- ret = zypper.upgrade(
- dist_upgrade=True,
- fromrepo=["Dummy", "Dummy2"],
- novendorchange=False,
- )
- self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
- zypper_mock.assert_any_call(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--from",
- "Dummy",
- "--from",
- "Dummy2",
- "--allow-vendor-change",
- "--no-allow-vendor-change",
- )
-
def test_upgrade_kernel(self):
@ -810,7 +744,7 @@ index f32c382d7f..5c01bbbfbd 100644
"salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
), patch(
"salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
@@ -915,12 +1138,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
@@ -795,12 +1140,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.pid = 1234
self.exit_code = 555
self.noraise = MagicMock()
@ -825,19 +759,16 @@ index f32c382d7f..5c01bbbfbd 100644
"salt.modules.zypperpkg.__zypper__", FailingZypperDummy()
) as zypper_mock, patch(
"salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
@@ -937,10 +1161,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
@@ -817,7 +1163,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.assertEqual(cmd_exc.exception.info["changes"], {})
self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
zypper_mock.noraise.call.assert_called_with(
- "dist-upgrade",
- "--auto-agree-with-licenses",
- "--from",
- "DUMMY",
- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY"
+ "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY",
)
def test_upgrade_available(self):
--
2.31.1
2.33.0

View File

@ -1,91 +0,0 @@
From 6111853f13c9c1e8eaaa1acd521cd3abfbfff766 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 13 Aug 2020 13:49:16 +0100
Subject: [PATCH] ansiblegate: take care of failed, skipped and
unreachable tasks (bsc#1173911)
Add 'retcode' from ansible-playbook execution to the returned data (bsc#1173909)
Always add retcode to ansible.playbooks output
Adjust ansible.playbooks output comment properly
Add new unit test for ansible.playbooks
Add unit tests for ansible.playbooks state
---
tests/unit/modules/test_ansiblegate.py | 12 ++++++++++++
tests/unit/states/test_ansiblegate.py | 7 ++++---
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py
index 6724d37c40..3d406a9d42 100644
--- a/tests/unit/modules/test_ansiblegate.py
+++ b/tests/unit/modules/test_ansiblegate.py
@@ -209,3 +209,15 @@ description:
timeout=1200,
)
assert ret == {"completed": True, "timeout": 1200}
+
+ @patch("salt.utils.path.which", MagicMock(return_value=True))
+ def test_ansible_playbooks_return_retcode(self):
+ """
+ Test ansible.playbooks execution module function include retcode in the return.
+ :return:
+ """
+ ref_out = {"retcode": 0, "stdout": '{"foo": "bar"}'}
+ cmd_run_all = MagicMock(return_value=ref_out)
+ with patch.dict(ansible.__salt__, {"cmd.run_all": cmd_run_all}):
+ ret = ansible.playbooks("fake-playbook.yml")
+ assert "retcode" in ret
diff --git a/tests/unit/states/test_ansiblegate.py b/tests/unit/states/test_ansiblegate.py
index ac677fc5db..c21a4f642f 100644
--- a/tests/unit/states/test_ansiblegate.py
+++ b/tests/unit/states/test_ansiblegate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Import Salt Testing Libs
import json
import os
@@ -43,6 +42,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {ansible: {}}
+ @patch("salt.utils.path.which", MagicMock(return_value=True))
def test_ansible_playbooks_states_success(self):
"""
Test ansible.playbooks states executions success.
@@ -57,7 +57,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(
ansible.__salt__,
{"ansible.playbooks": MagicMock(return_value=success_output)},
- ), patch("salt.utils.path.which", MagicMock(return_value=True)):
+ ):
with patch.dict(ansible.__opts__, {"test": False}):
ret = ansible.playbooks("foobar")
self.assertTrue(ret["result"])
@@ -73,6 +73,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
},
)
+ @patch("salt.utils.path.which", MagicMock(return_value=True))
def test_ansible_playbooks_states_failed(self):
"""
Test ansible.playbooks failed states executions.
@@ -87,7 +88,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(
ansible.__salt__,
{"ansible.playbooks": MagicMock(return_value=failed_output)},
- ), patch("salt.utils.path.which", MagicMock(return_value=True)):
+ ):
with patch.dict(ansible.__opts__, {"test": False}):
ret = ansible.playbooks("foobar")
self.assertFalse(ret["result"])
--
2.29.2

View File

@ -1,235 +0,0 @@
From 85464ec6c34fceee3379d268745c3294d27e7fb4 Mon Sep 17 00:00:00 2001
From: Steve Kowalik <steven@wedontsleep.org>
Date: Mon, 17 Feb 2020 15:34:00 +1100
Subject: [PATCH] Apply patch from upstream to support Python 3.8
Apply saltstack/salt#56031 to support Python 3.8, which removed a
deprecated module and changed some behaviour. Add a {Build,}Requires on
python-distro, since it is now required.
---
pkg/suse/salt.spec | 2 ++
salt/renderers/stateconf.py | 49 ++++++++++++++++---------------------
2 files changed, 23 insertions(+), 28 deletions(-)
diff --git a/pkg/suse/salt.spec b/pkg/suse/salt.spec
index a17d2381ce..0df9d6c283 100644
--- a/pkg/suse/salt.spec
+++ b/pkg/suse/salt.spec
@@ -62,6 +62,7 @@ BuildRequires: python-psutil
BuildRequires: python-requests >= 1.0.0
BuildRequires: python-tornado >= 4.2.1
BuildRequires: python-yaml
+BuildRequires: python-distro
# requirements/opt.txt (not all)
# BuildRequires: python-MySQL-python
# BuildRequires: python-timelib
@@ -112,6 +113,7 @@ Requires: python-psutil
Requires: python-requests >= 1.0.0
Requires: python-tornado >= 4.2.1
Requires: python-yaml
+Requires: python-distro
%if 0%{?suse_version}
# requirements/opt.txt (not all)
Recommends: python-MySQL-python
diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py
index 298ae28338..f0527d51d7 100644
--- a/salt/renderers/stateconf.py
+++ b/salt/renderers/stateconf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
A flexible renderer that takes a templating engine and a data format
@@ -26,8 +25,6 @@ A flexible renderer that takes a templating engine and a data format
# - apache: >= 0.1.0
#
-# Import python libs
-from __future__ import absolute_import, print_function, unicode_literals
import copy
import getopt
@@ -36,12 +33,9 @@ import os
import re
from itertools import chain
-# Import salt libs
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import SaltRenderError
-
-# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import StringIO # pylint: disable=import-error
@@ -135,7 +129,7 @@ def render(input, saltenv="base", sls="", argline="", **kws):
sid = has_names_decls(data)
if sid:
raise SaltRenderError(
- "'names' declaration(found in state id: {0}) is "
+ "'names' declaration(found in state id: {}) is "
"not supported with implicitly ordered states! You "
"should generate the states in a template for-loop "
"instead.".format(sid)
@@ -203,11 +197,11 @@ def render(input, saltenv="base", sls="", argline="", **kws):
name, rt_argline = (args[1] + " ").split(" ", 1)
render_template = renderers[name] # e.g., the mako renderer
except KeyError as err:
- raise SaltRenderError("Renderer: {0} is not available!".format(err))
+ raise SaltRenderError("Renderer: {} is not available!".format(err))
except IndexError:
raise INVALID_USAGE_ERROR
- if isinstance(input, six.string_types):
+ if isinstance(input, str):
with salt.utils.files.fopen(input, "r") as ifile:
sls_templ = salt.utils.stringutils.to_unicode(ifile.read())
else: # assume file-like
@@ -227,7 +221,7 @@ def render(input, saltenv="base", sls="", argline="", **kws):
prefix = sls + "::"
tmplctx = {
k[len(prefix) :] if k.startswith(prefix) else k: v
- for k, v in six.iteritems(tmplctx)
+ for k, v in tmplctx.items()
}
else:
tmplctx = {}
@@ -262,8 +256,8 @@ def rewrite_single_shorthand_state_decl(data): # pylint: disable=C0103
state_id_decl:
state.func: []
"""
- for sid, states in six.iteritems(data):
- if isinstance(states, six.string_types):
+ for sid, states in data.items():
+ if isinstance(states, str):
data[sid] = {states: []}
@@ -328,7 +322,7 @@ def nvlist(thelist, names=None):
for nvitem in thelist:
if isinstance(nvitem, dict):
# then nvitem is a name-value item(a dict) of the list.
- name, value = next(six.iteritems(nvitem))
+ name, value = next(iter(nvitem.items()))
if names is None or name in names:
yield nvitem, name, value
@@ -349,17 +343,16 @@ def nvlist2(thelist, names=None):
"""
for _, _, value in nvlist(thelist, names):
- for each in nvlist(value):
- yield each
+ yield from nvlist(value)
def statelist(states_dict, sid_excludes=frozenset(["include", "exclude"])):
- for sid, states in six.iteritems(states_dict):
+ for sid, states in states_dict.items():
if sid.startswith("__"):
continue
if sid in sid_excludes:
continue
- for sname, args in six.iteritems(states):
+ for sname, args in states.items():
if sname.startswith("__"):
continue
yield sid, states, sname, args
@@ -401,11 +394,11 @@ def rename_state_ids(data, sls, is_extend=False):
newsid = _local_to_abs_sid(sid, sls)
if newsid in data:
raise SaltRenderError(
- "Can't rename state id({0}) into {1} because the later "
+ "Can't rename state id({}) into {} because the later "
"already exists!".format(sid, newsid)
)
# add a '- name: sid' to those states without '- name'.
- for sname, args in six.iteritems(data[sid]):
+ for sname, args in data[sid].items():
if state_name(sname) == STATE_NAME:
continue
for arg in args:
@@ -430,7 +423,7 @@ EXTENDED_REQUIRE_IN = {}
# explicit require_in/watch_in/listen_in/onchanges_in/onfail_in can only contain states after it
def add_implicit_requires(data):
def T(sid, state): # pylint: disable=C0103
- return "{0}:{1}".format(sid, state_name(state))
+ return "{}:{}".format(sid, state_name(state))
states_before = set()
states_after = set()
@@ -462,7 +455,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_after:
raise SaltRenderError(
- "State({0}) can't require/watch/listen/onchanges/onfail a state({1}) defined "
+ "State({}) can't require/watch/listen/onchanges/onfail a state({}) defined "
"after it!".format(tag, T(rsid, rstate))
)
@@ -472,7 +465,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_before:
raise SaltRenderError(
- "State({0}) can't require_in/watch_in/listen_in/onchanges_in/onfail_in a state({1}) "
+ "State({}) can't require_in/watch_in/listen_in/onchanges_in/onfail_in a state({}) "
"defined before it!".format(tag, T(rsid, rstate))
)
@@ -492,7 +485,7 @@ def add_start_state(data, sls):
start_sid = __opts__["stateconf_start_state"]
if start_sid in data:
raise SaltRenderError(
- "Can't generate start state({0})! The same state id already "
+ "Can't generate start state({})! The same state id already "
"exists!".format(start_sid)
)
if not data:
@@ -502,14 +495,14 @@ def add_start_state(data, sls):
# no __sls__, or it's the first state whose id declaration has a
# __sls__ == sls.
non_sids = ("include", "exclude", "extend")
- for sid, states in six.iteritems(data):
+ for sid, states in data.items():
if sid in non_sids or sid.startswith("__"):
continue
if "__sls__" not in states or states["__sls__"] == sls:
break
else:
raise SaltRenderError("Can't determine the first state in the sls file!")
- reqin = {state_name(next(six.iterkeys(data[sid]))): sid}
+ reqin = {state_name(next(iter(data[sid].keys()))): sid}
data[start_sid] = {STATE_FUNC: [{"require_in": [reqin]}]}
@@ -517,7 +510,7 @@ def add_goal_state(data):
goal_sid = __opts__["stateconf_goal_state"]
if goal_sid in data:
raise SaltRenderError(
- "Can't generate goal state({0})! The same state id already "
+ "Can't generate goal state({})! The same state id already "
"exists!".format(goal_sid)
)
else:
@@ -561,7 +554,7 @@ STATE_CONF_EXT = {} # stateconf.set under extend: ...
def extract_state_confs(data, is_extend=False):
- for state_id, state_dict in six.iteritems(data):
+ for state_id, state_dict in data.items():
if state_id == "extend" and not is_extend:
extract_state_confs(state_dict, True)
continue
@@ -578,7 +571,7 @@ def extract_state_confs(data, is_extend=False):
for sdk in state_dict[key]:
if not isinstance(sdk, dict):
continue
- key, val = next(six.iteritems(sdk))
+ key, val = next(iter(sdk.items()))
conf[key] = val
if not is_extend and state_id in STATE_CONF_EXT:
--
2.29.2

View File

@ -1,69 +0,0 @@
From 843c76e5889659ec80fea3f39b750b9f907a902d Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Wed, 17 Feb 2021 16:47:11 +0300
Subject: [PATCH] Async batch implementation fix (#320)
---
salt/client/__init__.py | 38 --------------------------------------
1 file changed, 38 deletions(-)
diff --git a/salt/client/__init__.py b/salt/client/__init__.py
index cc8fd4048d..ddb437604b 100644
--- a/salt/client/__init__.py
+++ b/salt/client/__init__.py
@@ -534,12 +534,6 @@ class LocalClient:
{'dave': {...}}
{'stewart': {...}}
"""
- # We need to re-import salt.utils.args here
- # even though it has already been imported.
- # when cmd_batch is called via the NetAPI
- # the module is unavailable.
- import salt.utils.args
-
# Late import - not used anywhere else in this file
import salt.cli.batch
@@ -557,38 +551,6 @@ class LocalClient:
eauth = salt.cli.batch.batch_get_eauth(kwargs)
- arg = salt.utils.args.condition_input(arg, kwarg)
- opts = {
- "tgt": tgt,
- "fun": fun,
- "arg": arg,
- "tgt_type": tgt_type,
- "ret": ret,
- "batch": batch,
- "failhard": kwargs.get("failhard", self.opts.get("failhard", False)),
- "raw": kwargs.get("raw", False),
- }
-
- if "timeout" in kwargs:
- opts["timeout"] = kwargs["timeout"]
- if "gather_job_timeout" in kwargs:
- opts["gather_job_timeout"] = kwargs["gather_job_timeout"]
- if "batch_wait" in kwargs:
- opts["batch_wait"] = int(kwargs["batch_wait"])
-
- eauth = {}
- if "eauth" in kwargs:
- eauth["eauth"] = kwargs.pop("eauth")
- if "username" in kwargs:
- eauth["username"] = kwargs.pop("username")
- if "password" in kwargs:
- eauth["password"] = kwargs.pop("password")
- if "token" in kwargs:
- eauth["token"] = kwargs.pop("token")
-
- for key, val in self.opts.items():
- if key not in opts:
- opts[key] = val
batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True)
for ret in batch.run():
yield ret
--
2.30.0

File diff suppressed because it is too large Load Diff

View File

@ -1,39 +0,0 @@
From 2fecfe18cf17389714ab5bed0ff59bec2d1e1c36 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 29 Jul 2019 11:17:53 +0100
Subject: [PATCH] Avoid traceback when http.query request cannot be
performed (bsc#1128554)
Improve error logging when http.query cannot be performed
---
salt/utils/http.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/salt/utils/http.py b/salt/utils/http.py
index 5ab4503f61..9522bd6ee4 100644
--- a/salt/utils/http.py
+++ b/salt/utils/http.py
@@ -628,12 +628,17 @@ def query(
except salt.ext.tornado.httpclient.HTTPError as exc:
ret["status"] = exc.code
ret["error"] = str(exc)
+ log.error(
+ "Cannot perform 'http.query': {} - {}".format(url_full, ret["error"])
+ )
return ret
except (socket.herror, OSError, socket.timeout, socket.gaierror) as exc:
if status is True:
ret["status"] = 0
ret["error"] = str(exc)
- log.debug("Cannot perform 'http.query': %s - %s", url_full, ret["error"])
+ log.error(
+ "Cannot perform 'http.query': {} - {}".format(url_full, ret["error"])
+ )
return ret
if stream is True or handle is True:
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,227 +0,0 @@
From fba6631e0a66a5f8ea76a104e9acf385ce06471c Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Wed, 18 Aug 2021 15:05:30 +0300
Subject: [PATCH] Backport of upstream PR59492 to 3002.2 (#404)
* Fix failing integration tests
* Fix unless logic and failing tests
* Revert some of the changes in the onlyif code
Co-authored-by: twangboy <slee@saltstack.com>
---
salt/state.py | 24 +++++++++------
.../files/file/base/issue-35384.sls | 7 +++++
tests/unit/test_state.py | 30 ++++++++++++++-----
3 files changed, 44 insertions(+), 17 deletions(-)
diff --git a/salt/state.py b/salt/state.py
index 070a914636..64c5225728 100644
--- a/salt/state.py
+++ b/salt/state.py
@@ -929,7 +929,8 @@ class State:
def _run_check_onlyif(self, low_data, cmd_opts):
"""
- Check that unless doesn't return 0, and that onlyif returns a 0.
+ Make sure that all commands return True for the state to run. If any
+ command returns False (non 0), the state will not run
"""
ret = {"result": False}
@@ -938,7 +939,9 @@ class State:
else:
low_data_onlyif = low_data["onlyif"]
+ # If any are False the state will NOT run
def _check_cmd(cmd):
+ # Don't run condition (False)
if cmd != 0 and ret["result"] is False:
ret.update(
{
@@ -1001,7 +1004,8 @@ class State:
def _run_check_unless(self, low_data, cmd_opts):
"""
- Check that unless doesn't return 0, and that onlyif returns a 0.
+ Check if any of the commands return False (non 0). If any are False the
+ state will run.
"""
ret = {"result": False}
@@ -1010,8 +1014,10 @@ class State:
else:
low_data_unless = low_data["unless"]
+ # If any are False the state will run
def _check_cmd(cmd):
- if cmd == 0 and ret["result"] is False:
+ # Don't run condition
+ if cmd == 0:
ret.update(
{
"comment": "unless condition is true",
@@ -1020,9 +1026,10 @@ class State:
}
)
return False
- elif cmd != 0:
+ else:
+ ret.pop("skip_watch", None)
ret.update({"comment": "unless condition is false", "result": False})
- return True
+ return True
for entry in low_data_unless:
if isinstance(entry, str):
@@ -1034,7 +1041,7 @@ class State:
except CommandExecutionError:
# Command failed, so notify unless to skip the item
cmd = 0
- if not _check_cmd(cmd):
+ if _check_cmd(cmd):
return ret
elif isinstance(entry, dict):
if "fun" not in entry:
@@ -1047,7 +1054,7 @@ class State:
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
- if not _check_cmd(self.state_con["retcode"]):
+ if _check_cmd(self.state_con["retcode"]):
return ret
elif result:
ret.update(
@@ -1057,11 +1064,11 @@ class State:
"result": True,
}
)
- return ret
else:
ret.update(
{"comment": "unless condition is false", "result": False}
)
+ return ret
else:
ret.update(
{
@@ -1069,7 +1076,6 @@ class State:
"result": False,
}
)
- return ret
# No reason to stop, return ret
return ret
diff --git a/tests/integration/files/file/base/issue-35384.sls b/tests/integration/files/file/base/issue-35384.sls
index 3c41617ca8..2aa436bb37 100644
--- a/tests/integration/files/file/base/issue-35384.sls
+++ b/tests/integration/files/file/base/issue-35384.sls
@@ -2,5 +2,12 @@ cmd_run_unless_multiple:
cmd.run:
- name: echo "hello"
- unless:
+ {% if grains["os"] == "Windows" %}
+ - "exit 0"
+ - "exit 1"
+ - "exit 0"
+ {% else %}
- "$(which true)"
- "$(which false)"
+ - "$(which true)"
+ {% endif %}
diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py
index 95018a9cf3..79a261d837 100644
--- a/tests/unit/test_state.py
+++ b/tests/unit/test_state.py
@@ -142,7 +142,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
def test_verify_onlyif_cmd_error(self):
"""
Simulates a failure in cmd.retcode from onlyif
- This could occur is runas is specified with a user that does not exist
+ This could occur if runas is specified with a user that does not exist
"""
low_data = {
"onlyif": "somecommand",
@@ -175,7 +175,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
def test_verify_unless_cmd_error(self):
"""
Simulates a failure in cmd.retcode from unless
- This could occur is runas is specified with a user that does not exist
+ This could occur if runas is specified with a user that does not exist
"""
low_data = {
"unless": "somecommand",
@@ -206,6 +206,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
self.assertEqual(expected_result, return_result)
def test_verify_unless_list_cmd(self):
+ """
+ If any of the unless commands return False (non 0) then the state should
+ run (no skip_watch).
+ """
low_data = {
"state": "cmd",
"name": 'echo "something"',
@@ -217,9 +221,8 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
"fun": "run",
}
expected_result = {
- "comment": "unless condition is true",
- "result": True,
- "skip_watch": True,
+ "comment": "unless condition is false",
+ "result": False,
}
with patch("salt.state.State._gather_pillar") as state_patch:
minion_opts = self.get_temp_config("minion")
@@ -228,6 +231,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
self.assertEqual(expected_result, return_result)
def test_verify_unless_list_cmd_different_order(self):
+ """
+ If any of the unless commands return False (non 0) then the state should
+ run (no skip_watch). The order shouldn't matter.
+ """
low_data = {
"state": "cmd",
"name": 'echo "something"',
@@ -239,9 +246,8 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
"fun": "run",
}
expected_result = {
- "comment": "unless condition is true",
- "result": True,
- "skip_watch": True,
+ "comment": "unless condition is false",
+ "result": False,
}
with patch("salt.state.State._gather_pillar") as state_patch:
minion_opts = self.get_temp_config("minion")
@@ -272,6 +278,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
self.assertEqual(expected_result, return_result)
def test_verify_unless_list_cmd_valid(self):
+ """
+ If any of the unless commands return False (non 0) then the state should
+ run (no skip_watch). This tests all commands return False.
+ """
low_data = {
"state": "cmd",
"name": 'echo "something"',
@@ -308,6 +318,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
self.assertEqual(expected_result, return_result)
def test_verify_unless_list_cmd_invalid(self):
+ """
+ If any of the unless commands return False (non 0) then the state should
+ run (no skip_watch). This tests all commands return True
+ """
low_data = {
"state": "cmd",
"name": 'echo "something"',
--
2.32.0

View File

@ -1,127 +0,0 @@
From a782af246a2f3d4b91afee2ee847c87f71e8904b Mon Sep 17 00:00:00 2001
From: Alexander Graul <agraul@suse.com>
Date: Fri, 25 Jun 2021 13:34:38 +0200
Subject: [PATCH] Backport Thread.is_alive fix (#390)
* Change thread.isAlive() to thread.is_alive()
(cherry picked from commit b1dc0cee03896c8abad55a609805b0be6c7aaefa)
* Run pre-commit on salt/utils/timed_subprocess.py
(cherry picked from commit 178e3b83e6c21abf5d6db454c19c104ceb8bd92c)
* Fix the six removal made by pre-commit
(cherry picked from commit aaa8ca3b7f129568637799d6d49d7ad3708f73bc)
* Remove the PY2 code in salt/utils/timed_subprocess.py
(cherry picked from commit 3a702a510b965e9af1ad318c953e19114925357e)
Co-authored-by: Petr Messner <petr@leadhub.co>
Co-authored-by: Petr Messner <petr.messner@gmail.com>
---
salt/utils/timed_subprocess.py | 39 ++++++++++++++--------------------
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/salt/utils/timed_subprocess.py b/salt/utils/timed_subprocess.py
index 5c4ac35ac3..b043a3bde2 100644
--- a/salt/utils/timed_subprocess.py
+++ b/salt/utils/timed_subprocess.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
"""
For running command line executables with a timeout
"""
-from __future__ import absolute_import, print_function, unicode_literals
import shlex
import subprocess
@@ -10,10 +8,9 @@ import threading
import salt.exceptions
import salt.utils.data
-from salt.ext import six
-class TimedProc(object):
+class TimedProc:
"""
Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
"""
@@ -46,7 +43,7 @@ class TimedProc(object):
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError(
- "Error: timeout {0} must be a number".format(self.timeout)
+ "Error: timeout {} must be a number".format(self.timeout)
)
if kwargs.get("shell", False):
args = salt.utils.data.decode(args, to_str=True)
@@ -59,28 +56,24 @@ class TimedProc(object):
try:
args = shlex.split(args)
except AttributeError:
- args = shlex.split(six.text_type(args))
+ args = shlex.split(str(args))
str_args = []
for arg in args:
- if not isinstance(arg, six.string_types):
- str_args.append(six.text_type(arg))
+ if not isinstance(arg, str):
+ str_args.append(str(arg))
else:
str_args.append(arg)
args = str_args
else:
- if not isinstance(args, (list, tuple, six.string_types)):
+ if not isinstance(args, (list, tuple, str)):
# Handle corner case where someone does a 'cmd.run 3'
- args = six.text_type(args)
+ args = str(args)
# Ensure that environment variables are strings
- for key, val in six.iteritems(kwargs.get("env", {})):
- if not isinstance(val, six.string_types):
- kwargs["env"][key] = six.text_type(val)
- if not isinstance(key, six.string_types):
- kwargs["env"][six.text_type(key)] = kwargs["env"].pop(key)
- if six.PY2 and "env" in kwargs:
- # Ensure no unicode in custom env dict, as it can cause
- # problems with subprocess.
- kwargs["env"] = salt.utils.data.encode_dict(kwargs["env"])
+ for key, val in kwargs.get("env", {}).items():
+ if not isinstance(val, str):
+ kwargs["env"][key] = str(val)
+ if not isinstance(key, str):
+ kwargs["env"][str(key)] = kwargs["env"].pop(key)
args = salt.utils.data.decode(args)
self.process = subprocess.Popen(args, **kwargs)
self.command = args
@@ -103,18 +96,18 @@ class TimedProc(object):
rt = threading.Thread(target=receive)
rt.start()
rt.join(self.timeout)
- if rt.isAlive():
+ if rt.is_alive():
# Subprocess cleanup (best effort)
self.process.kill()
def terminate():
- if rt.isAlive():
+ if rt.is_alive():
self.process.terminate()
threading.Timer(10, terminate).start()
raise salt.exceptions.TimedProcTimeoutError(
- "{0} : Timed out after {1} seconds".format(
- self.command, six.text_type(self.timeout),
+ "{} : Timed out after {} seconds".format(
+ self.command, str(self.timeout),
)
)
return self.process.returncode
--
2.32.0

File diff suppressed because it is too large Load Diff

View File

@ -1,238 +0,0 @@
From 1606379714f4776e2b529fb1d45891266985c896 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 28 Feb 2020 15:11:53 +0000
Subject: [PATCH] Batch Async: Catch exceptions and safety unregister
and close instances
---
salt/cli/batch_async.py | 160 ++++++++++++++++++++++++----------------
1 file changed, 96 insertions(+), 64 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 1e2ac5b0d3..3dc04826d1 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -107,22 +107,25 @@ class BatchAsync:
def __event_handler(self, raw):
if not self.event:
return
- mtag, data = self.event.unpack(raw, self.event.serial)
- for (pattern, op) in self.patterns:
- if mtag.startswith(pattern[:-1]):
- minion = data["id"]
- if op == "ping_return":
- self.minions.add(minion)
- if self.targeted_minions == self.minions:
- self.event.io_loop.spawn_callback(self.start_batch)
- elif op == "find_job_return":
- if data.get("return", None):
- self.find_job_returned.add(minion)
- elif op == "batch_run":
- if minion in self.active:
- self.active.remove(minion)
- self.done_minions.add(minion)
- self.event.io_loop.spawn_callback(self.schedule_next)
+ try:
+ mtag, data = self.event.unpack(raw, self.event.serial)
+ for (pattern, op) in self.patterns:
+ if mtag.startswith(pattern[:-1]):
+ minion = data["id"]
+ if op == "ping_return":
+ self.minions.add(minion)
+ if self.targeted_minions == self.minions:
+ self.event.io_loop.spawn_callback(self.start_batch)
+ elif op == "find_job_return":
+ if data.get("return", None):
+ self.find_job_returned.add(minion)
+ elif op == "batch_run":
+ if minion in self.active:
+ self.active.remove(minion)
+ self.done_minions.add(minion)
+ self.event.io_loop.spawn_callback(self.schedule_next)
+ except Exception as ex:
+ log.error("Exception occured while processing event: {}".format(ex))
def _get_next(self):
to_run = (
@@ -154,53 +157,67 @@ class BatchAsync:
if timedout_minions:
self.schedule_next()
- if running:
+ if self.event and running:
self.find_job_returned = self.find_job_returned.difference(running)
self.event.io_loop.spawn_callback(self.find_job, running)
@tornado.gen.coroutine
def find_job(self, minions):
- not_done = minions.difference(self.done_minions).difference(
- self.timedout_minions
- )
-
- if not_done:
- jid = self.jid_gen()
- find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
- self.patterns.add((find_job_return_pattern, "find_job_return"))
- self.event.subscribe(find_job_return_pattern, match_type="glob")
-
- ret = yield self.local.run_job_async(
- not_done,
- "saltutil.find_job",
- [self.batch_jid],
- "list",
- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=jid,
- **self.eauth
+ if self.event:
+ not_done = minions.difference(self.done_minions).difference(
+ self.timedout_minions
)
- yield tornado.gen.sleep(self.opts["gather_job_timeout"])
- self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid)
+ try:
+ if not_done:
+ jid = self.jid_gen()
+ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+ self.patterns.add((find_job_return_pattern, "find_job_return"))
+ self.event.subscribe(find_job_return_pattern, match_type="glob")
+ ret = yield self.local.run_job_async(
+ not_done,
+ "saltutil.find_job",
+ [self.batch_jid],
+ "list",
+ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=jid,
+ **self.eauth
+ )
+ yield tornado.gen.sleep(self.opts["gather_job_timeout"])
+ if self.event:
+ self.event.io_loop.spawn_callback(
+ self.check_find_job, not_done, jid
+ )
+ except Exception as ex:
+ log.error(
+ "Exception occured handling batch async: {}. Aborting execution.".format(
+ ex
+ )
+ )
+ self.close_safe()
@tornado.gen.coroutine
def start(self):
- self.__set_event_handler()
- ping_return = yield self.local.run_job_async(
- self.opts["tgt"],
- "test.ping",
- [],
- self.opts.get("selected_target_option", self.opts.get("tgt_type", "glob")),
- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.ping_jid,
- metadata=self.metadata,
- **self.eauth
- )
- self.targeted_minions = set(ping_return["minions"])
- # start batching even if not all minions respond to ping
- yield tornado.gen.sleep(
- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
- )
- self.event.io_loop.spawn_callback(self.start_batch)
+ if self.event:
+ self.__set_event_handler()
+ ping_return = yield self.local.run_job_async(
+ self.opts["tgt"],
+ "test.ping",
+ [],
+ self.opts.get(
+ "selected_target_option", self.opts.get("tgt_type", "glob")
+ ),
+ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=self.ping_jid,
+ metadata=self.metadata,
+ **self.eauth
+ )
+ self.targeted_minions = set(ping_return["minions"])
+ # start batching even if not all minions respond to ping
+ yield tornado.gen.sleep(
+ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
+ )
+ if self.event:
+ self.event.io_loop.spawn_callback(self.start_batch)
@tornado.gen.coroutine
def start_batch(self):
@@ -215,7 +232,8 @@ class BatchAsync:
ret = self.event.fire_event(
data, "salt/batch/{}/start".format(self.batch_jid)
)
- self.event.io_loop.spawn_callback(self.run_next)
+ if self.event:
+ self.event.io_loop.spawn_callback(self.run_next)
@tornado.gen.coroutine
def end_batch(self):
@@ -232,11 +250,21 @@ class BatchAsync:
"metadata": self.metadata,
}
self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- for (pattern, label) in self.patterns:
- if label in ["ping_return", "batch_run"]:
- self.event.unsubscribe(pattern, match_type="glob")
- del self
- gc.collect()
+
+ # release to the IOLoop to allow the event to be published
+ # before closing batch async execution
+ yield tornado.gen.sleep(1)
+ self.close_safe()
+
+ def close_safe(self):
+ for (pattern, label) in self.patterns:
+ self.event.unsubscribe(pattern, match_type="glob")
+ self.event.remove_event_handler(self.__event_handler)
+ self.event = None
+ self.local = None
+ self.ioloop = None
+ del self
+ gc.collect()
@tornado.gen.coroutine
def schedule_next(self):
@@ -244,7 +272,8 @@ class BatchAsync:
self.scheduled = True
# call later so that we maybe gather more returns
yield tornado.gen.sleep(self.batch_delay)
- self.event.io_loop.spawn_callback(self.run_next)
+ if self.event:
+ self.event.io_loop.spawn_callback(self.run_next)
@tornado.gen.coroutine
def run_next(self):
@@ -266,17 +295,20 @@ class BatchAsync:
)
yield tornado.gen.sleep(self.opts["timeout"])
- self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
+
+ # The batch can be done already at this point, which means no self.event
+ if self.event:
+ self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
except Exception as ex:
- log.error("Error in scheduling next batch: %s", ex)
+ log.error("Error in scheduling next batch: %s. Aborting execution", ex)
self.active = self.active.difference(next_batch)
+ self.close_safe()
else:
yield self.end_batch()
gc.collect()
def __del__(self):
self.local = None
- self.event.remove_event_handler(self.__event_handler)
self.event = None
self.ioloop = None
gc.collect()
--
2.29.2

View File

@ -1,27 +0,0 @@
From 31fedcb3173f73fbffc3b053bc64c94a7b608118 Mon Sep 17 00:00:00 2001
From: Silvio Moioli <smoioli@suse.de>
Date: Mon, 2 Mar 2020 11:23:59 +0100
Subject: [PATCH] batch_async: avoid using fnmatch to match event
(#217)
---
salt/cli/batch_async.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 8d2601e636..1e2ac5b0d3 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -109,7 +109,7 @@ class BatchAsync:
return
mtag, data = self.event.unpack(raw, self.event.serial)
for (pattern, op) in self.patterns:
- if fnmatch.fnmatch(mtag, pattern):
+ if mtag.startswith(pattern[:-1]):
minion = data["id"]
if op == "ping_return":
self.minions.add(minion)
--
2.29.2

View File

@ -1,4 +1,4 @@
From cd64b9a063771829f85d6be0e42259825cfb10c8 Mon Sep 17 00:00:00 2001
From f0025c6d00f174db587726bb15b78713cbbcf996 Mon Sep 17 00:00:00 2001
From: "Daniel A. Wozniak" <dwozniak@saltstack.com>
Date: Mon, 2 Aug 2021 13:50:37 -0700
Subject: [PATCH] Better handling of bad public keys from minions
@ -31,7 +31,7 @@ index 0000000000..0cd55b19a6
@@ -0,0 +1 @@
+Better handling of bad RSA public keys from minions
diff --git a/salt/crypt.py b/salt/crypt.py
index 0a8b728f50..e6e4f3181e 100644
index f3da78f9ba..789c562e25 100644
--- a/salt/crypt.py
+++ b/salt/crypt.py
@@ -36,6 +36,7 @@ import salt.utils.verify
@ -42,7 +42,7 @@ index 0a8b728f50..e6e4f3181e 100644
MasterExit,
SaltClientError,
SaltReqTimeoutError,
@@ -217,10 +218,16 @@ def get_rsa_pub_key(path):
@@ -220,10 +221,16 @@ def get_rsa_pub_key(path):
with salt.utils.files.fopen(path, "rb") as f:
data = f.read().replace(b"RSA ", b"")
bio = BIO.MemoryBuffer(data)
@ -79,18 +79,18 @@ index 033a19cc54..1da15f9e69 100644
"""
Used in modules or grains when a required binary is not available
diff --git a/salt/key.py b/salt/key.py
index 75777ede06..59090c979c 100644
index 16d20b1303..3b931152cc 100644
--- a/salt/key.py
+++ b/salt/key.py
@@ -11,6 +11,7 @@ import fnmatch
@@ -9,6 +9,7 @@ import itertools
import logging
import os
import shutil
+import sys
# Import salt libs
import salt.cache
@@ -652,17 +653,27 @@ class Key(object):
import salt.client
@@ -643,17 +644,27 @@ class Key:
keydirs.append(self.REJ)
if include_denied:
keydirs.append(self.DEN)
@ -112,7 +112,7 @@ index 75777ede06..59090c979c 100644
)
eload = {"result": True, "act": "accept", "id": key}
self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (IOError, OSError):
except OSError:
pass
+ for keydir, key in invalid_keys:
+ matches[keydir].remove(key)
@ -121,10 +121,10 @@ index 75777ede06..59090c979c 100644
def accept_all(self):
diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py
index 003cbd8275..0f0c615408 100644
index 102af568f3..29b38d3027 100644
--- a/salt/transport/mixins/auth.py
+++ b/salt/transport/mixins/auth.py
@@ -184,11 +184,11 @@ class AESReqServerMixin(object):
@@ -174,11 +174,11 @@ class AESReqServerMixin:
tagged "auth" and returns a dict with information about the auth
event
@ -141,7 +141,7 @@ index 003cbd8275..0f0c615408 100644
"""
if not salt.utils.verify.valid_id(self.opts, load["id"]):
@@ -460,7 +460,7 @@ class AESReqServerMixin(object):
@@ -450,7 +450,7 @@ class AESReqServerMixin:
# and an empty request comes in
try:
pub = salt.crypt.get_rsa_pub_key(pubfn)
@ -151,10 +151,10 @@ index 003cbd8275..0f0c615408 100644
return {"enc": "clear", "load": {"ret": False}}
diff --git a/tests/pytests/integration/cli/test_salt_key.py b/tests/pytests/integration/cli/test_salt_key.py
index 0edb2cf86c..2583348ce6 100644
index 3ec87fe580..8f29929747 100644
--- a/tests/pytests/integration/cli/test_salt_key.py
+++ b/tests/pytests/integration/cli/test_salt_key.py
@@ -328,3 +328,31 @@ def test_keys_generation_keysize_max(salt_key_cli):
@@ -316,3 +316,31 @@ def test_keys_generation_keysize_max(salt_key_cli):
)
assert ret.exitcode != 0
assert "error: The maximum value for keysize is 32768" in ret.stderr
@ -213,6 +213,6 @@ index 0000000000..aa8f439b8c
+ with pytest.raises(salt.crypt.InvalidKeyError):
+ salt.crypt.get_rsa_pub_key(key_path)
--
2.32.0
2.33.0

View File

@ -1,107 +0,0 @@
From d9362f10681a2dfdb057939eee1ffae3a35d4a8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 12 Apr 2019 16:47:03 +0100
Subject: [PATCH] Calculate FQDNs in parallel to avoid blockings
(bsc#1129079)
Fix pylint issue
---
salt/grains/core.py | 55 +++++++++++++++++++++++++++++++++------------
1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 006878f806..883e3ebe09 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -20,8 +20,10 @@ import socket
import sys
import time
import uuid
+import warnings
import zlib
from errno import EACCES, EPERM
+from multiprocessing.dummy import Pool as ThreadPool
import distro
import salt.exceptions
@@ -44,6 +46,14 @@ import salt.utils.versions
from salt.ext.six.moves import range
from salt.utils.network import _get_interfaces
+# pylint: disable=import-error
+try:
+ import dateutil.tz
+
+ _DATEUTIL_TZ = True
+except ImportError:
+ _DATEUTIL_TZ = False
+
# rewrite distro.linux_distribution to allow best=True kwarg in version(), needed to get the minor version numbers in CentOS
def _linux_distribution():
@@ -2402,22 +2412,12 @@ def fqdns():
grains = {}
fqdns = set()
- addresses = salt.utils.network.ip_addrs(
- include_loopback=False, interface_data=_get_interfaces()
- )
- addresses.extend(
- salt.utils.network.ip_addrs6(
- include_loopback=False, interface_data=_get_interfaces()
- )
- )
- err_message = "Exception during resolving address: %s"
- for ip in addresses:
+ def _lookup_fqdn(ip):
try:
name, aliaslist, addresslist = socket.gethostbyaddr(ip)
- fqdns.update(
- [socket.getfqdn(name)]
- + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
- )
+ return [socket.getfqdn(name)] + [
+ als for als in aliaslist if salt.utils.network.is_fqdn(als)
+ ]
except socket.herror as err:
if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
# No FQDN for this IP address, so we don't need to know this all the time.
@@ -2427,6 +2427,33 @@ def fqdns():
except (OSError, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
+ start = time.time()
+
+ addresses = salt.utils.network.ip_addrs(
+ include_loopback=False, interface_data=_get_interfaces()
+ )
+ addresses.extend(
+ salt.utils.network.ip_addrs6(
+ include_loopback=False, interface_data=_get_interfaces()
+ )
+ )
+ err_message = "Exception during resolving address: %s"
+
+ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
+ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
+ # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
+ pool = ThreadPool(8)
+ results = pool.map(_lookup_fqdn, addresses)
+ pool.close()
+ pool.join()
+
+ for item in results:
+ if item:
+ fqdns.update(item)
+
+ elapsed = time.time() - start
+ log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed))
+
return {"fqdns": sorted(list(fqdns))}
--
2.29.2

View File

@ -1,252 +0,0 @@
From 5db9ccdb4f557cdbff670b18c45e55124e29c57c Mon Sep 17 00:00:00 2001
From: Jochen Breuer <jbreuer@suse.de>
Date: Tue, 10 Mar 2020 14:02:17 +0100
Subject: [PATCH] Changed imports to vendored Tornado
---
salt/cli/batch_async.py | 25 ++++++++++++-----------
salt/master.py | 2 +-
tests/unit/cli/test_batch_async.py | 32 +++++++++++++++---------------
3 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 3dc04826d1..09aa85258b 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -8,6 +8,7 @@ import gc
import logging
import salt.client
+import salt.ext.tornado
import tornado
from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum
@@ -46,7 +47,7 @@ class BatchAsync:
"""
def __init__(self, parent_opts, jid_gen, clear_load):
- ioloop = tornado.ioloop.IOLoop.current()
+ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
self.local = salt.client.get_local_client(
parent_opts["conf_file"], io_loop=ioloop
)
@@ -161,7 +162,7 @@ class BatchAsync:
self.find_job_returned = self.find_job_returned.difference(running)
self.event.io_loop.spawn_callback(self.find_job, running)
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def find_job(self, minions):
if self.event:
not_done = minions.difference(self.done_minions).difference(
@@ -182,7 +183,7 @@ class BatchAsync:
jid=jid,
**self.eauth
)
- yield tornado.gen.sleep(self.opts["gather_job_timeout"])
+ yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"])
if self.event:
self.event.io_loop.spawn_callback(
self.check_find_job, not_done, jid
@@ -195,7 +196,7 @@ class BatchAsync:
)
self.close_safe()
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def start(self):
if self.event:
self.__set_event_handler()
@@ -213,13 +214,13 @@ class BatchAsync:
)
self.targeted_minions = set(ping_return["minions"])
# start batching even if not all minions respond to ping
- yield tornado.gen.sleep(
+ yield salt.ext.tornado.gen.sleep(
self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
)
if self.event:
self.event.io_loop.spawn_callback(self.start_batch)
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def start_batch(self):
if not self.initialized:
self.batch_size = get_bnum(self.opts, self.minions, True)
@@ -235,7 +236,7 @@ class BatchAsync:
if self.event:
self.event.io_loop.spawn_callback(self.run_next)
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def end_batch(self):
left = self.minions.symmetric_difference(
self.done_minions.union(self.timedout_minions)
@@ -253,7 +254,7 @@ class BatchAsync:
# release to the IOLoop to allow the event to be published
# before closing batch async execution
- yield tornado.gen.sleep(1)
+ yield salt.ext.tornado.gen.sleep(1)
self.close_safe()
def close_safe(self):
@@ -266,16 +267,16 @@ class BatchAsync:
del self
gc.collect()
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def schedule_next(self):
if not self.scheduled:
self.scheduled = True
# call later so that we maybe gather more returns
- yield tornado.gen.sleep(self.batch_delay)
+ yield salt.ext.tornado.gen.sleep(self.batch_delay)
if self.event:
self.event.io_loop.spawn_callback(self.run_next)
- @tornado.gen.coroutine
+ @salt.ext.tornado.gen.coroutine
def run_next(self):
self.scheduled = False
next_batch = self._get_next()
@@ -294,7 +295,7 @@ class BatchAsync:
metadata=self.metadata,
)
- yield tornado.gen.sleep(self.opts["timeout"])
+ yield salt.ext.tornado.gen.sleep(self.opts["timeout"])
# The batch can be done already at this point, which means no self.event
if self.event:
diff --git a/salt/master.py b/salt/master.py
index 7a99af357a..ab85c7f5c6 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -2237,7 +2237,7 @@ class ClearFuncs(TransportMethods):
functools.partial(self._prep_jid, clear_load, {}),
batch_load,
)
- ioloop = tornado.ioloop.IOLoop.current()
+ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
ioloop.add_callback(batch.start)
return {
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index dcee9a87bd..82a712b15b 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -1,8 +1,8 @@
-import tornado
+import salt.ext.tornado
from salt.cli.batch_async import BatchAsync
+from salt.ext.tornado.testing import AsyncTestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
from tests.support.unit import TestCase, skipIf
-from tornado.testing import AsyncTestCase
@skipIf(NO_MOCK, NO_MOCK_REASON)
@@ -52,10 +52,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.start_batch()
self.assertEqual(self.batch.batch_size, 2)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_start_on_batch_presence_ping_timeout(self):
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
ret = self.batch.start()
@@ -71,10 +71,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# assert targeted_minions == all minions matched by tgt
self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_start_on_gather_job_timeout(self):
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
self.batch.batch_presence_ping_timeout = None
@@ -103,7 +103,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
),
)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_start_batch_calls_next(self):
self.batch.run_next = MagicMock(return_value=MagicMock())
self.batch.event = MagicMock()
@@ -160,14 +160,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(len(event.unsubscribe.mock_calls), 2)
self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_next(self):
self.batch.event = MagicMock()
self.batch.opts["fun"] = "my.fun"
self.batch.opts["arg"] = []
self.batch._get_next = MagicMock(return_value={"foo", "bar"})
self.batch.batch_size = 2
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
self.batch.run_next()
@@ -290,38 +290,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch._BatchAsync__event_handler(MagicMock())
self.assertEqual(self.batch.find_job_returned, {"foo"})
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_run_next_end_batch_when_no_next(self):
self.batch.end_batch = MagicMock()
self.batch._get_next = MagicMock(return_value={})
self.batch.run_next()
self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_find_job(self):
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
self.batch.minions = {"foo", "bar"}
self.batch.jid_gen = MagicMock(return_value="1234")
- tornado.gen.sleep = MagicMock(return_value=future)
+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
self.batch.find_job({"foo", "bar"})
self.assertEqual(
self.batch.event.io_loop.spawn_callback.call_args[0],
(self.batch.check_find_job, {"foo", "bar"}, "1234"),
)
- @tornado.testing.gen_test
+ @salt.ext.tornado.testing.gen_test
def test_batch_find_job_with_done_minions(self):
self.batch.done_minions = {"bar"}
self.batch.event = MagicMock()
- future = tornado.gen.Future()
+ future = salt.ext.tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
self.batch.minions = {"foo", "bar"}
self.batch.jid_gen = MagicMock(return_value="1234")
- tornado.gen.sleep = MagicMock(return_value=future)
+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
self.batch.find_job({"foo", "bar"})
self.assertEqual(
self.batch.event.io_loop.spawn_callback.call_args[0],
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,631 +0,0 @@
From 6837044f5a207cf39f3064428b0ed276226a5e39 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 9 Jul 2021 09:05:55 +0100
Subject: [PATCH] Do noop for services states when running systemd in
offline mode (bsc#1187787)
transactional_updates: do not execute states in parallel but use a queue (bsc#1188170)
Add changes suggested by pre-commit
Fix unit tests for transactional_updates module
Add unit tests to cover queue cases on transaction_update states
Refactor offline checkers and add unit tests
Fix regression that always consider offline mode
Add proper mocking and skip tests when running in offline mode
---
salt/modules/systemd_service.py | 5 +
salt/modules/transactional_update.py | 56 +++-
salt/states/service.py | 14 +
tests/integration/states/test_service.py | 4 +
.../unit/modules/test_transactional_update.py | 264 +++++++++++++++++-
tests/unit/states/test_service.py | 43 ++-
6 files changed, 377 insertions(+), 9 deletions(-)
diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
index 49e5bd813f..8d495433f8 100644
--- a/salt/modules/systemd_service.py
+++ b/salt/modules/systemd_service.py
@@ -102,6 +102,11 @@ def _check_available(name):
"""
Returns boolean telling whether or not the named service is available
"""
+ if offline():
+ raise CommandExecutionError(
+ "Cannot run in offline mode. Failed to get information on unit '%s'" % name
+ )
+
_status = _systemctl_status(name)
sd_version = salt.utils.systemd.version(__context__)
if sd_version is not None and sd_version >= 231:
diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py
index 9cdaddb91a..3af9d91822 100644
--- a/salt/modules/transactional_update.py
+++ b/salt/modules/transactional_update.py
@@ -281,10 +281,14 @@ import os
import sys
import tempfile
+# required by _check_queue invocation later
+import time # pylint: disable=unused-import
+
import salt.client.ssh.state
import salt.client.ssh.wrapper.state
import salt.exceptions
import salt.utils.args
+from salt.modules.state import _check_queue, _prior_running_states, _wait, running
__func_alias__ = {"apply_": "apply"}
@@ -295,7 +299,14 @@ def __virtual__():
"""
transactional-update command is required.
"""
+ global _check_queue, _wait, _prior_running_states, running
if __utils__["path.which"]("transactional-update"):
+ _check_queue = salt.utils.functools.namespaced_function(_check_queue, globals())
+ _wait = salt.utils.functools.namespaced_function(_wait, globals())
+ _prior_running_states = salt.utils.functools.namespaced_function(
+ _prior_running_states, globals()
+ )
+ running = salt.utils.functools.namespaced_function(running, globals())
return True
else:
return (False, "Module transactional_update requires a transactional system")
@@ -1068,7 +1079,13 @@ def _create_and_execute_salt_state(
def sls(
- mods, saltenv="base", test=None, exclude=None, activate_transaction=False, **kwargs
+ mods,
+ saltenv="base",
+ test=None,
+ exclude=None,
+ activate_transaction=False,
+ queue=False,
+ **kwargs
):
"""Execute the states in one or more SLS files inside a transaction.
@@ -1093,6 +1110,13 @@ def sls(
(i.e there is a new snaphot in the system), a new reboot will
be scheduled (default False)
+ queue
+ Instead of failing immediately when another state run is in progress,
+ queue the new state run to begin running once the other has finished.
+
+ This option starts a new thread for each queued state run, so use this
+ option sparingly. (Default: False)
+
For a formal description of the possible parameters accepted in
this function, check `state.sls` documentation.
@@ -1104,6 +1128,10 @@ def sls(
salt microos transactional_update.sls stuff activate_transaction=True
"""
+ conflict = _check_queue(queue, kwargs)
+ if conflict is not None:
+ return conflict
+
# Get a copy of the pillar data, to avoid overwriting the current
# pillar, instead the one delegated
pillar = copy.deepcopy(__pillar__)
@@ -1156,7 +1184,7 @@ def sls(
)
-def highstate(activate_transaction=False, **kwargs):
+def highstate(activate_transaction=False, queue=False, **kwargs):
"""Retrieve the state data from the salt master for this minion and
execute it inside a transaction.
@@ -1168,6 +1196,13 @@ def highstate(activate_transaction=False, **kwargs):
(i.e there is a new snaphot in the system), a new reboot will
be scheduled (default False)
+ queue
+ Instead of failing immediately when another state run is in progress,
+ queue the new state run to begin running once the other has finished.
+
+ This option starts a new thread for each queued state run, so use this
+ option sparingly. (Default: False)
+
CLI Example:
.. code-block:: bash
@@ -1177,6 +1212,10 @@ def highstate(activate_transaction=False, **kwargs):
salt microos transactional_update.highstate activate_transaction=True
"""
+ conflict = _check_queue(queue, kwargs)
+ if conflict is not None:
+ return conflict
+
# Get a copy of the pillar data, to avoid overwriting the current
# pillar, instead the one delegated
pillar = copy.deepcopy(__pillar__)
@@ -1210,7 +1249,7 @@ def highstate(activate_transaction=False, **kwargs):
)
-def single(fun, name, test=None, activate_transaction=False, **kwargs):
+def single(fun, name, test=None, activate_transaction=False, queue=False, **kwargs):
"""Execute a single state function with the named kwargs, returns
False if insufficient data is sent to the command
@@ -1224,6 +1263,13 @@ def single(fun, name, test=None, activate_transaction=False, **kwargs):
(i.e there is a new snaphot in the system), a new reboot will
be scheduled (default False)
+ queue
+ Instead of failing immediately when another state run is in progress,
+ queue the new state run to begin running once the other has finished.
+
+ This option starts a new thread for each queued state run, so use this
+ option sparingly. (Default: False)
+
CLI Example:
.. code-block:: bash
@@ -1232,6 +1278,10 @@ def single(fun, name, test=None, activate_transaction=False, **kwargs):
salt microos transactional_update.single pkg.installed name=emacs activate_transaction=True
"""
+ conflict = _check_queue(queue, kwargs)
+ if conflict is not None:
+ return conflict
+
# Get a copy of the pillar data, to avoid overwriting the current
# pillar, instead the one delegated
pillar = copy.deepcopy(__pillar__)
diff --git a/salt/states/service.py b/salt/states/service.py
index 4ea36a78f6..3a216920f4 100644
--- a/salt/states/service.py
+++ b/salt/states/service.py
@@ -342,6 +342,10 @@ def _disable(name, started, result=True, **kwargs):
return ret
+def _offline():
+ return "service.offline" in __salt__ and __salt__["service.offline"]()
+
+
def _available(name, ret):
"""
Check if the service is available
@@ -436,6 +440,11 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs):
if isinstance(enable, str):
enable = salt.utils.data.is_true(enable)
+ if _offline():
+ ret["result"] = True
+ ret["comment"] = "Running in OFFLINE mode. Nothing to do"
+ return ret
+
# Check if the service is available
try:
if not _available(name, ret):
@@ -631,6 +640,11 @@ def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
if isinstance(enable, str):
enable = salt.utils.data.is_true(enable)
+ if _offline():
+ ret["result"] = True
+ ret["comment"] = "Running in OFFLINE mode. Nothing to do"
+ return ret
+
# Check if the service is available
try:
if not _available(name, ret):
diff --git a/tests/integration/states/test_service.py b/tests/integration/states/test_service.py
index 81359d44ea..9c89d2cfd0 100644
--- a/tests/integration/states/test_service.py
+++ b/tests/integration/states/test_service.py
@@ -26,6 +26,7 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
cmd_name = "crontab"
os_family = self.run_function("grains.get", ["os_family"])
os_release = self.run_function("grains.get", ["osrelease"])
+ is_systemd = self.run_function("grains.get", ["systemd"])
self.stopped = False
self.running = True
if os_family == "RedHat":
@@ -53,6 +54,9 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
if os_family != "Windows" and salt.utils.path.which(cmd_name) is None:
self.skipTest("{} is not installed".format(cmd_name))
+ if is_systemd and self.run_function("service.offline"):
+ self.skipTest("systemd is OFFLINE")
+
def tearDown(self):
if self.post_srv_disable:
self.run_function("service.disable", name=self.service_name)
diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py
index 2d30f296d7..6f8587baa0 100644
--- a/tests/unit/modules/test_transactional_update.py
+++ b/tests/unit/modules/test_transactional_update.py
@@ -1,6 +1,7 @@
import sys
import pytest
+import salt.modules.state as statemod
import salt.modules.transactional_update as tu
import salt.utils.platform
from salt.exceptions import CommandExecutionError
@@ -16,7 +17,10 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
"""
def setup_loader_modules(self):
- return {tu: {"__salt__": {}, "__utils__": {}}}
+ return {
+ tu: {"__salt__": {}, "__utils__": {}},
+ statemod: {"__salt__": {}, "__context__": {}},
+ }
def test__global_params_no_self_update(self):
"""Test transactional_update._global_params without self_update"""
@@ -643,11 +647,103 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
opts_mock = {
"hash_type": "md5",
}
+ salt_mock = {
+ "saltutil.is_running": MagicMock(return_value=[]),
+ }
get_sls_opts.return_value = opts_mock
- with patch.dict(tu.__opts__, opts_mock):
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
assert tu.sls("module") == "result"
_create_and_execute_salt_state.assert_called_once()
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
+ @patch("salt.fileclient.get_file_client")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_sls_queue_true(
+ self,
+ get_sls_opts,
+ get_file_client,
+ TransactionalUpdateHighstate,
+ _create_and_execute_salt_state,
+ ):
+ """Test transactional_update.sls"""
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
+ TransactionalUpdateHighstate.render_highstate.return_value = (None, [])
+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, [])
+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, [])
+ TransactionalUpdateHighstate.state.verify_high.return_value = []
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.sls("module", queue=True) == "result"
+ _create_and_execute_salt_state.assert_called_once()
+
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
+ @patch("salt.fileclient.get_file_client")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_sls_queue_false_failing(
+ self,
+ get_sls_opts,
+ get_file_client,
+ TransactionalUpdateHighstate,
+ _create_and_execute_salt_state,
+ ):
+ """Test transactional_update.sls"""
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
+ TransactionalUpdateHighstate.render_highstate.return_value = (None, [])
+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, [])
+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, [])
+ TransactionalUpdateHighstate.state.verify_high.return_value = []
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.sls("module", queue=False) == [
+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096'
+ ]
+
@patch("salt.modules.transactional_update._create_and_execute_salt_state")
@patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
@patch("salt.fileclient.get_file_client")
@@ -666,11 +762,95 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
opts_mock = {
"hash_type": "md5",
}
+ salt_mock = {
+ "saltutil.is_running": MagicMock(return_value=[]),
+ }
get_sls_opts.return_value = opts_mock
- with patch.dict(tu.__opts__, opts_mock):
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
assert tu.highstate() == "result"
_create_and_execute_salt_state.assert_called_once()
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
+ @patch("salt.fileclient.get_file_client")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_highstate_queue_true(
+ self,
+ get_sls_opts,
+ get_file_client,
+ TransactionalUpdateHighstate,
+ _create_and_execute_salt_state,
+ ):
+ """Test transactional_update.highstage"""
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.highstate(queue=True) == "result"
+ _create_and_execute_salt_state.assert_called_once()
+
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
+ @patch("salt.fileclient.get_file_client")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_highstate_queue_false_failing(
+ self,
+ get_sls_opts,
+ get_file_client,
+ TransactionalUpdateHighstate,
+ _create_and_execute_salt_state,
+ ):
+ """Test transactional_update.highstage"""
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.highstate(queue=False) == [
+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096'
+ ]
+
@patch("salt.modules.transactional_update._create_and_execute_salt_state")
@patch("salt.client.ssh.state.SSHState")
@patch("salt.utils.state.get_sls_opts")
@@ -683,7 +863,83 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
opts_mock = {
"hash_type": "md5",
}
+ salt_mock = {
+ "saltutil.is_running": MagicMock(return_value=[]),
+ }
get_sls_opts.return_value = opts_mock
- with patch.dict(tu.__opts__, opts_mock):
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
assert tu.single("pkg.installed", name="emacs") == "result"
_create_and_execute_salt_state.assert_called_once()
+
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.client.ssh.state.SSHState")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_single_queue_false_failing(
+ self, get_sls_opts, SSHState, _create_and_execute_salt_state
+ ):
+ """Test transactional_update.single"""
+ SSHState.return_value = SSHState
+ SSHState.verify_data.return_value = None
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.single("pkg.installed", name="emacs", queue=False) == [
+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096'
+ ]
+
+ @patch("salt.modules.transactional_update._create_and_execute_salt_state")
+ @patch("salt.client.ssh.state.SSHState")
+ @patch("salt.utils.state.get_sls_opts")
+ def test_single_queue_true(
+ self, get_sls_opts, SSHState, _create_and_execute_salt_state
+ ):
+ """Test transactional_update.single"""
+ SSHState.return_value = SSHState
+ SSHState.verify_data.return_value = None
+
+ _create_and_execute_salt_state.return_value = "result"
+ opts_mock = {
+ "hash_type": "md5",
+ }
+ salt_mock = {
+ "saltutil.is_running": MagicMock(
+ side_effect=[
+ [
+ {
+ "fun": "state.running",
+ "pid": "4126",
+ "jid": "20150325123407204096",
+ }
+ ],
+ [],
+ ]
+ ),
+ }
+ get_sls_opts.return_value = opts_mock
+ with patch.dict(tu.__opts__, opts_mock), patch.dict(
+ statemod.__salt__, salt_mock
+ ):
+ assert tu.single("pkg.installed", name="emacs", queue=True) == "result"
+ _create_and_execute_salt_state.assert_called_once()
diff --git a/tests/unit/states/test_service.py b/tests/unit/states/test_service.py
index 51755fc5a1..de09f2f8ab 100644
--- a/tests/unit/states/test_service.py
+++ b/tests/unit/states/test_service.py
@@ -304,6 +304,24 @@ class ServiceTestCase(TestCase, LoaderModuleMockMixin):
service.__context__, {"service.state": "running"}
)
+ def test_running_in_offline_mode(self):
+ """
+ Tests the case in which a service.running state is executed on an offline environment
+
+ """
+ name = "thisisnotarealservice"
+ with patch.object(service, "_offline", MagicMock(return_value=True)):
+ ret = service.running(name=name)
+ self.assertDictEqual(
+ ret,
+ {
+ "changes": {},
+ "comment": "Running in OFFLINE mode. Nothing to do",
+ "result": True,
+ "name": name,
+ },
+ )
+
def test_dead(self):
"""
Test to ensure that the named service is dead
@@ -443,6 +461,24 @@ class ServiceTestCase(TestCase, LoaderModuleMockMixin):
},
)
+ def test_dead_in_offline_mode(self):
+ """
+ Tests the case in which a service.dead state is executed on an offline environment
+
+ """
+ name = "thisisnotarealservice"
+ with patch.object(service, "_offline", MagicMock(return_value=True)):
+ ret = service.dead(name=name)
+ self.assertDictEqual(
+ ret,
+ {
+ "changes": {},
+ "comment": "Running in OFFLINE mode. Nothing to do",
+ "result": True,
+ "name": name,
+ },
+ )
+
def test_enabled(self):
"""
Test to verify that the service is enabled
@@ -567,8 +603,11 @@ class ServiceTestCaseFunctional(TestCase, LoaderModuleMockMixin):
@slowTest
def test_running_with_reload(self):
with patch.dict(service.__opts__, {"test": False}):
- service.dead(self.service_name, enable=False)
- result = service.running(name=self.service_name, enable=True, reload=False)
+ with patch("salt.utils.systemd.offline", MagicMock(return_value=False)):
+ service.dead(self.service_name, enable=False)
+ result = service.running(
+ name=self.service_name, enable=True, reload=False
+ )
if salt.utils.platform.is_windows():
comment = "Started Service {}".format(self.service_name)
--
2.32.0

View File

@ -1,77 +0,0 @@
From e986ed8fc0d5da74374d9ded82e10c16fc984ca8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 29 May 2019 11:03:16 +0100
Subject: [PATCH] Do not break repo files with multiple line values on
yumpkg (bsc#1135360)
---
tests/integration/modules/test_pkg.py | 51 +++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
index 7a720523da..e32013800d 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
@@ -194,6 +194,57 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
if repo is not None:
self.run_function("pkg.del_repo", [repo])
+ def test_mod_del_repo_multiline_values(self):
+ """
+ test modifying and deleting a software repository defined with multiline values
+ """
+ os_grain = self.run_function("grains.item", ["os"])["os"]
+ repo = None
+ try:
+ if os_grain in ["CentOS", "RedHat", "SUSE"]:
+ my_baseurl = (
+ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
+ )
+ expected_get_repo_baseurl = (
+ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
+ )
+ major_release = int(
+ self.run_function("grains.item", ["osmajorrelease"])[
+ "osmajorrelease"
+ ]
+ )
+ repo = "fakerepo"
+ name = "Fake repo for RHEL/CentOS/SUSE"
+ baseurl = my_baseurl
+ gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub"
+ failovermethod = "priority"
+ gpgcheck = 1
+ enabled = 1
+ ret = self.run_function(
+ "pkg.mod_repo",
+ [repo],
+ name=name,
+ baseurl=baseurl,
+ gpgkey=gpgkey,
+ gpgcheck=gpgcheck,
+ enabled=enabled,
+ failovermethod=failovermethod,
+ )
+ # return data from pkg.mod_repo contains the file modified at
+ # the top level, so use next(iter(ret)) to get that key
+ self.assertNotEqual(ret, {})
+ repo_info = ret[next(iter(ret))]
+ self.assertIn(repo, repo_info)
+ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
+ ret = self.run_function("pkg.get_repo", [repo])
+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
+ self.run_function("pkg.mod_repo", [repo])
+ ret = self.run_function("pkg.get_repo", [repo])
+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
+ finally:
+ if repo is not None:
+ self.run_function("pkg.del_repo", [repo])
+
@requires_salt_modules("pkg.owner")
def test_owner(self):
"""
--
2.29.2

View File

@ -1,34 +0,0 @@
From 998136ffd4c8442e0c3a7030af3d8196abec6be1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Tue, 7 May 2019 15:33:51 +0100
Subject: [PATCH] Do not crash when there are IPv6 established
connections (bsc#1130784)
Add unit test for '_netlink_tool_remote_on'
---
salt/utils/network.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/salt/utils/network.py b/salt/utils/network.py
index dd7fceb91a..d253ded3ab 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
@@ -1623,8 +1623,13 @@ def _netlink_tool_remote_on(port, which_end):
elif "ESTAB" not in line:
continue
chunks = line.split()
+ local_host, local_port = chunks[3].rsplit(":", 1)
remote_host, remote_port = chunks[4].rsplit(":", 1)
+ if which_end == "remote_port" and int(remote_port) != port:
+ continue
+ if which_end == "local_port" and int(local_port) != port:
+ continue
remotes.add(remote_host.strip("[]"))
if valid is False:
--
2.29.2

View File

@ -1,4 +1,4 @@
From cec95ba8f9b561d7ca4c66be9483e4b9386cb741 Mon Sep 17 00:00:00 2001
From f9a66dbf39345b2b371b18e8bf9d89835d6381b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 25 Jan 2021 12:15:59 +0000
@ -7,15 +7,15 @@ Subject: [PATCH] Do not crash when unexpected cmd output at listing
Add unit tests to cover unexpected output when listing patches
---
salt/modules/yumpkg.py | 20 ++++++++--
tests/unit/modules/test_yumpkg.py | 63 +++++++++++++++++++++++++++++++
2 files changed, 80 insertions(+), 3 deletions(-)
salt/modules/yumpkg.py | 20 +++++++--
tests/pytests/unit/modules/test_yumpkg.py | 53 +++++++++++++++++++++++
2 files changed, 70 insertions(+), 3 deletions(-)
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index df174e737d..82adbbd59d 100644
index fd79109e40..c800dafa82 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -3291,10 +3291,17 @@ def _get_patches(installed_only=False):
@@ -3325,10 +3325,17 @@ def _get_patches(installed_only=False):
cmd = [_yum(), "--quiet", "updateinfo", "list", "all"]
ret = __salt__["cmd.run_stdout"](cmd, python_shell=False, env={"SALT_RUNNING": "1"})
@ -36,7 +36,7 @@ index df174e737d..82adbbd59d 100644
if advisory_id not in patches:
patches[advisory_id] = {
"installed": True if inst == "i" else False,
@@ -3305,6 +3312,13 @@ def _get_patches(installed_only=False):
@@ -3339,6 +3346,13 @@ def _get_patches(installed_only=False):
if inst != "i":
patches[advisory_id]["installed"] = False
@ -50,15 +50,15 @@ index df174e737d..82adbbd59d 100644
if installed_only:
patches = {k: v for k, v in patches.items() if v["installed"]}
return patches
diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
index b97e82d307..96d3f12b17 100644
--- a/tests/unit/modules/test_yumpkg.py
+++ b/tests/unit/modules/test_yumpkg.py
@@ -383,6 +383,69 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
_patch in patches["my-fake-patch-installed-1234"]["summary"]
)
diff --git a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py
index ef7100fe9d..df01853927 100644
--- a/tests/pytests/unit/modules/test_yumpkg.py
+++ b/tests/pytests/unit/modules/test_yumpkg.py
@@ -420,6 +420,59 @@ def test_list_patches():
assert _patch in patches["my-fake-patch-installed-1234"]["summary"]
+ def test_list_patches_with_unexpected_output(self):
+def test_list_patches_with_unexpected_output():
+ """
+ Test patches listing with unexpected output from updateinfo list
+
@ -101,30 +101,20 @@ index b97e82d307..96d3f12b17 100644
+ {"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))},
+ ):
+ patches = yumpkg.list_patches()
+ self.assertFalse(patches["my-fake-patch-not-installed-1234"]["installed"])
+ self.assertTrue(
+ len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3
+ )
+ for _patch in expected_patches["my-fake-patch-not-installed-1234"][
+ "summary"
+ ]:
+ self.assertTrue(
+ _patch in patches["my-fake-patch-not-installed-1234"]["summary"]
+ )
+
+ self.assertTrue(patches["my-fake-patch-installed-1234"]["installed"])
+ self.assertTrue(
+ len(patches["my-fake-patch-installed-1234"]["summary"]) == 2
+ )
+ assert not patches["my-fake-patch-not-installed-1234"]["installed"]
+ assert len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3
+ for _patch in expected_patches["my-fake-patch-not-installed-1234"]["summary"]:
+ assert _patch in patches["my-fake-patch-not-installed-1234"]["summary"]
+ assert patches["my-fake-patch-installed-1234"]["installed"]
+ assert len(patches["my-fake-patch-installed-1234"]["summary"]) == 2
+ for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]:
+ self.assertTrue(
+ _patch in patches["my-fake-patch-installed-1234"]["summary"]
+ )
+ assert _patch in patches["my-fake-patch-installed-1234"]["summary"]
+
def test_latest_version_with_options(self):
+
def test_latest_version_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})):
--
2.29.2
2.33.0

View File

@ -1,4 +1,4 @@
From 57f9da0bd7727c46eab866941fee46a3eaf8c8ea Mon Sep 17 00:00:00 2001
From e0b91c626c10b29d328fa92415393cd57bb4c962 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Fri, 21 Sep 2018 17:31:39 +0200
Subject: [PATCH] Do not load pip state if there is no 3rd party
@ -6,39 +6,16 @@ Subject: [PATCH] Do not load pip state if there is no 3rd party
Safe import 3rd party dependency
---
salt/modules/pip.py | 93 ++++++++++++++++++++++++---------------------
1 file changed, 50 insertions(+), 43 deletions(-)
salt/modules/pip.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
index f7c101f6e4..742e0dd48a 100644
index f68cafaeaf..14cfafed4b 100644
--- a/salt/modules/pip.py
+++ b/salt/modules/pip.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
r"""
Install Python packages with pip to either the system or a virtualenv
@@ -77,9 +76,7 @@ of the 2015.5 branch:
The issue is described here: https://github.com/saltstack/salt/issues/46163
"""
-from __future__ import absolute_import, print_function, unicode_literals
-# Import python libs
import logging
import os
import re
@@ -89,7 +86,6 @@ import tempfile
import pkg_resources # pylint: disable=3rd-party-module-not-gated
-# Import Salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.json
@@ -101,6 +97,12 @@ import salt.utils.versions
@@ -96,6 +96,12 @@ import salt.utils.url
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
from salt.ext import six
+try:
+ import pkg_resources
@ -47,9 +24,9 @@ index f7c101f6e4..742e0dd48a 100644
+
+
# This needs to be named logger so we don't shadow it in pip.install
logger = logging.getLogger(__name__) # pylint: disable=C0103
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@@ -118,7 +120,12 @@ def __virtual__():
@@ -113,7 +119,12 @@ def __virtual__():
entire filesystem. If it's not installed in a conventional location, the
user is required to provide the location of pip each time it is used.
"""
@ -63,298 +40,7 @@ index f7c101f6e4..742e0dd48a 100644
def _pip_bin_env(cwd, bin_env):
@@ -140,7 +147,7 @@ def _clear_context(bin_env=None):
"""
contextkey = "pip.version"
if bin_env is not None:
- contextkey = "{0}.{1}".format(contextkey, bin_env)
+ contextkey = "{}.{}".format(contextkey, bin_env)
__context__.pop(contextkey, None)
@@ -196,7 +203,7 @@ def _get_pip_bin(bin_env):
bin_path,
)
raise CommandNotFoundError(
- "Could not find a pip binary in virtualenv {0}".format(bin_env)
+ "Could not find a pip binary in virtualenv {}".format(bin_env)
)
# bin_env is the python or pip binary
@@ -209,11 +216,11 @@ def _get_pip_bin(bin_env):
return [os.path.normpath(bin_env)]
raise CommandExecutionError(
- "Could not find a pip binary within {0}".format(bin_env)
+ "Could not find a pip binary within {}".format(bin_env)
)
else:
raise CommandNotFoundError(
- "Access denied to {0}, could not find a pip binary".format(bin_env)
+ "Access denied to {}, could not find a pip binary".format(bin_env)
)
@@ -283,7 +290,7 @@ def _resolve_requirements_chain(requirements):
chain = []
- if isinstance(requirements, six.string_types):
+ if isinstance(requirements, str):
requirements = [requirements]
for req_file in requirements:
@@ -300,7 +307,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
cleanup_requirements = []
if requirements is not None:
- if isinstance(requirements, six.string_types):
+ if isinstance(requirements, str):
requirements = [r.strip() for r in requirements.split(",")]
elif not isinstance(requirements, list):
raise TypeError("requirements must be a string or list")
@@ -314,7 +321,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
if not cached_requirements:
ret = {
"result": False,
- "comment": "pip requirements file '{0}' not found".format(
+ "comment": "pip requirements file '{}' not found".format(
requirement
),
}
@@ -412,15 +419,15 @@ def _format_env_vars(env_vars):
ret = {}
if env_vars:
if isinstance(env_vars, dict):
- for key, val in six.iteritems(env_vars):
- if not isinstance(key, six.string_types):
+ for key, val in env_vars.items():
+ if not isinstance(key, str):
key = str(key) # future lint: disable=blacklisted-function
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val) # future lint: disable=blacklisted-function
ret[key] = val
else:
raise CommandExecutionError(
- "env_vars {0} is not a dictionary".format(env_vars)
+ "env_vars {} is not a dictionary".format(env_vars)
)
return ret
@@ -762,9 +769,9 @@ def install(
if log:
if os.path.isdir(log):
- raise IOError("'{0}' is a directory. Use --log path_to_file".format(log))
+ raise OSError("'{}' is a directory. Use --log path_to_file".format(log))
elif not os.access(log, os.W_OK):
- raise IOError("'{0}' is not writeable".format(log))
+ raise OSError("'{}' is not writeable".format(log))
cmd.extend(["--log", log])
@@ -790,12 +797,12 @@ def install(
int(timeout)
except ValueError:
raise ValueError(
- "'{0}' is not a valid timeout, must be an integer".format(timeout)
+ "'{}' is not a valid timeout, must be an integer".format(timeout)
)
cmd.extend(["--timeout", timeout])
if find_links:
- if isinstance(find_links, six.string_types):
+ if isinstance(find_links, str):
find_links = [l.strip() for l in find_links.split(",")]
for link in find_links:
@@ -803,7 +810,7 @@ def install(
salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link)
):
raise CommandExecutionError(
- "'{0}' is not a valid URL or path".format(link)
+ "'{}' is not a valid URL or path".format(link)
)
cmd.extend(["--find-links", link])
@@ -815,13 +822,13 @@ def install(
if index_url:
if not salt.utils.url.validate(index_url, VALID_PROTOS):
- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url))
+ raise CommandExecutionError("'{}' is not a valid URL".format(index_url))
cmd.extend(["--index-url", index_url])
if extra_index_url:
if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
raise CommandExecutionError(
- "'{0}' is not a valid URL".format(extra_index_url)
+ "'{}' is not a valid URL".format(extra_index_url)
)
cmd.extend(["--extra-index-url", extra_index_url])
@@ -836,13 +843,13 @@ def install(
" use index_url and/or extra_index_url instead"
)
- if isinstance(mirrors, six.string_types):
+ if isinstance(mirrors, str):
mirrors = [m.strip() for m in mirrors.split(",")]
cmd.append("--use-mirrors")
for mirror in mirrors:
if not mirror.startswith("http://"):
- raise CommandExecutionError("'{0}' is not a valid URL".format(mirror))
+ raise CommandExecutionError("'{}' is not a valid URL".format(mirror))
cmd.extend(["--mirrors", mirror])
if disable_version_check:
@@ -883,7 +890,7 @@ def install(
if exists_action.lower() not in ("s", "i", "w", "b"):
raise CommandExecutionError(
"The exists_action pip option only supports the values "
- "s, i, w, and b. '{0}' is not valid.".format(exists_action)
+ "s, i, w, and b. '{}' is not valid.".format(exists_action)
)
cmd.extend(["--exists-action", exists_action])
@@ -911,14 +918,14 @@ def install(
cmd.extend(["--cert", cert])
if global_options:
- if isinstance(global_options, six.string_types):
+ if isinstance(global_options, str):
global_options = [go.strip() for go in global_options.split(",")]
for opt in global_options:
cmd.extend(["--global-option", opt])
if install_options:
- if isinstance(install_options, six.string_types):
+ if isinstance(install_options, str):
install_options = [io.strip() for io in install_options.split(",")]
for opt in install_options:
@@ -929,7 +936,7 @@ def install(
try:
pkgs = [p.strip() for p in pkgs.split(",")]
except AttributeError:
- pkgs = [p.strip() for p in six.text_type(pkgs).split(",")]
+ pkgs = [p.strip() for p in str(pkgs).split(",")]
pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs))
# It's possible we replaced version-range commas with semicolons so
@@ -945,7 +952,7 @@ def install(
if editable:
egg_match = re.compile(r"(?:#|#.*?&)egg=([^&]*)")
- if isinstance(editable, six.string_types):
+ if isinstance(editable, str):
editable = [e.strip() for e in editable.split(",")]
for entry in editable:
@@ -964,14 +971,14 @@ def install(
cmd.append("--allow-all-external")
if allow_external:
- if isinstance(allow_external, six.string_types):
+ if isinstance(allow_external, str):
allow_external = [p.strip() for p in allow_external.split(",")]
for pkg in allow_external:
cmd.extend(["--allow-external", pkg])
if allow_unverified:
- if isinstance(allow_unverified, six.string_types):
+ if isinstance(allow_unverified, str):
allow_unverified = [p.strip() for p in allow_unverified.split(",")]
for pkg in allow_unverified:
@@ -1106,8 +1113,8 @@ def uninstall(
try:
# TODO make this check if writeable
os.path.exists(log)
- except IOError:
- raise IOError("'{0}' is not writeable".format(log))
+ except OSError:
+ raise OSError("'{}' is not writeable".format(log))
cmd.extend(["--log", log])
@@ -1133,12 +1140,12 @@ def uninstall(
int(timeout)
except ValueError:
raise ValueError(
- "'{0}' is not a valid timeout, must be an integer".format(timeout)
+ "'{}' is not a valid timeout, must be an integer".format(timeout)
)
cmd.extend(["--timeout", timeout])
if pkgs:
- if isinstance(pkgs, six.string_types):
+ if isinstance(pkgs, str):
pkgs = [p.strip() for p in pkgs.split(",")]
if requirements:
for requirement in requirements:
@@ -1323,7 +1330,7 @@ def version(bin_env=None, cwd=None, user=None):
cwd = _pip_bin_env(cwd, bin_env)
contextkey = "pip.version"
if bin_env is not None:
- contextkey = "{0}.{1}".format(contextkey, bin_env)
+ contextkey = "{}.{}".format(contextkey, bin_env)
if contextkey in __context__:
return __context__[contextkey]
@@ -1402,7 +1409,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None):
if match:
name, version_ = match.groups()
else:
- logger.error("Can't parse line '{0}'".format(line))
+ logger.error("Can't parse line '{}'".format(line))
continue
packages[name] = version_
@@ -1414,7 +1421,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None):
raise CommandExecutionError("Invalid JSON", info=result)
for pkg in pkgs:
- packages[pkg["name"]] = "{0} [{1}]".format(
+ packages[pkg["name"]] = "{} [{}]".format(
pkg["latest_version"], pkg["latest_filetype"]
)
@@ -1602,17 +1609,17 @@ def list_all_versions(
"""
cwd = _pip_bin_env(cwd, bin_env)
cmd = _get_pip_bin(bin_env)
- cmd.extend(["install", "{0}==versions".format(pkg)])
+ cmd.extend(["install", "{}==versions".format(pkg)])
if index_url:
if not salt.utils.url.validate(index_url, VALID_PROTOS):
- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url))
+ raise CommandExecutionError("'{}' is not a valid URL".format(index_url))
cmd.extend(["--index-url", index_url])
if extra_index_url:
if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
raise CommandExecutionError(
- "'{0}' is not a valid URL".format(extra_index_url)
+ "'{}' is not a valid URL".format(extra_index_url)
)
cmd.extend(["--extra-index-url", extra_index_url])
@@ -1632,7 +1639,7 @@ def list_all_versions(
if not include_rc:
filtered.append("rc")
if filtered:
- excludes = re.compile(r"^((?!{0}).)*$".format("|".join(filtered)))
+ excludes = re.compile(r"^((?!{}).)*$".format("|".join(filtered)))
else:
excludes = re.compile(r"")
--
2.29.2
2.33.0

View File

@ -1,570 +0,0 @@
From 5d465a5b392efa1b4df7870161b32e0125efa4af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 28 Jun 2019 15:17:56 +0100
Subject: [PATCH] Do not make ansiblegate to crash on Python3 minions
Fix pylint issues
Move MockTimedProc implementation to tests.support.mock
Add unit test for ansible caller
---
salt/modules/ansiblegate.py | 7 +-
tests/support/mock.py | 128 +++++++++-------
tests/unit/modules/test_ansiblegate.py | 201 +++++++++++++++++++++++++
tests/unit/modules/test_cmdmod.py | 1 +
4 files changed, 280 insertions(+), 57 deletions(-)
create mode 100644 tests/unit/modules/test_ansiblegate.py
diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py
index 0279a26017..5d4b986ec2 100644
--- a/salt/modules/ansiblegate.py
+++ b/salt/modules/ansiblegate.py
@@ -160,6 +160,7 @@ class AnsibleModuleCaller:
:param kwargs: keywords to the module
:return:
"""
+ python_exec = "python3"
module = self._resolver.load_module(module)
if not hasattr(module, "main"):
@@ -182,9 +183,9 @@ class AnsibleModuleCaller:
timeout=self.timeout,
)
proc_out.run()
- proc_out_stdout = salt.utils.stringutils.to_str(proc_out.stdout)
+ proc_out_stdout = proc_out.stdout.decode()
proc_exc = salt.utils.timed_subprocess.TimedProc(
- [sys.executable, module.__file__],
+ [python_exec, module.__file__],
stdin=proc_out_stdout,
stdout=subprocess.PIPE,
timeout=self.timeout,
@@ -298,7 +299,7 @@ def help(module=None, *args):
'Available sections on module "{}"'.format(
module.__name__.replace("ansible.modules.", "")
)
- ] = list(doc)
+ ] = [i for i in doc.keys()]
else:
for arg in args:
info = doc.get(arg)
diff --git a/tests/support/mock.py b/tests/support/mock.py
index 7ef02e0701..87d052c399 100644
--- a/tests/support/mock.py
+++ b/tests/support/mock.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
@@ -14,7 +13,6 @@
"""
# pylint: disable=unused-import,function-redefined,blacklisted-module,blacklisted-external-module
-from __future__ import absolute_import
import collections
import copy
@@ -42,8 +40,6 @@ from mock import (
patch,
sentinel,
)
-
-# Import salt libs
from salt.ext import six
# pylint: disable=no-name-in-module,no-member
@@ -57,7 +53,7 @@ if sys.version_info < (3, 6) and __mock_version < (2,):
raise ImportError("Please install mock>=2.0.0")
-class MockFH(object):
+class MockFH:
def __init__(self, filename, read_data, *args, **kwargs):
self.filename = filename
self.read_data = read_data
@@ -89,7 +85,7 @@ class MockFH(object):
"""
# Newline will always be a bytestring on PY2 because mock_open will have
# normalized it to one.
- newline = b"\n" if isinstance(read_data, six.binary_type) else "\n"
+ newline = b"\n" if isinstance(read_data, bytes) else "\n"
read_data = [line + newline for line in read_data.split(newline)]
@@ -103,8 +99,7 @@ class MockFH(object):
# newline that we added in the list comprehension.
read_data[-1] = read_data[-1][:-1]
- for line in read_data:
- yield line
+ yield from read_data
@property
def write_calls(self):
@@ -126,18 +121,18 @@ class MockFH(object):
def __check_read_data(self):
if not self.__read_data_ok:
if self.binary_mode:
- if not isinstance(self.read_data, six.binary_type):
+ if not isinstance(self.read_data, bytes):
raise TypeError(
- "{0} opened in binary mode, expected read_data to be "
- "bytes, not {1}".format(
+ "{} opened in binary mode, expected read_data to be "
+ "bytes, not {}".format(
self.filename, type(self.read_data).__name__
)
)
else:
if not isinstance(self.read_data, str):
raise TypeError(
- "{0} opened in non-binary mode, expected read_data to "
- "be str, not {1}".format(
+ "{} opened in non-binary mode, expected read_data to "
+ "be str, not {}".format(
self.filename, type(self.read_data).__name__
)
)
@@ -147,8 +142,8 @@ class MockFH(object):
def _read(self, size=0):
self.__check_read_data()
if not self.read_mode:
- raise IOError("File not open for reading")
- if not isinstance(size, six.integer_types) or size < 0:
+ raise OSError("File not open for reading")
+ if not isinstance(size, int) or size < 0:
raise TypeError("a positive integer is required")
joined = self.empty_string.join(self.read_data_iter)
@@ -169,7 +164,7 @@ class MockFH(object):
# TODO: Implement "size" argument
self.__check_read_data()
if not self.read_mode:
- raise IOError("File not open for reading")
+ raise OSError("File not open for reading")
ret = list(self.read_data_iter)
self.__loc += sum(len(x) for x in ret)
return ret
@@ -178,7 +173,7 @@ class MockFH(object):
# TODO: Implement "size" argument
self.__check_read_data()
if not self.read_mode:
- raise IOError("File not open for reading")
+ raise OSError("File not open for reading")
try:
ret = next(self.read_data_iter)
self.__loc += len(ret)
@@ -189,7 +184,7 @@ class MockFH(object):
def __iter__(self):
self.__check_read_data()
if not self.read_mode:
- raise IOError("File not open for reading")
+ raise OSError("File not open for reading")
while True:
try:
ret = next(self.read_data_iter)
@@ -200,30 +195,22 @@ class MockFH(object):
def _write(self, content):
if not self.write_mode:
- raise IOError("File not open for writing")
- if six.PY2:
- if isinstance(content, six.text_type):
- # encoding intentionally not specified to force a
- # UnicodeEncodeError when non-ascii unicode type is passed
- content.encode()
- else:
- content_type = type(content)
- if self.binary_mode and content_type is not bytes:
- raise TypeError(
- "a bytes-like object is required, not '{0}'".format(
- content_type.__name__
- )
- )
- elif not self.binary_mode and content_type is not str:
- raise TypeError(
- "write() argument must be str, not {0}".format(
- content_type.__name__
- )
+ raise OSError("File not open for writing")
+ content_type = type(content)
+ if self.binary_mode and content_type is not bytes:
+ raise TypeError(
+ "a bytes-like object is required, not '{}'".format(
+ content_type.__name__
)
+ )
+ elif not self.binary_mode and content_type is not str:
+ raise TypeError(
+ "write() argument must be str, not {}".format(content_type.__name__)
+ )
def _writelines(self, lines):
if not self.write_mode:
- raise IOError("File not open for writing")
+ raise OSError("File not open for writing")
for line in lines:
self._write(line)
@@ -234,26 +221,24 @@ class MockFH(object):
pass
-class MockCall(object):
+class MockCall:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __repr__(self):
# future lint: disable=blacklisted-function
- ret = str("MockCall(")
+ ret = "MockCall("
for arg in self.args:
- ret += repr(arg) + str(", ")
+ ret += repr(arg) + ", "
if not self.kwargs:
if self.args:
# Remove trailing ', '
ret = ret[:-2]
else:
- for key, val in six.iteritems(self.kwargs):
- ret += str("{0}={1}").format(
- salt.utils.stringutils.to_str(key), repr(val)
- )
- ret += str(")")
+ for key, val in self.kwargs.items():
+ ret += "{}={}".format(salt.utils.stringutils.to_str(key), repr(val))
+ ret += ")"
return ret
# future lint: enable=blacklisted-function
@@ -264,7 +249,7 @@ class MockCall(object):
return self.args == other.args and self.kwargs == other.kwargs
-class MockOpen(object):
+class MockOpen:
r'''
This class can be used to mock the use of ``open()``.
@@ -379,7 +364,7 @@ class MockOpen(object):
# .__class__() used here to preserve the dict class in the event that
# an OrderedDict was used.
new_read_data = read_data.__class__()
- for key, val in six.iteritems(read_data):
+ for key, val in read_data.items():
try:
val = salt.utils.data.decode(val, to_str=True)
except TypeError:
@@ -424,7 +409,7 @@ class MockOpen(object):
except IndexError:
# We've run out of file contents, abort!
raise RuntimeError(
- "File matching expression '{0}' opened more times than "
+ "File matching expression '{}' opened more times than "
"expected".format(matched_pattern)
)
@@ -443,7 +428,7 @@ class MockOpen(object):
except KeyError:
# No matching glob in read_data, treat this as a file that does
# not exist and raise the appropriate exception.
- raise IOError(errno.ENOENT, "No such file or directory", name)
+ raise OSError(errno.ENOENT, "No such file or directory", name)
def write_calls(self, path=None):
"""
@@ -451,7 +436,7 @@ class MockOpen(object):
the results to files matching a given pattern.
"""
ret = []
- for filename, handles in six.iteritems(self.filehandles):
+ for filename, handles in self.filehandles.items():
if path is None or fnmatch.fnmatch(filename, path):
for fh_ in handles:
ret.extend(fh_.write_calls)
@@ -463,19 +448,54 @@ class MockOpen(object):
narrow the results to files matching a given pattern.
"""
ret = []
- for filename, handles in six.iteritems(self.filehandles):
+ for filename, handles in self.filehandles.items():
if path is None or fnmatch.fnmatch(filename, path):
for fh_ in handles:
ret.extend(fh_.writelines_calls)
return ret
-class MockTimedProc(object):
+class MockTimedProc:
+ """
+ Class used as a stand-in for salt.utils.timed_subprocess.TimedProc
+ """
+
+ class _Process:
+ """
+ Used to provide a dummy "process" attribute
+ """
+
+ def __init__(self, returncode=0, pid=12345):
+ self.returncode = returncode
+ self.pid = pid
+
+ def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345):
+ if stdout is not None and not isinstance(stdout, bytes):
+ raise TypeError("Must pass stdout to MockTimedProc as bytes")
+ if stderr is not None and not isinstance(stderr, bytes):
+ raise TypeError("Must pass stderr to MockTimedProc as bytes")
+ self._stdout = stdout
+ self._stderr = stderr
+ self.process = self._Process(returncode=returncode, pid=pid)
+
+ def run(self):
+ pass
+
+ @property
+ def stdout(self):
+ return self._stdout
+
+ @property
+ def stderr(self):
+ return self._stderr
+
+
+class MockTimedProc:
"""
Class used as a stand-in for salt.utils.timed_subprocess.TimedProc
"""
- class _Process(object):
+ class _Process:
"""
Used to provide a dummy "process" attribute
"""
diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py
new file mode 100644
index 0000000000..61aad44b5c
--- /dev/null
+++ b/tests/unit/modules/test_ansiblegate.py
@@ -0,0 +1,201 @@
+#
+# Author: Bo Maryniuk <bo@suse.de>
+#
+# Copyright 2017 SUSE LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import salt.modules.ansiblegate as ansible
+import salt.utils.platform
+from salt.exceptions import LoaderError
+from salt.ext import six
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.mock import MagicMock, MockTimedProc, patch
+from tests.support.unit import TestCase, skipIf
+
+try:
+ import pytest
+except ImportError as import_error:
+ pytest = None
+NO_PYTEST = not bool(pytest)
+
+
+@skipIf(NO_PYTEST, False)
+@skipIf(salt.utils.platform.is_windows(), "Not supported on Windows")
+class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin):
+ def setUp(self):
+ self.resolver = ansible.AnsibleModuleResolver({})
+ self.resolver._modules_map = {
+ "one.two.three": os.sep + os.path.join("one", "two", "three.py"),
+ "four.five.six": os.sep + os.path.join("four", "five", "six.py"),
+ "three.six.one": os.sep + os.path.join("three", "six", "one.py"),
+ }
+
+ def tearDown(self):
+ self.resolver = None
+
+ def setup_loader_modules(self):
+ return {ansible: {}}
+
+ def test_ansible_module_help(self):
+ """
+ Test help extraction from the module
+ :return:
+ """
+
+ class Module:
+ """
+ An ansible module mock.
+ """
+
+ __name__ = "foo"
+ DOCUMENTATION = """
+---
+one:
+ text here
+---
+two:
+ text here
+description:
+ describe the second part
+ """
+
+ with patch.object(ansible, "_resolver", self.resolver), patch.object(
+ ansible._resolver, "load_module", MagicMock(return_value=Module())
+ ):
+ ret = ansible.help("dummy")
+ assert sorted(
+ ret.get('Available sections on module "{}"'.format(Module().__name__))
+ ) == ["one", "two"]
+ assert ret.get("Description") == "describe the second part"
+
+ def test_module_resolver_modlist(self):
+ """
+ Test Ansible resolver modules list.
+ :return:
+ """
+ assert self.resolver.get_modules_list() == [
+ "four.five.six",
+ "one.two.three",
+ "three.six.one",
+ ]
+ for ptr in ["five", "fi", "ve"]:
+ assert self.resolver.get_modules_list(ptr) == ["four.five.six"]
+ for ptr in ["si", "ix", "six"]:
+ assert self.resolver.get_modules_list(ptr) == [
+ "four.five.six",
+ "three.six.one",
+ ]
+ assert self.resolver.get_modules_list("one") == [
+ "one.two.three",
+ "three.six.one",
+ ]
+ assert self.resolver.get_modules_list("one.two") == ["one.two.three"]
+ assert self.resolver.get_modules_list("four") == ["four.five.six"]
+
+ def test_resolver_module_loader_failure(self):
+ """
+ Test Ansible module loader.
+ :return:
+ """
+ mod = "four.five.six"
+ with pytest.raises(ImportError) as import_error:
+ self.resolver.load_module(mod)
+
+ mod = "i.even.do.not.exist.at.all"
+ with pytest.raises(LoaderError) as loader_error:
+ self.resolver.load_module(mod)
+
+ def test_resolver_module_loader(self):
+ """
+ Test Ansible module loader.
+ :return:
+ """
+ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch(
+ "salt.modules.ansiblegate.importlib.import_module", lambda x: x
+ ):
+ assert (
+ self.resolver.load_module("four.five.six")
+ == "ansible.modules.four.five.six"
+ )
+
+ def test_resolver_module_loader_import_failure(self):
+ """
+ Test Ansible module loader failure.
+ :return:
+ """
+ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch(
+ "salt.modules.ansiblegate.importlib.import_module", lambda x: x
+ ):
+ with pytest.raises(LoaderError) as loader_error:
+ self.resolver.load_module("something.strange")
+
+ def test_virtual_function(self):
+ """
+ Test Ansible module __virtual__ when ansible is not installed on the minion.
+ :return:
+ """
+ with patch("salt.modules.ansiblegate.ansible", None):
+ assert ansible.__virtual__() == "ansible"
+
+ def test_ansible_module_call(self):
+ """
+ Test Ansible module call from ansible gate module
+
+ :return:
+ """
+
+ class Module:
+ """
+ An ansible module mock.
+ """
+
+ __name__ = "one.two.three"
+ __file__ = "foofile"
+
+ def main():
+ pass
+
+ ANSIBLE_MODULE_ARGS = '{"ANSIBLE_MODULE_ARGS": ["arg_1", {"kwarg1": "foobar"}]}'
+
+ proc = MagicMock(
+ side_effect=[
+ MockTimedProc(stdout=ANSIBLE_MODULE_ARGS.encode(), stderr=None),
+ MockTimedProc(stdout=b'{"completed": true}', stderr=None),
+ ]
+ )
+
+ with patch.object(ansible, "_resolver", self.resolver), patch.object(
+ ansible._resolver, "load_module", MagicMock(return_value=Module())
+ ):
+ _ansible_module_caller = ansible.AnsibleModuleCaller(ansible._resolver)
+ with patch("salt.utils.timed_subprocess.TimedProc", proc):
+ ret = _ansible_module_caller.call(
+ "one.two.three", "arg_1", kwarg1="foobar"
+ )
+ proc.assert_any_call(
+ [
+ "echo",
+ '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}',
+ ],
+ stdout=-1,
+ timeout=1200,
+ )
+ proc.assert_any_call(
+ ["python3", "foofile"],
+ stdin=ANSIBLE_MODULE_ARGS,
+ stdout=-1,
+ timeout=1200,
+ )
+ assert ret == {"completed": True, "timeout": 1200}
diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py
index 15b97f8568..f3348bc379 100644
--- a/tests/unit/modules/test_cmdmod.py
+++ b/tests/unit/modules/test_cmdmod.py
@@ -24,6 +24,7 @@ DEFAULT_SHELL = "foo/bar"
MOCK_SHELL_FILE = "# List of acceptable shells\n" "\n" "/bin/bash\n"
+@skipIf(NO_MOCK, NO_MOCK_REASON)
class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit tests for the salt.modules.cmdmod module
--
2.29.2

View File

@ -1,4 +1,4 @@
From 8eaeb751d4077d6514577b53a9dbe23df231018e Mon Sep 17 00:00:00 2001
From e82c8832aed9ef46f5021558758ef9d944d89214 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 8 Mar 2021 12:35:14 +0000
@ -127,19 +127,19 @@ index e9d80fc4ad..f98fdcb0e9 100644
),
value_node.start_mark,
diff --git a/tests/unit/utils/test_yamlloader.py b/tests/unit/utils/test_yamlloader.py
index a1e17af760..3f2e4403ba 100644
index e1d60aeed0..a5de963817 100644
--- a/tests/unit/utils/test_yamlloader.py
+++ b/tests/unit/utils/test_yamlloader.py
@@ -13,7 +13,7 @@ import salt.utils.files
@@ -5,7 +5,7 @@
import textwrap
# Import 3rd-party libs
from salt.ext import six
import salt.utils.files
-from salt.utils.yamlloader import SaltYamlSafeLoader
+from salt.utils.yamlloader import SaltYamlSafeLoader, yaml
from tests.support.mock import mock_open, patch
# Import Salt Testing Libs
@@ -177,3 +177,7 @@ class YamlLoaderTestCase(TestCase):
from tests.support.unit import TestCase
from yaml.constructor import ConstructorError
@@ -133,3 +133,7 @@ class YamlLoaderTestCase(TestCase):
),
{"foo": {"b": {"foo": "bar", "one": 1, "list": [1, "two", 3]}}},
)
@ -148,6 +148,6 @@ index a1e17af760..3f2e4403ba 100644
+ if hasattr(yaml, "CSafeLoader"):
+ assert yaml.SafeLoader != yaml.CSafeLoader
--
2.30.1
2.33.0

View File

@ -1,27 +0,0 @@
From 81d0105b0c0464c375070ffbc863a020a67e7965 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 26 Aug 2020 10:24:58 +0100
Subject: [PATCH] Do not raise StreamClosedError traceback but only log
it (bsc#1175549)
---
salt/transport/ipc.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
index f411907da2..5ff0956dde 100644
--- a/salt/transport/ipc.py
+++ b/salt/transport/ipc.py
@@ -688,7 +688,6 @@ class IPCMessageSubscriber(IPCClient):
except StreamClosedError as exc:
log.trace("Subscriber disconnected from IPC %s", self.socket_path)
self._read_stream_future = None
- exc_to_raise = exc
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred in Subscriber while handling stream: %s", exc)
self._read_stream_future = None
--
2.29.2

View File

@ -1,26 +0,0 @@
From 3dc61b426cee5c40976ee25a0357fd07244a630b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Tue, 13 Oct 2020 12:02:00 +0100
Subject: [PATCH] Drop wrong mock from chroot unit test
---
tests/unit/modules/test_chroot.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py
index 196e3ad27f..a0f3f8e6af 100644
--- a/tests/unit/modules/test_chroot.py
+++ b/tests/unit/modules/test_chroot.py
@@ -71,7 +71,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin):
self.assertTrue(chroot.create("/chroot"))
makedirs.assert_called()
- @patch("salt.modules.chroot.exist")
@patch("salt.utils.files.fopen")
def test_in_chroot(self, fopen):
"""
--
2.29.2

View File

@ -1,99 +0,0 @@
From 79ae019ac7515614c6fbc620e66575f015bc447d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Tue, 5 Jan 2021 09:34:45 +0000
Subject: [PATCH] Drop wrong virt capabilities code after rebasing
patches
---
salt/modules/virt.py | 66 --------------------------------------------
1 file changed, 66 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index e3960a5a90..786bfa1e58 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -143,7 +143,6 @@ import salt.utils.xmlutil as xmlutil
import salt.utils.yaml
from salt._compat import ElementTree, ipaddress, saxutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
-from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
from salt.utils.virt import check_remote, download_remote
@@ -5416,71 +5415,6 @@ def _parse_domain_caps(caps):
return result
-def _parse_domain_caps(caps):
- """
- Parse the XML document of domain capabilities into a structure.
- """
- result = {
- "emulator": caps.find("path").text if caps.find("path") is not None else None,
- "domain": caps.find("domain").text if caps.find("domain") is not None else None,
- "machine": caps.find("machine").text
- if caps.find("machine") is not None
- else None,
- "arch": caps.find("arch").text if caps.find("arch") is not None else None,
- }
-
-
-def all_capabilities(**kwargs):
- """
- Return the host and domain capabilities in a single call.
-
- .. versionadded:: 3001
-
- :param connection: libvirt connection URI, overriding defaults
- :param username: username to connect with, overriding defaults
- :param password: password to connect with, overriding defaults
-
- CLI Example:
-
- .. code-block:: bash
-
- salt '*' virt.all_capabilities
-
- """
- conn = __get_conn(**kwargs)
- try:
- host_caps = ElementTree.fromstring(conn.getCapabilities())
- domains = [
- [
- (guest.get("arch", {}).get("name", None), key)
- for key in guest.get("arch", {}).get("domains", {}).keys()
- ]
- for guest in [
- _parse_caps_guest(guest) for guest in host_caps.findall("guest")
- ]
- ]
- flattened = [pair for item in (x for x in domains) for pair in item]
- result = {
- "host": {
- "host": _parse_caps_host(host_caps.find("host")),
- "guests": [
- _parse_caps_guest(guest) for guest in host_caps.findall("guest")
- ],
- },
- "domains": [
- _parse_domain_caps(
- ElementTree.fromstring(
- conn.getDomainCapabilities(None, arch, None, domain)
- )
- )
- for (arch, domain) in flattened
- ],
- }
- return result
- finally:
- conn.close()
-
-
def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
"""
Return the domain capabilities given an emulator, architecture, machine or virtualization type.
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
From 1fd51c17bc03e679a040f2c6d9ac107a2c57b7c8 Mon Sep 17 00:00:00 2001
From 9071189b7395284f0328b59c999c18919e12ae32 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 7 Jul 2021 15:41:48 +0100
@ -13,11 +13,13 @@ Subject: [PATCH] Enhance openscap module: add "xccdf_eval" call (#386)
* Add changes suggested by pre-commit
Co-authored-by: Michael Calmer <mc@suse.de>
Fix error handling in openscap module (bsc#1188647) (#409)
---
changelog/59756.added | 1 +
salt/modules/openscap.py | 120 ++++++++++++-
tests/unit/modules/test_openscap.py | 262 +++++++++++++++++++++++++---
3 files changed, 353 insertions(+), 30 deletions(-)
salt/modules/openscap.py | 126 +++++++++++++--
tests/unit/modules/test_openscap.py | 234 ++++++++++++++++++++++++++++
3 files changed, 352 insertions(+), 9 deletions(-)
create mode 100644 changelog/59756.added
diff --git a/changelog/59756.added b/changelog/59756.added
@ -28,7 +30,7 @@ index 0000000000..a59fb21eef
@@ -0,0 +1 @@
+adding new call for openscap xccdf eval supporting new parameters
diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py
index 6f8ff4a76d..f75e1c5e6b 100644
index 6f8ff4a76d..216fd89eef 100644
--- a/salt/modules/openscap.py
+++ b/salt/modules/openscap.py
@@ -1,20 +1,15 @@
@ -62,7 +64,7 @@ index 6f8ff4a76d..f75e1c5e6b 100644
self.add_argument("action", choices=["eval"])
add_arg = None
for params, kwparams in _XCCDF_MAP["eval"]["parser_arguments"]:
@@ -61,6 +56,115 @@ _OSCAP_EXIT_CODES_MAP = {
@@ -61,6 +56,117 @@ _OSCAP_EXIT_CODES_MAP = {
}
@ -163,7 +165,9 @@ index 6f8ff4a76d..f75e1c5e6b 100644
+ tempdir = tempfile.mkdtemp()
+ proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir)
+ (stdoutdata, error) = proc.communicate()
+ success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
+ if proc.returncode < 0:
+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
+ returncode = proc.returncode
+ if success:
+ __salt__["cp.push_dir"](tempdir)
@ -178,7 +182,7 @@ index 6f8ff4a76d..f75e1c5e6b 100644
def xccdf(params):
"""
Run ``oscap xccdf`` commands on minions.
@@ -91,7 +195,7 @@ def xccdf(params):
@@ -91,14 +197,16 @@ def xccdf(params):
args, argv = _ArgumentParser(action=action).parse_known_args(args=params)
except Exception as err: # pylint: disable=broad-except
success = False
@ -187,30 +191,21 @@ index 6f8ff4a76d..f75e1c5e6b 100644
if success:
cmd = _XCCDF_MAP[action]["cmd_pattern"].format(args.profile, policy)
tempdir = tempfile.mkdtemp()
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
(stdoutdata, error) = proc.communicate()
- success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
+ if proc.returncode < 0:
+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
returncode = proc.returncode
if success:
__salt__["cp.push_dir"](tempdir)
diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py
index 04cf00a1d3..e5be151bf2 100644
index 045c37f7c9..301c1869ec 100644
--- a/tests/unit/modules/test_openscap.py
+++ b/tests/unit/modules/test_openscap.py
@@ -1,18 +1,8 @@
-# -*- coding: utf-8 -*-
-
-# Import python libs
-from __future__ import absolute_import, print_function, unicode_literals
-
from subprocess import PIPE
-# Import salt libs
import salt.modules.openscap as openscap
-
-# Import 3rd-party libs
from salt.ext import six
from tests.support.mock import MagicMock, Mock, patch
-
-# Import salt test libs
from tests.support.unit import TestCase
@@ -32,6 +22,7 @@ class OpenscapTestCase(TestCase):
@@ -21,6 +21,7 @@ class OpenscapTestCase(TestCase):
"salt.modules.openscap.tempfile.mkdtemp",
Mock(return_value=self.random_temp_dir),
),
@ -218,60 +213,7 @@ index 04cf00a1d3..e5be151bf2 100644
]
for patcher in patchers:
self.apply_patch(patcher)
@@ -50,7 +41,7 @@ class OpenscapTestCase(TestCase):
),
):
response = openscap.xccdf(
- "eval --profile Default {0}".format(self.policy_file)
+ "eval --profile Default {}".format(self.policy_file)
)
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
@@ -97,7 +88,7 @@ class OpenscapTestCase(TestCase):
),
):
response = openscap.xccdf(
- "eval --profile Default {0}".format(self.policy_file)
+ "eval --profile Default {}".format(self.policy_file)
)
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
@@ -136,10 +127,7 @@ class OpenscapTestCase(TestCase):
def test_openscap_xccdf_eval_fail_no_profile(self):
response = openscap.xccdf("eval --param Default /unknown/param")
- if six.PY2:
- error = "argument --profile is required"
- else:
- error = "the following arguments are required: --profile"
+ error = "the following arguments are required: --profile"
self.assertEqual(
response,
{"error": error, "upload_dir": None, "success": False, "returncode": None},
@@ -199,7 +187,7 @@ class OpenscapTestCase(TestCase):
),
):
response = openscap.xccdf(
- "eval --profile Default {0}".format(self.policy_file)
+ "eval --profile Default {}".format(self.policy_file)
)
self.assertEqual(
@@ -213,11 +201,8 @@ class OpenscapTestCase(TestCase):
)
def test_openscap_xccdf_eval_fail_not_implemented_action(self):
- response = openscap.xccdf("info {0}".format(self.policy_file))
- if six.PY2:
- mock_err = "argument action: invalid choice: 'info' (choose from u'eval')"
- else:
- mock_err = "argument action: invalid choice: 'info' (choose from 'eval')"
+ response = openscap.xccdf("info {}".format(self.policy_file))
+ mock_err = "argument action: invalid choice: 'info' (choose from 'eval')"
self.assertEqual(
response,
@@ -228,3 +213,236 @@ class OpenscapTestCase(TestCase):
@@ -211,3 +212,236 @@ class OpenscapTestCase(TestCase):
"returncode": None,
},
)
@ -310,8 +252,8 @@ index 04cf00a1d3..e5be151bf2 100644
+ openscap.Popen.assert_called_once_with(
+ expected_cmd,
+ cwd=openscap.tempfile.mkdtemp.return_value,
+ stderr=PIPE,
+ stdout=PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ openscap.__salt__["cp.push_dir"].assert_called_once_with(
+ self.random_temp_dir
@ -364,8 +306,8 @@ index 04cf00a1d3..e5be151bf2 100644
+ openscap.Popen.assert_called_once_with(
+ expected_cmd,
+ cwd=openscap.tempfile.mkdtemp.return_value,
+ stderr=PIPE,
+ stdout=PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ openscap.__salt__["cp.push_dir"].assert_called_once_with(
+ self.random_temp_dir
@ -415,8 +357,8 @@ index 04cf00a1d3..e5be151bf2 100644
+ openscap.Popen.assert_called_once_with(
+ expected_cmd,
+ cwd=openscap.tempfile.mkdtemp.return_value,
+ stderr=PIPE,
+ stdout=PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ openscap.__salt__["cp.push_dir"].assert_called_once_with(
+ self.random_temp_dir
@ -475,8 +417,8 @@ index 04cf00a1d3..e5be151bf2 100644
+ openscap.Popen.assert_called_once_with(
+ expected_cmd,
+ cwd=openscap.tempfile.mkdtemp.return_value,
+ stderr=PIPE,
+ stdout=PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+
+ def test_new_openscap_xccdf_eval_evaluation_error(self):
@ -509,6 +451,6 @@ index 04cf00a1d3..e5be151bf2 100644
+ },
+ )
--
2.32.0
2.33.0

View File

@ -1,51 +0,0 @@
From 1cea7d065d8da7c713af8136162c21187d5186f5 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cbosdonnat@suse.com>
Date: Wed, 14 Oct 2020 12:39:16 +0200
Subject: [PATCH] Ensure virt.update stop_on_reboot is updated with its
default value (#280)
While all virt.update properties default values should not be used when
updating the XML definition, the stop_on_reboot default value (False)
needs to be passed still or the user will never be able to update with
this value.
---
salt/modules/virt.py | 1 +
tests/unit/modules/test_virt.py | 2 ++
2 files changed, 3 insertions(+)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 8e2180608a..e3960a5a90 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -2738,6 +2738,7 @@ def update(
]
data = {k: v for k, v in locals().items() if bool(v)}
+ data["stop_on_reboot"] = stop_on_reboot
if boot_dev:
data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
need_update = (
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
index fba821ea53..83152eda6e 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
@@ -1777,6 +1777,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
<memory unit='KiB'>1048576</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<vcpu placement='auto'>1</vcpu>
+ <on_reboot>restart</on_reboot>
<os>
<type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
<boot dev="hd"/>
@@ -2349,6 +2350,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
<memory unit='KiB'>1048576</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<vcpu placement='auto'>1</vcpu>
+ <on_reboot>restart</on_reboot>
<os>
<type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
</os>
--
2.29.2

View File

@ -1,69 +0,0 @@
From 57ed9c41a177f57e3d56465662750617ac36cc95 Mon Sep 17 00:00:00 2001
From: Joe Eacott <jeacott@vmware.com>
Date: Mon, 28 Jun 2021 16:46:35 -0600
Subject: [PATCH] Exclude the full path of a download URL to prevent
injection of malicious code (bsc#1190265) (CVE-2021-21996)
---
salt/fileclient.py | 7 +++++++
tests/unit/test_fileclient.py | 18 ++++++++++++++++++
2 files changed, 25 insertions(+)
diff --git a/salt/fileclient.py b/salt/fileclient.py
index 88dcf1668d..bdf450ffe6 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -28,6 +28,7 @@ import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
+import salt.utils.verify
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError
@@ -858,6 +859,12 @@ class Client:
else:
file_name = url_data.path
+ # clean_path returns an empty string if the check fails
+ root_path = salt.utils.path.join(cachedir, "extrn_files", saltenv, netloc)
+ new_path = os.path.sep.join([root_path, file_name])
+ if not salt.utils.verify.clean_path(root_path, new_path, subdir=True):
+ return "Invalid path"
+
if len(file_name) > MAX_FILENAME_LENGTH:
file_name = salt.utils.hashutils.sha256_digest(file_name)
diff --git a/tests/unit/test_fileclient.py b/tests/unit/test_fileclient.py
index 3aa7b7cf84..b6cc84a871 100644
--- a/tests/unit/test_fileclient.py
+++ b/tests/unit/test_fileclient.py
@@ -63,6 +63,24 @@ class FileclientTestCase(TestCase):
) as c_ref_itr:
assert c_ref_itr == "/__test__/files/base/testfile"
+ def test_cache_extrn_path_valid(self):
+ """
+ Tests for extrn_filepath for a given url
+ """
+ file_name = "http://localhost:8000/test/location/src/dev/usr/file"
+
+ ret = fileclient.Client(self.opts)._extrn_path(file_name, "base")
+ assert ret == os.path.join("__test__", "extrn_files", "base", ret)
+
+ def test_cache_extrn_path_invalid(self):
+ """
+ Tests for extrn_filepath for a given url
+ """
+ file_name = "http://localhost:8000/../../../../../usr/bin/bad"
+
+ ret = fileclient.Client(self.opts)._extrn_path(file_name, "base")
+ assert ret == "Invalid path"
+
def test_extrn_path_with_long_filename(self):
safe_file_name = os.path.split(
fileclient.Client(self.opts)._extrn_path(
--
2.33.0

View File

@ -1,38 +0,0 @@
From 188a97fc20c3e24950b82dc6fcd0da878509cf7a Mon Sep 17 00:00:00 2001
From: Maximilian Meister <mmeister@suse.de>
Date: Thu, 5 Apr 2018 13:23:23 +0200
Subject: [PATCH] fall back to PyMySQL
same is already done in modules (see #26803)
Signed-off-by: Maximilian Meister <mmeister@suse.de>
---
salt/modules/mysql.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py
index fdfe35158a..385e4d92a3 100644
--- a/salt/modules/mysql.py
+++ b/salt/modules/mysql.py
@@ -55,7 +55,7 @@ try:
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.constants import FIELD_TYPE, FLAG
- from MySQLdb import OperationalError
+ from MySQLdb.connections import OperationalError
except ImportError:
try:
# MySQLdb import failed, try to import PyMySQL
@@ -66,7 +66,7 @@ except ImportError:
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.constants import FIELD_TYPE, FLAG
- from MySQLdb import OperationalError
+ from MySQLdb.err import OperationalError
except ImportError:
MySQLdb = None
--
2.29.2

View File

@ -1,9 +1,8 @@
From 271826b0baa6b2281bc2eac9118a0fcc4675f106 Mon Sep 17 00:00:00 2001
From 1b54843abe5fad0bac844d6d5d9707df3e501aae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 19 May 2021 16:24:27 +0100
Subject: [PATCH] Figure out Python interpreter to use inside
containers
Subject: [PATCH] Figure out Python interpreter to use inside containers
Fix unit test for dockermod.call function
---
@ -12,10 +11,10 @@ Fix unit test for dockermod.call function
2 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
index 176b4db926..cad307e7af 100644
index ab2296a945..6d60a9a5aa 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
@@ -217,7 +217,6 @@ import re
@@ -209,7 +209,6 @@ import re
import shutil
import string
import subprocess
@ -23,7 +22,7 @@ index 176b4db926..cad307e7af 100644
import time
import uuid
@@ -6865,9 +6864,32 @@ def call(name, function, *args, **kwargs):
@@ -6728,9 +6727,32 @@ def call(name, function, *args, **kwargs):
name, thin_path, os.path.join(thin_dest_path, os.path.basename(thin_path))
)
@ -55,22 +54,22 @@ index 176b4db926..cad307e7af 100644
- "python",
+ container_python_bin,
"-c",
("import tarfile; " 'tarfile.open("{0}/{1}").extractall(path="{0}")').format(
thin_dest_path, os.path.basename(thin_path)
@@ -6880,7 +6902,7 @@ def call(name, function, *args, **kwargs):
"import tarfile; "
'tarfile.open("{0}/{1}").extractall(path="{0}")'.format(
@@ -6744,7 +6766,7 @@ def call(name, function, *args, **kwargs):
try:
salt_argv = (
[
- "python{0}".format(sys.version_info[0]),
- "python{}".format(sys.version_info[0]),
+ container_python_bin,
os.path.join(thin_dest_path, "salt-call"),
"--metadata",
"--local",
diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py
index 48526acb71..ebe97a83f5 100644
index 2c3665de85..fcedaf9272 100644
--- a/tests/unit/modules/test_dockermod.py
+++ b/tests/unit/modules/test_dockermod.py
@@ -1049,33 +1049,35 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
@@ -987,33 +987,35 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
# [ call(name, [args]), ...
self.maxDiff = None
self.assertIn("mkdir", docker_run_all_mock.mock_calls[0][1][1])
@ -121,6 +120,6 @@ index 48526acb71..ebe97a83f5 100644
self.assertEqual({"retcode": 0, "comment": "container cmd"}, ret)
--
2.31.1
2.33.0

View File

@ -1,89 +0,0 @@
From 1e00e2b72321b5312efb7b8b426a037c8db72b79 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@suse.com>
Date: Wed, 29 Jul 2020 16:11:47 +0200
Subject: [PATCH] Fix __mount_device wrapper (#254)
Some recent change in Salt is now doing the right thing, and calling the
different states with separated args and kwargs. This change trigger a
hidden bug in the __mount_device decorator, that expect those parameter
to be in kwargs, as is happening during the test.
This patch change the way that the wrapper inside the decorator search
for the name and device parameters, first looking into kwargs and later
in args if possible. A new test is introduced to exercise both cases.
Fix #58012
(cherry picked from commit 2089645e2478751dc795127cfd14d0385c2e0899)
---
changelog/58012.fixed | 1 +
salt/states/btrfs.py | 4 ++--
tests/unit/states/test_btrfs.py | 27 +++++++++++++++++++++++++++
3 files changed, 30 insertions(+), 2 deletions(-)
create mode 100644 changelog/58012.fixed
diff --git a/changelog/58012.fixed b/changelog/58012.fixed
new file mode 100644
index 0000000000..13a1ef747d
--- /dev/null
+++ b/changelog/58012.fixed
@@ -0,0 +1 @@
+Fix btrfs state decorator, that produces exceptions when creating subvolumes.
\ No newline at end of file
diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py
index 1374bbffb4..62a3553758 100644
--- a/salt/states/btrfs.py
+++ b/salt/states/btrfs.py
@@ -103,8 +103,8 @@ def __mount_device(action):
@functools.wraps(action)
def wrapper(*args, **kwargs):
- name = kwargs["name"]
- device = kwargs["device"]
+ name = kwargs.get("name", args[0] if args else None)
+ device = kwargs.get("device", args[1] if len(args) > 1 else None)
use_default = kwargs.get("use_default", False)
ret = {
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
index b8f70bccfe..dceb971aa1 100644
--- a/tests/unit/states/test_btrfs.py
+++ b/tests/unit/states/test_btrfs.py
@@ -231,6 +231,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
mount.assert_called_once()
umount.assert_called_once()
+ @skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
+ @patch("salt.states.btrfs._umount")
+ @patch("salt.states.btrfs._mount")
+ def test_subvolume_created_exists_decorator(self, mount, umount):
+ """
+ Test creating a subvolume using a non-kwargs call
+ """
+ mount.return_value = "/tmp/xxx"
+ salt_mock = {
+ "btrfs.subvolume_exists": MagicMock(return_value=True),
+ }
+ opts_mock = {
+ "test": False,
+ }
+ with patch.dict(btrfs.__salt__, salt_mock), patch.dict(
+ btrfs.__opts__, opts_mock
+ ):
+ assert btrfs.subvolume_created("@/var", "/dev/sda1") == {
+ "name": "@/var",
+ "result": True,
+ "changes": {},
+ "comment": ["Subvolume @/var already present"],
+ }
+ salt_mock["btrfs.subvolume_exists"].assert_called_with("/tmp/xxx/@/var")
+ mount.assert_called_once()
+ umount.assert_called_once()
+
@patch("salt.states.btrfs._umount")
@patch("salt.states.btrfs._mount")
def test_subvolume_created_exists_test(self, mount, umount):
--
2.29.2

View File

@ -1,144 +0,0 @@
From 3d5f3cff6b43d7aba35063e970d016401bb82921 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@gmail.com>
Date: Fri, 25 Oct 2019 15:43:16 +0200
Subject: [PATCH] Fix a wrong rebase in test_core.py (#180)
* core: ignore wrong product_name files
Some firmwares (like some NUC machines), do not provide valid
/sys/class/dmi/id/product_name strings. In those cases an
UnicodeDecodeError exception happens.
This patch ignore this kind of issue during the grains creation.
(cherry picked from commit 27b001bd5408359aa5dd219bfd900095ed592fe8)
* core: remove duplicate dead code
(cherry picked from commit bd0213bae00b737b24795bec3c030ebfe476e0d8)
---
salt/grains/core.py | 8 +++-
tests/unit/grains/test_core.py | 80 ----------------------------------
2 files changed, 6 insertions(+), 82 deletions(-)
diff --git a/salt/grains/core.py b/salt/grains/core.py
index a2983e388b..5dff6ecfd4 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1066,7 +1066,9 @@ def _virtual(osdata):
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
- pass
+ log.debug(
+ "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
+ )
except OSError:
pass
elif osdata["kernel"] == "FreeBSD":
@@ -2716,7 +2718,9 @@ def _hw_data(osdata):
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
- pass
+ log.debug(
+ "The content in /sys/devices/virtual/dmi/id/product_name is not valid"
+ )
except OSError as err:
# PermissionError is new to Python 3, but corresponds to the EACESS and
# EPERM error numbers. Use those instead here for PY2 compatibility.
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 0dc3423646..85d434dd9d 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -2047,86 +2047,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
result = core.path()
assert result == {"path": path, "systempath": comps}, result
- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
- @patch("os.path.exists")
- @patch("salt.utils.platform.is_proxy")
- def test_kernelparams_return(self):
- expectations = [
- (
- "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64",
- {
- "kernelparams": [
- ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64")
- ]
- },
- ),
- (
- "root=/dev/mapper/centos_daemon-root",
- {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]},
- ),
- (
- "rhgb quiet ro",
- {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]},
- ),
- ('param="value1"', {"kernelparams": [("param", "value1")]}),
- (
- 'param="value1 value2 value3"',
- {"kernelparams": [("param", "value1 value2 value3")]},
- ),
- (
- 'param="value1 value2 value3" LANG="pl" ro',
- {
- "kernelparams": [
- ("param", "value1 value2 value3"),
- ("LANG", "pl"),
- ("ro", None),
- ]
- },
- ),
- ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}),
- (
- 'param="value1:value2:value3"',
- {"kernelparams": [("param", "value1:value2:value3")]},
- ),
- (
- 'param="value1,value2,value3"',
- {"kernelparams": [("param", "value1,value2,value3")]},
- ),
- (
- 'param="value1" param="value2" param="value3"',
- {
- "kernelparams": [
- ("param", "value1"),
- ("param", "value2"),
- ("param", "value3"),
- ]
- },
- ),
- ]
-
- for cmdline, expectation in expectations:
- with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)):
- self.assertEqual(core.kernelparams(), expectation)
-
- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
- @patch("os.path.exists")
- @patch("salt.utils.platform.is_proxy")
- def test__hw_data_linux_empty(self, is_proxy, exists):
- is_proxy.return_value = False
- exists.return_value = True
- with patch("salt.utils.files.fopen", mock_open(read_data="")):
- self.assertEqual(
- core._hw_data({"kernel": "Linux"}),
- {
- "biosreleasedate": "",
- "biosversion": "",
- "manufacturer": "",
- "productname": "",
- "serialnumber": "",
- "uuid": "",
- },
- )
-
@skipIf(not salt.utils.platform.is_linux(), "System is not Linux")
@skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3")
@patch("os.path.exists")
--
2.29.2

View File

@ -1,41 +0,0 @@
From 5dadda6822323f409c99112244c2c809e58126e1 Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Wed, 31 Jul 2019 15:29:03 +0200
Subject: [PATCH] Fix aptpkg systemd call (bsc#1143301)
---
salt/modules/aptpkg.py | 2 +-
tests/unit/modules/test_aptpkg.py | 3 +--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index bf90d0614f..c47ee852f4 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -160,7 +160,7 @@ def _call_apt(args, scope=True, **kwargs):
and salt.utils.systemd.has_scope(__context__)
and __salt__["config.get"]("systemd.scope", True)
):
- cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
+ cmd.extend(["systemd-run", "--scope", '--description "{}"'.format(__name__)])
cmd.extend(args)
params = {
diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py
index 77d8b84896..c3769a7df1 100644
--- a/tests/unit/modules/test_aptpkg.py
+++ b/tests/unit/modules/test_aptpkg.py
@@ -896,8 +896,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
[
"systemd-run",
"--scope",
- "--description",
- '"salt.modules.aptpkg"',
+ '--description "salt.modules.aptpkg"',
"apt-get",
"purge",
"vim",
--
2.29.2

View File

@ -1,140 +0,0 @@
From 85b8666b138cab170327f0217c799277371b2e80 Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Tue, 7 May 2019 12:24:35 +0200
Subject: [PATCH] Fix async-batch multiple done events
---
salt/cli/batch_async.py | 19 ++++++++++++-------
tests/unit/cli/test_batch_async.py | 20 +++++++++++++-------
2 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index b0ab9d9f47..7225491228 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -86,6 +86,7 @@ class BatchAsync:
io_loop=ioloop,
keep_loop=True,
)
+ self.scheduled = False
def __set_event_handler(self):
ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
@@ -118,10 +119,7 @@ class BatchAsync:
if minion in self.active:
self.active.remove(minion)
self.done_minions.add(minion)
- # call later so that we maybe gather more returns
- self.event.io_loop.call_later(
- self.batch_delay, self.schedule_next
- )
+ self.schedule_next()
def _get_next(self):
to_run = (
@@ -146,7 +144,7 @@ class BatchAsync:
self.timedout_minions
)
if timedout_minions:
- self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
+ self.schedule_next()
if running:
self.event.io_loop.add_callback(self.find_job, running)
@@ -197,7 +195,7 @@ class BatchAsync:
"metadata": self.metadata,
}
self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
- yield self.schedule_next()
+ yield self.run_next()
def end_batch(self):
left = self.minions.symmetric_difference(
@@ -214,8 +212,14 @@ class BatchAsync:
self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
self.event.remove_event_handler(self.__event_handler)
- @tornado.gen.coroutine
def schedule_next(self):
+ if not self.scheduled:
+ self.scheduled = True
+ # call later so that we maybe gather more returns
+ self.event.io_loop.call_later(self.batch_delay, self.run_next)
+
+ @tornado.gen.coroutine
+ def run_next(self):
next_batch = self._get_next()
if next_batch:
self.active = self.active.union(next_batch)
@@ -238,3 +242,4 @@ class BatchAsync:
self.active = self.active.difference(next_batch)
else:
self.end_batch()
+ self.scheduled = False
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index d6a4bfcf60..66332a548a 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -105,14 +105,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
@tornado.testing.gen_test
def test_start_batch_calls_next(self):
- self.batch.schedule_next = MagicMock(return_value=MagicMock())
+ self.batch.run_next = MagicMock(return_value=MagicMock())
self.batch.event = MagicMock()
future = tornado.gen.Future()
future.set_result(None)
- self.batch.schedule_next = MagicMock(return_value=future)
+ self.batch.run_next = MagicMock(return_value=future)
self.batch.start_batch()
self.assertEqual(self.batch.initialized, True)
- self.assertEqual(len(self.batch.schedule_next.mock_calls), 1)
+ self.assertEqual(len(self.batch.run_next.mock_calls), 1)
def test_batch_fire_done_event(self):
self.batch.targeted_minions = {"foo", "baz", "bar"}
@@ -147,7 +147,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
- ret = self.batch.schedule_next().result()
+ ret = self.batch.run_next().result()
self.assertEqual(
self.batch.local.run_job_async.call_args[0],
({"foo", "bar"}, "my.fun", [], "list"),
@@ -250,7 +250,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.done_minions, {"foo"})
self.assertEqual(
self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.batch_delay, self.batch.schedule_next),
+ (self.batch.batch_delay, self.batch.run_next),
)
def test_batch__event_handler_find_job_return(self):
@@ -262,10 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.find_job_returned, {"foo"})
@tornado.testing.gen_test
- def test_batch_schedule_next_end_batch_when_no_next(self):
+ def test_batch_run_next_end_batch_when_no_next(self):
self.batch.end_batch = MagicMock()
self.batch._get_next = MagicMock(return_value={})
- self.batch.schedule_next()
+ self.batch.run_next()
self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
@tornado.testing.gen_test
@@ -345,3 +345,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.event.io_loop.add_callback.call_args[0],
(self.batch.find_job, {"foo"}),
)
+
+ def test_only_on_run_next_is_scheduled(self):
+ self.batch.event = MagicMock()
+ self.batch.scheduled = True
+ self.batch.schedule_next()
+ self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0)
--
2.29.2

View File

@ -1,273 +0,0 @@
From 4b3badeb52a9de10d6085ee3cc7598a827d1e68f Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Thu, 11 Apr 2019 15:57:59 +0200
Subject: [PATCH] Fix async batch race conditions
Close batching when there is no next batch
---
salt/cli/batch_async.py | 96 +++++++++++++++---------------
tests/unit/cli/test_batch_async.py | 38 +++++-------
2 files changed, 62 insertions(+), 72 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 1557e5105b..b0ab9d9f47 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -32,14 +32,14 @@ class BatchAsync:
- tag: salt/batch/<batch-jid>/start
- data: {
"available_minions": self.minions,
- "down_minions": self.down_minions
+ "down_minions": targeted_minions - presence_ping_minions
}
When the batch ends, an `done` event is fired:
- tag: salt/batch/<batch-jid>/done
- data: {
"available_minions": self.minions,
- "down_minions": self.down_minions,
+ "down_minions": targeted_minions - presence_ping_minions
"done_minions": self.done_minions,
"timedout_minions": self.timedout_minions
}
@@ -68,7 +68,7 @@ class BatchAsync:
self.eauth = batch_get_eauth(clear_load["kwargs"])
self.metadata = clear_load["kwargs"].get("metadata", {})
self.minions = set()
- self.down_minions = set()
+ self.targeted_minions = set()
self.timedout_minions = set()
self.done_minions = set()
self.active = set()
@@ -110,8 +110,7 @@ class BatchAsync:
minion = data["id"]
if op == "ping_return":
self.minions.add(minion)
- self.down_minions.remove(minion)
- if not self.down_minions:
+ if self.targeted_minions == self.minions:
self.event.io_loop.spawn_callback(self.start_batch)
elif op == "find_job_return":
self.find_job_returned.add(minion)
@@ -124,11 +123,6 @@ class BatchAsync:
self.batch_delay, self.schedule_next
)
- if self.initialized and self.done_minions == self.minions.difference(
- self.timedout_minions
- ):
- self.end_batch()
-
def _get_next(self):
to_run = (
self.minions.difference(self.done_minions)
@@ -142,20 +136,17 @@ class BatchAsync:
return set(list(to_run)[:next_batch_size])
@tornado.gen.coroutine
- def check_find_job(self, minions):
- did_not_return = minions.difference(self.find_job_returned)
- if did_not_return:
- for minion in did_not_return:
- if minion in self.find_job_returned:
- self.find_job_returned.remove(minion)
- if minion in self.active:
- self.active.remove(minion)
- self.timedout_minions.add(minion)
- running = (
- minions.difference(did_not_return)
- .difference(self.done_minions)
- .difference(self.timedout_minions)
+ def check_find_job(self, batch_minions):
+ timedout_minions = batch_minions.difference(self.find_job_returned).difference(
+ self.done_minions
)
+ self.timedout_minions = self.timedout_minions.union(timedout_minions)
+ self.active = self.active.difference(self.timedout_minions)
+ running = batch_minions.difference(self.done_minions).difference(
+ self.timedout_minions
+ )
+ if timedout_minions:
+ self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
if running:
self.event.io_loop.add_callback(self.find_job, running)
@@ -193,7 +184,7 @@ class BatchAsync:
metadata=self.metadata,
**self.eauth
)
- self.down_minions = set(ping_return["minions"])
+ self.targeted_minions = set(ping_return["minions"])
@tornado.gen.coroutine
def start_batch(self):
@@ -202,39 +193,48 @@ class BatchAsync:
self.initialized = True
data = {
"available_minions": self.minions,
- "down_minions": self.down_minions,
+ "down_minions": self.targeted_minions.difference(self.minions),
"metadata": self.metadata,
}
self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
yield self.schedule_next()
def end_batch(self):
- data = {
- "available_minions": self.minions,
- "down_minions": self.down_minions,
- "done_minions": self.done_minions,
- "timedout_minions": self.timedout_minions,
- "metadata": self.metadata,
- }
- self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- self.event.remove_event_handler(self.__event_handler)
+ left = self.minions.symmetric_difference(
+ self.done_minions.union(self.timedout_minions)
+ )
+ if not left:
+ data = {
+ "available_minions": self.minions,
+ "down_minions": self.targeted_minions.difference(self.minions),
+ "done_minions": self.done_minions,
+ "timedout_minions": self.timedout_minions,
+ "metadata": self.metadata,
+ }
+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
+ self.event.remove_event_handler(self.__event_handler)
@tornado.gen.coroutine
def schedule_next(self):
next_batch = self._get_next()
if next_batch:
- yield self.local.run_job_async(
- next_batch,
- self.opts["fun"],
- self.opts["arg"],
- "list",
- raw=self.opts.get("raw", False),
- ret=self.opts.get("return", ""),
- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.batch_jid,
- metadata=self.metadata,
- )
- self.event.io_loop.call_later(
- self.opts["timeout"], self.find_job, set(next_batch)
- )
self.active = self.active.union(next_batch)
+ try:
+ yield self.local.run_job_async(
+ next_batch,
+ self.opts["fun"],
+ self.opts["arg"],
+ "list",
+ raw=self.opts.get("raw", False),
+ ret=self.opts.get("return", ""),
+ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=self.batch_jid,
+ metadata=self.metadata,
+ )
+ self.event.io_loop.call_later(
+ self.opts["timeout"], self.find_job, set(next_batch)
+ )
+ except Exception as ex:
+ self.active = self.active.difference(next_batch)
+ else:
+ self.end_batch()
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index 3f8626a2dd..d6a4bfcf60 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(
self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob")
)
- # assert down_minions == all minions matched by tgt
- self.assertEqual(self.batch.down_minions, {"foo", "bar"})
+ # assert targeted_minions == all minions matched by tgt
+ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
@tornado.testing.gen_test
def test_batch_start_on_gather_job_timeout(self):
@@ -115,7 +115,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(len(self.batch.schedule_next.mock_calls), 1)
def test_batch_fire_done_event(self):
+ self.batch.targeted_minions = {"foo", "baz", "bar"}
self.batch.minions = {"foo", "bar"}
+ self.batch.done_minions = {"foo"}
+ self.batch.timedout_minions = {"bar"}
self.batch.event = MagicMock()
self.batch.metadata = {"mykey": "myvalue"}
self.batch.end_batch()
@@ -124,9 +127,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
(
{
"available_minions": {"foo", "bar"},
- "done_minions": set(),
- "down_minions": set(),
- "timedout_minions": set(),
+ "done_minions": self.batch.done_minions,
+ "down_minions": {"baz"},
+ "timedout_minions": self.batch.timedout_minions,
"metadata": self.batch.metadata,
},
"salt/batch/1235/done",
@@ -205,7 +208,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch._get_next(), set())
def test_batch__event_handler_ping_return(self):
- self.batch.down_minions = {"foo"}
+ self.batch.targeted_minions = {"foo"}
self.batch.event = MagicMock(
unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
)
@@ -216,7 +219,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.done_minions, set())
def test_batch__event_handler_call_start_batch_when_all_pings_return(self):
- self.batch.down_minions = {"foo"}
+ self.batch.targeted_minions = {"foo"}
self.batch.event = MagicMock(
unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
)
@@ -228,7 +231,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
)
def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self):
- self.batch.down_minions = {"foo", "bar"}
+ self.batch.targeted_minions = {"foo", "bar"}
self.batch.event = MagicMock(
unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
)
@@ -259,23 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.find_job_returned, {"foo"})
@tornado.testing.gen_test
- def test_batch__event_handler_end_batch(self):
- self.batch.event = MagicMock(
- unpack=MagicMock(
- return_value=("salt/job/not-my-jid/ret/foo", {"id": "foo"})
- )
- )
- future = tornado.gen.Future()
- future.set_result({"minions": ["foo", "bar", "baz"]})
- self.batch.local.run_job_async.return_value = future
- self.batch.start()
- self.batch.initialized = True
- self.assertEqual(self.batch.down_minions, {"foo", "bar", "baz"})
+ def test_batch_schedule_next_end_batch_when_no_next(self):
self.batch.end_batch = MagicMock()
- self.batch.minions = {"foo", "bar", "baz"}
- self.batch.done_minions = {"foo", "bar"}
- self.batch.timedout_minions = {"baz"}
- self.batch._BatchAsync__event_handler(MagicMock())
+ self.batch._get_next = MagicMock(return_value={})
+ self.batch.schedule_next()
self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
@tornado.testing.gen_test
--
2.29.2

View File

@ -1,32 +0,0 @@
From 5a83801b7733f09c35a7ff0abb5aa32d4c857e4b Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Tue, 3 Dec 2019 11:22:42 +0100
Subject: [PATCH] Fix batch_async obsolete test
---
tests/unit/cli/test_batch_async.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index c18b42be57..b04965268a 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -134,7 +134,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
"salt/batch/1235/done",
),
)
- self.assertEqual(len(self.batch.event.remove_event_handler.mock_calls), 1)
+
+ def test_batch__del__(self):
+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+ event = MagicMock()
+ batch.event = event
+ batch.__del__()
+ self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
@tornado.testing.gen_test
def test_batch_next(self):
--
2.29.2

View File

@ -1,34 +1,17 @@
From 1b9a160f578cf446f5ae622a450d23022e7e3ca5 Mon Sep 17 00:00:00 2001
From 22fe4809712dbc59ba2d8c3c2045f531f81bc517 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Thu, 14 Dec 2017 16:21:40 +0100
Subject: [PATCH] Fix bsc#1065792
---
salt/states/service.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
salt/states/service.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/salt/states/service.py b/salt/states/service.py
index d19c245756..4ea36a78f6 100644
index 536e64a430..27595f7703 100644
--- a/salt/states/service.py
+++ b/salt/states/service.py
@@ -56,16 +56,12 @@ set the reload value to True:
:ref:`Requisites <requisites>` documentation.
"""
-# Import Python libs
import time
-# Import Salt libs
import salt.utils.data
import salt.utils.platform
from salt.exceptions import CommandExecutionError
-
-# Import 3rd-party libs
from salt.utils.args import get_function_argspec as _argspec
from salt.utils.systemd import booted
@@ -79,6 +75,7 @@ def __virtual__():
@@ -78,6 +78,7 @@ def __virtual__():
Only make these states available if a service provider has been detected or
assigned for this minion
"""
@ -37,6 +20,6 @@ index d19c245756..4ea36a78f6 100644
return __virtualname__
else:
--
2.29.2
2.33.0

View File

@ -1,120 +0,0 @@
From bc7acab857b952353a959339b06c79d851a9d879 Mon Sep 17 00:00:00 2001
From: "Daniel A. Wozniak" <dwozniak@saltstack.com>
Date: Wed, 16 Sep 2020 00:25:10 +0000
Subject: [PATCH] Fix CVE-2020-25592 and add tests (bsc#1178319)
Properly validate eauth credentials and tokens on SSH calls made by Salt API
(bsc#1178319) (bsc#1178362) (bsc#1178361) (CVE-2020-25592) (CVE-2020-17490) (CVE-2020-16846)
---
salt/netapi/__init__.py | 43 +++++++++++++++++++++++++
tests/integration/netapi/test_client.py | 13 ++++++--
2 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py
index dec19b37ef..cba1ec574f 100644
--- a/salt/netapi/__init__.py
+++ b/salt/netapi/__init__.py
@@ -109,6 +109,49 @@ class NetapiClient:
"Authorization error occurred."
)
+ def _prep_auth_info(self, clear_load):
+ sensitive_load_keys = []
+ key = None
+ if "token" in clear_load:
+ auth_type = "token"
+ err_name = "TokenAuthenticationError"
+ sensitive_load_keys = ["token"]
+ return auth_type, err_name, key, sensitive_load_keys
+ elif "eauth" in clear_load:
+ auth_type = "eauth"
+ err_name = "EauthAuthenticationError"
+ sensitive_load_keys = ["username", "password"]
+ return auth_type, err_name, key, sensitive_load_keys
+ raise salt.exceptions.EauthAuthenticationError(
+ "No authentication credentials given"
+ )
+
+ def _authorize_ssh(self, low):
+ auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(low)
+ auth_check = self.loadauth.check_authentication(low, auth_type, key=key)
+ auth_list = auth_check.get("auth_list", [])
+ error = auth_check.get("error")
+ if error:
+ raise salt.exceptions.EauthAuthenticationError(error)
+ delimiter = low.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
+ _res = self.ckminions.check_minions(
+ low["tgt"], low.get("tgt_type", "glob"), delimiter
+ )
+ minions = _res.get("minions", list())
+ missing = _res.get("missing", list())
+ authorized = self.ckminions.auth_check(
+ auth_list,
+ low["fun"],
+ low.get("arg", []),
+ low["tgt"],
+ low.get("tgt_type", "glob"),
+ minions=minions,
+ )
+ if not authorized:
+ raise salt.exceptions.EauthAuthenticationError(
+ "Authorization error occurred."
+ )
+
def run(self, low):
"""
Execute the specified function in the specified client by passing the
diff --git a/tests/integration/netapi/test_client.py b/tests/integration/netapi/test_client.py
index 70471d3148..9eb6e52920 100644
--- a/tests/integration/netapi/test_client.py
+++ b/tests/integration/netapi/test_client.py
@@ -15,10 +15,12 @@ from tests.support.helpers import (
SKIP_IF_NOT_RUNNING_PYTEST,
SaveRequestsPostHandler,
Webserver,
+ requires_sshd_server,
slowTest,
)
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import patch
+from tests.support.paths import TMP, TMP_CONF_DIR
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
@@ -178,7 +180,12 @@ class NetapiSSHClientTest(SSHCase):
"""
opts = AdaptedConfigurationTestCaseMixin.get_config("client_config").copy()
self.netapi = salt.netapi.NetapiClient(opts)
- self.priv_file = os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key")
+ opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master"))
+ naopts = copy.deepcopy(opts)
+ naopts["ignore_host_keys"] = True
+ self.netapi = salt.netapi.NetapiClient(naopts)
+
+ self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR)
self.roster_file = os.path.join(self.rosters, "roster")
@@ -325,7 +332,7 @@ class NetapiSSHClientTest(SSHCase):
"roster": "cache",
"client": "ssh",
"tgt": "root|id>{} #@127.0.0.1".format(path),
- "roster_file": self.roster_file,
+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
"rosters": "/",
"fun": "test.ping",
"eauth": "auto",
@@ -355,7 +362,7 @@ class NetapiSSHClientTest(SSHCase):
"eauth": "auto",
"username": "saltdev_auto",
"password": "saltdev",
- "roster_file": self.roster_file,
+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster",
"rosters": "/",
"ssh_options": ["|id>{} #".format(path), "lol"],
}
--
2.29.2

View File

@ -1,40 +0,0 @@
From b7d11d8caf3eb4fb39a070201be87bb1b3abd525 Mon Sep 17 00:00:00 2001
From: Vladimir Nadvornik <nadvornik@suse.cz>
Date: Wed, 11 Aug 2021 12:19:09 +0200
Subject: [PATCH] Fix error handling in openscap module (bsc#1188647)
(#409)
---
salt/modules/openscap.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py
index f75e1c5e6b..216fd89eef 100644
--- a/salt/modules/openscap.py
+++ b/salt/modules/openscap.py
@@ -153,7 +153,9 @@ def xccdf_eval(xccdffile, ovalfiles=None, **kwargs):
tempdir = tempfile.mkdtemp()
proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir)
(stdoutdata, error) = proc.communicate()
- success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
+ if proc.returncode < 0:
+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
returncode = proc.returncode
if success:
__salt__["cp.push_dir"](tempdir)
@@ -202,7 +204,9 @@ def xccdf(params):
tempdir = tempfile.mkdtemp()
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
(stdoutdata, error) = proc.communicate()
- success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
+ if proc.returncode < 0:
+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
returncode = proc.returncode
if success:
__salt__["cp.push_dir"](tempdir)
--
2.32.0

View File

@ -1,4 +1,4 @@
From 30a2c8c042f0fe57253a8ab47220d897bc89bd17 Mon Sep 17 00:00:00 2001
From 9413059223107924c6594e6c72e50fcbcc441e60 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
Date: Thu, 24 Jun 2021 13:17:13 +0300
Subject: [PATCH] Fix exception in yumpkg.remove for not installed
@ -6,14 +6,14 @@ Subject: [PATCH] Fix exception in yumpkg.remove for not installed
---
salt/modules/yumpkg.py | 2 ++
tests/unit/modules/test_yumpkg.py | 25 +++++++++++++++++++++++++
2 files changed, 27 insertions(+)
tests/pytests/unit/modules/test_yumpkg.py | 37 +++++++++++++++++++++++
2 files changed, 39 insertions(+)
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index 0fb41a0400..c9f9f2c2d3 100644
index dd81c6f1e9..273f0fb370 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -2051,6 +2051,8 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
@@ -2087,6 +2087,8 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
old = list_pkgs()
targets = []
for target in pkg_params:
@ -22,21 +22,20 @@ index 0fb41a0400..c9f9f2c2d3 100644
version_to_remove = pkg_params[target]
installed_versions = old[target].split(",")
diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
index e22c0b9251..373d2e09cb 100644
--- a/tests/unit/modules/test_yumpkg.py
+++ b/tests/unit/modules/test_yumpkg.py
@@ -1099,6 +1099,31 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
diff --git a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py
index 7e3ed517ea..b5572db123 100644
--- a/tests/pytests/unit/modules/test_yumpkg.py
+++ b/tests/pytests/unit/modules/test_yumpkg.py
@@ -1219,6 +1219,43 @@ def test_install_error_reporting():
assert exc_info.value.info == expected, exc_info.value.info
+ def test_remove_not_existing(self):
+def test_remove_not_installed():
+ """
+ Test if no exception on removing not installed package
+ Tests that no exception raised on removing not installed package
+ """
+ name = "foo"
+ def list_pkgs_mock():
+ return {}
+ list_pkgs_mock = MagicMock(return_value={})
+ cmd_mock = MagicMock(
+ return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
+ )
@ -51,14 +50,27 @@ index e22c0b9251..373d2e09cb 100644
+ "salt.utils.systemd.has_scope", MagicMock(return_value=False)
+ ), patch.dict(yumpkg.__salt__, salt_mock):
+
+ with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
+ # Test yum
+ with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
+ yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
+ ):
+ yumpkg.remove(name)
+ cmd_mock.assert_not_called()
+
def test_install_with_epoch(self):
"""
Tests that we properly identify a version containing an epoch as an
+ # Test dnf
+ yumpkg.__context__.pop("yum_bin")
+ cmd_mock.reset_mock()
+ with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
+ yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
+ ):
+ yumpkg.remove(name)
+ cmd_mock.assert_not_called()
+
+
def test_upgrade_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
--
2.32.0
2.33.0

View File

@ -1,196 +0,0 @@
From 3b96edd8d23c65c6788a9980114a7e1c220c9640 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 4 Oct 2019 15:00:50 +0100
Subject: [PATCH] Fix failing unit tests for batch async
---
salt/cli/batch_async.py | 2 +-
tests/unit/cli/test_batch_async.py | 66 +++++++++++++++++-------------
2 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 89405ba917..b2d04f9d4d 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -91,7 +91,7 @@ class BatchAsync:
keep_loop=True,
)
self.scheduled = False
- self.patterns = {}
+ self.patterns = set()
def __set_event_handler(self):
ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index 66332a548a..c18b42be57 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -61,8 +61,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
ret = self.batch.start()
# assert start_batch is called later with batch_presence_ping_timeout as param
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.batch_presence_ping_timeout, self.batch.start_batch),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.start_batch,),
)
# assert test.ping called
self.assertEqual(
@@ -81,8 +81,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
ret = self.batch.start()
# assert start_batch is called later with gather_job_timeout as param
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.opts["gather_job_timeout"], self.batch.start_batch),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.start_batch,),
)
def test_batch_fire_start_event(self):
@@ -107,12 +107,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
def test_start_batch_calls_next(self):
self.batch.run_next = MagicMock(return_value=MagicMock())
self.batch.event = MagicMock()
- future = tornado.gen.Future()
- future.set_result(None)
- self.batch.run_next = MagicMock(return_value=future)
self.batch.start_batch()
self.assertEqual(self.batch.initialized, True)
- self.assertEqual(len(self.batch.run_next.mock_calls), 1)
+ self.assertEqual(
+ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,)
+ )
def test_batch_fire_done_event(self):
self.batch.targeted_minions = {"foo", "baz", "bar"}
@@ -147,14 +146,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({"minions": ["foo", "bar"]})
self.batch.local.run_job_async.return_value = future
- ret = self.batch.run_next().result()
+ self.batch.run_next()
self.assertEqual(
self.batch.local.run_job_async.call_args[0],
({"foo", "bar"}, "my.fun", [], "list"),
)
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.opts["timeout"], self.batch.find_job, {"foo", "bar"}),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.find_job, {"foo", "bar"}),
)
self.assertEqual(self.batch.active, {"bar", "foo"})
@@ -249,15 +248,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.assertEqual(self.batch.active, set())
self.assertEqual(self.batch.done_minions, {"foo"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.batch_delay, self.batch.run_next),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.schedule_next,),
)
def test_batch__event_handler_find_job_return(self):
self.batch.event = MagicMock(
- unpack=MagicMock(return_value=("salt/job/1236/ret/foo", {"id": "foo"}))
+ unpack=MagicMock(
+ return_value=(
+ "salt/job/1236/ret/foo",
+ {"id": "foo", "return": "deadbeaf"},
+ )
+ )
)
self.batch.start()
+ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return"))
self.batch._BatchAsync__event_handler(MagicMock())
self.assertEqual(self.batch.find_job_returned, {"foo"})
@@ -274,14 +279,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
+ self.batch.minions = {"foo", "bar"}
+ self.batch.jid_gen = MagicMock(return_value="1234")
+ tornado.gen.sleep = MagicMock(return_value=future)
self.batch.find_job({"foo", "bar"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (
- self.batch.opts["gather_job_timeout"],
- self.batch.check_find_job,
- {"foo", "bar"},
- ),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.check_find_job, {"foo", "bar"}, "1234"),
)
@tornado.testing.gen_test
@@ -291,17 +295,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
future = tornado.gen.Future()
future.set_result({})
self.batch.local.run_job_async.return_value = future
+ self.batch.minions = {"foo", "bar"}
+ self.batch.jid_gen = MagicMock(return_value="1234")
+ tornado.gen.sleep = MagicMock(return_value=future)
self.batch.find_job({"foo", "bar"})
self.assertEqual(
- self.batch.event.io_loop.call_later.call_args[0],
- (self.batch.opts["gather_job_timeout"], self.batch.check_find_job, {"foo"}),
+ self.batch.event.io_loop.spawn_callback.call_args[0],
+ (self.batch.check_find_job, {"foo"}, "1234"),
)
def test_batch_check_find_job_did_not_return(self):
self.batch.event = MagicMock()
self.batch.active = {"foo"}
self.batch.find_job_returned = set()
- self.batch.check_find_job({"foo"})
+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+ self.batch.check_find_job({"foo"}, jid="1234")
self.assertEqual(self.batch.find_job_returned, set())
self.assertEqual(self.batch.active, set())
self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0)
@@ -309,9 +317,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
def test_batch_check_find_job_did_return(self):
self.batch.event = MagicMock()
self.batch.find_job_returned = {"foo"}
- self.batch.check_find_job({"foo"})
+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+ self.batch.check_find_job({"foo"}, jid="1234")
self.assertEqual(
- self.batch.event.io_loop.add_callback.call_args[0],
+ self.batch.event.io_loop.spawn_callback.call_args[0],
(self.batch.find_job, {"foo"}),
)
@@ -332,7 +341,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# both not yet done but only 'foo' responded to find_job
not_done = {"foo", "bar"}
- self.batch.check_find_job(not_done)
+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+ self.batch.check_find_job(not_done, jid="1234")
# assert 'bar' removed from active
self.assertEqual(self.batch.active, {"foo"})
@@ -342,7 +352,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
# assert 'find_job' schedueled again only for 'foo'
self.assertEqual(
- self.batch.event.io_loop.add_callback.call_args[0],
+ self.batch.event.io_loop.spawn_callback.call_args[0],
(self.batch.find_job, {"foo"}),
)
@@ -350,4 +360,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.event = MagicMock()
self.batch.scheduled = True
self.batch.schedule_next()
- self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0)
+ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
--
2.29.2

View File

@ -1,80 +0,0 @@
From 74d8f5f2d896e5e8bbf7d3fb614ae32f2cf489a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 11 Aug 2021 11:44:54 +0100
Subject: [PATCH] Fix failing unit tests for systemd
---
tests/unit/modules/test_systemd_service.py | 24 ++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/tests/unit/modules/test_systemd_service.py b/tests/unit/modules/test_systemd_service.py
index bbd89bb3d0..51be130d29 100644
--- a/tests/unit/modules/test_systemd_service.py
+++ b/tests/unit/modules/test_systemd_service.py
@@ -165,21 +165,27 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
# systemd < 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("foo.service"))
# systemd >= 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("bar.service"))
# systemd < 231 with retcode/output changes backported (e.g. RHEL 7.3)
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("bar.service"))
@@ -191,21 +197,27 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
# systemd < 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("foo.service"))
# systemd >= 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("bar.service"))
# systemd < 231 with retcode/output changes backported (e.g. RHEL 7.3)
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
- with patch.object(systemd, "_systemctl_status", mock):
+ with patch.object(systemd, "_systemctl_status", mock), patch.object(
+ systemd, "offline", MagicMock(return_value=False)
+ ):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("bar.service"))
--
2.32.0

View File

@ -1,31 +0,0 @@
From b4f54187ae7d231250f72244ffd874cc2c846150 Mon Sep 17 00:00:00 2001
From: Jochen Breuer <jbreuer@suse.de>
Date: Thu, 28 Nov 2019 15:23:36 +0100
Subject: [PATCH] Fix for log checking in x509 test
We are logging in debug and not in trace mode here.
---
tests/unit/modules/test_x509.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/unit/modules/test_x509.py b/tests/unit/modules/test_x509.py
index 40aea12272..e7503395eb 100644
--- a/tests/unit/modules/test_x509.py
+++ b/tests/unit/modules/test_x509.py
@@ -127,9 +127,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin):
subj = FakeSubject()
x509._parse_subject(subj)
- assert x509.log.trace.call_args[0][0] == "Missing attribute '%s'. Error: %s"
- assert x509.log.trace.call_args[0][1] == list(subj.nid.keys())[0]
- assert isinstance(x509.log.trace.call_args[0][2], TypeError)
+ assert x509.log.debug.call_args[0][0] == "Missing attribute '%s'. Error: %s"
+ assert x509.log.debug.call_args[0][1] == list(subj.nid.keys())[0]
+ assert isinstance(x509.log.debug.call_args[0][2], TypeError)
@skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable")
def test_get_pem_entry(self):
--
2.29.2

File diff suppressed because it is too large Load Diff

View File

@ -1,42 +0,0 @@
From 33766e59bd53fac2c75e6ccfa1f363e2f7b1b65f Mon Sep 17 00:00:00 2001
From: Jochen Breuer <jbreuer@suse.de>
Date: Mon, 16 Mar 2020 15:25:42 +0100
Subject: [PATCH] Fix for temp folder definition in loader unit test
---
tests/unit/test_loader.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
index 863e2182b9..5b23ad83e3 100644
--- a/tests/unit/test_loader.py
+++ b/tests/unit/test_loader.py
@@ -240,12 +240,12 @@ class LazyLoaderUtilsTest(TestCase):
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts["grains"] = salt.loader.grains(cls.opts)
- if not os.path.isdir(TMP):
- os.makedirs(TMP)
+ if not os.path.isdir(RUNTIME_VARS.TMP):
+ os.makedirs(RUNTIME_VARS.TMP)
def setUp(self):
# Setup the module
- self.module_dir = tempfile.mkdtemp(dir=TMP)
+ self.module_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.module_file = os.path.join(
self.module_dir, "{}.py".format(self.module_name)
)
@@ -254,7 +254,7 @@ class LazyLoaderUtilsTest(TestCase):
fh.flush()
os.fsync(fh.fileno())
- self.utils_dir = tempfile.mkdtemp(dir=TMP)
+ self.utils_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.utils_file = os.path.join(self.utils_dir, "{}.py".format(self.utils_name))
with salt.utils.files.fopen(self.utils_file, "w") as fh:
fh.write(salt.utils.stringutils.to_str(loader_template_utils))
--
2.29.2

View File

@ -1,86 +0,0 @@
From f5c9527aeee190a66a908037770c80a75e911d8c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Tue, 6 Nov 2018 16:38:54 +0000
Subject: [PATCH] Fix git_pillar merging across multiple __env__
repositories (bsc#1112874)
Resolve target branch when using __env__
Test git ext_pillar across multiple repos using __env__
Remove unicode references
---
tests/integration/pillar/test_git_pillar.py | 55 +++++++++++++++++++++
1 file changed, 55 insertions(+)
diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
index c0362127f6..979dfebb94 100644
--- a/tests/integration/pillar/test_git_pillar.py
+++ b/tests/integration/pillar/test_git_pillar.py
@@ -1600,6 +1600,61 @@ class TestPygit2SSH(GitPillarSSHTestBase):
},
)
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(_windows_or_mac(), "minion is windows or mac")
+@skip_if_not_root
+@skipIf(
+ not HAS_PYGIT2,
+ "pygit2 >= {} and libgit2 >= {} required".format(PYGIT2_MINVER, LIBGIT2_MINVER),
+)
+@skipIf(not HAS_NGINX, "nginx not present")
+@skipIf(not HAS_VIRTUALENV, "virtualenv not present")
+class TestPygit2HTTP(GitPillarHTTPTestBase):
+ """
+ Test git_pillar with pygit2 using SSH authentication
+ """
+
+ def test_single_source(self):
+ """
+ Test with git_pillar_includes enabled and using "__env__" as the branch
+ name for the configured repositories.
+ The "gitinfo" repository contains top.sls file with a local reference
+ and also referencing external "nowhere.foo" which is provided by "webinfo"
+ repository mounted as "nowhere".
+ """
+ ret = self.get_pillar(
+ """\
+ file_ignore_regex: []
+ file_ignore_glob: []
+ git_pillar_provider: pygit2
+ git_pillar_pubkey: {pubkey_nopass}
+ git_pillar_privkey: {privkey_nopass}
+ cachedir: {cachedir}
+ extension_modules: {extmods}
+ ext_pillar:
+ - git:
+ - __env__ {url_extra_repo}:
+ - name: gitinfo
+ - __env__ {url}:
+ - name: webinfo
+ - mountpoint: nowhere
+ """
+ )
+ self.assertEqual(
+ ret,
+ {
+ "branch": "master",
+ "motd": "The force will be with you. Always.",
+ "mylist": ["master"],
+ "mydict": {
+ "master": True,
+ "nested_list": ["master"],
+ "nested_dict": {"master": True},
+ },
+ },
+ )
+
@slowTest
def test_root_parameter(self):
"""
--
2.29.2

View File

@ -1,43 +0,0 @@
From e2ff2f339ce7938ecdadf867f285a559bc2431dd Mon Sep 17 00:00:00 2001
From: Dominik Gedon <dgedon@suse.de>
Date: Tue, 6 Oct 2020 14:00:55 +0200
Subject: [PATCH] Fix grains.test_core unit test (#277)
This reverts 63b94ae and fixes the grains test_core unit test. The
changes are aligned with upstream.
---
tests/unit/grains/test_core.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 918a9155cb..15de4e363e 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -60,11 +60,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
with salt.utils.files.fopen(
os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")
) as os_release_file:
- os_release_content = os_release_file.readlines()
- with patch("salt.utils.files.fopen", mock_open()) as os_release_file:
- os_release_file.return_value.__iter__.return_value = os_release_content
+ os_release_content = os_release_file.read()
+ with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)):
os_release = core._parse_os_release(
- ["/etc/os-release", "/usr/lib/os-release"]
+ "/etc/os-release", "/usr/lib/os-release"
)
self.assertEqual(
os_release,
@@ -174,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
def test_missing_os_release(self):
with patch("salt.utils.files.fopen", mock_open(read_data={})):
os_release = core._parse_os_release(
- ["/etc/os-release", "/usr/lib/os-release"]
+ "/etc/os-release", "/usr/lib/os-release"
)
self.assertEqual(os_release, {})
--
2.29.2

View File

@ -1,164 +0,0 @@
From 082bb6a25b2b025a5c7c6fdbf7dbcbe64a39da2c Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Fri, 28 Sep 2018 15:22:33 +0200
Subject: [PATCH] Fix IPv6 scope (bsc#1108557)
Fix ipaddress imports
Remove unused import
Fix ipaddress import
Fix unicode imports in compat
Override standard IPv6Address class
Check version via object
Isolate Py2 and Py3 mode
Add logging
Add debugging to the ip_address method (py2 and py3)
Remove multiple returns and add check for address syntax
Remove unnecessary variable for import detection
Remove duplicated code
Remove unnecessary operator
Remove multiple returns
Use ternary operator instead
Remove duplicated code
Move docstrings to their native places
Add real exception message
Add logging to the ip_interface
Add scope on str
Lintfix: mute not called constructors
Add extra detection for hexadecimal packed bytes on Python2. This cannot be detected with type comparison, because bytes == str and at the same time bytes != str if compatibility is not around
Fix py2 case where the same class cannot initialise itself on Python2 via super.
Simplify checking clause
Do not use introspection for method swap
Fix wrong type swap
Add Py3.4 old implementation's fix
Lintfix
Lintfix refactor: remove duplicate returns as not needed
Revert method remapping with pylint updates
Remove unnecessary manipulation with IPv6 scope outside of the IPv6Address object instance
Lintfix: W0611
Reverse skipping tests: if no ipaddress
---
salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
diff --git a/salt/_compat.py b/salt/_compat.py
index 011eb8af9e..d9425523cf 100644
--- a/salt/_compat.py
+++ b/salt/_compat.py
@@ -242,7 +242,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
self.hostmask = self.network.hostmask
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return ipaddress.IPv4Address(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+ log.debug('Error while parsing IPv4 address: %s', address)
+ log.debug(err)
+
+ try:
+ return IPv6AddressScoped(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+ log.debug('Error while parsing IPv6 address: %s', address)
+ log.debug(err)
+
+ if isinstance(address, bytes):
+ raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
+ 'Did you pass in a bytes (str in Python 2) instead '
+ 'of a unicode object?'.format(repr(address)))
+
+ raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return ipaddress.IPv4Interface(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+ log.debug('Error while getting IPv4 interface for address %s', address)
+ log.debug(err)
+
+ try:
+ return ipaddress.IPv6Interface(address)
+ except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
+ log.debug('Error while getting IPv6 interface for address %s', address)
+ log.debug(err)
+
+ raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
+
+
if ipaddress:
ipaddress.IPv6Address = IPv6AddressScoped
if sys.version_info.major == 2:
ipaddress.IPv6Interface = IPv6InterfaceScoped
+ ipaddress.ip_address = ip_address
+ ipaddress.ip_interface = ip_interface
--
2.29.2

View File

@ -1,44 +0,0 @@
From cc017f6ed279af7fe02c890e4a7725e6903f364d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 26 Apr 2021 12:13:59 +0100
Subject: [PATCH] Fix issue parsing errors in ansiblegate state module
---
salt/states/ansiblegate.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py
index 5daba0f37f..bd00653928 100644
--- a/salt/states/ansiblegate.py
+++ b/salt/states/ansiblegate.py
@@ -183,7 +183,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
checks = __salt__["ansible.playbooks"](
name, rundir=rundir, check=True, diff=True, **ansible_kwargs
)
- if all(
+ if "stats" not in checks:
+ ret["comment"] = checks.get("stderr", checks)
+ ret["result"] = False
+ ret["changes"] = {}
+ elif all(
not check["changed"]
and not check["failures"]
and not check["unreachable"]
@@ -212,7 +216,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
results = __salt__["ansible.playbooks"](
name, rundir=rundir, diff=True, **ansible_kwargs
)
- if all(
+ if "stats" not in results:
+ ret["comment"] = results.get("stderr", results)
+ ret["result"] = False
+ ret["changes"] = {}
+ elif all(
not check["changed"]
and not check["failures"]
and not check["unreachable"]
--
2.31.1

View File

@ -1,189 +0,0 @@
From 00c538383e463febba492e74577ae64be80d4d00 Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Mon, 16 Sep 2019 11:27:30 +0200
Subject: [PATCH] Fix memory leak produced by batch async find_jobs
mechanism (bsc#1140912)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Multiple fixes:
- use different JIDs per find_job
- fix bug in detection of find_job returns
- fix timeout passed from request payload
- better cleanup at the end of batching
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
---
salt/cli/batch_async.py | 59 ++++++++++++++++++++++++++++-------------
salt/client/__init__.py | 1 +
salt/master.py | 2 --
3 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 7225491228..388b709416 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -73,6 +73,7 @@ class BatchAsync:
self.done_minions = set()
self.active = set()
self.initialized = False
+ self.jid_gen = jid_gen
self.ping_jid = jid_gen()
self.batch_jid = jid_gen()
self.find_job_jid = jid_gen()
@@ -91,14 +92,11 @@ class BatchAsync:
def __set_event_handler(self):
ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid)
- find_job_return_pattern = "salt/job/{}/ret/*".format(self.find_job_jid)
self.event.subscribe(ping_return_pattern, match_type="glob")
self.event.subscribe(batch_return_pattern, match_type="glob")
- self.event.subscribe(find_job_return_pattern, match_type="glob")
- self.event.patterns = {
+ self.patterns = {
(ping_return_pattern, "ping_return"),
(batch_return_pattern, "batch_run"),
- (find_job_return_pattern, "find_job_return"),
}
self.event.set_event_handler(self.__event_handler)
@@ -106,7 +104,7 @@ class BatchAsync:
if not self.event:
return
mtag, data = self.event.unpack(raw, self.event.serial)
- for (pattern, op) in self.event.patterns:
+ for (pattern, op) in self.patterns:
if fnmatch.fnmatch(mtag, pattern):
minion = data["id"]
if op == "ping_return":
@@ -114,7 +112,8 @@ class BatchAsync:
if self.targeted_minions == self.minions:
self.event.io_loop.spawn_callback(self.start_batch)
elif op == "find_job_return":
- self.find_job_returned.add(minion)
+ if data.get("return", None):
+ self.find_job_returned.add(minion)
elif op == "batch_run":
if minion in self.active:
self.active.remove(minion)
@@ -134,7 +133,11 @@ class BatchAsync:
return set(list(to_run)[:next_batch_size])
@tornado.gen.coroutine
- def check_find_job(self, batch_minions):
+ def check_find_job(self, batch_minions, jid):
+ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+ self.event.unsubscribe(find_job_return_pattern, match_type="glob")
+ self.patterns.remove((find_job_return_pattern, "find_job_return"))
+
timedout_minions = batch_minions.difference(self.find_job_returned).difference(
self.done_minions
)
@@ -143,27 +146,39 @@ class BatchAsync:
running = batch_minions.difference(self.done_minions).difference(
self.timedout_minions
)
+
if timedout_minions:
self.schedule_next()
+
if running:
+ self.find_job_returned = self.find_job_returned.difference(running)
self.event.io_loop.add_callback(self.find_job, running)
@tornado.gen.coroutine
def find_job(self, minions):
- not_done = minions.difference(self.done_minions)
- ping_return = yield self.local.run_job_async(
- not_done,
- "saltutil.find_job",
- [self.batch_jid],
- "list",
- gather_job_timeout=self.opts["gather_job_timeout"],
- jid=self.find_job_jid,
- **self.eauth
- )
- self.event.io_loop.call_later(
- self.opts["gather_job_timeout"], self.check_find_job, not_done
+ not_done = minions.difference(self.done_minions).difference(
+ self.timedout_minions
)
+ if not_done:
+ jid = self.jid_gen()
+ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
+ self.patterns.add((find_job_return_pattern, "find_job_return"))
+ self.event.subscribe(find_job_return_pattern, match_type="glob")
+
+ ret = yield self.local.run_job_async(
+ not_done,
+ "saltutil.find_job",
+ [self.batch_jid],
+ "list",
+ gather_job_timeout=self.opts["gather_job_timeout"],
+ jid=jid,
+ **self.eauth
+ )
+ self.event.io_loop.call_later(
+ self.opts["gather_job_timeout"], self.check_find_job, not_done, jid
+ )
+
@tornado.gen.coroutine
def start(self):
self.__set_event_handler()
@@ -211,6 +226,9 @@ class BatchAsync:
}
self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
self.event.remove_event_handler(self.__event_handler)
+ for (pattern, label) in self.patterns:
+ if label in ["ping_return", "batch_run"]:
+ self.event.unsubscribe(pattern, match_type="glob")
def schedule_next(self):
if not self.scheduled:
@@ -235,11 +253,14 @@ class BatchAsync:
jid=self.batch_jid,
metadata=self.metadata,
)
+
self.event.io_loop.call_later(
self.opts["timeout"], self.find_job, set(next_batch)
)
except Exception as ex:
+ log.error("Error in scheduling next batch: %s", ex)
self.active = self.active.difference(next_batch)
else:
self.end_batch()
self.scheduled = False
+ yield
diff --git a/salt/client/__init__.py b/salt/client/__init__.py
index 1e9f11df4c..cc8fd4048d 100644
--- a/salt/client/__init__.py
+++ b/salt/client/__init__.py
@@ -1776,6 +1776,7 @@ class LocalClient:
"key": self.key,
"tgt_type": tgt_type,
"ret": ret,
+ "timeout": timeout,
"jid": jid,
}
diff --git a/salt/master.py b/salt/master.py
index b9bc1a7a67..7a99af357a 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -2232,8 +2232,6 @@ class ClearFuncs(TransportMethods):
def publish_batch(self, clear_load, minions, missing):
batch_load = {}
batch_load.update(clear_load)
- import salt.cli.batch_async
-
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
--
2.29.2

View File

@ -1,191 +0,0 @@
From 4123cf7b9428af1442f4aa0a54489e5c0deb4aaa Mon Sep 17 00:00:00 2001
From: Martin Seidl <mseidl@suse.de>
Date: Tue, 27 Oct 2020 16:12:29 +0100
Subject: [PATCH] Fix novendorchange option (#284)
* Fixed novendorchange handling in zypperpkg
* refactor handling of novendorchange and fix tests
---
salt/modules/zypperpkg.py | 21 +++---
tests/unit/modules/test_zypperpkg.py | 100 ++++++++++++++++++++++++++-
2 files changed, 108 insertions(+), 13 deletions(-)
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 5369a0342e..d06c265202 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -1707,7 +1707,7 @@ def upgrade(
dryrun=False,
dist_upgrade=False,
fromrepo=None,
- novendorchange=False,
+ novendorchange=True,
skip_verify=False,
no_recommends=False,
root=None,
@@ -1794,19 +1794,18 @@ def upgrade(
log.info("Targeting repos: %s", fromrepo)
if dist_upgrade:
- if novendorchange:
- # TODO: Grains validation should be moved to Zypper class
- if __grains__["osrelease_info"][0] > 11:
+ # TODO: Grains validation should be moved to Zypper class
+ if __grains__["osrelease_info"][0] > 11:
+ if novendorchange:
cmd_update.append("--no-allow-vendor-change")
log.info("Disabling vendor changes")
else:
- log.warning(
- "Disabling vendor changes is not supported on this Zypper version"
- )
-
- if no_recommends:
- cmd_update.append("--no-recommends")
- log.info("Disabling recommendations")
+ cmd_update.append("--allow-vendor-change")
+ log.info("Enabling vendor changes")
+ else:
+ log.warning(
+ "Enabling/Disabling vendor changes is not supported on this Zypper version"
+ )
if no_recommends:
cmd_update.append("--no-recommends")
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index a60b1546c6..eaa4d9a76a 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -642,7 +642,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
ret = zypper.upgrade(dist_upgrade=True)
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
zypper_mock.assert_any_call(
- "dist-upgrade", "--auto-agree-with-licenses"
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--no-allow-vendor-change",
)
with patch(
@@ -660,6 +662,76 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"--debug-solver",
)
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
+ ):
+ ret = zypper.upgrade(
+ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False
+ )
+ zypper_mock.assert_any_call(
+ "update",
+ "--auto-agree-with-licenses",
+ "--repo",
+ "Dummy",
+ "--repo",
+ "Dummy2",
+ )
+
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
+ ):
+ ret = zypper.upgrade(
+ dist_upgrade=True,
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=True,
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--no-allow-vendor-change",
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--no-allow-vendor-change",
+ )
+
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
+ ):
+ ret = zypper.upgrade(
+ dist_upgrade=True,
+ dryrun=True,
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=False,
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--allow-vendor-change",
+ )
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--dry-run",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--allow-vendor-change",
+ "--debug-solver",
+ )
+
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
@@ -728,6 +800,26 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
"--no-allow-vendor-change",
)
+ with patch(
+ "salt.modules.zypperpkg.list_pkgs",
+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
+ ):
+ ret = zypper.upgrade(
+ dist_upgrade=True,
+ fromrepo=["Dummy", "Dummy2"],
+ novendorchange=False,
+ )
+ self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
+ zypper_mock.assert_any_call(
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--from",
+ "Dummy",
+ "--from",
+ "Dummy2",
+ "--allow-vendor-change",
+ )
+
def test_upgrade_kernel(self):
"""
Test kernel package upgrade success.
@@ -815,7 +907,11 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.assertEqual(cmd_exc.exception.info["changes"], {})
self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
zypper_mock.noraise.call.assert_called_with(
- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY"
+ "dist-upgrade",
+ "--auto-agree-with-licenses",
+ "--from",
+ "DUMMY",
+ "--no-allow-vendor-change",
)
def test_upgrade_available(self):
--
2.29.2

View File

@ -1,287 +0,0 @@
From 828ca76e2083d87ace12b488277e51d4e30c8c9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 21 Jan 2021 11:19:07 +0000
Subject: [PATCH] Fix onlyif/unless when multiple conditions
(bsc#1180818)
Add unit tests to ensure right onlyif/unless behavior
Add extra unit test to cover valid cases
Add unit tests cases to ensure proper onlyif/unless behavior
Change tests to use 'exit' cmd and work outside Linux
---
salt/state.py | 20 ++++--
tests/unit/test_state.py | 148 ++++++++++++++++++++++++++++++++++++++-
2 files changed, 163 insertions(+), 5 deletions(-)
diff --git a/salt/state.py b/salt/state.py
index cc6db7e1b2..070a914636 100644
--- a/salt/state.py
+++ b/salt/state.py
@@ -947,8 +947,10 @@ class State:
"result": True,
}
)
+ return False
elif cmd == 0:
ret.update({"comment": "onlyif condition is true", "result": False})
+ return True
for entry in low_data_onlyif:
if isinstance(entry, str):
@@ -960,7 +962,8 @@ class State:
# Command failed, notify onlyif to skip running the item
cmd = 100
log.debug("Last command return code: %s", cmd)
- _check_cmd(cmd)
+ if not _check_cmd(cmd):
+ return ret
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in onlyif: {}".format(entry)
@@ -972,7 +975,8 @@ class State:
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
- _check_cmd(self.state_con["retcode"])
+ if not _check_cmd(self.state_con["retcode"]):
+ return ret
elif not result:
ret.update(
{
@@ -981,6 +985,7 @@ class State:
"result": True,
}
)
+ return ret
else:
ret.update({"comment": "onlyif condition is true", "result": False})
@@ -991,6 +996,7 @@ class State:
"result": False,
}
)
+ return ret
return ret
def _run_check_unless(self, low_data, cmd_opts):
@@ -1013,8 +1019,10 @@ class State:
"result": True,
}
)
+ return False
elif cmd != 0:
ret.update({"comment": "unless condition is false", "result": False})
+ return True
for entry in low_data_unless:
if isinstance(entry, str):
@@ -1026,7 +1034,8 @@ class State:
except CommandExecutionError:
# Command failed, so notify unless to skip the item
cmd = 0
- _check_cmd(cmd)
+ if not _check_cmd(cmd):
+ return ret
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in unless: {}".format(entry)
@@ -1038,7 +1047,8 @@ class State:
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
- _check_cmd(self.state_con["retcode"])
+ if not _check_cmd(self.state_con["retcode"]):
+ return ret
elif result:
ret.update(
{
@@ -1047,6 +1057,7 @@ class State:
"result": True,
}
)
+ return ret
else:
ret.update(
{"comment": "unless condition is false", "result": False}
@@ -1058,6 +1069,7 @@ class State:
"result": False,
}
)
+ return ret
# No reason to stop, return ret
return ret
diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py
index b1bcf8fe83..95018a9cf3 100644
--- a/tests/unit/test_state.py
+++ b/tests/unit/test_state.py
@@ -205,6 +205,152 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
)
self.assertEqual(expected_result, return_result)
+ def test_verify_unless_list_cmd(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check unless",
+ "unless": ["exit 0", "exit 1"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {
+ "comment": "unless condition is true",
+ "result": True,
+ "skip_watch": True,
+ }
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_unless(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_unless_list_cmd_different_order(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check unless",
+ "unless": ["exit 1", "exit 0"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {
+ "comment": "unless condition is true",
+ "result": True,
+ "skip_watch": True,
+ }
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_unless(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_onlyif_list_cmd_different_order(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check onlyif",
+ "onlyif": ["exit 1", "exit 0"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {
+ "comment": "onlyif condition is false",
+ "result": True,
+ "skip_watch": True,
+ }
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_onlyif(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_unless_list_cmd_valid(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check unless",
+ "unless": ["exit 1", "exit 1"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {"comment": "unless condition is false", "result": False}
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_unless(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_onlyif_list_cmd_valid(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check onlyif",
+ "onlyif": ["exit 0", "exit 0"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {"comment": "onlyif condition is true", "result": False}
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_onlyif(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_unless_list_cmd_invalid(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check unless",
+ "unless": ["exit 0", "exit 0"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {
+ "comment": "unless condition is true",
+ "result": True,
+ "skip_watch": True,
+ }
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_unless(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
+ def test_verify_onlyif_list_cmd_invalid(self):
+ low_data = {
+ "state": "cmd",
+ "name": 'echo "something"',
+ "__sls__": "tests.cmd",
+ "__env__": "base",
+ "__id__": "check onlyif",
+ "onlyif": ["exit 1", "exit 1"],
+ "order": 10001,
+ "fun": "run",
+ }
+ expected_result = {
+ "comment": "onlyif condition is false",
+ "result": True,
+ "skip_watch": True,
+ }
+ with patch("salt.state.State._gather_pillar") as state_patch:
+ minion_opts = self.get_temp_config("minion")
+ state_obj = salt.state.State(minion_opts)
+ return_result = state_obj._run_check_onlyif(low_data, {})
+ self.assertEqual(expected_result, return_result)
+
def test_verify_unless_parse(self):
low_data = {
"unless": [{"fun": "test.arg", "args": ["arg1", "arg2"]}],
@@ -376,7 +522,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
"__sls__": "tests.cmd",
"__env__": "base",
"__id__": "check onlyif",
- "onlyif": ["/bin/true", "/bin/false"],
+ "onlyif": ["exit 0", "exit 1"],
"order": 10001,
"fun": "run",
}
--
2.29.2

View File

@ -1,29 +0,0 @@
From d8538a57553d94290870671db1d5a4fcd4d7e709 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Fri, 26 Feb 2021 09:15:03 +0000
Subject: [PATCH] Fix regression on cmd.run when passing tuples as cmd
(bsc#1182740)
(cherry picked from commit 9a76246adedb60e24a75682077654a352a965cb9)
---
salt/modules/cmdmod.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
index bbc303c3f8..f24e7cc9ae 100644
--- a/salt/modules/cmdmod.py
+++ b/salt/modules/cmdmod.py
@@ -78,7 +78,7 @@ def __virtual__():
def _log_cmd(cmd):
- if not isinstance(cmd, list):
+ if isinstance(cmd, str):
return cmd.split()[0].strip()
return cmd[0].strip()
--
2.30.1

View File

@ -1,180 +0,0 @@
From 01e2e60a5aba609d219b73f1018f12517a294a64 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cbosdonnat@suse.com>
Date: Tue, 15 Sep 2020 13:46:06 +0200
Subject: [PATCH] Fix the removed six.itermitems and six.*_type* (#262)
* Fix the removed six.itermitems and six.*_type*
Upstream py2 to py3 cleanup tool removes a bunch of six calls that we
still need when backporting since our Salt minion might still be running
on python 2.7.
* fixup! Fix the removed six.itermitems and six.*_type*
---
salt/_compat.py | 25 ++++++++++++++++---------
salt/modules/virt.py | 11 ++++-------
salt/states/virt.py | 1 +
salt/utils/xmlutil.py | 3 ++-
tests/unit/modules/test_virt.py | 2 +-
5 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/salt/_compat.py b/salt/_compat.py
index d9425523cf..de100de3fa 100644
--- a/salt/_compat.py
+++ b/salt/_compat.py
@@ -7,6 +7,7 @@ Salt compatibility code
import binascii
import logging
import sys
+import xml.sax.saxutils as saxutils
from salt.exceptions import SaltException
from salt.ext.six import binary_type, integer_types, string_types, text_type
@@ -261,21 +262,25 @@ def ip_address(address):
try:
return ipaddress.IPv4Address(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
- log.debug('Error while parsing IPv4 address: %s', address)
+ log.debug("Error while parsing IPv4 address: %s", address)
log.debug(err)
try:
return IPv6AddressScoped(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
- log.debug('Error while parsing IPv6 address: %s', address)
+ log.debug("Error while parsing IPv6 address: %s", address)
log.debug(err)
if isinstance(address, bytes):
- raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
- 'Did you pass in a bytes (str in Python 2) instead '
- 'of a unicode object?'.format(repr(address)))
+ raise ipaddress.AddressValueError(
+ "{} does not appear to be an IPv4 or IPv6 address. "
+ "Did you pass in a bytes (str in Python 2) instead "
+ "of a unicode object?".format(repr(address))
+ )
- raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
+ raise ValueError(
+ "{} does not appear to be an IPv4 or IPv6 address".format(repr(address))
+ )
def ip_interface(address):
@@ -302,16 +307,18 @@ def ip_interface(address):
try:
return ipaddress.IPv4Interface(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
- log.debug('Error while getting IPv4 interface for address %s', address)
+ log.debug("Error while getting IPv4 interface for address %s", address)
log.debug(err)
try:
return ipaddress.IPv6Interface(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
- log.debug('Error while getting IPv6 interface for address %s', address)
+ log.debug("Error while getting IPv6 interface for address %s", address)
log.debug(err)
- raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
+ raise ValueError(
+ "{} does not appear to be an IPv4 or IPv6 interface".format(address)
+ )
if ipaddress:
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index ec40f08359..c042738370 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -88,8 +88,6 @@ import string # pylint: disable=deprecated-module
import subprocess
import sys
import time
-from xml.etree import ElementTree
-from xml.sax import saxutils
import jinja2.exceptions
import salt.utils.files
@@ -99,8 +97,9 @@ import salt.utils.stringutils
import salt.utils.templates
import salt.utils.xmlutil as xmlutil
import salt.utils.yaml
-from salt._compat import ipaddress
+from salt._compat import ElementTree, ipaddress, saxutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
+from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
from salt.utils.virt import check_remote, download_remote
@@ -1516,7 +1515,7 @@ def _handle_remote_boot_params(orig_boot):
"""
saltinst_dir = None
new_boot = orig_boot.copy()
- keys = orig_boot.keys()
+ keys = set(orig_boot.keys())
cases = [
{"efi"},
{"kernel", "initrd", "efi"},
@@ -2559,9 +2558,7 @@ def update(
# Attaching device
if source_file:
- ElementTree.SubElement(
- updated_disk, "source", attrib={"file": source_file}
- )
+ ElementTree.SubElement(updated_disk, "source", file=source_file)
changes["disk"]["new"] = new_disks
diff --git a/salt/states/virt.py b/salt/states/virt.py
index b45cf72ed3..df7ebb63e6 100644
--- a/salt/states/virt.py
+++ b/salt/states/virt.py
@@ -22,6 +22,7 @@ import salt.utils.files
import salt.utils.stringutils
import salt.utils.versions
from salt.exceptions import CommandExecutionError, SaltInvocationError
+from salt.ext import six
try:
import libvirt # pylint: disable=import-error
diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
index b9f047820b..111ca155d4 100644
--- a/salt/utils/xmlutil.py
+++ b/salt/utils/xmlutil.py
@@ -7,6 +7,7 @@ import string # pylint: disable=deprecated-module
from xml.etree import ElementTree
import salt.utils.data
+from salt.ext import six
def _conv_name(x):
@@ -160,7 +161,7 @@ def clean_node(parent_map, node, ignored=None):
has_text = node.text is not None and node.text.strip()
parent = parent_map.get(node)
if (
- len(node.attrib.keys() - (ignored or [])) == 0
+ len(set(node.attrib.keys()) - set(ignored or [])) == 0
and not list(node)
and not has_text
):
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
index 4775fec31f..4a4c0395a7 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
@@ -45,7 +45,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
"""
def __init__(self, msg):
- super().__init__(msg)
+ super(Exception, self).__init__(msg)
self.msg = msg
def get_error_message(self):
--
2.29.2

View File

@ -1,41 +0,0 @@
From 192bac1ae2f20b098384264c8802034a340cd124 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Thu, 11 Oct 2018 16:20:40 +0200
Subject: [PATCH] Fix unit test for grains core
---
tests/unit/grains/test_core.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 34aaa4f5bc..7dbf34deac 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -59,10 +59,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
with salt.utils.files.fopen(
os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")
) as os_release_file:
- os_release_content = os_release_file.read()
- with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)):
+ os_release_content = os_release_file.readlines()
+ with patch("salt.utils.files.fopen", mock_open()) as os_release_file:
+ os_release_file.return_value.__iter__.return_value = os_release_content
os_release = core._parse_os_release(
- "/etc/os-release", "/usr/lib/os-release"
+ ["/etc/os-release", "/usr/lib/os-release"]
)
self.assertEqual(
os_release,
@@ -172,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
def test_missing_os_release(self):
with patch("salt.utils.files.fopen", mock_open(read_data={})):
os_release = core._parse_os_release(
- "/etc/os-release", "/usr/lib/os-release"
+ ["/etc/os-release", "/usr/lib/os-release"]
)
self.assertEqual(os_release, {})
--
2.29.2

View File

@ -1,54 +0,0 @@
From 09a871c197be4933475ee4582755d9b0cb5a700e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Wed, 4 Mar 2020 10:13:43 +0000
Subject: [PATCH] Fix unit tests for batch async after refactor
---
tests/unit/cli/test_batch_async.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index b04965268a..dcee9a87bd 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -120,9 +120,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
self.batch.timedout_minions = {"bar"}
self.batch.event = MagicMock()
self.batch.metadata = {"mykey": "myvalue"}
+ old_event = self.batch.event
self.batch.end_batch()
self.assertEqual(
- self.batch.event.fire_event.call_args[0],
+ old_event.fire_event.call_args[0],
(
{
"available_minions": {"foo", "bar"},
@@ -140,6 +141,23 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
event = MagicMock()
batch.event = event
batch.__del__()
+ self.assertEqual(batch.local, None)
+ self.assertEqual(batch.event, None)
+ self.assertEqual(batch.ioloop, None)
+
+ def test_batch_close_safe(self):
+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+ event = MagicMock()
+ batch.event = event
+ batch.patterns = {
+ ("salt/job/1234/ret/*", "find_job_return"),
+ ("salt/job/4321/ret/*", "find_job_return"),
+ }
+ batch.close_safe()
+ self.assertEqual(batch.local, None)
+ self.assertEqual(batch.event, None)
+ self.assertEqual(batch.ioloop, None)
+ self.assertEqual(len(event.unsubscribe.mock_calls), 2)
self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
@tornado.testing.gen_test
--
2.29.2

View File

@ -1,31 +0,0 @@
From c05d571058b9520dbaf4aba3de001b1aefe8e2c2 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cbosdonnat@suse.com>
Date: Tue, 15 Sep 2020 16:03:30 +0200
Subject: [PATCH] Fix virt.update with cpu defined (#263)
In case the cpu was defined, the rest of the definition wasn't completely
updated.
---
salt/modules/virt.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index c042738370..c1a73fcb7f 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -2441,9 +2441,9 @@ def update(
data = {k: v for k, v in locals().items() if bool(v)}
if boot_dev:
data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
- need_update = need_update or salt.utils.xmlutil.change_xml(
+ need_update = salt.utils.xmlutil.change_xml(
desc, data, params_mapping
- )
+ ) or need_update
# Update the XML definition with the new disks and diff changes
devices_node = desc.find("devices")
--
2.29.2

View File

@ -1,35 +1,35 @@
From f3ac041e34952a4b753e4afc9dc4b6adaa1d0ff2 Mon Sep 17 00:00:00 2001
From e3ef9165b66c3d74a3c3dbfe82ba58f7fa1613e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 12 Mar 2020 13:26:51 +0000
Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test
after rebase
Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test after
rebase
---
tests/integration/modules/test_pkg.py | 63 ++++-----------------------
1 file changed, 8 insertions(+), 55 deletions(-)
tests/integration/modules/test_pkg.py | 34 +++++++++++++++++++++------
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
index 3ece73074b..933755a9ec 100644
index ccf69998fc..6a84ea0bc3 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
@@ -143,6 +143,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
@@ -138,6 +138,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
self.run_function("pkg.del_repo", [repo])
@slowTest
+ @destructiveTest
+ @requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo")
+ @requires_network()
@pytest.mark.slow_test
+ @pytest.mark.destructive_test
+ @pytest.mark.requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo")
+ @pytest.mark.requires_network()
+ @requires_system_grains
def test_mod_del_repo_multiline_values(self):
"""
test modifying and deleting a software repository defined with multiline values
@@ -150,10 +154,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
@@ -145,10 +149,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
os_grain = self.run_function("grains.item", ["os"])["os"]
repo = None
try:
- if os_grain in ["CentOS", "RedHat"]:
+ if os_grain in ["CentOS", "RedHat", "SUSE"]:
- if os_grain in ["CentOS", "RedHat", "VMware Photon OS"]:
+ if os_grain in ["CentOS", "RedHat", "VMware Photon OS", "SUSE"]:
my_baseurl = (
"http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
)
@ -39,68 +39,44 @@ index 3ece73074b..933755a9ec 100644
expected_get_repo_baseurl = (
"http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
)
@@ -207,60 +214,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
if repo is not None:
self.run_function("pkg.del_repo", [repo])
- def test_mod_del_repo_multiline_values(self):
- """
- test modifying and deleting a software repository defined with multiline values
- """
- os_grain = self.run_function("grains.item", ["os"])["os"]
- repo = None
- try:
- if os_grain in ["CentOS", "RedHat", "SUSE"]:
- my_baseurl = (
- "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/"
- )
- expected_get_repo_baseurl_zypp = (
- "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/"
- )
- expected_get_repo_baseurl = (
- "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/"
- )
- major_release = int(
- self.run_function("grains.item", ["osmajorrelease"])[
- "osmajorrelease"
- ]
- )
- repo = "fakerepo"
- name = "Fake repo for RHEL/CentOS/SUSE"
- baseurl = my_baseurl
- gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub"
- failovermethod = "priority"
- gpgcheck = 1
- enabled = 1
- ret = self.run_function(
- "pkg.mod_repo",
- [repo],
- name=name,
- baseurl=baseurl,
- gpgkey=gpgkey,
- gpgcheck=gpgcheck,
- enabled=enabled,
- failovermethod=failovermethod,
- )
@@ -174,17 +181,30 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
enabled=enabled,
failovermethod=failovermethod,
)
- # return data from pkg.mod_repo contains the file modified at
- # the top level, so use next(iter(ret)) to get that key
- self.assertNotEqual(ret, {})
self.assertNotEqual(ret, {})
- repo_info = ret[next(iter(ret))]
- self.assertIn(repo, repo_info)
+ repo_info = {repo: ret}
self.assertIn(repo, repo_info)
- self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
- ret = self.run_function("pkg.get_repo", [repo])
+ if os_grain == "SUSE":
+ self.assertEqual(
+ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
+ )
+ else:
+ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl)
ret = self.run_function("pkg.get_repo", [repo])
- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
- self.run_function("pkg.mod_repo", [repo])
- ret = self.run_function("pkg.get_repo", [repo])
+ if os_grain == "SUSE":
+ self.assertEqual(
+ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
+ )
+ else:
+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
self.run_function("pkg.mod_repo", [repo])
ret = self.run_function("pkg.get_repo", [repo])
- self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
- finally:
- if repo is not None:
- self.run_function("pkg.del_repo", [repo])
-
@requires_salt_modules("pkg.owner")
def test_owner(self):
"""
+ if os_grain == "SUSE":
+ self.assertEqual(
+ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp
+ )
+ else:
+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl)
finally:
if repo is not None:
self.run_function("pkg.del_repo", [repo])
--
2.29.2
2.33.0

View File

@ -1,53 +0,0 @@
From 81f38c8cb16634b2c86b3e1e7c745870f90771d0 Mon Sep 17 00:00:00 2001
From: Mihai Dinca <mdinca@suse.de>
Date: Thu, 13 Jun 2019 17:48:55 +0200
Subject: [PATCH] Fix zypper pkg.list_pkgs expectation and dpkg mocking
---
tests/unit/modules/test_dpkg_lowpkg.py | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py
index 160bbcd5b1..dadbc30dfa 100644
--- a/tests/unit/modules/test_dpkg_lowpkg.py
+++ b/tests/unit/modules/test_dpkg_lowpkg.py
@@ -308,9 +308,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
dpkg.bin_pkg_info("package.deb")["name"], "package_name"
)
- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
+ @patch(
+ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail",
+ MagicMock(return_value=dselect_pkg),
+ )
+ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
+ @patch(
+ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3")
+ )
def test_info(self):
"""
Test info
@@ -359,9 +364,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
)
assert pkg_data["license"] == "BSD v3"
- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg))
- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3"))
+ @patch(
+ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail",
+ MagicMock(return_value=dselect_pkg),
+ )
+ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info))
+ @patch(
+ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3")
+ )
def test_info_attr(self):
"""
Test info with 'attr' parameter
--
2.29.2

View File

@ -1,58 +0,0 @@
From b9ba6875945e1ffafdeb862d8b2ac7fccd9cccf5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Mon, 25 Jun 2018 13:06:40 +0100
Subject: [PATCH] Fix zypper.list_pkgs to be aligned with pkg state
Handle packages with multiple versions properly with zypper
Add unit test coverage for multiple version packages on Zypper
Fix '_find_remove_targets' after aligning Zypper with pkg state
---
salt/states/pkg.py | 21 ---------------------
1 file changed, 21 deletions(-)
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
index a1b2a122bb..f7327a33e3 100644
--- a/salt/states/pkg.py
+++ b/salt/states/pkg.py
@@ -477,16 +477,6 @@ def _find_remove_targets(
if __grains__["os"] == "FreeBSD" and origin:
cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname]
- elif __grains__["os_family"] == "Suse":
- # On SUSE systems. Zypper returns packages without "arch" in name
- try:
- namepart, archpart = pkgname.rsplit(".", 1)
- except ValueError:
- cver = cur_pkgs.get(pkgname, [])
- else:
- if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
- pkgname = namepart
- cver = cur_pkgs.get(pkgname, [])
else:
cver = cur_pkgs.get(pkgname, [])
@@ -930,17 +920,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None):
cver = new_pkgs.get(pkgname.split("%")[0])
elif __grains__["os_family"] == "Debian":
cver = new_pkgs.get(pkgname.split("=")[0])
- elif __grains__["os_family"] == "Suse":
- # On SUSE systems. Zypper returns packages without "arch" in name
- try:
- namepart, archpart = pkgname.rsplit(".", 1)
- except ValueError:
- cver = new_pkgs.get(pkgname)
- else:
- if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
- cver = new_pkgs.get(namepart)
- else:
- cver = new_pkgs.get(pkgname)
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
--
2.29.2

View File

@ -1,51 +0,0 @@
From 17ad05e3cbb3718ca12cef20600be81aa5d42d33 Mon Sep 17 00:00:00 2001
From: tyl0re <andreas@vogler.name>
Date: Wed, 17 Jul 2019 10:13:09 +0200
Subject: [PATCH] Fixed bug: LVM has no partition type. Later in the script
 it is checked: if fs_type: cmd = ('parted', '-m', '-s', '--', device,
 'mkpart', part_type, fs_type, start, end) else: cmd = ('parted', '-m', '-s',
 '--', device, 'mkpart', part_type, start, end) — but the else branch is never reached.
 The check existed in earlier versions of parted.py (lines 443-444):
 if fs_type and fs_type not in set(['ext2', 'fat32', 'fat16', 'linux-swap', 'reiserfs',
 'hfs', 'hfs+', 'hfsx', 'NTFS', 'ufs', 'xfs', 'zfs']):
 So the check for an undefined fs_type is missing.
---
salt/modules/parted_partition.py | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py
index 015d4cbc29..bb34cd58b4 100644
--- a/salt/modules/parted_partition.py
+++ b/salt/modules/parted_partition.py
@@ -552,10 +552,23 @@ def mkpartfs(device, part_type, fs_type=None, start=None, end=None):
.. code-block:: bash
- salt '*' partition.mkpartfs /dev/sda primary fs_type=fat32 start=0 end=639
- salt '*' partition.mkpartfs /dev/sda primary start=0 end=639
+ salt '*' partition.mkpartfs /dev/sda logical ext2 440 670
"""
- out = mkpart(device, part_type, fs_type, start, end)
+ _validate_device(device)
+
+ if part_type not in {"primary", "logical", "extended"}:
+ raise CommandExecutionError("Invalid part_type passed to partition.mkpartfs")
+
+ if fs_type and not _is_fstype(fs_type):
+ raise CommandExecutionError("Invalid fs_type passed to partition.mkpartfs")
+
+ _validate_partition_boundary(start)
+ _validate_partition_boundary(end)
+
+ cmd = "parted -m -s -- {} mkpart {} {} {} {}".format(
+ device, part_type, fs_type, start, end
+ )
+ out = __salt__["cmd.run"](cmd).splitlines()
return out
--
2.29.2

View File

@ -1,4 +1,4 @@
From 5761a11227c8d78df62d1a1552a50c0a4b76ae33 Mon Sep 17 00:00:00 2001
From 0def15837c3470f20ce85ec81e2c1d42cd933c23 Mon Sep 17 00:00:00 2001
From: nicholasmhughes <nicholasmhughes@gmail.com>
Date: Fri, 14 Feb 2020 22:03:42 -0500
Subject: [PATCH] fixes #56144 to enable hotadd profile support
@ -9,7 +9,7 @@ Subject: [PATCH] fixes #56144 to enable hotadd profile support
2 files changed, 20 insertions(+)
diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst
index e4cb607e8d..0ac7c255a8 100644
index bbc5cdff11..1a18ebf226 100644
--- a/doc/topics/cloud/vmware.rst
+++ b/doc/topics/cloud/vmware.rst
@@ -457,6 +457,14 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
@ -28,10 +28,10 @@ index e4cb607e8d..0ac7c255a8 100644
Specifies the additional configuration information for the virtual machine. This
describes a set of modifications to the additional options. If the key is already
diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py
index 5ebf448abc..edaca9618b 100644
index 1e9943ad78..4999ca089f 100644
--- a/salt/cloud/clouds/vmware.py
+++ b/salt/cloud/clouds/vmware.py
@@ -2824,6 +2824,12 @@ def create(vm_):
@@ -2821,6 +2821,12 @@ def create(vm_):
win_run_once = config.get_cloud_config_value(
"win_run_once", vm_, __opts__, search_global=False, default=None
)
@ -44,7 +44,7 @@ index 5ebf448abc..edaca9618b 100644
# Get service instance object
si = _get_si()
@@ -3042,6 +3048,12 @@ def create(vm_):
@@ -3039,6 +3045,12 @@ def create(vm_):
)
config_spec.deviceChange = specs["device_specs"]
@ -55,9 +55,9 @@ index 5ebf448abc..edaca9618b 100644
+ config_spec.memoryHotAddEnabled = bool(mem_hot_add)
+
if extra_config:
for key, value in six.iteritems(extra_config):
for key, value in extra_config.items():
option = vim.option.OptionValue(key=key, value=value)
--
2.29.2
2.33.0

View File

@ -1,58 +0,0 @@
From 731a53bd241240e08c455a8cb3a59e4d65a6abb5 Mon Sep 17 00:00:00 2001
From: Erik Johnson <palehose@gmail.com>
Date: Fri, 24 Aug 2018 10:35:55 -0500
Subject: [PATCH] Fixes: CVE-2018-15750, CVE-2018-15751
Ensure that tokens are hex to avoid hanging/errors in cherrypy
Add empty token salt-api integration tests
Handle Auth exceptions in run_job
Update tornado test to correct authentication message
---
salt/netapi/rest_cherrypy/app.py | 7 -------
tests/integration/netapi/rest_tornado/test_app.py | 8 ++++++--
2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py
index e7641ccbc5..5dfbadf759 100644
--- a/salt/netapi/rest_cherrypy/app.py
+++ b/salt/netapi/rest_cherrypy/app.py
@@ -1181,13 +1181,6 @@ class LowDataAdapter:
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
- if "token" in chunk:
- # Make sure that auth token is hex
- try:
- int(chunk["token"], 16)
- except (TypeError, ValueError):
- raise cherrypy.HTTPError(401, "Invalid token")
-
if client:
chunk["client"] = client
diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py
index e3ad8820d3..4e5e741f1d 100644
--- a/tests/integration/netapi/rest_tornado/test_app.py
+++ b/tests/integration/netapi/rest_tornado/test_app.py
@@ -326,8 +326,12 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
self.assertIn("jid", ret[0]) # the first 2 are regular returns
self.assertIn("jid", ret[1])
self.assertIn("Failed to authenticate", ret[2]) # bad auth
- self.assertEqual(ret[0]["minions"], sorted(["minion", "sub_minion"]))
- self.assertEqual(ret[1]["minions"], sorted(["minion", "sub_minion"]))
+ self.assertEqual(
+ ret[0]["minions"], sorted(["minion", "sub_minion", "localhost"])
+ )
+ self.assertEqual(
+ ret[1]["minions"], sorted(["minion", "sub_minion", "localhost"])
+ )
@slowTest
def test_simple_local_async_post_no_tgt(self):
--
2.29.2

View File

@ -1,33 +0,0 @@
From 82d1cadff4fa6248a9d891a3c228fc415207d8d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mihai=20Dinc=C4=83?= <dincamihai@users.noreply.github.com>
Date: Tue, 26 Nov 2019 18:26:31 +0100
Subject: [PATCH] Fixing StreamClosed issue
---
salt/cli/batch_async.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index f3d92b88f1..8d2601e636 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -232,7 +232,6 @@ class BatchAsync:
"metadata": self.metadata,
}
self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
- self.event.remove_event_handler(self.__event_handler)
for (pattern, label) in self.patterns:
if label in ["ping_return", "batch_run"]:
self.event.unsubscribe(pattern, match_type="glob")
@@ -277,6 +276,7 @@ class BatchAsync:
def __del__(self):
self.local = None
+ self.event.remove_event_handler(self.__event_handler)
self.event = None
self.ioloop = None
gc.collect()
--
2.29.2

View File

@ -1,92 +0,0 @@
From e987664551debb9affce8ce5a70593ef0750dcd5 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Wed, 14 Nov 2018 17:36:23 +0100
Subject: [PATCH] Get os_arch also without RPM package installed
backport pkg.rpm test
Add pkg.rpm unit test case
Fix docstring
Add UT for getting OS architecture fallback, when no RPM found (initrd, e.g.)
Add UT for OS architecture detection on fallback, when no CPU arch can be determined
Add UT for OS arch detection when no CPU arch or machine can be determined
Remove unsupported testcase
---
tests/unit/utils/test_pkg.py | 53 ++++++------------------------------
1 file changed, 8 insertions(+), 45 deletions(-)
diff --git a/tests/unit/utils/test_pkg.py b/tests/unit/utils/test_pkg.py
index b4a67b8e57..404b01b12b 100644
--- a/tests/unit/utils/test_pkg.py
+++ b/tests/unit/utils/test_pkg.py
@@ -1,53 +1,16 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import, print_function, unicode_literals
-
import salt.utils.pkg
from salt.utils.pkg import rpm
-from tests.support.mock import MagicMock, patch
-from tests.support.unit import TestCase
-
+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch
+from tests.support.unit import TestCase, skipIf
-class PkgUtilsTestCase(TestCase):
- """
- TestCase for salt.utils.pkg module
- """
-
- test_parameters = [
- ("16.0.0.49153-0+f1", "", "16.0.0.49153-0+f1"),
- ("> 15.0.0", ">", "15.0.0"),
- ("< 15.0.0", "<", "15.0.0"),
- ("<< 15.0.0", "<<", "15.0.0"),
- (">> 15.0.0", ">>", "15.0.0"),
- (">= 15.0.0", ">=", "15.0.0"),
- ("<= 15.0.0", "<=", "15.0.0"),
- ("!= 15.0.0", "!=", "15.0.0"),
- ("<=> 15.0.0", "<=>", "15.0.0"),
- ("<> 15.0.0", "<>", "15.0.0"),
- ("= 15.0.0", "=", "15.0.0"),
- (">15.0.0", ">", "15.0.0"),
- ("<15.0.0", "<", "15.0.0"),
- ("<<15.0.0", "<<", "15.0.0"),
- (">>15.0.0", ">>", "15.0.0"),
- (">=15.0.0", ">=", "15.0.0"),
- ("<=15.0.0", "<=", "15.0.0"),
- ("!=15.0.0", "!=", "15.0.0"),
- ("<=>15.0.0", "<=>", "15.0.0"),
- ("<>15.0.0", "<>", "15.0.0"),
- ("=15.0.0", "=", "15.0.0"),
- ("", "", ""),
- ]
-
- def test_split_comparison(self):
- """
- Tests salt.utils.pkg.split_comparison
- """
- for test_parameter in self.test_parameters:
- oper, verstr = salt.utils.pkg.split_comparison(test_parameter[0])
- self.assertEqual(test_parameter[1], oper)
- self.assertEqual(test_parameter[2], verstr)
+try:
+ import pytest
+except ImportError:
+ pytest = None
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(pytest is None, "PyTest is missing")
class PkgRPMTestCase(TestCase):
"""
Test case for pkg.rpm utils
--
2.29.2

View File

@ -1,34 +0,0 @@
From d9618fed8ff241c6f127f08ec59fea9c8b8e12a6 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@suse.com>
Date: Tue, 27 Oct 2020 13:16:37 +0100
Subject: [PATCH] grains: master can read grains
---
salt/grains/extra.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
index d25faac3b7..7729a5c0a5 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
@@ -76,8 +76,14 @@ def __secure_boot():
enabled = False
sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data")
if len(sboot) == 1:
- with salt.utils.files.fopen(sboot[0], "rb") as fd:
- enabled = fd.read()[-1:] == b"\x01"
+ # The minion is usually running as a privileged user, but that is
+ # not the case for the master. It seems that the master can also
+ # pick up the grains, and this file can only be read by "root"
+ try:
+ with salt.utils.files.fopen(sboot[0], "rb") as fd:
+ enabled = fd.read()[-1:] == b"\x01"
+ except PermissionError:
+ pass
return enabled
--
2.29.2

View File

@ -1,51 +0,0 @@
From 27c7a9f62b1a589365785c9428293653ac76fee3 Mon Sep 17 00:00:00 2001
From: Alberto Planas <aplanas@suse.com>
Date: Mon, 10 May 2021 16:26:02 +0200
Subject: [PATCH] grains.extra: support old non-intel kernels
(bsc#1180650) (#368)
---
salt/grains/extra.py | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
index 7729a5c0a5..f2abd1281c 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
@@ -71,10 +71,10 @@ def suse_backported_capabilities():
}
-def __secure_boot():
+def __secure_boot(efivars_dir):
"""Detect if secure-boot is enabled."""
enabled = False
- sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data")
+ sboot = glob.glob(os.path.join(efivars_dir, "SecureBoot-*/data"))
if len(sboot) == 1:
# The minion is usually running as a privileged user, but is
# not the case for the master. Seems that the master can also
@@ -89,9 +89,17 @@ def __secure_boot():
def uefi():
"""Populate UEFI grains."""
+ efivars_dir = next(
+ iter(
+ filter(
+ os.path.exists, ["/sys/firmware/efi/efivars", "/sys/firmware/efi/vars"]
+ )
+ ),
+ None,
+ )
grains = {
- "efi": os.path.exists("/sys/firmware/efi/systab"),
- "efi-secure-boot": __secure_boot(),
+ "efi": bool(efivars_dir),
+ "efi-secure-boot": __secure_boot(efivars_dir) if efivars_dir else False,
}
return grains
--
2.31.1

View File

@ -1,109 +0,0 @@
From e0b7511e30da289b4100aa156b67b652681afc03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 8 Jul 2021 08:57:13 +0100
Subject: [PATCH] Handle "master tops" data when states are applied by
"transactional_update" (bsc#1187787) (#398)
* Handle master tops data when states are applied by transactional_update (bsc#1187787)
* Fix unit tests for transactional_update module
---
salt/modules/transactional_update.py | 9 +++++++--
.../unit/modules/test_transactional_update.py | 20 +++++++++----------
2 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py
index 7bbdb697b8..9cdaddb91a 100644
--- a/salt/modules/transactional_update.py
+++ b/salt/modules/transactional_update.py
@@ -301,6 +301,11 @@ def __virtual__():
return (False, "Module transactional_update requires a transactional system")
+class TransactionalUpdateHighstate(salt.client.ssh.state.SSHHighState):
+ def _master_tops(self):
+ return self.client.master_tops()
+
+
def _global_params(self_update, snapshot=None, quiet=False):
"""Utility function to prepare common global parameters."""
params = ["--non-interactive", "--drop-if-no-change"]
@@ -1107,7 +1112,7 @@ def sls(
# Clone the options data and apply some default values. May not be
# needed, as this module just delegate
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
- st_ = salt.client.ssh.state.SSHHighState(
+ st_ = TransactionalUpdateHighstate(
opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__)
)
@@ -1180,7 +1185,7 @@ def highstate(activate_transaction=False, **kwargs):
# Clone the options data and apply some default values. May not be
# needed, as this module just delegate
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
- st_ = salt.client.ssh.state.SSHHighState(
+ st_ = TransactionalUpdateHighstate(
opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__)
)
diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py
index 19e477d02f..2d30f296d7 100644
--- a/tests/unit/modules/test_transactional_update.py
+++ b/tests/unit/modules/test_transactional_update.py
@@ -622,22 +622,22 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
utils_mock["files.rm_rf"].assert_called_once()
@patch("salt.modules.transactional_update._create_and_execute_salt_state")
- @patch("salt.client.ssh.state.SSHHighState")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
@patch("salt.fileclient.get_file_client")
@patch("salt.utils.state.get_sls_opts")
def test_sls(
self,
get_sls_opts,
get_file_client,
- SSHHighState,
+ TransactionalUpdateHighstate,
_create_and_execute_salt_state,
):
"""Test transactional_update.sls"""
- SSHHighState.return_value = SSHHighState
- SSHHighState.render_highstate.return_value = (None, [])
- SSHHighState.state.reconcile_extend.return_value = (None, [])
- SSHHighState.state.requisite_in.return_value = (None, [])
- SSHHighState.state.verify_high.return_value = []
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
+ TransactionalUpdateHighstate.render_highstate.return_value = (None, [])
+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, [])
+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, [])
+ TransactionalUpdateHighstate.state.verify_high.return_value = []
_create_and_execute_salt_state.return_value = "result"
opts_mock = {
@@ -649,18 +649,18 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin):
_create_and_execute_salt_state.assert_called_once()
@patch("salt.modules.transactional_update._create_and_execute_salt_state")
- @patch("salt.client.ssh.state.SSHHighState")
+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate")
@patch("salt.fileclient.get_file_client")
@patch("salt.utils.state.get_sls_opts")
def test_highstate(
self,
get_sls_opts,
get_file_client,
- SSHHighState,
+ TransactionalUpdateHighstate,
_create_and_execute_salt_state,
):
"""Test transactional_update.highstage"""
- SSHHighState.return_value = SSHHighState
+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate
_create_and_execute_salt_state.return_value = "result"
opts_mock = {
--
2.32.0

View File

@ -1,152 +0,0 @@
From b154f0a17c85c2fe0b85226dfeb3919bd833a85c Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cedric.bosdonnat@free.fr>
Date: Fri, 21 May 2021 13:04:46 +0200
Subject: [PATCH] Handle volumes on stopped pools in virt.vm_info
(#373)
For VMs having at least a disk on a stopped volume, we don't want the
user to get an exception when running virt.vm_info. Instead just provide
less information.
---
changelog/60132.fixed | 1 +
salt/modules/virt.py | 73 +++++++++++--------
.../pytests/unit/modules/virt/test_domain.py | 9 ++-
3 files changed, 50 insertions(+), 33 deletions(-)
create mode 100644 changelog/60132.fixed
diff --git a/changelog/60132.fixed b/changelog/60132.fixed
new file mode 100644
index 0000000000..1e3bc96b98
--- /dev/null
+++ b/changelog/60132.fixed
@@ -0,0 +1 @@
+Gracefully handle errors in virt.vm_info
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 6409089109..d8a8c51ce5 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -515,41 +515,50 @@ def _get_disks(conn, dom):
def _get_disk_volume_data(pool_name, volume_name):
qemu_target = "{}/{}".format(pool_name, volume_name)
pool = conn.storagePoolLookupByName(pool_name)
- vol = pool.storageVolLookupByName(volume_name)
- vol_info = vol.info()
- extra_properties = {
- "virtual size": vol_info[1],
- "disk size": vol_info[2],
- }
-
- backing_files = [
- {
- "file": node.find("source").get("file"),
- "file format": node.find("format").get("type"),
+ extra_properties = {}
+ try:
+ vol = pool.storageVolLookupByName(volume_name)
+ vol_info = vol.info()
+ extra_properties = {
+ "virtual size": vol_info[1],
+ "disk size": vol_info[2],
}
- for node in elem.findall(".//backingStore[source]")
- ]
- if backing_files:
- # We had the backing files in a flat list, nest them again.
- extra_properties["backing file"] = backing_files[0]
- parent = extra_properties["backing file"]
- for sub_backing_file in backing_files[1:]:
- parent["backing file"] = sub_backing_file
- parent = sub_backing_file
+ backing_files = [
+ {
+ "file": node.find("source").get("file"),
+ "file format": node.find("format").get("type"),
+ }
+ for node in elem.findall(".//backingStore[source]")
+ ]
- else:
- # In some cases the backing chain is not displayed by the domain definition
- # Try to see if we have some of it in the volume definition.
- vol_desc = ElementTree.fromstring(vol.XMLDesc())
- backing_path = vol_desc.find("./backingStore/path")
- backing_format = vol_desc.find("./backingStore/format")
- if backing_path is not None:
- extra_properties["backing file"] = {"file": backing_path.text}
- if backing_format is not None:
- extra_properties["backing file"][
- "file format"
- ] = backing_format.get("type")
+ if backing_files:
+ # We had the backing files in a flat list, nest them again.
+ extra_properties["backing file"] = backing_files[0]
+ parent = extra_properties["backing file"]
+ for sub_backing_file in backing_files[1:]:
+ parent["backing file"] = sub_backing_file
+ parent = sub_backing_file
+
+ else:
+ # In some cases the backing chain is not displayed by the domain definition
+ # Try to see if we have some of it in the volume definition.
+ vol_desc = ElementTree.fromstring(vol.XMLDesc())
+ backing_path = vol_desc.find("./backingStore/path")
+ backing_format = vol_desc.find("./backingStore/format")
+ if backing_path is not None:
+ extra_properties["backing file"] = {
+ "file": backing_path.text
+ }
+ if backing_format is not None:
+ extra_properties["backing file"][
+ "file format"
+ ] = backing_format.get("type")
+ except libvirt.libvirtError:
+ # The volume won't be found if the pool is not started, just output less info
+ log.info(
+ "Couldn't extract all volume information: pool is likely not running or refreshed"
+ )
return (qemu_target, extra_properties)
if disk_type == "file":
diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py
index 76433eaef4..a9453e4a66 100644
--- a/tests/pytests/unit/modules/virt/test_domain.py
+++ b/tests/pytests/unit/modules/virt/test_domain.py
@@ -192,6 +192,11 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool):
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</disk>
+ <disk type='volume' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source pool='stopped' volume='vm05_data'/>
+ <target dev='vdd' bus='virtio'/>
+ </disk>
<disk type='network' device='cdrom'>
<driver name='qemu' type='raw' cache='none' io='native'/>
<source protocol='http' name='/pub/iso/myimage.iso' query='foo=bar&amp;baz=flurb' index='1'>
@@ -205,11 +210,12 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool):
</devices>
</domain>
"""
- domain_mock = make_mock_vm(vm_def)
+ make_mock_vm(vm_def)
pool_mock = make_mock_storage_pool(
"default", "dir", ["srv01_system", "srv01_data", "vm05_system"]
)
+ make_mock_storage_pool("stopped", "dir", [])
# Append backing store to srv01_data volume XML description
srv1data_mock = pool_mock.storageVolLookupByName("srv01_data")
@@ -256,6 +262,7 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool):
},
},
},
+ "vdd": {"type": "disk", "file": "stopped/vm05_data", "file format": "qcow2"},
"hda": {
"type": "cdrom",
"file format": "raw",
--
2.31.1

Some files were not shown because too many files have changed in this diff Show More