forked from pool/salt

Accepting request 1008560 from systemsmanagement:saltstack

- Make pass renderer configurable and fix detected issues
- Workaround fopen line buffering for binary mode (bsc#1203834)
- Handle non-UTF-8 bytes in core grains generation (bsc#1202165)
- Fix Syndic authentication errors (bsc#1199562)
- Added:
  * make-pass-renderer-configurable-other-fixes-532.patch
  * ignore-non-utf8-characters-while-reading-files-with-.patch
  * fopen-workaround-bad-buffering-for-binary-mode-563.patch
  * backport-syndic-auth-fixes.patch

- Add Amazon EC2 detection for virtual grains (bsc#1195624)
- Fix the regression in the schedule module released in 3004 (bsc#1202631)
- Fix state.apply in test mode with file state module
  on user/group checking (bsc#1202167)
- Change the delimiters to prevent possible tracebacks
  on some packages with dpkg_lowpkg
- Make zypperpkg retry if the RPM lock is temporarily unavailable (bsc#1200596)
- Added:
  * fix-the-regression-in-schedule-module-releasded-in-3.patch
  * retry-if-rpm-lock-is-temporarily-unavailable-547.patch
  * change-the-delimeters-to-prevent-possible-tracebacks.patch
  * add-amazon-ec2-detection-for-virtual-grains-bsc-1195.patch
  * fix-state.apply-in-test-mode-with-file-state-module-.patch

- Fix test_ipc unit test
- Added:
  * fix-test_ipc-unit-tests.patch

OBS-URL: https://build.opensuse.org/request/show/1008560
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/salt?expand=0&rev=131
Fabian Vogt 2022-10-10 16:43:22 +00:00 committed by Git OBS Bridge
commit a328fd397d
14 changed files with 2855 additions and 2 deletions


@@ -1 +1 @@
e07459bfeea39239f6b446f40f6502e72dea488f
e04acec89d982e3bd465742afffe6ae5ec82620b


@@ -3,7 +3,7 @@
<param name="url">https://github.com/openSUSE/salt-packaging.git</param>
<param name="subdir">salt</param>
<param name="filename">package</param>
<param name="revision">3004</param>
<param name="revision">release/3004</param>
<param name="scm">git</param>
</service>
<service name="extract_file" mode="disabled">


@@ -0,0 +1,224 @@
From 77e90c4925a4268c5975cf1ce0bb0e4c457618c1 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Thu, 1 Sep 2022 14:46:24 +0300
Subject: [PATCH] Add Amazon EC2 detection for virtual grains
(bsc#1195624)
* Add ignore_retcode to quiet run functions
* Implement Amazon EC2 detection for virtual grains
* Add test for virtual grain detection of Amazon EC2
* Also detect the product of Amazon EC2 instance
* Add changelog entry
---
changelog/62539.added | 1 +
salt/grains/core.py | 18 ++++
salt/modules/cmdmod.py | 4 +
tests/pytests/unit/grains/test_core.py | 117 +++++++++++++++++++++++++
4 files changed, 140 insertions(+)
create mode 100644 changelog/62539.added
diff --git a/changelog/62539.added b/changelog/62539.added
new file mode 100644
index 0000000000..5f402d61c2
--- /dev/null
+++ b/changelog/62539.added
@@ -0,0 +1 @@
+Implementation of Amazon EC2 instance detection and setting `virtual_subtype` grain accordingly including the product if possible to identify.
diff --git a/salt/grains/core.py b/salt/grains/core.py
index c5d996d1bb..9530a43fc5 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1173,6 +1173,24 @@ def _virtual(osdata):
if grains.get("virtual_subtype") and grains["virtual"] == "physical":
grains["virtual"] = "virtual"
+ # Try to detect if the instance is running on Amazon EC2
+ if grains["virtual"] in ("qemu", "kvm", "xen"):
+ dmidecode = salt.utils.path.which("dmidecode")
+ if dmidecode:
+ ret = __salt__["cmd.run_all"](
+ [dmidecode, "-t", "system"], ignore_retcode=True
+ )
+ output = ret["stdout"]
+ if "Manufacturer: Amazon EC2" in output:
+ grains["virtual_subtype"] = "Amazon EC2"
+ product = re.match(
+ r".*Product Name: ([^\r\n]*).*", output, flags=re.DOTALL
+ )
+ if product:
+ grains["virtual_subtype"] = "Amazon EC2 ({})".format(product[1])
+ elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL):
+ grains["virtual_subtype"] = "Amazon EC2"
+
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
index 61b328b13b..cd42e2cda0 100644
--- a/salt/modules/cmdmod.py
+++ b/salt/modules/cmdmod.py
@@ -907,6 +907,7 @@ def _run_quiet(
success_retcodes=None,
success_stdout=None,
success_stderr=None,
+ ignore_retcode=None,
):
"""
Helper for running commands quietly for minion startup
@@ -933,6 +934,7 @@ def _run_quiet(
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
+ ignore_retcode=ignore_retcode,
)["stdout"]
@@ -955,6 +957,7 @@ def _run_all_quiet(
success_retcodes=None,
success_stdout=None,
success_stderr=None,
+ ignore_retcode=None,
):
"""
@@ -987,6 +990,7 @@ def _run_all_quiet(
success_retcodes=success_retcodes,
success_stdout=success_stdout,
success_stderr=success_stderr,
+ ignore_retcode=ignore_retcode,
)
diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py
index bc3947fa1b..84dd97d62f 100644
--- a/tests/pytests/unit/grains/test_core.py
+++ b/tests/pytests/unit/grains/test_core.py
@@ -2720,3 +2720,120 @@ def test_get_server_id():
with patch.dict(core.__opts__, {"id": "otherid"}):
assert core.get_server_id() != expected
+
+
+@pytest.mark.skip_unless_on_linux
+def test_virtual_set_virtual_ec2():
+ osdata = {}
+
+ (
+ osdata["kernel"],
+ osdata["nodename"],
+ osdata["kernelrelease"],
+ osdata["kernelversion"],
+ osdata["cpuarch"],
+ _,
+ ) = platform.uname()
+
+ which_mock = MagicMock(
+ side_effect=[
+ # Check with virt-what
+ "/usr/sbin/virt-what",
+ "/usr/sbin/virt-what",
+ None,
+ "/usr/sbin/dmidecode",
+ # Check with systemd-detect-virt
+ None,
+ "/usr/bin/systemd-detect-virt",
+ None,
+ "/usr/sbin/dmidecode",
+ # Check with systemd-detect-virt when no dmidecode available
+ None,
+ "/usr/bin/systemd-detect-virt",
+ None,
+ None,
+ ]
+ )
+ cmd_run_all_mock = MagicMock(
+ side_effect=[
+ # Check with virt-what
+ {"retcode": 0, "stderr": "", "stdout": "xen"},
+ {
+ "retcode": 0,
+ "stderr": "",
+ "stdout": "\n".join(
+ [
+ "dmidecode 3.2",
+ "Getting SMBIOS data from sysfs.",
+ "SMBIOS 2.7 present.",
+ "",
+ "Handle 0x0100, DMI type 1, 27 bytes",
+ "System Information",
+ " Manufacturer: Xen",
+ " Product Name: HVM domU",
+ " Version: 4.11.amazon",
+ " Serial Number: 12345678-abcd-4321-dcba-0123456789ab",
+ " UUID: 01234567-dcba-1234-abcd-abcdef012345",
+ " Wake-up Type: Power Switch",
+ " SKU Number: Not Specified",
+ " Family: Not Specified",
+ "",
+ "Handle 0x2000, DMI type 32, 11 bytes",
+ "System Boot Information",
+ " Status: No errors detected",
+ ]
+ ),
+ },
+ # Check with systemd-detect-virt
+ {"retcode": 0, "stderr": "", "stdout": "kvm"},
+ {
+ "retcode": 0,
+ "stderr": "",
+ "stdout": "\n".join(
+ [
+ "dmidecode 3.2",
+ "Getting SMBIOS data from sysfs.",
+ "SMBIOS 2.7 present.",
+ "",
+ "Handle 0x0001, DMI type 1, 27 bytes",
+ "System Information",
+ " Manufacturer: Amazon EC2",
+ " Product Name: m5.large",
+ " Version: Not Specified",
+ " Serial Number: 01234567-dcba-1234-abcd-abcdef012345",
+ " UUID: 12345678-abcd-4321-dcba-0123456789ab",
+ " Wake-up Type: Power Switch",
+ " SKU Number: Not Specified",
+ " Family: Not Specified",
+ ]
+ ),
+ },
+ # Check with systemd-detect-virt when no dmidecode available
+ {"retcode": 0, "stderr": "", "stdout": "kvm"},
+ ]
+ )
+
+ with patch("salt.utils.path.which", which_mock), patch.dict(
+ core.__salt__,
+ {
+ "cmd.run": salt.modules.cmdmod.run,
+ "cmd.run_all": cmd_run_all_mock,
+ "cmd.retcode": salt.modules.cmdmod.retcode,
+ "smbios.get": salt.modules.smbios.get,
+ },
+ ):
+
+ virtual_grains = core._virtual(osdata.copy())
+
+ assert virtual_grains["virtual"] == "xen"
+ assert virtual_grains["virtual_subtype"] == "Amazon EC2"
+
+ virtual_grains = core._virtual(osdata.copy())
+
+ assert virtual_grains["virtual"] == "kvm"
+ assert virtual_grains["virtual_subtype"] == "Amazon EC2 (m5.large)"
+
+ virtual_grains = core._virtual(osdata.copy())
+
+ assert virtual_grains["virtual"] == "kvm"
+ assert "virtual_subtype" not in virtual_grains
--
2.37.2
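
The detection added above can be summarized outside of Salt: given the output of `dmidecode -t system`, the grain code looks for an "Amazon EC2" manufacturer string (plus the product name, if present) or an Amazon-flavoured Xen version string. The sketch below is a minimal, standalone approximation of that logic; the function name is illustrative, and the real code only runs when the host already reports qemu/kvm/xen and invokes dmidecode through __salt__["cmd.run_all"].

import re

def ec2_virtual_subtype(dmidecode_output):
    # Approximation of the patched _virtual() logic: return the
    # virtual_subtype string for an Amazon EC2 instance, or None.
    if "Manufacturer: Amazon EC2" in dmidecode_output:
        product = re.match(
            r".*Product Name: ([^\r\n]*).*", dmidecode_output, flags=re.DOTALL
        )
        if product:
            return "Amazon EC2 ({})".format(product[1])
        return "Amazon EC2"
    # Xen-based EC2 instances report a version such as "4.11.amazon"
    if re.match(r".*Version: [^\r\n]+\.amazon.*", dmidecode_output, flags=re.DOTALL):
        return "Amazon EC2"
    return None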


@@ -0,0 +1,355 @@
From 54ab69e74beb83710d0bf6049039d13e260d5517 Mon Sep 17 00:00:00 2001
From: Alexander Graul <agraul@suse.com>
Date: Tue, 13 Sep 2022 11:26:21 +0200
Subject: [PATCH] Backport Syndic auth fixes
[3004.2] Syndic Fixes
(cherry picked from commit 643bd4b572ca97466e085ecd1d84da45b1684332)
Co-authored-by: Megan Wilhite <megan.wilhite@gmail.com>
---
changelog/61868.fixed | 1 +
salt/transport/mixins/auth.py | 2 +-
salt/transport/tcp.py | 2 +-
salt/transport/zeromq.py | 2 +-
tests/pytests/unit/transport/test_tcp.py | 149 +++++++++++++++++++-
tests/pytests/unit/transport/test_zeromq.py | 73 +++++++++-
6 files changed, 224 insertions(+), 5 deletions(-)
create mode 100644 changelog/61868.fixed
diff --git a/changelog/61868.fixed b/changelog/61868.fixed
new file mode 100644
index 0000000000..0169c48e99
--- /dev/null
+++ b/changelog/61868.fixed
@@ -0,0 +1 @@
+Make sure the correct key is being used when verifying or validating communication, eg. when a Salt syndic is involved use syndic_master.pub and when a Salt minion is involved use minion_master.pub.
diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py
index 1e2e8e6b7b..e5c6a5345f 100644
--- a/salt/transport/mixins/auth.py
+++ b/salt/transport/mixins/auth.py
@@ -43,7 +43,7 @@ class AESPubClientMixin:
)
# Verify that the signature is valid
- master_pubkey_path = os.path.join(self.opts["pki_dir"], "minion_master.pub")
+ master_pubkey_path = os.path.join(self.opts["pki_dir"], self.auth.mpub)
if not salt.crypt.verify_signature(
master_pubkey_path, payload["load"], payload.get("sig")
):
diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py
index f00b3c40eb..2821be82c7 100644
--- a/salt/transport/tcp.py
+++ b/salt/transport/tcp.py
@@ -295,7 +295,7 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
signed_msg = pcrypt.loads(ret[dictkey])
# Validate the master's signature.
- master_pubkey_path = os.path.join(self.opts["pki_dir"], "minion_master.pub")
+ master_pubkey_path = os.path.join(self.opts["pki_dir"], self.auth.mpub)
if not salt.crypt.verify_signature(
master_pubkey_path, signed_msg["data"], signed_msg["sig"]
):
diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py
index aa06298ee1..8199378239 100644
--- a/salt/transport/zeromq.py
+++ b/salt/transport/zeromq.py
@@ -255,7 +255,7 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
signed_msg = pcrypt.loads(ret[dictkey])
# Validate the master's signature.
- master_pubkey_path = os.path.join(self.opts["pki_dir"], "minion_master.pub")
+ master_pubkey_path = os.path.join(self.opts["pki_dir"], self.auth.mpub)
if not salt.crypt.verify_signature(
master_pubkey_path, signed_msg["data"], signed_msg["sig"]
):
diff --git a/tests/pytests/unit/transport/test_tcp.py b/tests/pytests/unit/transport/test_tcp.py
index 3b6e175472..e41edcc37e 100644
--- a/tests/pytests/unit/transport/test_tcp.py
+++ b/tests/pytests/unit/transport/test_tcp.py
@@ -1,13 +1,53 @@
import contextlib
+import os
import socket
import attr
import pytest
import salt.exceptions
+import salt.transport.mixins.auth
import salt.transport.tcp
from salt.ext.tornado import concurrent, gen, ioloop
from saltfactories.utils.ports import get_unused_localhost_port
-from tests.support.mock import MagicMock, patch
+from tests.support.mock import MagicMock, PropertyMock, create_autospec, patch
+
+
+@pytest.fixture
+def fake_keys():
+ with patch("salt.crypt.AsyncAuth.get_keys", autospec=True):
+ yield
+
+
+@pytest.fixture
+def fake_crypto():
+ with patch("salt.transport.tcp.PKCS1_OAEP", create=True) as fake_crypto:
+ yield fake_crypto
+
+
+@pytest.fixture
+def fake_authd():
+ @salt.ext.tornado.gen.coroutine
+ def return_nothing():
+ raise salt.ext.tornado.gen.Return()
+
+ with patch(
+ "salt.crypt.AsyncAuth.authenticated", new_callable=PropertyMock
+ ) as mock_authed, patch(
+ "salt.crypt.AsyncAuth.authenticate",
+ autospec=True,
+ return_value=return_nothing(),
+ ), patch(
+ "salt.crypt.AsyncAuth.gen_token", autospec=True, return_value=42
+ ):
+ mock_authed.return_value = False
+ yield
+
+
+@pytest.fixture
+def fake_crypticle():
+ with patch("salt.crypt.Crypticle") as fake_crypticle:
+ fake_crypticle.generate_key_string.return_value = "fakey fake"
+ yield fake_crypticle
@pytest.fixture
@@ -405,3 +445,110 @@ def test_client_reconnect_backoff(client_socket):
client.io_loop.run_sync(client._connect)
finally:
client.close()
+
+
+async def test_when_async_req_channel_with_syndic_role_should_use_syndic_master_pub_file_to_verify_master_sig(
+ fake_keys, fake_crypto, fake_crypticle
+):
+ # Syndics use the minion pki dir, but they also create a syndic_master.pub
+ # file for comms with the Salt master
+ expected_pubkey_path = os.path.join("/etc/salt/pki/minion", "syndic_master.pub")
+ fake_crypto.new.return_value.decrypt.return_value = "decrypted_return_value"
+ mockloop = MagicMock()
+ opts = {
+ "master_uri": "tcp://127.0.0.1:4506",
+ "interface": "127.0.0.1",
+ "ret_port": 4506,
+ "ipv6": False,
+ "sock_dir": ".",
+ "pki_dir": "/etc/salt/pki/minion",
+ "id": "syndic",
+ "__role": "syndic",
+ "keysize": 4096,
+ }
+ client = salt.transport.tcp.AsyncTCPReqChannel(opts, io_loop=mockloop)
+
+ dictkey = "pillar"
+ target = "minion"
+
+ # Mock auth and message client.
+ client.auth._authenticate_future = MagicMock()
+ client.auth._authenticate_future.done.return_value = True
+ client.auth._authenticate_future.exception.return_value = None
+ client.auth._crypticle = MagicMock()
+ client.message_client = create_autospec(client.message_client)
+
+ @salt.ext.tornado.gen.coroutine
+ def mocksend(msg, timeout=60, tries=3):
+ raise salt.ext.tornado.gen.Return({"pillar": "data", "key": "value"})
+
+ client.message_client.send = mocksend
+
+ # Note the 'ver' value in 'load' does not represent the the 'version' sent
+ # in the top level of the transport's message.
+ load = {
+ "id": target,
+ "grains": {},
+ "saltenv": "base",
+ "pillarenv": "base",
+ "pillar_override": True,
+ "extra_minion_data": {},
+ "ver": "2",
+ "cmd": "_pillar",
+ }
+ fake_nonce = 42
+ with patch(
+ "salt.crypt.verify_signature", autospec=True, return_value=True
+ ) as fake_verify, patch(
+ "salt.payload.loads",
+ autospec=True,
+ return_value={"key": "value", "nonce": fake_nonce, "pillar": "data"},
+ ), patch(
+ "uuid.uuid4", autospec=True
+ ) as fake_uuid:
+ fake_uuid.return_value.hex = fake_nonce
+ ret = await client.crypted_transfer_decode_dictentry(
+ load,
+ dictkey="pillar",
+ )
+
+ assert fake_verify.mock_calls[0].args[0] == expected_pubkey_path
+
+
+async def test_mixin_should_use_correct_path_when_syndic(
+ fake_keys, fake_authd, fake_crypticle
+):
+ mockloop = MagicMock()
+ expected_pubkey_path = os.path.join("/etc/salt/pki/minion", "syndic_master.pub")
+ opts = {
+ "master_uri": "tcp://127.0.0.1:4506",
+ "interface": "127.0.0.1",
+ "ret_port": 4506,
+ "ipv6": False,
+ "sock_dir": ".",
+ "pki_dir": "/etc/salt/pki/minion",
+ "id": "syndic",
+ "__role": "syndic",
+ "keysize": 4096,
+ "sign_pub_messages": True,
+ }
+
+ with patch(
+ "salt.crypt.verify_signature", autospec=True, return_value=True
+ ) as fake_verify, patch(
+ "salt.utils.msgpack.loads",
+ autospec=True,
+ return_value={"enc": "aes", "load": "", "sig": "fake_signature"},
+ ):
+ client = salt.transport.tcp.AsyncTCPPubChannel(opts, io_loop=mockloop)
+ client.message_client = MagicMock()
+ client.message_client.on_recv.side_effect = lambda x: x(b"some_data")
+ await client.connect()
+ client.auth._crypticle = fake_crypticle
+
+ @client.on_recv
+ def test_recv_function(*args, **kwargs):
+ ...
+
+ await test_recv_function
+ assert fake_verify.mock_calls[0].args[0] == expected_pubkey_path
diff --git a/tests/pytests/unit/transport/test_zeromq.py b/tests/pytests/unit/transport/test_zeromq.py
index 1f0515c91a..c3093f4b19 100644
--- a/tests/pytests/unit/transport/test_zeromq.py
+++ b/tests/pytests/unit/transport/test_zeromq.py
@@ -23,7 +23,7 @@ import salt.utils.process
import salt.utils.stringutils
from salt.master import SMaster
from salt.transport.zeromq import AsyncReqMessageClientPool
-from tests.support.mock import MagicMock, patch
+from tests.support.mock import MagicMock, create_autospec, patch
try:
from M2Crypto import RSA
@@ -608,6 +608,7 @@ async def test_req_chan_decode_data_dict_entry_v2(pki_dir):
auth = client.auth
auth._crypticle = salt.crypt.Crypticle(opts, AES_KEY)
client.auth = MagicMock()
+ client.auth.mpub = auth.mpub
client.auth.authenticated = True
client.auth.get_keys = auth.get_keys
client.auth.crypticle.dumps = auth.crypticle.dumps
@@ -672,6 +673,7 @@ async def test_req_chan_decode_data_dict_entry_v2_bad_nonce(pki_dir):
auth = client.auth
auth._crypticle = salt.crypt.Crypticle(opts, AES_KEY)
client.auth = MagicMock()
+ client.auth.mpub = auth.mpub
client.auth.authenticated = True
client.auth.get_keys = auth.get_keys
client.auth.crypticle.dumps = auth.crypticle.dumps
@@ -735,6 +737,7 @@ async def test_req_chan_decode_data_dict_entry_v2_bad_signature(pki_dir):
auth = client.auth
auth._crypticle = salt.crypt.Crypticle(opts, AES_KEY)
client.auth = MagicMock()
+ client.auth.mpub = auth.mpub
client.auth.authenticated = True
client.auth.get_keys = auth.get_keys
client.auth.crypticle.dumps = auth.crypticle.dumps
@@ -814,6 +817,7 @@ async def test_req_chan_decode_data_dict_entry_v2_bad_key(pki_dir):
auth = client.auth
auth._crypticle = salt.crypt.Crypticle(opts, AES_KEY)
client.auth = MagicMock()
+ client.auth.mpub = auth.mpub
client.auth.authenticated = True
client.auth.get_keys = auth.get_keys
client.auth.crypticle.dumps = auth.crypticle.dumps
@@ -1273,3 +1277,70 @@ async def test_req_chan_auth_v2_new_minion_without_master_pub(pki_dir, io_loop):
assert "sig" in ret
ret = client.auth.handle_signin_response(signin_payload, ret)
assert ret == "retry"
+
+
+async def test_when_async_req_channel_with_syndic_role_should_use_syndic_master_pub_file_to_verify_master_sig(
+ pki_dir,
+):
+ # Syndics use the minion pki dir, but they also create a syndic_master.pub
+ # file for comms with the Salt master
+ expected_pubkey_path = str(pki_dir.join("minion").join("syndic_master.pub"))
+ mockloop = MagicMock()
+ opts = {
+ "master_uri": "tcp://127.0.0.1:4506",
+ "interface": "127.0.0.1",
+ "ret_port": 4506,
+ "ipv6": False,
+ "sock_dir": ".",
+ "pki_dir": str(pki_dir.join("minion")),
+ "id": "syndic",
+ "__role": "syndic",
+ "keysize": 4096,
+ }
+ master_opts = dict(opts, pki_dir=str(pki_dir.join("master")))
+ server = salt.transport.zeromq.ZeroMQReqServerChannel(master_opts)
+ client = salt.transport.zeromq.AsyncZeroMQReqChannel(opts, io_loop=mockloop)
+
+ dictkey = "pillar"
+ target = "minion"
+ pillar_data = {"pillar1": "data1"}
+
+ # Mock auth and message client.
+ client.auth._authenticate_future = MagicMock()
+ client.auth._authenticate_future.done.return_value = True
+ client.auth._authenticate_future.exception.return_value = None
+ client.auth._crypticle = salt.crypt.Crypticle(opts, AES_KEY)
+ client.message_client = create_autospec(client.message_client)
+
+ @salt.ext.tornado.gen.coroutine
+ def mocksend(msg, timeout=60, tries=3):
+ client.message_client.msg = msg
+ load = client.auth.crypticle.loads(msg["load"])
+ ret = server._encrypt_private(
+ pillar_data, dictkey, target, nonce=load["nonce"], sign_messages=True
+ )
+ raise salt.ext.tornado.gen.Return(ret)
+
+ client.message_client.send = mocksend
+
+ # Note the 'ver' value in 'load' does not represent the the 'version' sent
+ # in the top level of the transport's message.
+ load = {
+ "id": target,
+ "grains": {},
+ "saltenv": "base",
+ "pillarenv": "base",
+ "pillar_override": True,
+ "extra_minion_data": {},
+ "ver": "2",
+ "cmd": "_pillar",
+ }
+ with patch(
+ "salt.crypt.verify_signature", autospec=True, return_value=True
+ ) as fake_verify:
+ ret = await client.crypted_transfer_decode_dictentry(
+ load,
+ dictkey="pillar",
+ )
+
+ assert fake_verify.mock_calls[0].args[0] == expected_pubkey_path
--
2.37.3
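
The common thread of the three transport changes above is that the public-key filename is no longer hard-coded to minion_master.pub but taken from the auth object (self.auth.mpub), which resolves to syndic_master.pub when the daemon runs with the syndic role, exactly as the new tests expect. A rough standalone illustration of that selection, assuming only the opts dict used in the tests (function name is hypothetical):

import os

def master_pubkey_path(opts):
    # Illustrative only: pick the key used to verify the master's signed
    # replies.  A syndic stores the master's public key as syndic_master.pub
    # in its (minion) pki_dir, a regular minion stores minion_master.pub.
    name = "syndic_master.pub" if opts.get("__role") == "syndic" else "minion_master.pub"
    return os.path.join(opts["pki_dir"], name)

print(master_pubkey_path({"__role": "syndic", "pki_dir": "/etc/salt/pki/minion"}))
# /etc/salt/pki/minion/syndic_master.pub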


@@ -0,0 +1,80 @@
From e28385eb37932809a11ec81c81834a51e094f507 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Thu, 1 Sep 2022 14:42:24 +0300
Subject: [PATCH] Change the delimeters to prevent possible tracebacks
on some packages with dpkg_lowpkg
* Use another separator on query to dpkg-query
* Fix the test test_dpkg_lowpkg::test_info
---
salt/modules/dpkg_lowpkg.py | 13 ++++++++-----
tests/unit/modules/test_dpkg_lowpkg.py | 4 ++--
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
index 2c25b1fb2a..fc93d99549 100644
--- a/salt/modules/dpkg_lowpkg.py
+++ b/salt/modules/dpkg_lowpkg.py
@@ -309,9 +309,8 @@ def _get_pkg_info(*packages, **kwargs):
"origin:${Origin}\\n"
"homepage:${Homepage}\\n"
"status:${db:Status-Abbrev}\\n"
- "======\\n"
"description:${Description}\\n"
- "------\\n'"
+ "\\n*/~^\\\\*\\n'"
)
cmd += " {}".format(" ".join(packages))
cmd = cmd.strip()
@@ -325,9 +324,13 @@ def _get_pkg_info(*packages, **kwargs):
else:
return ret
- for pkg_info in [elm for elm in re.split(r"------", call["stdout"]) if elm.strip()]:
+ for pkg_info in [
+ elm
+ for elm in re.split(r"\r?\n\*/~\^\\\*(\r?\n|)", call["stdout"])
+ if elm.strip()
+ ]:
pkg_data = {}
- pkg_info, pkg_descr = re.split(r"======", pkg_info)
+ pkg_info, pkg_descr = pkg_info.split("\ndescription:", 1)
for pkg_info_line in [
el.strip() for el in pkg_info.split(os.linesep) if el.strip()
]:
@@ -344,7 +347,7 @@ def _get_pkg_info(*packages, **kwargs):
if build_date:
pkg_data["build_date"] = build_date
pkg_data["build_date_time_t"] = build_date_t
- pkg_data["description"] = pkg_descr.split(":", 1)[-1]
+ pkg_data["description"] = pkg_descr
ret.append(pkg_data)
return ret
diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py
index d00fc46c66..a97519f489 100644
--- a/tests/unit/modules/test_dpkg_lowpkg.py
+++ b/tests/unit/modules/test_dpkg_lowpkg.py
@@ -290,7 +290,6 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
"origin:",
"homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html",
"status:ii ",
- "======",
"description:GNU Bourne Again SHell",
" Bash is an sh-compatible command language interpreter that"
" executes",
@@ -307,7 +306,8 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin):
" The Programmable Completion Code, by Ian Macdonald, is now"
" found in",
" the bash-completion package.",
- "------",
+ "",
+ "*/~^\\*", # pylint: disable=W1401
]
),
}
--
2.37.2
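
The point of the patch above is that the old record ("------") and description ("======") markers can legitimately appear inside a package description, which broke the re.split-based parsing and caused tracebacks. The new format emits a separator that is very unlikely to occur in dpkg output and splits the description on the "description:" key itself. A minimal sketch of the resulting parsing, with hypothetical helper names:

import re

# Matches the literal "*/~^\*" separator line emitted by the new dpkg-query format
RECORD_SEP = r"\r?\n\*/~\^\\\*(\r?\n|)"

def split_dpkg_records(stdout):
    # re.split() with a capturing group also yields the captured newlines,
    # so empty and whitespace-only elements are filtered out.
    return [blob for blob in re.split(RECORD_SEP, stdout) if blob and blob.strip()]

def parse_dpkg_record(blob):
    # Keyed fields come first; the free-form description follows the
    # "description:" marker and may span several lines.
    fields, description = blob.split("\ndescription:", 1)
    data = dict(line.split(":", 1) for line in fields.splitlines() if line.strip())
    data["description"] = description
    return data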


@@ -0,0 +1,247 @@
From ed567e5f339f7bf95d4361ac47e67427db71714c Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Thu, 1 Sep 2022 14:44:26 +0300
Subject: [PATCH] Fix state.apply in test mode with file state module
on user/group checking (bsc#1202167)
* Do not fail on checking user/group in test mode
* fixes saltstack/salt#61846 reporting of errors in test mode
Co-authored-by: nicholasmhughes <nicholasmhughes@gmail.com>
* Add tests for _check_user usage
Co-authored-by: nicholasmhughes <nicholasmhughes@gmail.com>
---
changelog/61846.fixed | 1 +
salt/states/file.py | 5 ++
tests/pytests/unit/states/file/test_copy.py | 35 ++++++++++++
.../unit/states/file/test_directory.py | 55 +++++++++++++++++++
.../unit/states/file/test_filestate.py | 42 ++++++++++++++
.../pytests/unit/states/file/test_managed.py | 31 +++++++++++
6 files changed, 169 insertions(+)
create mode 100644 changelog/61846.fixed
diff --git a/changelog/61846.fixed b/changelog/61846.fixed
new file mode 100644
index 0000000000..c4024efe9f
--- /dev/null
+++ b/changelog/61846.fixed
@@ -0,0 +1 @@
+Fix the reporting of errors for file.directory in test mode
diff --git a/salt/states/file.py b/salt/states/file.py
index a6288025e5..39cf83b78e 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
@@ -379,6 +379,11 @@ def _check_user(user, group):
gid = __salt__["file.group_to_gid"](group)
if gid == "":
err += "Group {} is not available".format(group)
+ if err and __opts__["test"]:
+ # Write the warning with error message, but prevent failing,
+ # in case of applying the state in test mode.
+ log.warning(err)
+ return ""
return err
diff --git a/tests/pytests/unit/states/file/test_copy.py b/tests/pytests/unit/states/file/test_copy.py
index ce7161f02d..a11adf5ae0 100644
--- a/tests/pytests/unit/states/file/test_copy.py
+++ b/tests/pytests/unit/states/file/test_copy.py
@@ -205,3 +205,38 @@ def test_copy(tmp_path):
)
res = filestate.copy_(name, source, group=group, preserve=False)
assert res == ret
+
+
+def test_copy_test_mode_user_group_not_present():
+ """
+ Test file copy in test mode with no user or group existing
+ """
+ source = "/tmp/src_copy_no_user_group_test_mode"
+ filename = "/tmp/copy_no_user_group_test_mode"
+ with patch.dict(
+ filestate.__salt__,
+ {
+ "file.group_to_gid": MagicMock(side_effect=["1234", "", ""]),
+ "file.user_to_uid": MagicMock(side_effect=["", "4321", ""]),
+ "file.get_mode": MagicMock(return_value="0644"),
+ },
+ ), patch.dict(filestate.__opts__, {"test": True}), patch.object(
+ os.path, "exists", return_value=True
+ ):
+ ret = filestate.copy_(
+ source, filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.copy_(
+ source, filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.copy_(
+ source, filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
diff --git a/tests/pytests/unit/states/file/test_directory.py b/tests/pytests/unit/states/file/test_directory.py
index 0e15e1d3ca..1287609c6a 100644
--- a/tests/pytests/unit/states/file/test_directory.py
+++ b/tests/pytests/unit/states/file/test_directory.py
@@ -291,3 +291,58 @@ def test_directory():
assert (
filestate.directory(name, user=user, group=group) == ret
)
+
+
+def test_directory_test_mode_user_group_not_present():
+ name = "/etc/testdir"
+ user = "salt"
+ group = "saltstack"
+ if salt.utils.platform.is_windows():
+ name = name.replace("/", "\\")
+
+ ret = {
+ "name": name,
+ "result": None,
+ "comment": "",
+ "changes": {name: {"directory": "new"}},
+ }
+
+ if salt.utils.platform.is_windows():
+ comt = 'The directory "{}" will be changed' "".format(name)
+ else:
+ comt = "The following files will be changed:\n{}:" " directory - new\n".format(
+ name
+ )
+ ret["comment"] = comt
+
+ mock_f = MagicMock(return_value=False)
+ mock_uid = MagicMock(
+ side_effect=[
+ "",
+ "U12",
+ "",
+ ]
+ )
+ mock_gid = MagicMock(
+ side_effect=[
+ "G12",
+ "",
+ "",
+ ]
+ )
+ mock_error = CommandExecutionError
+ with patch.dict(
+ filestate.__salt__,
+ {
+ "file.user_to_uid": mock_uid,
+ "file.group_to_gid": mock_gid,
+ "file.stats": mock_f,
+ },
+ ), patch("salt.utils.win_dacl.get_sid", mock_error), patch.object(
+ os.path, "isdir", mock_f
+ ), patch.dict(
+ filestate.__opts__, {"test": True}
+ ):
+ assert filestate.directory(name, user=user, group=group) == ret
+ assert filestate.directory(name, user=user, group=group) == ret
+ assert filestate.directory(name, user=user, group=group) == ret
diff --git a/tests/pytests/unit/states/file/test_filestate.py b/tests/pytests/unit/states/file/test_filestate.py
index 2f9f369fb2..c373cb3449 100644
--- a/tests/pytests/unit/states/file/test_filestate.py
+++ b/tests/pytests/unit/states/file/test_filestate.py
@@ -577,3 +577,45 @@ def test_mod_run_check_cmd():
assert filestate.mod_run_check_cmd(cmd, filename) == ret
assert filestate.mod_run_check_cmd(cmd, filename)
+
+
+def test_recurse_test_mode_user_group_not_present():
+ """
+ Test file recurse in test mode with no user or group existing
+ """
+ filename = "/tmp/recurse_no_user_group_test_mode"
+ source = "salt://tmp/src_recurse_no_user_group_test_mode"
+ mock_l = MagicMock(return_value=[])
+ mock_emt = MagicMock(return_value=["tmp/src_recurse_no_user_group_test_mode"])
+ with patch.dict(
+ filestate.__salt__,
+ {
+ "file.group_to_gid": MagicMock(side_effect=["1234", "", ""]),
+ "file.user_to_uid": MagicMock(side_effect=["", "4321", ""]),
+ "file.get_mode": MagicMock(return_value="0644"),
+ "file.source_list": MagicMock(return_value=[source, ""]),
+ "cp.list_master_dirs": mock_emt,
+ "cp.list_master": mock_l,
+ },
+ ), patch.dict(filestate.__opts__, {"test": True}), patch.object(
+ os.path, "exists", return_value=True
+ ), patch.object(
+ os.path, "isdir", return_value=True
+ ):
+ ret = filestate.recurse(
+ filename, source, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.recurse(
+ filename, source, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.recurse(
+ filename, source, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
diff --git a/tests/pytests/unit/states/file/test_managed.py b/tests/pytests/unit/states/file/test_managed.py
index 9d9fb17717..0b341e09a9 100644
--- a/tests/pytests/unit/states/file/test_managed.py
+++ b/tests/pytests/unit/states/file/test_managed.py
@@ -373,3 +373,34 @@ def test_managed():
filestate.managed(name, user=user, group=group)
== ret
)
+
+
+def test_managed_test_mode_user_group_not_present():
+ """
+ Test file managed in test mode with no user or group existing
+ """
+ filename = "/tmp/managed_no_user_group_test_mode"
+ with patch.dict(
+ filestate.__salt__,
+ {
+ "file.group_to_gid": MagicMock(side_effect=["1234", "", ""]),
+ "file.user_to_uid": MagicMock(side_effect=["", "4321", ""]),
+ },
+ ), patch.dict(filestate.__opts__, {"test": True}):
+ ret = filestate.managed(
+ filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.managed(
+ filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
+
+ ret = filestate.managed(
+ filename, group="nonexistinggroup", user="nonexistinguser"
+ )
+ assert ret["result"] is not False
+ assert "is not available" not in ret["comment"]
--
2.37.2
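
Behaviourally, the single change to _check_user() above is: when the state runs in test mode, a missing user or group is only logged as a warning and no error string is returned, so managed/copy/directory/recurse no longer report result: False during a dry run. A condensed, illustrative version of that rule (the real helper resolves the names via __salt__ and __opts__):

import logging

log = logging.getLogger(__name__)

def check_user(uid, gid, test_mode):
    # Sketch of the patched helper: uid/gid are "" when the lookup failed.
    err = ""
    if uid == "":
        err += "User is not available. "
    if gid == "":
        err += "Group is not available"
    if err and test_mode:
        # In test mode only warn; let the state proceed with its dry-run result.
        log.warning(err)
        return ""
    return err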


@@ -0,0 +1,37 @@
From 61d9b5e4ceaa0f5feb7fc364c9089cb624006812 Mon Sep 17 00:00:00 2001
From: Alexander Graul <agraul@suse.com>
Date: Tue, 12 Jul 2022 14:02:58 +0200
Subject: [PATCH] Fix test_ipc unit tests
---
tests/unit/transport/test_ipc.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py
index 79b49f9406..7177b7f6c4 100644
--- a/tests/unit/transport/test_ipc.py
+++ b/tests/unit/transport/test_ipc.py
@@ -107,8 +107,8 @@ class IPCMessagePubSubCase(salt.ext.tornado.testing.AsyncTestCase):
self.stop()
# Now let both waiting data at once
- client1.read_async(handler)
- client2.read_async(handler)
+ client1.read_async()
+ client2.read_async()
self.pub_channel.publish("TEST")
self.wait()
self.assertEqual(len(call_cnt), 2)
@@ -150,7 +150,7 @@ class IPCMessagePubSubCase(salt.ext.tornado.testing.AsyncTestCase):
pass
try:
- ret1 = yield client1.read_async(handler)
+ ret1 = yield client1.read_async()
self.wait()
except StreamClosedError as ex:
assert False, "StreamClosedError was raised inside the Future"
--
2.36.1


@@ -0,0 +1,820 @@
From 7803275a8aaeedf2124706f51b6a54cfcfb2d032 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Thu, 1 Sep 2022 14:45:13 +0300
Subject: [PATCH] Fix the regression in schedule module releasded in
3004 (bsc#1202631)
Co-authored-by: Gareth J. Greenaway <gareth@saltstack.com>
---
changelog/61324.changed | 1 +
salt/modules/schedule.py | 449 ++++++++++++++------
tests/pytests/unit/modules/test_schedule.py | 138 +++++-
3 files changed, 442 insertions(+), 146 deletions(-)
create mode 100644 changelog/61324.changed
diff --git a/changelog/61324.changed b/changelog/61324.changed
new file mode 100644
index 0000000000..d67051a8da
--- /dev/null
+++ b/changelog/61324.changed
@@ -0,0 +1 @@
+Adding the ability to add, delete, purge, and modify Salt scheduler jobs when the Salt minion is not running.
diff --git a/salt/modules/schedule.py b/salt/modules/schedule.py
index bcd64f2851..913a101ea6 100644
--- a/salt/modules/schedule.py
+++ b/salt/modules/schedule.py
@@ -15,6 +15,7 @@ import salt.utils.event
import salt.utils.files
import salt.utils.odict
import salt.utils.yaml
+import yaml
try:
import dateutil.parser as dateutil_parser
@@ -64,7 +65,35 @@ SCHEDULE_CONF = [
]
-def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
+def _get_schedule_config_file():
+ """
+ Return the minion schedule configuration file
+ """
+ config_dir = __opts__.get("conf_dir", None)
+ if config_dir is None and "conf_file" in __opts__:
+ config_dir = os.path.dirname(__opts__["conf_file"])
+ if config_dir is None:
+ config_dir = salt.syspaths.CONFIG_DIR
+
+ minion_d_dir = os.path.join(
+ config_dir,
+ os.path.dirname(
+ __opts__.get(
+ "default_include",
+ salt.config.DEFAULT_MINION_OPTS["default_include"],
+ )
+ ),
+ )
+
+ if not os.path.isdir(minion_d_dir):
+ os.makedirs(minion_d_dir)
+
+ return os.path.join(minion_d_dir, "_schedule.conf")
+
+
+def list_(
+ show_all=False, show_disabled=True, where=None, return_yaml=True, offline=False
+):
"""
List the jobs currently scheduled on the minion
@@ -83,24 +112,33 @@ def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
"""
schedule = {}
- try:
- with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
- res = __salt__["event.fire"](
- {"func": "list", "where": where}, "manage_schedule"
- )
- if res:
- event_ret = event_bus.get_event(
- tag="/salt/minion/minion_schedule_list_complete", wait=30
+ if offline:
+ schedule_config = _get_schedule_config_file()
+ if os.path.exists(schedule_config):
+ with salt.utils.files.fopen(schedule_config) as fp_:
+ schedule_yaml = fp_.read()
+ if schedule_yaml:
+ schedule_contents = yaml.safe_load(schedule_yaml)
+ schedule = schedule_contents.get("schedule", {})
+ else:
+ try:
+ with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+ res = __salt__["event.fire"](
+ {"func": "list", "where": where}, "manage_schedule"
)
- if event_ret and event_ret["complete"]:
- schedule = event_ret["schedule"]
- except KeyError:
- # Effectively a no-op, since we can't really return without an event system
- ret = {}
- ret["comment"] = "Event module not available. Schedule list failed."
- ret["result"] = True
- log.debug("Event module not available. Schedule list failed.")
- return ret
+ if res:
+ event_ret = event_bus.get_event(
+ tag="/salt/minion/minion_schedule_list_complete", wait=30
+ )
+ if event_ret and event_ret["complete"]:
+ schedule = event_ret["schedule"]
+ except KeyError:
+ # Effectively a no-op, since we can't really return without an event system
+ ret = {}
+ ret["comment"] = "Event module not available. Schedule list failed."
+ ret["result"] = True
+ log.debug("Event module not available. Schedule list failed.")
+ return ret
_hidden = ["enabled", "skip_function", "skip_during_range"]
for job in list(schedule.keys()): # iterate over a copy since we will mutate it
@@ -139,14 +177,11 @@ def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
# remove _seconds from the listing
del schedule[job]["_seconds"]
- if schedule:
- if return_yaml:
- tmp = {"schedule": schedule}
- return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
- else:
- return schedule
+ if return_yaml:
+ tmp = {"schedule": schedule}
+ return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
- return {"schedule": {}}
+ return schedule
def is_enabled(name=None):
@@ -186,11 +221,18 @@ def purge(**kwargs):
.. code-block:: bash
salt '*' schedule.purge
+
+ # Purge jobs on Salt minion
+ salt '*' schedule.purge
+
"""
- ret = {"comment": [], "result": True}
+ ret = {"comment": [], "changes": {}, "result": True}
- for name in list_(show_all=True, return_yaml=False):
+ current_schedule = list_(
+ show_all=True, return_yaml=False, offline=kwargs.get("offline")
+ )
+ for name in pycopy.deepcopy(current_schedule):
if name == "enabled":
continue
if name.startswith("__"):
@@ -202,37 +244,65 @@ def purge(**kwargs):
"Job: {} would be deleted from schedule.".format(name)
)
else:
- persist = kwargs.get("persist", True)
+ if kwargs.get("offline"):
+ del current_schedule[name]
- try:
- with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
- res = __salt__["event.fire"](
- {"name": name, "func": "delete", "persist": persist},
- "manage_schedule",
- )
- if res:
- event_ret = event_bus.get_event(
- tag="/salt/minion/minion_schedule_delete_complete", wait=30
+ ret["comment"].append("Deleted job: {} from schedule.".format(name))
+ ret["changes"][name] = "removed"
+
+ else:
+ persist = kwargs.get("persist", True)
+ try:
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"name": name, "func": "delete", "persist": persist},
+ "manage_schedule",
)
- if event_ret and event_ret["complete"]:
- _schedule_ret = event_ret["schedule"]
- if name not in _schedule_ret:
- ret["result"] = True
- ret["comment"].append(
- "Deleted job: {} from schedule.".format(name)
- )
- else:
- ret["comment"].append(
- "Failed to delete job {} from schedule.".format(
- name
+ if res:
+ event_ret = event_bus.get_event(
+ tag="/salt/minion/minion_schedule_delete_complete",
+ wait=30,
+ )
+ if event_ret and event_ret["complete"]:
+ _schedule_ret = event_ret["schedule"]
+ if name not in _schedule_ret:
+ ret["result"] = True
+ ret["changes"][name] = "removed"
+ ret["comment"].append(
+ "Deleted job: {} from schedule.".format(name)
)
- )
- ret["result"] = True
+ else:
+ ret["comment"].append(
+ "Failed to delete job {} from schedule.".format(
+ name
+ )
+ )
+ ret["result"] = True
+
+ except KeyError:
+ # Effectively a no-op, since we can't really return without an event system
+ ret["comment"] = "Event module not available. Schedule add failed."
+ ret["result"] = True
+
+ # wait until the end to write file in offline mode
+ if kwargs.get("offline"):
+ schedule_conf = _get_schedule_config_file()
+
+ try:
+ with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+ fp_.write(
+ salt.utils.stringutils.to_bytes(
+ salt.utils.yaml.safe_dump({"schedule": current_schedule})
+ )
+ )
+ except OSError:
+ log.error(
+ "Failed to persist the updated schedule",
+ exc_info_on_loglevel=logging.DEBUG,
+ )
- except KeyError:
- # Effectively a no-op, since we can't really return without an event system
- ret["comment"] = "Event module not available. Schedule add failed."
- ret["result"] = True
return ret
@@ -245,6 +315,10 @@ def delete(name, **kwargs):
.. code-block:: bash
salt '*' schedule.delete job1
+
+ # Delete job on Salt minion when the Salt minion is not running
+ salt '*' schedule.delete job1
+
"""
ret = {
@@ -260,45 +334,86 @@ def delete(name, **kwargs):
ret["comment"] = "Job: {} would be deleted from schedule.".format(name)
ret["result"] = True
else:
- persist = kwargs.get("persist", True)
+ if kwargs.get("offline"):
+ current_schedule = list_(
+ show_all=True,
+ where="opts",
+ return_yaml=False,
+ offline=kwargs.get("offline"),
+ )
- if name in list_(show_all=True, where="opts", return_yaml=False):
- event_data = {"name": name, "func": "delete", "persist": persist}
- elif name in list_(show_all=True, where="pillar", return_yaml=False):
- event_data = {
- "name": name,
- "where": "pillar",
- "func": "delete",
- "persist": False,
- }
- else:
- ret["comment"] = "Job {} does not exist.".format(name)
- return ret
+ del current_schedule[name]
- try:
- with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
- res = __salt__["event.fire"](event_data, "manage_schedule")
- if res:
- event_ret = event_bus.get_event(
- tag="/salt/minion/minion_schedule_delete_complete",
- wait=30,
+ schedule_conf = _get_schedule_config_file()
+
+ try:
+ with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+ fp_.write(
+ salt.utils.stringutils.to_bytes(
+ salt.utils.yaml.safe_dump({"schedule": current_schedule})
+ )
)
- if event_ret and event_ret["complete"]:
- schedule = event_ret["schedule"]
- if name not in schedule:
- ret["result"] = True
- ret["comment"] = "Deleted Job {} from schedule.".format(
- name
- )
- ret["changes"][name] = "removed"
- else:
- ret[
- "comment"
- ] = "Failed to delete job {} from schedule.".format(name)
- return ret
- except KeyError:
- # Effectively a no-op, since we can't really return without an event system
- ret["comment"] = "Event module not available. Schedule add failed."
+ except OSError:
+ log.error(
+ "Failed to persist the updated schedule",
+ exc_info_on_loglevel=logging.DEBUG,
+ )
+
+ ret["result"] = True
+ ret["comment"] = "Deleted Job {} from schedule.".format(name)
+ ret["changes"][name] = "removed"
+ else:
+ persist = kwargs.get("persist", True)
+
+ if name in list_(
+ show_all=True,
+ where="opts",
+ return_yaml=False,
+ offline=kwargs.get("offline"),
+ ):
+ event_data = {"name": name, "func": "delete", "persist": persist}
+ elif name in list_(
+ show_all=True,
+ where="pillar",
+ return_yaml=False,
+ offline=kwargs.get("offline"),
+ ):
+ event_data = {
+ "name": name,
+ "where": "pillar",
+ "func": "delete",
+ "persist": False,
+ }
+ else:
+ ret["comment"] = "Job {} does not exist.".format(name)
+ return ret
+
+ try:
+ with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+ res = __salt__["event.fire"](event_data, "manage_schedule")
+ if res:
+ event_ret = event_bus.get_event(
+ tag="/salt/minion/minion_schedule_delete_complete",
+ wait=30,
+ )
+ if event_ret and event_ret["complete"]:
+ schedule = event_ret["schedule"]
+ if name not in schedule:
+ ret["result"] = True
+ ret["comment"] = "Deleted Job {} from schedule.".format(
+ name
+ )
+ ret["changes"][name] = "removed"
+ else:
+ ret[
+ "comment"
+ ] = "Failed to delete job {} from schedule.".format(
+ name
+ )
+ return ret
+ except KeyError:
+ # Effectively a no-op, since we can't really return without an event system
+ ret["comment"] = "Event module not available. Schedule add failed."
return ret
@@ -438,6 +553,10 @@ def add(name, **kwargs):
salt '*' schedule.add job1 function='test.ping' seconds=3600
# If function have some arguments, use job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
+
+ # Add job to Salt minion when the Salt minion is not running
+ salt '*' schedule.add job1 function='test.ping' seconds=3600 offline=True
+
"""
ret = {
@@ -445,8 +564,11 @@ def add(name, **kwargs):
"result": False,
"changes": {},
}
+ current_schedule = list_(
+ show_all=True, return_yaml=False, offline=kwargs.get("offline")
+ )
- if name in list_(show_all=True, return_yaml=False):
+ if name in current_schedule:
ret["comment"] = "Job {} already exists in schedule.".format(name)
ret["result"] = False
return ret
@@ -486,32 +608,56 @@ def add(name, **kwargs):
ret["comment"] = "Job: {} would be added to schedule.".format(name)
ret["result"] = True
else:
- try:
- with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
- res = __salt__["event.fire"](
- {
- "name": name,
- "schedule": schedule_data,
- "func": "add",
- "persist": persist,
- },
- "manage_schedule",
+ if kwargs.get("offline"):
+ current_schedule.update(schedule_data)
+
+ schedule_conf = _get_schedule_config_file()
+
+ try:
+ with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+ fp_.write(
+ salt.utils.stringutils.to_bytes(
+ salt.utils.yaml.safe_dump({"schedule": current_schedule})
+ )
+ )
+ except OSError:
+ log.error(
+ "Failed to persist the updated schedule",
+ exc_info_on_loglevel=logging.DEBUG,
)
- if res:
- event_ret = event_bus.get_event(
- tag="/salt/minion/minion_schedule_add_complete",
- wait=30,
+
+ ret["result"] = True
+ ret["comment"] = "Added job: {} to schedule.".format(name)
+ ret["changes"][name] = "added"
+ else:
+ try:
+ with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+ res = __salt__["event.fire"](
+ {
+ "name": name,
+ "schedule": schedule_data,
+ "func": "add",
+ "persist": persist,
+ },
+ "manage_schedule",
)
- if event_ret and event_ret["complete"]:
- schedule = event_ret["schedule"]
- if name in schedule:
- ret["result"] = True
- ret["comment"] = "Added job: {} to schedule.".format(name)
- ret["changes"][name] = "added"
- return ret
- except KeyError:
- # Effectively a no-op, since we can't really return without an event system
- ret["comment"] = "Event module not available. Schedule add failed."
+ if res:
+ event_ret = event_bus.get_event(
+ tag="/salt/minion/minion_schedule_add_complete",
+ wait=30,
+ )
+ if event_ret and event_ret["complete"]:
+ schedule = event_ret["schedule"]
+ if name in schedule:
+ ret["result"] = True
+ ret["comment"] = "Added job: {} to schedule.".format(
+ name
+ )
+ ret["changes"][name] = "added"
+ return ret
+ except KeyError:
+ # Effectively a no-op, since we can't really return without an event system
+ ret["comment"] = "Event module not available. Schedule add failed."
return ret
@@ -524,6 +670,10 @@ def modify(name, **kwargs):
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
+
+ # Modify job on Salt minion when the Salt minion is not running
+ salt '*' schedule.modify job1 function='test.ping' seconds=3600 offline=True
+
"""
ret = {"comment": "", "changes": {}, "result": True}
@@ -549,7 +699,9 @@ def modify(name, **kwargs):
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
- current_schedule = list_(show_all=True, return_yaml=False)
+ current_schedule = list_(
+ show_all=True, return_yaml=False, offline=kwargs.get("offline")
+ )
if name not in current_schedule:
ret["comment"] = "Job {} does not exist in schedule.".format(name)
@@ -566,8 +718,7 @@ def modify(name, **kwargs):
_current["seconds"] = _current.pop("_seconds")
# Copy _current _new, then update values from kwargs
- _new = pycopy.deepcopy(_current)
- _new.update(kwargs)
+ _new = build_schedule_item(name, **kwargs)
# Remove test from kwargs, it's not a valid schedule option
_new.pop("test", None)
@@ -587,29 +738,51 @@ def modify(name, **kwargs):
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be modified in schedule.".format(name)
else:
- persist = kwargs.get("persist", True)
- if name in list_(show_all=True, where="opts", return_yaml=False):
- event_data = {
- "name": name,
- "schedule": _new,
- "func": "modify",
- "persist": persist,
- }
- elif name in list_(show_all=True, where="pillar", return_yaml=False):
- event_data = {
- "name": name,
- "schedule": _new,
- "where": "pillar",
- "func": "modify",
- "persist": False,
- }
+ if kwargs.get("offline"):
+ current_schedule[name].update(_new)
- out = __salt__["event.fire"](event_data, "manage_schedule")
- if out:
+ schedule_conf = _get_schedule_config_file()
+
+ try:
+ with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+ fp_.write(
+ salt.utils.stringutils.to_bytes(
+ salt.utils.yaml.safe_dump({"schedule": current_schedule})
+ )
+ )
+ except OSError:
+ log.error(
+ "Failed to persist the updated schedule",
+ exc_info_on_loglevel=logging.DEBUG,
+ )
+
+ ret["result"] = True
ret["comment"] = "Modified job: {} in schedule.".format(name)
+
else:
- ret["comment"] = "Failed to modify job {} in schedule.".format(name)
- ret["result"] = False
+ persist = kwargs.get("persist", True)
+ if name in list_(show_all=True, where="opts", return_yaml=False):
+ event_data = {
+ "name": name,
+ "schedule": _new,
+ "func": "modify",
+ "persist": persist,
+ }
+ elif name in list_(show_all=True, where="pillar", return_yaml=False):
+ event_data = {
+ "name": name,
+ "schedule": _new,
+ "where": "pillar",
+ "func": "modify",
+ "persist": False,
+ }
+
+ out = __salt__["event.fire"](event_data, "manage_schedule")
+ if out:
+ ret["comment"] = "Modified job: {} in schedule.".format(name)
+ else:
+ ret["comment"] = "Failed to modify job {} in schedule.".format(name)
+ ret["result"] = False
return ret
diff --git a/tests/pytests/unit/modules/test_schedule.py b/tests/pytests/unit/modules/test_schedule.py
index e6cb134982..02914be82f 100644
--- a/tests/pytests/unit/modules/test_schedule.py
+++ b/tests/pytests/unit/modules/test_schedule.py
@@ -8,7 +8,8 @@ import pytest
import salt.modules.schedule as schedule
import salt.utils.odict
from salt.utils.event import SaltEvent
-from tests.support.mock import MagicMock, patch
+from salt.utils.odict import OrderedDict
+from tests.support.mock import MagicMock, call, mock_open, patch
log = logging.getLogger(__name__)
@@ -29,6 +30,11 @@ def sock_dir(tmp_path):
return str(tmp_path / "test-socks")
+@pytest.fixture
+def schedule_config_file(tmp_path):
+ return "/etc/salt/minion.d/_schedule.conf"
+
+
@pytest.fixture
def configure_loader_modules():
return {schedule: {}}
@@ -36,24 +42,56 @@ def configure_loader_modules():
# 'purge' function tests: 1
@pytest.mark.slow_test
-def test_purge(sock_dir):
+def test_purge(sock_dir, job1, schedule_config_file):
"""
Test if it purge all the jobs currently scheduled on the minion.
"""
+ _schedule_data = {"job1": job1}
with patch.dict(schedule.__opts__, {"schedule": {}, "sock_dir": sock_dir}):
mock = MagicMock(return_value=True)
with patch.dict(schedule.__salt__, {"event.fire": mock}):
_ret_value = {"complete": True, "schedule": {}}
with patch.object(SaltEvent, "get_event", return_value=_ret_value):
- assert schedule.purge() == {
- "comment": ["Deleted job: schedule from schedule."],
+ with patch.object(
+ schedule, "list_", MagicMock(return_value=_schedule_data)
+ ):
+ assert schedule.purge() == {
+ "comment": ["Deleted job: job1 from schedule."],
+ "changes": {"job1": "removed"},
+ "result": True,
+ }
+
+ _schedule_data = {"job1": job1, "job2": job1, "job3": job1}
+ comm = [
+ "Deleted job: job1 from schedule.",
+ "Deleted job: job2 from schedule.",
+ "Deleted job: job3 from schedule.",
+ ]
+
+ changes = {"job1": "removed", "job2": "removed", "job3": "removed"}
+
+ with patch.dict(
+ schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+ ):
+ with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+ with patch.object(
+ schedule, "list_", MagicMock(return_value=_schedule_data)
+ ):
+ assert schedule.purge(offline=True) == {
+ "comment": comm,
+ "changes": changes,
"result": True,
}
+ _call = call(b"schedule: {}\n")
+ write_calls = fopen_mock.filehandles[schedule_config_file][
+ 0
+ ].write._mock_mock_calls
+ assert _call in write_calls
# 'delete' function tests: 1
@pytest.mark.slow_test
-def test_delete(sock_dir):
+def test_delete(sock_dir, job1, schedule_config_file):
"""
Test if it delete a job from the minion's schedule.
"""
@@ -68,6 +106,28 @@ def test_delete(sock_dir):
"result": False,
}
+ _schedule_data = {"job1": job1}
+ comm = "Deleted Job job1 from schedule."
+ changes = {"job1": "removed"}
+ with patch.dict(
+ schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+ ):
+ with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+ with patch.object(
+ schedule, "list_", MagicMock(return_value=_schedule_data)
+ ):
+ assert schedule.delete("job1", offline="True") == {
+ "comment": comm,
+ "changes": changes,
+ "result": True,
+ }
+
+ _call = call(b"schedule: {}\n")
+ write_calls = fopen_mock.filehandles[schedule_config_file][
+ 0
+ ].write._mock_mock_calls
+ assert _call in write_calls
+
# 'build_schedule_item' function tests: 1
def test_build_schedule_item(sock_dir):
@@ -120,7 +180,7 @@ def test_build_schedule_item_invalid_when(sock_dir):
@pytest.mark.slow_test
-def test_add(sock_dir):
+def test_add(sock_dir, schedule_config_file):
"""
Test if it add a job to the schedule.
"""
@@ -163,6 +223,24 @@ def test_add(sock_dir):
"result": True,
}
+ comm1 = "Added job: job3 to schedule."
+ changes1 = {"job3": "added"}
+ with patch.dict(
+ schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+ ):
+ with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+ assert schedule.add(
+ "job3", function="test.ping", seconds=3600, offline="True"
+ ) == {"comment": comm1, "changes": changes1, "result": True}
+
+ _call = call(
+ b"schedule:\n job3: {function: test.ping, seconds: 3600, maxrunning: 1, name: job3, enabled: true,\n jid_include: true}\n"
+ )
+ write_calls = fopen_mock.filehandles[schedule_config_file][
+ 1
+ ].write._mock_mock_calls
+ assert _call in write_calls
+
# 'run_job' function tests: 1
@@ -444,7 +522,7 @@ def test_copy(sock_dir, job1):
@pytest.mark.slow_test
-def test_modify(sock_dir):
+def test_modify(sock_dir, job1, schedule_config_file):
"""
Test if modifying job to the schedule.
"""
@@ -564,7 +642,6 @@ def test_modify(sock_dir):
for key in [
"maxrunning",
"function",
- "seconds",
"jid_include",
"name",
"enabled",
@@ -586,6 +663,51 @@ def test_modify(sock_dir):
ret = schedule.modify("job2", function="test.version", test=True)
assert ret == expected5
+ _schedule_data = {"job1": job1}
+ comm = "Modified job: job1 in schedule."
+ changes = {"job1": "removed"}
+
+ changes = {
+ "job1": {
+ "new": OrderedDict(
+ [
+ ("function", "test.version"),
+ ("maxrunning", 1),
+ ("name", "job1"),
+ ("enabled", True),
+ ("jid_include", True),
+ ]
+ ),
+ "old": OrderedDict(
+ [
+ ("function", "test.ping"),
+ ("maxrunning", 1),
+ ("name", "job1"),
+ ("jid_include", True),
+ ("enabled", True),
+ ]
+ ),
+ }
+ }
+ with patch.dict(
+ schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+ ):
+ with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+ with patch.object(
+ schedule, "list_", MagicMock(return_value=_schedule_data)
+ ):
+ assert schedule.modify(
+ "job1", function="test.version", offline="True"
+ ) == {"comment": comm, "changes": changes, "result": True}
+
+ _call = call(
+ b"schedule:\n job1: {enabled: true, function: test.version, jid_include: true, maxrunning: 1,\n name: job1}\n"
+ )
+ write_calls = fopen_mock.filehandles[schedule_config_file][
+ 0
+ ].write._mock_mock_calls
+ assert _call in write_calls
+
# 'is_enabled' function tests: 1
--
2.37.2
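
The large diff above boils down to one new code path: each schedule function accepts offline=True and, instead of firing events at a running minion, reads and rewrites the persisted schedule in minion.d/_schedule.conf directly. A simplified sketch of that round trip; the path and helper names are illustrative, and the real code derives the directory from __opts__ and writes through salt.utils.files.fopen:

import os
import yaml

SCHEDULE_CONF = "/etc/salt/minion.d/_schedule.conf"  # illustrative default

def load_offline_schedule(path=SCHEDULE_CONF):
    # What list_(..., offline=True) does: read the YAML file if it exists.
    if not os.path.exists(path):
        return {}
    with open(path) as fp_:
        contents = yaml.safe_load(fp_.read()) or {}
    return contents.get("schedule", {})

def save_offline_schedule(schedule, path=SCHEDULE_CONF):
    # What add/delete/modify/purge do with offline=True: rewrite the whole file.
    with open(path, "w") as fp_:
        fp_.write(yaml.safe_dump({"schedule": schedule}, default_flow_style=False))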


@@ -0,0 +1,106 @@
From 6c1c81aba71711632a14b725426077f9183065e9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
<psuarezhernandez@suse.com>
Date: Thu, 6 Oct 2022 10:55:50 +0100
Subject: [PATCH] fopen: Workaround bad buffering for binary mode
(#563)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
A lot of code assumes Python 2.x behavior for buffering, in which 1 is a
special value meaning line buffered.
Python 3 makes this value unusable, so fallback to the default buffering
size, and report these calls to be fixed.
Fixes: https://github.com/saltstack/salt/issues/57584
Do not drop buffering from kwargs to avoid errors
Add unit test around linebuffering in binary mode
Add changelog file
Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Co-authored-by: Ismael Luceno <iluceno@suse.de>
---
changelog/62817.fixed | 1 +
salt/utils/files.py | 8 ++++++++
tests/pytests/unit/utils/test_files.py | 13 ++++++++++++-
3 files changed, 21 insertions(+), 1 deletion(-)
create mode 100644 changelog/62817.fixed
diff --git a/changelog/62817.fixed b/changelog/62817.fixed
new file mode 100644
index 0000000000..ff335f2916
--- /dev/null
+++ b/changelog/62817.fixed
@@ -0,0 +1 @@
+Prevent annoying RuntimeWarning message about line buffering (buffering=1) not being supported in binary mode
diff --git a/salt/utils/files.py b/salt/utils/files.py
index 1cf636a753..3c57cce713 100644
--- a/salt/utils/files.py
+++ b/salt/utils/files.py
@@ -6,6 +6,7 @@ Functions for working with files
import codecs
import contextlib
import errno
+import io
import logging
import os
import re
@@ -382,6 +383,13 @@ def fopen(*args, **kwargs):
if not binary and not kwargs.get("newline", None):
kwargs["newline"] = ""
+ # Workaround callers with bad buffering setting for binary files
+ if kwargs.get("buffering") == 1 and "b" in kwargs.get("mode", ""):
+ log.debug(
+ "Line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used"
+ )
+ kwargs["buffering"] = io.DEFAULT_BUFFER_SIZE
+
f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage
if is_fcntl_available():
diff --git a/tests/pytests/unit/utils/test_files.py b/tests/pytests/unit/utils/test_files.py
index fd88167b16..bd18bc5750 100644
--- a/tests/pytests/unit/utils/test_files.py
+++ b/tests/pytests/unit/utils/test_files.py
@@ -4,11 +4,12 @@ Unit Tests for functions located in salt/utils/files.py
import copy
+import io
import os
import pytest
import salt.utils.files
-from tests.support.mock import patch
+from tests.support.mock import MagicMock, patch
def test_safe_rm():
@@ -74,6 +75,16 @@ def test_fopen_with_disallowed_fds():
)
+def test_fopen_binary_line_buffering(tmp_path):
+ tmp_file = os.path.join(tmp_path, "foobar")
+ with patch("builtins.open") as open_mock, patch(
+ "salt.utils.files.is_fcntl_available", MagicMock(return_value=False)
+ ):
+ salt.utils.files.fopen(os.path.join(tmp_path, "foobar"), mode="b", buffering=1)
+ assert open_mock.called
+ assert open_mock.call_args[1]["buffering"] == io.DEFAULT_BUFFER_SIZE
+
+
def _create_temp_structure(temp_directory, structure):
for folder, files in structure.items():
current_directory = os.path.join(temp_directory, folder)
--
2.37.3

View File

@@ -0,0 +1,213 @@
From b4945a0608b3d8996e8b5593dcc458c15b11d6ba Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Wed, 14 Sep 2022 14:57:29 +0300
Subject: [PATCH] Ignore non utf8 characters while reading files with
core grains module (bsc#1202165)
* Ignore UnicodeDecodeError on reading files with core grains
* Add tests for non utf8 chars in cmdline
* Blacken modified lines
* Fix the tests
* Add changelog entry
* Change ignore to surrogateescape for kernelparameters
* Turn static test files to dynamic
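
For illustration, a minimal sketch of how the two error handlers used in this
patch treat a non-UTF-8 byte (the byte string below is made up, not actual
/proc contents):

raw = b'BOOTABLE_FLAG="\x80" TEST_KEY=VAL'

# errors="ignore" silently drops the undecodable byte
# (used for /proc/1/environ and /proc/1/cmdline).
assert raw.decode("utf-8", errors="ignore") == 'BOOTABLE_FLAG="" TEST_KEY=VAL'

# errors="surrogateescape" keeps it as the surrogate '\udc80' so the value
# can still be round-tripped (used for /proc/cmdline in kernelparams()).
assert "\udc80" in raw.decode("utf-8", errors="surrogateescape")
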
---
changelog/62633.fixed | 1 +
salt/grains/core.py | 12 ++-
tests/pytests/unit/grains/test_core.py | 118 +++++++++++++++++++++++++
3 files changed, 128 insertions(+), 3 deletions(-)
create mode 100644 changelog/62633.fixed
diff --git a/changelog/62633.fixed b/changelog/62633.fixed
new file mode 100644
index 0000000000..1ab74f9122
--- /dev/null
+++ b/changelog/62633.fixed
@@ -0,0 +1 @@
+Prevent possible tracebacks in core grains module by ignoring non utf8 characters in /proc/1/environ, /proc/1/cmdline, /proc/cmdline
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 9530a43fc5..b543144da2 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1093,7 +1093,9 @@ def _virtual(osdata):
if ("virtual_subtype" not in grains) or (grains["virtual_subtype"] != "LXC"):
if os.path.isfile("/proc/1/environ"):
try:
- with salt.utils.files.fopen("/proc/1/environ", "r") as fhr:
+ with salt.utils.files.fopen(
+ "/proc/1/environ", "r", errors="ignore"
+ ) as fhr:
fhr_contents = fhr.read()
if "container=lxc" in fhr_contents:
grains["virtual"] = "container"
@@ -1911,7 +1913,9 @@ def os_data():
grains["init"] = "systemd"
except OSError:
try:
- with salt.utils.files.fopen("/proc/1/cmdline") as fhr:
+ with salt.utils.files.fopen(
+ "/proc/1/cmdline", "r", errors="ignore"
+ ) as fhr:
init_cmdline = fhr.read().replace("\x00", " ").split()
except OSError:
pass
@@ -3160,7 +3164,9 @@ def kernelparams():
return {}
else:
try:
- with salt.utils.files.fopen("/proc/cmdline", "r") as fhr:
+ with salt.utils.files.fopen(
+ "/proc/cmdline", "r", errors="surrogateescape"
+ ) as fhr:
cmdline = fhr.read()
grains = {"kernelparams": []}
for data in [
diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py
index 84dd97d62f..e640a07f76 100644
--- a/tests/pytests/unit/grains/test_core.py
+++ b/tests/pytests/unit/grains/test_core.py
@@ -11,6 +11,7 @@ import os
import pathlib
import platform
import socket
+import tempfile
import textwrap
from collections import namedtuple
@@ -2635,6 +2636,38 @@ def test_kernelparams_return_linux(cmdline, expectation):
assert core.kernelparams() == expectation
+@pytest.mark.skip_unless_on_linux
+def test_kernelparams_return_linux_non_utf8():
+ _salt_utils_files_fopen = salt.utils.files.fopen
+
+ expected = {
+ "kernelparams": [
+ ("TEST_KEY1", "VAL1"),
+ ("TEST_KEY2", "VAL2"),
+ ("BOOTABLE_FLAG", "\udc80"),
+ ("TEST_KEY_NOVAL", None),
+ ("TEST_KEY3", "3"),
+ ]
+ }
+
+ with tempfile.TemporaryDirectory() as tempdir:
+
+ def _open_mock(file_name, *args, **kwargs):
+ return _salt_utils_files_fopen(
+ os.path.join(tempdir, "cmdline"), *args, **kwargs
+ )
+
+ with salt.utils.files.fopen(
+ os.path.join(tempdir, "cmdline"),
+ "wb",
+ ) as cmdline_fh, patch("salt.utils.files.fopen", _open_mock):
+ cmdline_fh.write(
+ b'TEST_KEY1=VAL1 TEST_KEY2=VAL2 BOOTABLE_FLAG="\x80" TEST_KEY_NOVAL TEST_KEY3=3\n'
+ )
+ cmdline_fh.close()
+ assert core.kernelparams() == expected
+
+
def test_linux_gpus():
"""
Test GPU detection on Linux systems
@@ -2837,3 +2870,88 @@ def test_virtual_set_virtual_ec2():
assert virtual_grains["virtual"] == "kvm"
assert "virtual_subtype" not in virtual_grains
+
+
+@pytest.mark.skip_on_windows
+def test_linux_proc_files_with_non_utf8_chars():
+ _salt_utils_files_fopen = salt.utils.files.fopen
+
+ empty_mock = MagicMock(return_value={})
+
+ with tempfile.TemporaryDirectory() as tempdir:
+
+ def _mock_open(filename, *args, **kwargs):
+ return _salt_utils_files_fopen(
+ os.path.join(tempdir, "cmdline-1"), *args, **kwargs
+ )
+
+ with salt.utils.files.fopen(
+ os.path.join(tempdir, "cmdline-1"),
+ "wb",
+ ) as cmdline_fh, patch("os.path.isfile", return_value=False), patch(
+ "salt.utils.files.fopen", _mock_open
+ ), patch.dict(
+ core.__salt__,
+ {
+ "cmd.retcode": salt.modules.cmdmod.retcode,
+ "cmd.run": MagicMock(return_value=""),
+ },
+ ), patch.object(
+ core, "_linux_bin_exists", return_value=False
+ ), patch.object(
+ core, "_parse_lsb_release", return_value=empty_mock
+ ), patch.object(
+ core, "_parse_os_release", return_value=empty_mock
+ ), patch.object(
+ core, "_hw_data", return_value=empty_mock
+ ), patch.object(
+ core, "_virtual", return_value=empty_mock
+ ), patch.object(
+ core, "_bsd_cpudata", return_value=empty_mock
+ ), patch.object(
+ os, "stat", side_effect=OSError()
+ ):
+ cmdline_fh.write(
+ b"/usr/lib/systemd/systemd\x00--switched-root\x00--system\x00--deserialize\x0028\x80\x00"
+ )
+ cmdline_fh.close()
+ os_grains = core.os_data()
+ assert os_grains != {}
+
+
+@pytest.mark.skip_on_windows
+def test_virtual_linux_proc_files_with_non_utf8_chars():
+ _salt_utils_files_fopen = salt.utils.files.fopen
+
+ def _is_file_mock(filename):
+ if filename == "/proc/1/environ":
+ return True
+ return False
+
+ with tempfile.TemporaryDirectory() as tempdir:
+
+ def _mock_open(filename, *args, **kwargs):
+ return _salt_utils_files_fopen(
+ os.path.join(tempdir, "environ"), *args, **kwargs
+ )
+
+ with salt.utils.files.fopen(
+ os.path.join(tempdir, "environ"),
+ "wb",
+ ) as environ_fh, patch("os.path.isfile", _is_file_mock), patch(
+ "salt.utils.files.fopen", _mock_open
+ ), patch.object(
+ salt.utils.path, "which", MagicMock(return_value=None)
+ ), patch.dict(
+ core.__salt__,
+ {
+ "cmd.run_all": MagicMock(
+ return_value={"retcode": 1, "stderr": "", "stdout": ""}
+ ),
+ "cmd.run": MagicMock(return_value=""),
+ },
+ ):
+ environ_fh.write(b"KEY1=VAL1 KEY2=VAL2\x80 KEY2=VAL2")
+ environ_fh.close()
+ virt_grains = core._virtual({"kernel": "Linux"})
+ assert virt_grains == {"virtual": "physical"}
--
2.37.3

View File

@@ -0,0 +1,414 @@
From 7b4f5007b7e6a35386d197afe53d02c8d7b41d53 Mon Sep 17 00:00:00 2001
From: Daniel Mach <daniel.mach@gmail.com>
Date: Thu, 6 Oct 2022 11:58:23 +0200
Subject: [PATCH] Make pass renderer configurable & other fixes (#532)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* pass: Use a secure way of handling pass arguments
The original code would fail on pass paths with spaces,
because they would be split into multiple arguments.
* pass: Strip only trailing newline characters from the secret
* pass: Do not modify $HOME env globally
Just set $HOME for calling the pass binary
to avoid affecting anything outside the pass renderer.
* pass: Use pass executable path from _get_pass_exec()
* Make the pass renderer more configurable
1. Allow the pass renderer to fail during pillar rendering
when a secret corresponding to a pass path cannot be fetched.
For this we add a master config variable pass_strict_fetch.
2. Allow a prefix for variables that should be processed
with the pass renderer.
For this we add a master config variable pass_variable_prefix.
3. Allow configuring pass' GNUPGHOME and PASSWORD_STORE_DIR
environment variables.
For this we add master config variables pass_gnupghome and pass_dir.
* Add tests for the pass renderer
* pass: Handle FileNotFoundError when pass binary is not available
Co-authored-by: Marcus Rückert <darix@nordisch.org>
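
For illustration, a minimal sketch of the secure invocation style this patch
moves to (fetch_secret() is a hypothetical helper covering only argv handling
and env scoping, not the prefix or strict-fetch logic):

import os
from subprocess import PIPE, Popen

def fetch_secret(pass_exec, pass_path, pass_dir=None, gnupghome=None):
    # Build argv as a list so a pass path containing spaces stays one argument.
    cmd = [pass_exec, "show", pass_path.strip()]
    # Scope HOME/PASSWORD_STORE_DIR/GNUPGHOME to the child process only,
    # instead of mutating os.environ globally.
    env = os.environ.copy()
    env["HOME"] = os.path.expanduser("~")
    if pass_dir:
        env["PASSWORD_STORE_DIR"] = pass_dir
    if gnupghome:
        env["GNUPGHOME"] = gnupghome
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
    out, err = proc.communicate()
    if proc.returncode or not out:
        raise RuntimeError(err.decode("utf-8", errors="replace"))
    # Strip only trailing newline characters, preserving other whitespace.
    return out.decode("utf-8").rstrip("\r\n")
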
---
changelog/62120.added | 4 +
changelog/62120.fixed | 4 +
salt/config/__init__.py | 12 ++
salt/renderers/pass.py | 104 ++++++++++++--
tests/pytests/unit/renderers/test_pass.py | 164 ++++++++++++++++++++++
5 files changed, 274 insertions(+), 14 deletions(-)
create mode 100644 changelog/62120.added
create mode 100644 changelog/62120.fixed
create mode 100644 tests/pytests/unit/renderers/test_pass.py
diff --git a/changelog/62120.added b/changelog/62120.added
new file mode 100644
index 0000000000..4303d124f0
--- /dev/null
+++ b/changelog/62120.added
@@ -0,0 +1,4 @@
+Config option pass_variable_prefix allows to distinguish variables that contain paths to pass secrets.
+Config option pass_strict_fetch allows to error out when a secret cannot be fetched from pass.
+Config option pass_dir allows setting the PASSWORD_STORE_DIR env for pass.
+Config option pass_gnupghome allows setting the $GNUPGHOME env for pass.
diff --git a/changelog/62120.fixed b/changelog/62120.fixed
new file mode 100644
index 0000000000..22a9711383
--- /dev/null
+++ b/changelog/62120.fixed
@@ -0,0 +1,4 @@
+Pass executable path from _get_pass_exec() is used when calling the program.
+The $HOME env is no longer modified globally.
+Only trailing newlines are stripped from the fetched secret.
+Pass process arguments are handled in a secure way.
diff --git a/salt/config/__init__.py b/salt/config/__init__.py
index 2c42290598..9e72a5b4b7 100644
--- a/salt/config/__init__.py
+++ b/salt/config/__init__.py
@@ -960,6 +960,14 @@ VALID_OPTS = immutabletypes.freeze(
# Use Adler32 hashing algorithm for server_id (default False until Sodium, "adler32" after)
# Possible values are: False, adler32, crc32
"server_id_use_crc": (bool, str),
+ # pass renderer: Fetch secrets only for the template variables matching the prefix
+ "pass_variable_prefix": str,
+ # pass renderer: Whether to error out when unable to fetch a secret
+ "pass_strict_fetch": bool,
+ # pass renderer: Set GNUPGHOME env for Pass
+ "pass_gnupghome": str,
+ # pass renderer: Set PASSWORD_STORE_DIR env for Pass
+ "pass_dir": str,
}
)
@@ -1601,6 +1609,10 @@ DEFAULT_MASTER_OPTS = immutabletypes.freeze(
"fips_mode": False,
"detect_remote_minions": False,
"remote_minions_port": 22,
+ "pass_variable_prefix": "",
+ "pass_strict_fetch": False,
+ "pass_gnupghome": "",
+ "pass_dir": "",
}
)
diff --git a/salt/renderers/pass.py b/salt/renderers/pass.py
index 71b1021b96..ba0f152c23 100644
--- a/salt/renderers/pass.py
+++ b/salt/renderers/pass.py
@@ -45,6 +45,34 @@ Install pass binary
pass:
pkg.installed
+
+Salt master configuration options
+
+.. code-block:: yaml
+
+ # If the prefix is *not* set (default behavior), all template variables are
+ # considered for fetching secrets from Pass. Those that cannot be resolved
+ # to a secret are passed through.
+ #
+ # If the prefix is set, only the template variables with matching prefix are
+ # considered for fetching the secrets, other variables are passed through.
+ #
+ # For ease of use it is recommended to set the following options as well:
+ # renderer: 'jinja|yaml|pass'
+ # pass_strict_fetch: true
+ #
+ pass_variable_prefix: 'pass:'
+
+ # If set to 'true', error out when unable to fetch a secret for a template variable.
+ pass_strict_fetch: true
+
+ # Set GNUPGHOME env for Pass.
+ # Defaults to: ~/.gnupg
+ pass_gnupghome: <path>
+
+ # Set PASSWORD_STORE_DIR env for Pass.
+ # Defaults to: ~/.password-store
+ pass_dir: <path>
"""
@@ -54,7 +82,7 @@ from os.path import expanduser
from subprocess import PIPE, Popen
import salt.utils.path
-from salt.exceptions import SaltRenderError
+from salt.exceptions import SaltConfigurationError, SaltRenderError
log = logging.getLogger(__name__)
@@ -75,18 +103,71 @@ def _fetch_secret(pass_path):
Fetch secret from pass based on pass_path. If there is
any error, return back the original pass_path value
"""
- cmd = "pass show {}".format(pass_path.strip())
- log.debug("Fetching secret: %s", cmd)
+ pass_exec = _get_pass_exec()
+
+ # Make a backup in case we want to return the original value without stripped whitespaces
+ original_pass_path = pass_path
+
+ # Remove the optional prefix from pass path
+ pass_prefix = __opts__["pass_variable_prefix"]
+ if pass_prefix:
+ # If we do not see our prefix we do not want to process this variable
+ # and we return the unmodified pass path
+ if not pass_path.startswith(pass_prefix):
+ return pass_path
+
+ # strip the prefix from the start of the string
+ pass_path = pass_path[len(pass_prefix) :]
+
+ # The pass_strict_fetch option must be used with pass_variable_prefix
+ pass_strict_fetch = __opts__["pass_strict_fetch"]
+ if pass_strict_fetch and not pass_prefix:
+ msg = "The 'pass_strict_fetch' option requires 'pass_variable_prefix' option enabled"
+ raise SaltConfigurationError(msg)
+
+ # Remove whitespaces from the pass_path
+ pass_path = pass_path.strip()
- proc = Popen(cmd.split(" "), stdout=PIPE, stderr=PIPE)
- pass_data, pass_error = proc.communicate()
+ cmd = [pass_exec, "show", pass_path]
+ log.debug("Fetching secret: %s", " ".join(cmd))
+
+ # Make sure environment variable HOME is set, since Pass looks for the
+ # password-store under ~/.password-store.
+ env = os.environ.copy()
+ env["HOME"] = expanduser("~")
+
+ pass_dir = __opts__["pass_dir"]
+ if pass_dir:
+ env["PASSWORD_STORE_DIR"] = pass_dir
+
+ pass_gnupghome = __opts__["pass_gnupghome"]
+ if pass_gnupghome:
+ env["GNUPGHOME"] = pass_gnupghome
+
+ try:
+ proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
+ pass_data, pass_error = proc.communicate()
+ pass_returncode = proc.returncode
+ except OSError as e:
+ pass_data, pass_error = "", str(e)
+ pass_returncode = 1
# The version of pass used during development sent output to
# stdout instead of stderr even though its returncode was non zero.
- if proc.returncode or not pass_data:
- log.warning("Could not fetch secret: %s %s", pass_data, pass_error)
- pass_data = pass_path
- return pass_data.strip()
+ if pass_returncode or not pass_data:
+ try:
+ pass_error = pass_error.decode("utf-8")
+ except (AttributeError, ValueError):
+ pass
+ msg = "Could not fetch secret '{}' from the password store: {}".format(
+ pass_path, pass_error
+ )
+ if pass_strict_fetch:
+ raise SaltRenderError(msg)
+ else:
+ log.warning(msg)
+ return original_pass_path
+ return pass_data.rstrip("\r\n")
def _decrypt_object(obj):
@@ -108,9 +189,4 @@ def render(pass_info, saltenv="base", sls="", argline="", **kwargs):
"""
Fetch secret from pass based on pass_path
"""
- _get_pass_exec()
-
- # Make sure environment variable HOME is set, since Pass looks for the
- # password-store under ~/.password-store.
- os.environ["HOME"] = expanduser("~")
return _decrypt_object(pass_info)
diff --git a/tests/pytests/unit/renderers/test_pass.py b/tests/pytests/unit/renderers/test_pass.py
new file mode 100644
index 0000000000..74e822c7ec
--- /dev/null
+++ b/tests/pytests/unit/renderers/test_pass.py
@@ -0,0 +1,164 @@
+import importlib
+
+import pytest
+
+import salt.config
+import salt.exceptions
+from tests.support.mock import MagicMock, patch
+
+# "pass" is a reserved keyword, we need to import it differently
+pass_ = importlib.import_module("salt.renderers.pass")
+
+
+@pytest.fixture
+def configure_loader_modules():
+ return {
+ pass_: {
+ "__opts__": salt.config.DEFAULT_MASTER_OPTS.copy(),
+ "_get_pass_exec": MagicMock(return_value="/usr/bin/pass"),
+ }
+ }
+
+
+# The default behavior is that if fetching a secret from pass fails,
+# the value is passed through. Even the trailing newlines are preserved.
+def test_passthrough():
+ pass_path = "secret\n"
+ expected = pass_path
+ result = pass_.render(pass_path)
+
+ assert result == expected
+
+
+# Fetch a secret in the strict mode.
+def test_strict_fetch():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ }
+
+ popen_mock = MagicMock(spec=pass_.Popen)
+ popen_mock.return_value.communicate.return_value = ("password123456\n", "")
+ popen_mock.return_value.returncode = 0
+
+ mocks = {
+ "Popen": popen_mock,
+ }
+
+ pass_path = "pass:secret"
+ expected = "password123456"
+ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
+ result = pass_.render(pass_path)
+
+ assert result == expected
+
+
+# Fail to fetch a secret in the strict mode.
+def test_strict_fetch_fail():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ }
+
+ popen_mock = MagicMock(spec=pass_.Popen)
+ popen_mock.return_value.communicate.return_value = ("", "Secret not found")
+ popen_mock.return_value.returncode = 1
+
+ mocks = {
+ "Popen": popen_mock,
+ }
+
+ pass_path = "pass:secret"
+ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
+ with pytest.raises(salt.exceptions.SaltRenderError):
+ pass_.render(pass_path)
+
+
+# Passthrough a value that doesn't have a pass prefix.
+def test_strict_fetch_passthrough():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ }
+
+ pass_path = "variable-without-pass-prefix\n"
+ expected = pass_path
+ with patch.dict(pass_.__opts__, config):
+ result = pass_.render(pass_path)
+
+ assert result == expected
+
+
+# Fetch a secret in the strict mode. The pass path contains spaces.
+def test_strict_fetch_pass_path_with_spaces():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ }
+
+ popen_mock = MagicMock(spec=pass_.Popen)
+ popen_mock.return_value.communicate.return_value = ("password123456\n", "")
+ popen_mock.return_value.returncode = 0
+
+ mocks = {
+ "Popen": popen_mock,
+ }
+
+ pass_path = "pass:se cr et"
+ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
+ pass_.render(pass_path)
+
+ call_args, call_kwargs = popen_mock.call_args_list[0]
+ assert call_args[0] == ["/usr/bin/pass", "show", "se cr et"]
+
+
+# Fetch a secret in the strict mode. The secret contains leading and trailing whitespaces.
+def test_strict_fetch_secret_with_whitespaces():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ }
+
+ popen_mock = MagicMock(spec=pass_.Popen)
+ popen_mock.return_value.communicate.return_value = (" \tpassword123456\t \r\n", "")
+ popen_mock.return_value.returncode = 0
+
+ mocks = {
+ "Popen": popen_mock,
+ }
+
+ pass_path = "pass:secret"
+ expected = " \tpassword123456\t " # only the trailing newlines get stripped
+ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
+ result = pass_.render(pass_path)
+
+ assert result == expected
+
+
+# Test setting env variables based on config values:
+# - pass_gnupghome -> GNUPGHOME
+# - pass_dir -> PASSWORD_STORE_DIR
+def test_env():
+ config = {
+ "pass_variable_prefix": "pass:",
+ "pass_strict_fetch": True,
+ "pass_gnupghome": "/path/to/gnupghome",
+ "pass_dir": "/path/to/secretstore",
+ }
+
+ popen_mock = MagicMock(spec=pass_.Popen)
+ popen_mock.return_value.communicate.return_value = ("password123456\n", "")
+ popen_mock.return_value.returncode = 0
+
+ mocks = {
+ "Popen": popen_mock,
+ }
+
+ pass_path = "pass:secret"
+ expected = " \tpassword123456\t " # only the trailing newlines get stripped
+ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
+ result = pass_.render(pass_path)
+
+ call_args, call_kwargs = popen_mock.call_args_list[0]
+ assert call_kwargs["env"]["GNUPGHOME"] == config["pass_gnupghome"]
+ assert call_kwargs["env"]["PASSWORD_STORE_DIR"] == config["pass_dir"]
--
2.37.3

View File

@@ -0,0 +1,297 @@
From cedde1082b3a11b941327ba8e213f44637fb8a6b Mon Sep 17 00:00:00 2001
From: Witek Bedyk <witold.bedyk@suse.com>
Date: Mon, 29 Aug 2022 14:16:00 +0200
Subject: [PATCH] Retry if RPM lock is temporarily unavailable (#547)
* Retry if RPM lock is temporarily unavailable
Backported from saltstack/salt#62204
Signed-off-by: Witek Bedyk <witold.bedyk@suse.com>
* Sync formatting fixes from upstream
Signed-off-by: Witek Bedyk <witold.bedyk@suse.com>
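
For illustration, a minimal standalone sketch of the non-blocking lock probe
this patch adds (rpm_lock_held() is a hypothetical helper, not the _Zypper
method itself):

import errno
import fcntl

RPM_LOCK = "/var/lib/rpm/.rpm.lock"

def rpm_lock_held(path=RPM_LOCK):
    # Try to take the RPM database lock without blocking; EAGAIN means some
    # other process (e.g. a running rpm or zypper) currently holds it.
    try:
        with open(path, "w+") as fh:
            try:
                fcntl.lockf(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError as err:
                return err.errno == errno.EAGAIN
            fcntl.lockf(fh, fcntl.LOCK_UN)
            return False
    except OSError:
        # Lock file missing or not writable: nothing to wait for here.
        return False
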
---
changelog/62204.fixed | 1 +
salt/modules/zypperpkg.py | 117 +++++++++++++++++----------
tests/unit/modules/test_zypperpkg.py | 45 ++++++++++-
3 files changed, 115 insertions(+), 48 deletions(-)
create mode 100644 changelog/62204.fixed
diff --git a/changelog/62204.fixed b/changelog/62204.fixed
new file mode 100644
index 0000000000..59f1914593
--- /dev/null
+++ b/changelog/62204.fixed
@@ -0,0 +1 @@
+Fixed Zypper module failing on RPM lock file being temporarily unavailable.
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index b622105e15..7a249486fb 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -14,6 +14,7 @@ Package support for openSUSE via the zypper package manager
import configparser
import datetime
+import errno
import fnmatch
import logging
import os
@@ -39,6 +40,9 @@ from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationEr
# pylint: disable=import-error,redefined-builtin,no-name-in-module
from salt.utils.versions import LooseVersion
+if salt.utils.files.is_fcntl_available():
+ import fcntl
+
log = logging.getLogger(__name__)
HAS_ZYPP = False
@@ -106,6 +110,7 @@ class _Zypper:
XML_DIRECTIVES = ["-x", "--xmlout"]
# ZYPPER_LOCK is not affected by --root
ZYPPER_LOCK = "/var/run/zypp.pid"
+ RPM_LOCK = "/var/lib/rpm/.rpm.lock"
TAG_RELEASED = "zypper/released"
TAG_BLOCKED = "zypper/blocked"
@@ -276,7 +281,7 @@ class _Zypper:
and self.exit_code not in self.WARNING_EXIT_CODES
)
- def _is_lock(self):
+ def _is_zypper_lock(self):
"""
Is this is a lock error code?
@@ -284,6 +289,23 @@ class _Zypper:
"""
return self.exit_code == self.LOCK_EXIT_CODE
+ def _is_rpm_lock(self):
+ """
+ Is this an RPM lock error?
+ """
+ if salt.utils.files.is_fcntl_available():
+ if self.exit_code > 0 and os.path.exists(self.RPM_LOCK):
+ with salt.utils.files.fopen(self.RPM_LOCK, mode="w+") as rfh:
+ try:
+ fcntl.lockf(rfh, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except OSError as err:
+ if err.errno == errno.EAGAIN:
+ return True
+ else:
+ fcntl.lockf(rfh, fcntl.LOCK_UN)
+
+ return False
+
def _is_xml_mode(self):
"""
Is Zypper's output is in XML format?
@@ -306,7 +328,7 @@ class _Zypper:
raise CommandExecutionError("No output result from Zypper?")
self.exit_code = self.__call_result["retcode"]
- if self._is_lock():
+ if self._is_zypper_lock() or self._is_rpm_lock():
return False
if self._is_error():
@@ -387,48 +409,11 @@ class _Zypper:
if self._check_result():
break
- if os.path.exists(self.ZYPPER_LOCK):
- try:
- with salt.utils.files.fopen(self.ZYPPER_LOCK) as rfh:
- data = __salt__["ps.proc_info"](
- int(rfh.readline()),
- attrs=["pid", "name", "cmdline", "create_time"],
- )
- data["cmdline"] = " ".join(data["cmdline"])
- data["info"] = "Blocking process created at {}.".format(
- datetime.datetime.utcfromtimestamp(
- data["create_time"]
- ).isoformat()
- )
- data["success"] = True
- except Exception as err: # pylint: disable=broad-except
- data = {
- "info": (
- "Unable to retrieve information about blocking process: {}".format(
- err.message
- )
- ),
- "success": False,
- }
- else:
- data = {
- "info": "Zypper is locked, but no Zypper lock has been found.",
- "success": False,
- }
-
- if not data["success"]:
- log.debug("Unable to collect data about blocking process.")
- else:
- log.debug("Collected data about blocking process.")
-
- __salt__["event.fire_master"](data, self.TAG_BLOCKED)
- log.debug(
- "Fired a Zypper blocked event to the master with the data: %s", data
- )
- log.debug("Waiting 5 seconds for Zypper gets released...")
- time.sleep(5)
- if not was_blocked:
- was_blocked = True
+ if self._is_zypper_lock():
+ self._handle_zypper_lock_file()
+ if self._is_rpm_lock():
+ self._handle_rpm_lock_file()
+ was_blocked = True
if was_blocked:
__salt__["event.fire_master"](
@@ -451,6 +436,50 @@ class _Zypper:
or self.__call_result["stdout"]
)
+ def _handle_zypper_lock_file(self):
+ if os.path.exists(self.ZYPPER_LOCK):
+ try:
+ with salt.utils.files.fopen(self.ZYPPER_LOCK) as rfh:
+ data = __salt__["ps.proc_info"](
+ int(rfh.readline()),
+ attrs=["pid", "name", "cmdline", "create_time"],
+ )
+ data["cmdline"] = " ".join(data["cmdline"])
+ data["info"] = "Blocking process created at {}.".format(
+ datetime.datetime.utcfromtimestamp(
+ data["create_time"]
+ ).isoformat()
+ )
+ data["success"] = True
+ except Exception as err: # pylint: disable=broad-except
+ data = {
+ "info": (
+ "Unable to retrieve information about "
+ "blocking process: {}".format(err)
+ ),
+ "success": False,
+ }
+ else:
+ data = {
+ "info": "Zypper is locked, but no Zypper lock has been found.",
+ "success": False,
+ }
+ if not data["success"]:
+ log.debug("Unable to collect data about blocking process.")
+ else:
+ log.debug("Collected data about blocking process.")
+ __salt__["event.fire_master"](data, self.TAG_BLOCKED)
+ log.debug("Fired a Zypper blocked event to the master with the data: %s", data)
+ log.debug("Waiting 5 seconds for Zypper gets released...")
+ time.sleep(5)
+
+ def _handle_rpm_lock_file(self):
+ data = {"info": "RPM is temporarily locked.", "success": True}
+ __salt__["event.fire_master"](data, self.TAG_BLOCKED)
+ log.debug("Fired an RPM blocked event to the master with the data: %s", data)
+ log.debug("Waiting 5 seconds for RPM to get released...")
+ time.sleep(5)
+
__zypper__ = _Zypper()
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 3f1560a385..37d555844c 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -4,6 +4,7 @@
import configparser
+import errno
import io
import os
from xml.dom import minidom
@@ -97,7 +98,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
}
with patch.dict(
zypper.__salt__, {"cmd.run_all": MagicMock(return_value=ref_out)}
- ):
+ ), patch.object(zypper.__zypper__, "_is_rpm_lock", return_value=False):
upgrades = zypper.list_upgrades(refresh=False)
self.assertEqual(len(upgrades), 3)
for pkg, version in {
@@ -198,7 +199,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
' type="error">Booya!</message></stream>'
)
sniffer = RunSniffer(stdout=stdout_xml_snippet, retcode=1)
- with patch.dict("salt.modules.zypperpkg.__salt__", {"cmd.run_all": sniffer}):
+ with patch.dict(
+ "salt.modules.zypperpkg.__salt__", {"cmd.run_all": sniffer}
+ ), patch.object(zypper.__zypper__, "_is_rpm_lock", return_value=False):
with self.assertRaisesRegex(
CommandExecutionError, "^Zypper command failure: Booya!$"
):
@@ -232,7 +235,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(
"salt.modules.zypperpkg.__salt__",
{"cmd.run_all": MagicMock(return_value=ref_out)},
- ):
+ ), patch.object(zypper.__zypper__, "_is_rpm_lock", return_value=False):
with self.assertRaisesRegex(
CommandExecutionError,
"^Zypper command failure: Some handled zypper internal error{}Another"
@@ -245,7 +248,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(
"salt.modules.zypperpkg.__salt__",
{"cmd.run_all": MagicMock(return_value=ref_out)},
- ):
+ ), patch.object(zypper.__zypper__, "_is_rpm_lock", return_value=False):
with self.assertRaisesRegex(
CommandExecutionError, "^Zypper command failure: Check Zypper's logs.$"
):
@@ -2064,3 +2067,37 @@ pattern() = package-c"""
python_shell=False,
env={"ZYPP_READONLY_HACK": "1"},
)
+
+ def test_is_rpm_lock_no_error(self):
+ with patch.object(os.path, "exists", return_value=True):
+ self.assertFalse(zypper.__zypper__._is_rpm_lock())
+
+ def test_rpm_lock_does_not_exist(self):
+ if salt.utils.files.is_fcntl_available():
+ zypper.__zypper__.exit_code = 1
+ with patch.object(
+ os.path, "exists", return_value=False
+ ) as mock_path_exists:
+ self.assertFalse(zypper.__zypper__._is_rpm_lock())
+ mock_path_exists.assert_called_with(zypper.__zypper__.RPM_LOCK)
+ zypper.__zypper__._reset()
+
+ def test_rpm_lock_acquirable(self):
+ if salt.utils.files.is_fcntl_available():
+ zypper.__zypper__.exit_code = 1
+ with patch.object(os.path, "exists", return_value=True), patch(
+ "fcntl.lockf", side_effect=OSError(errno.EAGAIN, "")
+ ) as lockf_mock, patch("salt.utils.files.fopen", mock_open()):
+ self.assertTrue(zypper.__zypper__._is_rpm_lock())
+ lockf_mock.assert_called()
+ zypper.__zypper__._reset()
+
+ def test_rpm_lock_not_acquirable(self):
+ if salt.utils.files.is_fcntl_available():
+ zypper.__zypper__.exit_code = 1
+ with patch.object(os.path, "exists", return_value=True), patch(
+ "fcntl.lockf"
+ ) as lockf_mock, patch("salt.utils.files.fopen", mock_open()):
+ self.assertFalse(zypper.__zypper__._is_rpm_lock())
+ self.assertEqual(lockf_mock.call_count, 2)
+ zypper.__zypper__._reset()
--
2.37.2

View File

@@ -1,3 +1,43 @@
-------------------------------------------------------------------
Thu Oct 6 10:10:16 UTC 2022 - Pablo Suárez Hernández <pablo.suarezhernandez@suse.com>
- Make pass renderer configurable and fix detected issues
- Workaround fopen line buffering for binary mode (bsc#1203834)
- Handle non-UTF-8 bytes in core grains generation (bsc#1202165)
- Fix Syndic authentication errors (bsc#1199562)
- Added:
* make-pass-renderer-configurable-other-fixes-532.patch
* ignore-non-utf8-characters-while-reading-files-with-.patch
* fopen-workaround-bad-buffering-for-binary-mode-563.patch
* backport-syndic-auth-fixes.patch
-------------------------------------------------------------------
Thu Sep 1 12:43:39 UTC 2022 - Victor Zhestkov <victor.zhestkov@suse.com>
- Add Amazon EC2 detection for virtual grains (bsc#1195624)
- Fix the regression in schedule module released in 3004 (bsc#1202631)
- Fix state.apply in test mode with file state module
on user/group checking (bsc#1202167)
- Change the delimiters to prevent possible tracebacks
on some packages with dpkg_lowpkg
- Make zypperpkg retry if RPM lock is temporarily unavailable (bsc#1200596)
- Added:
* fix-the-regression-in-schedule-module-releasded-in-3.patch
* retry-if-rpm-lock-is-temporarily-unavailable-547.patch
* change-the-delimeters-to-prevent-possible-tracebacks.patch
* add-amazon-ec2-detection-for-virtual-grains-bsc-1195.patch
* fix-state.apply-in-test-mode-with-file-state-module-.patch
-------------------------------------------------------------------
Tue Jul 12 12:37:51 UTC 2022 - Alexander Graul <alexander.graul@suse.com>
- Fix test_ipc unit test
- Added:
* fix-test_ipc-unit-tests.patch
-------------------------------------------------------------------
Fri Jul 8 09:45:54 UTC 2022 - Pablo Suárez Hernández <pablo.suarezhernandez@suse.com>

View File

@@ -330,6 +330,26 @@ Patch90: fix-salt.states.file.managed-for-follow_symlinks-tru.patch
Patch91: fix-jinja2-contextfuntion-base-on-version-bsc-119874.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62209
Patch92: add-support-for-gpgautoimport-539.patch
# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/2b486d0484c51509e9972e581d97655f4f87852e
Patch93: fix-test_ipc-unit-tests.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62204
Patch94: retry-if-rpm-lock-is-temporarily-unavailable-547.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62519
Patch95: change-the-delimeters-to-prevent-possible-tracebacks.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/61847
Patch96: fix-state.apply-in-test-mode-with-file-state-module-.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/61423
Patch97: fix-the-regression-in-schedule-module-releasded-in-3.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62539
Patch98: add-amazon-ec2-detection-for-virtual-grains-bsc-1195.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/commit/643bd4b572ca97466e085ecd1d84da45b1684332
Patch99: backport-syndic-auth-fixes.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62633
Patch100: ignore-non-utf8-characters-while-reading-files-with-.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62817
Patch101: fopen-workaround-bad-buffering-for-binary-mode-563.patch
# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/62120
Patch102: make-pass-renderer-configurable-other-fixes-532.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-build
BuildRequires: logrotate