diff --git a/_lastrevision b/_lastrevision index 68b64c1..dea4ec1 100644 --- a/_lastrevision +++ b/_lastrevision @@ -1 +1 @@ -140a89771b14471ebcc8154d374b2be88c30eeb8 \ No newline at end of file +28e6e64e03c46f34ac800b2bc2fc78ec0176e0e4 \ No newline at end of file diff --git a/backport-batch-async-fixes-and-improvements-701.patch b/backport-batch-async-fixes-and-improvements-701.patch new file mode 100644 index 0000000..aabd309 --- /dev/null +++ b/backport-batch-async-fixes-and-improvements-701.patch @@ -0,0 +1,336 @@ +From 4fe7231fa99de8edc848367386f1a6a5192a0f58 Mon Sep 17 00:00:00 2001 +From: Victor Zhestkov +Date: Fri, 21 Feb 2025 11:15:42 +0100 +Subject: [PATCH] Backport batch async fixes and improvements (#701) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +* Backport batch async fixes and improvements + +Co-authored-by: Pablo Suárez Hernández + +* Align batch_async tests + +--------- + +Co-authored-by: Pablo Suárez Hernández +--- + salt/cli/batch_async.py | 60 ++++++++++++++++----- + tests/pytests/unit/cli/test_batch_async.py | 63 ++++++++-------------- + 2 files changed, 69 insertions(+), 54 deletions(-) + +diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py +index 5d49993faa..92215d0e04 100644 +--- a/salt/cli/batch_async.py ++++ b/salt/cli/batch_async.py +@@ -35,7 +35,7 @@ def batch_async_required(opts, minions, extra): + Check opts to identify if batch async is required for the operation. + """ + if not isinstance(minions, list): +- False ++ return False + batch_async_opts = opts.get("batch_async", {}) + batch_async_threshold = ( + batch_async_opts.get("threshold", 1) +@@ -179,6 +179,7 @@ class SharedEventsChannel: + self._used_by.discard(subscriber_id) + + def destroy_unused(self): ++ log.trace("SharedEventsChannel.destroy_unused called") + if self._used_by: + return False + self.master_event.remove_event_handler(self.__handle_event) +@@ -267,6 +268,7 @@ class BatchAsync: + self.ended = False + self.event = self.events_channel.master_event + self.scheduled = False ++ self._start_batch_on_timeout = None + + def __set_event_handler(self): + self.events_channel.subscribe( +@@ -278,6 +280,8 @@ class BatchAsync: + + @salt.ext.tornado.gen.coroutine + def __event_handler(self, tag, data, op): ++ # IMPORTANT: This function must run fast and not wait for any other task, ++ # otherwise it would cause events to be stuck. 
+         if not self.event:
+             return
+         try:
+@@ -285,7 +289,9 @@
+             if op == "ping_return":
+                 self.minions.add(minion)
+                 if self.targeted_minions == self.minions:
+-                    yield self.start_batch()
++                    # call start_batch and do not wait for the timeout, as we
++                    # received the responses from all the targets
++                    self.io_loop.add_callback(self.start_batch)
+             elif op == "find_job_return":
+                 if data.get("return", None):
+                     self.find_job_returned.add(minion)
+@@ -293,7 +299,8 @@
+                 if minion in self.active:
+                     self.active.remove(minion)
+                     self.done_minions.add(minion)
+-                    yield self.schedule_next()
++                    if not self.active:
++                        self.io_loop.add_callback(self.schedule_next)
+         except Exception as ex:  # pylint: disable=W0703
+             log.error(
+                 "Exception occured while processing event: %s: %s",
+@@ -333,7 +340,7 @@
+             )
+ 
+             if timedout_minions:
+-                yield self.schedule_next()
++                self.io_loop.add_callback(self.schedule_next)
+ 
+             if self.event and running:
+                 self.find_job_returned = self.find_job_returned.difference(running)
+@@ -344,6 +351,9 @@
+         """
+         Find if the job was finished on the minions
+         """
++        log.trace(
++            "[%s] BatchAsync.find_job called for minions: %s", self.batch_jid, minions
++        )
+         if not self.event:
+             return
+         not_done = minions.difference(self.done_minions).difference(
+@@ -386,6 +396,7 @@
+         if not self.event:
+             return
+         self.__set_event_handler()
++        # call test.ping for all the targets in an async way
+         ping_return = yield self.events_channel.local_client.run_job_async(
+             self.opts["tgt"],
+             "test.ping",
+@@ -398,19 +409,24 @@
+             listen=False,
+             **self.eauth,
+         )
++        # ping_return contains the actual targeted minions, but no responses
++        # from them, as the call is async; it only populates targeted_minions
+         self.targeted_minions = set(ping_return["minions"])
+-        # start batching even if not all minions respond to ping
+-        yield salt.ext.tornado.gen.sleep(
+-            self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
++        # schedule start_batch to run even if not all the minions responded;
++        # self.__event_handler can trigger start_batch earlier if all targets responded
++        self._start_batch_on_timeout = self.io_loop.call_later(
++            self.batch_presence_ping_timeout or self.opts["gather_job_timeout"],
++            self.start_batch,
+         )
+-        if self.event:
+-            yield self.start_batch()
+ 
+     @salt.ext.tornado.gen.coroutine
+     def start_batch(self):
+         """
+         Fire `salt/batch/*/start` and continue batch with `run_next`
+         """
++        if self._start_batch_on_timeout is not None:
++            self.io_loop.remove_timeout(self._start_batch_on_timeout)
++            self._start_batch_on_timeout = None
+         if self.initialized:
+             return
+         self.batch_size = get_bnum(self.opts, self.minions, True)
+@@ -431,6 +447,7 @@
+         """
+         End the batch and call safe closing
+         """
++        log.trace("[%s] BatchAsync.end_batch called", self.batch_jid)
+         left = self.minions.symmetric_difference(
+             self.done_minions.union(self.timedout_minions)
+         )
+@@ -452,10 +469,11 @@
+ 
+         # release to the IOLoop to allow the event to be published
+         # before closing batch async execution
+-        yield salt.ext.tornado.gen.sleep(1)
++        yield salt.ext.tornado.gen.sleep(0.03)
+         self.close_safe()
+ 
+     def close_safe(self):
++        log.trace("[%s] BatchAsync.close_safe called", self.batch_jid)
+         if self.events_channel is not None:
+             self.events_channel.unsubscribe(None, None, id(self))
+             self.events_channel.unuse(id(self))
+@@ -465,11 +483,22 @@
+ 
+     @salt.ext.tornado.gen.coroutine
+     def schedule_next(self):
++        log.trace("[%s] BatchAsync.schedule_next called", self.batch_jid)
+         if self.scheduled:
++            log.trace(
++                "[%s] BatchAsync.schedule_next -> Batch already scheduled, nothing to do.",
++                self.batch_jid,
++            )
+             return
+         self.scheduled = True
+-        # call later so that we maybe gather more returns
+-        yield salt.ext.tornado.gen.sleep(self.batch_delay)
++        if self._get_next():
++            # call later so that we maybe gather more returns
++            log.trace(
++                "[%s] BatchAsync.schedule_next delaying batch %s second(s).",
++                self.batch_jid,
++                self.batch_delay,
++            )
++            yield salt.ext.tornado.gen.sleep(self.batch_delay)
+         if self.event:
+             yield self.run_next()
+ 
+@@ -480,6 +509,11 @@
+         """
+         self.scheduled = False
+         next_batch = self._get_next()
++        log.trace(
++            "[%s] BatchAsync.run_next called. Next Batch -> %s",
++            self.batch_jid,
++            next_batch,
++        )
+         if not next_batch:
+             yield self.end_batch()
+             return
+@@ -504,7 +538,7 @@
+             yield salt.ext.tornado.gen.sleep(self.opts["timeout"])
+ 
+             # The batch can be done already at this point, which means no self.event
+-            if self.event:
++            if self.event and self.active.intersection(next_batch):
+                 yield self.find_job(set(next_batch))
+         except Exception as ex:  # pylint: disable=W0703
+             log.error(
+diff --git a/tests/pytests/unit/cli/test_batch_async.py b/tests/pytests/unit/cli/test_batch_async.py
+index bc871aba54..be8de692e6 100644
+--- a/tests/pytests/unit/cli/test_batch_async.py
++++ b/tests/pytests/unit/cli/test_batch_async.py
+@@ -85,11 +85,17 @@ def test_batch_start_on_batch_presence_ping_timeout(batch):
+     future.set_result({})
+     with patch.object(batch, "events_channel", MagicMock()), patch(
+         "salt.ext.tornado.gen.sleep", return_value=future
+-    ), patch.object(batch, "start_batch", return_value=future) as start_batch_mock:
++    ), patch.object(batch, "io_loop", MagicMock()), patch.object(
++        batch, "start_batch", return_value=future
++    ) as start_batch_mock:
+         batch.events_channel.local_client.run_job_async.return_value = future_ret
+         ret = batch.start()
+-        # assert start_batch is called
+-        start_batch_mock.assert_called_once()
++        # start_batch is scheduled to be called later
++        assert batch.io_loop.call_later.call_args[0] == (
++            batch.batch_presence_ping_timeout,
++            batch.start_batch,
++        )
++        assert batch._start_batch_on_timeout is not None
+         # assert test.ping called
+         assert batch.events_channel.local_client.run_job_async.call_args[0] == (
+             "*",
+@@ -109,16 +115,21 @@ def test_batch_start_on_gather_job_timeout(batch):
+     batch.batch_presence_ping_timeout = None
+     with patch.object(batch, "events_channel", MagicMock()), patch(
+         "salt.ext.tornado.gen.sleep", return_value=future
++    ), patch.object(
++        batch, "io_loop", MagicMock()
+     ), patch.object(
+         batch, "start_batch", return_value=future
+     ) as start_batch_mock, patch.object(
+         batch, "batch_presence_ping_timeout", None
+     ):
+         batch.events_channel.local_client.run_job_async.return_value = future_ret
+-        # ret = batch_async.start(batch)
+         ret = batch.start()
+-        # assert start_batch is called
+-        start_batch_mock.assert_called_once()
++        # start_batch is scheduled to be called later
++        assert batch.io_loop.call_later.call_args[0] == (
++            batch.opts["gather_job_timeout"],
++            batch.start_batch,
++        )
++        assert batch._start_batch_on_timeout is not None
+ 
+ 
+ def test_batch_fire_start_event(batch):
+@@ -271,34 +282,10 @@ def test_batch__event_handler_ping_return(batch):
+     assert batch.done_minions == set()
+ 
+ 
+-def test_batch__event_handler_call_start_batch_when_all_pings_return(batch):
+-    batch.targeted_minions = {"foo"}
+-    future = salt.ext.tornado.gen.Future()
+-    future.set_result({})
+-    with patch.object(batch, "start_batch", return_value=future) as start_batch_mock:
+-        batch.start()
+-        batch._BatchAsync__event_handler(
+-            "salt/job/1234/ret/foo", {"id": "foo"}, "ping_return"
+-        )
+-        start_batch_mock.assert_called_once()
+-
+-
+-def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(batch):
+-    batch.targeted_minions = {"foo", "bar"}
+-    future = salt.ext.tornado.gen.Future()
+-    future.set_result({})
+-    with patch.object(batch, "start_batch", return_value=future) as start_batch_mock:
+-        batch.start()
+-        batch._BatchAsync__event_handler(
+-            "salt/job/1234/ret/foo", {"id": "foo"}, "ping_return"
+-        )
+-        start_batch_mock.assert_not_called()
+-
+-
+ def test_batch__event_handler_batch_run_return(batch):
+     future = salt.ext.tornado.gen.Future()
+     future.set_result({})
+-    with patch.object(
++    with patch.object(batch, "io_loop", MagicMock()), patch.object(
+         batch, "schedule_next", return_value=future
+     ) as schedule_next_mock:
+         batch.start()
+@@ -308,7 +295,7 @@ def test_batch__event_handler_batch_run_return(batch):
+         )
+         assert batch.active == set()
+         assert batch.done_minions == {"foo"}
+-        schedule_next_mock.assert_called_once()
++        assert batch.io_loop.add_callback.call_args[0] == (batch.schedule_next,)
+ 
+ 
+ def test_batch__event_handler_find_job_return(batch):
+@@ -322,9 +309,7 @@
+ def test_batch_run_next_end_batch_when_no_next(batch):
+     future = salt.ext.tornado.gen.Future()
+     future.set_result({})
+-    with patch.object(
+-        batch, "_get_next", return_value={}
+-    ), patch.object(
++    with patch.object(batch, "_get_next", return_value={}), patch.object(
+         batch, "end_batch", return_value=future
+     ) as end_batch_mock:
+         batch.run_next()
+@@ -337,9 +322,7 @@ def test_batch_find_job(batch):
+     batch.minions = {"foo", "bar"}
+     with patch("salt.ext.tornado.gen.sleep", return_value=future), patch.object(
+         batch, "check_find_job", return_value=future
+-    ) as check_find_job_mock, patch.object(
+-        batch, "jid_gen", return_value="1236"
+-    ):
++    ) as check_find_job_mock, patch.object(batch, "jid_gen", return_value="1236"):
+         batch.events_channel.local_client.run_job_async.return_value = future
+         batch.find_job({"foo", "bar"})
+         assert check_find_job_mock.call_args[0] == (
+@@ -355,9 +338,7 @@ def test_batch_find_job_with_done_minions(batch):
+     batch.minions = {"foo", "bar"}
+     with patch("salt.ext.tornado.gen.sleep", return_value=future), patch.object(
+         batch, "check_find_job", return_value=future
+-    ) as check_find_job_mock, patch.object(
+-        batch, "jid_gen", return_value="1236"
+-    ):
++    ) as check_find_job_mock, patch.object(batch, "jid_gen", return_value="1236"):
+         batch.events_channel.local_client.run_job_async.return_value = future
+         batch.find_job({"foo", "bar"})
+         assert check_find_job_mock.call_args[0] == (
+-- 
+2.48.1
+
diff --git a/fixed-file-client-private-attribute-reference-on-sal.patch b/fixed-file-client-private-attribute-reference-on-sal.patch
new file mode 100644
index 0000000..11ecfae
--- /dev/null
+++ b/fixed-file-client-private-attribute-reference-on-sal.patch
@@ -0,0 +1,81 @@
+From 1772da828f40e36d2a9eceb7055a1fa1a2257830 Mon Sep 17 00:00:00 2001
+From: Georg
+Date: Fri, 21 Feb 2025 10:23:38 +0000
+Subject: [PATCH] Fixed file client private attribute reference on
+ `SaltMakoTemplateLookup` (#694)
+
+Fixes #64280
+
+Signed-off-by: Pedro Algarvio
+(cherry picked from commit 560ab52ccf94c7974d5a418dfbba7409e0493066)
+
+Co-authored-by: Pedro Algarvio
+---
+ changelog/64280.fixed.md              |  1 +
+ salt/utils/mako.py                    |  6 ++++--
+ tests/pytests/unit/utils/test_mako.py | 28 +++++++++++++++++++++++++++
+ 3 files changed, 33 insertions(+), 2 deletions(-)
+ create mode 100644 changelog/64280.fixed.md
+ create mode 100644 tests/pytests/unit/utils/test_mako.py
+
+diff --git a/changelog/64280.fixed.md b/changelog/64280.fixed.md
+new file mode 100644
+index 0000000000..5a9b905dd0
+--- /dev/null
++++ b/changelog/64280.fixed.md
+@@ -0,0 +1 @@
++Fixed file client private attribute reference on `SaltMakoTemplateLookup`
+diff --git a/salt/utils/mako.py b/salt/utils/mako.py
+index 037d5d86de..4397ae8cc7 100644
+--- a/salt/utils/mako.py
++++ b/salt/utils/mako.py
+@@ -99,8 +99,10 @@ if HAS_MAKO:
+             )
+ 
+         def destroy(self):
+-            if self.client:
++            if self._file_client:
++                file_client = self._file_client
++                self._file_client = None
+                 try:
+-                    self.client.destroy()
++                    file_client.destroy()
+                 except AttributeError:
+                     pass
+diff --git a/tests/pytests/unit/utils/test_mako.py b/tests/pytests/unit/utils/test_mako.py
+new file mode 100644
+index 0000000000..952cf44652
+--- /dev/null
++++ b/tests/pytests/unit/utils/test_mako.py
+@@ -0,0 +1,28 @@
++import pytest
++
++from tests.support.mock import Mock, call, patch
++
++pytest.importorskip("mako")
++
++# This import needs to be after the above importorskip so that no ImportError
++# is raised if Mako is not installed
++from salt.utils.mako import SaltMakoTemplateLookup
++
++
++def test_mako_template_lookup(minion_opts):
++    """
++    The shutdown method can be called without raising an exception when the
++    file_client does not have a destroy method
++    """
++    # Test SaltMakoTemplateLookup creating and destroying the file client created
++    file_client = Mock()
++    with patch("salt.fileclient.get_file_client", return_value=file_client):
++        loader = SaltMakoTemplateLookup(minion_opts)
++        assert loader._file_client is None
++        assert loader.file_client() is file_client
++        assert loader._file_client is file_client
++        try:
++            loader.destroy()
++        except AttributeError:
++            pytest.fail("Regression when calling SaltMakoTemplateLookup.destroy()")
++        assert file_client.mock_calls == [call.destroy()]
+-- 
+2.48.1
+
diff --git a/make-_auth-calls-visible-with-master-stats-696.patch b/make-_auth-calls-visible-with-master-stats-696.patch
new file mode 100644
index 0000000..3f90b08
--- /dev/null
+++ b/make-_auth-calls-visible-with-master-stats-696.patch
@@ -0,0 +1,142 @@
+From 32099b97c2fa549cb050d3ae618b5200c07328c8 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Fri, 21 Feb 2025 11:59:00 +0100
+Subject: [PATCH] Make `_auth` calls visible with master stats (#696)
+
+* Put _auth calls to the master stats
+
+* Add _auth master stats tests
+
+* test small fix
+---
+ salt/channel/server.py                    |  9 ++++--
+ salt/master.py                            |  5 ++++
+ tests/pytests/unit/channel/__init__.py    |  0
+ tests/pytests/unit/channel/test_server.py | 34 +++++++++++++++++++++++
+ tests/pytests/unit/test_master.py         | 25 +++++++++++++++++
+ 5 files changed, 70 insertions(+), 3 deletions(-)
+ create mode 100644 tests/pytests/unit/channel/__init__.py
+ create mode 100644 tests/pytests/unit/channel/test_server.py
+
+diff --git a/salt/channel/server.py b/salt/channel/server.py
+index f1b6f701a9..59da3a2dc2 100644
+--- a/salt/channel/server.py
++++ b/salt/channel/server.py
+@@ -9,6 +9,7 @@ import hashlib
+ import logging
+ import os
+ import shutil
++import time
+ 
+ import salt.crypt
+ import salt.ext.tornado.gen
+@@ -149,9 +150,11 @@ class ReqServerChannel:
+         # intercept the "_auth" commands, since the main daemon shouldn't know
+         # anything about our key auth
+         if payload["enc"] == "clear" and payload.get("load", {}).get("cmd") == "_auth":
+-            raise salt.ext.tornado.gen.Return(
+-                self._auth(payload["load"], sign_messages)
+-            )
++            start = time.time()
++            ret = self._auth(payload["load"], sign_messages)
++            if self.opts.get("master_stats", False):
++                yield self.payload_handler({"cmd": "_auth", "_start": start})
++            raise salt.ext.tornado.gen.Return(ret)
+ 
+         nonce = None
+         if version > 1:
+diff --git a/salt/master.py b/salt/master.py
+index 49cfb68860..c0cd9a366b 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -1036,6 +1036,11 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
+ 
+         :param dict payload: The payload route to the appropriate handler
+         """
++        if payload.get("cmd") == "_auth":
++            if self.opts["master_stats"]:
++                self.stats["_auth"]["runs"] += 1
++                self._post_stats(payload["_start"], "_auth")
++            return
+         key = payload["enc"]
+         load = payload["load"]
+         if key == "aes":
+diff --git a/tests/pytests/unit/channel/__init__.py b/tests/pytests/unit/channel/__init__.py
+new file mode 100644
+index 0000000000..e69de29bb2
+diff --git a/tests/pytests/unit/channel/test_server.py b/tests/pytests/unit/channel/test_server.py
+new file mode 100644
+index 0000000000..3fa5d94bea
+--- /dev/null
++++ b/tests/pytests/unit/channel/test_server.py
+@@ -0,0 +1,34 @@
++import time
++
++import pytest
++
++import salt.channel.server as server
++import salt.ext.tornado.gen
++from tests.support.mock import MagicMock, patch
++
++
++def test__auth_cmd_stats_passing():
++    req_server_channel = server.ReqServerChannel({"master_stats": True}, None)
++
++    fake_ret = {"enc": "clear", "load": b"FAKELOAD"}
++
++    def _auth_mock(*_, **__):
++        time.sleep(0.03)
++        return fake_ret
++
++    future = salt.ext.tornado.gen.Future()
++    future.set_result({})
++
++    with patch.object(req_server_channel, "_auth", _auth_mock):
++        req_server_channel.payload_handler = MagicMock(return_value=future)
++        req_server_channel.handle_message(
++            {"enc": "clear", "load": {"cmd": "_auth", "id": "minion"}}
++        )
++        cur_time = time.time()
++        req_server_channel.payload_handler.assert_called_once()
++        assert req_server_channel.payload_handler.call_args[0][0]["cmd"] == "_auth"
++        auth_call_duration = (
++            cur_time - req_server_channel.payload_handler.call_args[0][0]["_start"]
++        )
++        assert auth_call_duration >= 0.03
++        assert auth_call_duration < 0.05
+diff --git a/tests/pytests/unit/test_master.py b/tests/pytests/unit/test_master.py
+index 679229066d..7fccb24d73 100644
+--- a/tests/pytests/unit/test_master.py
++++ b/tests/pytests/unit/test_master.py
+@@ -282,3 +282,28 @@ def test_syndic_return_cache_dir_creation_traversal(encrypted_requests):
+     )
+     assert not (cachedir / "syndics").exists()
+     assert not (cachedir / "mamajama").exists()
++
++
++def test_collect__auth_to_master_stats():
++    """
++    Check that master stats collects _auth calls without calling either _handle_aes or _handle_clear
++    """
++    opts = {
++        "master_stats": True,
++        "master_stats_event_iter": 10,
++    }
++    req_channel_mock = MagicMock()
++    mworker = salt.master.MWorker(opts, {}, {}, [req_channel_mock])
++    with patch.object(mworker, "_handle_aes") as handle_aes_mock, patch.object(
++        mworker, "_handle_clear"
++    ) as handle_clear_mock:
++        mworker._handle_payload({"cmd": 
"_auth", "_start": time.time() - 0.02}) ++ assert mworker.stats["_auth"]["runs"] == 1 ++ assert mworker.stats["_auth"]["mean"] >= 0.02 ++ assert mworker.stats["_auth"]["mean"] < 0.04 ++ mworker._handle_payload({"cmd": "_auth", "_start": time.time() - 0.02}) ++ assert mworker.stats["_auth"]["runs"] == 2 ++ assert mworker.stats["_auth"]["mean"] >= 0.02 ++ assert mworker.stats["_auth"]["mean"] < 0.04 ++ handle_aes_mock.assert_not_called() ++ handle_clear_mock.assert_not_called() +-- +2.48.1 + diff --git a/repair-fstab_present-test-mode-702.patch b/repair-fstab_present-test-mode-702.patch new file mode 100644 index 0000000..5c67e09 --- /dev/null +++ b/repair-fstab_present-test-mode-702.patch @@ -0,0 +1,69 @@ +From 73d18711314738796e802a6d929f4b609cee1f67 Mon Sep 17 00:00:00 2001 +From: Georg +Date: Fri, 21 Feb 2025 10:26:25 +0000 +Subject: [PATCH] Repair fstab_present test mode (#702) + +Return no pending changes if the configuration already matches. + +Signed-off-by: Georg Pfuetzenreuter +(cherry picked from commit fc7ed2b53152ab255d7763f200e8d28d526c5e52) +--- + changelog/67065.fixed.md | 1 + + salt/states/mount.py | 1 + + tests/pytests/unit/states/test_mount.py | 6 +++--- + 3 files changed, 5 insertions(+), 3 deletions(-) + create mode 100644 changelog/67065.fixed.md + +diff --git a/changelog/67065.fixed.md b/changelog/67065.fixed.md +new file mode 100644 +index 0000000000..7b210dc297 +--- /dev/null ++++ b/changelog/67065.fixed.md +@@ -0,0 +1 @@ ++Repaired mount.fstab_present always returning pending changes +diff --git a/salt/states/mount.py b/salt/states/mount.py +index 36b9a16b5d..97dddbe3b0 100644 +--- a/salt/states/mount.py ++++ b/salt/states/mount.py +@@ -1228,6 +1228,7 @@ def fstab_present( + if out == "present": + msg = "{} entry is already in {}." + ret["comment"].append(msg.format(fs_file, config)) ++ ret["result"] = True + elif out == "new": + msg = "{} entry will be written in {}." + ret["comment"].append(msg.format(fs_file, config)) +diff --git a/tests/pytests/unit/states/test_mount.py b/tests/pytests/unit/states/test_mount.py +index 5e4d5274e8..382fe6d0e8 100644 +--- a/tests/pytests/unit/states/test_mount.py ++++ b/tests/pytests/unit/states/test_mount.py +@@ -701,7 +701,7 @@ def test_fstab_present_macos_test_present(): + """ + ret = { + "name": "/dev/sda1", +- "result": None, ++ "result": True, + "changes": {}, + "comment": ["/home entry is already in /etc/auto_salt."], + } +@@ -730,7 +730,7 @@ def test_fstab_present_aix_test_present(): + """ + ret = { + "name": "/dev/sda1", +- "result": None, ++ "result": True, + "changes": {}, + "comment": ["/home entry is already in /etc/filesystems."], + } +@@ -761,7 +761,7 @@ def test_fstab_present_test_present(): + """ + ret = { + "name": "/dev/sda1", +- "result": None, ++ "result": True, + "changes": {}, + "comment": ["/home entry is already in /etc/fstab."], + } +-- +2.48.1 + diff --git a/repair-virt_query-outputter-655.patch b/repair-virt_query-outputter-655.patch new file mode 100644 index 0000000..361e2a8 --- /dev/null +++ b/repair-virt_query-outputter-655.patch @@ -0,0 +1,304 @@ +From 325506774381cc8edadee9b2f43fd6733d4f9edb Mon Sep 17 00:00:00 2001 +From: Georg +Date: Fri, 21 Feb 2025 12:40:45 +0000 +Subject: [PATCH] Repair virt_query outputter (#655) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +* Repair virt_query outputter + +Existing code was not functional. +Only return if a dictionary is passed and reference the correct +data. 
+ +Signed-off-by: Georg Pfuetzenreuter +(cherry picked from commit e3c365ad8f385121aa878950e13892d986d79656) + +* Facilitate block devices in virt_query outputter + +Disk data in Libvirt VMs does not contain a disk size if the disk +references a block device. +Skip the field for such disks instead of failing with a key error. + +Signed-off-by: Georg Pfuetzenreuter +(cherry picked from commit ed73abd44117ad745e9c91f2b33caf04402b117c) + +* Add unit tests for virt_query outputter + +--------- + +Co-authored-by: Pablo Suárez Hernández +--- + changelog/65841.fixed.md | 1 + + salt/output/virt_query.py | 64 +++---- + tests/pytests/unit/output/test_virt_query.py | 176 +++++++++++++++++++ + 3 files changed, 210 insertions(+), 31 deletions(-) + create mode 100644 changelog/65841.fixed.md + create mode 100644 tests/pytests/unit/output/test_virt_query.py + +diff --git a/changelog/65841.fixed.md b/changelog/65841.fixed.md +new file mode 100644 +index 0000000000..7fb6336ea1 +--- /dev/null ++++ b/changelog/65841.fixed.md +@@ -0,0 +1 @@ ++Restore functionality of virt_query outputter and add support for block devices. +diff --git a/salt/output/virt_query.py b/salt/output/virt_query.py +index d20e6357e6..0f989fedfa 100644 +--- a/salt/output/virt_query.py ++++ b/salt/output/virt_query.py +@@ -12,35 +12,37 @@ def output(data, **kwargs): # pylint: disable=unused-argument + Display output for the salt-run virt.query function + """ + out = "" +- for id_ in data["data"]: +- out += "{}\n".format(id_) +- for vm_ in data["data"][id_]["vm_info"]: +- out += " {}\n".format(vm_) +- vm_data = data[id_]["vm_info"][vm_] +- if "cpu" in vm_data: +- out += " CPU: {}\n".format(vm_data["cpu"]) +- if "mem" in vm_data: +- out += " Memory: {}\n".format(vm_data["mem"]) +- if "state" in vm_data: +- out += " State: {}\n".format(vm_data["state"]) +- if "graphics" in vm_data: +- if vm_data["graphics"].get("type", "") == "vnc": +- out += " Graphics: vnc - {}:{}\n".format( +- id_, vm_data["graphics"]["port"] +- ) +- if "disks" in vm_data: +- for disk, d_data in vm_data["disks"].items(): +- out += " Disk - {}:\n".format(disk) +- out += " Size: {}\n".format(d_data["disk size"]) +- out += " File: {}\n".format(d_data["file"]) +- out += " File Format: {}\n".format(d_data["file format"]) +- if "nics" in vm_data: +- for mac in vm_data["nics"]: +- out += " Nic - {}:\n".format(mac) +- out += " Source: {}\n".format( +- vm_data["nics"][mac]["source"][ +- next(iter(vm_data["nics"][mac]["source"].keys())) +- ] +- ) +- out += " Type: {}\n".format(vm_data["nics"][mac]["type"]) ++ if isinstance(data, dict) and "event" in data: ++ for id_ in data["event"]["data"]: ++ out += "{}\n".format(id_) ++ for vm_ in data["event"]["data"][id_]["vm_info"]: ++ out += " {}\n".format(vm_) ++ vm_data = data["event"]["data"][id_]["vm_info"][vm_] ++ if "cpu" in vm_data: ++ out += " CPU: {}\n".format(vm_data["cpu"]) ++ if "mem" in vm_data: ++ out += " Memory: {}\n".format(vm_data["mem"]) ++ if "state" in vm_data: ++ out += " State: {}\n".format(vm_data["state"]) ++ if "graphics" in vm_data: ++ if vm_data["graphics"].get("type", "") == "vnc": ++ out += " Graphics: vnc - {}:{}\n".format( ++ id_, vm_data["graphics"]["port"] ++ ) ++ if "disks" in vm_data: ++ for disk, d_data in vm_data["disks"].items(): ++ out += " Disk - {}:\n".format(disk) ++ if "disk size" in d_data: ++ out += " Size: {}\n".format(d_data["disk size"]) ++ out += " File: {}\n".format(d_data["file"]) ++ out += " File Format: {}\n".format(d_data["file format"]) ++ if "nics" in vm_data: ++ for mac in 
vm_data["nics"]: ++ out += " NIC - {}:\n".format(mac) ++ out += " Source: {}\n".format( ++ vm_data["nics"][mac]["source"][ ++ next(iter(vm_data["nics"][mac]["source"].keys())) ++ ] ++ ) ++ out += " Type: {}\n".format(vm_data["nics"][mac]["type"]) + return out +diff --git a/tests/pytests/unit/output/test_virt_query.py b/tests/pytests/unit/output/test_virt_query.py +new file mode 100644 +index 0000000000..3f8814ee26 +--- /dev/null ++++ b/tests/pytests/unit/output/test_virt_query.py +@@ -0,0 +1,176 @@ ++""" ++unittests for virt_query outputter ++""" ++ ++import pytest ++ ++import salt.output.virt_query as virt_query ++from tests.support.mock import patch ++ ++ ++@pytest.fixture ++def configure_loader_modules(): ++ return {virt_query: {}} ++ ++ ++@pytest.fixture ++def data(): ++ return { ++ "suffix": "progress", ++ "event": { ++ "data": { ++ "mysystem": { ++ "freecpu": 14, ++ "freemem": 29566.0, ++ "node_info": { ++ "cpucores": 8, ++ "cpumhz": 1089, ++ "cpumodel": "x86_64", ++ "cpus": 16, ++ "cputhreads": 2, ++ "numanodes": 1, ++ "phymemory": 30846, ++ "sockets": 1, ++ }, ++ "vm_info": { ++ "vm1": { ++ "cpu": 2, ++ "cputime": 1214270000000, ++ "disks": { ++ "vda": { ++ "file": "default/vm1-main-disk", ++ "type": "disk", ++ "file format": "qcow2", ++ "virtual size": 214748364800, ++ "disk size": 1831731200, ++ "backing file": { ++ "file": "/var/lib/libvirt/images/sles15sp4o", ++ "file format": "qcow2", ++ }, ++ }, ++ "hdd": { ++ "file": "default/vm1-cloudinit-disk", ++ "type": "cdrom", ++ "file format": "raw", ++ "virtual size": 374784, ++ "disk size": 376832, ++ }, ++ }, ++ "graphics": { ++ "autoport": "yes", ++ "keymap": "None", ++ "listen": "0.0.0.0", ++ "port": "5900", ++ "type": "spice", ++ }, ++ "nics": { ++ "aa:bb:cc:dd:ee:ff": { ++ "type": "network", ++ "mac": "aa:bb:cc:dd:ee:ff", ++ "source": {"network": "default"}, ++ "model": "virtio", ++ "address": { ++ "type": "pci", ++ "domain": "0x0000", ++ "bus": "0x00", ++ "slot": "0x03", ++ "function": "0x0", ++ }, ++ } ++ }, ++ "uuid": "yyyyyy", ++ "loader": {"path": "None"}, ++ "on_crash": "destroy", ++ "on_reboot": "restart", ++ "on_poweroff": "destroy", ++ "maxMem": 1048576, ++ "mem": 1048576, ++ "state": "running", ++ }, ++ "uyuni-proxy": { ++ "cpu": 2, ++ "cputime": 0, ++ "disks": { ++ "vda": { ++ "file": "default/uyuni-proxy-main-disk", ++ "type": "disk", ++ "file format": "qcow2", ++ "virtual size": 214748364800, ++ "disk size": 4491255808, ++ "backing file": { ++ "file": "/var/lib/libvirt/images/leapmicro55o", ++ "file format": "qcow2", ++ }, ++ } ++ }, ++ "graphics": { ++ "autoport": "yes", ++ "keymap": "None", ++ "listen": "0.0.0.0", ++ "port": "None", ++ "type": "spice", ++ }, ++ "nics": { ++ "aa:bb:cc:dd:ee:aa": { ++ "type": "network", ++ "mac": "aa:bb:cc:dd:ee:aa", ++ "source": {"network": "default"}, ++ "model": "virtio", ++ "address": { ++ "type": "pci", ++ "domain": "0x0000", ++ "bus": "0x00", ++ "slot": "0x03", ++ "function": "0x0", ++ }, ++ } ++ }, ++ "uuid": "xxxxx", ++ "loader": {"path": "None"}, ++ "on_crash": "destroy", ++ "on_reboot": "restart", ++ "on_poweroff": "destroy", ++ "maxMem": 2097152, ++ "mem": 2097152, ++ "state": "shutdown", ++ }, ++ }, ++ } ++ }, ++ "outputter": "virt_query", ++ "_stamp": "2025-02-21T11:28:04.406561", ++ }, ++ } ++ ++ ++def test_default_output(data): ++ ret = virt_query.output(data) ++ expected = """mysystem ++ vm1 ++ CPU: 2 ++ Memory: 1048576 ++ State: running ++ Disk - vda: ++ Size: 1831731200 ++ File: default/vm1-main-disk ++ File Format: qcow2 ++ Disk - hdd: ++ Size: 376832 ++ File: 
default/vm1-cloudinit-disk
++      File Format: raw
++    NIC - aa:bb:cc:dd:ee:ff:
++      Source: default
++      Type: network
++  uyuni-proxy
++    CPU: 2
++    Memory: 2097152
++    State: shutdown
++    Disk - vda:
++      Size: 4491255808
++      File: default/uyuni-proxy-main-disk
++      File Format: qcow2
++    NIC - aa:bb:cc:dd:ee:aa:
++      Source: default
++      Type: network
++"""
++    assert expected == ret
+-- 
+2.48.1
+
diff --git a/salt.changes b/salt.changes
index 2f30007..0fff72c 100644
--- a/salt.changes
+++ b/salt.changes
@@ -1,3 +1,21 @@
+-------------------------------------------------------------------
+Fri Feb 21 12:46:01 UTC 2025 - Pablo Suárez Hernández
+
+- Fix virt_query outputter and add support for block devices
+- Make _auth calls visible with master stats
+- Repair mount.fstab_present always returning pending changes
+- Set virtual grain in Podman systemd container
+- Fix crash due to wrong client reference on `SaltMakoTemplateLookup`
+- Enhance batch async and fix some detected issues
+
+- Added:
+  * repair-virt_query-outputter-655.patch
+  * make-_auth-calls-visible-with-master-stats-696.patch
+  * repair-fstab_present-test-mode-702.patch
+  * set-virtual-grain-in-podman-systemd-container-703.patch
+  * fixed-file-client-private-attribute-reference-on-sal.patch
+  * backport-batch-async-fixes-and-improvements-701.patch
+
 -------------------------------------------------------------------
 Wed Feb 19 16:06:43 UTC 2025 - Pablo Suárez Hernández
diff --git a/salt.spec b/salt.spec
index b62943b..a6a6fdb 100644
--- a/salt.spec
+++ b/salt.spec
@@ -512,6 +512,18 @@ Patch153: fix-failed-to-stat-root-.gitconfig-issue-on-gitfs-bs.patch
 # PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/commit/58f448405b7f46505b2047ecda72abb42b6df9d1
 # PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/commit/79d4ff772a162b5b8e602e3437c13b90a25bc190
 Patch154: fix-tests-failures-after-repo.saltproject.io-depreca.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/60269
+Patch155: backport-batch-async-fixes-and-improvements-701.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/commit/560ab52ccf94c7974d5a418dfbba7409e0493066
+Patch156: fixed-file-client-private-attribute-reference-on-sal.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/67734
+Patch157: set-virtual-grain-in-podman-systemd-container-703.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/67066
+Patch158: repair-fstab_present-test-mode-702.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/67746
+Patch159: make-_auth-calls-visible-with-master-stats-696.patch
+# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/65843
+Patch160: repair-virt_query-outputter-655.patch
 ### IMPORTANT: The line below is used as a snippet marker. Do not touch it.
 ### SALT PATCHES LIST END
diff --git a/set-virtual-grain-in-podman-systemd-container-703.patch b/set-virtual-grain-in-podman-systemd-container-703.patch
new file mode 100644
index 0000000..452a89e
--- /dev/null
+++ b/set-virtual-grain-in-podman-systemd-container-703.patch
@@ -0,0 +1,84 @@
+From dde665763bd2f043022f9601dd25d0ca8aa716be Mon Sep 17 00:00:00 2001
+From: Georg
+Date: Fri, 21 Feb 2025 10:24:51 +0000
+Subject: [PATCH] Set virtual grain in Podman systemd container (#703)
+
+Correctly handle the systemd-detect-virt output to identify a Podman
+container running systemd as what it is instead of as a physical machine.
+ +Signed-off-by: Georg Pfuetzenreuter +(cherry picked from commit cf504a06859fb4a4fe9b8ebdd76380697f1f0c25) +--- + changelog/67733.fixed.md | 1 + + salt/grains/core.py | 4 ++++ + tests/pytests/unit/grains/test_core.py | 31 ++++++++++++++++++++++++++ + 3 files changed, 36 insertions(+) + create mode 100644 changelog/67733.fixed.md + +diff --git a/changelog/67733.fixed.md b/changelog/67733.fixed.md +new file mode 100644 +index 0000000000..242f65ec76 +--- /dev/null ++++ b/changelog/67733.fixed.md +@@ -0,0 +1 @@ ++Set correct virtual grain in systemd based Podman containers +diff --git a/salt/grains/core.py b/salt/grains/core.py +index 84d5b179dd..ceb142a7b8 100644 +--- a/salt/grains/core.py ++++ b/salt/grains/core.py +@@ -911,6 +911,10 @@ def _virtual(osdata): + grains["virtual"] = "container" + grains["virtual_subtype"] = "LXC" + break ++ elif "podman" in output: ++ grains["virtual"] = "container" ++ grains["virtual_subtype"] = "Podman" ++ break + elif "amazon" in output: + grains["virtual"] = "Nitro" + grains["virtual_subtype"] = "Amazon EC2" +diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py +index 3d2beaa2c9..072287248f 100644 +--- a/tests/pytests/unit/grains/test_core.py ++++ b/tests/pytests/unit/grains/test_core.py +@@ -1752,6 +1752,37 @@ def test_lxc_virtual_with_virt_what(): + assert ret["virtual_subtype"] == "LXC" + + ++@pytest.mark.skip_on_windows ++def test_podman_virtual_with_systemd_detect_virt(): ++ """ ++ Test if virtual grains are parsed correctly in Podman using systemd-detect-virt. ++ """ ++ ++ def _which_side_effect(path): ++ if path == "systemd-detect-virt": ++ return "/usr/bin/systemd-detect-virt" ++ return None ++ ++ with patch.object( ++ salt.utils.platform, "is_windows", MagicMock(return_value=False) ++ ), patch.object( ++ salt.utils.path, ++ "which", ++ MagicMock(return_value=True, side_effect=_which_side_effect), ++ ), patch.dict( ++ core.__salt__, ++ { ++ "cmd.run_all": MagicMock( ++ return_value={"pid": 78, "retcode": 0, "stderr": "", "stdout": "podman"} ++ ) ++ }, ++ ): ++ osdata = {"kernel": "test"} ++ ret = core._virtual(osdata) ++ assert ret["virtual"] == "container" ++ assert ret["virtual_subtype"] == "Podman" ++ ++ + @pytest.mark.skip_on_windows + def test_container_inside_virtual_machine(): + """ +-- +2.48.1 +