From 7a62c3fd3cf0199cb07f13ef4a3a366c7757d190dbd3020708c9bc713cd301bb Mon Sep 17 00:00:00 2001
From: Victor Zhestkov
Date: Wed, 19 Feb 2025 16:36:29 +0000
Subject: [PATCH 1/3] Enhancement of Salt packaging

- Enhancement of Salt packaging
  * Use update-alternatives for all salt scripts
  * Use flexible dependencies for the subpackages
  * Make salt-minion require the flavored zypp-plugin
  * Make zyppnotify use update-alternatives
  * Drop the unused yumnotify plugin
  * Add a dependency on python3-dnf-plugins-core for RHEL-based distributions
- Fix test failures after the "repo.saltproject.io" deprecation
- Added:
  * fix-tests-failures-after-repo.saltproject.io-depreca.patch

OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=272
---
.gitattributes | 23 +
.gitignore | 1 +
3005.1-implement-zypper-removeptf-573.patch | 505 ++
...pygit2.giterror-error-loading-known_.patch | 71 +
README.SUSE | 31 +
_lastrevision | 1 +
_multibuild | 3 +
_service | 20 +
...beacons-sources-config-pillar-grains.patch | 28 +
add-custom-suse-capabilities-as-grains.patch | 30 +
...variable-to-know-if-yum-is-invoked-f.patch | 83 +
...te-and-gpg-key-management-functions-.patch | 1216 +++
...ntextvars-dependency-in-salt.version.patch | 38 +
..._batch-to-clearfuncs-exposed-methods.patch | 26 +
...pport-with-venv-salt-minion-3004-493.patch | 795 ++
...eption-handling-on-minion-connection.patch | 41 +
...onfiguration-file-for-enabling-packa.patch | 26 +
add-support-for-gpgautoimport-539.patch | 369 +
...ive-grain-types-for-autosign_grains-.patch | 97 +
...-fileserver-roots-update-bsc-1218482.patch | 164 +
...rcontexts-to-be-returned-from-loader.patch | 73 +
allow-vendor-change-option-with-zypper.patch | 841 ++
async-batch-implementation.patch | 1149 +++
...with-dependencies-versions-bsc-12116.patch | 47 +
...rong-output-of-systemctl-version-bsc.patch | 153 +
...ve-syslogging-by-watchdog-cronjob-58.patch | 26 +
...eading-of-etc-salt-minion-bsc-122035.patch | 27 +
...file-directory-user-and-group-owners.patch | 112 +
...eters-to-prevent-possible-tracebacks.patch | 30 +
...-collection-of-lvm-grains-via-config.patch | 37 +
...n-info_installed-compatibility-50453.patch | 351 +
...ap-byte-stream-to-string-bsc-1219001.patch | 80 +
...name__-for-transactional_update-modu.patch | 39 +
...inks-to-set-proper-__cli-opt-bsc-121.patch | 101 +
....yml-and-.yaml-playbooks-bsc-1211888.patch | 188 +
...set-plugin-implementation-3002.2-450.patch | 130 +
...async-wrapper-calls-with-the-separat.patch | 254 +
...ad-message-pack-message-bsc-1213441-.patch | 155 +
...state-if-there-is-no-3rd-party-depen.patch | 46 +
...use-shell-sbin-nologin-in-requisites.patch | 39 +
...from-event.unpack-in-cli.batch_async.patch | 34 +
early-feature-support-config.patch | 3784 +++++++++
...-probes-for-salt-ssh-executions-bsc-.patch | 346 +
...-unix_socket-for-mysql-returners-bsc.patch | 68 +
...mechanism-after-salt-bundle-upgrade-.patch | 26 +
...on-garbage-filtering-bsc-1231605-688.patch | 79 +
...nscap-module-add-xccdf_eval-call-386.patch | 425 +
...ize-new-rich-rules-before-comparing-.patch | 180 +
fix-bsc-1065792.patch | 25 +
...of-sls-context-vars-when-trailing-do.patch | 69 +
fix-cve-2023-34049-bsc-1215157.patch | 1163 +++
...31-and-cve-2024-22232-bsc-1219430-bs.patch | 544 ++
fix-deprecated-code-677.patch | 166 +
...at-root-.gitconfig-issue-on-gitfs-bs.patch | 73 +
fix-for-suse-expanded-support-detection.patch | 39 +
..._-and-improve-cache-cleaning-bsc-119.patch | 2024 +++++
fix-issue-2068-test.patch | 52 +
...break-salt-in-python-3.12-and-3.13-6.patch | 71 +
...ing-minion-returns-in-batch-mode-360.patch | 30 +
...tion_order-opt-to-prevent-test-fails.patch | 62 +
...-salt-thin-directory-when-using-the-.patch | 50 +
...tests-and-allow-smooth-tests-executi.patch | 2695 +++++++
...ultiple-values-for-keyword-argument-.patch | 253 +
...ith-depending-client.ssh-on-psutil-b.patch | 53 +
...-opts-poisoning-bsc-1197637-3004-501.patch | 128 +
...ngs-and-testuite-for-python-3.11-635.patch | 3860 +++++++++
...tringutils.to_str-calls-to-make-it-w.patch | 141 +
...detected-in-salt-support-cli-module-.patch | 118 +
...sage-and-exclude-some-tests-to-run-w.patch | 243 +
...an-to-work-in-our-infrastructure-676.patch | 25 +
fix-test_system-flaky-setup_teardown-fn.patch | 44 +
...es-after-repo.saltproject.io-depreca.patch | 518 ++
...es-and-errors-when-detected-on-vm-ex.patch | 772 ++
...ake-them-running-with-salt-testsuite.patch | 841 ++
fix-the-aptpkg.py-unit-test-failure.patch | 25 +
...-regression-for-yumnotify-plugin-456.patch | 23 +
...on-of-user.present-state-when-group-.patch | 154 +
...context-for-salt-minion-service-bsc-.patch | 83 +
...int_exc-calls-for-test_pip_state-432.patch | 26 +
...user.list_groups-omits-remote-groups.patch | 265 +
...g-in-pass-renderer-and-make-it-more-.patch | 181 +
...ction-and-avoid-building-and-testing.patch | 58 +
...ns-for-vms-running-on-nutanix-ahv-bs.patch | 282 +
...-key-tests-and-test_suse-on-sle12-68.patch | 87 +
...est-fails-on-old-openssl-systems-682.patch | 261 +
...edir_basename-to-avoid-hash-collisio.patch | 833 ++
...n-logs-when-running-a-state-that-fai.patch | 121 +
fixes-for-python-3.10-502.patch | 44 +
...ger-flushing-already-closed-file-686.patch | 58 +
html.tar.bz2 | 3 +
...lling-for-batch-async-from-the-salt-.patch | 145 +
...broken-events-catching-and-reporting.patch | 202 +
...ndling-with-different-openssl-versio.patch | 98 +
...et-override-condition-with-venv_pip_.patch | 113 +
...alt.utils.json.find_json-bsc-1213293.patch | 204 +
include-aliases-in-the-fqdns-grains.patch | 138 +
...talled-works-without-status-attr-now.patch | 66 +
join-masters-if-it-is-a-list-671.patch | 105 +
...-platform-python-binary-in-rhel8-191.patch | 32 +
..._repos-compatible-on-enabled-disable.patch | 28 +
...g-seco.range-thread-safe-bsc-1211649.patch | 63 +
make-logging-calls-lighter.patch | 233 +
...-compatible-with-salt-3000-and-older.patch | 37 +
...nnecting-on-changing-master-ip-bsc-1.patch | 770 ++
...ine-less-blocking-the-eventpublisher.patch | 104 +
...-self-recoverable-on-killing-eventpu.patch | 243 +
...script-to-not-require-setuptools-9.1.patch | 33 +
...ured-user-is-properly-set-by-salt-bs.patch | 204 +
...e-file-client-is-destroyed-upon-used.patch | 850 ++
make-tests-compatible-with-venv-bundle.patch | 883 ++
mark-salt-3006-as-released-586.patch | 480 ++
..._str-on-curl_debug-message-in-tornad.patch | 31 +
pass-the-context-to-pillar-ext-modules.patch | 276 +
...mock-for-python-versions-that-are-su.patch | 135 +
...n-of-ssh.opts-with-lazyloader-bsc-11.patch | 240 +
...-high-amount-of-batch-async-calls-bs.patch | 1272 +++
...ins-errors-on-missing-cookie-path-bs.patch | 102 +
...-exception-in-tornado.concurrent.fut.patch | 37 +
...-exceptions-on-salt.utils.user.get_g.patch | 68 +
...jection-via-pre_flight_script_args-4.patch | 33 +
...ent-using-syncwrapper-with-no-reason.patch | 25 +
provide-systemd-timer-unit.patch | 43 +
...ithout-using-interpolation-bsc-11356.patch | 29 +
...undant-_file_find-call-to-the-master.patch | 40 +
...-run_func-from-salt.master.mworker._.patch | 224 +
...port-causing-delays-on-starting-salt.patch | 25 +
...ygit2-deprecated-and-removed-1.15.0-.patch | 122 +
...default-behaviour-of-pkg-list-return.patch | 137 +
...expected-powerpc-os-arch-bsc-1117995.patch | 31 +
...use-case-when-multiple-inotify-beaco.patch | 216 +
...-configured-user-is-properly-set-by-.patch | 194 +
...elinux-context-for-minion-service-bs.patch | 65 +
...e-making-reactor-less-blocking-bsc-1.patch | 106 +
...long-running-req-channel-bsc-1213960.patch | 418 +
run-salt-api-as-user-salt-bsc-1064520.patch | 25 +
run-salt-master-as-dedicated-salt-user.patch | 47 +
salt-tmpfiles.d | 5 +
salt.changes | 7082 +++++++++++++++++
salt.spec | 1756 ++++
save-log-to-logfile-with-docker.build.patch | 56 +
...r-tests-to-avoid-errors-and-failures.patch | 557 ++
...ts-if-necessary-and-mark-some-flaky-.patch | 392 +
...ts-related-to-old-openssl-algorithms.patch | 97 +
...-names-without-colon-bsc-1208691-578.patch | 27 +
...nsupported-algorithm-on-old-openssl-.patch | 117 +
...e-small-tests-fixes-enhancements-661.patch | 152 +
...tcher.confirm_top-by-using-__context.patch | 64 +
...ewalld-state-to-use-change_interface.patch | 72 +
switch-oscap-encoding-to-utf-8-639.patch | 80 +
...tend-the-whitelist-of-allowed-comman.patch | 34 +
...ust-test-expectation-to-prevent-fail.patch | 28 +
...pen-redirect-in-staticfilehandler-cv.patch | 35 +
transactional_update.conf | 4 +
travis.yml | 35 +
update-__pillar__-during-pillar_refresh.patch | 169 +
update-documentation.sh | 100 +
...cation-of-hex-in-pygit2-1.15.0-and-a.patch | 62 +
...x-for-salt-ssh-to-process-targets-li.patch | 98 +
...lgorithm-to-compute-string-checksums.patch | 124 +
...-extension_modules-in-salt-call-bsc-.patch | 110 +
...rlock-to-avoid-deadlocks-in-salt-ssh.patch | 27 +
use-salt-bundle-in-dockermod.patch | 375 +
...om-salt-bundle-with-transactional_up.patch | 103 +
v3006.0.tar.gz | 3 +
...on-before-building-when-using-with-s.patch | 30 +
x509-fixes-111.patch | 431 +
zypper-pkgrepo-alreadyconfigured-585.patch | 366 +
...-retcode-104-for-search-bsc-1176697-.patch | 275 +
168 files changed, 50789 insertions(+)
create mode 100644 .gitattributes
create mode 100644 .gitignore
create mode 100644 3005.1-implement-zypper-removeptf-573.patch
create mode 100644 3006.0-prevent-_pygit2.giterror-error-loading-known_.patch
create mode 100644 README.SUSE
create mode 100644 _lastrevision
create mode 100644 _multibuild
create mode 100644 _service
create mode 100644 activate-all-beacons-sources-config-pillar-grains.patch
create mode 100644 add-custom-suse-capabilities-as-grains.patch
create mode 100644 add-environment-variable-to-know-if-yum-is-invoked-f.patch
create mode 100644 add-migrated-state-and-gpg-key-management-functions-.patch
create mode 100644 add-missing-contextvars-dependency-in-salt.version.patch
create mode 100644 add-publish_batch-to-clearfuncs-exposed-methods.patch
create mode 100644 add-salt-ssh-support-with-venv-salt-minion-3004-493.patch
create mode 100644 add-sleep-on-exception-handling-on-minion-connection.patch
create mode 100644 add-standalone-configuration-file-for-enabling-packa.patch
create mode 100644 add-support-for-gpgautoimport-539.patch
create mode 100644 allow-all-primitive-grain-types-for-autosign_grains-.patch
create mode 100644 allow-kwargs-for-fileserver-roots-update-bsc-1218482.patch
create mode 100644 allow-namedloadercontexts-to-be-returned-from-loader.patch
create mode 100644 allow-vendor-change-option-with-zypper.patch
create mode 100644 async-batch-implementation.patch
create mode 100644 avoid-conflicts-with-dependencies-versions-bsc-12116.patch
create mode 100644 avoid-crash-on-wrong-output-of-systemctl-version-bsc.patch
create mode 100644 avoid-excessive-syslogging-by-watchdog-cronjob-58.patch
create mode 100644 avoid-explicit-reading-of-etc-salt-minion-bsc-122035.patch
create mode 100644 bsc-1176024-fix-file-directory-user-and-group-owners.patch
create mode 100644 change-the-delimeters-to-prevent-possible-tracebacks.patch
create mode 100644 control-the-collection-of-lvm-grains-via-config.patch
create mode 100644 debian-info_installed-compatibility-50453.patch
create mode 100644 decode-oscap-byte-stream-to-string-bsc-1219001.patch
create mode 100644 define-__virtualname__-for-transactional_update-modu.patch
create mode 100644 dereference-symlinks-to-set-proper-__cli-opt-bsc-121.patch
create mode 100644 discover-both-.yml-and-.yaml-playbooks-bsc-1211888.patch
create mode 100644 dnfnotify-pkgset-plugin-implementation-3002.2-450.patch
create mode 100644 do-not-call-the-async-wrapper-calls-with-the-separat.patch
create mode 100644 do-not-fail-on-bad-message-pack-message-bsc-1213441-.patch
create mode 100644 do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
create mode 100644 don-t-use-shell-sbin-nologin-in-requisites.patch
create mode 100644 drop-serial-from-event.unpack-in-cli.batch_async.patch
create mode 100644 early-feature-support-config.patch
create mode 100644 enable-keepalive-probes-for-salt-ssh-executions-bsc-.patch
create mode 100644 enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
create mode 100644 enhance-cleanup-mechanism-after-salt-bundle-upgrade-.patch
create mode 100644 enhance-find_json-garbage-filtering-bsc-1231605-688.patch
create mode 100644 enhance-openscap-module-add-xccdf_eval-call-386.patch
create mode 100644 firewalld-normalize-new-rich-rules-before-comparing-.patch
create mode 100644 fix-bsc-1065792.patch
create mode 100644 fix-calculation-of-sls-context-vars-when-trailing-do.patch
create mode 100644 fix-cve-2023-34049-bsc-1215157.patch
create mode 100644 fix-cve-2024-22231-and-cve-2024-22232-bsc-1219430-bs.patch
create mode 100644 fix-deprecated-code-677.patch
create mode 100644 fix-failed-to-stat-root-.gitconfig-issue-on-gitfs-bs.patch
create mode 100644 fix-for-suse-expanded-support-detection.patch
create mode 100644 fix-gitfs-__env__-and-improve-cache-cleaning-bsc-119.patch
create mode 100644 fix-issue-2068-test.patch
create mode 100644 fix-issues-that-break-salt-in-python-3.12-and-3.13-6.patch
create mode 100644 fix-missing-minion-returns-in-batch-mode-360.patch
create mode 100644 fix-optimization_order-opt-to-prevent-test-fails.patch
create mode 100644 fix-ownership-of-salt-thin-directory-when-using-the-.patch
create mode 100644 fix-problematic-tests-and-allow-smooth-tests-executi.patch
create mode 100644 fix-regression-multiple-values-for-keyword-argument-.patch
create mode 100644 fix-regression-with-depending-client.ssh-on-psutil-b.patch
create mode 100644 fix-salt-ssh-opts-poisoning-bsc-1197637-3004-501.patch
create mode 100644 fix-salt-warnings-and-testuite-for-python-3.11-635.patch
create mode 100644 fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
create mode 100644 fix-some-issues-detected-in-salt-support-cli-module-.patch
create mode 100644 fix-status.diskusage-and-exclude-some-tests-to-run-w.patch
create mode 100644 fix-test_debian-to-work-in-our-infrastructure-676.patch
create mode 100644 fix-test_system-flaky-setup_teardown-fn.patch
create mode 100644 fix-tests-failures-after-repo.saltproject.io-depreca.patch
create mode 100644 fix-tests-failures-and-errors-when-detected-on-vm-ex.patch
create mode 100644 fix-tests-to-make-them-running-with-salt-testsuite.patch
create mode 100644 fix-the-aptpkg.py-unit-test-failure.patch
create mode 100644 fix-the-regression-for-yumnotify-plugin-456.patch
create mode 100644 fix-the-regression-of-user.present-state-when-group-.patch
create mode 100644 fix-the-selinux-context-for-salt-minion-service-bsc-.patch
create mode 100644 fix-traceback.print_exc-calls-for-test_pip_state-432.patch
create mode 100644 fix-user.list_groups-omits-remote-groups.patch
create mode 100644 fix-utf8-handling-in-pass-renderer-and-make-it-more-.patch
create mode 100644 fix-version-detection-and-avoid-building-and-testing.patch
create mode 100644 fix-virtual-grains-for-vms-running-on-nutanix-ahv-bs.patch
create mode 100644 fix-x509-private-key-tests-and-test_suse-on-sle12-68.patch
create mode 100644 fix-x509-test-fails-on-old-openssl-systems-682.patch
create mode 100644 fixed-gitfs-cachedir_basename-to-avoid-hash-collisio.patch
create mode 100644 fixed-keyerror-in-logs-when-running-a-state-that-fai.patch
create mode 100644 fixes-for-python-3.10-502.patch
create mode 100644 handle-logger-flushing-already-closed-file-686.patch
create mode 100644 html.tar.bz2
create mode 100644 implement-the-calling-for-batch-async-from-the-salt-.patch
create mode 100644 improve-broken-events-catching-and-reporting.patch
create mode 100644 improve-error-handling-with-different-openssl-versio.patch
create mode 100644 improve-pip-target-override-condition-with-venv_pip_.patch
create mode 100644 improve-salt.utils.json.find_json-bsc-1213293.patch
create mode 100644 include-aliases-in-the-fqdns-grains.patch
create mode 100644 info_installed-works-without-status-attr-now.patch
create mode 100644 join-masters-if-it-is-a-list-671.patch
create mode 100644 let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
create mode 100644 make-aptpkg.list_repos-compatible-on-enabled-disable.patch
create mode 100644 make-importing-seco.range-thread-safe-bsc-1211649.patch
create mode 100644 make-logging-calls-lighter.patch
create mode 100644 make-master_tops-compatible-with-salt-3000-and-older.patch
create mode 100644 make-minion-reconnecting-on-changing-master-ip-bsc-1.patch
create mode 100644 make-reactor-engine-less-blocking-the-eventpublisher.patch
create mode 100644 make-salt-master-self-recoverable-on-killing-eventpu.patch
create mode 100644 make-setup.py-script-to-not-require-setuptools-9.1.patch
create mode 100644 make-sure-configured-user-is-properly-set-by-salt-bs.patch
create mode 100644 make-sure-the-file-client-is-destroyed-upon-used.patch
create mode 100644 make-tests-compatible-with-venv-bundle.patch
create mode 100644 mark-salt-3006-as-released-586.patch
create mode 100644 only-call-native_str-on-curl_debug-message-in-tornad.patch
create mode 100644 pass-the-context-to-pillar-ext-modules.patch
create mode 100644 prefer-unittest.mock-for-python-versions-that-are-su.patch
create mode 100644 prevent-affection-of-ssh.opts-with-lazyloader-bsc-11.patch
create mode 100644 prevent-oom-with-high-amount-of-batch-async-calls-bs.patch
create mode 100644 prevent-pkg-plugins-errors-on-missing-cookie-path-bs.patch
create mode 100644 prevent-possible-exception-in-tornado.concurrent.fut.patch
create mode 100644 prevent-possible-exceptions-on-salt.utils.user.get_g.patch
create mode 100644 prevent-shell-injection-via-pre_flight_script_args-4.patch
create mode 100644 prevent-using-syncwrapper-with-no-reason.patch
create mode 100644 provide-systemd-timer-unit.patch
create mode 100644 read-repo-info-without-using-interpolation-bsc-11356.patch
create mode 100644 remove-redundant-_file_find-call-to-the-master.patch
create mode 100644 remove-redundant-run_func-from-salt.master.mworker._.patch
create mode 100644 remove-unused-import-causing-delays-on-starting-salt.patch
create mode 100644 replace-use-of-pygit2-deprecated-and-removed-1.15.0-.patch
create mode 100644 restore-default-behaviour-of-pkg-list-return.patch
create mode 100644 return-the-expected-powerpc-os-arch-bsc-1117995.patch
create mode 100644 revert-fixing-a-use-case-when-multiple-inotify-beaco.patch
create mode 100644 revert-make-sure-configured-user-is-properly-set-by-.patch
create mode 100644 revert-setting-selinux-context-for-minion-service-bs.patch
create mode 100644 revert-the-change-making-reactor-less-blocking-bsc-1.patch
create mode 100644 revert-usage-of-long-running-req-channel-bsc-1213960.patch
create mode 100644 run-salt-api-as-user-salt-bsc-1064520.patch
create mode 100644 run-salt-master-as-dedicated-salt-user.patch
create mode 100644 salt-tmpfiles.d
create mode 100644 salt.changes
create mode 100644 salt.spec
create mode 100644 save-log-to-logfile-with-docker.build.patch
create mode 100644 several-fixes-for-tests-to-avoid-errors-and-failures.patch
create mode 100644 skip-certain-tests-if-necessary-and-mark-some-flaky-.patch
create mode 100644 skip-more-tests-related-to-old-openssl-algorithms.patch
create mode 100644 skip-package-names-without-colon-bsc-1208691-578.patch
create mode 100644 skip-tests-for-unsupported-algorithm-on-old-openssl-.patch
create mode 100644 some-more-small-tests-fixes-enhancements-661.patch
create mode 100644 speed-up-salt.matcher.confirm_top-by-using-__context.patch
create mode 100644 switch-firewalld-state-to-use-change_interface.patch
create mode 100644 switch-oscap-encoding-to-utf-8-639.patch
create mode 100644 temporary-fix-extend-the-whitelist-of-allowed-comman.patch
create mode 100644 test_vultrpy-adjust-test-expectation-to-prevent-fail.patch
create mode 100644 tornado-fix-an-open-redirect-in-staticfilehandler-cv.patch
create mode 100644 transactional_update.conf
create mode 100644 travis.yml
create mode 100644 update-__pillar__-during-pillar_refresh.patch
create mode 100644 update-documentation.sh
create mode 100644 update-for-deprecation-of-hex-in-pygit2-1.15.0-and-a.patch
create mode 100644 update-target-fix-for-salt-ssh-to-process-targets-li.patch
create mode 100644 use-adler32-algorithm-to-compute-string-checksums.patch
create mode 100644 use-cachedir-for-extension_modules-in-salt-call-bsc-.patch
create mode 100644 use-rlock-to-avoid-deadlocks-in-salt-ssh.patch
create mode 100644 use-salt-bundle-in-dockermod.patch
create mode 100644 use-salt-call-from-salt-bundle-with-transactional_up.patch
create mode 100644 v3006.0.tar.gz
create mode 100644 write-salt-version-before-building-when-using-with-s.patch
create mode 100644 x509-fixes-111.patch
create mode 100644 zypper-pkgrepo-alreadyconfigured-585.patch
create mode 100644 zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..9b03811
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,23 @@
+## Default LFS
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.bsp filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.gem filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.jar filter=lfs diff=lfs merge=lfs -text
+*.lz filter=lfs diff=lfs merge=lfs -text
+*.lzma filter=lfs diff=lfs merge=lfs -text
+*.obscpio filter=lfs diff=lfs merge=lfs -text
+*.oxt filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.rpm filter=lfs diff=lfs merge=lfs -text
+*.tbz filter=lfs diff=lfs merge=lfs -text
+*.tbz2 filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.ttf filter=lfs diff=lfs merge=lfs -text
+*.txz filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..57affb6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.osc
diff --git a/3005.1-implement-zypper-removeptf-573.patch b/3005.1-implement-zypper-removeptf-573.patch
new file mode 100644
index 0000000..82e4c9f
--- /dev/null
+++ b/3005.1-implement-zypper-removeptf-573.patch
@@ -0,0 +1,505 @@
+From 327a5e5b24c4fa047df44b245abd672e02999cca Mon Sep 17 00:00:00 2001
+From: Michael Calmer
+Date: Mon, 23 Jan 2023 14:33:26 +0100
+Subject: [PATCH] 3005.1 implement zypper removeptf (#573)
+
+* handle ptf packages inside of normal pkg.remove function
+
+* add testcase for remove and removeptf
+
+* add changelog
+
+* adapt old tests to changed function
+
+* Update Docs
+
+Co-authored-by: Megan Wilhite
+---
+ changelog/63442.added | 1 +
+ salt/modules/zypperpkg.py | 38 +-
+ tests/pytests/unit/modules/test_zypperpkg.py | 356 ++++++++++++++++++-
+ tests/unit/modules/test_zypperpkg.py | 1 +
+ 4 files changed, 394 insertions(+), 2 deletions(-)
+ create mode 100644 changelog/63442.added
+
+diff --git a/changelog/63442.added b/changelog/63442.added
+new file mode 100644
+index 0000000000..ad81b2f9d5
+--- /dev/null
++++ b/changelog/63442.added
+@@ -0,0 +1 @@
++implement removal of ptf packages in zypper pkg module
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 051f8f72c7..44f2cdbd3a 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -2073,17 +2073,21 @@ def _uninstall(inclusion_detection, name=None, pkgs=None, root=None):
+ except MinionError as exc:
+ raise CommandExecutionError(exc)
+
++ ptfpackages = _find_ptf_packages(pkg_params.keys(), root=root)
+ includes = _detect_includes(pkg_params.keys(), inclusion_detection)
+ old = list_pkgs(root=root, includes=includes)
+ targets = []
+ for target in pkg_params:
++ if target in ptfpackages:
++ # ptfpackages needs special handling
++ continue
+ # Check if package version set to be removed is actually installed:
+ # old[target] contains a comma-separated list of installed versions
+ if target in old and pkg_params[target] in old[target].split(","):
+ targets.append(target + "-" + pkg_params[target])
+ elif target in old and not pkg_params[target]:
+ targets.append(target)
+- if not targets:
++ if not targets and not ptfpackages:
+ return {}
+
+ systemd_scope = _systemd_scope()
+@@ -2095,6 +2099,13 @@ def _uninstall(inclusion_detection, name=None, pkgs=None, root=None):
+ )
+ targets = targets[500:]
+
++ # handle ptf packages
++ while ptfpackages:
++ __zypper__(systemd_scope=systemd_scope, root=root).call(
++ "removeptf", "--allow-downgrade", *ptfpackages[:500]
++ )
++ ptfpackages = ptfpackages[500:]
++
+ _clean_cache()
+ new = list_pkgs(root=root, includes=includes)
+ ret = salt.utils.data.compare_dicts(old, new)
+@@ -2183,6 +2194,11 @@ def remove(
+ salt '*' pkg.remove
+ salt '*' pkg.remove ,,
+ salt '*' pkg.remove pkgs='["foo", "bar"]'
++
++ .. versionchanged:: 3007
++ Can now also remove PTF packages, which require different handling in the backend.
+ """
+ return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root)
+
+@@ -2658,6 +2674,26 @@ def _get_visible_patterns(root=None):
+ return patterns
+
+
++def _find_ptf_packages(pkgs, root=None):
++ """
++ Find ptf packages in "pkgs" and return them as list
++ """
++ ptfs = []
++ cmd = ["rpm"]
++ if root:
++ cmd.extend(["--root", root])
++ cmd.extend(["-q", "--qf", "%{NAME}: [%{PROVIDES} ]\n"])
++ cmd.extend(pkgs)
++ output = __salt__["cmd.run"](cmd)
++ for line in output.splitlines():
++ if not line.strip():
++ continue
++ pkg, provides = line.split(":", 1)
++ if "ptf()" in provides:
++ ptfs.append(pkg)
++ return ptfs
++
++
+ def _get_installed_patterns(root=None):
+ """
+ List all installed patterns.
+diff --git a/tests/pytests/unit/modules/test_zypperpkg.py b/tests/pytests/unit/modules/test_zypperpkg.py
+index 91132b7277..c996662e1c 100644
+--- a/tests/pytests/unit/modules/test_zypperpkg.py
++++ b/tests/pytests/unit/modules/test_zypperpkg.py
+@@ -11,7 +11,7 @@ import pytest
+ import salt.modules.pkg_resource as pkg_resource
+ import salt.modules.zypperpkg as zypper
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+-from tests.support.mock import MagicMock, mock_open, patch
++from tests.support.mock import MagicMock, mock_open, call, patch
+
+
+ @pytest.fixture
+@@ -27,6 +27,11 @@ def configure_loader_modules():
+ }
+
+
++@pytest.fixture(autouse=True)
++def fresh_zypper_instance():
++ zypper.__zypper__ = zypper._Zypper()
++
++
+ def test_list_pkgs_no_context():
+ """
+ Test packages listing.
+@@ -395,3 +400,352 @@ def test_del_repo_key():
+ with patch.dict(zypper.__salt__, salt_mock):
+ assert zypper.del_repo_key(keyid="keyid", root="/mnt")
+ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
++
++@pytest.mark.parametrize(
++ "zypper_version,lowpkg_version_cmp,expected_inst_avc,expected_dup_avc",
++ [
++ ("0.5", [-1, -1], False, False),
++ ("1.11.34", [0, -1], False, True),
++ ("1.14.8", [0, 0], True, True),
++ ],
++)
++def test_refresh_zypper_flags(
++ zypper_version, lowpkg_version_cmp, expected_inst_avc, expected_dup_avc
++):
++ with patch(
++ "salt.modules.zypperpkg.version", MagicMock(return_value=zypper_version)
++ ), patch.dict(
++ zypper.__salt__,
++ {"lowpkg.version_cmp": MagicMock(side_effect=lowpkg_version_cmp)},
++ ):
++ _zypper = zypper._Zypper()
++ _zypper.refresh_zypper_flags()
++ assert _zypper.inst_avc == expected_inst_avc
++ assert _zypper.dup_avc == expected_dup_avc
++
++
++@pytest.mark.parametrize(
++ "inst_avc,dup_avc,avc,allowvendorchange_param,novendorchange_param,expected",
++ [
++ # inst_avc = True, dup_avc = True
++ (True, True, False, False, False, True),
++ (True, True, False, True, False, True),
++ (True, True, False, False, True, False),
++ (True, True, False, True, True, True),
++ # inst_avc = False, dup_avc = True
++ (False, True, False, False, False, True),
++ (False, True, False, True, False, True),
++ (False, True, False, False, True, False),
++ (False, True, False, True, True, True),
++ # inst_avc = False, dup_avc = False
++ (False, False, False, False, False, False),
++ (False, False, False, True, False, False),
++ (False, False, False, False, True, False),
++ (False, False, False, True, True, False),
++ ],
++)
++@patch("salt.modules.zypperpkg._Zypper.refresh_zypper_flags", MagicMock())
++def test_allow_vendor_change(
++ inst_avc,
++ dup_avc,
++ avc,
++ allowvendorchange_param,
++ novendorchange_param,
++ expected,
++):
++ _zypper = zypper._Zypper()
++ _zypper.inst_avc = inst_avc
++ _zypper.dup_avc = dup_avc
++ _zypper.avc = avc
++ _zypper.allow_vendor_change(allowvendorchange_param, novendorchange_param)
++ assert _zypper.avc == expected
++
++
++@pytest.mark.parametrize(
++ "package,pre_version,post_version,fromrepo_param,name_param,pkgs_param,diff_attr_param",
++ [
++ ("vim", "1.1", "1.2", [], "", [], "all"),
++ ("kernel-default", "1.1", "1.1,1.2", ["dummy", "dummy2"], "", [], None),
++ ("vim", "1.1", "1.2", [], "vim", [], None),
++ ],
++)
++@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
++def test_upgrade(
++ package,
++ pre_version,
++ post_version,
++ fromrepo_param,
++ name_param,
++ pkgs_param,
++ diff_attr_param,
++):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call"
++ ) as zypper_mock, patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
++ ) as list_pkgs_mock:
++ expected_call = ["update", "--auto-agree-with-licenses"]
++ for repo in fromrepo_param:
++ expected_call.extend(["--repo", repo])
++
++ if pkgs_param:
++ expected_call.extend(pkgs_param)
++ elif name_param:
++ expected_call.append(name_param)
++
++ result = zypper.upgrade(
++ name=name_param,
++ pkgs=pkgs_param,
++ fromrepo=fromrepo_param,
++ diff_attr=diff_attr_param,
++ )
++ zypper_mock.assert_any_call(*expected_call)
++ assert result == {package: {"old": pre_version, "new": post_version}}
++ list_pkgs_mock.assert_any_call(root=None, attr=diff_attr_param)
++
++
++@pytest.mark.parametrize(
++ "package,pre_version,post_version,fromrepo_param",
++ [
++ ("vim", "1.1", "1.2", []),
++ ("emacs", "1.1", "1.2", ["Dummy", "Dummy2"]),
++ ],
++)
++@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
++def test_dist_upgrade(package, pre_version, post_version, fromrepo_param):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call"
++ ) as zypper_mock, patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
++ ):
++ expected_call = ["dist-upgrade", "--auto-agree-with-licenses"]
++
++ for repo in fromrepo_param:
++ expected_call.extend(["--from", repo])
++
++ result = zypper.upgrade(dist_upgrade=True, fromrepo=fromrepo_param)
++ zypper_mock.assert_any_call(*expected_call)
++ assert result == {package: {"old": pre_version, "new": post_version}}
++
++
++@pytest.mark.parametrize(
++ "package,pre_version,post_version,dup_avc,novendorchange_param,allowvendorchange_param,vendor_change",
++ [
++ # dup_avc = True, both params = default -> no vendor change
++ ("vim", "1.1", "1.2", True, True, False, False),
++ # dup_avc = True, allowvendorchange = True -> vendor change
++ (
++ "emacs",
++ "1.1",
++ "1.2",
++ True,
++ True,
++ True,
++ True,
++ ),
++ # dup_avc = True, novendorchange = False -> vendor change
++ ("joe", "1.1", "1.2", True, False, False, True),
++ # dup_avc = True, both params = toggled -> vendor change
++ ("kate", "1.1", "1.2", True, False, True, True),
++ # dup_avc = False -> no vendor change
++ (
++ "gedit",
++ "1.1",
++ "1.2",
++ False,
++ False,
++ True,
++ False
++ ),
++ ],
++)
++@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
++def test_dist_upgrade_vendorchange(
++ package,
++ pre_version,
++ post_version,
++ dup_avc,
++ novendorchange_param,
++ allowvendorchange_param,
++ vendor_change
++):
++ cmd_run_mock = MagicMock(return_value={"retcode": 0, "stdout": None})
++ with patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
++ ), patch("salt.modules.zypperpkg.__zypper__.refresh_zypper_flags",), patch.dict(
++ zypper.__salt__, {"cmd.run_all": cmd_run_mock}
++ ):
++ expected_cmd = ["zypper", "--non-interactive", "--no-refresh", "dist-upgrade"]
++ # --allow-vendor-change is injected right after "dist-upgrade"
++ if vendor_change:
++ expected_cmd.append("--allow-vendor-change")
++ expected_cmd.append("--auto-agree-with-licenses")
++
++ zypper.__zypper__.dup_avc = dup_avc
++ zypper.upgrade(
++ dist_upgrade=True,
++ allowvendorchange=allowvendorchange_param,
++ novendorchange=novendorchange_param,
++ )
++ cmd_run_mock.assert_any_call(
++ expected_cmd, output_loglevel="trace", python_shell=False, env={}
++ )
++
++
++@pytest.mark.parametrize(
++ "package,pre_version,post_version,fromrepo_param",
++ [
++ ("vim", "1.1", "1.1", []),
++ ("emacs", "1.1", "1.1", ["Dummy", "Dummy2"]),
++ ],
++)
++@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
++def test_dist_upgrade_dry_run(package, pre_version, post_version, fromrepo_param):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call"
++ ) as zypper_mock, patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
++ ):
++ expected_call = ["dist-upgrade", "--auto-agree-with-licenses", "--dry-run"]
++
++ for repo in fromrepo_param:
++ expected_call.extend(["--from", repo])
++
++ zypper.upgrade(dist_upgrade=True, dryrun=True, fromrepo=fromrepo_param)
++ zypper_mock.assert_any_call(*expected_call)
++ # dryrun=True causes two calls, one with a trailing --debug-solver flag
++ expected_call.append("--debug-solver")
++ zypper_mock.assert_any_call(*expected_call)
++
++
++@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
++def test_dist_upgrade_failure():
++ zypper_output = textwrap.dedent(
++ """\
++ Loading repository data...
++ Reading installed packages...
++ Computing distribution upgrade...
++ Use 'zypper repos' to get the list of defined repositories.
++ Repository 'DUMMY' not found by its alias, number, or URI.
++ """
++ )
++ call_spy = MagicMock()
++ zypper_mock = MagicMock()
++ zypper_mock.stdout = zypper_output
++ zypper_mock.stderr = ""
++ zypper_mock.exit_code = 3
++ zypper_mock.noraise.call = call_spy
++ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock), patch.object(
++ zypper, "list_pkgs", MagicMock(side_effect=[{"vim": 1.1}, {"vim": 1.1}])
++ ):
++ expected_call = [
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--from",
++ "Dummy",
++ ]
++
++ with pytest.raises(CommandExecutionError) as exc:
++ zypper.upgrade(dist_upgrade=True, fromrepo=["Dummy"])
++ call_spy.assert_called_with(*expected_call)
++
++ assert exc.exception.info["changes"] == {}
++ assert exc.exception.info["result"]["stdout"] == zypper_output
++
++
++def test_remove_multiple_pkgs_with_ptf():
++ call_spy = MagicMock()
++ zypper_mock = MagicMock()
++ zypper_mock.stdout = ""
++ zypper_mock.stderr = ""
++ zypper_mock.exit_code = 0
++ zypper_mock.call = call_spy
++
++ rpm_output = textwrap.dedent(
++ """
++ vim: vi vim vim(x86-64) vim-base vim-enhanced vim-python vim_client
++ ptf-12345: ptf() ptf-12345
++ """
++ )
++ rpm_mock = MagicMock(side_effect=[rpm_output])
++
++ with patch(
++ "salt.modules.zypperpkg.__zypper__", MagicMock(return_value=zypper_mock)
++ ), patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{"vim": "0.18.0", "ptf-12345": "1"}, {}]),
++ ), patch.dict(
++ zypper.__salt__, {"cmd.run": rpm_mock}
++ ):
++ expected_calls = [
++ call(
++ "remove",
++ "vim",
++ ),
++ call(
++ "removeptf",
++ "--allow-downgrade",
++ "ptf-12345",
++ ),
++ ]
++
++ result = zypper.remove(name="vim,ptf-12345")
++ call_spy.assert_has_calls(expected_calls, any_order=False)
++ assert result["vim"]["new"] == "", result
++ assert result["vim"]["old"] == "0.18.0", result
++ assert result["ptf-12345"]["new"] == "", result
++ assert result["ptf-12345"]["old"] == "1", result
++
++
++def test_remove_ptf():
++ call_spy = MagicMock()
++ zypper_mock = MagicMock()
++ zypper_mock.stdout = ""
++ zypper_mock.stderr = ""
++ zypper_mock.exit_code = 0
++ zypper_mock.call = call_spy
++
++ rpm_mock = MagicMock(
++ side_effect=[
++ "vim: vi vim vim(x86-64) vim-base vim-enhanced vim-python vim_client",
++ "ptf-12345: ptf() ptf-12345",
++ ]
++ )
++
++ with patch(
++ "salt.modules.zypperpkg.__zypper__", MagicMock(return_value=zypper_mock)
++ ), patch.object(
++ zypper,
++ "list_pkgs",
++ MagicMock(side_effect=[{"vim": "0.18.0"}, {}, {"ptf-12345": "1"}, {}]),
++ ), patch.dict(
++ zypper.__salt__, {"cmd.run": rpm_mock}
++ ):
++ expected_call_vim = [
++ "remove",
++ "vim",
++ ]
++ expected_call_ptf = [
++ "removeptf",
++ "--allow-downgrade",
++ "ptf-12345",
++ ]
++
++ result = zypper.remove(name="vim")
++ call_spy.assert_called_with(*expected_call_vim)
++ assert result["vim"]["new"] == "", result
++ assert result["vim"]["old"] == "0.18.0", result
++
++ result = zypper.remove(name="ptf-12345")
++ call_spy.assert_called_with(*expected_call_ptf)
++ assert result["ptf-12345"]["new"] == "", result
++ assert result["ptf-12345"]["old"] == "1", result
+diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
+index f5b6d74b6f..6e5ca88895 100644
+--- a/tests/unit/modules/test_zypperpkg.py
++++ b/tests/unit/modules/test_zypperpkg.py
+@@ -1953,6 +1953,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ # If config.get starts being used elsewhere, we'll need to write a
+ # side_effect function.
+ patches = {
++ "cmd.run": MagicMock(return_value="vim: vi vim\npico: pico"),
+ "cmd.run_all": MagicMock(return_value=cmd_out),
+ "pkg_resource.parse_targets": MagicMock(return_value=parsed_targets),
+ "pkg_resource.stringify": MagicMock(),
+--
+2.39.2
+
+
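For readers of the patch above: a minimal standalone sketch of the PTF detection it introduces. The module queries rpm for each package's PROVIDES list and treats any package providing the "ptf()" marker as a PTF that must be removed via `zypper removeptf`. The canned rpm output below is illustrative; the real `_find_ptf_packages()` shells out via `__salt__["cmd.run"]`.

    # Sketch of the _find_ptf_packages() logic, fed with canned output of
    # `rpm -q --qf "%{NAME}: [%{PROVIDES} ]\n" <pkgs...>`.
    rpm_output = (
        "vim: vi vim vim(x86-64) vim-base vim-enhanced\n"
        "ptf-12345: ptf() ptf-12345\n"
    )

    def find_ptf_packages(output):
        ptfs = []
        for line in output.splitlines():
            if not line.strip():
                continue
            # Lines look like "<name>: <provides...>"; a "ptf()" entry in
            # the PROVIDES list marks a Program Temporary Fix package.
            pkg, provides = line.split(":", 1)
            if "ptf()" in provides:
                ptfs.append(pkg)
        return ptfs

    assert find_ptf_packages(rpm_output) == ["ptf-12345"]
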
diff --git a/3006.0-prevent-_pygit2.giterror-error-loading-known_.patch b/3006.0-prevent-_pygit2.giterror-error-loading-known_.patch
new file mode 100644
index 0000000..e498f5c
--- /dev/null
+++ b/3006.0-prevent-_pygit2.giterror-error-loading-known_.patch
@@ -0,0 +1,71 @@
+From 40a57afc65e71835127a437248ed655404cff0e8 Mon Sep 17 00:00:00 2001
+From: Pablo Suárez Hernández
+Date: Tue, 27 Jun 2023 11:24:39 +0100
+Subject: [PATCH] 3006.0: Prevent _pygit2.GitError: error loading
+ known_hosts when $HOME is not set (bsc#1210994) (#588)
+
+* Prevent _pygit2.GitError: error loading known_hosts when $HOME is not set
+
+* Add unit test to cover case of unset home
+---
+ salt/utils/gitfs.py | 5 +++++
+ tests/unit/utils/test_gitfs.py | 14 ++++++++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
+index cc9895d8ab..38e84f38aa 100644
+--- a/salt/utils/gitfs.py
++++ b/salt/utils/gitfs.py
+@@ -34,6 +34,7 @@ import salt.utils.stringutils
+ import salt.utils.url
+ import salt.utils.user
+ import salt.utils.versions
++import salt.syspaths
+ from salt.config import DEFAULT_MASTER_OPTS as _DEFAULT_MASTER_OPTS
+ from salt.exceptions import FileserverConfigError, GitLockError, get_error_message
+ from salt.utils.event import tagify
+@@ -1867,6 +1868,10 @@ class Pygit2(GitProvider):
+ # pruning only available in pygit2 >= 0.26.2
+ pass
+ try:
++ # Make sure $HOME env variable is set to prevent
++ # _pygit2.GitError: error loading known_hosts in some libgit2 versions.
++ if "HOME" not in os.environ:
++ os.environ["HOME"] = salt.syspaths.HOME_DIR
+ fetch_results = origin.fetch(**fetch_kwargs)
+ except GitError as exc: # pylint: disable=broad-except
+ exc_str = get_error_message(exc).lower()
+diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py
+index b99da3ef91..7c400b69af 100644
+--- a/tests/unit/utils/test_gitfs.py
++++ b/tests/unit/utils/test_gitfs.py
+@@ -14,6 +14,7 @@ import salt.utils.gitfs
+ import salt.utils.platform
+ import tests.support.paths
+ from salt.exceptions import FileserverConfigError
++from tests.support.helpers import patched_environ
+ from tests.support.mixins import AdaptedConfigurationTestCaseMixin
+ from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
+@@ -335,3 +336,16 @@ class TestPygit2(TestCase):
+ self.assertIn(provider.cachedir, provider.checkout())
+ provider.branch = "does_not_exist"
+ self.assertIsNone(provider.checkout())
++
++ def test_checkout_with_home_env_unset(self):
++ remote = os.path.join(tests.support.paths.TMP, "pygit2-repo")
++ cache = os.path.join(tests.support.paths.TMP, "pygit2-repo-cache")
++ self._prepare_remote_repository(remote)
++ provider = self._prepare_cache_repository(remote, cache)
++ provider.remotecallbacks = None
++ provider.credentials = None
++ with patched_environ(__cleanup__=["HOME"]):
++ self.assertTrue("HOME" not in os.environ)
++ provider.init_remote()
++ provider.fetch()
++ self.assertTrue("HOME" in os.environ)
+--
+2.41.0
+
+
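The guard in the patch above is a plain environment fallback; here is a minimal sketch of the same pattern, with "/root" standing in for the packaged salt.syspaths.HOME_DIR default:

    import os

    def ensure_home(default="/root"):
        # libgit2/pygit2 resolves ~/.ssh/known_hosts via $HOME; fetching
        # with the variable unset raises _pygit2.GitError in some versions.
        if "HOME" not in os.environ:
            os.environ["HOME"] = default

    ensure_home()
    assert "HOME" in os.environ
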
diff --git a/README.SUSE b/README.SUSE
new file mode 100644
index 0000000..8b70e7f
--- /dev/null
+++ b/README.SUSE
@@ -0,0 +1,31 @@
+Salt-master as non-root user
+============================
+
+With this version of Salt the salt-master daemon runs as the "salt" user.
+
+Why an extra user
+=================
+
+While the traditional setup runs the master as the root user, this is considered a security
+issue and is not in line with other configuration management tools (e.g. Puppet), which run
+as a dedicated user.
+
+How can I undo the change
+=========================
+
+If you would like to undo this change, you can perform the following steps manually:
+1. change the user parameter in the master configuration
+ user: root
+2. update the file permissions:
+ as root: chown -R root /etc/salt /var/cache/salt /var/log/salt /var/run/salt
+3. restart the salt-master daemon:
+ as root: rcsalt-master restart or systemctl restart salt-master
+
+NOTE
+====
+
+Running the salt-master daemon as the root user is considered by some a security risk, but
+running as root enables the PAM external auth system, as this system needs root access to check authentication.
+
+For more information:
+http://docs.saltstack.com/en/latest/ref/configuration/nonroot.html
\ No newline at end of file
diff --git a/_lastrevision b/_lastrevision
new file mode 100644
index 0000000..68b64c1
--- /dev/null
+++ b/_lastrevision
@@ -0,0 +1 @@
+140a89771b14471ebcc8154d374b2be88c30eeb8
\ No newline at end of file
diff --git a/_multibuild b/_multibuild
new file mode 100644
index 0000000..a0cd1a3
--- /dev/null
+++ b/_multibuild
@@ -0,0 +1,3 @@
+<multibuild>
+  <package>testsuite</package>
+</multibuild>
diff --git a/_service b/_service
new file mode 100644
index 0000000..6b7dd83
--- /dev/null
+++ b/_service
@@ -0,0 +1,20 @@
+<services>
+  <service name="tar_scm" mode="disabled">
+    <param name="url">https://github.com/openSUSE/salt-packaging.git</param>
+    <param name="subdir">salt</param>
+    <param name="filename">package</param>
+    <param name="revision">release/3006.0</param>
+    <param name="scm">git</param>
+  </service>
+  <service name="extract_file" mode="disabled">
+    <param name="archive">*package*.tar</param>
+    <param name="files">*/*</param>
+  </service>
+  <service name="download_url" mode="disabled">
+    <param name="host">codeload.github.com</param>
+    <param name="path">openSUSE/salt/tar.gz/v3006.0-suse</param>
+    <param name="filename">v3006.0.tar.gz</param>
+  </service>
+  <service name="update_changelog" mode="disabled">
+  </service>
+</services>
diff --git a/activate-all-beacons-sources-config-pillar-grains.patch b/activate-all-beacons-sources-config-pillar-grains.patch
new file mode 100644
index 0000000..9a02d85
--- /dev/null
+++ b/activate-all-beacons-sources-config-pillar-grains.patch
@@ -0,0 +1,28 @@
+From f2938966bd1fcb46df0f202f5a86729ab190565a Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Tue, 17 Oct 2017 16:52:33 +0200
+Subject: [PATCH] Activate all beacons sources: config/pillar/grains
+
+---
+ salt/minion.py | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/salt/minion.py b/salt/minion.py
+index 6237fcc4b7..2f905e4a4f 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -503,9 +503,7 @@ class MinionBase:
+ the pillar or grains changed
+ """
+ if "config.merge" in functions:
+- b_conf = functions["config.merge"](
+- "beacons", self.opts["beacons"], omit_opts=True
+- )
++ b_conf = functions["config.merge"]("beacons", self.opts["beacons"])
+ if b_conf:
+ return self.beacons.process(
+ b_conf, self.opts["grains"]
+--
+2.39.2
+
+
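The one-line change above drops `omit_opts=True`, so beacon definitions from the static minion config are merged together with the pillar and grains sources again. An illustrative merge, assuming a simple last-source-wins policy (this is not Salt's real `config.merge` implementation):

    def merge_beacons(config, grains, pillar):
        merged = {}
        # Later sources win on key conflicts in this toy version.
        for source in (config, grains, pillar):
            merged.update(source.get("beacons", {}))
        return merged

    beacons = merge_beacons(
        {"beacons": {"inotify": [{"files": {"/etc/salt/minion": {}}}]}},
        {"beacons": {"load": [{"averages": {"1m": [0.0, 2.0]}}]}},
        {},
    )
    assert set(beacons) == {"inotify", "load"}
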
diff --git a/add-custom-suse-capabilities-as-grains.patch b/add-custom-suse-capabilities-as-grains.patch
new file mode 100644
index 0000000..690c44e
--- /dev/null
+++ b/add-custom-suse-capabilities-as-grains.patch
@@ -0,0 +1,30 @@
+From 311d4e320527158b6ff88604b45e15f0dc2bfa62 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 18 Jan 2022 12:59:43 +0100
+Subject: [PATCH] Add custom SUSE capabilities as Grains
+
+Add new custom SUSE capability for saltutil state module
+---
+ salt/grains/extra.py | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/salt/grains/extra.py b/salt/grains/extra.py
+index 300052f1ee..f2504dbf19 100644
+--- a/salt/grains/extra.py
++++ b/salt/grains/extra.py
+@@ -96,3 +96,11 @@ def uefi():
+ def transactional():
+ """Determine if the system is transactional."""
+ return {"transactional": bool(salt.utils.path.which("transactional-update"))}
++
++
++def suse_backported_capabilities():
++ return {
++ '__suse_reserved_pkg_all_versions_support': True,
++ '__suse_reserved_pkg_patches_support': True,
++ '__suse_reserved_saltutil_states_support': True
++ }
+--
+2.39.2
+
+
diff --git a/add-environment-variable-to-know-if-yum-is-invoked-f.patch b/add-environment-variable-to-know-if-yum-is-invoked-f.patch
new file mode 100644
index 0000000..1235df4
--- /dev/null
+++ b/add-environment-variable-to-know-if-yum-is-invoked-f.patch
@@ -0,0 +1,83 @@
+From d7682d1bc67ccdd63022c63b2d3229f8ab40d52b Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 18 Jan 2022 12:57:21 +0100
+Subject: [PATCH] Add environment variable to know if yum is invoked from
+ Salt(bsc#1057635)
+
+---
+ salt/modules/yumpkg.py | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
+index 4d0070f21a..b362d30bf4 100644
+--- a/salt/modules/yumpkg.py
++++ b/salt/modules/yumpkg.py
+@@ -964,7 +964,9 @@ def list_repo_pkgs(*args, **kwargs):
+ None
+ if _yum() != "yum"
+ else LooseVersion(
+- __salt__["cmd.run"](["yum", "--version"], python_shell=False)
++ __salt__["cmd.run"](
++ ["yum", "--version"], python_shell=False, env={"SALT_RUNNING": "1"}
++ )
+ .splitlines()[0]
+ .strip()
+ )
+@@ -2474,7 +2476,9 @@ def list_holds(pattern=__HOLD_PATTERN, full=True):
+ """
+ _check_versionlock()
+
+- out = __salt__["cmd.run"]([_yum(), "versionlock", "list"], python_shell=False)
++ out = __salt__["cmd.run"](
++ [_yum(), "versionlock", "list"], python_shell=False, env={"SALT_RUNNING": "1"}
++ )
+ ret = []
+ for line in salt.utils.itertools.split(out, "\n"):
+ match = _get_hold(line, pattern=pattern, full=full)
+@@ -2542,7 +2546,10 @@ def group_list():
+ }
+
+ out = __salt__["cmd.run_stdout"](
+- [_yum(), "grouplist", "hidden"], output_loglevel="trace", python_shell=False
++ [_yum(), "grouplist", "hidden"],
++ output_loglevel="trace",
++ python_shell=False,
++ env={"SALT_RUNNING": "1"},
+ )
+ key = None
+ for line in salt.utils.itertools.split(out, "\n"):
+@@ -2613,7 +2620,9 @@ def group_info(name, expand=False, ignore_groups=None):
+ ret[pkgtype] = set()
+
+ cmd = [_yum(), "--quiet", "groupinfo", name]
+- out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
++ out = __salt__["cmd.run_stdout"](
++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"}
++ )
+
+ g_info = {}
+ for line in salt.utils.itertools.split(out, "\n"):
+@@ -3342,7 +3351,9 @@ def download(*packages, **kwargs):
+
+ cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)]
+ cmd.extend(packages)
+- __salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
++ __salt__["cmd.run"](
++ cmd, output_loglevel="trace", python_shell=False, env={"SALT_RUNNING": "1"}
++ )
+ ret = {}
+ for dld_result in os.listdir(CACHE_DIR):
+ if not dld_result.endswith(".rpm"):
+@@ -3418,7 +3429,7 @@ def _get_patches(installed_only=False):
+ patches = {}
+
+ cmd = [_yum(), "--quiet", "updateinfo", "list", "all"]
+- ret = __salt__["cmd.run_stdout"](cmd, python_shell=False)
++ ret = __salt__["cmd.run_stdout"](cmd, python_shell=False, env={"SALT_RUNNING": "1"})
+ parsing_errors = False
+
+ for line in salt.utils.itertools.split(ret, os.linesep):
+--
+2.39.2
+
+
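The patch above exports SALT_RUNNING=1 into every yum invocation so that tooling started by yum can tell it is running under Salt. A hypothetical consumer check; only the variable name comes from the patch, the plugin logic is illustrative:

    import os

    def invoked_from_salt():
        # SALT_RUNNING is set by Salt's yumpkg module for all yum calls
        # (bsc#1057635).
        return os.environ.get("SALT_RUNNING") == "1"

    if invoked_from_salt():
        print("yum was invoked by Salt; skipping interactive behaviour")
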
diff --git a/add-migrated-state-and-gpg-key-management-functions-.patch b/add-migrated-state-and-gpg-key-management-functions-.patch
new file mode 100644
index 0000000..beffeaa
--- /dev/null
+++ b/add-migrated-state-and-gpg-key-management-functions-.patch
@@ -0,0 +1,1216 @@
+From c5236dadcffc24c00181c10ac4cf56020371c538 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 18 Jan 2022 18:40:40 +0100
+Subject: [PATCH] Add "migrated" state and GPG key management functions
+ (#290)
+
+* rpm_lowpkg: add API for GPG keys
+
+* zypperpkg: do not quote the repo name
+
+* pkgrepo: add migrated function
+
+* pkg: unify apt and rpm API for key repo
+
+aptpkg is the virtual package "pkg" for Debian, and contains some API
+for key management.
+
+This patch add a similar API for zypperpkg and yumpkg, also part of the
+same virtual package, based on the counterpart from rpm_lowpkg API.
+
+Convert test to pytests
+---
+ salt/modules/aptpkg.py | 4 +-
+ salt/modules/rpm_lowpkg.py | 151 +++++++
+ salt/modules/yumpkg.py | 88 ++++
+ salt/modules/zypperpkg.py | 88 ++++
+ salt/states/pkgrepo.py | 207 +++++++++
+ tests/pytests/unit/modules/test_yumpkg.py | 44 +-
+ tests/pytests/unit/modules/test_zypperpkg.py | 45 +-
+ tests/pytests/unit/states/test_pkgrepo.py | 448 +++++++++++++++++++
+ 8 files changed, 1070 insertions(+), 5 deletions(-)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index 3289f6604d..9885e9fb60 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -2197,7 +2197,7 @@ def _parse_repo_keys_output(cmd_ret):
+ return ret
+
+
+-def get_repo_keys(aptkey=True, keydir=None):
++def get_repo_keys(aptkey=True, keydir=None, **kwargs):
+ """
+ .. versionadded:: 2017.7.0
+
+@@ -2305,6 +2305,7 @@ def add_repo_key(
+ aptkey=True,
+ keydir=None,
+ keyfile=None,
++ **kwargs
+ ):
+ """
+ .. versionadded:: 2017.7.0
+@@ -2358,7 +2359,6 @@ def add_repo_key(
+ if not salt.utils.path.which("apt-key"):
+ aptkey = False
+ cmd = ["apt-key"]
+- kwargs = {}
+
+ # If the keyid is provided or determined, check it against the existing
+ # repo key ids to determine whether it needs to be imported.
+diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py
+index 4cd137c258..b360ec8df3 100644
+--- a/salt/modules/rpm_lowpkg.py
++++ b/salt/modules/rpm_lowpkg.py
+@@ -865,3 +865,154 @@ def checksum(*paths, **kwargs):
+ )
+
+ return ret
++
++
++def list_gpg_keys(info=False, root=None):
++ """Return the list of all the GPG keys stored in the RPM database
++
++ .. versionadded:: TBD
++
++ info
++ get the key information, returning a dictionary instead of a
++ list
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' lowpkg.list_gpg_keys
++ salt '*' lowpkg.list_gpg_keys info=True
++
++ """
++ cmd = ["rpm"]
++ if root:
++ cmd.extend(["--root", root])
++ cmd.extend(["-qa", "gpg-pubkey*"])
++ keys = __salt__["cmd.run_stdout"](cmd, python_shell=False).splitlines()
++ if info:
++ return {key: info_gpg_key(key, root=root) for key in keys}
++ else:
++ return keys
++
++
++def info_gpg_key(key, root=None):
++ """Return a dictionary with the information of a GPG key parsed
++
++ .. versionadded:: TBD
++
++ key
++ key identifier
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' lowpkg.info_gpg_key gpg-pubkey-3dbdc284-53674dd4
++
++ """
++ cmd = ["rpm"]
++ if root:
++ cmd.extend(["--root", root])
++ cmd.extend(["-qi", key])
++ info = __salt__["cmd.run_stdout"](cmd, python_shell=False)
++
++ res = {}
++ # The parser algorithm is very ad-hoc. Works under the
++ # expectation that all the fields are of the type "key: value" in
++ # a single line, except "Description", which is composed of
++ # multiple lines. Note that even though the official `rpm` makes this
++ # field the last one, others (like openSUSE) extend it with more
++ # fields.
++ in_description = False
++ description = []
++ for line in info.splitlines():
++ if line.startswith("Description"):
++ in_description = True
++ elif in_description:
++ description.append(line)
++ if line.startswith("-----END"):
++ res["Description"] = "\n".join(description)
++ in_description = False
++ elif line:
++ key, _, value = line.partition(":")
++ value = value.strip()
++ if "Date" in key:
++ try:
++ value = datetime.datetime.strptime(
++ value, "%a %d %b %Y %H:%M:%S %p %Z"
++ )
++ except ValueError:
++ pass
++ elif "Size" in key:
++ try:
++ value = int(value)
++ except TypeError:
++ pass
++ elif "(none)" in value:
++ value = None
++ res[key.strip()] = value
++ return res
++
++
++def import_gpg_key(key, root=None):
++ """Import a new key into the key storage
++
++ .. versionadded:: TBD
++
++ key
++ public key block content
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' lowpkg.import_gpg_key "-----BEGIN ..."
++
++ """
++ key_file = salt.utils.files.mkstemp()
++ with salt.utils.files.fopen(key_file, "w") as f:
++ f.write(key)
++
++ cmd = ["rpm"]
++ if root:
++ cmd.extend(["--root", root])
++ cmd.extend(["--import", key_file])
++ ret = __salt__["cmd.retcode"](cmd)
++
++ os.remove(key_file)
++
++ return ret == 0
++
++
++def remove_gpg_key(key, root=None):
++ """Remove a key from the key storage
++
++ .. versionadded:: TBD
++
++ key
++ key identifier
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' lowpkg.remove_gpg_key gpg-pubkey-3dbdc284-53674dd4
++
++ """
++ cmd = ["rpm"]
++ if root:
++ cmd.extend(["--root", root])
++ cmd.extend(["-e", key])
++ return __salt__["cmd.retcode"](cmd) == 0
+diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
+index b362d30bf4..b2be251a40 100644
+--- a/salt/modules/yumpkg.py
++++ b/salt/modules/yumpkg.py
+@@ -3535,3 +3535,91 @@ def services_need_restart(**kwargs):
+ services.add(service)
+
+ return list(services)
++
++
++def get_repo_keys(info=False, root=None, **kwargs):
++ """Return the list of all the GPG keys stored in the RPM database
++
++ .. versionadded:: TBD
++
++ info
++ get the key information, returning a dictionary instead of a
++ list
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' pkg.get_repo_keys
++ salt '*' pkg.get_repo_keys info=True
++
++ """
++ return __salt__["lowpkg.list_gpg_keys"](info, root)
++
++
++def add_repo_key(path=None, text=None, root=None, saltenv="base", **kwargs):
++ """Import a new key into the key storage
++
++ .. versionadded:: TBD
++
++ path
++ the path of the key file to import
++
++ text
++ the key data to import, in string form
++
++ root
++ use root as top level directory (default: "/")
++
++ saltenv
++ the environment the key file resides in
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.add_repo_key 'salt://apt/sources/test.key'
++ salt '*' pkg.add_repo_key text="'$KEY1'"
++
++ """
++ if not path and not text:
++ raise SaltInvocationError("Provide a key to add")
++
++ if path and text:
++ raise SaltInvocationError("Add a key via path or key")
++
++ if path:
++ cache_path = __salt__["cp.cache_file"](path, saltenv)
++
++ if not cache_path:
++ log.error("Unable to get cached copy of file: %s", path)
++ return False
++
++ with salt.utils.files.fopen(cache_path, "r") as f:
++ text = f.read()
++
++ return __salt__["lowpkg.import_gpg_key"](text, root)
++
++
++def del_repo_key(keyid, root=None, **kwargs):
++ """Remove a key from the key storage
++
++ .. versionadded:: TBD
++
++ keyid
++ key identifier
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.del_repo_key keyid=gpg-pubkey-3dbdc284-53674dd4
++
++ """
++ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 2da470bea3..318c871b37 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -3261,3 +3261,91 @@ def services_need_restart(root=None, **kwargs):
+ services = zypper_output.split()
+
+ return services
++
++
++def get_repo_keys(info=False, root=None, **kwargs):
++ """Return the list of all the GPG keys stored in the RPM database
++
++ .. versionadded:: TBD
++
++ info
++ get the key information, returning a dictionary instead of a
++ list
++
++ root
++ use root as top level directory (default: "/")
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' pkg.get_repo_keys
++ salt '*' pkg.get_repo_keys info=True
++
++ """
++ return __salt__["lowpkg.list_gpg_keys"](info, root)
++
++
++def add_repo_key(path=None, text=None, root=None, saltenv="base", **kwargs):
++ """Import a new key into the key storage
++
++ .. versionadded:: TBD
++
++ path
++ the path of the key file to import
++
++ text
++ the key data to import, in string form
++
++ root
++ use root as top level directory (default: "/")
++
++ saltenv
++ the environment the key file resides in
++
++ CLI Examples:
++
++ .. code-block:: bash
++
++ salt '*' pkg.add_repo_key 'salt://apt/sources/test.key'
++ salt '*' pkg.add_repo_key text="'$KEY1'"
++
++ """
++ if not path and not text:
++ raise SaltInvocationError("Provide a key to add")
++
++ if path and text:
++ raise SaltInvocationError("Add a key via path or key")
++
++ if path:
++ cache_path = __salt__["cp.cache_file"](path, saltenv)
++
++ if not cache_path:
++ log.error("Unable to get cached copy of file: %s", path)
++ return False
++
++ with salt.utils.files.fopen(cache_path, "r") as f:
++ text = f.read()
++
++ return __salt__["lowpkg.import_gpg_key"](text, root)
++
++
++def del_repo_key(keyid, root=None, **kwargs):
++ """Remove a key from the key storage
++
++ .. versionadded:: TBD
++
++ keyid
++        key identifier
++
++ root
++ use root as top level directory (default: "/")
++
++    CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' pkg.del_repo_key keyid=gpg-pubkey-3dbdc284-53674dd4
++
++ """
++ return __salt__["lowpkg.remove_gpg_key"](keyid, root)
+diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
+index 67a50c3ca0..c2d23f95bb 100644
+--- a/salt/states/pkgrepo.py
++++ b/salt/states/pkgrepo.py
+@@ -118,6 +118,7 @@ Using ``aptkey: False`` with ``keyserver`` and ``keyid``:
+ """
+
+
++import os
+ import sys
+
+ import salt.utils.data
+@@ -745,3 +746,209 @@ def absent(name, **kwargs):
+ ret["comment"] = "Failed to remove repo {}".format(name)
+
+ return ret
++
++
++def _normalize_repo(repo):
++ """Normalize the get_repo information"""
++    # The `pkg.get_repo()` implementation of each virtual module
++    # parses the information directly from the repository
++    # configuration file, so the field names can differ from the
++    # ones that `pkg.mod_repo()` accepts
++
++    # Fields that are not present in the translation table are dropped
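++    # For example (hypothetical values), on a SUSE system
++    #   {"name": "My repo", "autorefresh": True, "unknown": 1}
++    # is normalized to
++    #   {"humanname": "My repo", "refresh": True}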
++ suse = {
++ # "alias": "repo",
++ "name": "humanname",
++ "priority": "priority",
++ "enabled": "enabled",
++ "autorefresh": "refresh",
++ "gpgcheck": "gpgcheck",
++ "keepackages": "cache",
++ "baseurl": "url",
++ }
++ translator = {
++ "Suse": suse,
++ }
++ table = translator.get(__grains__["os_family"], {})
++ return {table[k]: v for k, v in repo.items() if k in table}
++
++
++def _normalize_key(key):
++ """Normalize the info_gpg_key information"""
++
++    # Fields that are not present in the translation table are dropped
++ rpm = {
++ "Description": "key",
++ }
++ translator = {
++ "Suse": rpm,
++ "RedHat": rpm,
++ }
++ table = translator.get(__grains__["os_family"], {})
++ return {table[k]: v for k, v in key.items() if k in table}
++
++
++def _repos_keys_migrate_drop(root, keys, drop):
++ """Helper function to calculate repost and key migrations"""
++
++ def _d2s(d):
++ """Serialize a dict and store in a set"""
++ return {
++ (k, tuple((_k, _v) for _k, _v in sorted(v.items())))
++ for k, v in sorted(d.items())
++ }
++
++ src_repos = _d2s(
++ {k: _normalize_repo(v) for k, v in __salt__["pkg.list_repos"]().items()}
++ )
++ # There is no guarantee that the target repository is even initialized
++ try:
++ tgt_repos = _d2s(
++ {
++ k: _normalize_repo(v)
++ for k, v in __salt__["pkg.list_repos"](root=root).items()
++ }
++ )
++ except Exception: # pylint: disable=broad-except
++ tgt_repos = set()
++
++ src_keys = set()
++ tgt_keys = set()
++ if keys:
++ src_keys = _d2s(
++ {
++ k: _normalize_key(v)
++ for k, v in __salt__["lowpkg.list_gpg_keys"](info=True).items()
++ }
++ )
++ try:
++ tgt_keys = _d2s(
++ {
++ k: _normalize_key(v)
++ for k, v in __salt__["lowpkg.list_gpg_keys"](
++ info=True, root=root
++ ).items()
++ }
++ )
++ except Exception: # pylint: disable=broad-except
++ pass
++
++ repos_to_migrate = src_repos - tgt_repos
++ repos_to_drop = tgt_repos - src_repos if drop else set()
++
++ keys_to_migrate = src_keys - tgt_keys
++ keys_to_drop = tgt_keys - src_keys if drop else set()
++
++ return (repos_to_migrate, repos_to_drop, keys_to_migrate, keys_to_drop)
++
++
++def _copy_repository_to(root):
++ repo = {
++ "Suse": ["/etc/zypp/repos.d"],
++ "RedHat": ["/etc/yum.conf", "/etc/yum.repos.d"],
++ }
++ for src in repo.get(__grains__["os_family"], []):
++ dst = os.path.join(root, os.path.relpath(src, os.path.sep))
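++        # e.g. with root="/mnt": "/etc/zypp/repos.d" -> "/mnt/etc/zypp/repos.d"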
++ __salt__["file.copy"](src=src, dst=dst, recurse=True)
++
++
++def migrated(name, keys=True, drop=False, method=None, **kwargs):
++ """Migrate a repository from one directory to another, including the
++ GPG keys if requested
++
++ .. versionadded:: TBD
++
++ name
++        Directory where the repositories will be migrated to. For
++        example, if we are booting from a USB key and we mounted the
++        rootfs in "/mnt", the repositories will live in
++        "/mnt/etc/yum.repos.d" or in "/mnt/etc/zypp/repos.d", depending
++        on the system. In both cases the expected value for "name"
++        would be "/mnt"
++
++ keys
++        If True, all the keys will be migrated
++
++ drop
++ If True, the target repositories that do not exist in the
++ source will be dropped
++
++ method
++ If None or "salt", it will use the Salt API to migrate the
++ repositories, if "copy", it will copy the repository files
++ directly
++
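++    A minimal usage sketch (the mount point and option values are
++    illustrative):
++
++    .. code-block:: yaml
++
++        /mnt:
++          pkgrepo.migrated:
++            - keys: True
++            - drop: True
++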
++ """
++ ret = {"name": name, "result": False, "changes": {}, "comment": ""}
++
++ if __grains__["os_family"] not in ("Suse",):
++ ret["comment"] = "Migration not supported for this platform"
++ return ret
++
++ if keys and "lowpkg.import_gpg_key" not in __salt__:
++ ret["comment"] = "Keys cannot be migrated for this platform"
++ return ret
++
++ if method not in (None, "salt", "copy"):
++ ret["comment"] = "Migration method not supported"
++ return ret
++
++ (
++ repos_to_migrate,
++ repos_to_drop,
++ keys_to_migrate,
++ keys_to_drop,
++ ) = _repos_keys_migrate_drop(name, keys, drop)
++
++ if not any((repos_to_migrate, repos_to_drop, keys_to_migrate, keys_to_drop)):
++ ret["result"] = True
++ ret["comment"] = "Repositories are already migrated"
++ return ret
++
++ if __opts__["test"]:
++ ret["result"] = None
++ ret["comment"] = "There are keys or repositories to migrate or drop"
++ ret["changes"] = {
++ "repos to migrate": [repo for repo, _ in repos_to_migrate],
++ "repos to drop": [repo for repo, _ in repos_to_drop],
++ "keys to migrate": [key for key, _ in keys_to_migrate],
++ "keys to drop": [key for key, _ in keys_to_drop],
++ }
++ return ret
++
++ for repo, repo_info in repos_to_migrate:
++ if method == "copy":
++ _copy_repository_to(name)
++ else:
++ __salt__["pkg.mod_repo"](repo, **dict(repo_info), root=name)
++ for repo, _ in repos_to_drop:
++ __salt__["pkg.del_repo"](repo, root=name)
++
++ for _, key_info in keys_to_migrate:
++ __salt__["lowpkg.import_gpg_key"](dict(key_info)["key"], root=name)
++ for key, _ in keys_to_drop:
++ __salt__["lowpkg.remove_gpg_key"](key, root=name)
++
++ (
++ rem_repos_to_migrate,
++ rem_repos_to_drop,
++ rem_keys_to_migrate,
++ rem_keys_to_drop,
++ ) = _repos_keys_migrate_drop(name, keys, drop)
++
++ if any(
++ (rem_repos_to_migrate, rem_repos_to_drop, rem_keys_to_migrate, rem_keys_to_drop)
++ ):
++ ret["result"] = False
++ ret["comment"] = "Migration of repositories failed"
++ return ret
++
++ ret["result"] = True
++ ret["comment"] = "Repositories synchronized"
++ ret["changes"] = {
++ "repos migrated": [repo for repo, _ in repos_to_migrate],
++ "repos dropped": [repo for repo, _ in repos_to_drop],
++ "keys migrated": [key for key, _ in keys_to_migrate],
++ "keys dropped": [key for key, _ in keys_to_drop],
++ }
++
++ return ret
+diff --git a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py
+index 1354ee5d2d..45c62d793d 100644
+--- a/tests/pytests/unit/modules/test_yumpkg.py
++++ b/tests/pytests/unit/modules/test_yumpkg.py
+@@ -9,7 +9,7 @@ import salt.modules.rpm_lowpkg as rpm
+ import salt.modules.yumpkg as yumpkg
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError, SaltInvocationError
+-from tests.support.mock import MagicMock, Mock, call, patch
++from tests.support.mock import MagicMock, Mock, call, mock_open, patch
+
+ log = logging.getLogger(__name__)
+
+@@ -1908,6 +1908,48 @@ def test_get_repo_with_non_existent_repo(list_repos_var):
+ assert ret == expected, ret
+
+
++def test_get_repo_keys():
++ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)}
++ with patch.dict(yumpkg.__salt__, salt_mock):
++ assert yumpkg.get_repo_keys(info=True, root="/mnt")
++ salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt")
++
++
++def test_add_repo_key_fail():
++ with pytest.raises(SaltInvocationError):
++ yumpkg.add_repo_key()
++
++ with pytest.raises(SaltInvocationError):
++ yumpkg.add_repo_key(path="path", text="text")
++
++
++def test_add_repo_key_path():
++ salt_mock = {
++ "cp.cache_file": MagicMock(return_value="path"),
++ "lowpkg.import_gpg_key": MagicMock(return_value=True),
++ }
++ with patch("salt.utils.files.fopen", mock_open(read_data="text")), patch.dict(
++ yumpkg.__salt__, salt_mock
++ ):
++ assert yumpkg.add_repo_key(path="path", root="/mnt")
++ salt_mock["cp.cache_file"].assert_called_once_with("path", "base")
++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt")
++
++
++def test_add_repo_key_text():
++ salt_mock = {"lowpkg.import_gpg_key": MagicMock(return_value=True)}
++ with patch.dict(yumpkg.__salt__, salt_mock):
++ assert yumpkg.add_repo_key(text="text", root="/mnt")
++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt")
++
++
++def test_del_repo_key():
++ salt_mock = {"lowpkg.remove_gpg_key": MagicMock(return_value=True)}
++ with patch.dict(yumpkg.__salt__, salt_mock):
++ assert yumpkg.del_repo_key(keyid="keyid", root="/mnt")
++ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
++
++
+ def test_pkg_update_dnf():
+ """
+ Tests that the proper CLI options are added when obsoletes=False
+diff --git a/tests/pytests/unit/modules/test_zypperpkg.py b/tests/pytests/unit/modules/test_zypperpkg.py
+index 1e2d6ea443..91132b7277 100644
+--- a/tests/pytests/unit/modules/test_zypperpkg.py
++++ b/tests/pytests/unit/modules/test_zypperpkg.py
+@@ -10,8 +10,8 @@ import pytest
+
+ import salt.modules.pkg_resource as pkg_resource
+ import salt.modules.zypperpkg as zypper
+-from salt.exceptions import CommandExecutionError
+-from tests.support.mock import MagicMock, patch
++from salt.exceptions import CommandExecutionError, SaltInvocationError
++from tests.support.mock import MagicMock, mock_open, patch
+
+
+ @pytest.fixture
+@@ -354,3 +354,44 @@ def test_dist_upgrade_failure():
+
+ assert exc.exception.info["changes"] == {}
+ assert exc.exception.info["result"]["stdout"] == zypper_output
++
++
++def test_get_repo_keys():
++ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)}
++ with patch.dict(zypper.__salt__, salt_mock):
++ assert zypper.get_repo_keys(info=True, root="/mnt")
++ salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt")
++
++
++def test_add_repo_key_fail():
++ with pytest.raises(SaltInvocationError):
++ zypper.add_repo_key()
++
++ with pytest.raises(SaltInvocationError):
++ zypper.add_repo_key(path="path", text="text")
++
++
++def test_add_repo_key_path():
++ salt_mock = {
++ "cp.cache_file": MagicMock(return_value="path"),
++ "lowpkg.import_gpg_key": MagicMock(return_value=True),
++ }
++ with patch("salt.utils.files.fopen", mock_open(read_data="text")), patch.dict(
++ zypper.__salt__, salt_mock
++ ):
++ assert zypper.add_repo_key(path="path", root="/mnt")
++ salt_mock["cp.cache_file"].assert_called_once_with("path", "base")
++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt")
++
++
++def test_add_repo_key_text():
++ salt_mock = {"lowpkg.import_gpg_key": MagicMock(return_value=True)}
++ with patch.dict(zypper.__salt__, salt_mock):
++ assert zypper.add_repo_key(text="text", root="/mnt")
++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt")
++
++
++def test_del_repo_key():
++ salt_mock = {"lowpkg.remove_gpg_key": MagicMock(return_value=True)}
++ with patch.dict(zypper.__salt__, salt_mock):
++ assert zypper.del_repo_key(keyid="keyid", root="/mnt")
++ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt")
+diff --git a/tests/pytests/unit/states/test_pkgrepo.py b/tests/pytests/unit/states/test_pkgrepo.py
+index c572583d19..5f540bd245 100644
+--- a/tests/pytests/unit/states/test_pkgrepo.py
++++ b/tests/pytests/unit/states/test_pkgrepo.py
+@@ -72,3 +72,451 @@ def test_managed_insecure_key():
+ ret["comment"]
+ == "Cannot have 'key_url' using http with 'allow_insecure_key' set to True"
+ )
++
++
++def test__normalize_repo_suse():
++ repo = {
++ "name": "repo name",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ }
++ grains = {"os_family": "Suse"}
++ with patch.dict(pkgrepo.__grains__, grains):
++ assert pkgrepo._normalize_repo(repo) == {
++ "humanname": "repo name",
++ "refresh": True,
++ "priority": 0,
++ }
++
++
++def test__normalize_key_rpm():
++ key = {"Description": "key", "Date": "Date", "Other": "Other"}
++ for os_family in ("Suse", "RedHat"):
++ grains = {"os_family": os_family}
++ with patch.dict(pkgrepo.__grains__, grains):
++ assert pkgrepo._normalize_key(key) == {"key": "key"}
++
++
++def test__repos_keys_migrate_drop_migrate_to_empty():
++ src_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-2": {
++ "name": "repo name 2",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++ tgt_repos = {}
++
++ src_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key2": {"Description": "key2", "Other": "Other2"},
++ }
++ tgt_keys = {}
++
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]),
++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]),
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo._repos_keys_migrate_drop("/mnt", False, False) == (
++ {
++ (
++ "repo-1",
++ (
++ ("humanname", "repo name 1"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ (
++ "repo-2",
++ (
++ ("humanname", "repo name 2"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ set(),
++ set(),
++ set(),
++ )
++
++
++def test__repos_keys_migrate_drop_migrate_to_empty_keys():
++ src_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-2": {
++ "name": "repo name 2",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++ tgt_repos = {}
++
++ src_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key2": {"Description": "key2", "Other": "Other2"},
++ }
++ tgt_keys = {}
++
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]),
++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]),
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, False) == (
++ {
++ (
++ "repo-1",
++ (
++ ("humanname", "repo name 1"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ (
++ "repo-2",
++ (
++ ("humanname", "repo name 2"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ set(),
++ {("key1", (("key", "key1"),)), ("key2", (("key", "key2"),))},
++ set(),
++ )
++
++
++def test__repos_keys_migrate_drop_migrate_to_populated_no_drop():
++ src_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-2": {
++ "name": "repo name 2",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++ tgt_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-3": {
++ "name": "repo name 3",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++
++ src_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key2": {"Description": "key2", "Other": "Other2"},
++ }
++ tgt_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key3": {"Description": "key3", "Other": "Other2"},
++ }
++
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]),
++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]),
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, False) == (
++ {
++ (
++ "repo-2",
++ (
++ ("humanname", "repo name 2"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ set(),
++ {("key2", (("key", "key2"),))},
++ set(),
++ )
++
++
++def test__repos_keys_migrate_drop_migrate_to_populated_drop():
++ src_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-2": {
++ "name": "repo name 2",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++ tgt_repos = {
++ "repo-1": {
++ "name": "repo name 1",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": True,
++ },
++ "repo-3": {
++ "name": "repo name 3",
++ "autorefresh": True,
++ "priority": 0,
++ "pkg_gpgcheck": False,
++ },
++ }
++
++ src_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key2": {"Description": "key2", "Other": "Other2"},
++ }
++ tgt_keys = {
++ "key1": {"Description": "key1", "Other": "Other1"},
++ "key3": {"Description": "key3", "Other": "Other2"},
++ }
++
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]),
++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]),
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, True) == (
++ {
++ (
++ "repo-2",
++ (
++ ("humanname", "repo name 2"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ {
++ (
++ "repo-3",
++ (
++ ("humanname", "repo name 3"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ {("key2", (("key", "key2"),))},
++ {("key3", (("key", "key3"),))},
++ )
++
++
++@pytest.mark.skip_on_windows(reason="Not a Windows test")
++def test__copy_repository_to_suse():
++ grains = {"os_family": "Suse"}
++ salt_mock = {"file.copy": MagicMock()}
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ pkgrepo._copy_repository_to("/mnt")
++ salt_mock["file.copy"].assert_called_with(
++ src="/etc/zypp/repos.d", dst="/mnt/etc/zypp/repos.d", recurse=True
++ )
++
++
++def test_migrated_non_supported_platform():
++ grains = {"os_family": "Debian"}
++ with patch.dict(pkgrepo.__grains__, grains):
++ assert pkgrepo.migrated("/mnt") == {
++ "name": "/mnt",
++ "result": False,
++ "changes": {},
++ "comment": "Migration not supported for this platform",
++ }
++
++
++def test_migrated_missing_keys_api():
++ grains = {"os_family": "Suse"}
++ with patch.dict(pkgrepo.__grains__, grains):
++ assert pkgrepo.migrated("/mnt") == {
++ "name": "/mnt",
++ "result": False,
++ "changes": {},
++ "comment": "Keys cannot be migrated for this platform",
++ }
++
++
++def test_migrated_wrong_method():
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "lowpkg.import_gpg_key": True,
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo.migrated("/mnt", method_="magic") == {
++ "name": "/mnt",
++ "result": False,
++ "changes": {},
++ "comment": "Migration method not supported",
++ }
++
++
++@patch(
++ "salt.states.pkgrepo._repos_keys_migrate_drop",
++ MagicMock(return_value=(set(), set(), set(), set())),
++)
++def test_migrated_empty():
++ grains = {"os_family": "Suse"}
++ salt_mock = {
++ "lowpkg.import_gpg_key": True,
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__salt__, salt_mock
++ ):
++ assert pkgrepo.migrated("/mnt") == {
++ "name": "/mnt",
++ "result": True,
++ "changes": {},
++ "comment": "Repositories are already migrated",
++ }
++
++
++def test_migrated():
++ _repos_keys_migrate_drop = MagicMock()
++ _repos_keys_migrate_drop.side_effect = [
++ (
++ {
++ (
++ "repo-1",
++ (
++ ("humanname", "repo name 1"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ {
++ (
++ "repo-2",
++ (
++ ("humanname", "repo name 2"),
++ ("priority", 0),
++ ("refresh", True),
++ ),
++ ),
++ },
++ {("key1", (("key", "key1"),))},
++ {("key2", (("key", "key2"),))},
++ ),
++ (set(), set(), set(), set()),
++ ]
++
++ grains = {"os_family": "Suse"}
++ opts = {"test": False}
++ salt_mock = {
++ "pkg.mod_repo": MagicMock(),
++ "pkg.del_repo": MagicMock(),
++ "lowpkg.import_gpg_key": MagicMock(),
++ "lowpkg.remove_gpg_key": MagicMock(),
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__opts__, opts
++ ), patch.dict(pkgrepo.__salt__, salt_mock), patch(
++ "salt.states.pkgrepo._repos_keys_migrate_drop", _repos_keys_migrate_drop
++ ):
++ assert pkgrepo.migrated("/mnt", True, True) == {
++ "name": "/mnt",
++ "result": True,
++ "changes": {
++ "repos migrated": ["repo-1"],
++ "repos dropped": ["repo-2"],
++ "keys migrated": ["key1"],
++ "keys dropped": ["key2"],
++ },
++ "comment": "Repositories synchronized",
++ }
++ salt_mock["pkg.mod_repo"].assert_called_with(
++ "repo-1", humanname="repo name 1", priority=0, refresh=True, root="/mnt"
++ )
++ salt_mock["pkg.del_repo"].assert_called_with("repo-2", root="/mnt")
++ salt_mock["lowpkg.import_gpg_key"].assert_called_with("key1", root="/mnt")
++ salt_mock["lowpkg.remove_gpg_key"].assert_called_with("key2", root="/mnt")
++
++
++def test_migrated_test():
++ _repos_keys_migrate_drop = MagicMock()
++ _repos_keys_migrate_drop.return_value = (
++ {
++ (
++ "repo-1",
++ (("humanname", "repo name 1"), ("priority", 0), ("refresh", True)),
++ ),
++ },
++ {
++ (
++ "repo-2",
++ (("humanname", "repo name 2"), ("priority", 0), ("refresh", True)),
++ ),
++ },
++ {("key1", (("key", "key1"),))},
++ {("key2", (("key", "key2"),))},
++ )
++
++ grains = {"os_family": "Suse"}
++ opts = {"test": True}
++ salt_mock = {
++ "lowpkg.import_gpg_key": True,
++ }
++ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
++ pkgrepo.__opts__, opts
++ ), patch.dict(pkgrepo.__salt__, salt_mock), patch(
++ "salt.states.pkgrepo._repos_keys_migrate_drop", _repos_keys_migrate_drop
++ ):
++ assert pkgrepo.migrated("/mnt", True, True) == {
++ "name": "/mnt",
++ "result": None,
++ "changes": {
++ "repos to migrate": ["repo-1"],
++ "repos to drop": ["repo-2"],
++ "keys to migrate": ["key1"],
++ "keys to drop": ["key2"],
++ },
++ "comment": "There are keys or repositories to migrate or drop",
++ }
+--
+2.39.2
+
+
diff --git a/add-missing-contextvars-dependency-in-salt.version.patch b/add-missing-contextvars-dependency-in-salt.version.patch
new file mode 100644
index 0000000..6c8a26a
--- /dev/null
+++ b/add-missing-contextvars-dependency-in-salt.version.patch
@@ -0,0 +1,38 @@
+From 1a5716365e0c3b8d290759847f4046f28ee4b79f Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 09:53:20 +0200
+Subject: [PATCH] Add missing contextvars dependency in salt.version
+
+---
+ salt/version.py | 1 +
+ tests/unit/states/test_pip_state.py | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/salt/version.py b/salt/version.py
+index 44372830b2..b2643550e9 100644
+--- a/salt/version.py
++++ b/salt/version.py
+@@ -717,6 +717,7 @@ def dependency_information(include_salt_cloud=False):
+ ("docker-py", "docker", "__version__"),
+ ("packaging", "packaging", "__version__"),
+ ("looseversion", "looseversion", None),
++ ("contextvars", "contextvars", None),
+ ("relenv", "relenv", "__version__"),
+ ]
+
+diff --git a/tests/unit/states/test_pip_state.py b/tests/unit/states/test_pip_state.py
+index d70b115000..fe5d171a15 100644
+--- a/tests/unit/states/test_pip_state.py
++++ b/tests/unit/states/test_pip_state.py
+@@ -419,7 +419,7 @@ class PipStateInstallationErrorTest(TestCase):
+ def test_importable_installation_error(self):
+ extra_requirements = []
+ for name, version in salt.version.dependency_information():
+- if name in ["PyYAML", "packaging", "looseversion"]:
++ if name in ["PyYAML", "packaging", "looseversion", "contextvars"]:
+ extra_requirements.append("{}=={}".format(name, version))
+ failures = {}
+ pip_version_requirements = [
+--
+2.45.0
+
diff --git a/add-publish_batch-to-clearfuncs-exposed-methods.patch b/add-publish_batch-to-clearfuncs-exposed-methods.patch
new file mode 100644
index 0000000..9effdce
--- /dev/null
+++ b/add-publish_batch-to-clearfuncs-exposed-methods.patch
@@ -0,0 +1,26 @@
+From 3ef2071daf7a415f2c43e1339affe2b7cad93b3e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Thu, 28 May 2020 09:37:08 +0100
+Subject: [PATCH] Add publish_batch to ClearFuncs exposed methods
+
+---
+ salt/master.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/salt/master.py b/salt/master.py
+index 2a526b4f21..a0552fa232 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -1960,6 +1960,7 @@ class ClearFuncs(TransportMethods):
+ expose_methods = (
+ "ping",
+ "publish",
++ "publish_batch",
+ "get_token",
+ "mk_token",
+ "wheel",
+--
+2.39.2
+
+
diff --git a/add-salt-ssh-support-with-venv-salt-minion-3004-493.patch b/add-salt-ssh-support-with-venv-salt-minion-3004-493.patch
new file mode 100644
index 0000000..dc38bd4
--- /dev/null
+++ b/add-salt-ssh-support-with-venv-salt-minion-3004-493.patch
@@ -0,0 +1,795 @@
+From 3fd6c0c6793632c819fb5f8fb3b3538463eaaccc Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 24 Feb 2022 16:52:24 +0300
+Subject: [PATCH] Add salt-ssh support with venv-salt-minion - 3004
+ (#493)
+
+* Add salt-ssh support with venv-salt-minion
+
+* Add some comments and drop the commented line
+
+* Fix return in check_venv_hash_file
+
+* Convert all script parameters to strings
+
+* Reduce the size of minion response
+
+Minion response contains SSH_PY_CODE wrapped in base64.
+This fix reduces the size of the response in DEBUG logging
+
+* Make VENV_HASH_FILE global
+
+* Pass the context to roster modules
+
+* Avoid race condition on loading roster modules
+
+* Prevent simultaneous connections to the salt-ssh minion
+
+* Make ssh session grace time configurable
+
+* Prevent possible segfault by GC
+
+* Revert "Avoid race condition on loading roster modules"
+
+This reverts commit 8ff822a162cc494d3528184aef983ad20e09f4e2.
+
+* Prevent deadlocks with importlib on using LazyLoader
+
+* Make logging on salt-ssh errors more informative
+
+* Add comments about using salt.loader.LOAD_LOCK
+
+* Fix test_loader test
+
+* Prevent deadlocks on using logging
+
+* Use collections.deque instead of list for salt-ssh
+
+Suggested by @agraul
+
+* Get proper exitstatus from salt.utils.vt.Terminal
+
+to prevent empty event returns due to improperly detecting
+the child process as failed
+
+* Do not run pre flight script for raw_shell
+---
+ salt/_logging/impl.py | 55 +++++++-----
+ salt/client/ssh/__init__.py | 157 ++++++++++++++++++++++++++++-----
+ salt/client/ssh/client.py | 7 +-
+ salt/client/ssh/shell.py | 8 ++
+ salt/client/ssh/ssh_py_shim.py | 108 +++++++++++++----------
+ salt/loader/__init__.py | 31 ++++++-
+ salt/netapi/__init__.py | 3 +-
+ salt/roster/__init__.py | 6 +-
+ tests/unit/test_loader.py | 2 +-
+ 9 files changed, 278 insertions(+), 99 deletions(-)
+
+diff --git a/salt/_logging/impl.py b/salt/_logging/impl.py
+index cc18f49a9e..e050f43caf 100644
+--- a/salt/_logging/impl.py
++++ b/salt/_logging/impl.py
+@@ -14,6 +14,7 @@ import re
+ import socket
+ import sys
+ import traceback
++import threading
+ import types
+ import urllib.parse
+
+@@ -104,6 +105,10 @@ DFLT_LOG_DATEFMT_LOGFILE = "%Y-%m-%d %H:%M:%S"
+ DFLT_LOG_FMT_CONSOLE = "[%(levelname)-8s] %(message)s"
+ DFLT_LOG_FMT_LOGFILE = "%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s"
+
++# LOG_LOCK is used to prevent deadlocks when using logging
++# in combination with multiprocessing in salt-api
++LOG_LOCK = threading.Lock()
++
+
+ class SaltLogRecord(logging.LogRecord):
+ def __init__(self, *args, **kwargs):
+@@ -270,27 +275,35 @@ class SaltLoggingClass(LOGGING_LOGGER_CLASS, metaclass=LoggingMixinMeta):
+ else:
+ extra["exc_info_on_loglevel"] = exc_info_on_loglevel
+
+- if sys.version_info < (3, 8):
+- LOGGING_LOGGER_CLASS._log(
+- self,
+- level,
+- msg,
+- args,
+- exc_info=exc_info,
+- extra=extra,
+- stack_info=stack_info,
+- )
+- else:
+- LOGGING_LOGGER_CLASS._log(
+- self,
+- level,
+- msg,
+- args,
+- exc_info=exc_info,
+- extra=extra,
+- stack_info=stack_info,
+- stacklevel=stacklevel,
+- )
++ try:
++ LOG_LOCK.acquire()
++ if sys.version_info < (3,):
++ LOGGING_LOGGER_CLASS._log(
++ self, level, msg, args, exc_info=exc_info, extra=extra
++ )
++ elif sys.version_info < (3, 8):
++ LOGGING_LOGGER_CLASS._log(
++ self,
++ level,
++ msg,
++ args,
++ exc_info=exc_info,
++ extra=extra,
++ stack_info=stack_info,
++ )
++ else:
++ LOGGING_LOGGER_CLASS._log(
++ self,
++ level,
++ msg,
++ args,
++ exc_info=exc_info,
++ extra=extra,
++ stack_info=stack_info,
++ stacklevel=stacklevel,
++ )
++ finally:
++ LOG_LOCK.release()
+
+ def makeRecord(
+ self,
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index 19089ce8ad..e6837df4e5 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -6,11 +6,13 @@ import base64
+ import binascii
+ import copy
+ import datetime
++import gc
+ import getpass
+ import hashlib
+ import logging
+ import multiprocessing
+ import os
++import psutil
+ import queue
+ import re
+ import shlex
+@@ -20,6 +22,7 @@ import tarfile
+ import tempfile
+ import time
+ import uuid
++from collections import deque
+
+ import salt.client.ssh.shell
+ import salt.client.ssh.wrapper
+@@ -47,6 +50,7 @@ import salt.utils.url
+ import salt.utils.verify
+ from salt._logging import LOG_LEVELS
+ from salt._logging.mixins import MultiprocessingStateMixin
++from salt._logging.impl import LOG_LOCK
+ from salt.template import compile_template
+ from salt.utils.process import Process
+ from salt.utils.zeromq import zmq
+@@ -146,15 +150,26 @@ if [ "$SUDO" ] && [ "$SUDO_USER" ]
+ then SUDO="$SUDO -u $SUDO_USER"
+ fi
+ EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
+-PYTHON_CMDS="python3 /usr/libexec/platform-python python27 python2.7 python26 python2.6 python2 python"
++set +x
++SSH_PY_CODE='import base64;
++ exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
++if [ -n "$DEBUG" ]
++ then set -x
++fi
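++# Try the Salt Bundle (venv-salt-minion) interpreter first, then fall
++# back to the system Python interpreters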
++PYTHON_CMDS="/var/tmp/venv-salt-minion/bin/python python3 /usr/libexec/platform-python python27 python2.7 python26 python2.6 python2 python"
+ for py_cmd in $PYTHON_CMDS
+ do
+ if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
+ then
+ py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
+ cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
++ cmdpath=`readlink -f $cmdpath`
+ if file $cmdpath | grep "shell script" > /dev/null
+ then
++ if echo $cmdpath | grep venv-salt-minion > /dev/null
++ then
++ exec $SUDO "$cmdpath" -c "$SSH_PY_CODE"
++ fi
+ ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
+ 'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
+ export `$py_cmd -c \
+@@ -166,13 +181,9 @@ do
+ exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
+ MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
+ PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
+- "$py_cmd_path" -c \
+- 'import base64;
+- exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
++ "$py_cmd_path" -c "$SSH_PY_CODE"
+ else
+- exec $SUDO "$py_cmd_path" -c \
+- 'import base64;
+- exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
++ exec $SUDO "$py_cmd_path" -c "$SSH_PY_CODE"
+ fi
+ exit 0
+ else
+@@ -189,6 +200,9 @@ EOF'''.format(
+ ]
+ )
+
++# The file on a salt-ssh minion used to identify if Salt Bundle was deployed
++VENV_HASH_FILE = "/var/tmp/venv-salt-minion/venv-hash.txt"
++
+ if not salt.utils.platform.is_windows() and not salt.utils.platform.is_junos():
+ shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
+ if not os.path.exists(shim_file):
+@@ -209,7 +223,7 @@ class SSH(MultiprocessingStateMixin):
+
+ ROSTER_UPDATE_FLAG = "#__needs_update"
+
+- def __init__(self, opts):
++ def __init__(self, opts, context=None):
+ self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
+ pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
+ if os.path.exists(pull_sock) and zmq:
+@@ -236,7 +250,9 @@ class SSH(MultiprocessingStateMixin):
+ else "glob"
+ )
+ self._expand_target()
+- self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
++ self.roster = salt.roster.Roster(
++ self.opts, self.opts.get("roster", "flat"), context=context
++ )
+ self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
+ if not self.targets:
+ self._update_targets()
+@@ -316,6 +332,13 @@ class SSH(MultiprocessingStateMixin):
+ extended_cfg=self.opts.get("ssh_ext_alternatives"),
+ )
+ self.mods = mod_data(self.fsclient)
++ self.cache = salt.cache.Cache(self.opts)
++ self.master_id = self.opts["id"]
++ self.max_pid_wait = int(self.opts.get("ssh_max_pid_wait", 600))
++ self.session_flock_file = os.path.join(
++ self.opts["cachedir"], "salt-ssh.session.lock"
++ )
++ self.ssh_session_grace_time = int(self.opts.get("ssh_session_grace_time", 3))
+
+ # __setstate__ and __getstate__ are only used on spawning platforms.
+ def __setstate__(self, state):
+@@ -546,6 +569,8 @@ class SSH(MultiprocessingStateMixin):
+ """
+ Run the routine in a "Thread", put a dict on the queue
+ """
++ LOG_LOCK.release()
++ salt.loader.LOAD_LOCK.release()
+ opts = copy.deepcopy(opts)
+ single = Single(
+ opts,
+@@ -585,7 +610,7 @@ class SSH(MultiprocessingStateMixin):
+ """
+ que = multiprocessing.Queue()
+ running = {}
+- target_iter = self.targets.__iter__()
++ targets_queue = deque(self.targets.keys())
+ returned = set()
+ rets = set()
+ init = False
+@@ -594,11 +619,43 @@ class SSH(MultiprocessingStateMixin):
+ log.error("No matching targets found in roster.")
+ break
+ if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
+- try:
+- host = next(target_iter)
+- except StopIteration:
++ if targets_queue:
++ host = targets_queue.popleft()
++ else:
+ init = True
+ continue
++ with salt.utils.files.flopen(self.session_flock_file, "w"):
++ cached_session = self.cache.fetch("salt-ssh/session", host)
++ if cached_session is not None and "ts" in cached_session:
++ prev_session_running = time.time() - cached_session["ts"]
++ if (
++ "pid" in cached_session
++ and cached_session.get("master_id", self.master_id)
++ == self.master_id
++ ):
++ pid_running = (
++ False
++ if cached_session["pid"] == 0
++ else psutil.pid_exists(cached_session["pid"])
++ )
++ if (
++ pid_running and prev_session_running < self.max_pid_wait
++ ) or (
++ not pid_running
++ and prev_session_running < self.ssh_session_grace_time
++ ):
++ targets_queue.append(host)
++ time.sleep(0.3)
++ continue
++ self.cache.store(
++ "salt-ssh/session",
++ host,
++ {
++ "pid": 0,
++ "master_id": self.master_id,
++ "ts": time.time(),
++ },
++ )
+ for default in self.defaults:
+ if default not in self.targets[host]:
+ self.targets[host][default] = self.defaults[default]
+@@ -630,8 +687,38 @@ class SSH(MultiprocessingStateMixin):
+ mine,
+ )
+ routine = Process(target=self.handle_routine, args=args)
+- routine.start()
++ # Explicitly call garbage collector to prevent possible segfault
++ # in salt-api child process. (bsc#1188607)
++ gc.collect()
++ try:
++            # salt.loader.LOAD_LOCK is used to prevent a deadlock
++            # with importlib in combination with multiprocessing (bsc#1182851)
++            # If the salt-api child process is created while a LazyLoader
++            # instance is loading a module, the new child process inherits
++            # the acquired lock for this module. Touching this module with
++            # importlib inside the child process then leads to a deadlock.
++            #
++            # salt.loader.LOAD_LOCK prevents salt-api child process creation
++            # while a new LazyLoader instance is being created.
++            # salt.loader.LOAD_LOCK must be released explicitly in self.handle_routine
++ salt.loader.LOAD_LOCK.acquire()
++            # The same solution is applied to fix the logging deadlock.
++ # LOG_LOCK must be released explicitly in self.handle_routine
++ LOG_LOCK.acquire()
++ routine.start()
++ finally:
++ LOG_LOCK.release()
++ salt.loader.LOAD_LOCK.release()
+ running[host] = {"thread": routine}
++ with salt.utils.files.flopen(self.session_flock_file, "w"):
++ self.cache.store(
++ "salt-ssh/session",
++ host,
++ {
++ "pid": routine.pid,
++ "master_id": self.master_id,
++ "ts": time.time(),
++ },
++ )
+ continue
+ ret = {}
+ try:
+@@ -662,12 +749,27 @@ class SSH(MultiprocessingStateMixin):
+ )
+ ret = {"id": host, "ret": error}
+ log.error(error)
++ log.error(
++ "PID %s did not return any data for host '%s'",
++ running[host]["thread"].pid,
++ host,
++ )
+ yield {ret["id"]: ret["ret"]}
+ running[host]["thread"].join()
+ rets.add(host)
+ for host in rets:
+ if host in running:
+ running.pop(host)
++ with salt.utils.files.flopen(self.session_flock_file, "w"):
++ self.cache.store(
++ "salt-ssh/session",
++ host,
++ {
++ "pid": 0,
++ "master_id": self.master_id,
++ "ts": time.time(),
++ },
++ )
+ if len(rets) >= len(self.targets):
+ break
+ # Sleep when limit or all threads started
+@@ -1036,14 +1138,24 @@ class Single:
+ return False
+ return True
+
++ def check_venv_hash_file(self):
++ """
++ check if the venv exists on the remote machine
++ """
++ stdout, stderr, retcode = self.shell.exec_cmd(
++ "test -f {}".format(VENV_HASH_FILE)
++ )
++ return retcode == 0
++
+ def deploy(self):
+ """
+ Deploy salt-thin
+ """
+- self.shell.send(
+- self.thin,
+- os.path.join(self.thin_dir, "salt-thin.tgz"),
+- )
++ if not self.check_venv_hash_file():
++ self.shell.send(
++ self.thin,
++ os.path.join(self.thin_dir, "salt-thin.tgz"),
++ )
+ self.deploy_ext()
+ return True
+
+@@ -1071,8 +1183,9 @@ class Single:
+ Returns tuple of (stdout, stderr, retcode)
+ """
+ stdout = stderr = retcode = None
++ raw_shell = self.opts.get("raw_shell", False)
+
+- if self.ssh_pre_flight:
++ if self.ssh_pre_flight and not raw_shell:
+ if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
+ log.info(
+ "%s thin dir already exists. Not running ssh_pre_flight script",
+@@ -1086,14 +1199,16 @@ class Single:
+ stdout, stderr, retcode = self.run_ssh_pre_flight()
+ if retcode != 0:
+ log.error(
+- "Error running ssh_pre_flight script %s", self.ssh_pre_file
++ "Error running ssh_pre_flight script %s for host '%s'",
++ self.ssh_pre_file,
++ self.target["host"],
+ )
+ return stdout, stderr, retcode
+ log.info(
+ "Successfully ran the ssh_pre_flight script: %s", self.ssh_pre_file
+ )
+
+- if self.opts.get("raw_shell", False):
++ if raw_shell:
+ cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
+ stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
+
+diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py
+index be9247cb15..0b67598fc6 100644
+--- a/salt/client/ssh/client.py
++++ b/salt/client/ssh/client.py
+@@ -108,7 +108,7 @@ class SSHClient:
+ return sane_kwargs
+
+ def _prep_ssh(
+- self, tgt, fun, arg=(), timeout=None, tgt_type="glob", kwarg=None, **kwargs
++ self, tgt, fun, arg=(), timeout=None, tgt_type="glob", kwarg=None, context=None, **kwargs
+ ):
+ """
+ Prepare the arguments
+@@ -123,7 +123,7 @@ class SSHClient:
+ opts["selected_target_option"] = tgt_type
+ opts["tgt"] = tgt
+ opts["arg"] = arg
+- return salt.client.ssh.SSH(opts)
++ return salt.client.ssh.SSH(opts, context=context)
+
+ def cmd_iter(
+ self,
+@@ -160,7 +160,7 @@ class SSHClient:
+ final.update(ret)
+ return final
+
+- def cmd_sync(self, low):
++ def cmd_sync(self, low, context=None):
+ """
+ Execute a salt-ssh call synchronously.
+
+@@ -193,6 +193,7 @@ class SSHClient:
+ low.get("timeout"),
+ low.get("tgt_type"),
+ low.get("kwarg"),
++ context=context,
+ **kwargs
+ )
+
+diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py
+index cfa82d13c2..bc1ad034df 100644
+--- a/salt/client/ssh/shell.py
++++ b/salt/client/ssh/shell.py
+@@ -464,6 +464,14 @@ class Shell:
+ if stdout:
+ old_stdout = stdout
+ time.sleep(0.01)
++ if term.exitstatus is None:
++ try:
++ term.wait()
++ except: # pylint: disable=broad-except
++ # It's safe to put the broad exception handling here
++ # as we just need to ensure the child process in term finished
++ # to get proper term.exitstatus instead of None
++ pass
+ return ret_stdout, ret_stderr, term.exitstatus
+ finally:
+ term.close(terminate=True, kill=True)
+diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
+index b77749f495..293ea1b7fa 100644
+--- a/salt/client/ssh/ssh_py_shim.py
++++ b/salt/client/ssh/ssh_py_shim.py
+@@ -279,56 +279,72 @@ def main(argv): # pylint: disable=W0613
+ """
+ Main program body
+ """
+- thin_path = os.path.join(OPTIONS.saltdir, THIN_ARCHIVE)
+- if os.path.isfile(thin_path):
+- if OPTIONS.checksum != get_hash(thin_path, OPTIONS.hashfunc):
+- need_deployment()
+- unpack_thin(thin_path)
+- # Salt thin now is available to use
+- else:
+- if not sys.platform.startswith("win"):
+- scpstat = subprocess.Popen(["/bin/sh", "-c", "command -v scp"]).wait()
+- if scpstat != 0:
+- sys.exit(EX_SCP_NOT_FOUND)
+-
+- if os.path.exists(OPTIONS.saltdir) and not os.path.isdir(OPTIONS.saltdir):
+- sys.stderr.write(
+- 'ERROR: salt path "{0}" exists but is not a directory\n'.format(
+- OPTIONS.saltdir
++
++ virt_env = os.getenv("VIRTUAL_ENV", None)
++ # VIRTUAL_ENV environment variable is defined by venv-salt-minion wrapper
++ # it's used to check if the shim is running under this wrapper
++ venv_salt_call = None
++ if virt_env and "venv-salt-minion" in virt_env:
++ venv_salt_call = os.path.join(virt_env, "bin", "salt-call")
++ if not os.path.exists(venv_salt_call):
++ venv_salt_call = None
++ elif not os.path.exists(OPTIONS.saltdir):
++ os.makedirs(OPTIONS.saltdir)
++ cache_dir = os.path.join(OPTIONS.saltdir, "running_data", "var", "cache")
++ os.makedirs(os.path.join(cache_dir, "salt"))
++ os.symlink("salt", os.path.relpath(os.path.join(cache_dir, "venv-salt-minion")))
++
++ if venv_salt_call is None:
++ # Use Salt thin only if Salt Bundle (venv-salt-minion) is not available
++ thin_path = os.path.join(OPTIONS.saltdir, THIN_ARCHIVE)
++ if os.path.isfile(thin_path):
++ if OPTIONS.checksum != get_hash(thin_path, OPTIONS.hashfunc):
++ need_deployment()
++ unpack_thin(thin_path)
++ # Salt thin now is available to use
++ else:
++ if not sys.platform.startswith("win"):
++ scpstat = subprocess.Popen(["/bin/sh", "-c", "command -v scp"]).wait()
++ if scpstat != 0:
++ sys.exit(EX_SCP_NOT_FOUND)
++
++ if os.path.exists(OPTIONS.saltdir) and not os.path.isdir(OPTIONS.saltdir):
++ sys.stderr.write(
++ 'ERROR: salt path "{0}" exists but is'
++ " not a directory\n".format(OPTIONS.saltdir)
+ )
+- )
+- sys.exit(EX_CANTCREAT)
++ sys.exit(EX_CANTCREAT)
+
+- if not os.path.exists(OPTIONS.saltdir):
+- need_deployment()
++ if not os.path.exists(OPTIONS.saltdir):
++ need_deployment()
+
+- code_checksum_path = os.path.normpath(
+- os.path.join(OPTIONS.saltdir, "code-checksum")
+- )
+- if not os.path.exists(code_checksum_path) or not os.path.isfile(
+- code_checksum_path
+- ):
+- sys.stderr.write(
+- "WARNING: Unable to locate current code checksum: {0}.\n".format(
+- code_checksum_path
+- )
++ code_checksum_path = os.path.normpath(
++ os.path.join(OPTIONS.saltdir, "code-checksum")
+ )
+- need_deployment()
+- with open(code_checksum_path, "r") as vpo:
+- cur_code_cs = vpo.readline().strip()
+- if cur_code_cs != OPTIONS.code_checksum:
+- sys.stderr.write(
+- "WARNING: current code checksum {0} is different to {1}.\n".format(
+- cur_code_cs, OPTIONS.code_checksum
++ if not os.path.exists(code_checksum_path) or not os.path.isfile(
++ code_checksum_path
++ ):
++ sys.stderr.write(
++ "WARNING: Unable to locate current code checksum: {0}.\n".format(
++ code_checksum_path
++ )
+ )
+- )
+- need_deployment()
+- # Salt thin exists and is up-to-date - fall through and use it
++ need_deployment()
++ with open(code_checksum_path, "r") as vpo:
++ cur_code_cs = vpo.readline().strip()
++ if cur_code_cs != OPTIONS.code_checksum:
++ sys.stderr.write(
++ "WARNING: current code checksum {0} is different to {1}.\n".format(
++ cur_code_cs, OPTIONS.code_checksum
++ )
++ )
++ need_deployment()
++ # Salt thin exists and is up-to-date - fall through and use it
+
+- salt_call_path = os.path.join(OPTIONS.saltdir, "salt-call")
+- if not os.path.isfile(salt_call_path):
+- sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
+- need_deployment()
++ salt_call_path = os.path.join(OPTIONS.saltdir, "salt-call")
++ if not os.path.isfile(salt_call_path):
++ sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
++ need_deployment()
+
+ with open(os.path.join(OPTIONS.saltdir, "minion"), "w") as config:
+ config.write(OPTIONS.config + "\n")
+@@ -351,8 +367,8 @@ def main(argv): # pylint: disable=W0613
+ argv_prepared = ARGS
+
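++    # Use the Salt Bundle interpreter and its salt-call when deployed;
++    # otherwise fall back to the salt-thin salt-call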
+ salt_argv = [
+- get_executable(),
+- salt_call_path,
++ sys.executable if venv_salt_call is not None else get_executable(),
++ venv_salt_call if venv_salt_call is not None else salt_call_path,
+ "--retcode-passthrough",
+ "--local",
+ "--metadata",
+diff --git a/salt/loader/__init__.py b/salt/loader/__init__.py
+index 72a5e54401..32f8a7702c 100644
+--- a/salt/loader/__init__.py
++++ b/salt/loader/__init__.py
+@@ -9,6 +9,7 @@ import inspect
+ import logging
+ import os
+ import re
++import threading
+ import time
+ import types
+
+@@ -31,7 +32,7 @@ from salt.exceptions import LoaderError
+ from salt.template import check_render_pipe_str
+ from salt.utils import entrypoints
+
+-from .lazy import SALT_BASE_PATH, FilterDictWrapper, LazyLoader
++from .lazy import SALT_BASE_PATH, FilterDictWrapper, LazyLoader as _LazyLoader
+
+ log = logging.getLogger(__name__)
+
+@@ -81,6 +82,18 @@ SALT_INTERNAL_LOADERS_PATHS = (
+ str(SALT_BASE_PATH / "wheel"),
+ )
+
++LOAD_LOCK = threading.Lock()
++
++
++def LazyLoader(*args, **kwargs):
++ # This wrapper is used to prevent deadlocks with importlib (bsc#1182851)
++ # LOAD_LOCK is also used directly in salt.client.ssh.SSH
++ try:
++ LOAD_LOCK.acquire()
++ return _LazyLoader(*args, **kwargs)
++ finally:
++ LOAD_LOCK.release()
++
+
+ def static_loader(
+ opts,
+@@ -725,7 +738,7 @@ def fileserver(opts, backends, loaded_base_name=None):
+ )
+
+
+-def roster(opts, runner=None, utils=None, whitelist=None, loaded_base_name=None):
++def roster(opts, runner=None, utils=None, whitelist=None, loaded_base_name=None, context=None):
+ """
+ Returns the roster modules
+
+@@ -736,12 +749,15 @@ def roster(opts, runner=None, utils=None, whitelist=None, loaded_base_name=None)
+ :param str loaded_base_name: The imported modules namespace when imported
+ by the salt loader.
+ """
++ if context is None:
++ context = {}
++
+ return LazyLoader(
+ _module_dirs(opts, "roster"),
+ opts,
+ tag="roster",
+ whitelist=whitelist,
+- pack={"__runner__": runner, "__utils__": utils},
++ pack={"__runner__": runner, "__utils__": utils, "__context__": context},
+ extra_module_dirs=utils.module_dirs if utils else None,
+ loaded_base_name=loaded_base_name,
+ )
+@@ -933,7 +949,14 @@ def render(
+ )
+ rend = FilterDictWrapper(ret, ".render")
+
+- if not check_render_pipe_str(
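++    # Wrap check_render_pipe_str with LOAD_LOCK to avoid the importlib
++    # deadlock on loading renderer modules (bsc#1182851)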
++ def _check_render_pipe_str(pipestr, renderers, blacklist, whitelist):
++ try:
++ LOAD_LOCK.acquire()
++ return check_render_pipe_str(pipestr, renderers, blacklist, whitelist)
++ finally:
++ LOAD_LOCK.release()
++
++ if not _check_render_pipe_str(
+ opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]
+ ):
+ err = (
+diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py
+index a89c1a19af..8a28c48460 100644
+--- a/salt/netapi/__init__.py
++++ b/salt/netapi/__init__.py
+@@ -79,6 +79,7 @@ class NetapiClient:
+ self.loadauth = salt.auth.LoadAuth(apiopts)
+ self.key = salt.daemons.masterapi.access_keys(apiopts)
+ self.ckminions = salt.utils.minions.CkMinions(apiopts)
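++        # Shared context passed down to the salt-ssh roster modules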
++ self.context = {}
+
+ def _is_master_running(self):
+ """
+@@ -245,7 +246,7 @@ class NetapiClient:
+ with salt.client.ssh.client.SSHClient(
+ mopts=self.opts, disable_custom_roster=True
+ ) as client:
+- return client.cmd_sync(kwargs)
++ return client.cmd_sync(kwargs, context=self.context)
+
+ def runner(self, fun, timeout=None, full_return=False, **kwargs):
+ """
+diff --git a/salt/roster/__init__.py b/salt/roster/__init__.py
+index fc7339d785..ea23d550d7 100644
+--- a/salt/roster/__init__.py
++++ b/salt/roster/__init__.py
+@@ -59,7 +59,7 @@ class Roster:
+ minion aware
+ """
+
+- def __init__(self, opts, backends="flat"):
++ def __init__(self, opts, backends="flat", context=None):
+ self.opts = opts
+ if isinstance(backends, list):
+ self.backends = backends
+@@ -71,7 +71,9 @@ class Roster:
+ self.backends = ["flat"]
+ utils = salt.loader.utils(self.opts)
+ runner = salt.loader.runner(self.opts, utils=utils)
+- self.rosters = salt.loader.roster(self.opts, runner=runner, utils=utils)
++ self.rosters = salt.loader.roster(
++ self.opts, runner=runner, utils=utils, context=context
++ )
+
+ def _gen_back(self):
+ """
+diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
+index cf33903320..1b616375b3 100644
+--- a/tests/unit/test_loader.py
++++ b/tests/unit/test_loader.py
+@@ -1697,7 +1697,7 @@ class LazyLoaderRefreshFileMappingTest(TestCase):
+ cls.funcs = salt.loader.minion_mods(cls.opts, utils=cls.utils, proxy=cls.proxy)
+
+ def setUp(self):
+- class LazyLoaderMock(salt.loader.LazyLoader):
++ class LazyLoaderMock(salt.loader._LazyLoader):
+ pass
+
+ self.LOADER_CLASS = LazyLoaderMock
+--
+2.39.2
+
+
diff --git a/add-sleep-on-exception-handling-on-minion-connection.patch b/add-sleep-on-exception-handling-on-minion-connection.patch
new file mode 100644
index 0000000..edda8d7
--- /dev/null
+++ b/add-sleep-on-exception-handling-on-minion-connection.patch
@@ -0,0 +1,41 @@
+From bad9e783e1a6923d85bdb1477a2e9766887a511e Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 14:49:38 +0300
+Subject: [PATCH] Add sleep on exception handling on minion connection
+ attempt to the master (bsc#1174855) (#321)
+
+* Async batch implementation fix
+
+* Add sleep on exception handling on minion connection attempt to the master (bsc#1174855)
+---
+ salt/minion.py | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/salt/minion.py b/salt/minion.py
+index 2f905e4a4f..c3b65f16c3 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -1123,6 +1123,9 @@ class MinionManager(MinionBase):
+ last = 0 # never have we signed in
+ auth_wait = minion.opts["acceptance_wait_time"]
+ failed = False
++ retry_wait = 1
++ retry_wait_inc = 1
++ max_retry_wait = 20
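++    # Back off linearly after failed connection attempts: 1s, 2s, ...
++    # capped at 20s (bsc#1174855)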
+ while True:
+ try:
+ if minion.opts.get("beacons_before_connect", False):
+@@ -1161,6 +1164,9 @@ class MinionManager(MinionBase):
+ minion.opts["master"],
+ exc_info=True,
+ )
++ yield salt.ext.tornado.gen.sleep(retry_wait)
++ if retry_wait < max_retry_wait:
++ retry_wait += retry_wait_inc
+
+ # Multi Master Tune In
+ def tune_in(self):
+--
+2.39.2
+
+
diff --git a/add-standalone-configuration-file-for-enabling-packa.patch b/add-standalone-configuration-file-for-enabling-packa.patch
new file mode 100644
index 0000000..7000574
--- /dev/null
+++ b/add-standalone-configuration-file-for-enabling-packa.patch
@@ -0,0 +1,26 @@
+From 94e702e83c05814296ea8987a722b71e99117360 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 22 May 2019 13:00:46 +0100
+Subject: [PATCH] Add standalone configuration file for enabling package
+ formulas
+
+---
+ conf/suse/standalone-formulas-configuration.conf | 4 ++++
+ 1 file changed, 4 insertions(+)
+ create mode 100644 conf/suse/standalone-formulas-configuration.conf
+
+diff --git a/conf/suse/standalone-formulas-configuration.conf b/conf/suse/standalone-formulas-configuration.conf
+new file mode 100644
+index 0000000000..94d05fb2ee
+--- /dev/null
++++ b/conf/suse/standalone-formulas-configuration.conf
+@@ -0,0 +1,4 @@
++file_roots:
++ base:
++ - /usr/share/salt-formulas/states
++ - /srv/salt
+--
+2.39.2
+
+
diff --git a/add-support-for-gpgautoimport-539.patch b/add-support-for-gpgautoimport-539.patch
new file mode 100644
index 0000000..d1176dc
--- /dev/null
+++ b/add-support-for-gpgautoimport-539.patch
@@ -0,0 +1,369 @@
+From 2e103365c50fe42a72de3e9d57c3fdbee47454aa Mon Sep 17 00:00:00 2001
+From: Michael Calmer
+Date: Fri, 8 Jul 2022 10:15:37 +0200
+Subject: [PATCH] add support for gpgautoimport (#539)
+
+* add support for gpgautoimport to refresh_db in the zypperpkg module
+
+* call refresh_db function from mod_repo
+
+* call refresh_db with kwargs where possible
+
+* ignore no repos defined exit code
+
+* fix zypperpkg test after adding more success return codes
+---
+ salt/modules/zypperpkg.py | 47 +++++++---
+ tests/unit/modules/test_zypperpkg.py | 124 +++++++++++++++++++++++----
+ 2 files changed, 140 insertions(+), 31 deletions(-)
+
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 318c871b37..051f8f72c7 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -623,7 +623,7 @@ def list_upgrades(refresh=True, root=None, **kwargs):
+ salt '*' pkg.list_upgrades
+ """
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ ret = dict()
+ cmd = ["list-updates"]
+@@ -737,7 +737,7 @@ def info_available(*names, **kwargs):
+
+ # Refresh db before extracting the latest package
+ if kwargs.get("refresh", True):
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ pkg_info = []
+ batch = names[:]
+@@ -1439,7 +1439,6 @@ def mod_repo(repo, **kwargs):
+ cmd_opt.append(kwargs.get("name"))
+
+ if kwargs.get("gpgautoimport") is True:
+- global_cmd_opt.append("--gpg-auto-import-keys")
+ call_refresh = True
+
+ if cmd_opt:
+@@ -1451,8 +1450,8 @@ def mod_repo(repo, **kwargs):
+ # when used with "zypper ar --refresh" or "zypper mr --refresh"
+ # --gpg-auto-import-keys is not doing anything
+ # so we need to specifically refresh here with --gpg-auto-import-keys
+- refresh_opts = global_cmd_opt + ["refresh"] + [repo]
+- __zypper__(root=root).xml.call(*refresh_opts)
++ kwargs.update({"repos": repo})
++ refresh_db(root=root, **kwargs)
+ elif not added and not cmd_opt:
+ comment = "Specified arguments did not result in modification of repo"
+
+@@ -1463,7 +1462,7 @@ def mod_repo(repo, **kwargs):
+ return repo
+
+
+-def refresh_db(force=None, root=None):
++def refresh_db(force=None, root=None, **kwargs):
+ """
+ Trigger a repository refresh by calling ``zypper refresh``. Refresh will run
+ with ``--force`` if the "force=True" flag is passed on the CLI or
+@@ -1474,6 +1473,17 @@ def refresh_db(force=None, root=None):
+
+ {'': Bool}
+
++ gpgautoimport : False
++ If set to True, automatically trust and import public GPG key for
++ the repository.
++
++ .. versionadded:: 3005
++
++ repos
++ Refresh just the specified repos
++
++ .. versionadded:: 3005
++
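++        For example (the repository name is illustrative):
++
++        .. code-block:: bash
++
++            salt '*' pkg.refresh_db gpgautoimport=True repos=my-repo
++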
+ root
+ operate on a different root directory.
+
+@@ -1494,11 +1504,22 @@ def refresh_db(force=None, root=None):
+ salt.utils.pkg.clear_rtag(__opts__)
+ ret = {}
+ refresh_opts = ["refresh"]
++ global_opts = []
+ if force is None:
+ force = __pillar__.get("zypper", {}).get("refreshdb_force", True)
+ if force:
+ refresh_opts.append("--force")
+- out = __zypper__(root=root).refreshable.call(*refresh_opts)
++ repos = kwargs.get("repos", [])
++ refresh_opts.extend([repos] if not isinstance(repos, list) else repos)
++
++ if kwargs.get("gpgautoimport", False):
++ global_opts.append("--gpg-auto-import-keys")
++
++ # We do the actual call to zypper refresh.
++ # We ignore retcode 6 which is returned when there are no repositories defined.
++ out = __zypper__(root=root).refreshable.call(
++ *global_opts, *refresh_opts, success_retcodes=[0, 6]
++ )
+
+ for line in out.splitlines():
+ if not line:
+@@ -1683,7 +1704,7 @@ def install(
+ 'arch': ''}}}
+ """
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ try:
+ pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
+@@ -1980,7 +2001,7 @@ def upgrade(
+ cmd_update.insert(0, "--no-gpg-checks")
+
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ if dryrun:
+ cmd_update.append("--dry-run")
+@@ -2808,7 +2829,7 @@ def search(criteria, refresh=False, **kwargs):
+ root = kwargs.get("root", None)
+
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ cmd = ["search"]
+ if kwargs.get("match") == "exact":
+@@ -2959,7 +2980,7 @@ def download(*packages, **kwargs):
+
+ refresh = kwargs.get("refresh", False)
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ pkg_ret = {}
+ for dld_result in (
+@@ -3111,7 +3132,7 @@ def list_patches(refresh=False, root=None, **kwargs):
+ salt '*' pkg.list_patches
+ """
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ return _get_patches(root=root)
+
+@@ -3205,7 +3226,7 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
+ salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
+ """
+ if refresh:
+- refresh_db(root)
++ refresh_db(root, **kwargs)
+
+ ret = list()
+ for pkg in pkgs:
+diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
+index e85c93da3b..f5b6d74b6f 100644
+--- a/tests/unit/modules/test_zypperpkg.py
++++ b/tests/unit/modules/test_zypperpkg.py
+@@ -377,7 +377,12 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ run_out = {"stderr": "", "stdout": "\n".join(ref_out), "retcode": 0}
+
+ zypper_mock = MagicMock(return_value=run_out)
+- call_kwargs = {"output_loglevel": "trace", "python_shell": False, "env": {}}
++ call_kwargs = {
++ "output_loglevel": "trace",
++ "python_shell": False,
++ "env": {},
++ "success_retcodes": [0, 6],
++ }
+ with patch.dict(zypper.__salt__, {"cmd.run_all": zypper_mock}):
+ with patch.object(salt.utils.pkg, "clear_rtag", Mock()):
+ result = zypper.refresh_db()
+@@ -395,6 +400,73 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ zypper_mock.assert_called_with(
+ ["zypper", "--non-interactive", "refresh", "--force"], **call_kwargs
+ )
++ zypper.refresh_db(gpgautoimport=True)
++ zypper_mock.assert_called_with(
++ [
++ "zypper",
++ "--non-interactive",
++ "--gpg-auto-import-keys",
++ "refresh",
++ "--force",
++ ],
++ **call_kwargs
++ )
++ zypper.refresh_db(gpgautoimport=True, force=True)
++ zypper_mock.assert_called_with(
++ [
++ "zypper",
++ "--non-interactive",
++ "--gpg-auto-import-keys",
++ "refresh",
++ "--force",
++ ],
++ **call_kwargs
++ )
++ zypper.refresh_db(gpgautoimport=True, force=False)
++ zypper_mock.assert_called_with(
++ [
++ "zypper",
++ "--non-interactive",
++ "--gpg-auto-import-keys",
++ "refresh",
++ ],
++ **call_kwargs
++ )
++ zypper.refresh_db(
++ gpgautoimport=True,
++ refresh=True,
++ repos="mock-repo-name",
++ root=None,
++ url="http://repo.url/some/path",
++ )
++ zypper_mock.assert_called_with(
++ [
++ "zypper",
++ "--non-interactive",
++ "--gpg-auto-import-keys",
++ "refresh",
++ "--force",
++ "mock-repo-name",
++ ],
++ **call_kwargs
++ )
++ zypper.refresh_db(
++ gpgautoimport=True,
++ repos="mock-repo-name",
++ root=None,
++ url="http://repo.url/some/path",
++ )
++ zypper_mock.assert_called_with(
++ [
++ "zypper",
++ "--non-interactive",
++ "--gpg-auto-import-keys",
++ "refresh",
++ "--force",
++ "mock-repo-name",
++ ],
++ **call_kwargs
++ )
+
+ def test_info_installed(self):
+ """
+@@ -2082,18 +2154,23 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+
+ url = self.new_repo_config["url"]
+ name = self.new_repo_config["name"]
+- with zypper_patcher:
++ with zypper_patcher, patch.object(zypper, "refresh_db", Mock()) as refreshmock:
+ zypper.mod_repo(name, **{"url": url, "gpgautoimport": True})
+ self.assertEqual(
+ zypper.__zypper__(root=None).xml.call.call_args_list,
+ [
+ call("ar", url, name),
+- call("--gpg-auto-import-keys", "refresh", name),
+ ],
+ )
+ self.assertTrue(
+ zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0
+ )
++ refreshmock.assert_called_once_with(
++ gpgautoimport=True,
++ repos=name,
++ root=None,
++ url="http://repo.url/some/path",
++ )
+
+ def test_repo_noadd_nomod_ref(self):
+ """
+@@ -2112,15 +2189,17 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ "salt.modules.zypperpkg", **self.zypper_patcher_config
+ )
+
+- with zypper_patcher:
++ with zypper_patcher, patch.object(zypper, "refresh_db", Mock()) as refreshmock:
+ zypper.mod_repo(name, **{"url": url, "gpgautoimport": True})
+- self.assertEqual(
+- zypper.__zypper__(root=None).xml.call.call_args_list,
+- [call("--gpg-auto-import-keys", "refresh", name)],
+- )
+ self.assertTrue(
+ zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0
+ )
++ refreshmock.assert_called_once_with(
++ gpgautoimport=True,
++ repos=name,
++ root=None,
++ url="http://repo.url/some/path",
++ )
+
+ def test_repo_add_mod_ref(self):
+ """
+@@ -2133,10 +2212,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ zypper_patcher = patch.multiple(
+ "salt.modules.zypperpkg", **self.zypper_patcher_config
+ )
+-
+ url = self.new_repo_config["url"]
+ name = self.new_repo_config["name"]
+- with zypper_patcher:
++
++ with zypper_patcher, patch.object(zypper, "refresh_db", Mock()) as refreshmock:
+ zypper.mod_repo(
+ name, **{"url": url, "refresh": True, "gpgautoimport": True}
+ )
+@@ -2144,11 +2223,17 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ zypper.__zypper__(root=None).xml.call.call_args_list,
+ [
+ call("ar", url, name),
+- call("--gpg-auto-import-keys", "refresh", name),
+ ],
+ )
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
+- "--gpg-auto-import-keys", "mr", "--refresh", name
++ "mr", "--refresh", name
++ )
++ refreshmock.assert_called_once_with(
++ gpgautoimport=True,
++ refresh=True,
++ repos=name,
++ root=None,
++ url="http://repo.url/some/path",
+ )
+
+ def test_repo_noadd_mod_ref(self):
+@@ -2168,16 +2253,19 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ "salt.modules.zypperpkg", **self.zypper_patcher_config
+ )
+
+- with zypper_patcher:
++ with zypper_patcher, patch.object(zypper, "refresh_db", Mock()) as refreshmock:
+ zypper.mod_repo(
+ name, **{"url": url, "refresh": True, "gpgautoimport": True}
+ )
+- self.assertEqual(
+- zypper.__zypper__(root=None).xml.call.call_args_list,
+- [call("--gpg-auto-import-keys", "refresh", name)],
+- )
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
+- "--gpg-auto-import-keys", "mr", "--refresh", name
++ "mr", "--refresh", name
++ )
++ refreshmock.assert_called_once_with(
++ gpgautoimport=True,
++ refresh=True,
++ repos=name,
++ root=None,
++ url="http://repo.url/some/path",
+ )
+
+ def test_wildcard_to_query_match_all(self):
+--
+2.39.2
+
+
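+For review convenience, the option handling that refresh_db() now performs is
+restated below as a standalone sketch (the helper name and defaults are
+illustrative; only the flag ordering and the retcode-6 handling come from the
+patch above):
+
+    # Mirrors the new refresh_db() command construction; not Salt code.
+    def build_refresh_cmd(force=True, gpgautoimport=False, repos=None):
+        global_opts = []          # options placed before the subcommand
+        refresh_opts = ["refresh"]
+        if force:
+            refresh_opts.append("--force")
+        repos = repos or []
+        refresh_opts.extend([repos] if not isinstance(repos, list) else repos)
+        if gpgautoimport:
+            global_opts.append("--gpg-auto-import-keys")
+        # zypper exits with 6 when no repositories are defined; the patch
+        # treats that as success via success_retcodes=[0, 6].
+        return ["zypper", "--non-interactive", *global_opts, *refresh_opts]
+
+    # build_refresh_cmd(gpgautoimport=True, repos="mock-repo-name") ->
+    # ['zypper', '--non-interactive', '--gpg-auto-import-keys',
+    #  'refresh', '--force', 'mock-repo-name']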
diff --git a/allow-all-primitive-grain-types-for-autosign_grains-.patch b/allow-all-primitive-grain-types-for-autosign_grains-.patch
new file mode 100644
index 0000000..ee1b7e3
--- /dev/null
+++ b/allow-all-primitive-grain-types-for-autosign_grains-.patch
@@ -0,0 +1,97 @@
+From ae4e1d1cc15b3c510bdd774a1dfeff67c522324a Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Tue, 17 Oct 2023 13:05:00 +0200
+Subject: [PATCH] Allow all primitive grain types for autosign_grains
+ (#607)
+
+* Allow all primitive grain types for autosign_grains
+
+Signed-off-by: Marek Czernek
+
+* blacken daemons/masterapi.py and its test_auto_key
+
+Signed-off-by: Marek Czernek
+
+---------
+
+Signed-off-by: Marek Czernek
+Co-authored-by: Alexander Graul
+---
+ changelog/61416.fixed.md | 1 +
+ changelog/63708.fixed.md | 1 +
+ salt/daemons/masterapi.py | 2 +-
+ .../pytests/unit/daemons/masterapi/test_auto_key.py | 13 +++++++------
+ 4 files changed, 10 insertions(+), 7 deletions(-)
+ create mode 100644 changelog/61416.fixed.md
+ create mode 100644 changelog/63708.fixed.md
+
+diff --git a/changelog/61416.fixed.md b/changelog/61416.fixed.md
+new file mode 100644
+index 0000000000..3203a0a1c6
+--- /dev/null
++++ b/changelog/61416.fixed.md
+@@ -0,0 +1 @@
++Allow all primitive grain types for autosign_grains
+diff --git a/changelog/63708.fixed.md b/changelog/63708.fixed.md
+new file mode 100644
+index 0000000000..3203a0a1c6
+--- /dev/null
++++ b/changelog/63708.fixed.md
+@@ -0,0 +1 @@
++Allow all primitive grain types for autosign_grains
+diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py
+index 3716c63d99..54aca64a76 100644
+--- a/salt/daemons/masterapi.py
++++ b/salt/daemons/masterapi.py
+@@ -366,7 +366,7 @@ class AutoKey:
+ line = salt.utils.stringutils.to_unicode(line).strip()
+ if line.startswith("#"):
+ continue
+- if autosign_grains[grain] == line:
++ if str(autosign_grains[grain]) == line:
+ return True
+ return False
+
+diff --git a/tests/pytests/unit/daemons/masterapi/test_auto_key.py b/tests/pytests/unit/daemons/masterapi/test_auto_key.py
+index b3657b7f1b..54c3f22d2a 100644
+--- a/tests/pytests/unit/daemons/masterapi/test_auto_key.py
++++ b/tests/pytests/unit/daemons/masterapi/test_auto_key.py
+@@ -17,11 +17,11 @@ def gen_permissions(owner="", group="", others=""):
+ """
+ ret = 0
+ for c in owner:
+- ret |= getattr(stat, "S_I{}USR".format(c.upper()), 0)
++ ret |= getattr(stat, f"S_I{c.upper()}USR", 0)
+ for c in group:
+- ret |= getattr(stat, "S_I{}GRP".format(c.upper()), 0)
++ ret |= getattr(stat, f"S_I{c.upper()}GRP", 0)
+ for c in others:
+- ret |= getattr(stat, "S_I{}OTH".format(c.upper()), 0)
++ ret |= getattr(stat, f"S_I{c.upper()}OTH", 0)
+ return ret
+
+
+@@ -256,16 +256,17 @@ def test_check_autosign_grains_no_autosign_grains_dir(auto_key):
+ _test_check_autosign_grains(test_func, auto_key, autosign_grains_dir=None)
+
+
+-def test_check_autosign_grains_accept(auto_key):
++@pytest.mark.parametrize("grain_value", ["test_value", 123, True])
++def test_check_autosign_grains_accept(grain_value, auto_key):
+ """
+ Asserts that autosigning from grains passes when a matching grain value is in an
+ autosign_grain file.
+ """
+
+ def test_func(*args):
+- assert auto_key.check_autosign_grains({"test_grain": "test_value"}) is True
++ assert auto_key.check_autosign_grains({"test_grain": grain_value}) is True
+
+- file_content = "#test_ignore\ntest_value"
++ file_content = f"#test_ignore\n{grain_value}"
+ _test_check_autosign_grains(test_func, auto_key, file_content=file_content)
+
+
+--
+2.42.0
+
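+The behavioural change above is small but easy to restate: the grain value is
+now stringified before it is compared with each line of the autosign_grains
+file, so integer and boolean grains can match. A standalone model (file
+handling and names are illustrative, not the AutoKey API):
+
+    def grain_matches(grain_value, autosign_file_lines):
+        for line in autosign_file_lines:
+            line = line.strip()
+            if line.startswith("#"):      # comment lines are skipped
+                continue
+            if str(grain_value) == line:  # the str() cast is the fix
+                return True
+        return False
+
+    assert grain_matches("test_value", ["#test_ignore", "test_value"])
+    assert grain_matches(123, ["123"])    # previously failed: 123 != "123"
+    assert grain_matches(True, ["True"])  # previously failed: True != "True"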
diff --git a/allow-kwargs-for-fileserver-roots-update-bsc-1218482.patch b/allow-kwargs-for-fileserver-roots-update-bsc-1218482.patch
new file mode 100644
index 0000000..5b9af1c
--- /dev/null
+++ b/allow-kwargs-for-fileserver-roots-update-bsc-1218482.patch
@@ -0,0 +1,164 @@
+From 8ae54e8a0e12193507f1936f363c3438b4a006ee Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Yeray=20Guti=C3=A9rrez=20Cedr=C3=A9s?=
+
+Date: Tue, 23 Jan 2024 15:33:28 +0000
+Subject: [PATCH] Allow kwargs for fileserver roots update
+ (bsc#1218482) (#618)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Allow kwargs for fileserver roots update (bsc#1218482)
+
+* Prevent exceptions with fileserver.update when called via state
+
+* Fix wrong logic and enhance tests around fileserver.update
+
+* Remove test which is no longer valid
+
+---------
+
+Co-authored-by: Pablo Suárez Hernández
+---
+ changelog/65819.fixed.md | 1 +
+ salt/fileserver/roots.py | 8 ++--
+ salt/runners/fileserver.py | 6 +++
+ tests/integration/runners/test_fileserver.py | 40 ++++++++++++++++++--
+ tests/pytests/unit/fileserver/test_roots.py | 2 +-
+ 5 files changed, 47 insertions(+), 10 deletions(-)
+ create mode 100644 changelog/65819.fixed.md
+
+diff --git a/changelog/65819.fixed.md b/changelog/65819.fixed.md
+new file mode 100644
+index 0000000000..432f5c791c
+--- /dev/null
++++ b/changelog/65819.fixed.md
+@@ -0,0 +1 @@
++Prevent exceptions with fileserver.update when called via state
+diff --git a/salt/fileserver/roots.py b/salt/fileserver/roots.py
+index 4880cbab9b..a02b597c6f 100644
+--- a/salt/fileserver/roots.py
++++ b/salt/fileserver/roots.py
+@@ -193,9 +193,7 @@ def update():
+ os.makedirs(mtime_map_path_dir)
+ with salt.utils.files.fopen(mtime_map_path, "wb") as fp_:
+ for file_path, mtime in new_mtime_map.items():
+- fp_.write(
+- salt.utils.stringutils.to_bytes("{}:{}\n".format(file_path, mtime))
+- )
++ fp_.write(salt.utils.stringutils.to_bytes(f"{file_path}:{mtime}\n"))
+
+ if __opts__.get("fileserver_events", False):
+ # if there is a change, fire an event
+@@ -326,11 +324,11 @@ def _file_lists(load, form):
+ return []
+ list_cache = os.path.join(
+ list_cachedir,
+- "{}.p".format(salt.utils.files.safe_filename_leaf(actual_saltenv)),
++ f"{salt.utils.files.safe_filename_leaf(actual_saltenv)}.p",
+ )
+ w_lock = os.path.join(
+ list_cachedir,
+- ".{}.w".format(salt.utils.files.safe_filename_leaf(actual_saltenv)),
++ f".{salt.utils.files.safe_filename_leaf(actual_saltenv)}.w",
+ )
+ cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
+ __opts__, form, list_cache, w_lock
+diff --git a/salt/runners/fileserver.py b/salt/runners/fileserver.py
+index d75d7de0cf..1ed05b68ca 100644
+--- a/salt/runners/fileserver.py
++++ b/salt/runners/fileserver.py
+@@ -350,6 +350,12 @@ def update(backend=None, **kwargs):
+ salt-run fileserver.update backend=git remotes=myrepo,yourrepo
+ """
+ fileserver = salt.fileserver.Fileserver(__opts__)
++
++ # Remove possible '__pub_user' in kwargs as it is not expected
++ # on "update" function for the different fileserver backends.
++ if "__pub_user" in kwargs:
++ del kwargs["__pub_user"]
++
+ fileserver.update(back=backend, **kwargs)
+ return True
+
+diff --git a/tests/integration/runners/test_fileserver.py b/tests/integration/runners/test_fileserver.py
+index ae8ab766aa..62f0da0c4a 100644
+--- a/tests/integration/runners/test_fileserver.py
++++ b/tests/integration/runners/test_fileserver.py
+@@ -202,15 +202,31 @@ class FileserverTest(ShellCase):
+ fileserver.update
+ """
+ ret = self.run_run_plus(fun="fileserver.update")
+- self.assertTrue(ret["return"])
++ self.assertTrue(ret["return"] is True)
+
+ # Backend submitted as a string
+ ret = self.run_run_plus(fun="fileserver.update", backend="roots")
+- self.assertTrue(ret["return"])
++ self.assertTrue(ret["return"] is True)
+
+ # Backend submitted as a list
+ ret = self.run_run_plus(fun="fileserver.update", backend=["roots"])
+- self.assertTrue(ret["return"])
++ self.assertTrue(ret["return"] is True)
++
++ # Possible '__pub_user' is removed from kwargs
++ ret = self.run_run_plus(
++ fun="fileserver.update", backend=["roots"], __pub_user="foo"
++ )
++ self.assertTrue(ret["return"] is True)
++
++ # Unknown arguments
++ ret = self.run_run_plus(
++ fun="fileserver.update", backend=["roots"], unknown_arg="foo"
++ )
++ self.assertIn(
++ "Passed invalid arguments: update() got an unexpected keyword argument"
++ " 'unknown_arg'",
++ ret["return"],
++ )
+
+ # Other arguments are passed to backend
+ def mock_gitfs_update(remotes=None):
+@@ -225,7 +241,23 @@ class FileserverTest(ShellCase):
+ ret = self.run_run_plus(
+ fun="fileserver.update", backend="gitfs", remotes="myrepo,yourrepo"
+ )
+- self.assertTrue(ret["return"])
++ self.assertTrue(ret["return"] is True)
++ mock_backend_func.assert_called_once_with(remotes="myrepo,yourrepo")
++
++ # Possible '__pub_user' arguments are removed from kwargs
++ mock_backend_func = create_autospec(mock_gitfs_update)
++ mock_return_value = {
++ "gitfs.envs": None, # This is needed to activate the backend
++ "gitfs.update": mock_backend_func,
++ }
++ with patch("salt.loader.fileserver", MagicMock(return_value=mock_return_value)):
++ ret = self.run_run_plus(
++ fun="fileserver.update",
++ backend="gitfs",
++ remotes="myrepo,yourrepo",
++ __pub_user="foo",
++ )
++ self.assertTrue(ret["return"] is True)
+ mock_backend_func.assert_called_once_with(remotes="myrepo,yourrepo")
+
+ # Unknown arguments are passed to backend
+diff --git a/tests/pytests/unit/fileserver/test_roots.py b/tests/pytests/unit/fileserver/test_roots.py
+index a8a80eea17..96bceb0fd3 100644
+--- a/tests/pytests/unit/fileserver/test_roots.py
++++ b/tests/pytests/unit/fileserver/test_roots.py
+@@ -236,7 +236,7 @@ def test_update_mtime_map():
+ # between Python releases.
+ lines_written = sorted(mtime_map_mock.write_calls())
+ expected = sorted(
+- salt.utils.stringutils.to_bytes("{key}:{val}\n".format(key=key, val=val))
++ salt.utils.stringutils.to_bytes(f"{key}:{val}\n")
+ for key, val in new_mtime_map.items()
+ )
+ assert lines_written == expected, lines_written
+--
+2.43.0
+
+
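+The crux of the fix above is the removal of the publisher-injected
+`__pub_user` key before kwargs reach a backend's update() function. A minimal
+model of the failure mode and the fix (the backend stub is illustrative,
+standing in for e.g. gitfs.update):
+
+    def backend_update(remotes=None):   # backends accept only their own kwargs
+        return True
+
+    def runner_update(backend=None, **kwargs):
+        # Without this, calls published via a state carry __pub_user and
+        # backend_update() raises TypeError (unexpected keyword argument).
+        kwargs.pop("__pub_user", None)  # equivalent to the `del` in the patch
+        return backend_update(**kwargs)
+
+    assert runner_update(backend="gitfs", remotes="myrepo", __pub_user="foo")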
diff --git a/allow-namedloadercontexts-to-be-returned-from-loader.patch b/allow-namedloadercontexts-to-be-returned-from-loader.patch
new file mode 100644
index 0000000..ca9720c
--- /dev/null
+++ b/allow-namedloadercontexts-to-be-returned-from-loader.patch
@@ -0,0 +1,73 @@
+From 1be3f92ef3bf14e47340e2e075291204b3e75e98 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 25 Sep 2024 14:07:42 +0300
+Subject: [PATCH] Allow NamedLoaderContexts to be returned from loader
+
+It is useful in some cases to return NamedLoaderContexts from loaded
+functions. Instead of choking or requiring implementers to call the
+context's value() method before being de-scoped, detect when a
+NamedLoaderContext has been returned and return the value from the
+current context.
+
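+An illustrative reduction of the unwrapping step (not Salt code; the class
+here only models NamedLoaderContext's value() accessor):
+
+    class FakeNamedLoaderContext:
+        def __init__(self, value):
+            self._value = value
+        def value(self):
+            return self._value
+
+    def run_loaded(func):
+        ret = func()
+        if isinstance(ret, FakeNamedLoaderContext):
+            ret = ret.value()   # resolve while the loader context is current
+        return ret
+
+    assert run_loaded(lambda: FakeNamedLoaderContext("meh")) == "meh"
+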
+Co-authored-by: Daniel A. Wozniak
+---
+ salt/loader/lazy.py | 5 ++++-
+ tests/pytests/integration/modules/test_config.py | 8 ++++++++
+ tests/pytests/unit/loader/test_loader.py | 13 +++++++++++++
+ 3 files changed, 25 insertions(+), 1 deletion(-)
+ create mode 100644 tests/pytests/integration/modules/test_config.py
+
+diff --git a/salt/loader/lazy.py b/salt/loader/lazy.py
+index 5de995d446..b7fd97f0e1 100644
+--- a/salt/loader/lazy.py
++++ b/salt/loader/lazy.py
+@@ -1246,7 +1246,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
+ self.parent_loader = current_loader
+ token = salt.loader.context.loader_ctxvar.set(self)
+ try:
+- return _func_or_method(*args, **kwargs)
++ ret = _func_or_method(*args, **kwargs)
++ if isinstance(ret, salt.loader.context.NamedLoaderContext):
++ ret = ret.value()
++ return ret
+ finally:
+ self.parent_loader = None
+ salt.loader.context.loader_ctxvar.reset(token)
+diff --git a/tests/pytests/integration/modules/test_config.py b/tests/pytests/integration/modules/test_config.py
+new file mode 100644
+index 0000000000..afdf470605
+--- /dev/null
++++ b/tests/pytests/integration/modules/test_config.py
+@@ -0,0 +1,8 @@
++import pytest
++
++
++@pytest.mark.slow_test
++def test_config_items(salt_cli, salt_minion):
++ ret = salt_cli.run("config.items", minion_tgt=salt_minion.id)
++ assert ret.returncode == 0
++ assert isinstance(ret.data, dict)
+diff --git a/tests/pytests/unit/loader/test_loader.py b/tests/pytests/unit/loader/test_loader.py
+index 86348749db..aba605f42a 100644
+--- a/tests/pytests/unit/loader/test_loader.py
++++ b/tests/pytests/unit/loader/test_loader.py
+@@ -62,3 +62,16 @@ def test_raw_mod_functions():
+ ret = salt.loader.raw_mod(opts, "grains", "get")
+ for k, v in ret.items():
+ assert isinstance(v, salt.loader.lazy.LoadedFunc)
++
++
++def test_return_named_context_from_loaded_func(tmp_path):
++ opts = {
++ "optimization_order": [0],
++ }
++ contents = """
++ def foobar():
++ return __test__
++ """
++ with pytest.helpers.temp_file("mymod.py", contents, directory=tmp_path):
++ loader = salt.loader.LazyLoader([tmp_path], opts, pack={"__test__": "meh"})
++ assert loader["mymod.foobar"]() == "meh"
+--
+2.46.1
+
diff --git a/allow-vendor-change-option-with-zypper.patch b/allow-vendor-change-option-with-zypper.patch
new file mode 100644
index 0000000..5d33a98
--- /dev/null
+++ b/allow-vendor-change-option-with-zypper.patch
@@ -0,0 +1,841 @@
+From a36d6524e530eca32966f46597c88dbfd4b90e78 Mon Sep 17 00:00:00 2001
+From: Martin Seidl
+Date: Tue, 27 Oct 2020 16:12:29 +0100
+Subject: [PATCH] Allow vendor change option with zypper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix novendorchange option (#284)
+
+* Fixed novendorchange handling in zypperpkg
+
+* refactor handling of novendorchange and fix tests
+
+add patch support for allow vendor change option with zypper
+
+Revert "add patch support for allow vendor change option with zypper"
+
+This reverts commit cee4cc182b4740c912861c712dea7bc44eb70ffb.
+
+Allow vendor change option with zypper (#313)
+
+* add patch support for allow vendor change option with zypper
+
+* adjust unit tests vendor change refactor, dropping cli arg
+
+* Fix pr issues
+
+Co-authored-by: Pablo Suárez Hernández
+
+* Fix unit test for allow vendor change on upgrade
+
+* Add unit test with unsupported zypper version
+
+Co-authored-by: Pablo Suárez Hernández
+
+Move vendor change logic to zypper class (#355)
+
+* move vendor change logic to zypper class
+
+* fix thing in zypperpkg
+
+* refactor unit tests
+
+* Fix for syntax error
+
+* Fix mocking issue in unit test
+
+* fix issues with pr
+
+* Fix for zypperpkg unit test after refactor of vendorchangeflags
+
+Co-authored-by: Pablo Suárez Hernández
+
+* fix docs for vendor change options
+
+* Fix doc strings, and clean up tests
+
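+The decision the new allow_vendor_change() method makes can be summarised as
+a small truth table (standalone model, not the _Zypper class; the support
+flag stands in for the zypper >= 1.11.34 / >= 1.14.8 version checks):
+
+    def wants_avc(allowvendorchange, novendorchange, zypper_supports_avc):
+        # Vendor change is requested either explicitly or via the legacy
+        # novendorchange=False spelling, and honoured only when supported.
+        return (allowvendorchange or not novendorchange) and zypper_supports_avc
+
+    assert wants_avc(True, True, True) is True     # explicit request wins
+    assert wants_avc(False, False, True) is True   # legacy novendorchange=False
+    assert wants_avc(False, True, True) is False   # defaults: no vendor change
+    assert wants_avc(True, True, False) is False   # old zypper: unsupported
+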
+Co-authored-by: Jochen Breuer
+Co-authored-by: Pablo Suárez Hernández
+---
+ salt/modules/zypperpkg.py | 105 ++++--
+ tests/unit/modules/test_zypperpkg.py | 532 ++++++++++++++++++++++++++-
+ 2 files changed, 612 insertions(+), 25 deletions(-)
+
+diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
+index 4bb10f445a..2da470bea3 100644
+--- a/salt/modules/zypperpkg.py
++++ b/salt/modules/zypperpkg.py
+@@ -36,6 +36,8 @@ import salt.utils.stringutils
+ import salt.utils.systemd
+ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
++
++# pylint: disable=import-error,redefined-builtin,no-name-in-module
+ from salt.utils.versions import LooseVersion
+
+ if salt.utils.files.is_fcntl_available():
+@@ -140,6 +142,13 @@ class _Zypper:
+ self.__systemd_scope = False
+ self.__root = None
+
++ # Dist upgrade vendor change support (SLE12+)
++ self.dup_avc = False
++ # Install/Patch/Upgrade vendor change support (SLE15+)
++ self.inst_avc = False
++ # Flag if allow vendor change should be allowed
++ self.avc = False
++
+ # Call status
+ self.__called = False
+
+@@ -184,6 +193,8 @@ class _Zypper:
+ self.__no_raise = True
+ elif item == "refreshable":
+ self.__refresh = True
++ elif item == "allow_vendor_change":
++ return self.__allow_vendor_change
+ elif item == "call":
+ return self.__call
+ else:
+@@ -224,6 +235,33 @@ class _Zypper:
+ def pid(self):
+ return self.__call_result.get("pid", "")
+
++ def __allow_vendor_change(self, allowvendorchange, novendorchange):
++ if allowvendorchange or not novendorchange:
++ self.refresh_zypper_flags()
++ if self.dup_avc or self.inst_avc:
++ log.info("Enabling vendor change")
++ self.avc = True
++ else:
++ log.warning(
++ "Enabling/Disabling vendor changes is not supported on this Zypper version"
++ )
++ return self
++
++ def refresh_zypper_flags(self):
++ try:
++ zypp_version = version("zypper")
++ # zypper version 1.11.34 in SLE12 update supports vendor change only for dist-upgrade
++ if version_cmp(zypp_version, "1.11.34") >= 0:
++ # zypper version supports vendor change for dist upgrade
++ self.dup_avc = True
++ # zypper version 1.14.8 in SLE15 update supports vendor change for install/patch/upgrade
++ if version_cmp(zypp_version, "1.14.8") >= 0:
++ self.inst_avc = True
++ else:
++ log.error("Failed to compare Zypper version")
++ except Exception as ex:
++ log.error("Unable to get Zypper version: {}".format(ex))
++
+ def _is_error(self):
+ """
+ Is this is an error code?
+@@ -362,6 +400,15 @@ class _Zypper:
+ if self.__systemd_scope:
+ cmd.extend(["systemd-run", "--scope"])
+ cmd.extend(self.__cmd)
++
++ if self.avc:
++ for i in ["install", "upgrade", "dist-upgrade"]:
++ if i in cmd:
++ if i == "install" and self.inst_avc:
++ cmd.insert(cmd.index(i) + 1, "--allow-vendor-change")
++ elif i in ["upgrade", "dist-upgrade"] and self.dup_avc:
++ cmd.insert(cmd.index(i) + 1, "--allow-vendor-change")
++
+ log.debug("Calling Zypper: %s", " ".join(cmd))
+ self.__call_result = __salt__["cmd.run_all"](cmd, **kwargs)
+ if self._check_result():
+@@ -1490,6 +1537,8 @@ def install(
+ no_recommends=False,
+ root=None,
+ inclusion_detection=False,
++ novendorchange=True,
++ allowvendorchange=False,
+ **kwargs
+ ):
+ """
+@@ -1537,6 +1586,13 @@ def install(
+ skip_verify
+ Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+
++ novendorchange
++ DEPRECATED(use allowvendorchange): If set to True, do not allow vendor changes. Default: True
++
++ allowvendorchange
++ If set to True, vendor change is allowed. Default: False
++ If both allowvendorchange and novendorchange are passed, only allowvendorchange is used.
++
+ version
+ Can be either a version number, or the combination of a comparison
+ operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
+@@ -1702,6 +1758,7 @@ def install(
+ cmd_install.append(
+ kwargs.get("resolve_capabilities") and "--capability" or "--name"
+ )
++ # Install/patch/upgrade with vendor change support is only available in SLE 15+ and openSUSE Leap 15+
+
+ if not refresh:
+ cmd_install.insert(0, "--no-refresh")
+@@ -1738,6 +1795,7 @@ def install(
+ systemd_scope=systemd_scope,
+ root=root,
+ )
++ .allow_vendor_change(allowvendorchange, novendorchange)
+ .call(*cmd)
+ .splitlines()
+ ):
+@@ -1750,7 +1808,9 @@ def install(
+ while downgrades:
+ cmd = cmd_install + ["--force"] + downgrades[:500]
+ downgrades = downgrades[500:]
+- __zypper__(no_repo_failure=ignore_repo_failure, root=root).call(*cmd)
++ __zypper__(no_repo_failure=ignore_repo_failure, root=root).allow_vendor_change(
++ allowvendorchange, novendorchange
++ ).call(*cmd)
+
+ _clean_cache()
+ new = (
+@@ -1783,7 +1843,8 @@ def upgrade(
+ dryrun=False,
+ dist_upgrade=False,
+ fromrepo=None,
+- novendorchange=False,
++ novendorchange=True,
++ allowvendorchange=False,
+ skip_verify=False,
+ no_recommends=False,
+ root=None,
+@@ -1844,7 +1905,11 @@ def upgrade(
+ Specify a list of package repositories to upgrade from. Default: None
+
+ novendorchange
+- If set to True, no allow vendor changes. Default: False
++ DEPRECATED(use allowvendorchange): If set to True, do not allow vendor changes. Default: True
++
++ allowvendorchange
++ If set to True, vendor change is allowed. Default: False
++ If both allowvendorchange and novendorchange are passed, only allowvendorchange is used.
+
+ skip_verify
+ Skip the GPG verification check (e.g., ``--no-gpg-checks``)
+@@ -1927,28 +1992,18 @@ def upgrade(
+ cmd_update.extend(["--from" if dist_upgrade else "--repo", repo])
+ log.info("Targeting repos: %s", fromrepo)
+
+- if dist_upgrade:
+- if novendorchange:
+- # TODO: Grains validation should be moved to Zypper class
+- if __grains__["osrelease_info"][0] > 11:
+- cmd_update.append("--no-allow-vendor-change")
+- log.info("Disabling vendor changes")
+- else:
+- log.warning(
+- "Disabling vendor changes is not supported on this Zypper version"
+- )
++ if no_recommends:
++ cmd_update.append("--no-recommends")
++ log.info("Disabling recommendations")
+
+- if no_recommends:
+- cmd_update.append("--no-recommends")
+- log.info("Disabling recommendations")
++ if dryrun:
++ # Creates a solver test case for debugging.
++ log.info("Executing debugsolver and performing a dry-run dist-upgrade")
++ __zypper__(systemd_scope=_systemd_scope(), root=root).allow_vendor_change(
++ allowvendorchange, novendorchange
++ ).noraise.call(*cmd_update + ["--debug-solver"])
+
+- if dryrun:
+- # Creates a solver test case for debugging.
+- log.info("Executing debugsolver and performing a dry-run dist-upgrade")
+- __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(
+- *cmd_update + ["--debug-solver"]
+- )
+- else:
++ if not dist_upgrade:
+ if name or pkgs:
+ try:
+ (pkg_params, _) = __salt__["pkg_resource.parse_targets"](
+@@ -1962,7 +2017,9 @@ def upgrade(
+
+ old = list_pkgs(root=root, attr=diff_attr)
+
+- __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update)
++ __zypper__(systemd_scope=_systemd_scope(), root=root).allow_vendor_change(
++ allowvendorchange, novendorchange
++ ).noraise.call(*cmd_update)
+ _clean_cache()
+ new = list_pkgs(root=root, attr=diff_attr)
+ ret = salt.utils.data.compare_dicts(old, new)
+diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
+index 5e4c967520..e85c93da3b 100644
+--- a/tests/unit/modules/test_zypperpkg.py
++++ b/tests/unit/modules/test_zypperpkg.py
+@@ -137,6 +137,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+
+ stdout_xml_snippet = ''
+ sniffer = RunSniffer(stdout=stdout_xml_snippet)
++ zypper.__zypper__._reset()
+ with patch.dict("salt.modules.zypperpkg.__salt__", {"cmd.run_all": sniffer}):
+ self.assertEqual(zypper.__zypper__.call("foo"), stdout_xml_snippet)
+ self.assertEqual(len(sniffer.calls), 1)
+@@ -628,13 +629,495 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ {"vim": "7.4.326-2.62", "fakepkg": ""},
+ )
+
++ def test_upgrade_without_vendor_change(self):
++ """
++ Dist-upgrade without vendor change option.
++ """
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
++ ):
++ ret = zypper.upgrade(dist_upgrade=True)
++ self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
++ zypper_mock.assert_any_call(
++ "dist-upgrade", "--auto-agree-with-licenses",
++ )
++
++ def test_refresh_zypper_flags(self):
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.version", MagicMock(return_value="0.5")
++ ), patch.dict(
++ zypper.__salt__, {"lowpkg.version_cmp": MagicMock(side_effect=[-1, -1])}
++ ):
++ zypper.__zypper__.refresh_zypper_flags()
++ assert zypper.__zypper__.inst_avc == False
++ assert zypper.__zypper__.dup_avc == False
++ with patch(
++ "salt.modules.zypperpkg.version", MagicMock(return_value="1.11.34")
++ ), patch.dict(
++ zypper.__salt__, {"lowpkg.version_cmp": MagicMock(side_effect=[0, -1])}
++ ):
++ zypper.__zypper__.refresh_zypper_flags()
++ assert zypper.__zypper__.inst_avc == False
++ assert zypper.__zypper__.dup_avc == True
++ with patch(
++ "salt.modules.zypperpkg.version", MagicMock(return_value="1.14.8")
++ ), patch.dict(
++ zypper.__salt__, {"lowpkg.version_cmp": MagicMock(side_effect=[0, 0])}
++ ):
++ zypper.__zypper__.refresh_zypper_flags()
++ assert zypper.__zypper__.inst_avc == True
++ assert zypper.__zypper__.dup_avc == True
++
++ @patch("salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock())
++ def test_allow_vendor_change_function(self):
++ zypper.__zypper__._reset()
++ zypper.__zypper__.inst_avc = True
++ zypper.__zypper__.dup_avc = True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, False)
++ assert zypper.__zypper__.avc == True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, False)
++ assert zypper.__zypper__.avc == True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, True)
++ assert zypper.__zypper__.avc == False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, True)
++ assert zypper.__zypper__.avc == True
++
++ zypper.__zypper__._reset()
++ zypper.__zypper__.inst_avc = False
++ zypper.__zypper__.dup_avc = True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, False)
++ assert zypper.__zypper__.avc == True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, False)
++ assert zypper.__zypper__.avc == True
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, True)
++ assert zypper.__zypper__.avc == False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, True)
++ assert zypper.__zypper__.avc == True
++
++ zypper.__zypper__._reset()
++ zypper.__zypper__.inst_avc = False
++ zypper.__zypper__.dup_avc = False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, False)
++ assert zypper.__zypper__.avc == False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, False)
++ assert zypper.__zypper__.avc == False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(False, True)
++ assert zypper.__zypper__.avc == False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.allow_vendor_change(True, True)
++ assert zypper.__zypper__.avc == False
++
++ @patch(
++ "salt.utils.environment.get_module_environment",
++ MagicMock(return_value={"SALT_RUNNING": "1"}),
++ )
++ def test_zypper_call_dist_upgrade_with_avc_true(self):
++ cmd_run_mock = MagicMock(return_value={"retcode": 0, "stdout": None})
++ zypper.__zypper__._reset()
++ with patch.dict(zypper.__salt__, {"cmd.run_all": cmd_run_mock}), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch("salt.modules.zypperpkg.__zypper__._reset", MagicMock()):
++ zypper.__zypper__.dup_avc = True
++ zypper.__zypper__.avc = True
++ zypper.__zypper__.call("dist-upgrade")
++ cmd_run_mock.assert_any_call(
++ [
++ "zypper",
++ "--non-interactive",
++ "--no-refresh",
++ "dist-upgrade",
++ "--allow-vendor-change",
++ ],
++ output_loglevel="trace",
++ python_shell=False,
++ env={"SALT_RUNNING": "1"},
++ )
++
++ @patch(
++ "salt.utils.environment.get_module_environment",
++ MagicMock(return_value={"SALT_RUNNING": "1"}),
++ )
++ def test_zypper_call_dist_upgrade_with_avc_false(self):
++ cmd_run_mock = MagicMock(return_value={"retcode": 0, "stdout": None})
++ zypper.__zypper__._reset()
++ with patch.dict(zypper.__salt__, {"cmd.run_all": cmd_run_mock}), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch("salt.modules.zypperpkg.__zypper__._reset", MagicMock()):
++ zypper.__zypper__.dup_avc = False
++ zypper.__zypper__.avc = False
++ zypper.__zypper__.call("dist-upgrade")
++ cmd_run_mock.assert_any_call(
++ ["zypper", "--non-interactive", "--no-refresh", "dist-upgrade",],
++ output_loglevel="trace",
++ python_shell=False,
++ env={"SALT_RUNNING": "1"},
++ )
++
++ @patch(
++ "salt.utils.environment.get_module_environment",
++ MagicMock(return_value={"SALT_RUNNING": "1"}),
++ )
++ def test_zypper_call_install_with_avc_true(self):
++ cmd_run_mock = MagicMock(return_value={"retcode": 0, "stdout": None})
++ zypper.__zypper__._reset()
++ with patch.dict(zypper.__salt__, {"cmd.run_all": cmd_run_mock}), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch("salt.modules.zypperpkg.__zypper__._reset", MagicMock()):
++ zypper.__zypper__.inst_avc = True
++ zypper.__zypper__.avc = True
++ zypper.__zypper__.call("install")
++ cmd_run_mock.assert_any_call(
++ [
++ "zypper",
++ "--non-interactive",
++ "--no-refresh",
++ "install",
++ "--allow-vendor-change",
++ ],
++ output_loglevel="trace",
++ python_shell=False,
++ env={"SALT_RUNNING": "1"},
++ )
++
++ @patch(
++ "salt.utils.environment.get_module_environment",
++ MagicMock(return_value={"SALT_RUNNING": "1"}),
++ )
++ def test_zypper_call_install_with_avc_false(self):
++ cmd_run_mock = MagicMock(return_value={"retcode": 0, "stdout": None})
++ zypper.__zypper__._reset()
++ with patch.dict(zypper.__salt__, {"cmd.run_all": cmd_run_mock}), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch("salt.modules.zypperpkg.__zypper__._reset", MagicMock()):
++ zypper.__zypper__.inst_avc = False
++ zypper.__zypper__.dup_avc = True
++ zypper.__zypper__.avc = True
++ zypper.__zypper__.call("install")
++ cmd_run_mock.assert_any_call(
++ ["zypper", "--non-interactive", "--no-refresh", "install",],
++ output_loglevel="trace",
++ python_shell=False,
++ env={"SALT_RUNNING": "1"},
++ )
++
++ def test_upgrade_with_novendorchange_true(self):
++ """
++ Dist-upgrade without vendor change option.
++ """
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ) as refresh_flags_mock, patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
++ ):
++ ret = zypper.upgrade(dist_upgrade=True, novendorchange=True)
++ refresh_flags_mock.assert_not_called()
++ zypper_mock.assert_any_call(
++ "dist-upgrade", "--auto-agree-with-licenses",
++ )
++
++ def test_upgrade_with_novendorchange_false(self):
++ """
++ Perform dist-upgrade with novendorchange set to False.
++ """
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ zypper.__zypper__.inst_avc = True
++ zypper.__zypper__.dup_avc = True
++ with patch.dict(
++ zypper.__salt__,
++ {
++ "pkg_resource.version": MagicMock(return_value="1.15"),
++ "lowpkg.version_cmp": MagicMock(return_value=1),
++ },
++ ):
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ novendorchange=False,
++ )
++ assert zypper.__zypper__.avc == True
++
++ def test_upgrade_with_allowvendorchange_true(self):
++ """
++ Perform dist-upgrade with allowvendorchange set to True.
++ """
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ with patch.dict(
++ zypper.__salt__,
++ {
++ "pkg_resource.version": MagicMock(return_value="1.15"),
++ "lowpkg.version_cmp": MagicMock(return_value=1),
++ },
++ ):
++
++ zypper.__zypper__.inst_avc = True
++ zypper.__zypper__.dup_avc = True
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ allowvendorchange=True,
++ )
++ assert zypper.__zypper__.avc == True
++
++ def test_upgrade_with_allowvendorchange_false(self):
++ """
++ Perform dist-upgrade with allowvendorchange set to False.
++ """
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ with patch.dict(
++ zypper.__salt__,
++ {
++ "pkg_resource.version": MagicMock(return_value="1.15"),
++ "lowpkg.version_cmp": MagicMock(return_value=1),
++ },
++ ):
++
++ zypper.__zypper__.inst_avc = True
++ zypper.__zypper__.dup_avc = True
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ allowvendorchange=False,
++ )
++ assert zypper.__zypper__.avc == False
++
++ def test_upgrade_old_zypper(self):
++ zypper.__zypper__._reset()
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg.__zypper__.refresh_zypper_flags", MagicMock()
++ ) as refresh_flags_mock, patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ with patch.dict(
++ zypper.__salt__,
++ {
++ "pkg_resource.version": MagicMock(return_value="1.11"),
++ "lowpkg.version_cmp": MagicMock(return_value=-1),
++ },
++ ):
++ zypper.__zypper__.inst_avc = False
++ zypper.__zypper__.dup_avc = False
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ novendorchange=False,
++ )
++ zypper.__zypper__.avc = False
++
++ def test_upgrade_success(self):
++ """
++ Test system upgrade and dist-upgrade success.
++
++ :return:
++ """
++ with patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ with patch(
++ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
++ ) as zypper_mock:
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
++ ):
++ ret = zypper.upgrade()
++ self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
++ zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(
++ side_effect=[
++ {"kernel-default": "1.1"},
++ {"kernel-default": "1.1,1.2"},
++ ]
++ ),
++ ):
++ ret = zypper.upgrade()
++ self.assertDictEqual(
++ ret, {"kernel-default": {"old": "1.1", "new": "1.1,1.2"}}
++ )
++ zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1,1.2"}]),
++ ):
++ ret = zypper.upgrade()
++ self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.1,1.2"}})
++ zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ ret = zypper.upgrade(dist_upgrade=True, dryrun=True)
++ zypper_mock.assert_any_call(
++ "dist-upgrade", "--auto-agree-with-licenses", "--dry-run"
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--debug-solver",
++ )
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ ret = zypper.upgrade(
++ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False
++ )
++ zypper_mock.assert_any_call(
++ "update",
++ "--auto-agree-with-licenses",
++ "--repo",
++ "Dummy",
++ "--repo",
++ "Dummy2",
++ )
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ ret = zypper.upgrade(
++ dist_upgrade=True,
++ dryrun=True,
++ fromrepo=["Dummy", "Dummy2"],
++ novendorchange=True,
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--from",
++ "Dummy",
++ "--from",
++ "Dummy2",
++ )
++ zypper_mock.assert_any_call(
++ "dist-upgrade",
++ "--auto-agree-with-licenses",
++ "--dry-run",
++ "--from",
++ "Dummy",
++ "--from",
++ "Dummy2",
++ "--debug-solver",
++ )
++
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ ret = zypper.upgrade(
++ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False
++ )
++ zypper_mock.assert_any_call(
++ "update",
++ "--auto-agree-with-licenses",
++ "--repo",
++ "Dummy",
++ "--repo",
++ "Dummy2",
++ )
++
+ def test_upgrade_kernel(self):
+ """
+ Test kernel package upgrade success.
+
+ :return:
+ """
+- with patch.dict(zypper.__grains__, {"osrelease_info": [12, 1]}), patch(
++ with patch(
+ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
+ ), patch(
+ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
+@@ -672,6 +1155,53 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+ },
+ )
+
++ def test_upgrade_failure(self):
++ """
++ Test system upgrade failure.
++
++ :return:
++ """
++ zypper_out = """
++Loading repository data...
++Reading installed packages...
++Computing distribution upgrade...
++Use 'zypper repos' to get the list of defined repositories.
++Repository 'DUMMY' not found by its alias, number, or URI.
++"""
++
++ class FailingZypperDummy:
++ def __init__(self):
++ self.stdout = zypper_out
++ self.stderr = ""
++ self.pid = 1234
++ self.exit_code = 555
++ self.noraise = MagicMock()
++ self.allow_vendor_change = self
++ self.SUCCESS_EXIT_CODES = [0]
++
++ def __call__(self, *args, **kwargs):
++ return self
++
++ with patch(
++ "salt.modules.zypperpkg.__zypper__", FailingZypperDummy()
++ ) as zypper_mock, patch(
++ "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
++ ), patch(
++ "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
++ ):
++ zypper_mock.noraise.call = MagicMock()
++ with patch(
++ "salt.modules.zypperpkg.list_pkgs",
++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
++ ):
++ with self.assertRaises(CommandExecutionError) as cmd_exc:
++ ret = zypper.upgrade(dist_upgrade=True, fromrepo=["DUMMY"])
++ self.assertEqual(cmd_exc.exception.info["changes"], {})
++ self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
++ zypper_mock.noraise.call.assert_called_with(
++ "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY",
++ )
++
+ def test_upgrade_available(self):
+ """
+ Test whether or not an upgrade is available for a given package.
+--
+2.39.2
+
+
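+For reference, the resulting module interface can be exercised as follows
+(a usage sketch; targets and package names are placeholders):
+
+    salt '*' pkg.upgrade dist_upgrade=True allowvendorchange=True
+    salt '*' pkg.install vim allowvendorchange=True
+    # legacy spelling, still honoured:
+    salt '*' pkg.upgrade dist_upgrade=True novendorchange=False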
diff --git a/async-batch-implementation.patch b/async-batch-implementation.patch
new file mode 100644
index 0000000..303127a
--- /dev/null
+++ b/async-batch-implementation.patch
@@ -0,0 +1,1149 @@
+From 76e69d9ef729365db1b0f1798f5f8a038d2065fc Mon Sep 17 00:00:00 2001
+From: Mihai Dinca
+Date: Fri, 16 Nov 2018 17:05:29 +0100
+Subject: [PATCH] Async batch implementation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add find_job checks
+
+Check if should close on all events
+
+Make batch_delay a request parameter
+
+Allow multiple event handlers
+
+Use config value for gather_job_timeout when not in payload
+
+Add async batch unittests
+
+Allow metadata to pass
+
+Pass metadata only to batch jobs
+
+Add the metadata to the start/done events
+
+Pass only metadata not all **kwargs
+
+Add separate batch presence_ping timeout
+
+Fix async batch race conditions
+
+Close batching when there is no next batch
+
+Add 'batch_presence_ping_timeout' and 'batch_presence_ping_gather_job_timeout' parameters for synchronous batching
+
+Fix async-batch multiple done events
+
+Fix memory leak produced by batch async find_jobs mechanism (bsc#1140912)
+
+Multiple fixes:
+
+- use different JIDs per find_job
+- fix bug in detection of find_job returns
+- fix timeout passed from request payload
+- better cleanup at the end of batching
+
+Co-authored-by: Pablo Suárez Hernández
+
+Improve batch_async to release consumed memory (bsc#1140912)
+
+Use current IOLoop for the LocalClient instance of BatchAsync (bsc#1137642)
+
+Fix failing unit tests for batch async
+
+Remove unnecessary yield causing BadYieldError (bsc#1154620)
+
+Fixing StreamClosed issue
+
+Fix batch_async obsolete test
+
+batch_async: avoid using fnmatch to match event (#217)
+
+Batch Async: Catch exceptions and safely unregister and close instances
+
+Fix unit tests for batch async after refactor
+
+Changed imports to vendored Tornado
+
+Async batch implementation fix (#320)
+
+Remove deprecated usage of NO_MOCK and NO_MOCK_REASON
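+
+The scheduling invariant implemented by BatchAsync._get_next() (added below)
+is: keep at most `batch` minions in flight, skipping minions that are done,
+active or timed out. A standalone restatement:
+
+    def next_batch(minions, done, active, timedout, batch_size):
+        to_run = minions - done - active - timedout
+        slots = batch_size - len(active)   # free slots in the moving window
+        return set(list(to_run)[: min(len(to_run), slots)])
+
+    m = {"m1", "m2", "m3", "m4"}
+    assert len(next_batch(m, set(), {"m1"}, set(), batch_size=2)) == 1
+    assert next_batch(m, m, set(), set(), batch_size=2) == set()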
+---
+ salt/auth/__init__.py | 2 +
+ salt/cli/batch.py | 109 ++++--
+ salt/cli/batch_async.py | 315 +++++++++++++++++
+ salt/cli/support/profiles/__init__.py | 5 +-
+ salt/client/__init__.py | 45 +--
+ salt/master.py | 20 ++
+ salt/transport/ipc.py | 9 +-
+ salt/utils/event.py | 8 +-
+ tests/pytests/unit/cli/test_batch_async.py | 386 +++++++++++++++++++++
+ 9 files changed, 841 insertions(+), 58 deletions(-)
+ create mode 100644 salt/cli/batch_async.py
+ create mode 100644 tests/pytests/unit/cli/test_batch_async.py
+
+diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py
+index 331baab211..b0f0c0ac6c 100644
+--- a/salt/auth/__init__.py
++++ b/salt/auth/__init__.py
+@@ -49,6 +49,8 @@ AUTH_INTERNAL_KEYWORDS = frozenset(
+ "print_event",
+ "raw",
+ "yield_pub_data",
++ "batch",
++ "batch_delay",
+ ]
+ )
+
+diff --git a/salt/cli/batch.py b/salt/cli/batch.py
+index 8e1547c61d..fcd3f571d5 100644
+--- a/salt/cli/batch.py
++++ b/salt/cli/batch.py
+@@ -13,9 +13,88 @@ import salt.exceptions
+ import salt.output
+ import salt.utils.stringutils
+
++# pylint: disable=import-error,no-name-in-module,redefined-builtin
++
+ log = logging.getLogger(__name__)
+
+
++def get_bnum(opts, minions, quiet):
++ """
++ Return the active number of minions to maintain
++ """
++ partition = lambda x: float(x) / 100.0 * len(minions)
++ try:
++ if isinstance(opts["batch"], str) and "%" in opts["batch"]:
++ res = partition(float(opts["batch"].strip("%")))
++ if res < 1:
++ return int(math.ceil(res))
++ else:
++ return int(res)
++ else:
++ return int(opts["batch"])
++ except ValueError:
++ if not quiet:
++ salt.utils.stringutils.print_cli(
++ "Invalid batch data sent: {}\nData must be in the "
++ "form of %10, 10% or 3".format(opts["batch"])
++ )
++
++
++def batch_get_opts(
++ tgt, fun, batch, parent_opts, arg=(), tgt_type="glob", ret="", kwarg=None, **kwargs
++):
++ # We need to re-import salt.utils.args here
++ # even though it has already been imported.
++ # when cmd_batch is called via the NetAPI
++ # the module is unavailable.
++ import salt.utils.args
++
++ arg = salt.utils.args.condition_input(arg, kwarg)
++ opts = {
++ "tgt": tgt,
++ "fun": fun,
++ "arg": arg,
++ "tgt_type": tgt_type,
++ "ret": ret,
++ "batch": batch,
++ "failhard": kwargs.get("failhard", parent_opts.get("failhard", False)),
++ "raw": kwargs.get("raw", False),
++ }
++
++ if "timeout" in kwargs:
++ opts["timeout"] = kwargs["timeout"]
++ if "gather_job_timeout" in kwargs:
++ opts["gather_job_timeout"] = kwargs["gather_job_timeout"]
++ if "batch_wait" in kwargs:
++ opts["batch_wait"] = int(kwargs["batch_wait"])
++
++ for key, val in parent_opts.items():
++ if key not in opts:
++ opts[key] = val
++
++ opts["batch_presence_ping_timeout"] = kwargs.get(
++ "batch_presence_ping_timeout", opts["timeout"]
++ )
++ opts["batch_presence_ping_gather_job_timeout"] = kwargs.get(
++ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"]
++ )
++
++ return opts
++
++
++def batch_get_eauth(kwargs):
++ eauth = {}
++ if "eauth" in kwargs:
++ eauth["eauth"] = kwargs.pop("eauth")
++ if "username" in kwargs:
++ eauth["username"] = kwargs.pop("username")
++ if "password" in kwargs:
++ eauth["password"] = kwargs.pop("password")
++ if "token" in kwargs:
++ eauth["token"] = kwargs.pop("token")
++ return eauth
++
++
+ class Batch:
+ """
+ Manage the execution of batch runs
+@@ -39,6 +118,7 @@ class Batch:
+ self.pub_kwargs = eauth if eauth else {}
+ self.quiet = quiet
+ self.options = _parser
++ self.minions = set()
+ # Passing listen True to local client will prevent it from purging
+ # cached events while iterating over the batches.
+ self.local = salt.client.get_local_client(opts["conf_file"], listen=True)
+@@ -51,7 +131,7 @@ class Batch:
+ self.opts["tgt"],
+ "test.ping",
+ [],
+- self.opts["timeout"],
++ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]),
+ ]
+
+ selected_target_option = self.opts.get("selected_target_option", None)
+@@ -62,7 +142,12 @@ class Batch:
+
+ self.pub_kwargs["yield_pub_data"] = True
+ ping_gen = self.local.cmd_iter(
+- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs
++ *args,
++ gather_job_timeout=self.opts.get(
++ "batch_presence_ping_gather_job_timeout",
++ self.opts["gather_job_timeout"],
++ ),
++ **self.pub_kwargs
+ )
+
+ # Broadcast to targets
+@@ -87,25 +172,7 @@ class Batch:
+ return (list(fret), ping_gen, nret.difference(fret))
+
+ def get_bnum(self):
+- """
+- Return the active number of minions to maintain
+- """
+- partition = lambda x: float(x) / 100.0 * len(self.minions)
+- try:
+- if isinstance(self.opts["batch"], str) and "%" in self.opts["batch"]:
+- res = partition(float(self.opts["batch"].strip("%")))
+- if res < 1:
+- return int(math.ceil(res))
+- else:
+- return int(res)
+- else:
+- return int(self.opts["batch"])
+- except ValueError:
+- if not self.quiet:
+- salt.utils.stringutils.print_cli(
+- "Invalid batch data sent: {}\nData must be in the "
+- "form of %10, 10% or 3".format(self.opts["batch"])
+- )
++ return get_bnum(self.opts, self.minions, self.quiet)
+
+ def __update_wait(self, wait):
+ now = datetime.now()
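+
+With get_bnum() now shared at module level, its parsing rules are worth a
+worked example (standalone restatement; the argument shapes are illustrative):
+
+    import math
+
+    def bnum(batch, n_minions):
+        if isinstance(batch, str) and "%" in batch:
+            res = float(batch.strip("%")) / 100.0 * n_minions
+            return int(math.ceil(res)) if res < 1 else int(res)
+        return int(batch)
+
+    assert bnum("10%", 25) == 2   # 2.5 minions -> truncated to 2
+    assert bnum("10%", 5) == 1    # 0.5 -> rounded up, at least one minion
+    assert bnum(3, 25) == 3       # plain integers pass through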
+diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
+new file mode 100644
+index 0000000000..09aa85258b
+--- /dev/null
++++ b/salt/cli/batch_async.py
+@@ -0,0 +1,315 @@
++"""
++Execute a job on the targeted minions by using a moving window of fixed size `batch`.
++"""
++
++import gc
++
++# pylint: enable=import-error,no-name-in-module,redefined-builtin
++import logging
++
++import salt.client
++import salt.ext.tornado
++import tornado
++from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum
++
++log = logging.getLogger(__name__)
++
++
++class BatchAsync:
++ """
++ Run a job on the targeted minions by using a moving window of fixed size `batch`.
++
++ ``BatchAsync`` is used to execute a job on the targeted minions by keeping
++ the number of concurrent running minions to the size of the `batch` parameter.
++
++ The control parameters are:
++ - batch: number/percentage of concurrent running minions
++ - batch_delay: minimum wait time between batches
++ - batch_presence_ping_timeout: time to wait for presence pings before starting the batch
++ - gather_job_timeout: `find_job` timeout
++ - timeout: time to wait before firing a `find_job`
++
++ When the batch starts, a `start` event is fired:
++ - tag: salt/batch/<batch-jid>/start
++ - data: {
++ "available_minions": self.minions,
++ "down_minions": targeted_minions - presence_ping_minions
++ }
++
++ When the batch ends, a `done` event is fired:
++ - tag: salt/batch/<batch-jid>/done
++ - data: {
++ "available_minions": self.minions,
++ "down_minions": targeted_minions - presence_ping_minions
++ "done_minions": self.done_minions,
++ "timedout_minions": self.timedout_minions
++ }
++ """
++
++ def __init__(self, parent_opts, jid_gen, clear_load):
++ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
++ self.local = salt.client.get_local_client(
++ parent_opts["conf_file"], io_loop=ioloop
++ )
++ if "gather_job_timeout" in clear_load["kwargs"]:
++ clear_load["gather_job_timeout"] = clear_load["kwargs"].pop(
++ "gather_job_timeout"
++ )
++ else:
++ clear_load["gather_job_timeout"] = self.local.opts["gather_job_timeout"]
++ self.batch_presence_ping_timeout = clear_load["kwargs"].get(
++ "batch_presence_ping_timeout", None
++ )
++ self.batch_delay = clear_load["kwargs"].get("batch_delay", 1)
++ self.opts = batch_get_opts(
++ clear_load.pop("tgt"),
++ clear_load.pop("fun"),
++ clear_load["kwargs"].pop("batch"),
++ self.local.opts,
++ **clear_load
++ )
++ self.eauth = batch_get_eauth(clear_load["kwargs"])
++ self.metadata = clear_load["kwargs"].get("metadata", {})
++ self.minions = set()
++ self.targeted_minions = set()
++ self.timedout_minions = set()
++ self.done_minions = set()
++ self.active = set()
++ self.initialized = False
++ self.jid_gen = jid_gen
++ self.ping_jid = jid_gen()
++ self.batch_jid = jid_gen()
++ self.find_job_jid = jid_gen()
++ self.find_job_returned = set()
++ self.ended = False
++ self.event = salt.utils.event.get_event(
++ "master",
++ self.opts["sock_dir"],
++ self.opts["transport"],
++ opts=self.opts,
++ listen=True,
++ io_loop=ioloop,
++ keep_loop=True,
++ )
++ self.scheduled = False
++ self.patterns = set()
++
++ def __set_event_handler(self):
++ ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
++ batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid)
++ self.event.subscribe(ping_return_pattern, match_type="glob")
++ self.event.subscribe(batch_return_pattern, match_type="glob")
++ self.patterns = {
++ (ping_return_pattern, "ping_return"),
++ (batch_return_pattern, "batch_run"),
++ }
++ self.event.set_event_handler(self.__event_handler)
++
++ def __event_handler(self, raw):
++ if not self.event:
++ return
++ try:
++ mtag, data = self.event.unpack(raw, self.event.serial)
++ for (pattern, op) in self.patterns:
++ if mtag.startswith(pattern[:-1]):
++ minion = data["id"]
++ if op == "ping_return":
++ self.minions.add(minion)
++ if self.targeted_minions == self.minions:
++ self.event.io_loop.spawn_callback(self.start_batch)
++ elif op == "find_job_return":
++ if data.get("return", None):
++ self.find_job_returned.add(minion)
++ elif op == "batch_run":
++ if minion in self.active:
++ self.active.remove(minion)
++ self.done_minions.add(minion)
++ self.event.io_loop.spawn_callback(self.schedule_next)
++ except Exception as ex:
++ log.error("Exception occured while processing event: {}".format(ex))
++
++ def _get_next(self):
++ to_run = (
++ self.minions.difference(self.done_minions)
++ .difference(self.active)
++ .difference(self.timedout_minions)
++ )
++ next_batch_size = min(
++ len(to_run), # partial batch (all left)
++ self.batch_size - len(self.active), # full batch or available slots
++ )
++ return set(list(to_run)[:next_batch_size])
++
++ def check_find_job(self, batch_minions, jid):
++ if self.event:
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++ self.event.unsubscribe(find_job_return_pattern, match_type="glob")
++ self.patterns.remove((find_job_return_pattern, "find_job_return"))
++
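++ # Minions that neither answered find_job nor finished the batch are
++ # marked as timed out and removed from the active window.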
++ timedout_minions = batch_minions.difference(
++ self.find_job_returned
++ ).difference(self.done_minions)
++ self.timedout_minions = self.timedout_minions.union(timedout_minions)
++ self.active = self.active.difference(self.timedout_minions)
++ running = batch_minions.difference(self.done_minions).difference(
++ self.timedout_minions
++ )
++
++ if timedout_minions:
++ self.schedule_next()
++
++ if self.event and running:
++ self.find_job_returned = self.find_job_returned.difference(running)
++ self.event.io_loop.spawn_callback(self.find_job, running)
++
++ @salt.ext.tornado.gen.coroutine
++ def find_job(self, minions):
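++ # Probe still-running minions with saltutil.find_job, then check the
++ # results after gather_job_timeout (see check_find_job).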
++ if self.event:
++ not_done = minions.difference(self.done_minions).difference(
++ self.timedout_minions
++ )
++ try:
++ if not_done:
++ jid = self.jid_gen()
++ find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++ self.patterns.add((find_job_return_pattern, "find_job_return"))
++ self.event.subscribe(find_job_return_pattern, match_type="glob")
++ ret = yield self.local.run_job_async(
++ not_done,
++ "saltutil.find_job",
++ [self.batch_jid],
++ "list",
++ gather_job_timeout=self.opts["gather_job_timeout"],
++ jid=jid,
++ **self.eauth
++ )
++ yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"])
++ if self.event:
++ self.event.io_loop.spawn_callback(
++ self.check_find_job, not_done, jid
++ )
++ except Exception as ex:
++ log.error(
++ "Exception occurred handling batch async: %s. Aborting execution.", ex
++ )
++ self.close_safe()
++
++ @salt.ext.tornado.gen.coroutine
++ def start(self):
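++ # Ping all targeted minions first; the batch starts once every ping
++ # returns or when the presence timeout expires, whichever comes first.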
++ if self.event:
++ self.__set_event_handler()
++ ping_return = yield self.local.run_job_async(
++ self.opts["tgt"],
++ "test.ping",
++ [],
++ self.opts.get(
++ "selected_target_option", self.opts.get("tgt_type", "glob")
++ ),
++ gather_job_timeout=self.opts["gather_job_timeout"],
++ jid=self.ping_jid,
++ metadata=self.metadata,
++ **self.eauth
++ )
++ self.targeted_minions = set(ping_return["minions"])
++ # start batching even if not all minions respond to ping
++ yield salt.ext.tornado.gen.sleep(
++ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
++ )
++ if self.event:
++ self.event.io_loop.spawn_callback(self.start_batch)
++
++ @salt.ext.tornado.gen.coroutine
++ def start_batch(self):
++ if not self.initialized:
++ self.batch_size = get_bnum(self.opts, self.minions, True)
++ self.initialized = True
++ data = {
++ "available_minions": self.minions,
++ "down_minions": self.targeted_minions.difference(self.minions),
++ "metadata": self.metadata,
++ }
++ ret = self.event.fire_event(
++ data, "salt/batch/{}/start".format(self.batch_jid)
++ )
++ if self.event:
++ self.event.io_loop.spawn_callback(self.run_next)
++
++ @salt.ext.tornado.gen.coroutine
++ def end_batch(self):
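++ # Fire the salt/batch/<jid>/done event once no minions are left
++ # running or pending, then tear down the batch.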
++ left = self.minions.symmetric_difference(
++ self.done_minions.union(self.timedout_minions)
++ )
++ if not left and not self.ended:
++ self.ended = True
++ data = {
++ "available_minions": self.minions,
++ "down_minions": self.targeted_minions.difference(self.minions),
++ "done_minions": self.done_minions,
++ "timedout_minions": self.timedout_minions,
++ "metadata": self.metadata,
++ }
++ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
++
++ # release to the IOLoop to allow the event to be published
++ # before closing batch async execution
++ yield salt.ext.tornado.gen.sleep(1)
++ self.close_safe()
++
++ def close_safe(self):
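++ # Unsubscribe all patterns and drop the references so the event bus
++ # and the local client can be garbage collected.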
++ for (pattern, label) in self.patterns:
++ self.event.unsubscribe(pattern, match_type="glob")
++ self.event.remove_event_handler(self.__event_handler)
++ self.event = None
++ self.local = None
++ self.ioloop = None
++ del self
++ gc.collect()
++
++ @salt.ext.tornado.gen.coroutine
++ def schedule_next(self):
++ if not self.scheduled:
++ self.scheduled = True
++ # delay a bit so more minion returns can be gathered first
++ yield salt.ext.tornado.gen.sleep(self.batch_delay)
++ if self.event:
++ self.event.io_loop.spawn_callback(self.run_next)
++
++ @salt.ext.tornado.gen.coroutine
++ def run_next(self):
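++ # Publish the job to the next window of minions and schedule a
++ # find_job probe after `timeout` to detect unresponsive ones.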
++ self.scheduled = False
++ next_batch = self._get_next()
++ if next_batch:
++ self.active = self.active.union(next_batch)
++ try:
++ ret = yield self.local.run_job_async(
++ next_batch,
++ self.opts["fun"],
++ self.opts["arg"],
++ "list",
++ raw=self.opts.get("raw", False),
++ ret=self.opts.get("return", ""),
++ gather_job_timeout=self.opts["gather_job_timeout"],
++ jid=self.batch_jid,
++ metadata=self.metadata,
++ )
++
++ yield salt.ext.tornado.gen.sleep(self.opts["timeout"])
++
++ # The batch can be done already at this point, which means no self.event
++ if self.event:
++ self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
++ except Exception as ex:
++ log.error("Error in scheduling next batch: %s. Aborting execution", ex)
++ self.active = self.active.difference(next_batch)
++ self.close_safe()
++ else:
++ yield self.end_batch()
++ gc.collect()
++
++ def __del__(self):
++ self.local = None
++ self.event = None
++ self.ioloop = None
++ gc.collect()
+diff --git a/salt/cli/support/profiles/__init__.py b/salt/cli/support/profiles/__init__.py
+index b86aef30b8..4ae6d07b13 100644
+--- a/salt/cli/support/profiles/__init__.py
++++ b/salt/cli/support/profiles/__init__.py
+@@ -1,4 +1,3 @@
+-# coding=utf-8
+-'''
++"""
+ Profiles for salt-support.
+-'''
++"""
+diff --git a/salt/client/__init__.py b/salt/client/__init__.py
+index 7ce8963b8f..bcda56c9b4 100644
+--- a/salt/client/__init__.py
++++ b/salt/client/__init__.py
+@@ -594,38 +594,20 @@ class LocalClient:
+ import salt.cli.batch
+ import salt.utils.args
+
+- arg = salt.utils.args.condition_input(arg, kwarg)
+- opts = {
+- "tgt": tgt,
+- "fun": fun,
+- "arg": arg,
+- "tgt_type": tgt_type,
+- "ret": ret,
+- "batch": batch,
+- "failhard": kwargs.get("failhard", self.opts.get("failhard", False)),
+- "raw": kwargs.get("raw", False),
+- }
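++ # Delegate option assembly to batch_get_opts so the defaults live in
++ # one place shared with the CLI batch implementation.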
++ opts = salt.cli.batch.batch_get_opts(
++ tgt,
++ fun,
++ batch,
++ self.opts,
++ arg=arg,
++ tgt_type=tgt_type,
++ ret=ret,
++ kwarg=kwarg,
++ **kwargs
++ )
++
++ eauth = salt.cli.batch.batch_get_eauth(kwargs)
+
+- if "timeout" in kwargs:
+- opts["timeout"] = kwargs["timeout"]
+- if "gather_job_timeout" in kwargs:
+- opts["gather_job_timeout"] = kwargs["gather_job_timeout"]
+- if "batch_wait" in kwargs:
+- opts["batch_wait"] = int(kwargs["batch_wait"])
+-
+- eauth = {}
+- if "eauth" in kwargs:
+- eauth["eauth"] = kwargs.pop("eauth")
+- if "username" in kwargs:
+- eauth["username"] = kwargs.pop("username")
+- if "password" in kwargs:
+- eauth["password"] = kwargs.pop("password")
+- if "token" in kwargs:
+- eauth["token"] = kwargs.pop("token")
+-
+- for key, val in self.opts.items():
+- if key not in opts:
+- opts[key] = val
+ batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True)
+ for ret, _ in batch.run():
+ yield ret
+@@ -1826,6 +1808,7 @@ class LocalClient:
+ "key": self.key,
+ "tgt_type": tgt_type,
+ "ret": ret,
++ "timeout": timeout,
+ "jid": jid,
+ }
+
+diff --git a/salt/master.py b/salt/master.py
+index 9d2239bffb..2a526b4f21 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -19,6 +19,7 @@ import time
+ import salt.acl
+ import salt.auth
+ import salt.channel.server
++import salt.cli.batch_async
+ import salt.client
+ import salt.client.ssh.client
+ import salt.crypt
+@@ -2153,6 +2154,22 @@ class ClearFuncs(TransportMethods):
+ return False
+ return self.loadauth.get_tok(clear_load["token"])
+
++ def publish_batch(self, clear_load, minions, missing):
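++ # Run the batch on the master IOLoop and return the batch jid right
++ # away; progress is reported via the salt/batch/<jid>/* events.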
++ batch_load = {}
++ batch_load.update(clear_load)
++ batch = salt.cli.batch_async.BatchAsync(
++ self.local.opts,
++ functools.partial(self._prep_jid, clear_load, {}),
++ batch_load,
++ )
++ ioloop = salt.ext.tornado.ioloop.IOLoop.current()
++ ioloop.add_callback(batch.start)
++
++ return {
++ "enc": "clear",
++ "load": {"jid": batch.batch_jid, "minions": minions, "missing": missing},
++ }
++
+ def publish(self, clear_load):
+ """
+ This method sends out publications to the minions, it can only be used
+@@ -2297,6 +2314,9 @@ class ClearFuncs(TransportMethods):
+ ),
+ },
+ }
++ if extra.get("batch", None):
++ return self.publish_batch(clear_load, minions, missing)
++
+ jid = self._prep_jid(clear_load, extra)
+ if jid is None:
+ return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
+diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
+index ca13a498e3..3a3f0c7a5f 100644
+--- a/salt/transport/ipc.py
++++ b/salt/transport/ipc.py
+@@ -659,6 +659,7 @@ class IPCMessageSubscriber(IPCClient):
+ self._read_stream_future = None
+ self._saved_data = []
+ self._read_in_progress = Lock()
++ self.callbacks = set()
+
+ @salt.ext.tornado.gen.coroutine
+ def _read(self, timeout, callback=None):
+@@ -764,8 +765,12 @@ class IPCMessageSubscriber(IPCClient):
+ return self._saved_data.pop(0)
+ return self.io_loop.run_sync(lambda: self._read(timeout))
+
++ def __run_callbacks(self, raw):
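++ # Fan each raw frame out to all registered callbacks on the IOLoop,
++ # letting several consumers share a single read loop.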
++ for callback in self.callbacks:
++ self.io_loop.spawn_callback(callback, raw)
++
+ @salt.ext.tornado.gen.coroutine
+- def read_async(self, callback):
++ def read_async(self):
+ """
+ Asynchronously read messages and invoke a callback when they are ready.
+
+@@ -783,7 +788,7 @@ class IPCMessageSubscriber(IPCClient):
+ except Exception as exc: # pylint: disable=broad-except
+ log.error("Exception occurred while Subscriber connecting: %s", exc)
+ yield salt.ext.tornado.gen.sleep(1)
+- yield self._read(None, callback)
++ yield self._read(None, self.__run_callbacks)
+
+ def close(self):
+ """
+diff --git a/salt/utils/event.py b/salt/utils/event.py
+index a07ad513b1..869e12a140 100644
+--- a/salt/utils/event.py
++++ b/salt/utils/event.py
+@@ -946,6 +946,10 @@ class SaltEvent:
+ # Minion fired a bad retcode, fire an event
+ self._fire_ret_load_specific_fun(load)
+
++ def remove_event_handler(self, event_handler):
++ if event_handler in self.subscriber.callbacks:
++ self.subscriber.callbacks.remove(event_handler)
++
+ def set_event_handler(self, event_handler):
+ """
+ Invoke the event_handler callback each time an event arrives.
+@@ -954,8 +958,10 @@ class SaltEvent:
+
+ if not self.cpub:
+ self.connect_pub()
++
++ self.subscriber.callbacks.add(event_handler)
+ # This will handle reconnects
+- return self.subscriber.read_async(event_handler)
++ return self.subscriber.read_async()
+
+ # pylint: disable=W1701
+ def __del__(self):
+diff --git a/tests/pytests/unit/cli/test_batch_async.py b/tests/pytests/unit/cli/test_batch_async.py
+new file mode 100644
+index 0000000000..c0b708de76
+--- /dev/null
++++ b/tests/pytests/unit/cli/test_batch_async.py
+@@ -0,0 +1,386 @@
++import salt.ext.tornado
++from salt.cli.batch_async import BatchAsync
++from salt.ext.tornado.testing import AsyncTestCase
++from tests.support.mock import MagicMock, patch
++from tests.support.unit import TestCase
++
++
++class AsyncBatchTestCase(AsyncTestCase, TestCase):
++ def setUp(self):
++ self.io_loop = self.get_new_ioloop()
++ opts = {
++ "batch": "1",
++ "conf_file": {},
++ "tgt": "*",
++ "timeout": 5,
++ "gather_job_timeout": 5,
++ "batch_presence_ping_timeout": 1,
++ "transport": None,
++ "sock_dir": "",
++ }
++
++ with patch("salt.client.get_local_client", MagicMock(return_value=MagicMock())):
++ with patch(
++ "salt.cli.batch_async.batch_get_opts", MagicMock(return_value=opts)
++ ):
++ self.batch = BatchAsync(
++ opts,
++ MagicMock(side_effect=["1234", "1235", "1236"]),
++ {
++ "tgt": "",
++ "fun": "",
++ "kwargs": {"batch": "", "batch_presence_ping_timeout": 1},
++ },
++ )
++
++ def test_ping_jid(self):
++ self.assertEqual(self.batch.ping_jid, "1234")
++
++ def test_batch_jid(self):
++ self.assertEqual(self.batch.batch_jid, "1235")
++
++ def test_find_job_jid(self):
++ self.assertEqual(self.batch.find_job_jid, "1236")
++
++ def test_batch_size(self):
++ """
++ Tests passing batch value as a number
++ """
++ self.batch.opts = {"batch": "2", "timeout": 5}
++ self.batch.minions = {"foo", "bar"}
++ self.batch.start_batch()
++ self.assertEqual(self.batch.batch_size, 2)
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_start_on_batch_presence_ping_timeout(self):
++ self.batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ self.batch.local.run_job_async.return_value = future
++ ret = self.batch.start()
++ # assert start_batch is called later with batch_presence_ping_timeout as param
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.start_batch,),
++ )
++ # assert test.ping called
++ self.assertEqual(
++ self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob")
++ )
++ # assert targeted_minions == all minions matched by tgt
++ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_start_on_gather_job_timeout(self):
++ self.batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ self.batch.local.run_job_async.return_value = future
++ self.batch.batch_presence_ping_timeout = None
++ ret = self.batch.start()
++ # assert start_batch is called later with gather_job_timeout as param
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.start_batch,),
++ )
++
++ def test_batch_fire_start_event(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.opts = {"batch": "2", "timeout": 5}
++ self.batch.event = MagicMock()
++ self.batch.metadata = {"mykey": "myvalue"}
++ self.batch.start_batch()
++ self.assertEqual(
++ self.batch.event.fire_event.call_args[0],
++ (
++ {
++ "available_minions": {"foo", "bar"},
++ "down_minions": set(),
++ "metadata": self.batch.metadata,
++ },
++ "salt/batch/1235/start",
++ ),
++ )
++
++ @salt.ext.tornado.testing.gen_test
++ def test_start_batch_calls_next(self):
++ self.batch.run_next = MagicMock(return_value=MagicMock())
++ self.batch.event = MagicMock()
++ self.batch.start_batch()
++ self.assertEqual(self.batch.initialized, True)
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,)
++ )
++
++ def test_batch_fire_done_event(self):
++ self.batch.targeted_minions = {"foo", "baz", "bar"}
++ self.batch.minions = {"foo", "bar"}
++ self.batch.done_minions = {"foo"}
++ self.batch.timedout_minions = {"bar"}
++ self.batch.event = MagicMock()
++ self.batch.metadata = {"mykey": "myvalue"}
++ old_event = self.batch.event
++ self.batch.end_batch()
++ self.assertEqual(
++ old_event.fire_event.call_args[0],
++ (
++ {
++ "available_minions": {"foo", "bar"},
++ "done_minions": self.batch.done_minions,
++ "down_minions": {"baz"},
++ "timedout_minions": self.batch.timedout_minions,
++ "metadata": self.batch.metadata,
++ },
++ "salt/batch/1235/done",
++ ),
++ )
++
++ def test_batch__del__(self):
++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
++ event = MagicMock()
++ batch.event = event
++ batch.__del__()
++ self.assertEqual(batch.local, None)
++ self.assertEqual(batch.event, None)
++ self.assertEqual(batch.ioloop, None)
++
++ def test_batch_close_safe(self):
++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
++ event = MagicMock()
++ batch.event = event
++ batch.patterns = {
++ ("salt/job/1234/ret/*", "find_job_return"),
++ ("salt/job/4321/ret/*", "find_job_return"),
++ }
++ batch.close_safe()
++ self.assertEqual(batch.local, None)
++ self.assertEqual(batch.event, None)
++ self.assertEqual(batch.ioloop, None)
++ self.assertEqual(len(event.unsubscribe.mock_calls), 2)
++ self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_next(self):
++ self.batch.event = MagicMock()
++ self.batch.opts["fun"] = "my.fun"
++ self.batch.opts["arg"] = []
++ self.batch._get_next = MagicMock(return_value={"foo", "bar"})
++ self.batch.batch_size = 2
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ self.batch.local.run_job_async.return_value = future
++ self.batch.run_next()
++ self.assertEqual(
++ self.batch.local.run_job_async.call_args[0],
++ ({"foo", "bar"}, "my.fun", [], "list"),
++ )
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.find_job, {"foo", "bar"}),
++ )
++ self.assertEqual(self.batch.active, {"bar", "foo"})
++
++ def test_next_batch(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), {"foo", "bar"})
++
++ def test_next_batch_one_done(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.done_minions = {"bar"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), {"foo"})
++
++ def test_next_batch_one_done_one_active(self):
++ self.batch.minions = {"foo", "bar", "baz"}
++ self.batch.done_minions = {"bar"}
++ self.batch.active = {"baz"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), {"foo"})
++
++ def test_next_batch_one_done_one_active_one_timedout(self):
++ self.batch.minions = {"foo", "bar", "baz", "faz"}
++ self.batch.done_minions = {"bar"}
++ self.batch.active = {"baz"}
++ self.batch.timedout_minions = {"faz"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), {"foo"})
++
++ def test_next_batch_bigger_size(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.batch_size = 3
++ self.assertEqual(self.batch._get_next(), {"foo", "bar"})
++
++ def test_next_batch_all_done(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.done_minions = {"foo", "bar"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), set())
++
++ def test_next_batch_all_active(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.active = {"foo", "bar"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), set())
++
++ def test_next_batch_all_timedout(self):
++ self.batch.minions = {"foo", "bar"}
++ self.batch.timedout_minions = {"foo", "bar"}
++ self.batch.batch_size = 2
++ self.assertEqual(self.batch._get_next(), set())
++
++ def test_batch__event_handler_ping_return(self):
++ self.batch.targeted_minions = {"foo"}
++ self.batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ self.batch.start()
++ self.assertEqual(self.batch.minions, set())
++ self.batch._BatchAsync__event_handler(MagicMock())
++ self.assertEqual(self.batch.minions, {"foo"})
++ self.assertEqual(self.batch.done_minions, set())
++
++ def test_batch__event_handler_call_start_batch_when_all_pings_return(self):
++ self.batch.targeted_minions = {"foo"}
++ self.batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ self.batch.start()
++ self.batch._BatchAsync__event_handler(MagicMock())
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.start_batch,),
++ )
++
++ def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self):
++ self.batch.targeted_minions = {"foo", "bar"}
++ self.batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ self.batch.start()
++ self.batch._BatchAsync__event_handler(MagicMock())
++ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
++
++ def test_batch__event_handler_batch_run_return(self):
++ self.batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1235/ret/foo", {"id": "foo"}))
++ )
++ self.batch.start()
++ self.batch.active = {"foo"}
++ self.batch._BatchAsync__event_handler(MagicMock())
++ self.assertEqual(self.batch.active, set())
++ self.assertEqual(self.batch.done_minions, {"foo"})
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.schedule_next,),
++ )
++
++ def test_batch__event_handler_find_job_return(self):
++ self.batch.event = MagicMock(
++ unpack=MagicMock(
++ return_value=(
++ "salt/job/1236/ret/foo",
++ {"id": "foo", "return": "deadbeaf"},
++ )
++ )
++ )
++ self.batch.start()
++ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return"))
++ self.batch._BatchAsync__event_handler(MagicMock())
++ self.assertEqual(self.batch.find_job_returned, {"foo"})
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_run_next_end_batch_when_no_next(self):
++ self.batch.end_batch = MagicMock()
++ self.batch._get_next = MagicMock(return_value={})
++ self.batch.run_next()
++ self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_find_job(self):
++ self.batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({})
++ self.batch.local.run_job_async.return_value = future
++ self.batch.minions = {"foo", "bar"}
++ self.batch.jid_gen = MagicMock(return_value="1234")
++ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
++ self.batch.find_job({"foo", "bar"})
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.check_find_job, {"foo", "bar"}, "1234"),
++ )
++
++ @salt.ext.tornado.testing.gen_test
++ def test_batch_find_job_with_done_minions(self):
++ self.batch.done_minions = {"bar"}
++ self.batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({})
++ self.batch.local.run_job_async.return_value = future
++ self.batch.minions = {"foo", "bar"}
++ self.batch.jid_gen = MagicMock(return_value="1234")
++ salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
++ self.batch.find_job({"foo", "bar"})
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.check_find_job, {"foo"}, "1234"),
++ )
++
++ def test_batch_check_find_job_did_not_return(self):
++ self.batch.event = MagicMock()
++ self.batch.active = {"foo"}
++ self.batch.find_job_returned = set()
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ self.batch.check_find_job({"foo"}, jid="1234")
++ self.assertEqual(self.batch.find_job_returned, set())
++ self.assertEqual(self.batch.active, set())
++ self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0)
++
++ def test_batch_check_find_job_did_return(self):
++ self.batch.event = MagicMock()
++ self.batch.find_job_returned = {"foo"}
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ self.batch.check_find_job({"foo"}, jid="1234")
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.find_job, {"foo"}),
++ )
++
++ def test_batch_check_find_job_multiple_states(self):
++ self.batch.event = MagicMock()
++ # currently running minions
++ self.batch.active = {"foo", "bar"}
++
++ # minion is running and find_job returns
++ self.batch.find_job_returned = {"foo"}
++
++ # minion started running but find_job did not return
++ self.batch.timedout_minions = {"faz"}
++
++ # minion finished
++ self.batch.done_minions = {"baz"}
++
++ # both not yet done but only 'foo' responded to find_job
++ not_done = {"foo", "bar"}
++
++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ self.batch.check_find_job(not_done, jid="1234")
++
++ # assert 'bar' removed from active
++ self.assertEqual(self.batch.active, {"foo"})
++
++ # assert 'bar' added to timedout_minions
++ self.assertEqual(self.batch.timedout_minions, {"bar", "faz"})
++
++ # assert 'find_job' scheduled again only for 'foo'
++ self.assertEqual(
++ self.batch.event.io_loop.spawn_callback.call_args[0],
++ (self.batch.find_job, {"foo"}),
++ )
++
++ def test_only_one_run_next_is_scheduled(self):
++ self.batch.event = MagicMock()
++ self.batch.scheduled = True
++ self.batch.schedule_next()
++ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
+--
+2.39.2
+
+
diff --git a/avoid-conflicts-with-dependencies-versions-bsc-12116.patch b/avoid-conflicts-with-dependencies-versions-bsc-12116.patch
new file mode 100644
index 0000000..fd31505
--- /dev/null
+++ b/avoid-conflicts-with-dependencies-versions-bsc-12116.patch
@@ -0,0 +1,47 @@
+From 8e9f2587aea52c1d0a5c07d5f9bb77a23ae4d4a6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 23 May 2023 10:40:02 +0100
+Subject: [PATCH] Avoid conflicts with dependencies versions
+ (bsc#1211612) (#581)
+
+This commit fixes the Salt requirements files that are used to
+generate the "requires.txt" file that is included in Salt egg-info
+in order to be consistent with the installed packages
+of Salt dependencies.
+
+This prevents issues when resolving and validating Salt dependencies
+with "pkg_resources" Python module.
+---
+ requirements/base.txt | 2 +-
+ requirements/zeromq.txt | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/requirements/base.txt b/requirements/base.txt
+index c19d8804a2..437aa01d31 100644
+--- a/requirements/base.txt
++++ b/requirements/base.txt
+@@ -6,7 +6,7 @@ MarkupSafe
+ requests>=1.0.0
+ distro>=1.0.1
+ psutil>=5.0.0
+-packaging>=21.3
++packaging>=17.1
+ looseversion
+ # We need contextvars for salt-ssh
+ contextvars
+diff --git a/requirements/zeromq.txt b/requirements/zeromq.txt
+index 1e9a815c1b..23d1ef25dc 100644
+--- a/requirements/zeromq.txt
++++ b/requirements/zeromq.txt
+@@ -1,5 +1,5 @@
+ -r base.txt
+ -r crypto.txt
+
+-pyzmq>=20.0.0
++pyzmq>=17.1.2
+ pyzmq==25.0.2 ; sys_platform == "win32"
+--
+2.39.2
+
+
diff --git a/avoid-crash-on-wrong-output-of-systemctl-version-bsc.patch b/avoid-crash-on-wrong-output-of-systemctl-version-bsc.patch
new file mode 100644
index 0000000..f726aba
--- /dev/null
+++ b/avoid-crash-on-wrong-output-of-systemctl-version-bsc.patch
@@ -0,0 +1,153 @@
+From b2faa019f0f5aa03b03e6c54c9aa60b7f6aa4f91 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Fri, 30 Aug 2024 14:35:33 +0200
+Subject: [PATCH] Avoid crash on wrong output of systemctl version
+ (bsc#1229539)
+
+* Better handling of the output of systemctl --version
+
+* Add more cases to test grains.core._systemd
+---
+ salt/grains/core.py | 27 +++++++-
+ tests/pytests/unit/grains/test_core.py | 89 ++++++++++++++++++++++++++
+ 2 files changed, 113 insertions(+), 3 deletions(-)
+
+diff --git a/salt/grains/core.py b/salt/grains/core.py
+index 4454c303fe..98bbd3868e 100644
+--- a/salt/grains/core.py
++++ b/salt/grains/core.py
+@@ -2432,10 +2432,31 @@ def _systemd():
+ """
+ Return the systemd grain
+ """
+- systemd_info = __salt__["cmd.run"]("systemctl --version").splitlines()
++ systemd_version = "UNDEFINED"
++ systemd_features = ""
++ try:
++ systemd_output = __salt__["cmd.run_all"]("systemctl --version")
++ except Exception: # pylint: disable=broad-except
++ log.error("Exception while executing `systemctl --version`", exc_info=True)
++ return {
++ "version": systemd_version,
++ "features": systemd_features,
++ }
++ if systemd_output.get("retcode") == 0:
++ systemd_info = systemd_output.get("stdout", "").splitlines()
++ try:
++ if systemd_info[0].startswith("systemd "):
++ systemd_version = systemd_info[0].split()[1]
++ systemd_features = systemd_info[1]
++ except IndexError:
++ pass
++ if systemd_version == "UNDEFINED" or systemd_features == "":
++ log.error(
++ "Unexpected output returned by `systemctl --version`: %s", systemd_output
++ )
+ return {
+- "version": systemd_info[0].split()[1],
+- "features": systemd_info[1],
++ "version": systemd_version,
++ "features": systemd_features,
+ }
+
+
+diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py
+index 36545287b9..b64b8c4bf8 100644
+--- a/tests/pytests/unit/grains/test_core.py
++++ b/tests/pytests/unit/grains/test_core.py
+@@ -3593,3 +3593,92 @@ def test_virtual_set_virtual_ec2():
+
+ assert virtual_grains["virtual"] == "Nitro"
+ assert virtual_grains["virtual_subtype"] == "Amazon EC2"
++
++
++@pytest.mark.parametrize(
++ "systemd_data,expected",
++ (
++ (
++ {
++ "pid": 1234,
++ "retcode": 0,
++ "stdout": "systemd 254 (254.3-1)\n+PAM +AUDIT -SELINUX -APPARMOR -IMA +SMACK "
++ "+SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS "
++ "+FIDO2 +IDN2 -IDN +IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 "
++ "-PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD "
++ "+BPF_FRAMEWORK +XKBCOMMON +UTMP -SYSVINIT default-hierarchy=unified",
++ "stderr": "",
++ },
++ {
++ "version": "254",
++ "features": "+PAM +AUDIT -SELINUX -APPARMOR -IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL "
++ "+ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN +IPTC +KMOD +LIBCRYPTSETUP "
++ "+LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ "
++ "+ZLIB +ZSTD +BPF_FRAMEWORK +XKBCOMMON +UTMP -SYSVINIT default-hierarchy=unified",
++ },
++ ),
++ (
++ {
++ "pid": 2345,
++ "retcode": 1,
++ "stdout": "",
++ "stderr": "some garbage in the output",
++ },
++ {
++ "version": "UNDEFINED",
++ "features": "",
++ },
++ ),
++ (
++ {
++ "pid": 3456,
++ "retcode": 0,
++ "stdout": "unexpected stdout\none more line",
++ "stderr": "",
++ },
++ {
++ "version": "UNDEFINED",
++ "features": "",
++ },
++ ),
++ (
++ {
++ "pid": 4567,
++ "retcode": 0,
++ "stdout": "",
++ "stderr": "",
++ },
++ {
++ "version": "UNDEFINED",
++ "features": "",
++ },
++ ),
++ (
++ Exception("Some exception on calling `systemctl --version`"),
++ {
++ "version": "UNDEFINED",
++ "features": "",
++ },
++ ),
++ ),
++)
++def test__systemd(systemd_data, expected):
++ """
++ test _systemd
++ """
++
++ def mock_run_all_systemd(_):
++ if isinstance(systemd_data, Exception):
++ raise systemd_data
++ return systemd_data
++
++ with patch.dict(
++ core.__salt__,
++ {
++ "cmd.run_all": mock_run_all_systemd,
++ },
++ ):
++ ret = core._systemd()
++ assert "version" in ret
++ assert "features" in ret
++ assert ret == expected
+--
+2.46.0
+
diff --git a/avoid-excessive-syslogging-by-watchdog-cronjob-58.patch b/avoid-excessive-syslogging-by-watchdog-cronjob-58.patch
new file mode 100644
index 0000000..7536fad
--- /dev/null
+++ b/avoid-excessive-syslogging-by-watchdog-cronjob-58.patch
@@ -0,0 +1,26 @@
+From 4d8c88d6e467c22ea74738743de5be6577f81085 Mon Sep 17 00:00:00 2001
+From: Hubert Mantel
+Date: Mon, 27 Nov 2017 13:55:13 +0100
+Subject: [PATCH] avoid excessive syslogging by watchdog cronjob (#58)
+
+---
+ pkg/old/suse/salt-minion | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pkg/old/suse/salt-minion b/pkg/old/suse/salt-minion
+index 2e418094ed..73a91ebd62 100755
+--- a/pkg/old/suse/salt-minion
++++ b/pkg/old/suse/salt-minion
+@@ -55,7 +55,7 @@ WATCHDOG_CRON="/etc/cron.d/salt-minion"
+
+ set_watchdog() {
+ if [ ! -f $WATCHDOG_CRON ]; then
+- echo -e '* * * * * root /usr/bin/salt-daemon-watcher --with-init\n' > $WATCHDOG_CRON
++ echo -e '-* * * * * root /usr/bin/salt-daemon-watcher --with-init\n' > $WATCHDOG_CRON
+ # Kick the watcher for 1 minute immediately, because cron will wake up only afterwards
+ /usr/bin/salt-daemon-watcher --with-init & disown
+ fi
+--
+2.39.2
+
+
diff --git a/avoid-explicit-reading-of-etc-salt-minion-bsc-122035.patch b/avoid-explicit-reading-of-etc-salt-minion-bsc-122035.patch
new file mode 100644
index 0000000..fba9803
--- /dev/null
+++ b/avoid-explicit-reading-of-etc-salt-minion-bsc-122035.patch
@@ -0,0 +1,27 @@
+From bbdb56932845dceb47332a4c967c13a9a78b88bc Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 25 Sep 2024 14:08:20 +0300
+Subject: [PATCH] Avoid explicit reading of /etc/salt/minion
+ (bsc#1220357)
+
+Co-authored-by: Daniel A. Wozniak
+---
+ salt/utils/azurearm.py | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/salt/utils/azurearm.py b/salt/utils/azurearm.py
+index 276cbb66b3..9ae128273c 100644
+--- a/salt/utils/azurearm.py
++++ b/salt/utils/azurearm.py
+@@ -47,8 +47,6 @@ try:
+ except ImportError:
+ HAS_AZURE = False
+
+-__opts__ = salt.config.minion_config("/etc/salt/minion")
+-__salt__ = salt.loader.minion_mods(__opts__)
+
+ log = logging.getLogger(__name__)
+
+--
+2.46.1
+
diff --git a/bsc-1176024-fix-file-directory-user-and-group-owners.patch b/bsc-1176024-fix-file-directory-user-and-group-owners.patch
new file mode 100644
index 0000000..edd8e9e
--- /dev/null
+++ b/bsc-1176024-fix-file-directory-user-and-group-owners.patch
@@ -0,0 +1,112 @@
+From 2ca37fe7d2a03ad86ed738f2636fe240b9f4467e Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
+Date: Tue, 6 Oct 2020 12:36:41 +0300
+Subject: [PATCH] bsc#1176024: Fix file/directory user and group
+ ownership containing UTF-8 characters (#275)
+
+* Fix check_perm typos of file module
+
+* Fix UTF8 support for user/group ownership operations with file module and state
+
+* Fix UTF8 support for user/group ownership operations with file module and state
+
+Co-authored-by: Victor Zhestkov
+---
+ salt/modules/file.py | 20 ++++++++++----------
+ salt/states/file.py | 12 ++++++++++--
+ 2 files changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/salt/modules/file.py b/salt/modules/file.py
+index 69d7992f5a..4612d65511 100644
+--- a/salt/modules/file.py
++++ b/salt/modules/file.py
+@@ -245,7 +245,7 @@ def group_to_gid(group):
+ try:
+ if isinstance(group, int):
+ return group
+- return grp.getgrnam(group).gr_gid
++ return grp.getgrnam(salt.utils.stringutils.to_str(group)).gr_gid
+ except KeyError:
+ return ""
+
+@@ -336,7 +336,7 @@ def user_to_uid(user):
+ try:
+ if isinstance(user, int):
+ return user
+- return pwd.getpwnam(user).pw_uid
++ return pwd.getpwnam(salt.utils.stringutils.to_str(user)).pw_uid
+ except KeyError:
+ return ""
+
+@@ -5133,8 +5133,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not user_to_uid(user) == cur["uid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not user == cur["user"]
+- and not user == cur["uid"]
++ and not salt.utils.stringutils.to_str(user) == cur["user"]
++ and not salt.utils.stringutils.to_str(user) == cur["uid"]
+ ):
+ perms["cuser"] = user
+
+@@ -5143,8 +5143,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not group_to_gid(group) == cur["gid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not group == cur["group"]
+- and not group == cur["gid"]
++ and not salt.utils.stringutils.to_str(group) == cur["group"]
++ and not salt.utils.stringutils.to_str(group) == cur["gid"]
+ ):
+ perms["cgroup"] = group
+
+@@ -5188,8 +5188,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not user_to_uid(user) == post["uid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not user == post["user"]
+- and not user == post["uid"]
++ and not salt.utils.stringutils.to_str(user) == post["user"]
++ and not salt.utils.stringutils.to_str(user) == post["uid"]
+ ):
+ if __opts__["test"] is True:
+ ret["changes"]["user"] = user
+@@ -5204,8 +5204,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not group_to_gid(group) == post["gid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not group == post["group"]
+- and not group == post["gid"]
++ and not salt.utils.stringutils.to_str(group) == post["group"]
++ and not salt.utils.stringutils.to_str(group) == post["gid"]
+ ):
+ if __opts__["test"] is True:
+ ret["changes"]["group"] = group
+diff --git a/salt/states/file.py b/salt/states/file.py
+index 9f32151b8b..024e5e34ce 100644
+--- a/salt/states/file.py
++++ b/salt/states/file.py
+@@ -864,9 +864,17 @@ def _check_dir_meta(name, user, group, mode, follow_symlinks=False):
+ if not stats:
+ changes["directory"] = "new"
+ return changes
+- if user is not None and user != stats["user"] and user != stats.get("uid"):
++ if (
++ user is not None
++ and salt.utils.stringutils.to_str(user) != stats["user"]
++ and user != stats.get("uid")
++ ):
+ changes["user"] = user
+- if group is not None and group != stats["group"] and group != stats.get("gid"):
++ if (
++ group is not None
++ and salt.utils.stringutils.to_str(group) != stats["group"]
++ and group != stats.get("gid")
++ ):
+ changes["group"] = group
+ # Normalize the dir mode
+ smode = salt.utils.files.normalize_mode(stats["mode"])
+--
+2.39.2
+
+
diff --git a/change-the-delimeters-to-prevent-possible-tracebacks.patch b/change-the-delimeters-to-prevent-possible-tracebacks.patch
new file mode 100644
index 0000000..4e50b2b
--- /dev/null
+++ b/change-the-delimeters-to-prevent-possible-tracebacks.patch
@@ -0,0 +1,30 @@
+From b7a554e2dec3351c91c237497fe37cbc30d664bd Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 1 Sep 2022 14:42:24 +0300
+Subject: [PATCH] Change the delimiters to prevent possible tracebacks on
+ some packages with dpkg_lowpkg
+
+* Use another separator on query to dpkg-query
+
+* Fix the test test_dpkg_lowpkg::test_info
+---
+ salt/modules/dpkg_lowpkg.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
+index 4d716c8772..78990492cf 100644
+--- a/salt/modules/dpkg_lowpkg.py
++++ b/salt/modules/dpkg_lowpkg.py
+@@ -347,7 +347,7 @@ def _get_pkg_info(*packages, **kwargs):
+ if build_date:
+ pkg_data["build_date"] = build_date
+ pkg_data["build_date_time_t"] = build_date_t
+- pkg_data["description"] = pkg_descr.split(":", 1)[-1]
++ pkg_data["description"] = pkg_descr
+ ret.append(pkg_data)
+
+ return ret
+--
+2.39.2
+
+
diff --git a/control-the-collection-of-lvm-grains-via-config.patch b/control-the-collection-of-lvm-grains-via-config.patch
new file mode 100644
index 0000000..b767488
--- /dev/null
+++ b/control-the-collection-of-lvm-grains-via-config.patch
@@ -0,0 +1,37 @@
+From fcb43735942ca1b796f656d5647e49a93f770bb2 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 10 Jan 2023 15:04:01 +0100
+Subject: [PATCH] Control the collection of lvm grains via config
+
+lvm grain collection can take a long time on systems with a lot of
+volumes and volume groups. On one server we measured ~3 minutes, which
+is way too long for grains.
+
+This change is backwards-compatible, leaving the lvm grain collection
+enabled by default. Users with a lot of lvm volumes/volume groups can
+disable these grains in the minion config by setting
+
+ enable_lvm_grains: False
+---
+ salt/grains/lvm.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/salt/grains/lvm.py b/salt/grains/lvm.py
+index 586b187ddb..f5c406cb44 100644
+--- a/salt/grains/lvm.py
++++ b/salt/grains/lvm.py
+@@ -17,6 +17,10 @@ __salt__ = {
+ log = logging.getLogger(__name__)
+
+
++def __virtual__():
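++ # LVM grains stay enabled unless the minion config sets enable_lvm_grains: False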
++ return __opts__.get("enable_lvm_grains", True)
++
++
+ def lvm():
+ """
+ Return list of LVM devices
+--
+2.39.2
+
+
diff --git a/debian-info_installed-compatibility-50453.patch b/debian-info_installed-compatibility-50453.patch
new file mode 100644
index 0000000..0703083
--- /dev/null
+++ b/debian-info_installed-compatibility-50453.patch
@@ -0,0 +1,351 @@
+From 2fbc5b580661b094cf79cc5da0860745b72088e4 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 25 Jan 2022 17:08:57 +0100
+Subject: [PATCH] Debian info_installed compatibility (#50453)
+
+Remove unused variable
+
+Get unix ticks installation time
+
+Pass on unix ticks installation date time
+
+Implement function to figure out package build time
+
+Unify arch attribute
+
+Add 'attr' support.
+
+Use attr parameter in aptpkg
+
+Add 'all_versions' output structure backward compatibility
+
+Fix docstring
+
+Add UT for generic test of function 'info'
+
+Add UT for 'info' function with the parameter 'attr'
+
+Add UT for info_installed's 'attr' param
+
+Fix docstring
+
+Add returned type check
+
+Add UT for info_installed with 'all_versions=True' output structure
+
+Refactor UT for 'owner' function
+
+Refactor UT: move to decorators, add more checks
+
+Schedule TODO for next refactoring of UT 'show' function
+
+Refactor UT: get rid of old assertion way, flatten tests
+
+Refactor UT: move to native assertions, cleanup noise, flatten complexity for better visibility of what is tested
+
+Lintfix: too many empty lines
+
+Adjust architecture getter according to the lowpkg info
+
+Fix wrong Git merge: missing function signature
+
+Reintroducing reverted changes
+
+Reintroducing changes from commit e20362f6f053eaa4144583604e6aac3d62838419
+that got partially reverted by this commit:
+https://github.com/openSUSE/salt/commit/d0ef24d113bdaaa29f180031b5da384cffe08c64#diff-820e6ce667fe3afddbc1b9cf1682fdef
+---
+ salt/modules/aptpkg.py | 24 ++++-
+ salt/modules/dpkg_lowpkg.py | 110 ++++++++++++++++++----
+ tests/pytests/unit/modules/test_aptpkg.py | 52 ++++++++++
+ 3 files changed, 167 insertions(+), 19 deletions(-)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index 8e89744b5e..938e37cc9e 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -3440,6 +3440,15 @@ def info_installed(*names, **kwargs):
+
+ .. versionadded:: 2016.11.3
+
++ attr
++ Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
++
++ Valid attributes are:
++ version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
++ build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
++
++ .. versionadded:: Neon
++
+ CLI Example:
+
+ .. code-block:: bash
+@@ -3450,11 +3459,19 @@ def info_installed(*names, **kwargs):
+ """
+ kwargs = salt.utils.args.clean_kwargs(**kwargs)
+ failhard = kwargs.pop("failhard", True)
++ kwargs.pop("errors", None) # Only for compatibility with RPM
++ attr = kwargs.pop("attr", None) # Package attributes to return
++ all_versions = kwargs.pop(
++ "all_versions", False
++ ) # This is for backward compatible structure only
++
+ if kwargs:
+ salt.utils.args.invalid_kwargs(kwargs)
+
+ ret = dict()
+- for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items():
++ for pkg_name, pkg_nfo in __salt__["lowpkg.info"](
++ *names, failhard=failhard, attr=attr
++ ).items():
+ t_nfo = dict()
+ if pkg_nfo.get("status", "ii")[1] != "i":
+ continue # return only packages that are really installed
+@@ -3475,7 +3492,10 @@ def info_installed(*names, **kwargs):
+ else:
+ t_nfo[key] = value
+
+- ret[pkg_name] = t_nfo
++ if all_versions:
++ ret.setdefault(pkg_name, []).append(t_nfo)
++ else:
++ ret[pkg_name] = t_nfo
+
+ return ret
+
+diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
+index eefd852c51..4d716c8772 100644
+--- a/salt/modules/dpkg_lowpkg.py
++++ b/salt/modules/dpkg_lowpkg.py
+@@ -234,6 +234,44 @@ def file_dict(*packages, **kwargs):
+ return {"errors": errors, "packages": ret}
+
+
++def _get_pkg_build_time(name):
++ """
++ Get package build time, if possible.
++
++ :param name: installed package name
++ :return: tuple of (ISO 8601 time string, unix timestamp), or (None, None)
++ """
++ iso_time = iso_time_t = None
++ changelog_dir = os.path.join("/usr/share/doc", name)
++ if os.path.exists(changelog_dir):
++ for fname in os.listdir(changelog_dir):
++ try:
++ iso_time_t = int(os.path.getmtime(os.path.join(changelog_dir, fname)))
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
++ break
++ except OSError:
++ pass
++
++ # Packager doesn't care about Debian standards, therefore Plan B: brute-force it.
++ if not iso_time:
++ for pkg_f_path in __salt__["cmd.run"](
++ "dpkg-query -L {}".format(name)
++ ).splitlines():
++ if "changelog" in pkg_f_path.lower() and os.path.exists(pkg_f_path):
++ try:
++ iso_time_t = int(os.path.getmtime(pkg_f_path))
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
++ break
++ except OSError:
++ pass
++
++ return iso_time, iso_time_t
++
++
+ def _get_pkg_info(*packages, **kwargs):
+ """
+ Return list of package information. If 'packages' parameter is empty,
+@@ -257,7 +295,7 @@ def _get_pkg_info(*packages, **kwargs):
+ cmd = (
+ "dpkg-query -W -f='package:" + bin_var + "\\n"
+ "revision:${binary:Revision}\\n"
+- "architecture:${Architecture}\\n"
++ "arch:${Architecture}\\n"
+ "maintainer:${Maintainer}\\n"
+ "summary:${Summary}\\n"
+ "source:${source:Package}\\n"
+@@ -299,10 +337,17 @@ def _get_pkg_info(*packages, **kwargs):
+ key, value = pkg_info_line.split(":", 1)
+ if value:
+ pkg_data[key] = value
+- install_date = _get_pkg_install_time(pkg_data.get("package"))
+- if install_date:
+- pkg_data["install_date"] = install_date
+- pkg_data["description"] = pkg_descr
++ install_date, install_date_t = _get_pkg_install_time(
++ pkg_data.get("package"), pkg_data.get("arch")
++ )
++ if install_date:
++ pkg_data["install_date"] = install_date
++ pkg_data["install_date_time_t"] = install_date_t # Unix ticks
++ build_date, build_date_t = _get_pkg_build_time(pkg_data.get("package"))
++ if build_date:
++ pkg_data["build_date"] = build_date
++ pkg_data["build_date_time_t"] = build_date_t
++ pkg_data["description"] = pkg_descr.split(":", 1)[-1]
+ ret.append(pkg_data)
+
+ return ret
+@@ -327,24 +372,34 @@ def _get_pkg_license(pkg):
+ return ", ".join(sorted(licenses))
+
+
+-def _get_pkg_install_time(pkg):
++def _get_pkg_install_time(pkg, arch):
+ """
+ Return package install time, based on the /var/lib/dpkg/info/<package>.list
+
+ :return:
+ """
+- iso_time = None
++ iso_time = iso_time_t = None
++ loc_root = "/var/lib/dpkg/info"
+ if pkg is not None:
+- location = "/var/lib/dpkg/info/{}.list".format(pkg)
+- if os.path.exists(location):
+- iso_time = (
+- datetime.datetime.utcfromtimestamp(
+- int(os.path.getmtime(location))
+- ).isoformat()
+- + "Z"
+- )
++ locations = []
++ if arch is not None and arch != "all":
++ locations.append(os.path.join(loc_root, "{}:{}.list".format(pkg, arch)))
+
+- return iso_time
++ locations.append(os.path.join(loc_root, "{}.list".format(pkg)))
++ for location in locations:
++ try:
++ iso_time_t = int(os.path.getmtime(location))
++ iso_time = (
++ datetime.datetime.utcfromtimestamp(iso_time_t).isoformat() + "Z"
++ )
++ break
++ except OSError:
++ pass
++
++ if iso_time is None:
++ log.debug('Unable to get package installation time for package "%s".', pkg)
++
++ return iso_time, iso_time_t
+
+
+ def _get_pkg_ds_avail():
+@@ -394,6 +449,15 @@ def info(*packages, **kwargs):
+
+ .. versionadded:: 2016.11.3
+
++ attr
++ Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
++
++ Valid attributes are:
++ version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
++ build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
++
++ .. versionadded:: Neon
++
+ CLI Example:
+
+ .. code-block:: bash
+@@ -408,6 +472,10 @@ def info(*packages, **kwargs):
+
+ kwargs = salt.utils.args.clean_kwargs(**kwargs)
+ failhard = kwargs.pop("failhard", True)
++ attr = kwargs.pop("attr", None) or None
++ if attr:
++ attr = attr.split(",")
++
+ if kwargs:
+ salt.utils.args.invalid_kwargs(kwargs)
+
+@@ -435,6 +503,14 @@ def info(*packages, **kwargs):
+ lic = _get_pkg_license(pkg["package"])
+ if lic:
+ pkg["license"] = lic
+- ret[pkg["package"]] = pkg
++
++ # Remove keys that aren't in attrs
++ pkg_name = pkg["package"]
++ if attr:
++ for k in list(pkg.keys())[:]:
++ if k not in attr:
++ del pkg[k]
++
++ ret[pkg_name] = pkg
+
+ return ret
+diff --git a/tests/pytests/unit/modules/test_aptpkg.py b/tests/pytests/unit/modules/test_aptpkg.py
+index b69402578a..4226957eeb 100644
+--- a/tests/pytests/unit/modules/test_aptpkg.py
++++ b/tests/pytests/unit/modules/test_aptpkg.py
+@@ -360,6 +360,58 @@ def test_info_installed(lowpkg_info_var):
+ assert len(aptpkg.info_installed()) == 1
+
+
++def test_info_installed_attr(lowpkg_info_var):
++ """
++ Test info_installed 'attr'.
++ This doesn't test 'attr' behaviour per se, since the underlying function is in dpkg.
++ The test should simply not raise exceptions for an invalid parameter.
++
++ :return:
++ """
++ expected_pkg = {
++ "url": "http://www.gnu.org/software/wget/",
++ "packager": "Ubuntu Developers ",
++ "name": "wget",
++ "install_date": "2016-08-30T22:20:15Z",
++ "description": "retrieves files from the web",
++ "version": "1.15-1ubuntu1.14.04.2",
++ "architecture": "amd64",
++ "group": "web",
++ "source": "wget",
++ }
++ mock = MagicMock(return_value=lowpkg_info_var)
++ with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}):
++ ret = aptpkg.info_installed("wget", attr="foo,bar")
++ assert ret["wget"] == expected_pkg
++
++
++def test_info_installed_all_versions(lowpkg_info_var):
++ """
++ Test info_installed 'all_versions'.
++ Since Debian won't return the same package under different names,
++ this should just return a different structure, backward compatible with
++ the RPM equivalents.
++
++ :return:
++ """
++ expected_pkg = {
++ "url": "http://www.gnu.org/software/wget/",
++ "packager": "Ubuntu Developers ",
++ "name": "wget",
++ "install_date": "2016-08-30T22:20:15Z",
++ "description": "retrieves files from the web",
++ "version": "1.15-1ubuntu1.14.04.2",
++ "architecture": "amd64",
++ "group": "web",
++ "source": "wget",
++ }
++ mock = MagicMock(return_value=lowpkg_info_var)
++ with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}):
++ ret = aptpkg.info_installed("wget", all_versions=True)
++ assert isinstance(ret, dict)
++ assert ret["wget"] == [expected_pkg]
++
++
+ def test_owner():
+ """
+ Test - Return the name of the package that owns the file.
+--
+2.39.2
+
+
diff --git a/decode-oscap-byte-stream-to-string-bsc-1219001.patch b/decode-oscap-byte-stream-to-string-bsc-1219001.patch
new file mode 100644
index 0000000..3cca99d
--- /dev/null
+++ b/decode-oscap-byte-stream-to-string-bsc-1219001.patch
@@ -0,0 +1,80 @@
+From 45b97042766e15a4336b141b40a03d68156771bc Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Thu, 14 Mar 2024 16:16:02 +0100
+Subject: [PATCH] Decode oscap byte stream to string (bsc#1219001)
+
+---
+ salt/modules/openscap.py | 5 +++--
+ tests/unit/modules/test_openscap.py | 10 +++++-----
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py
+index 216fd89eef..89712ae722 100644
+--- a/salt/modules/openscap.py
++++ b/salt/modules/openscap.py
+@@ -152,10 +152,11 @@ def xccdf_eval(xccdffile, ovalfiles=None, **kwargs):
+ if success:
+ tempdir = tempfile.mkdtemp()
+ proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir)
+- (stdoutdata, error) = proc.communicate()
++ (_, error) = proc.communicate()
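++ # communicate() returns bytes; decode so error concatenates with str below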
++ error = error.decode('ascii', errors='ignore')
+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
+ if proc.returncode < 0:
+- error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
++ error += "\nKilled by signal {}\n".format(proc.returncode)
+ returncode = proc.returncode
+ if success:
+ __salt__["cp.push_dir"](tempdir)
+diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py
+index 301c1869ec..6fbdfed7cf 100644
+--- a/tests/unit/modules/test_openscap.py
++++ b/tests/unit/modules/test_openscap.py
+@@ -218,7 +218,7 @@ class OpenscapTestCase(TestCase):
+ "salt.modules.openscap.Popen",
+ MagicMock(
+ return_value=Mock(
+- **{"returncode": 0, "communicate.return_value": ("", "")}
++ **{"returncode": 0, "communicate.return_value": (bytes(0), bytes(0))}
+ )
+ ),
+ ):
+@@ -269,7 +269,7 @@ class OpenscapTestCase(TestCase):
+ "salt.modules.openscap.Popen",
+ MagicMock(
+ return_value=Mock(
+- **{"returncode": 0, "communicate.return_value": ("", "")}
++ **{"returncode": 0, "communicate.return_value": (bytes(0), bytes(0))}
+ )
+ ),
+ ):
+@@ -323,7 +323,7 @@ class OpenscapTestCase(TestCase):
+ "salt.modules.openscap.Popen",
+ MagicMock(
+ return_value=Mock(
+- **{"returncode": 2, "communicate.return_value": ("", "some error")}
++ **{"returncode": 2, "communicate.return_value": (bytes(0), bytes("some error", "UTF-8"))}
+ )
+ ),
+ ):
+@@ -374,7 +374,7 @@ class OpenscapTestCase(TestCase):
+ "salt.modules.openscap.Popen",
+ MagicMock(
+ return_value=Mock(
+- **{"returncode": 2, "communicate.return_value": ("", "some error")}
++ **{"returncode": 2, "communicate.return_value": (bytes(0), bytes("some error", "UTF-8"))}
+ )
+ ),
+ ):
+@@ -423,7 +423,7 @@ class OpenscapTestCase(TestCase):
+ return_value=Mock(
+ **{
+ "returncode": 1,
+- "communicate.return_value": ("", "evaluation error"),
++ "communicate.return_value": (bytes(0), bytes("evaluation error", "UTF-8")),
+ }
+ )
+ ),
+--
+2.43.0
+
diff --git a/define-__virtualname__-for-transactional_update-modu.patch b/define-__virtualname__-for-transactional_update-modu.patch
new file mode 100644
index 0000000..b6509b5
--- /dev/null
+++ b/define-__virtualname__-for-transactional_update-modu.patch
@@ -0,0 +1,39 @@
+From f02e97df14e4927efbb5ddd3a2bbc5a650330b9e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Fri, 26 May 2023 16:50:51 +0100
+Subject: [PATCH] Define __virtualname__ for transactional_update module
+ (#582)
+
+This prevents problems with LazyLoader when importing this module,
+which was wrongly exposing functions for this module under "state.*"
+---
+ salt/modules/transactional_update.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py
+index 6493966782..658ebccc6b 100644
+--- a/salt/modules/transactional_update.py
++++ b/salt/modules/transactional_update.py
+@@ -285,6 +285,8 @@ from salt.modules.state import _check_queue, _prior_running_states, _wait, runni
+
+ __func_alias__ = {"apply_": "apply"}
+
++__virtualname__ = "transactional_update"
++
+ log = logging.getLogger(__name__)
+
+
+@@ -300,7 +302,7 @@ def __virtual__():
+ _prior_running_states, globals()
+ )
+ running = salt.utils.functools.namespaced_function(running, globals())
+- return True
++ return __virtualname__
+ else:
+ return (False, "Module transactional_update requires a transactional system")
+
+--
+2.39.2
+
+
diff --git a/dereference-symlinks-to-set-proper-__cli-opt-bsc-121.patch b/dereference-symlinks-to-set-proper-__cli-opt-bsc-121.patch
new file mode 100644
index 0000000..55058a2
--- /dev/null
+++ b/dereference-symlinks-to-set-proper-__cli-opt-bsc-121.patch
@@ -0,0 +1,101 @@
+From 9942c488b1e74f2c6f187fcef3556fe53382bb4c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Mon, 13 Nov 2023 15:04:14 +0000
+Subject: [PATCH] Dereference symlinks to set proper __cli opt
+ (bsc#1215963) (#611)
+
+* Dereference symlinks to set proper __cli
+
+* Add changelog entry
+
+* Add unit tests to check path is expanded
+
+---------
+
+Co-authored-by: vzhestkov
+---
+ changelog/65435.fixed.md | 1 +
+ salt/config/__init__.py | 8 ++++++--
+ tests/pytests/unit/config/test_master_config.py | 13 +++++++++++++
+ tests/pytests/unit/config/test_minion_config.py | 13 +++++++++++++
+ 4 files changed, 33 insertions(+), 2 deletions(-)
+ create mode 100644 changelog/65435.fixed.md
+ create mode 100644 tests/pytests/unit/config/test_master_config.py
+ create mode 100644 tests/pytests/unit/config/test_minion_config.py
+
+diff --git a/changelog/65435.fixed.md b/changelog/65435.fixed.md
+new file mode 100644
+index 0000000000..5fa532891d
+--- /dev/null
++++ b/changelog/65435.fixed.md
+@@ -0,0 +1 @@
++Dereference symlinks to set proper __cli opt
+diff --git a/salt/config/__init__.py b/salt/config/__init__.py
+index 43182f3f92..d8258a4dbc 100644
+--- a/salt/config/__init__.py
++++ b/salt/config/__init__.py
+@@ -3747,7 +3747,9 @@ def apply_minion_config(
+ )
+ opts["fileserver_backend"][idx] = new_val
+
+- opts["__cli"] = salt.utils.stringutils.to_unicode(os.path.basename(sys.argv[0]))
++ opts["__cli"] = salt.utils.stringutils.to_unicode(
++ os.path.basename(salt.utils.path.expand(sys.argv[0]))
++ )
+
+ # No ID provided. Will getfqdn save us?
+ using_ip_for_id = False
+@@ -3949,7 +3951,9 @@ def apply_master_config(overrides=None, defaults=None):
+ )
+ opts["keep_acl_in_token"] = True
+
+- opts["__cli"] = salt.utils.stringutils.to_unicode(os.path.basename(sys.argv[0]))
++ opts["__cli"] = salt.utils.stringutils.to_unicode(
++ os.path.basename(salt.utils.path.expand(sys.argv[0]))
++ )
+
+ if "environment" in opts:
+ if opts["saltenv"] is not None:
+diff --git a/tests/pytests/unit/config/test_master_config.py b/tests/pytests/unit/config/test_master_config.py
+new file mode 100644
+index 0000000000..c9de8a7892
+--- /dev/null
++++ b/tests/pytests/unit/config/test_master_config.py
+@@ -0,0 +1,13 @@
++import salt.config
++from tests.support.mock import MagicMock, patch
++
++
++def test___cli_path_is_expanded():
++ defaults = salt.config.DEFAULT_MASTER_OPTS.copy()
++ overrides = {}
++ with patch(
++ "salt.utils.path.expand", MagicMock(return_value="/path/to/testcli")
++ ) as expand_mock:
++ opts = salt.config.apply_master_config(overrides, defaults)
++ assert expand_mock.called
++ assert opts["__cli"] == "testcli"
+diff --git a/tests/pytests/unit/config/test_minion_config.py b/tests/pytests/unit/config/test_minion_config.py
+new file mode 100644
+index 0000000000..34aa84daa7
+--- /dev/null
++++ b/tests/pytests/unit/config/test_minion_config.py
+@@ -0,0 +1,13 @@
++import salt.config
++from tests.support.mock import MagicMock, patch
++
++
++def test___cli_path_is_expanded():
++ defaults = salt.config.DEFAULT_MINION_OPTS.copy()
++ overrides = {}
++ with patch(
++ "salt.utils.path.expand", MagicMock(return_value="/path/to/testcli")
++ ) as expand_mock:
++ opts = salt.config.apply_minion_config(overrides, defaults)
++ assert expand_mock.called
++ assert opts["__cli"] == "testcli"
+--
+2.42.0
+
+
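The effect of the change can be shown in isolation. The sketch below approximates `salt.utils.path.expand` with `os.path.expanduser` plus `os.path.realpath` (an assumption about its behavior), so a symlinked entry point still yields the real script name for `__cli`:

```python
import os
import sys


def cli_name(argv0=None):
    # Resolve user components and symlinks first, then take the basename,
    # mirroring the patched __cli derivation.
    path = sys.argv[0] if argv0 is None else argv0
    return os.path.basename(os.path.realpath(os.path.expanduser(path)))


# A minion started through a symlink such as /usr/local/bin/salt-minion ->
# /usr/bin/salt-minion would now report the link target's basename.
print(cli_name("/usr/local/bin/salt-minion"))
```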
diff --git a/discover-both-.yml-and-.yaml-playbooks-bsc-1211888.patch b/discover-both-.yml-and-.yaml-playbooks-bsc-1211888.patch
new file mode 100644
index 0000000..5aefe29
--- /dev/null
+++ b/discover-both-.yml-and-.yaml-playbooks-bsc-1211888.patch
@@ -0,0 +1,188 @@
+From 05fbd376090c5d7f997c510db0abb62be54d6d40 Mon Sep 17 00:00:00 2001
+From: Johannes Hahn
+Date: Tue, 20 Feb 2024 15:38:08 +0100
+Subject: [PATCH] Discover both *.yml and *.yaml playbooks (bsc#1211888)
+
+Allow for 'playbook_extension' to be either a string or a tuple and
+change the default behavior to discover both.
+---
+ changelog/66048.changed.md | 1 +
+ salt/modules/ansiblegate.py | 46 +++++++++----------
+ .../pytests/unit/modules/test_ansiblegate.py | 3 ++
+ .../example_playbooks/playbook1.yaml | 5 ++
+ 4 files changed, 30 insertions(+), 25 deletions(-)
+ create mode 100644 changelog/66048.changed.md
+ create mode 100644 tests/unit/files/playbooks/example_playbooks/playbook1.yaml
+
+diff --git a/changelog/66048.changed.md b/changelog/66048.changed.md
+new file mode 100644
+index 0000000000..b042e0d313
+--- /dev/null
++++ b/changelog/66048.changed.md
+@@ -0,0 +1 @@
++Ansiblegate discover_playbooks was changed to find playbooks as either *.yml or *.yaml files
+diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py
+index 2f60a7444f..920c374e5a 100644
+--- a/salt/modules/ansiblegate.py
++++ b/salt/modules/ansiblegate.py
+@@ -111,7 +111,7 @@ def __virtual__():
+ if proc.returncode != 0:
+ return (
+ False,
+- "Failed to get the listing of ansible modules:\n{}".format(proc.stderr),
++ f"Failed to get the listing of ansible modules:\n{proc.stderr}",
+ )
+
+ module_funcs = dir(sys.modules[__name__])
+@@ -240,7 +240,7 @@ def call(module, *args, **kwargs):
+ _kwargs = {k: v for (k, v) in kwargs.items() if not k.startswith("__pub")}
+
+ for key, value in _kwargs.items():
+- module_args.append("{}={}".format(key, salt.utils.json.dumps(value)))
++ module_args.append(f"{key}={salt.utils.json.dumps(value)}")
+
+ with NamedTemporaryFile(mode="w") as inventory:
+
+@@ -367,15 +367,15 @@ def playbooks(
+ if diff:
+ command.append("--diff")
+ if isinstance(extra_vars, dict):
+- command.append("--extra-vars='{}'".format(json.dumps(extra_vars)))
++ command.append(f"--extra-vars='{json.dumps(extra_vars)}'")
+ elif isinstance(extra_vars, str) and extra_vars.startswith("@"):
+- command.append("--extra-vars={}".format(extra_vars))
++ command.append(f"--extra-vars={extra_vars}")
+ if flush_cache:
+ command.append("--flush-cache")
+ if inventory:
+- command.append("--inventory={}".format(inventory))
++ command.append(f"--inventory={inventory}")
+ if limit:
+- command.append("--limit={}".format(limit))
++ command.append(f"--limit={limit}")
+ if list_hosts:
+ command.append("--list-hosts")
+ if list_tags:
+@@ -383,25 +383,25 @@ def playbooks(
+ if list_tasks:
+ command.append("--list-tasks")
+ if module_path:
+- command.append("--module-path={}".format(module_path))
++ command.append(f"--module-path={module_path}")
+ if skip_tags:
+- command.append("--skip-tags={}".format(skip_tags))
++ command.append(f"--skip-tags={skip_tags}")
+ if start_at_task:
+- command.append("--start-at-task={}".format(start_at_task))
++ command.append(f"--start-at-task={start_at_task}")
+ if syntax_check:
+ command.append("--syntax-check")
+ if tags:
+- command.append("--tags={}".format(tags))
++ command.append(f"--tags={tags}")
+ if playbook_kwargs:
+ for key, value in playbook_kwargs.items():
+ key = key.replace("_", "-")
+ if value is True:
+- command.append("--{}".format(key))
++ command.append(f"--{key}")
+ elif isinstance(value, str):
+- command.append("--{}={}".format(key, value))
++ command.append(f"--{key}={value}")
+ elif isinstance(value, dict):
+- command.append("--{}={}".format(key, json.dumps(value)))
+- command.append("--forks={}".format(forks))
++ command.append(f"--{key}={json.dumps(value)}")
++ command.append(f"--forks={forks}")
+ cmd_kwargs = {
+ "env": {
+ "ANSIBLE_STDOUT_CALLBACK": "json",
+@@ -502,7 +502,7 @@ def discover_playbooks(
+ List of paths to discover playbooks from.
+
+ :param playbook_extension:
+- File extension of playbooks file to search for. Default: "yml"
++ File extension(s) of playbook files to search for; can be a string or a tuple of strings. Default: (".yml", ".yaml")
+
+ :param hosts_filename:
+ Filename of custom playbook inventory to search for. Default: "hosts"
+@@ -533,19 +533,17 @@ def discover_playbooks(
+ )
+
+ if not playbook_extension:
+- playbook_extension = "yml"
++ playbook_extension = (".yml", ".yaml")
+ if not hosts_filename:
+ hosts_filename = "hosts"
+
+ if path:
+ if not os.path.isabs(path):
+ raise CommandExecutionError(
+- "The given path is not an absolute path: {}".format(path)
++ f"The given path is not an absolute path: {path}"
+ )
+ if not os.path.isdir(path):
+- raise CommandExecutionError(
+- "The given path is not a directory: {}".format(path)
+- )
++ raise CommandExecutionError(f"The given path is not a directory: {path}")
+ return {
+ path: _explore_path(path, playbook_extension, hosts_filename, syntax_check)
+ }
+@@ -573,7 +571,7 @@ def _explore_path(path, playbook_extension, hosts_filename, syntax_check):
+ # Check files in the given path
+ for _f in os.listdir(path):
+ _path = os.path.join(path, _f)
+- if os.path.isfile(_path) and _path.endswith("." + playbook_extension):
++ if os.path.isfile(_path) and _path.endswith(playbook_extension):
+ ret[_f] = {"fullpath": _path}
+ # Check for custom inventory file
+ if os.path.isfile(os.path.join(path, hosts_filename)):
+@@ -584,9 +582,7 @@ def _explore_path(path, playbook_extension, hosts_filename, syntax_check):
+ # Check files in the 1st level of subdirectories
+ for _f2 in os.listdir(_path):
+ _path2 = os.path.join(_path, _f2)
+- if os.path.isfile(_path2) and _path2.endswith(
+- "." + playbook_extension
+- ):
++ if os.path.isfile(_path2) and _path2.endswith(playbook_extension):
+ ret[os.path.join(_f, _f2)] = {"fullpath": _path2}
+ # Check for custom inventory file
+ if os.path.isfile(os.path.join(_path, hosts_filename)):
+@@ -599,7 +595,7 @@ def _explore_path(path, playbook_extension, hosts_filename, syntax_check):
+ )
+ except Exception as exc:
+ raise CommandExecutionError(
+- "There was an exception while discovering playbooks: {}".format(exc)
++ f"There was an exception while discovering playbooks: {exc}"
+ )
+
+ # Run syntax check validation
+diff --git a/tests/pytests/unit/modules/test_ansiblegate.py b/tests/pytests/unit/modules/test_ansiblegate.py
+index 6201809c22..272da721bf 100644
+--- a/tests/pytests/unit/modules/test_ansiblegate.py
++++ b/tests/pytests/unit/modules/test_ansiblegate.py
+@@ -198,6 +198,9 @@ def test_ansible_discover_playbooks_single_path():
+ assert ret[playbooks_dir]["playbook1.yml"] == {
+ "fullpath": os.path.join(playbooks_dir, "playbook1.yml")
+ }
++ assert ret[playbooks_dir]["playbook1.yaml"] == {
++ "fullpath": os.path.join(playbooks_dir, "playbook1.yaml")
++ }
+ assert ret[playbooks_dir]["example-playbook2/site.yml"] == {
+ "fullpath": os.path.join(playbooks_dir, "example-playbook2/site.yml"),
+ "custom_inventory": os.path.join(playbooks_dir, "example-playbook2/hosts"),
+diff --git a/tests/unit/files/playbooks/example_playbooks/playbook1.yaml b/tests/unit/files/playbooks/example_playbooks/playbook1.yaml
+new file mode 100644
+index 0000000000..e258a101e1
+--- /dev/null
++++ b/tests/unit/files/playbooks/example_playbooks/playbook1.yaml
+@@ -0,0 +1,5 @@
++---
++- hosts: all
++ gather_facts: false
++ tasks:
++ - ping:
+--
+2.43.1
+
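The patch leans on the fact that `str.endswith` accepts a tuple of suffixes, so a single call covers both spellings once the default becomes `(".yml", ".yaml")`:

```python
playbook_extension = (".yml", ".yaml")  # the new default

for name in ("site.yml", "deploy.yaml", "README.md"):
    # endswith() with a tuple returns True if any suffix matches
    print(name, name.endswith(playbook_extension))
# site.yml True
# deploy.yaml True
# README.md False
```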
diff --git a/dnfnotify-pkgset-plugin-implementation-3002.2-450.patch b/dnfnotify-pkgset-plugin-implementation-3002.2-450.patch
new file mode 100644
index 0000000..388222d
--- /dev/null
+++ b/dnfnotify-pkgset-plugin-implementation-3002.2-450.patch
@@ -0,0 +1,130 @@
+From c2a35c0c0aac093d0cc35181c1fda0162e22ac4c Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
+Date: Mon, 8 Nov 2021 18:09:53 +0300
+Subject: [PATCH] dnfnotify pkgset plugin implementation - 3002.2 (#450)
+
+* dnfnotify pkgset plugin implementation
+
+* Fix failing check
+
+* Add error reporting if the cookie cannot be saved
+
+* Try to create the dir if it does not exist
+
+* Show the exception message instead of file name
+
+* Fix isort
+---
+ scripts/suse/dnf/plugins/README.md | 21 +++++++++
+ scripts/suse/dnf/plugins/dnfnotify.conf | 2 +
+ scripts/suse/dnf/plugins/dnfnotify.py | 60 +++++++++++++++++++++++++
+ 3 files changed, 83 insertions(+)
+ create mode 100644 scripts/suse/dnf/plugins/README.md
+ create mode 100644 scripts/suse/dnf/plugins/dnfnotify.conf
+ create mode 100644 scripts/suse/dnf/plugins/dnfnotify.py
+
+diff --git a/scripts/suse/dnf/plugins/README.md b/scripts/suse/dnf/plugins/README.md
+new file mode 100644
+index 0000000000..b19428608e
+--- /dev/null
++++ b/scripts/suse/dnf/plugins/README.md
+@@ -0,0 +1,21 @@
++## What it is
++
++A plugin that notifies Salt when DNF is
++used outside of it.
++
++## Installation
++
++Configuration files go to:
++
++ `/etc/dnf/plugins/[name].conf`
++
++The plugin itself goes to:
++
++ `%{python_sitelib}/dnf-plugins/[name].py`
++ The path to the dnf-plugins directory is Python-version dependent.
++
++## Permissions
++
++User: root
++Group: root
++Mode: 644
+diff --git a/scripts/suse/dnf/plugins/dnfnotify.conf b/scripts/suse/dnf/plugins/dnfnotify.conf
+new file mode 100644
+index 0000000000..e7002aa3e9
+--- /dev/null
++++ b/scripts/suse/dnf/plugins/dnfnotify.conf
+@@ -0,0 +1,2 @@
++[main]
++enabled = 1
+diff --git a/scripts/suse/dnf/plugins/dnfnotify.py b/scripts/suse/dnf/plugins/dnfnotify.py
+new file mode 100644
+index 0000000000..6e9df85f71
+--- /dev/null
++++ b/scripts/suse/dnf/plugins/dnfnotify.py
+@@ -0,0 +1,60 @@
++import hashlib
++import os
++
++import dnf
++from dnfpluginscore import _, logger
++
++
++class DnfNotifyPlugin(dnf.Plugin):
++ def __init__(self, base, cli):
++ super().__init__(base, cli)
++ self.base = base
++ self.cookie_file = "/var/cache/salt/minion/rpmdb.cookie"
++ if os.path.exists("/var/lib/rpm/rpmdb.sqlite"):
++ self.rpmdb_file = "/var/lib/rpm/rpmdb.sqlite"
++ else:
++ self.rpmdb_file = "/var/lib/rpm/Packages"
++
++ def transaction(self):
++ if "SALT_RUNNING" not in os.environ:
++ try:
++ ck_dir = os.path.dirname(self.cookie_file)
++ if not os.path.exists(ck_dir):
++ os.makedirs(ck_dir)
++ with open(self.cookie_file, "w") as ck_fh:
++ ck_fh.write(
++ "{chksum} {mtime}\n".format(
++ chksum=self._get_checksum(), mtime=self._get_mtime()
++ )
++ )
++ except OSError as e:
++ logger.error(_("Unable to save cookie file: %s"), e)
++
++ def _get_mtime(self):
++ """
++ Get the modified time of the RPM Database.
++
++ Returns:
++ Unix ticks
++ """
++ return (
++ os.path.exists(self.rpmdb_file)
++ and int(os.path.getmtime(self.rpmdb_file))
++ or 0
++ )
++
++ def _get_checksum(self):
++ """
++ Get the checksum of the RPM Database.
++
++ Returns:
++ hexdigest
++ """
++ digest = hashlib.sha256()
++ with open(self.rpmdb_file, "rb") as rpm_db_fh:
++ while True:
++ buff = rpm_db_fh.read(0x1000)
++ if not buff:
++ break
++ digest.update(buff)
++ return digest.hexdigest()
+--
+2.39.2
+
+
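The cookie the plugin writes is a single `<sha256-hexdigest> <mtime>` line. A hypothetical consumer-side helper (not shipped by this patch) could parse it like this to notice DNF transactions that happened outside Salt:

```python
COOKIE = "/var/cache/salt/minion/rpmdb.cookie"


def read_cookie(path=COOKIE):
    # Parse the "<checksum> <mtime>" line written by dnfnotify.transaction()
    with open(path) as fh:
        chksum, mtime = fh.read().split()
    return chksum, int(mtime)
```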
diff --git a/do-not-call-the-async-wrapper-calls-with-the-separat.patch b/do-not-call-the-async-wrapper-calls-with-the-separat.patch
new file mode 100644
index 0000000..1f4368b
--- /dev/null
+++ b/do-not-call-the-async-wrapper-calls-with-the-separat.patch
@@ -0,0 +1,254 @@
+From 4021f938ed1b64acd47ccaefc111197a1118ee4f Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 11:48:46 +0200
+Subject: [PATCH] Do not call the async wrapper calls with the separate
+ thread
+
+* Do not run the method in a separate thread
+
+* Move test_asynchronous.py to pytests
+---
+ salt/utils/asynchronous.py | 25 +----
+ tests/pytests/unit/utils/test_asynchronous.py | 92 +++++++++++++++++++
+ tests/unit/utils/test_asynchronous.py | 81 ----------------
+ 3 files changed, 94 insertions(+), 104 deletions(-)
+ create mode 100644 tests/pytests/unit/utils/test_asynchronous.py
+ delete mode 100644 tests/unit/utils/test_asynchronous.py
+
+diff --git a/salt/utils/asynchronous.py b/salt/utils/asynchronous.py
+index 88596a4a20..55a50cbcbf 100644
+--- a/salt/utils/asynchronous.py
++++ b/salt/utils/asynchronous.py
+@@ -2,11 +2,8 @@
+ Helpers/utils for working with tornado asynchronous stuff
+ """
+
+-
+ import contextlib
+ import logging
+-import sys
+-import threading
+
+ import salt.ext.tornado.concurrent
+ import salt.ext.tornado.ioloop
+@@ -111,30 +108,12 @@ class SyncWrapper:
+
+ def _wrap(self, key):
+ def wrap(*args, **kwargs):
+- results = []
+- thread = threading.Thread(
+- target=self._target,
+- args=(key, args, kwargs, results, self.io_loop),
++ return self.io_loop.run_sync(
++ lambda: getattr(self.obj, key)(*args, **kwargs)
+ )
+- thread.start()
+- thread.join()
+- if results[0]:
+- return results[1]
+- else:
+- exc_info = results[1]
+- raise exc_info[1].with_traceback(exc_info[2])
+
+ return wrap
+
+- def _target(self, key, args, kwargs, results, io_loop):
+- try:
+- result = io_loop.run_sync(lambda: getattr(self.obj, key)(*args, **kwargs))
+- results.append(True)
+- results.append(result)
+- except Exception: # pylint: disable=broad-except
+- results.append(False)
+- results.append(sys.exc_info())
+-
+ def __enter__(self):
+ return self
+
+diff --git a/tests/pytests/unit/utils/test_asynchronous.py b/tests/pytests/unit/utils/test_asynchronous.py
+new file mode 100644
+index 0000000000..2b5613e2bf
+--- /dev/null
++++ b/tests/pytests/unit/utils/test_asynchronous.py
+@@ -0,0 +1,92 @@
++import tornado.gen
++import tornado.ioloop
++
++import salt.utils.asynchronous as asynchronous
++
++
++class HelperA:
++
++ async_methods = [
++ "sleep",
++ ]
++
++ def __init__(self, io_loop=None):
++ pass
++
++ @tornado.gen.coroutine
++ def sleep(self):
++ yield tornado.gen.sleep(0.1)
++ raise tornado.gen.Return(True)
++
++
++class HelperB:
++
++ async_methods = [
++ "sleep",
++ ]
++
++ def __init__(self, a=None, io_loop=None):
++ if a is None:
++ a = asynchronous.SyncWrapper(HelperA)
++ self.a = a
++
++ @tornado.gen.coroutine
++ def sleep(self):
++ yield tornado.gen.sleep(0.1)
++ self.a.sleep()
++ raise tornado.gen.Return(False)
++
++
++def test_helpers():
++ """
++ Test that the helper classes do what we expect within a regular asynchronous env
++ """
++ io_loop = tornado.ioloop.IOLoop(make_current=False)
++ ret = io_loop.run_sync(lambda: HelperA().sleep())
++ assert ret is True
++
++ ret = io_loop.run_sync(lambda: HelperB().sleep())
++ assert ret is False
++
++
++def test_basic_wrap():
++ """
++ Test that we can wrap an asynchronous caller.
++ """
++ sync = asynchronous.SyncWrapper(HelperA)
++ ret = sync.sleep()
++ assert ret is True
++
++
++def test_basic_wrap_series():
++ """
++ Test that we can wrap an asynchronous caller and call the method in series.
++ """
++ sync = asynchronous.SyncWrapper(HelperA)
++ ret = sync.sleep()
++ assert ret is True
++ ret = sync.sleep()
++ assert ret is True
++
++
++def test_double():
++ """
++ Test when the asynchronous wrapper object itself creates a wrap of another thing
++
++ This works fine since the second wrap is based on the first's IOLoop so we
++ don't have to worry about complex start/stop mechanics
++ """
++ sync = asynchronous.SyncWrapper(HelperB)
++ ret = sync.sleep()
++ assert ret is False
++
++
++def test_double_sameloop():
++ """
++ Test asynchronous wrappers initiated from the same IOLoop, to ensure that
++ we don't wire up both to the same IOLoop (since it causes MANY problems).
++ """
++ a = asynchronous.SyncWrapper(HelperA)
++ sync = asynchronous.SyncWrapper(HelperB, (a,))
++ ret = sync.sleep()
++ assert ret is False
+diff --git a/tests/unit/utils/test_asynchronous.py b/tests/unit/utils/test_asynchronous.py
+deleted file mode 100644
+index e5bd974cb6..0000000000
+--- a/tests/unit/utils/test_asynchronous.py
++++ /dev/null
+@@ -1,81 +0,0 @@
+-import salt.ext.tornado.gen
+-import salt.ext.tornado.testing
+-import salt.utils.asynchronous as asynchronous
+-from salt.ext.tornado.testing import AsyncTestCase
+-
+-
+-class HelperA:
+-
+- async_methods = [
+- "sleep",
+- ]
+-
+- def __init__(self, io_loop=None):
+- pass
+-
+- @salt.ext.tornado.gen.coroutine
+- def sleep(self):
+- yield salt.ext.tornado.gen.sleep(0.1)
+- raise salt.ext.tornado.gen.Return(True)
+-
+-
+-class HelperB:
+-
+- async_methods = [
+- "sleep",
+- ]
+-
+- def __init__(self, a=None, io_loop=None):
+- if a is None:
+- a = asynchronous.SyncWrapper(HelperA)
+- self.a = a
+-
+- @salt.ext.tornado.gen.coroutine
+- def sleep(self):
+- yield salt.ext.tornado.gen.sleep(0.1)
+- self.a.sleep()
+- raise salt.ext.tornado.gen.Return(False)
+-
+-
+-class TestSyncWrapper(AsyncTestCase):
+- @salt.ext.tornado.testing.gen_test
+- def test_helpers(self):
+- """
+- Test that the helper classes do what we expect within a regular asynchronous env
+- """
+- ha = HelperA()
+- ret = yield ha.sleep()
+- self.assertTrue(ret)
+-
+- hb = HelperB()
+- ret = yield hb.sleep()
+- self.assertFalse(ret)
+-
+- def test_basic_wrap(self):
+- """
+- Test that we can wrap an asynchronous caller.
+- """
+- sync = asynchronous.SyncWrapper(HelperA)
+- ret = sync.sleep()
+- self.assertTrue(ret)
+-
+- def test_double(self):
+- """
+- Test when the asynchronous wrapper object itself creates a wrap of another thing
+-
+- This works fine since the second wrap is based on the first's IOLoop so we
+- don't have to worry about complex start/stop mechanics
+- """
+- sync = asynchronous.SyncWrapper(HelperB)
+- ret = sync.sleep()
+- self.assertFalse(ret)
+-
+- def test_double_sameloop(self):
+- """
+- Test asynchronous wrappers initiated from the same IOLoop, to ensure that
+- we don't wire up both to the same IOLoop (since it causes MANY problems).
+- """
+- a = asynchronous.SyncWrapper(HelperA)
+- sync = asynchronous.SyncWrapper(HelperB, (a,))
+- ret = sync.sleep()
+- self.assertFalse(ret)
+--
+2.45.0
+
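The simplification works because `IOLoop.run_sync` already does what the removed thread-and-results plumbing did by hand: it drives the coroutine to completion and re-raises any exception in the caller. A standalone sketch using plain tornado, as the new tests do:

```python
import tornado.gen
import tornado.ioloop


@tornado.gen.coroutine
def fetch():
    yield tornado.gen.sleep(0.01)
    raise tornado.gen.Return(42)


io_loop = tornado.ioloop.IOLoop(make_current=False)
# run_sync blocks until the coroutine finishes; exceptions propagate
# directly, so no (success, result/exc_info) bookkeeping is needed.
print(io_loop.run_sync(fetch))  # -> 42
```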
diff --git a/do-not-fail-on-bad-message-pack-message-bsc-1213441-.patch b/do-not-fail-on-bad-message-pack-message-bsc-1213441-.patch
new file mode 100644
index 0000000..8a02fff
--- /dev/null
+++ b/do-not-fail-on-bad-message-pack-message-bsc-1213441-.patch
@@ -0,0 +1,155 @@
+From da544d7ab09899717e57a02321928ceaf3c6465c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 22 Aug 2023 11:43:46 +0100
+Subject: [PATCH] Do not fail on bad message pack message (bsc#1213441,
+ CVE-2023-20897) (#595)
+
+* Do not fail on bad message pack message
+
+Fix unit test after backporting to openSUSE/release/3006.0
+
+* Better error message when the decoded payload is inconsistent
+
+---------
+
+Co-authored-by: Daniel A. Wozniak
+---
+ salt/channel/server.py | 10 +++
+ salt/transport/zeromq.py | 6 +-
+ tests/pytests/unit/transport/test_zeromq.py | 69 +++++++++++++++++++++
+ 3 files changed, 84 insertions(+), 1 deletion(-)
+
+diff --git a/salt/channel/server.py b/salt/channel/server.py
+index a2117f2934..b6d51fef08 100644
+--- a/salt/channel/server.py
++++ b/salt/channel/server.py
+@@ -22,6 +22,7 @@ import salt.utils.minions
+ import salt.utils.platform
+ import salt.utils.stringutils
+ import salt.utils.verify
++from salt.exceptions import SaltDeserializationError
+ from salt.utils.cache import CacheCli
+
+ try:
+@@ -252,6 +253,15 @@ class ReqServerChannel:
+ return False
+
+ def _decode_payload(self, payload):
++ # Sometimes msgpack deserialization of random bytes could be successful,
++ # so we need to ensure the payload is in good shape before processing it.
++ if (
++ not isinstance(payload, dict)
++ or "enc" not in payload
++ or "load" not in payload
++ ):
++ raise SaltDeserializationError("bad load received on socket!")
++
+ # we need to decrypt it
+ if payload["enc"] == "aes":
+ try:
+diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py
+index 3ec7f7726c..7cc6b9987f 100644
+--- a/salt/transport/zeromq.py
++++ b/salt/transport/zeromq.py
+@@ -428,7 +428,11 @@ class RequestServer(salt.transport.base.DaemonizedRequestServer):
+
+ @salt.ext.tornado.gen.coroutine
+ def handle_message(self, stream, payload):
+- payload = self.decode_payload(payload)
++ try:
++ payload = self.decode_payload(payload)
++ except salt.exceptions.SaltDeserializationError:
++ self.stream.send(self.encode_payload({"msg": "bad load"}))
++ return
+ # XXX: Is header really needed?
+ reply = yield self.message_handler(payload)
+ self.stream.send(self.encode_payload(reply))
+diff --git a/tests/pytests/unit/transport/test_zeromq.py b/tests/pytests/unit/transport/test_zeromq.py
+index 10bb4917b8..c7cbc53864 100644
+--- a/tests/pytests/unit/transport/test_zeromq.py
++++ b/tests/pytests/unit/transport/test_zeromq.py
+@@ -11,6 +11,7 @@ import threading
+ import time
+ import uuid
+
++import msgpack
+ import pytest
+
+ import salt.channel.client
+@@ -1404,3 +1405,71 @@ async def test_req_chan_auth_v2_new_minion_without_master_pub(pki_dir, io_loop):
+ assert "sig" in ret
+ ret = client.auth.handle_signin_response(signin_payload, ret)
+ assert ret == "retry"
++
++
++async def test_req_server_garbage_request(io_loop):
++ """
++ Validate invalid msgpack messages will not raise exceptions in the
++ RequestServers's message handler.
++ """
++ opts = salt.config.master_config("")
++ request_server = salt.transport.zeromq.RequestServer(opts)
++
++ def message_handler(payload):
++ return payload
++
++ request_server.post_fork(message_handler, io_loop)
++
++ byts = msgpack.dumps({"foo": "bar"})
++ badbyts = byts[:3] + b"^M" + byts[3:]
++
++ valid_response = msgpack.dumps({"msg": "bad load"})
++
++ with MagicMock() as stream:
++ request_server.stream = stream
++
++ try:
++ await request_server.handle_message(stream, badbyts)
++ except Exception as exc: # pylint: disable=broad-except
++ pytest.fail("Exception was raised {}".format(exc))
++
++ request_server.stream.send.assert_called_once_with(valid_response)
++
++
++async def test_req_chan_bad_payload_to_decode(pki_dir, io_loop):
++ opts = {
++ "master_uri": "tcp://127.0.0.1:4506",
++ "interface": "127.0.0.1",
++ "ret_port": 4506,
++ "ipv6": False,
++ "sock_dir": ".",
++ "pki_dir": str(pki_dir.joinpath("minion")),
++ "id": "minion",
++ "__role": "minion",
++ "keysize": 4096,
++ "max_minions": 0,
++ "auto_accept": False,
++ "open_mode": False,
++ "key_pass": None,
++ "publish_port": 4505,
++ "auth_mode": 1,
++ "acceptance_wait_time": 3,
++ "acceptance_wait_time_max": 3,
++ }
++ SMaster.secrets["aes"] = {
++ "secret": multiprocessing.Array(
++ ctypes.c_char,
++ salt.utils.stringutils.to_bytes(salt.crypt.Crypticle.generate_key_string()),
++ ),
++ "reload": salt.crypt.Crypticle.generate_key_string,
++ }
++ master_opts = dict(opts, pki_dir=str(pki_dir.joinpath("master")))
++ master_opts["master_sign_pubkey"] = False
++ server = salt.channel.server.ReqServerChannel.factory(master_opts)
++
++ with pytest.raises(salt.exceptions.SaltDeserializationError):
++ server._decode_payload(None)
++ with pytest.raises(salt.exceptions.SaltDeserializationError):
++ server._decode_payload({})
++ with pytest.raises(salt.exceptions.SaltDeserializationError):
++ server._decode_payload(12345)
+--
+2.41.0
+
+
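The heart of the fix: msgpack can "successfully" decode random bytes into a non-dict value, so the shape has to be validated before any key is trusted. A self-contained sketch of the added check, with `ValueError` standing in for `SaltDeserializationError`:

```python
import msgpack


def decode_payload(raw):
    payload = msgpack.loads(raw, raw=False)
    # Random bytes may decode into an int, a list, etc., so validate the
    # shape before touching payload["enc"] or payload["load"]
    if not isinstance(payload, dict) or "enc" not in payload or "load" not in payload:
        raise ValueError("bad load received on socket!")
    return payload


good = msgpack.dumps({"enc": "aes", "load": b"..."})
print(decode_payload(good)["enc"])  # -> "aes"
```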
diff --git a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
new file mode 100644
index 0000000..74ff65f
--- /dev/null
+++ b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch
@@ -0,0 +1,46 @@
+From 4060d4cd24ac0fbcf83c1521553921d76c070a57 Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Fri, 21 Sep 2018 17:31:39 +0200
+Subject: [PATCH] Do not load pip state if there is no 3rd party
+ dependencies
+
+Safely import the 3rd-party dependency
+---
+ salt/modules/pip.py | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/salt/modules/pip.py b/salt/modules/pip.py
+index c4de0c2984..a60bdca0bb 100644
+--- a/salt/modules/pip.py
++++ b/salt/modules/pip.py
+@@ -96,6 +96,12 @@ import salt.utils.url
+ import salt.utils.versions
+ from salt.exceptions import CommandExecutionError, CommandNotFoundError
+
++try:
++ import pkg_resources
++except ImportError:
++ pkg_resources = None
++
++
+ # This needs to be named logger so we don't shadow it in pip.install
+ logger = logging.getLogger(__name__) # pylint: disable=invalid-name
+
+@@ -114,7 +120,12 @@ def __virtual__():
+ entire filesystem. If it's not installed in a conventional location, the
+ user is required to provide the location of pip each time it is used.
+ """
+- return "pip"
++ if pkg_resources is None:
++ ret = False, 'Package dependency "pkg_resources" is missing'
++ else:
++ ret = "pip"
++
++ return ret
+
+
+ def _pip_bin_env(cwd, bin_env):
+--
+2.39.2
+
+
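The guarded-import pattern in isolation: import the optional dependency at module load time, record `None` on failure, and let `__virtual__` turn that into a clean "module not loaded" reason instead of an ImportError traceback in the loader:

```python
try:
    import pkg_resources
except ImportError:
    pkg_resources = None  # noted here, reported in __virtual__


def __virtual__():
    if pkg_resources is None:
        return False, 'Package dependency "pkg_resources" is missing'
    return "pip"
```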
diff --git a/don-t-use-shell-sbin-nologin-in-requisites.patch b/don-t-use-shell-sbin-nologin-in-requisites.patch
new file mode 100644
index 0000000..481d356
--- /dev/null
+++ b/don-t-use-shell-sbin-nologin-in-requisites.patch
@@ -0,0 +1,39 @@
+From da6adc6984f21c0d93afff0b0ff55d0eb0ee3e9f Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 17 Aug 2021 11:52:00 +0200
+Subject: [PATCH] Don't use shell="/sbin/nologin" in requisites
+
+Using shell="/sbin/nologin" in an onlyif/unless requisite does not
+really make sense since the condition can't be run. shell=/sbin/nologin
+is also a common argument, e.g. for user.present.
+
+Fixes: bsc#1188259
+---
+ salt/state.py | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/salt/state.py b/salt/state.py
+index cb434a91e7..cda84a0fcb 100644
+--- a/salt/state.py
++++ b/salt/state.py
+@@ -986,9 +986,14 @@ class State:
+ cmd_opts[run_cmd_arg] = low_data.get(run_cmd_arg)
+
+ if "shell" in low_data and "shell" not in cmd_opts_exclude:
+- cmd_opts["shell"] = low_data["shell"]
++ shell = low_data["shell"]
+ elif "shell" in self.opts["grains"]:
+- cmd_opts["shell"] = self.opts["grains"].get("shell")
++ shell = self.opts["grains"].get("shell")
++ else:
++ shell = None
++ # /sbin/nologin always causes the onlyif / unless cmd to fail
++ if shell is not None and shell != "/sbin/nologin":
++ cmd_opts["shell"] = shell
+
+ if "onlyif" in low_data:
+ _ret = self._run_check_onlyif(low_data, cmd_opts)
+--
+2.39.2
+
+
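The selection logic, reduced to a sketch (simplified: the real code also honors `cmd_opts_exclude`): the shell still comes from the low data or the grains, but `/sbin/nologin` is dropped because an onlyif/unless command could never run under it:

```python
def pick_shell(low_data, grains):
    # Simplified sketch of the new shell selection in salt/state.py
    shell = low_data.get("shell") or grains.get("shell")
    # /sbin/nologin always makes the onlyif/unless command fail
    if shell and shell != "/sbin/nologin":
        return shell
    return None


print(pick_shell({"shell": "/sbin/nologin"}, {}))  # -> None
print(pick_shell({}, {"shell": "/bin/bash"}))      # -> /bin/bash
```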
diff --git a/drop-serial-from-event.unpack-in-cli.batch_async.patch b/drop-serial-from-event.unpack-in-cli.batch_async.patch
new file mode 100644
index 0000000..a550031
--- /dev/null
+++ b/drop-serial-from-event.unpack-in-cli.batch_async.patch
@@ -0,0 +1,34 @@
+From e7ef0b5a46cc69a9237033d8dc4dbc60c0802a20 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Mon, 31 Jan 2022 10:24:26 +0100
+Subject: [PATCH] Drop serial from event.unpack in cli.batch_async
+
+---
+ salt/cli/batch_async.py | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
+index 09aa85258b..1012ce37cc 100644
+--- a/salt/cli/batch_async.py
++++ b/salt/cli/batch_async.py
+@@ -9,7 +9,6 @@ import logging
+
+ import salt.client
+ import salt.ext.tornado
+-import tornado
+ from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum
+
+ log = logging.getLogger(__name__)
+@@ -109,7 +108,7 @@ class BatchAsync:
+ if not self.event:
+ return
+ try:
+- mtag, data = self.event.unpack(raw, self.event.serial)
++ mtag, data = self.event.unpack(raw)
+ for (pattern, op) in self.patterns:
+ if mtag.startswith(pattern[:-1]):
+ minion = data["id"]
+--
+2.39.2
+
+
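For reference, `event.unpack(raw)` splits the event tag from the msgpack body at the tag terminator, so the `serial` argument had become dead weight. A rough sketch, assuming Salt's `"\n\n"` (`TAGEND`) wire framing:

```python
import msgpack

TAGEND = b"\n\n"  # assumed salt.utils.event.TAGEND framing


def unpack(raw):
    # Split the tag off the wire data, then msgpack-decode the body,
    # roughly what SaltEvent.unpack(raw) does without a serializer argument.
    mtag, _, mdata = raw.partition(TAGEND)
    return mtag.decode(), msgpack.loads(mdata, raw=False)


raw = b"salt/job/123/ret/minion1\n\n" + msgpack.dumps({"id": "minion1"})
print(unpack(raw))  # -> ('salt/job/123/ret/minion1', {'id': 'minion1'})
```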
diff --git a/early-feature-support-config.patch b/early-feature-support-config.patch
new file mode 100644
index 0000000..8e66230
--- /dev/null
+++ b/early-feature-support-config.patch
@@ -0,0 +1,3784 @@
+From 7b47e6f19b38d773a6ec744209753f3d29b094ea Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 18 Jan 2022 16:40:45 +0100
+Subject: [PATCH] early feature: support-config
+
+Add support script function
+
+Add salt-support starter
+
+Initial support wrapper
+
+Add data collector skeleton
+
+Add default scenario of the support configuration
+
+Add main flow for the collector.
+
+Move support library to its own package
+
+Add default support collection scenario
+
+Add logging
+
+Handle CLI error.
+
+Update format of the default support scenario
+
+Default archive name
+
+Finalise local data collection
+
+Write archive from memory objects.
+
+Add colored console outputter for salt-support.
+
+Use colored outputter
+
+Add message output class
+
+Remove try/except capture from the scripts and move to the runner directly
+
+Implement output highlighter methods for CLI output
+
+Move scenarios to profiles
+
+Get return section from the output. Tolerate raw data.
+
+Implement internal data collector
+
+Add network stack examination to the default profile
+
+Add an internal filetree function
+
+Add a method to discard current session
+
+Add a method to link a static file to the resulting archive
+
+Implement internal function caller
+
+Add internal functions
+
+Add default root for the one-file support data
+
+Set output device
+
+Separate dynamic data and static files on the fs
+
+Update color theme
+
+Add ident to the error message
+
+Report rejected files with the ident
+
+Reuse system error exceptions and reduce stat on the file check
+
+Use socket name of the host machine
+
+Add options for profile and archive settings
+
+Use archive name from options.
+
+Get profile by config/options
+
+Cleanup broken archive on crash/exception
+
+Use profile from the options/configuration
+
+Add more colored messages :-)
+
+Initial implementation of get static profiles
+
+Update docstring
+
+Move PostgreSQL profile to its own
+
+Handle profile listing, do not yield sys.exit on specific module
+
+Add network profile
+
+Add Salt's profile
+
+Uncomment package profile
+
+Allow several profiles to be specified
+
+Remove comments, add parameter to get more profiles
+
+Implement existing configuration finder
+
+Add options to handle unit configurations
+
+Pre-parse options prior run() to choose proper configuration target
+
+Handle arg parse generic errors, unit mis-choose
+
+Let cleanup be aware of pre-config state
+
+Fix imports
+
+Handle exit codes properly
+
+Allow to overwrite existing archive
+
+Use py2/3 exceptions equally
+
+Include exit exception on debugging
+
+Render profiles as Jinja2, add basic recursive caller to the template of the profile
+
+Add "users" profile
+
+Implement basic caller for the profile template
+
+Add table output renderer
+
+Fix typo
+
+Remove table outputter
+
+Allow default outputters and specify outputters inside the profile
+
+Remove group.getent from the loop per each user
+
+Add table outputter to network profile
+
+Add text outputter to hostname/fqdn data
+
+Remove network part from the default profile. Add text/table outputters.
+
+Fix Py3 compat
+
+Collect status (initial)
+
+Avoid irrelevant to profile files
+
+Add job profiles
+
+Add profile template trace
+
+Add inspection through the runners
+
+Allow parameters in callers and runners
+
+Handle non-dict iterables
+
+Highlight template content in the trace log
+
+Add return extractor from the local call returns
+
+Move local runner to its own namespace
+
+Lintfix: PEP8
+
+Remove duplicate code
+
+Fix caller return
+
+Add description tag to the scenario
+
+Add generic colored message
+
+Add wrapping function. NOTE: it should be refactored with the other similar functions
+
+Print description while processing the scenario
+
+Turn off default profile and print help instead
+
+Move command-line check before collector
+
+Do not verify archive if help needs to be printed
+
+Add console output unit test for indent output
+
+Fix docstring
+
+Rename test class
+
+Refactor test to add setup/teardown
+
+Add unit test to verify indent
+
+Use direct constants instead of encoded strings
+
+Add unit test for color indent rotation check
+
+Add a test case for Collector class
+
+Add unit test for closing the archive
+
+Add unit test for add/write sections on the collector object
+
+Add test for linking an external file
+
+Cleanup tests on tear-down method
+
+Add call count check
+
+Add unit test for support collection section discard
+
+Add unittest for SaltSupport's function config preparation
+
+Fix docstring
+
+Add unit test for local caller
+
+Add unit test for local runner
+
+Add unit test for internal function call
+
+Add unit test for getting an action description from the action meta
+
+Add unit test for internal function call
+
+Add unit test for return extraction
+
+Add unit test for determine action type from the action meta
+
+Add unit test for cleanup routine
+
+Fix typo of method name
+
+Add unit test for check existing archive
+
+Add test suite for profile testing
+
+Add unit test for default profile is YAML-parseable
+
+Add unit test for user template profile rendering
+
+Update unit test for all non-template profiles parse check
+
+Add function to render a Jinja2 template by name
+
+Use template rendering function
+
+Add unit test on jobs-trace template for runner
+
+Move function above the tests
+
+Add current logfile, if defined in configuration
+
+Bugfix: ignore logfile, if path was not found or not defined or is None
+
+Lintfix: iteration over .keys()
+
+Remove template "salt" from non-template checks
+
+Lintfix: use salt.utils.files.fopen for resource leak prevention
+
+Lintfix: PEP8 E302: expected 2 blank lines, found 0
+
+Lintfix: use salt.utils.files.fopen instead of open
+
+Lintfix: PEP8 E303: too many blank lines (3)
+
+Lintfix: Uses of an external blacklisted import 'six': Please use 'import salt.ext.six as six'
+
+Lintfix: use salt.utils.files.fopen instead of open
+
+Fix unit tests
+
+Fix six import
+
+Mute pylint: file handler explicitly needed
+
+Lintfix: explicitly close filehandle
+
+Lintfix: mute fopen warning
+
+Remove development stub. Ughh...
+
+Removed blacklist of pkg_resources
+
+Make profiles a package.
+
+Add UTF-8 encoding
+
+Add a docstring
+
+Fix support-config non-root permission issues (U#50095)
+
+Do not crash if there is no configuration available at all
+
+Handle CLI and log errors
+
+Catch the error when overwriting an existing archive created by other users
+
+Suppress excessive tracebacks on error log level
+
+Add multi-file support and globbing to the filetree (U#50018)
+
+Add more possible logs
+
+Support multiple files grabbing
+
+Collect system logs and boot logs
+
+Support globbing in filetree
+
+Add supportconfig module for remote calls and SaltSSH
+
+Add log collector for remote purposes
+
+Implement default archive name
+
+Fix imports
+
+Implement runner function
+
+Remove targets data collector function as it is now called by a module instead
+
+Add external method decorator marker
+
+Add utility class for detecting exportable methods
+
+Mark run method as an external function
+
+Implement function setter
+
+Fix imports
+
+Setup config from __opts__
+
+Use utility class
+
+Remove utils class
+
+Allow specify profile from the API parameter directly
+
+Rename module by virtual name
+
+Bypass parent subclass
+
+Implement profiles listing (local only for now)
+
+Specify profile from the state/call
+
+Set default or personalised archive name
+
+Add archives lister
+
+Add personalised name element to the archive name
+
+Use proper args/kwargs to the exported function
+
+Add archives deletion function
+
+Change log level when debugging rendered profiles
+
+Add ability to directly pass profile source when taking local data
+
+Add pillar profile support
+
+Remove extra-line
+
+Fix header
+
+Change output format for deleting archives
+
+Refactor logger output format
+
+Add time/milliseconds to each log notification
+
+Fix imports
+
+Switch output destination by context
+
+Add last archive function
+
+Lintfix
+
+Return consistent type
+
+Change output format for deleted archives report
+
+Implement report archive syncing to the reporting node
+
+Send multiple files at once via rsync, instead of send one after another
+
+Add sync stats formatter
+
+Change signature: cleanup -> move. Update docstring.
+
+Flush empty data from the output format
+
+Report archfiles activity
+
+Refactor imports
+
+Do not remove retcode if it is EX_OK
+
+Do not raise rsync error for undefined archives.
+
+Update header
+
+Add salt-support state module
+
+Move all functions into a callable class object
+
+Support __call__ function in state and command modules as default entrance that does not need to be specified in SLS state syntax
+
+Access from the outside only allowed class methods
+
+Pre-create destination of the archive, preventing single archive copied as a group name
+
+Handle functions exceptions
+
+Add unit test scaffold
+
+Add LogCollector UT for testing regular message
+
+Add LogCollector UT for testing INFO message
+
+Add LogCollector UT for testing WARNING message
+
+Replace hardcoded variables with defined constants
+
+Add LogCollector UT for testing ERROR message
+
+Test title attribute in msg method of LogCollector
+
+Add UT for LogCollector on highlighter method
+
+Add UT for LogCollector on put method
+
+Fix docstrings
+
+Add UT for archive name generator
+
+Add UT for custom archive name
+
+Fix docstring for the UT
+
+Add UT for checking profiles list format
+
+Add Unit Test for existing archives listing
+
+Add UT for the last archive function
+
+Create instance of the support class
+
+Add UT for successfully deleting all archives
+
+Add UT for deleting archives with failures
+
+Add UT for formatting sync stats and order preservation
+
+Add UT for testing sync failure when no archives has been specified
+
+Add UT for last picked archive has not found
+
+Add UT for last specified archive was not found
+
+Bugfix: do not create an array with None element in it
+
+Fix UT for found bugfix
+
+Add UT for syncing no archives failure
+
+Add UT for sync function
+
+Add UT for run support function
+
+Fix docstring for function "run"
+
+lintfix: use 'salt.support.mock' and 'patch()'
+
+Rewrite subdirectory creation and do not rely on Python3-only code
+
+Lintfix: remove unused imports
+
+Lintfix: regexp strings
+
+Break-down oneliner if/else clause
+
+Use ordered dictionary to preserve order of the state.
+
+This has transparent effect to the current process: OrderedDict is the
+same as just Python dict, except it is preserving order of the state
+chunks.
+
+Refactor state processing class.
+
+Add __call__ function to process single-id syntax
+
+Add backward-compatibility with default SLS syntax (id-per-call)
+
+Lintfix: E1120 no value in argument 'name' for class constructor
+
+Remove unused import
+
+Check last function by full name
+---
+ doc/ref/modules/all/index.rst | 1 +
+ doc/ref/states/all/index.rst | 1 +
+ salt/cli/support/__init__.py | 76 +++
+ salt/cli/support/collector.py | 563 ++++++++++++++++++++++
+ salt/cli/support/console.py | 184 +++++++
+ salt/cli/support/intfunc.py | 51 ++
+ salt/cli/support/localrunner.py | 33 ++
+ salt/cli/support/profiles/__init__.py | 4 +
+ salt/cli/support/profiles/default.yml | 78 +++
+ salt/cli/support/profiles/jobs-active.yml | 3 +
+ salt/cli/support/profiles/jobs-last.yml | 3 +
+ salt/cli/support/profiles/jobs-trace.yml | 7 +
+ salt/cli/support/profiles/network.yml | 27 ++
+ salt/cli/support/profiles/postgres.yml | 11 +
+ salt/cli/support/profiles/salt.yml | 9 +
+ salt/cli/support/profiles/users.yml | 22 +
+ salt/loader/lazy.py | 6 +-
+ salt/modules/saltsupport.py | 405 ++++++++++++++++
+ salt/scripts.py | 15 +
+ salt/state.py | 38 +-
+ salt/states/saltsupport.py | 225 +++++++++
+ salt/utils/args.py | 3 +-
+ salt/utils/decorators/__init__.py | 24 +
+ salt/utils/parsers.py | 114 +++++
+ scripts/salt-support | 11 +
+ setup.py | 2 +
+ tests/pytests/unit/cli/test_support.py | 553 +++++++++++++++++++++
+ tests/unit/modules/test_saltsupport.py | 496 +++++++++++++++++++
+ 28 files changed, 2958 insertions(+), 7 deletions(-)
+ create mode 100644 salt/cli/support/__init__.py
+ create mode 100644 salt/cli/support/collector.py
+ create mode 100644 salt/cli/support/console.py
+ create mode 100644 salt/cli/support/intfunc.py
+ create mode 100644 salt/cli/support/localrunner.py
+ create mode 100644 salt/cli/support/profiles/__init__.py
+ create mode 100644 salt/cli/support/profiles/default.yml
+ create mode 100644 salt/cli/support/profiles/jobs-active.yml
+ create mode 100644 salt/cli/support/profiles/jobs-last.yml
+ create mode 100644 salt/cli/support/profiles/jobs-trace.yml
+ create mode 100644 salt/cli/support/profiles/network.yml
+ create mode 100644 salt/cli/support/profiles/postgres.yml
+ create mode 100644 salt/cli/support/profiles/salt.yml
+ create mode 100644 salt/cli/support/profiles/users.yml
+ create mode 100644 salt/modules/saltsupport.py
+ create mode 100644 salt/states/saltsupport.py
+ create mode 100755 scripts/salt-support
+ create mode 100644 tests/pytests/unit/cli/test_support.py
+ create mode 100644 tests/unit/modules/test_saltsupport.py
+
+diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
+index cbd8b0cdc5..abd40e0bc7 100644
+--- a/doc/ref/modules/all/index.rst
++++ b/doc/ref/modules/all/index.rst
+@@ -416,6 +416,7 @@ execution modules
+ salt_version
+ saltcheck
+ saltcloudmod
++ saltsupport
+ saltutil
+ schedule
+ scp_mod
+diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst
+index 13ff645b59..7a062c227b 100644
+--- a/doc/ref/states/all/index.rst
++++ b/doc/ref/states/all/index.rst
+@@ -283,6 +283,7 @@ state modules
+ rvm
+ salt_proxy
+ saltmod
++ saltsupport
+ saltutil
+ schedule
+ selinux
+diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py
+new file mode 100644
+index 0000000000..59c2609e07
+--- /dev/null
++++ b/salt/cli/support/__init__.py
+@@ -0,0 +1,76 @@
++"""
++Get the default support scenario.
++"""
++import logging
++import os
++
++import jinja2
++import salt.exceptions
++import yaml
++
++log = logging.getLogger(__name__)
++
++
++def _render_profile(path, caller, runner):
++ """
++ Render profile as Jinja2.
++ :param path:
++ :return:
++ """
++ env = jinja2.Environment(
++ loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False
++ )
++ return (
++ env.get_template(os.path.basename(path))
++ .render(salt=caller, runners=runner)
++ .strip()
++ )
++
++
++def get_profile(profile, caller, runner):
++ """
++ Get profile.
++
++ :param profile:
++ :return:
++ """
++ profiles = profile.split(",")
++ data = {}
++ for profile in profiles:
++ if os.path.basename(profile) == profile:
++ profile = profile.split(".")[0] # Trim extension if someone added it
++ profile_path = os.path.join(
++ os.path.dirname(__file__), "profiles", profile + ".yml"
++ )
++ else:
++ profile_path = profile
++ if os.path.exists(profile_path):
++ try:
++ rendered_template = _render_profile(profile_path, caller, runner)
++ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template))
++ data.update(yaml.load(rendered_template))
++ except Exception as ex:
++ log.debug(ex, exc_info=True)
++ raise salt.exceptions.SaltException(
++ "Rendering profile failed: {}".format(ex)
++ )
++ else:
++ raise salt.exceptions.SaltException(
++ 'Profile "{}" is not found.'.format(profile)
++ )
++
++ return data
++
++
++def get_profiles(config):
++ """
++ Get available profiles.
++
++ :return:
++ """
++ profiles = []
++ for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), "profiles")):
++ if profile_name.endswith(".yml"):
++ profiles.append(profile_name.split(".")[0])
++
++ return sorted(profiles)
+diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py
+new file mode 100644
+index 0000000000..1879cc5220
+--- /dev/null
++++ b/salt/cli/support/collector.py
+@@ -0,0 +1,563 @@
++import builtins as exceptions
++import copy
++import json
++import logging
++import os
++import sys
++import tarfile
++import time
++from io import BytesIO
++from io import IOBase as file
++
++import salt.cli.caller
++import salt.cli.support
++import salt.cli.support.console
++import salt.cli.support.intfunc
++import salt.cli.support.localrunner
++import salt.defaults.exitcodes
++import salt.exceptions
++import salt.ext.six as six
++import salt.output.table_out
++import salt.runner
++import salt.utils.files
++import salt.utils.parsers
++import salt.utils.platform
++import salt.utils.process
++import salt.utils.stringutils
++import salt.utils.verify
++import yaml
++
++salt.output.table_out.__opts__ = {}
++log = logging.getLogger(__name__)
++
++
++class SupportDataCollector:
++ """
++ Data collector. It behaves just like another outputter,
++ except it writes the data to the archive files.
++ """
++
++ def __init__(self, name, output):
++ """
++ constructor of the data collector
++ :param name:
++ :param path:
++ :param format:
++ """
++ self.archive_path = name
++ self.__default_outputter = output
++ self.__format = format
++ self.__arch = None
++ self.__current_section = None
++ self.__current_section_name = None
++ self.__default_root = time.strftime("%Y.%m.%d-%H.%M.%S-snapshot")
++ self.out = salt.cli.support.console.MessagesOutput()
++
++ def open(self):
++ """
++ Opens archive.
++ :return:
++ """
++ if self.__arch is not None:
++ raise salt.exceptions.SaltException("Archive already opened.")
++ self.__arch = tarfile.TarFile.bz2open(self.archive_path, "w")
++
++ def close(self):
++ """
++ Closes the archive.
++ :return:
++ """
++ if self.__arch is None:
++ raise salt.exceptions.SaltException("Archive already closed")
++ self._flush_content()
++ self.__arch.close()
++ self.__arch = None
++
++ def _flush_content(self):
++ """
++ Flush content to the archive
++ :return:
++ """
++ if self.__current_section is not None:
++ buff = BytesIO()
++ buff._dirty = False
++ for action_return in self.__current_section:
++ for title, ret_data in action_return.items():
++ if isinstance(ret_data, file):
++ self.out.put(ret_data.name, indent=4)
++ self.__arch.add(ret_data.name, arcname=ret_data.name)
++ else:
++ buff.write(salt.utils.stringutils.to_bytes(title + "\n"))
++ buff.write(
++ salt.utils.stringutils.to_bytes(("-" * len(title)) + "\n\n")
++ )
++ buff.write(salt.utils.stringutils.to_bytes(ret_data))
++ buff.write(salt.utils.stringutils.to_bytes("\n\n\n"))
++ buff._dirty = True
++ if buff._dirty:
++ buff.seek(0)
++ tar_info = tarfile.TarInfo(
++ name="{}/{}".format(
++ self.__default_root, self.__current_section_name
++ )
++ )
++ if not hasattr(buff, "getbuffer"): # Py2's BytesIO is older
++ buff.getbuffer = buff.getvalue
++ tar_info.size = len(buff.getbuffer())
++ self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
++
++ def add(self, name):
++ """
++ Start a new section.
++ :param name:
++ :return:
++ """
++ if self.__current_section:
++ self._flush_content()
++ self.discard_current(name)
++
++ def discard_current(self, name=None):
++ """
++ Discard current section
++ :return:
++ """
++ self.__current_section = []
++ self.__current_section_name = name
++
++ def _printout(self, data, output):
++ """
++ Use salt outputter to printout content.
++
++ :return:
++ """
++ opts = {"extension_modules": "", "color": False}
++ try:
++ printout = salt.output.get_printout(output, opts)(data)
++ if printout is not None:
++ return printout.rstrip()
++ except (KeyError, AttributeError, TypeError) as err:
++ log.debug(err, exc_info=True)
++ try:
++ printout = salt.output.get_printout("nested", opts)(data)
++ if printout is not None:
++ return printout.rstrip()
++ except (KeyError, AttributeError, TypeError) as err:
++ log.debug(err, exc_info=True)
++ printout = salt.output.get_printout("raw", opts)(data)
++ if printout is not None:
++ return printout.rstrip()
++
++ return salt.output.try_printout(data, output, opts)
++
++ def write(self, title, data, output=None):
++ """
++ Add a data to the current opened section.
++ :return:
++ """
++ if not isinstance(data, (dict, list, tuple)):
++ data = {"raw-content": str(data)}
++ output = output or self.__default_outputter
++
++ if output != "null":
++ try:
++ if isinstance(data, dict) and "return" in data:
++ data = data["return"]
++ content = self._printout(data, output)
++ except Exception: # Fall-back to just raw YAML
++ content = None
++ else:
++ content = None
++
++ if content is None:
++ data = json.loads(json.dumps(data))
++ if isinstance(data, dict) and data.get("return"):
++ data = data.get("return")
++ content = yaml.safe_dump(data, default_flow_style=False, indent=4)
++
++ self.__current_section.append({title: content})
++
++ def link(self, title, path):
++ """
++ Add a static file on the file system.
++
++ :param title:
++ :param path:
++ :return:
++ """
++ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
++ # pylint: disable=W8470
++ if not isinstance(path, file):
++ path = salt.utils.files.fopen(path)
++ self.__current_section.append({title: path})
++ # pylint: enable=W8470
++
++
++class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
++ """
++ Class to run Salt Support subsystem.
++ """
++
++ RUNNER_TYPE = "run"
++ CALL_TYPE = "call"
++
++ def _setup_fun_config(self, fun_conf):
++ """
++ Setup function configuration.
++
++ :param conf:
++ :return:
++ """
++ conf = copy.deepcopy(self.config)
++ conf["file_client"] = "local"
++ conf["fun"] = ""
++ conf["arg"] = []
++ conf["kwarg"] = {}
++ conf["cache_jobs"] = False
++ conf["print_metadata"] = False
++ conf.update(fun_conf)
++ conf["fun"] = conf["fun"].split(":")[-1] # Discard typing prefix
++
++ return conf
++
++ def _get_runner(self, conf):
++ """
++ Get & setup runner.
++
++ :param conf:
++ :return:
++ """
++ conf = self._setup_fun_config(copy.deepcopy(conf))
++ if not getattr(self, "_runner", None):
++ self._runner = salt.cli.support.localrunner.LocalRunner(conf)
++ else:
++ self._runner.opts = conf
++ return self._runner
++
++ def _get_caller(self, conf):
++ """
++ Get & setup caller from the factory.
++
++ :param conf:
++ :return:
++ """
++ conf = self._setup_fun_config(copy.deepcopy(conf))
++ if not getattr(self, "_caller", None):
++ self._caller = salt.cli.caller.Caller.factory(conf)
++ else:
++ self._caller.opts = conf
++ return self._caller
++
++ def _local_call(self, call_conf):
++ """
++ Execute local call
++ """
++ try:
++ ret = self._get_caller(call_conf).call()
++ except SystemExit:
++ ret = "Data is not available at this moment"
++ self.out.error(ret)
++ except Exception as ex:
++ ret = "Unhandled exception occurred: {}".format(ex)
++ log.debug(ex, exc_info=True)
++ self.out.error(ret)
++
++ return ret
++
++ def _local_run(self, run_conf):
++ """
++ Execute local runner
++
++ :param run_conf:
++ :return:
++ """
++ try:
++ ret = self._get_runner(run_conf).run()
++ except SystemExit:
++ ret = "Runner is not available at this moment"
++ self.out.error(ret)
++ except Exception as ex:
++ ret = "Unhandled exception occurred: {}".format(ex)
++ log.debug(ex, exc_info=True)
++
++ return ret
++
++ def _internal_function_call(self, call_conf):
++ """
++ Call internal function.
++
++ :param call_conf:
++ :return:
++ """
++
++ def stub(*args, **kwargs):
++ message = "Function {} is not available".format(call_conf["fun"])
++ self.out.error(message)
++ log.debug(
++ 'Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format(
++ **call_conf
++ )
++ )
++ return message
++
++ return getattr(salt.cli.support.intfunc, call_conf["fun"], stub)(
++ self.collector, *call_conf["arg"], **call_conf["kwargs"]
++ )
++
++ def _get_action(self, action_meta):
++ """
++ Parse action and turn into a calling point.
++ :param action_meta:
++ :return:
++ """
++ conf = {
++ "fun": list(action_meta.keys())[0],
++ "arg": [],
++ "kwargs": {},
++ }
++ if not len(conf["fun"].split(".")) - 1:
++ conf["salt.int.intfunc"] = True
++
++ action_meta = action_meta[conf["fun"]]
++ info = action_meta.get("info", "Action for {}".format(conf["fun"]))
++ for arg in action_meta.get("args") or []:
++ if not isinstance(arg, dict):
++ conf["arg"].append(arg)
++ else:
++ conf["kwargs"].update(arg)
++
++ return info, action_meta.get("output"), conf
++
++ def collect_internal_data(self):
++ """
++ Dumps current running pillars, configuration etc.
++ :return:
++ """
++ section = "configuration"
++ self.out.put(section)
++ self.collector.add(section)
++ self.out.put("Saving config", indent=2)
++ self.collector.write("General Configuration", self.config)
++ self.out.put("Saving pillars", indent=2)
++ self.collector.write(
++ "Active Pillars", self._local_call({"fun": "pillar.items"})
++ )
++
++ section = "highstate"
++ self.out.put(section)
++ self.collector.add(section)
++ self.out.put("Saving highstate", indent=2)
++ self.collector.write(
++ "Rendered highstate", self._local_call({"fun": "state.show_highstate"})
++ )
++
++ def _extract_return(self, data):
++ """
++ Extracts return data from the results.
++
++ :param data:
++ :return:
++ """
++ if isinstance(data, dict):
++ data = data.get("return", data)
++
++ return data
++
++ def collect_local_data(self, profile=None, profile_source=None):
++ """
++ Collects master system data.
++ :return:
++ """
++
++ def call(func, *args, **kwargs):
++ """
++ Call wrapper for templates
++ :param func:
++ :return:
++ """
++ return self._extract_return(
++ self._local_call({"fun": func, "arg": args, "kwarg": kwargs})
++ )
++
++ def run(func, *args, **kwargs):
++ """
++ Runner wrapper for templates
++ :param func:
++ :return:
++ """
++ return self._extract_return(
++ self._local_run({"fun": func, "arg": args, "kwarg": kwargs})
++ )
++
++ scenario = profile_source or salt.cli.support.get_profile(
++ profile or self.config["support_profile"], call, run
++ )
++ for category_name in scenario:
++ self.out.put(category_name)
++ self.collector.add(category_name)
++ for action in scenario[category_name]:
++ if not action:
++ continue
++ action_name = next(iter(action))
++ if not isinstance(action[action_name], str):
++ info, output, conf = self._get_action(action)
++ action_type = self._get_action_type(
++ action
++ ) # run: for runners
++ if action_type == self.RUNNER_TYPE:
++ self.out.put("Running {}".format(info.lower()), indent=2)
++ self.collector.write(info, self._local_run(conf), output=output)
++ elif action_type == self.CALL_TYPE:
++ if not conf.get("salt.int.intfunc"):
++ self.out.put("Collecting {}".format(info.lower()), indent=2)
++ self.collector.write(
++ info, self._local_call(conf), output=output
++ )
++ else:
++ self.collector.discard_current()
++ self._internal_function_call(conf)
++ else:
++ self.out.error(
++ 'Unknown action type "{}" for action: {}'.format(
++ action_type, action
++ )
++ )
++ else:
++ # TODO: This needs to be moved then to the utils.
++ # But the code is not yet there (other PRs)
++ self.out.msg(
++ "\n".join(salt.cli.support.console.wrap(action[action_name])),
++ ident=2,
++ )
++
++ def _get_action_type(self, action):
++ """
++ Get action type.
++ :param action:
++ :return:
++ """
++ action_name = next(iter(action or {"": None}))
++ if ":" not in action_name:
++ action_name = "{}:{}".format(self.CALL_TYPE, action_name)
++
++ return action_name.split(":")[0] or None
++
++ def _cleanup(self):
++ """
++ Cleanup if crash/exception
++ :return:
++ """
++ if (
++ hasattr(self, "config")
++ and self.config.get("support_archive")
++ and os.path.exists(self.config["support_archive"])
++ ):
++ self.out.warning("Terminated earlier, cleaning up")
++ try:
++ os.unlink(self.config["support_archive"])
++ except Exception as err:
++ log.debug(err)
++ self.out.error("{} while cleaning up.".format(err))
++
++ def _check_existing_archive(self):
++ """
++ Check if archive exists or not. If exists and --force was not specified,
++ bail out. Otherwise remove it and move on.
++
++ :return:
++ """
++ if os.path.exists(self.config["support_archive"]):
++ if self.config["support_archive_force_overwrite"]:
++ self.out.warning(
++ "Overwriting existing archive: {}".format(
++ self.config["support_archive"]
++ )
++ )
++ try:
++ os.unlink(self.config["support_archive"])
++ except Exception as err:
++ log.debug(err)
++ self.out.error(
++ "{} while trying to overwrite existing archive.".format(err)
++ )
++ ret = True
++ else:
++ self.out.warning(
++ "File {} already exists.".format(self.config["support_archive"])
++ )
++ ret = False
++ else:
++ ret = True
++
++ return ret
++
++ def run(self):
++ exit_code = salt.defaults.exitcodes.EX_OK
++ self.out = salt.cli.support.console.MessagesOutput()
++ try:
++ self.parse_args()
++ except (Exception, SystemExit) as ex:
++            if isinstance(ex, exceptions.SystemExit):
++                exit_code = ex.code
++            else:
++                exit_code = salt.defaults.exitcodes.EX_GENERIC
++                self.out.error(ex)
++ else:
++ if self.config["log_level"] not in ("quiet",):
++ self.setup_logfile_logger()
++ salt.utils.verify.verify_log(self.config)
++            salt.cli.support.log = log  # Pass updated logger so trace is available
++
++ if self.config["support_profile_list"]:
++ self.out.put("List of available profiles:")
++ for idx, profile in enumerate(
++ salt.cli.support.get_profiles(self.config)
++ ):
++ msg_template = " {}. ".format(idx + 1) + "{}"
++ self.out.highlight(msg_template, profile)
++ exit_code = salt.defaults.exitcodes.EX_OK
++ elif self.config["support_show_units"]:
++ self.out.put("List of available units:")
++ for idx, unit in enumerate(self.find_existing_configs(None)):
++ msg_template = " {}. ".format(idx + 1) + "{}"
++ self.out.highlight(msg_template, unit)
++ exit_code = salt.defaults.exitcodes.EX_OK
++ else:
++ if not self.config["support_profile"]:
++ self.print_help()
++ raise SystemExit()
++
++ if self._check_existing_archive():
++ try:
++ self.collector = SupportDataCollector(
++ self.config["support_archive"],
++ output=self.config["support_output_format"],
++ )
++ except Exception as ex:
++ self.out.error(ex)
++ exit_code = salt.defaults.exitcodes.EX_GENERIC
++ log.debug(ex, exc_info=True)
++ else:
++ try:
++ self.collector.open()
++ self.collect_local_data()
++ self.collect_internal_data()
++ self.collector.close()
++
++ archive_path = self.collector.archive_path
++ self.out.highlight(
++                            '\nSupport data has been written to "{}".\n',
++ archive_path,
++ _main="YELLOW",
++ )
++ except Exception as ex:
++ self.out.error(ex)
++ log.debug(ex, exc_info=True)
++ exit_code = salt.defaults.exitcodes.EX_SOFTWARE
++
++ if exit_code:
++ self._cleanup()
++
++ sys.exit(exit_code)
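For orientation, a minimal sketch of the collector lifecycle that the run() flow above drives; the archive path and section data are illustrative only:

    from salt.cli.support.collector import SupportDataCollector

    collector = SupportDataCollector("/tmp/minion-support.tar.bz2", output="yaml")
    collector.open()                                  # create the archive handle
    collector.add("sysinfo")                          # start a named section
    collector.write("System grains", {"os": "SUSE"})  # queue serialized data
    collector.close()                                 # flush sections into the archive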
+diff --git a/salt/cli/support/console.py b/salt/cli/support/console.py
+new file mode 100644
+index 0000000000..266b645479
+--- /dev/null
++++ b/salt/cli/support/console.py
+@@ -0,0 +1,184 @@
++"""
++Collection of tools to report messages to console.
++
++NOTE: This is expected to absorb other formatting bits
++      from around the codebase and then be moved to utils.
++"""
++
++
++import os
++import sys
++import textwrap
++
++import salt.utils.color
++
++
++class IndentOutput:
++ """
++ Paint different indends in different output.
++ """
++
++ def __init__(self, conf=None, device=sys.stdout):
++ if conf is None:
++ conf = {0: "CYAN", 2: "GREEN", 4: "LIGHT_BLUE", 6: "BLUE"}
++ self._colors_conf = conf
++ self._device = device
++ self._colors = salt.utils.color.get_colors()
++ self._default_color = "GREEN"
++ self._default_hl_color = "LIGHT_GREEN"
++
++ def put(self, message, indent=0):
++ """
++ Print message with an indent.
++
++ :param message:
++ :param indent:
++ :return:
++ """
++ color = self._colors_conf.get(
++ indent + indent % 2, self._colors_conf.get(0, self._default_color)
++ )
++
++ for chunk in [" " * indent, self._colors[color], message, self._colors["ENDC"]]:
++ self._device.write(str(chunk))
++ self._device.write(os.linesep)
++ self._device.flush()
++
++
++class MessagesOutput(IndentOutput):
++ """
++ Messages output to the CLI.
++ """
++
++ def msg(self, message, title=None, title_color=None, color="BLUE", ident=0):
++ """
++        Print a hint message with an optional title.
++
++ :param message:
++ :param title:
++ :param title_color:
++ :param color:
++ :param ident:
++ :return:
++ """
++ if title and not title_color:
++ title_color = color
++ if title_color and not title:
++ title_color = None
++
++ self.__colored_output(title, message, title_color, color, ident=ident)
++
++ def info(self, message, ident=0):
++ """
++ Write an info message to the CLI.
++
++ :param message:
++ :param ident:
++ :return:
++ """
++ self.__colored_output("Info", message, "GREEN", "LIGHT_GREEN", ident=ident)
++
++ def warning(self, message, ident=0):
++ """
++ Write a warning message to the CLI.
++
++ :param message:
++ :param ident:
++ :return:
++ """
++ self.__colored_output("Warning", message, "YELLOW", "LIGHT_YELLOW", ident=ident)
++
++ def error(self, message, ident=0):
++ """
++ Write an error message to the CLI.
++
++ :param message:
++        :param ident:
++ :return:
++ """
++ self.__colored_output("Error", message, "RED", "LIGHT_RED", ident=ident)
++
++ def __colored_output(self, title, message, title_color, message_color, ident=0):
++ if title and not title.endswith(":"):
++ _linesep = title.endswith(os.linesep)
++ title = "{}:{}".format(title.strip(), _linesep and os.linesep or " ")
++
++ for chunk in [
++ title_color and self._colors[title_color] or None,
++ " " * ident,
++ title,
++ self._colors[message_color],
++ message,
++ self._colors["ENDC"],
++ ]:
++ if chunk:
++ self._device.write(str(chunk))
++ self._device.write(os.linesep)
++ self._device.flush()
++
++ def highlight(self, message, *values, **colors):
++ """
++        The "message" parameter is a template; "values" are the arguments
++        substituted into it, one after another. The "colors" keyword arguments
++        set the main color and the highlight color for the values.
++
++        Example:
++
++           highlight('Hello {}, there! It is {}.', 'user', 'daytime', _main='GREEN', _highlight='RED')
++           highlight('Hello {}, there! It is {}.', 'user', 'daytime', _main='GREEN', **{'daytime': 'YELLOW'})
++
++        The first example highlights all values in the template in red.
++        The second example highlights the second value in yellow.
++
++ Usage:
++
++ colors:
++ _main: Sets the main color (or default is used)
++ _highlight: Sets the alternative color for everything
++ 'any phrase' that is the same in the "values" can override color.
++
++ :param message:
++        :param values:
++ :param colors:
++ :return:
++ """
++
++ m_color = colors.get("_main", self._default_color)
++ h_color = colors.get("_highlight", self._default_hl_color)
++
++ _values = []
++ for value in values:
++ _values.append(
++ "{p}{c}{r}".format(
++ p=self._colors[colors.get(value, h_color)],
++ c=value,
++ r=self._colors[m_color],
++ )
++ )
++ self._device.write(
++ "{s}{m}{e}".format(
++ s=self._colors[m_color],
++ m=message.format(*_values),
++ e=self._colors["ENDC"],
++ )
++ )
++ self._device.write(os.linesep)
++ self._device.flush()
++
++
++def wrap(txt, width=80, ident=0):
++ """
++    Wrap text to the required width and clean it up for display.
++
++ :param txt:
++ :param width:
++ :return:
++ """
++ ident = " " * ident
++ txt = (txt or "").replace(os.linesep, " ").strip()
++
++ wrapper = textwrap.TextWrapper()
++ wrapper.fix_sentence_endings = False
++ wrapper.initial_indent = wrapper.subsequent_indent = ident
++
++ return wrapper.wrap(txt)
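A short usage sketch of the console helpers above (the strings are hypothetical):

    from salt.cli.support.console import MessagesOutput

    out = MessagesOutput()
    out.put("sysinfo")                  # indent 0 is painted CYAN by default
    out.put("Saving grains", indent=2)  # indent 2 is painted GREEN by default
    # highlight() paints the values with _highlight and the rest with _main
    out.highlight('Archive written to "{}".', "/tmp/support.bz2", _main="YELLOW")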
+diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py
+new file mode 100644
+index 0000000000..a9f76a6003
+--- /dev/null
++++ b/salt/cli/support/intfunc.py
+@@ -0,0 +1,51 @@
++"""
++Internal functions.
++"""
++# Maybe this needs to be a module in the future?
++
++import glob
++import os
++
++import salt.utils.files
++from salt.cli.support.console import MessagesOutput
++
++out = MessagesOutput()
++
++
++def filetree(collector, *paths):
++ """
++    Add all files in the tree. If a path is a file,
++    only that file will be added.
++
++    :param paths: Files, directories or glob patterns
++ :return:
++ """
++ _paths = []
++ # Unglob
++ for path in paths:
++ _paths += glob.glob(path)
++ for path in set(_paths):
++ if not path:
++ out.error("Path not defined", ident=2)
++ elif not os.path.exists(path):
++ out.warning("Path {} does not exists".format(path))
++ else:
++ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
++ # pylint: disable=W8470
++ if os.path.isfile(path):
++ filename = os.path.basename(path)
++ try:
++ file_ref = salt.utils.files.fopen(path) # pylint: disable=W
++ out.put("Add {}".format(filename), indent=2)
++ collector.add(filename)
++ collector.link(title=path, path=file_ref)
++ except Exception as err:
++ out.error(err, ident=4)
++ # pylint: enable=W8470
++ else:
++ try:
++ for fname in os.listdir(path):
++ fname = os.path.join(path, fname)
++                    filetree(collector, fname)  # pass a path, not a list
++ except Exception as err:
++ out.error(err, ident=4)
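In practice filetree() is reached through a profile action such as the boot_log entry in profiles/default.yml below; a hedged direct invocation would look like:

    from salt.cli.support.collector import SupportDataCollector
    from salt.cli.support.intfunc import filetree

    collector = SupportDataCollector("/tmp/logs.tar.bz2", output="null")
    collector.open()
    collector.add("boot_log")
    filetree(collector, "/var/log/boot.*")  # globs expand; directories recurse
    collector.close()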
+diff --git a/salt/cli/support/localrunner.py b/salt/cli/support/localrunner.py
+new file mode 100644
+index 0000000000..ad10eda0b0
+--- /dev/null
++++ b/salt/cli/support/localrunner.py
+@@ -0,0 +1,33 @@
++"""
++Local Runner
++"""
++
++import logging
++
++import salt.runner
++import salt.utils.platform
++import salt.utils.process
++
++log = logging.getLogger(__name__)
++
++
++class LocalRunner(salt.runner.Runner):
++ """
++ Runner class that changes its default behaviour.
++ """
++
++ def _proc_function(self, fun, low, user, tag, jid, daemonize=True):
++ """
++ Same as original _proc_function in AsyncClientMixin,
++ except it calls "low" without firing a print event.
++ """
++ if daemonize and not salt.utils.platform.is_windows():
++ salt.log.setup.shutdown_multiprocessing_logging()
++ salt.utils.process.daemonize()
++ salt.log.setup.setup_multiprocessing_logging()
++
++ low["__jid__"] = jid
++ low["__user__"] = user
++ low["__tag__"] = tag
++
++ return self.low(fun, low, print_event=False, full_return=False)
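A hedged sketch of driving this runner directly (the config path is an assumption). Because _proc_function passes print_event=False, results come back as return data instead of being fired as print events, which lets the collector archive them silently:

    import salt.config
    from salt.cli.support.localrunner import LocalRunner

    opts = salt.config.master_config("/etc/salt/master")  # assumed config location
    opts.update({"fun": "jobs.last_run", "arg": []})
    result = LocalRunner(opts).run()  # returned to the caller, not printed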
+diff --git a/salt/cli/support/profiles/__init__.py b/salt/cli/support/profiles/__init__.py
+new file mode 100644
+index 0000000000..b86aef30b8
+--- /dev/null
++++ b/salt/cli/support/profiles/__init__.py
+@@ -0,0 +1,4 @@
++# coding=utf-8
++'''
++Profiles for salt-support.
++'''
+diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml
+new file mode 100644
+index 0000000000..3defb5eef3
+--- /dev/null
++++ b/salt/cli/support/profiles/default.yml
+@@ -0,0 +1,78 @@
++sysinfo:
++ - description: |
++ Get the Salt grains of the current system.
++ - grains.items:
++ info: System grains
++
++packages:
++ - description: |
++ Fetch list of all the installed packages.
++ - pkg.list_pkgs:
++ info: Installed packages
++
++repositories:
++ - pkg.list_repos:
++ info: Available repositories
++
++upgrades:
++ - pkg.list_upgrades:
++ info: Possible upgrades
++
++## TODO: Some data here belongs elsewhere and also is duplicated
++status:
++ - status.version:
++ info: Status version
++ - status.cpuinfo:
++ info: CPU information
++ - status.cpustats:
++ info: CPU stats
++ - status.diskstats:
++ info: Disk stats
++ - status.loadavg:
++ info: Average load of the current system
++ - status.uptime:
++ info: Uptime of the machine
++ - status.meminfo:
++ info: Information about memory
++ - status.vmstats:
++ info: Virtual memory stats
++ - status.netdev:
++ info: Network device stats
++ - status.nproc:
++ info: Number of processing units available on this system
++ - status.procs:
++ info: Process data
++
++general-health:
++ - ps.boot_time:
++ info: System Boot Time
++ - ps.swap_memory:
++ info: Swap Memory
++ output: txt
++ - ps.cpu_times:
++ info: CPU times
++ - ps.disk_io_counters:
++ info: Disk IO counters
++ - ps.disk_partition_usage:
++ info: Disk partition usage
++ output: table
++ - ps.disk_partitions:
++ info: Disk partitions
++ output: table
++ - ps.top:
++ info: Top CPU consuming processes
++
++boot_log:
++ - filetree:
++ info: Collect boot logs
++ args:
++ - /var/log/boot.*
++
++system.log:
++ # This works on any file system object.
++ - filetree:
++ info: Add system log
++ args:
++ - /var/log/syslog
++ - /var/log/messages
++
+diff --git a/salt/cli/support/profiles/jobs-active.yml b/salt/cli/support/profiles/jobs-active.yml
+new file mode 100644
+index 0000000000..508c54ece7
+--- /dev/null
++++ b/salt/cli/support/profiles/jobs-active.yml
+@@ -0,0 +1,3 @@
++jobs-active:
++ - run:jobs.active:
++ info: List of all actively running jobs
+diff --git a/salt/cli/support/profiles/jobs-last.yml b/salt/cli/support/profiles/jobs-last.yml
+new file mode 100644
+index 0000000000..e3b719f552
+--- /dev/null
++++ b/salt/cli/support/profiles/jobs-last.yml
+@@ -0,0 +1,3 @@
++jobs-last:
++ - run:jobs.last_run:
++ info: List all detectable jobs and associated functions
+diff --git a/salt/cli/support/profiles/jobs-trace.yml b/salt/cli/support/profiles/jobs-trace.yml
+new file mode 100644
+index 0000000000..00b28e0502
+--- /dev/null
++++ b/salt/cli/support/profiles/jobs-trace.yml
+@@ -0,0 +1,7 @@
++jobs-details:
++ {% for job in runners('jobs.list_jobs') %}
++ - run:jobs.list_job:
++ info: Details on JID {{job}}
++ args:
++ - {{job}}
++ {% endfor %}
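Profiles such as jobs-trace above are Jinja templates rendered before YAML parsing; the call()/run() wrappers in collect_local_data are exposed to templates (as salt()/runners()). A minimal rendering sketch with the runners helper stubbed out:

    import jinja2
    import yaml

    with open("salt/cli/support/profiles/jobs-trace.yml") as t_fh:
        template = t_fh.read()
    rendered = jinja2.Environment().from_string(template).render(
        runners=lambda fun: ["20240101000000000000"]  # stubbed JID list
    )
    profile = yaml.safe_load(rendered)  # {"jobs-details": [{"run:jobs.list_job": ...}]}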
+diff --git a/salt/cli/support/profiles/network.yml b/salt/cli/support/profiles/network.yml
+new file mode 100644
+index 0000000000..268f02e61f
+--- /dev/null
++++ b/salt/cli/support/profiles/network.yml
+@@ -0,0 +1,27 @@
++network:
++ - network.get_hostname:
++ info: Hostname
++ output: txt
++ - network.get_fqdn:
++ info: FQDN
++ output: txt
++ - network.default_route:
++ info: Default route
++ output: table
++ - network.interfaces:
++ info: All the available interfaces
++ output: table
++ - network.subnets:
++ info: List of IPv4 subnets
++ - network.subnets6:
++ info: List of IPv6 subnets
++ - network.routes:
++ info: Network configured routes from routing tables
++ output: table
++ - network.netstat:
++ info: Information on open ports and states
++ output: table
++ - network.active_tcp:
++ info: All running TCP connections
++ - network.arp:
++ info: ARP table
+diff --git a/salt/cli/support/profiles/postgres.yml b/salt/cli/support/profiles/postgres.yml
+new file mode 100644
+index 0000000000..2238752c7a
+--- /dev/null
++++ b/salt/cli/support/profiles/postgres.yml
+@@ -0,0 +1,11 @@
++system.log:
++ - filetree:
++ info: Add system log
++ args:
++ - /var/log/syslog
++
++etc/postgres:
++ - filetree:
++ info: Pick entire /etc/postgresql
++ args:
++ - /etc/postgresql
+diff --git a/salt/cli/support/profiles/salt.yml b/salt/cli/support/profiles/salt.yml
+new file mode 100644
+index 0000000000..4b18d98870
+--- /dev/null
++++ b/salt/cli/support/profiles/salt.yml
+@@ -0,0 +1,9 @@
++sysinfo:
++ - grains.items:
++ info: System grains
++
++logfile:
++ - filetree:
++ info: Add current logfile
++ args:
++ - {{salt('config.get', 'log_file')}}
+diff --git a/salt/cli/support/profiles/users.yml b/salt/cli/support/profiles/users.yml
+new file mode 100644
+index 0000000000..391acdb606
+--- /dev/null
++++ b/salt/cli/support/profiles/users.yml
+@@ -0,0 +1,22 @@
++all-users:
++ {%for uname in salt('user.list_users') %}
++ - user.info:
++ info: Information about "{{uname}}"
++ args:
++ - {{uname}}
++ - user.list_groups:
++ info: List groups for user "{{uname}}"
++ args:
++ - {{uname}}
++ - shadow.info:
++ info: Shadow information about user "{{uname}}"
++ args:
++ - {{uname}}
++ - cron.raw_cron:
++ info: Cron for user "{{uname}}"
++ args:
++ - {{uname}}
++ {%endfor%}
++ - group.getent:
++ info: List of all available groups
++ output: table
+diff --git a/salt/loader/lazy.py b/salt/loader/lazy.py
+index d319fe54b4..5de995d446 100644
+--- a/salt/loader/lazy.py
++++ b/salt/loader/lazy.py
+@@ -972,8 +972,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
+ mod_names = [module_name] + list(virtual_aliases)
+
+ for attr in funcs_to_load:
+- if attr.startswith("_"):
+- # private functions are skipped
++ if attr.startswith("_") and attr != "__call__":
++ # private functions are skipped,
++ # except __call__ which is default entrance
++ # for multi-function batch-like state syntax
+ continue
+ func = getattr(mod, attr)
+ if not inspect.isfunction(func) and not isinstance(func, functools.partial):
+diff --git a/salt/modules/saltsupport.py b/salt/modules/saltsupport.py
+new file mode 100644
+index 0000000000..e800e3bf1f
+--- /dev/null
++++ b/salt/modules/saltsupport.py
+@@ -0,0 +1,405 @@
++#
++# Author: Bo Maryniuk
++#
++# Copyright 2018 SUSE LLC
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++"""
++:codeauthor: :email:`Bo Maryniuk `
++
++Module to run salt-support within Salt.
++"""
++# pylint: disable=W0231,W0221
++
++
++import datetime
++import logging
++import os
++import re
++import sys
++import tempfile
++import time
++
++import salt.cli.support
++import salt.cli.support.intfunc
++import salt.defaults.exitcodes
++import salt.exceptions
++import salt.utils.decorators
++import salt.utils.dictupdate
++import salt.utils.odict
++import salt.utils.path
++import salt.utils.stringutils
++from salt.cli.support.collector import SaltSupport, SupportDataCollector
++
++__virtualname__ = "support"
++log = logging.getLogger(__name__)
++
++
++class LogCollector:
++ """
++ Output collector.
++ """
++
++ INFO = "info"
++ WARNING = "warning"
++ ERROR = "error"
++
++ class MessagesList(list):
++ def append(self, obj):
++ list.append(
++ self,
++ "{} - {}".format(
++ datetime.datetime.utcnow().strftime("%T.%f")[:-3], obj
++ ),
++ )
++
++ __call__ = append
++
++ def __init__(self):
++ self.messages = {
++ self.INFO: self.MessagesList(),
++ self.WARNING: self.MessagesList(),
++ self.ERROR: self.MessagesList(),
++ }
++
++ def msg(self, message, *args, **kwargs):
++ title = kwargs.get("title")
++ if title:
++ message = "{}: {}".format(title, message)
++ self.messages[self.INFO](message)
++
++ def info(self, message, *args, **kwargs):
++ self.msg(message)
++
++ def warning(self, message, *args, **kwargs):
++ self.messages[self.WARNING](message)
++
++ def error(self, message, *args, **kwargs):
++ self.messages[self.ERROR](message)
++
++ def put(self, message, *args, **kwargs):
++ self.messages[self.INFO](message)
++
++ def highlight(self, message, *values, **kwargs):
++ self.msg(message.format(*values))
++
++
++class SaltSupportModule(SaltSupport):
++ """
++ Salt Support module class.
++ """
++
++ def __init__(self):
++ """
++ Constructor
++ """
++ self.config = self.setup_config()
++
++ def setup_config(self):
++ """
++ Return current configuration
++ :return:
++ """
++ return __opts__
++
++ def _get_archive_name(self, archname=None):
++ """
++ Create default archive name.
++
++ :return:
++ """
++ archname = re.sub("[^a-z0-9]", "", (archname or "").lower()) or "support"
++ for grain in ["fqdn", "host", "localhost", "nodename"]:
++ host = __grains__.get(grain)
++ if host:
++ break
++ if not host:
++ host = "localhost"
++
++ return os.path.join(
++ tempfile.gettempdir(),
++ "{hostname}-{archname}-{date}-{time}.bz2".format(
++ archname=archname,
++ hostname=host,
++ date=time.strftime("%Y%m%d"),
++ time=time.strftime("%H%M%S"),
++ ),
++ )
++
++ @salt.utils.decorators.external
++ def profiles(self):
++ """
++ Get list of profiles.
++
++ :return:
++ """
++ return {
++ "standard": salt.cli.support.get_profiles(self.config),
++ "custom": [],
++ }
++
++ @salt.utils.decorators.external
++ def archives(self):
++ """
++ Get list of existing archives.
++ :return:
++ """
++ arc_files = []
++ tmpdir = tempfile.gettempdir()
++ for filename in os.listdir(tmpdir):
++ mtc = re.match(r"\w+-\w+-\d+-\d+\.bz2", filename)
++ if mtc and len(filename) == mtc.span()[-1]:
++ arc_files.append(os.path.join(tmpdir, filename))
++
++ return arc_files
++
++ @salt.utils.decorators.external
++ def last_archive(self):
++ """
++ Get the last available archive
++ :return:
++ """
++ archives = {}
++ for archive in self.archives():
++ archives[int(archive.split(".")[0].split("-")[-1])] = archive
++
++        return archives[max(archives)] if archives else None
++
++ @salt.utils.decorators.external
++ def delete_archives(self, *archives):
++ """
++ Delete archives
++ :return:
++ """
++ # Remove paths
++ _archives = []
++ for archive in archives:
++ _archives.append(os.path.basename(archive))
++ archives = _archives[:]
++
++ ret = {"files": {}, "errors": {}}
++ for archive in self.archives():
++ arc_dir = os.path.dirname(archive)
++ archive = os.path.basename(archive)
++            if not archives or archive in archives:
++ archive = os.path.join(arc_dir, archive)
++ try:
++ os.unlink(archive)
++ ret["files"][archive] = "removed"
++ except Exception as err:
++ ret["errors"][archive] = str(err)
++ ret["files"][archive] = "left"
++
++ return ret
++
++ def format_sync_stats(self, cnt):
++ """
++ Format stats of the sync output.
++
++ :param cnt:
++ :return:
++ """
++ stats = salt.utils.odict.OrderedDict()
++ if cnt.get("retcode") == salt.defaults.exitcodes.EX_OK:
++ for line in cnt.get("stdout", "").split(os.linesep):
++ line = line.split(": ")
++ if len(line) == 2:
++ stats[line[0].lower().replace(" ", "_")] = line[1]
++ cnt["transfer"] = stats
++ del cnt["stdout"]
++
++ # Remove empty
++ empty_sections = []
++ for section in cnt:
++ if not cnt[section] and section != "retcode":
++ empty_sections.append(section)
++ for section in empty_sections:
++ del cnt[section]
++
++ return cnt
++
++ @salt.utils.decorators.depends("rsync")
++ @salt.utils.decorators.external
++ def sync(self, group, name=None, host=None, location=None, move=False, all=False):
++ """
++ Sync the latest archive to the host on given location.
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' support.sync group=test
++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2
++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan
++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan location=/opt/
++
++ :param group: name of the local directory to which sync is going to put the result files
++ :param name: name of the archive. Latest, if not specified.
++ :param host: name of the destination host for rsync. Default is master, if not specified.
++ :param location: local destination directory, default temporary if not specified
++ :param move: move archive file[s]. Default is False.
++ :param all: work with all available archives. Default is False (i.e. latest available)
++
++ :return:
++ """
++ tfh, tfn = tempfile.mkstemp()
++ processed_archives = []
++ src_uri = uri = None
++
++ last_arc = self.last_archive()
++ if name:
++ archives = [name]
++ elif all:
++ archives = self.archives()
++ elif last_arc:
++ archives = [last_arc]
++ else:
++ archives = []
++
++ for name in archives:
++ err = None
++ if not name:
++ err = "No support archive has been defined."
++ elif not os.path.exists(name):
++ err = 'Support archive "{}" was not found'.format(name)
++ if err is not None:
++ log.error(err)
++ raise salt.exceptions.SaltInvocationError(err)
++
++ if not uri:
++ src_uri = os.path.dirname(name)
++ uri = "{host}:{loc}".format(
++ host=host or __opts__["master"],
++ loc=os.path.join(location or tempfile.gettempdir(), group),
++ )
++
++ os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name)))
++ os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep))
++ processed_archives.append(name)
++ log.debug("Syncing {filename} to {uri}".format(filename=name, uri=uri))
++ os.close(tfh)
++
++ if not processed_archives:
++ raise salt.exceptions.SaltInvocationError("No archives found to transfer.")
++
++ ret = __salt__["rsync.rsync"](
++ src=src_uri,
++ dst=uri,
++ additional_opts=["--stats", "--files-from={}".format(tfn)],
++ )
++ ret["files"] = {}
++ for name in processed_archives:
++ if move:
++ salt.utils.dictupdate.update(ret, self.delete_archives(name))
++ log.debug("Deleting {filename}".format(filename=name))
++ ret["files"][name] = "moved"
++ else:
++ ret["files"][name] = "copied"
++
++ try:
++ os.unlink(tfn)
++ except OSError as err:
++ log.error(
++ "Cannot remove temporary rsync file {fn}: {err}".format(fn=tfn, err=err)
++ )
++
++ return self.format_sync_stats(ret)
++
++ @salt.utils.decorators.external
++ def run(self, profile="default", pillar=None, archive=None, output="nested"):
++ """
++ Run Salt Support on the minion.
++
++ profile
++ Set available profile name. Default is "default".
++
++ pillar
++            Use a profile defined in the given pillar key.
++
++ archive
++            Override archive name. Default is "support". This results in "hostname-support-YYYYMMDD-hhmmss.bz2".
++
++ output
++ Change the default outputter. Default is "nested".
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' support.run
++ salt '*' support.run profile=network
++ salt '*' support.run pillar=something_special
++ """
++
++ class outputswitch:
++ """
++            Output switcher context manager.
++ """
++
++ def __init__(self, output_device):
++ self._tmp_out = output_device
++ self._orig_out = None
++
++ def __enter__(self):
++ self._orig_out = salt.cli.support.intfunc.out
++ salt.cli.support.intfunc.out = self._tmp_out
++
++ def __exit__(self, *args):
++ salt.cli.support.intfunc.out = self._orig_out
++
++ self.out = LogCollector()
++ with outputswitch(self.out):
++ self.collector = SupportDataCollector(
++ archive or self._get_archive_name(archname=archive), output
++ )
++ self.collector.out = self.out
++ self.collector.open()
++ self.collect_local_data(
++ profile=profile, profile_source=__pillar__.get(pillar)
++ )
++ self.collect_internal_data()
++ self.collector.close()
++
++ return {"archive": self.collector.archive_path, "messages": self.out.messages}
++
++
++def __virtual__():
++ """
++    Expose method references as module function aliases.
++ :return:
++ """
++ support = SaltSupportModule()
++
++ def _set_function(obj):
++ """
++ Create a Salt function for the SaltSupport class.
++ """
++
++ def _cmd(*args, **kwargs):
++ """
++            Call a support method as a regular Salt function.
++ """
++ _kwargs = {}
++ for kw in kwargs:
++ if not kw.startswith("__"):
++ _kwargs[kw] = kwargs[kw]
++ return obj(*args, **_kwargs)
++
++ _cmd.__doc__ = obj.__doc__
++ return _cmd
++
++ for m_name in dir(support):
++ obj = getattr(support, m_name)
++ if getattr(obj, "external", False):
++ setattr(sys.modules[__name__], m_name, _set_function(obj))
++
++ return __virtualname__
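Since the methods above are exposed through __virtual__, other modules and states can call them like any execution function. A hedged sketch of consuming support.run from Salt-side code (the profile name is illustrative, and a module-level logger is assumed):

    # inside another execution module or state, where __salt__ is injected:
    result = __salt__["support.run"](profile="network")
    archive = result["archive"]              # path of the written .bz2 archive
    for line in result["messages"]["info"]:  # "HH:MM:SS.mmm - <message>" entries
        log.debug(line)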
+diff --git a/salt/scripts.py b/salt/scripts.py
+index 07393373c9..16b032af2e 100644
+--- a/salt/scripts.py
++++ b/salt/scripts.py
+@@ -622,3 +622,18 @@ def salt_pip():
+ ] + _pip_args(sys.argv[1:], extras)
+ ret = subprocess.run(command, shell=False, check=False, env=env)
+ sys.exit(ret.returncode)
++
++
++def salt_support():
++ """
++    Run Salt Support, which collects system data, logs, etc. for debug and support purposes.
++ :return:
++ """
++
++ import salt.cli.support.collector
++
++ if "" in sys.path:
++ sys.path.remove("")
++ client = salt.cli.support.collector.SaltSupport()
++ _install_signal_handlers(client)
++ client.run()
+diff --git a/salt/state.py b/salt/state.py
+index 868be2749e..8352a8defc 100644
+--- a/salt/state.py
++++ b/salt/state.py
+@@ -1671,7 +1671,9 @@ class State:
+ names = []
+ if state.startswith("__"):
+ continue
+- chunk = {"state": state, "name": name}
++ chunk = OrderedDict()
++ chunk["state"] = state
++ chunk["name"] = name
+ if orchestration_jid is not None:
+ chunk["__orchestration_jid__"] = orchestration_jid
+ if "__sls__" in body:
+@@ -2382,9 +2384,16 @@ class State:
+ else:
+ self.format_slots(cdata)
+ with salt.utils.files.set_umask(low.get("__umask__")):
+- ret = self.states[cdata["full"]](
+- *cdata["args"], **cdata["kwargs"]
+- )
++ if cdata["full"].split(".")[-1] == "__call__":
++ # __call__ requires OrderedDict to preserve state order
++ # kwargs are also invalid overall
++ ret = self.states[cdata["full"]](
++ cdata["args"], module=None, state=cdata["kwargs"]
++ )
++ else:
++ ret = self.states[cdata["full"]](
++ *cdata["args"], **cdata["kwargs"]
++ )
+ self.states.inject_globals = {}
+ if "check_cmd" in low:
+ state_check_cmd = "{0[state]}.mod_run_check_cmd".format(low)
+@@ -3489,10 +3498,31 @@ class State:
+ running.update(errors)
+ return running
+
++ def inject_default_call(self, high):
++ """
++ Sets .call function to a state, if not there.
++
++ :param high:
++ :return:
++ """
++ for chunk in high:
++ state = high[chunk]
++ for state_ref in state:
++ needs_default = True
++ for argset in state[state_ref]:
++ if isinstance(argset, str):
++ needs_default = False
++ break
++ if needs_default:
++ order = state[state_ref].pop(-1)
++ state[state_ref].append("__call__")
++ state[state_ref].append(order)
++
+ def call_high(self, high, orchestration_jid=None):
+ """
+ Process a high data call and ensure the defined states.
+ """
++ self.inject_default_call(high)
+ errors = []
+ # If there is extension data reconcile it
+ high, ext_errors = self.reconcile_extend(high)
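A hedged sketch of what inject_default_call() does to compiled high data when an ID uses the single-ID syntax (no function-name string among the argsets); the ID and order value are illustrative:

    high = {
        "examine_my_systems": {
            "support": [{"taken": [{"profile": "default"}]}, {"order": 10000}],
        }
    }
    # The trailing order marker is popped, "__call__" is appended, and the
    # order is put back, so the loader dispatches to support.__call__:
    body = high["examine_my_systems"]["support"]
    order = body.pop(-1)
    body.append("__call__")
    body.append(order)
    # body is now [{"taken": [{"profile": "default"}]}, "__call__", {"order": 10000}]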
+diff --git a/salt/states/saltsupport.py b/salt/states/saltsupport.py
+new file mode 100644
+index 0000000000..fb0c9e0372
+--- /dev/null
++++ b/salt/states/saltsupport.py
+@@ -0,0 +1,225 @@
++#
++# Author: Bo Maryniuk
++#
++# Copyright 2018 SUSE LLC
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++r"""
++:codeauthor: :email:`Bo Maryniuk `
++
++Execution of Salt Support from within states
++============================================
++
++State to collect support data from the systems:
++
++.. code-block:: yaml
++
++ examine_my_systems:
++ support.taken:
++ - profile: default
++
++ support.collected:
++ - group: somewhere
++ - move: true
++
++"""
++import logging
++import os
++import tempfile
++
++import salt.exceptions
++
++# Import salt modules
++import salt.fileclient
++import salt.utils.decorators.path
++import salt.utils.odict
++
++log = logging.getLogger(__name__)
++__virtualname__ = "support"
++
++
++class SaltSupportState:
++ """
++ Salt-support.
++ """
++
++ EXPORTED = ["collected", "taken"]
++
++ def get_kwargs(self, data):
++ kwargs = {}
++ for keyset in data:
++ kwargs.update(keyset)
++
++ return kwargs
++
++ def __call__(self, state):
++ """
++ Call support.
++
++        :param state:
++ :return:
++ """
++ ret = {
++ "name": state.pop("name"),
++ "changes": {},
++ "result": True,
++ "comment": "",
++ }
++
++ out = {}
++ functions = ["Functions:"]
++ try:
++ for ref_func, ref_kwargs in state.items():
++ if ref_func not in self.EXPORTED:
++ raise salt.exceptions.SaltInvocationError(
++ "Function {} is not found".format(ref_func)
++ )
++ out[ref_func] = getattr(self, ref_func)(**self.get_kwargs(ref_kwargs))
++ functions.append(" - {}".format(ref_func))
++ ret["comment"] = "\n".join(functions)
++ except Exception as ex:
++ ret["comment"] = str(ex)
++ ret["result"] = False
++ ret["changes"] = out
++
++ return ret
++
++ def check_destination(self, location, group):
++ """
++ Check destination for the archives.
++ :return:
++ """
++ # Pre-create destination, since rsync will
++ # put one file named as group
++ try:
++ destination = os.path.join(location, group)
++ if os.path.exists(destination) and not os.path.isdir(destination):
++ raise salt.exceptions.SaltException(
++ 'Destination "{}" should be directory!'.format(destination)
++ )
++ if not os.path.exists(destination):
++ os.makedirs(destination)
++ log.debug("Created destination directory for archives: %s", destination)
++ else:
++ log.debug(
++ "Archives destination directory %s already exists", destination
++ )
++ except OSError as err:
++ log.error(err)
++
++ def collected(
++ self, group, filename=None, host=None, location=None, move=True, all=True
++ ):
++ """
++ Sync archives to a central place.
++
++ :param group:
++ :param filename:
++ :param host:
++ :param location:
++ :param move:
++ :param all:
++ :return:
++ """
++ ret = {
++ "name": "support.collected",
++ "changes": {},
++ "result": True,
++ "comment": "",
++ }
++ location = location or tempfile.gettempdir()
++ self.check_destination(location, group)
++ ret["changes"] = __salt__["support.sync"](
++ group, name=filename, host=host, location=location, move=move, all=all
++ )
++
++ return ret
++
++ def taken(self, profile="default", pillar=None, archive=None, output="nested"):
++ """
++        Take support data from the minion.
++
++ :param profile:
++ :param pillar:
++ :param archive:
++ :param output:
++ :return:
++ """
++ ret = {
++ "name": "support.taken",
++ "changes": {},
++ "result": True,
++ }
++
++ result = __salt__["support.run"](
++ profile=profile, pillar=pillar, archive=archive, output=output
++ )
++ if result.get("archive"):
++ ret[
++ "comment"
++ ] = "Information about this system has been saved to {} file.".format(
++ result["archive"]
++ )
++ ret["changes"]["archive"] = result["archive"]
++ ret["changes"]["messages"] = {}
++ for key in ["info", "error", "warning"]:
++ if result.get("messages", {}).get(key):
++ ret["changes"]["messages"][key] = result["messages"][key]
++ else:
++ ret["comment"] = ""
++
++ return ret
++
++
++_support_state = SaltSupportState()
++
++
++def __call__(*args, **kwargs):
++ """
++ SLS single-ID syntax processing.
++
++ module:
++        This module reference, equal to sys.modules[__name__].
++
++ state:
++        Compiled state in preserved order. The function is supposed to look
++        at the first-level array of functions.
++
++    :param args:
++ :param kwargs:
++ :return:
++ """
++ return _support_state(kwargs.get("state", {}))
++
++
++def taken(name, profile="default", pillar=None, archive=None, output="nested"):
++ return _support_state.taken(
++ profile=profile, pillar=pillar, archive=archive, output=output
++ )
++
++
++def collected(
++ name, group, filename=None, host=None, location=None, move=True, all=True
++):
++ return _support_state.collected(
++ group=group, filename=filename, host=host, location=location, move=move, all=all
++ )
++
++
++def __virtual__():
++ """
++ Salt Support state
++ """
++ return __virtualname__
+diff --git a/salt/utils/args.py b/salt/utils/args.py
+index 536aea3816..04a8a14054 100644
+--- a/salt/utils/args.py
++++ b/salt/utils/args.py
+@@ -15,6 +15,7 @@ import salt.utils.jid
+ import salt.utils.versions
+ import salt.utils.yaml
+ from salt.exceptions import SaltInvocationError
++from salt.utils.odict import OrderedDict
+
+ log = logging.getLogger(__name__)
+
+@@ -399,7 +400,7 @@ def format_call(
+ ret = initial_ret is not None and initial_ret or {}
+
+ ret["args"] = []
+- ret["kwargs"] = {}
++ ret["kwargs"] = OrderedDict()
+
+ aspec = get_function_argspec(fun, is_class_method=is_class_method)
+
+diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py
+index 1f62d5f3d6..1906cc2ecc 100644
+--- a/salt/utils/decorators/__init__.py
++++ b/salt/utils/decorators/__init__.py
+@@ -866,3 +866,27 @@ def ensure_unicode_args(function):
+ return function(*args, **kwargs)
+
+ return wrapped
++
++
++def external(func):
++ """
++ Mark function as external.
++
++ :param func:
++ :return:
++ """
++
++ def f(*args, **kwargs):
++ """
++ Stub.
++
++ :param args:
++ :param kwargs:
++ :return:
++ """
++ return func(*args, **kwargs)
++
++ f.external = True
++ f.__doc__ = func.__doc__
++
++ return f
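This decorator pairs with __virtual__ in salt/modules/saltsupport.py above: methods marked @external are lifted into the module namespace. A minimal self-contained sketch of the pattern:

    import sys

    from salt.utils.decorators import external

    class Support:
        @external
        def profiles(self):
            return {"standard": {}, "custom": []}

    support = Support()
    for m_name in dir(support):
        obj = getattr(support, m_name)
        if getattr(obj, "external", False):
            # expose the bound method as a module-level function
            setattr(sys.modules[__name__], m_name, obj)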
+diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
+index 911b2cbb04..dc125de7d7 100644
+--- a/salt/utils/parsers.py
++++ b/salt/utils/parsers.py
+@@ -17,6 +17,7 @@ import optparse
+ import os
+ import signal
+ import sys
++import tempfile
+ import traceback
+ import types
+ from functools import partial
+@@ -31,6 +32,7 @@ import salt.utils.args
+ import salt.utils.data
+ import salt.utils.files
+ import salt.utils.jid
++import salt.utils.network
+ import salt.utils.platform
+ import salt.utils.process
+ import salt.utils.stringutils
+@@ -2026,6 +2028,118 @@ class SyndicOptionParser(
+ return opts
+
+
++class SaltSupportOptionParser(
++ OptionParser,
++ ConfigDirMixIn,
++ MergeConfigMixIn,
++ LogLevelMixIn,
++ TimeoutMixIn,
++ metaclass=OptionParserMeta,
++):
++ default_timeout = 5
++ description = "Salt Support is a program to collect all support data: logs, system configuration etc."
++ usage = "%prog [options] '' [arguments]"
++ # ConfigDirMixIn config filename attribute
++ _config_filename_ = "master"
++
++ # LogLevelMixIn attributes
++ _default_logging_level_ = config.DEFAULT_MASTER_OPTS["log_level"]
++ _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS["log_file"]
++
++ def _mixin_setup(self):
++ self.add_option(
++ "-P",
++ "--show-profiles",
++ default=False,
++ action="store_true",
++ dest="support_profile_list",
++ help="Show available profiles",
++ )
++ self.add_option(
++ "-p",
++ "--profile",
++ default="",
++ dest="support_profile",
++ help='Specify support profile or comma-separated profiles, e.g.: "salt,network"',
++ )
++ support_archive = "{t}/{h}-support.tar.bz2".format(
++ t=tempfile.gettempdir(), h=salt.utils.network.get_fqhostname()
++ )
++ self.add_option(
++ "-a",
++ "--archive",
++ default=support_archive,
++ dest="support_archive",
++ help=(
++ "Specify name of the resulting support archive. "
++ 'Default is "{f}".'.format(f=support_archive)
++ ),
++ )
++ self.add_option(
++ "-u",
++ "--unit",
++ default="",
++ dest="support_unit",
++ help='Specify examined unit (default "master").',
++ )
++ self.add_option(
++ "-U",
++ "--show-units",
++ default=False,
++ action="store_true",
++ dest="support_show_units",
++ help="Show available units",
++ )
++ self.add_option(
++ "-f",
++ "--force",
++ default=False,
++ action="store_true",
++ dest="support_archive_force_overwrite",
++ help="Force overwrite existing archive, if exists",
++ )
++ self.add_option(
++ "-o",
++ "--out",
++ default="null",
++ dest="support_output_format",
++ help=(
++ "Set the default output using the specified outputter, "
++                'unless the profile overrides it. Default: "null".'
++ ),
++ )
++
++ def find_existing_configs(self, default):
++ """
++ Find configuration files on the system.
++ :return:
++ """
++ configs = []
++ for cfg in [default, self._config_filename_, "minion", "proxy", "cloud", "spm"]:
++ if not cfg:
++ continue
++ config_path = self.get_config_file_path(cfg)
++ if os.path.exists(config_path):
++ configs.append(cfg)
++
++ if default and default not in configs:
++ raise SystemExit("Unknown configuration unit: {}".format(default))
++
++ return configs
++
++ def setup_config(self, cfg=None):
++ """
++ Open suitable config file.
++ :return:
++ """
++ _opts, _args = optparse.OptionParser.parse_args(self)
++ configs = self.find_existing_configs(_opts.support_unit)
++ if configs and cfg not in configs:
++ cfg = configs[0]
++
++ return config.master_config(self.get_config_file_path(cfg))
++
++
+ class SaltCMDOptionParser(
+ OptionParser,
+ ConfigDirMixIn,
+diff --git a/scripts/salt-support b/scripts/salt-support
+new file mode 100755
+index 0000000000..4e0e79f3ea
+--- /dev/null
++++ b/scripts/salt-support
+@@ -0,0 +1,11 @@
++#!/usr/bin/env python
++"""
++Salt support collects logs,
++debug data and system information
++for support purposes.
++"""
++
++from salt.scripts import salt_support
++
++if __name__ == "__main__":
++ salt_support()
+diff --git a/setup.py b/setup.py
+index 931ed40a51..e60f1b7085 100755
+--- a/setup.py
++++ b/setup.py
+@@ -1061,6 +1061,7 @@ class SaltDistribution(distutils.dist.Distribution):
+ "scripts/salt-minion",
+ "scripts/salt-proxy",
+ "scripts/salt-run",
++ "scripts/salt-support",
+ "scripts/salt-ssh",
+ "scripts/salt-syndic",
+ "scripts/spm",
+@@ -1109,6 +1110,7 @@ class SaltDistribution(distutils.dist.Distribution):
+ "salt-master = salt.scripts:salt_master",
+ "salt-minion = salt.scripts:salt_minion",
+ "salt-run = salt.scripts:salt_run",
++ "salt-support = salt.scripts:salt_support",
+ "salt-ssh = salt.scripts:salt_ssh",
+ "salt-syndic = salt.scripts:salt_syndic",
+ "spm = salt.scripts:salt_spm",
+diff --git a/tests/pytests/unit/cli/test_support.py b/tests/pytests/unit/cli/test_support.py
+new file mode 100644
+index 0000000000..dc0e99bb3d
+--- /dev/null
++++ b/tests/pytests/unit/cli/test_support.py
+@@ -0,0 +1,553 @@
++"""
++ :codeauthor: Bo Maryniuk
++"""
++
++
++import os
++
++import jinja2
++import salt.cli.support.collector
++import salt.exceptions
++import salt.utils.files
++import yaml
++from salt.cli.support.collector import SaltSupport, SupportDataCollector
++from salt.cli.support.console import IndentOutput
++from salt.utils.color import get_colors
++from salt.utils.stringutils import to_bytes
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.unit import TestCase, skipIf
++
++try:
++ import pytest
++except ImportError:
++ pytest = None
++
++
++@skipIf(not bool(pytest), "Pytest needs to be installed")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class SaltSupportIndentOutputTestCase(TestCase):
++ """
++ Unit Tests for the salt-support indent output.
++ """
++
++ def setUp(self):
++ """
++ Setup test
++ :return:
++ """
++
++ self.message = "Stubborn processes on dumb terminal"
++ self.device = MagicMock()
++ self.iout = IndentOutput(device=self.device)
++ self.colors = get_colors()
++
++ def tearDown(self):
++ """
++ Remove instances after test run
++ :return:
++ """
++ del self.message
++ del self.device
++ del self.iout
++ del self.colors
++
++ def test_standard_output(self):
++ """
++ Test console standard output.
++ """
++ self.iout.put(self.message)
++ assert self.device.write.called
++ assert self.device.write.call_count == 5
++ for idx, data in enumerate(
++ ["", str(self.colors["CYAN"]), self.message, str(self.colors["ENDC"]), "\n"]
++ ):
++ assert self.device.write.call_args_list[idx][0][0] == data
++
++ def test_indent_output(self):
++ """
++ Test indent distance.
++ :return:
++ """
++ self.iout.put(self.message, indent=10)
++ for idx, data in enumerate(
++ [
++ " " * 10,
++ str(self.colors["CYAN"]),
++ self.message,
++ str(self.colors["ENDC"]),
++ "\n",
++ ]
++ ):
++ assert self.device.write.call_args_list[idx][0][0] == data
++
++ def test_color_config(self):
++ """
++        Test color config changes on each indent.
++ :return:
++ """
++
++ conf = {0: "MAGENTA", 2: "RED", 4: "WHITE", 6: "YELLOW"}
++ self.iout = IndentOutput(conf=conf, device=self.device)
++ for indent in sorted(list(conf)):
++ self.iout.put(self.message, indent=indent)
++
++ step = 1
++ for ident_key in sorted(list(conf)):
++ assert str(self.device.write.call_args_list[step][0][0]) == str(
++ self.colors[conf[ident_key]]
++ )
++ step += 5
++
++
++@skipIf(not bool(pytest), "Pytest needs to be installed")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class SaltSupportCollectorTestCase(TestCase):
++ """
++ Collector tests.
++ """
++
++ def setUp(self):
++ """
++ Setup the test case
++ :return:
++ """
++ self.archive_path = "/highway/to/hell"
++ self.output_device = MagicMock()
++ self.collector = SupportDataCollector(self.archive_path, self.output_device)
++
++ def tearDown(self):
++ """
++ Tear down the test case elements
++ :return:
++ """
++ del self.collector
++ del self.archive_path
++ del self.output_device
++
++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock())
++ def test_archive_open(self):
++ """
++ Test archive is opened.
++
++ :return:
++ """
++ self.collector.open()
++ assert self.collector.archive_path == self.archive_path
++ with pytest.raises(salt.exceptions.SaltException) as err:
++ self.collector.open()
++ assert "Archive already opened" in str(err)
++
++ @patch("salt.cli.support.collector.tarfile.TarFile", MagicMock())
++ def test_archive_close(self):
++ """
++        Test archive is closed.
++
++ :return:
++ """
++ self.collector.open()
++ self.collector._flush_content = lambda: None
++ self.collector.close()
++ assert self.collector.archive_path == self.archive_path
++ with pytest.raises(salt.exceptions.SaltException) as err:
++ self.collector.close()
++ assert "Archive already closed" in str(err)
++
++ def test_archive_addwrite(self):
++ """
++        Test adding a section to the archive and writing to it.
++
++ :return:
++ """
++ archive = MagicMock()
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
++ self.collector.open()
++ self.collector.add("foo")
++ self.collector.write(title="title", data="data", output="null")
++ self.collector._flush_content()
++
++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes(
++ "title\n-----\n\nraw-content: data\n\n\n\n"
++ )
++
++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null"))
++ def test_archive_addlink(self):
++ """
++        Test adding a section to the archive and linking an external file or directory to it.
++
++ :return:
++ """
++ archive = MagicMock()
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
++ self.collector.open()
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
++ self.collector._flush_content()
++
++ assert archive.bz2open().addfile.call_count == 1
++ assert archive.bz2open().addfile.call_args[1]["fileobj"].read() == to_bytes(
++ "Backup Path\n-----------\n\npath=/dev/null\n\n\n"
++ )
++
++ @patch("salt.utils.files.fopen", MagicMock(return_value="path=/dev/null"))
++ def test_archive_discard_section(self):
++ """
++        Test discarding a section from the archive.
++
++ :return:
++ """
++ archive = MagicMock()
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
++ self.collector.open()
++ self.collector.add("solar-interference")
++ self.collector.link(
++ title="Thermal anomaly", path="/path/to/another/great.config"
++ )
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
++ self.collector._flush_content()
++ assert archive.bz2open().addfile.call_count == 2
++ assert archive.bz2open().addfile.mock_calls[0][2][
++ "fileobj"
++ ].read() == to_bytes(
++ "Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n"
++ )
++ self.collector.close()
++
++ archive = MagicMock()
++ with patch("salt.cli.support.collector.tarfile.TarFile", archive):
++ self.collector.open()
++ self.collector.add("solar-interference")
++ self.collector.link(
++ title="Thermal anomaly", path="/path/to/another/great.config"
++ )
++ self.collector.discard_current()
++ self.collector.add("foo")
++ self.collector.link(title="Backup Path", path="/path/to/backup.config")
++ self.collector._flush_content()
++ assert archive.bz2open().addfile.call_count == 2
++ assert archive.bz2open().addfile.mock_calls[0][2][
++ "fileobj"
++ ].read() == to_bytes("Backup Path\n-----------\n\npath=/dev/null\n\n\n")
++ self.collector.close()
++
++
++@skipIf(not bool(pytest), "Pytest needs to be installed")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class SaltSupportRunnerTestCase(TestCase):
++ """
++ Test runner class.
++ """
++
++ def setUp(self):
++ """
++ Set up test suite.
++ :return:
++ """
++ self.archive_path = "/dev/null"
++ self.output_device = MagicMock()
++ self.runner = SaltSupport()
++ self.runner.collector = SupportDataCollector(
++ self.archive_path, self.output_device
++ )
++
++ def tearDown(self):
++ """
++ Tear down.
++
++ :return:
++ """
++ del self.archive_path
++ del self.output_device
++ del self.runner
++
++ def test_function_config(self):
++ """
++ Test function config formation.
++
++ :return:
++ """
++ self.runner.config = {}
++ msg = "Electromagnetic energy loss"
++ assert self.runner._setup_fun_config({"description": msg}) == {
++ "print_metadata": False,
++ "file_client": "local",
++ "fun": "",
++ "kwarg": {},
++ "description": msg,
++ "cache_jobs": False,
++ "arg": [],
++ }
++
++ def test_local_caller(self):
++ """
++ Test local caller.
++
++ :return:
++ """
++ msg = "Because of network lag due to too many people playing deathmatch"
++ caller = MagicMock()
++ caller().call = MagicMock(return_value=msg)
++
++ self.runner._get_caller = caller
++ self.runner.out = MagicMock()
++ assert self.runner._local_call({}) == msg
++
++ caller().call = MagicMock(side_effect=SystemExit)
++ assert self.runner._local_call({}) == "Data is not available at this moment"
++
++ err_msg = "The UPS doesn't have a battery backup."
++ caller().call = MagicMock(side_effect=Exception(err_msg))
++ assert (
++ self.runner._local_call({})
++ == "Unhandled exception occurred: The UPS doesn't have a battery backup."
++ )
++
++ def test_local_runner(self):
++ """
++ Test local runner.
++
++ :return:
++ """
++ msg = "Big to little endian conversion error"
++ runner = MagicMock()
++ runner().run = MagicMock(return_value=msg)
++
++ self.runner._get_runner = runner
++ self.runner.out = MagicMock()
++ assert self.runner._local_run({}) == msg
++
++ runner().run = MagicMock(side_effect=SystemExit)
++ assert self.runner._local_run({}) == "Runner is not available at this moment"
++
++ err_msg = "Trojan horse ran out of hay"
++ runner().run = MagicMock(side_effect=Exception(err_msg))
++ assert (
++ self.runner._local_run({})
++ == "Unhandled exception occurred: Trojan horse ran out of hay"
++ )
++
++ @patch("salt.cli.support.intfunc", MagicMock(spec=[]))
++ def test_internal_function_call_stub(self):
++ """
++ Test missing internal function call is handled accordingly.
++
++ :return:
++ """
++ self.runner.out = MagicMock()
++ out = self.runner._internal_function_call(
++ {"fun": "everythingisawesome", "arg": [], "kwargs": {}}
++ )
++ assert out == "Function everythingisawesome is not available"
++
++ def test_internal_function_call(self):
++ """
++        Test an existing internal function call is handled accordingly.
++
++ :return:
++ """
++ msg = "Internet outage"
++ intfunc = MagicMock()
++ intfunc.everythingisawesome = MagicMock(return_value=msg)
++ self.runner.out = MagicMock()
++ with patch("salt.cli.support.intfunc", intfunc):
++ out = self.runner._internal_function_call(
++ {"fun": "everythingisawesome", "arg": [], "kwargs": {}}
++ )
++ assert out == msg
++
++ def test_get_action(self):
++ """
++ Test action meta gets parsed.
++
++ :return:
++ """
++ action_meta = {
++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]}
++ }
++ assert self.runner._get_action(action_meta) == (
++ "List jobs filter",
++ None,
++ {"fun": "run:jobs.list_jobs_filter", "kwargs": {}, "arg": [1]},
++ )
++ action_meta = {
++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]}
++ }
++ assert self.runner._get_action(action_meta) == (
++ 'Information about "usbmux"',
++ None,
++ {"fun": "user.info", "kwargs": {}, "arg": ["usbmux"]},
++ )
++
++ def test_extract_return(self):
++ """
++ Test extract return from the output.
++
++ :return:
++ """
++ out = {"key": "value"}
++ assert self.runner._extract_return(out) == out
++ assert self.runner._extract_return({"return": out}) == out
++
++ def test_get_action_type(self):
++ """
++ Test action meta determines action type.
++
++ :return:
++ """
++ action_meta = {
++ "run:jobs.list_jobs_filter": {"info": "List jobs filter", "args": [1]}
++ }
++ assert self.runner._get_action_type(action_meta) == "run"
++
++ action_meta = {
++ "user.info": {"info": 'Information about "usbmux"', "args": ["usbmux"]}
++ }
++ assert self.runner._get_action_type(action_meta) == "call"
++
++ @patch("os.path.exists", MagicMock(return_value=True))
++ def test_cleanup(self):
++ """
++ Test cleanup routine.
++
++ :return:
++ """
++ arch = "/tmp/killme.zip"
++ unlink = MagicMock()
++ with patch("os.unlink", unlink):
++ self.runner.config = {"support_archive": arch}
++ self.runner.out = MagicMock()
++ self.runner._cleanup()
++
++ assert (
++ self.runner.out.warning.call_args[0][0]
++ == "Terminated earlier, cleaning up"
++ )
++ unlink.assert_called_once_with(arch)
++
++ @patch("os.path.exists", MagicMock(return_value=True))
++ def test_check_existing_archive(self):
++ """
++ Test check existing archive.
++
++ :return:
++ """
++ arch = "/tmp/endothermal-recalibration.zip"
++ unlink = MagicMock()
++ with patch("os.unlink", unlink), patch(
++ "os.path.exists", MagicMock(return_value=False)
++ ):
++ self.runner.config = {
++ "support_archive": "",
++ "support_archive_force_overwrite": True,
++ }
++ self.runner.out = MagicMock()
++ assert self.runner._check_existing_archive()
++ assert self.runner.out.warning.call_count == 0
++
++ with patch("os.unlink", unlink):
++ self.runner.config = {
++ "support_archive": arch,
++ "support_archive_force_overwrite": False,
++ }
++ self.runner.out = MagicMock()
++ assert not self.runner._check_existing_archive()
++ assert self.runner.out.warning.call_args[0][
++ 0
++ ] == "File {} already exists.".format(arch)
++
++ with patch("os.unlink", unlink):
++ self.runner.config = {
++ "support_archive": arch,
++ "support_archive_force_overwrite": True,
++ }
++ self.runner.out = MagicMock()
++ assert self.runner._check_existing_archive()
++ assert self.runner.out.warning.call_args[0][
++ 0
++ ] == "Overwriting existing archive: {}".format(arch)
++
++
++@skipIf(not bool(pytest), "Pytest needs to be installed")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class ProfileIntegrityTestCase(TestCase):
++ """
++ Default profile integrity
++ """
++
++ def setUp(self):
++ """
++ Set up test suite.
++
++ :return:
++ """
++ self.profiles = {}
++ profiles = os.path.join(
++ os.path.dirname(salt.cli.support.collector.__file__), "profiles"
++ )
++ for profile in os.listdir(profiles):
++ self.profiles[profile.split(".")[0]] = os.path.join(profiles, profile)
++
++ def tearDown(self):
++ """
++ Tear down test suite.
++
++ :return:
++ """
++ del self.profiles
++
++ def _render_template_to_yaml(self, name, *args, **kwargs):
++ """
++        Get a template reference for rendering.
++ :return:
++ """
++ with salt.utils.files.fopen(self.profiles[name]) as t_fh:
++ template = t_fh.read()
++            return yaml.safe_load(
++ jinja2.Environment().from_string(template).render(*args, **kwargs)
++ )
++
++ def test_non_template_profiles_parseable(self):
++ """
++        Test shipped non-template profiles are YAML-parseable.
++
++ :return:
++ """
++ for t_name in ["default", "jobs-active", "jobs-last", "network", "postgres"]:
++ with salt.utils.files.fopen(self.profiles[t_name]) as ref:
++ try:
++                    yaml.safe_load(ref)
++ parsed = True
++ except Exception:
++ parsed = False
++ assert parsed
++
++ def test_users_template_profile(self):
++ """
++ Test users template profile.
++
++ :return:
++ """
++ users_data = self._render_template_to_yaml(
++ "users", salt=MagicMock(return_value=["pokemon"])
++ )
++ assert len(users_data["all-users"]) == 5
++ for user_data in users_data["all-users"]:
++ for tgt in ["user.list_groups", "shadow.info", "cron.raw_cron"]:
++ if tgt in user_data:
++ assert user_data[tgt]["args"] == ["pokemon"]
++
++ def test_jobs_trace_template_profile(self):
++ """
++ Test jobs-trace template profile.
++
++ :return:
++ """
++ jobs_trace = self._render_template_to_yaml(
++ "jobs-trace", runners=MagicMock(return_value=["0000"])
++ )
++ assert len(jobs_trace["jobs-details"]) == 1
++ assert (
++ jobs_trace["jobs-details"][0]["run:jobs.list_job"]["info"]
++ == "Details on JID 0000"
++ )
++ assert jobs_trace["jobs-details"][0]["run:jobs.list_job"]["args"] == [0]
+diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py
+new file mode 100644
+index 0000000000..f9ce7be29a
+--- /dev/null
++++ b/tests/unit/modules/test_saltsupport.py
+@@ -0,0 +1,496 @@
++"""
++ :codeauthor: Bo Maryniuk
++"""
++
++
++import datetime
++
++import salt.exceptions
++from salt.modules import saltsupport
++from tests.support.mixins import LoaderModuleMockMixin
++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.unit import TestCase, skipIf
++
++try:
++ import pytest
++except ImportError:
++ pytest = None
++
++
++@skipIf(not bool(pytest), "Pytest required")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin):
++ """
++ Test cases for salt.modules.support::SaltSupportModule
++ """
++
++ def setup_loader_modules(self):
++ return {saltsupport: {}}
++
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"})
++ @patch("time.strftime", MagicMock(return_value="000"))
++ def test_get_archive_name(self):
++ """
++ Test archive name construction.
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ assert support._get_archive_name() == "/mnt/storage/c-3po-support-000-000.bz2"
++
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"})
++ @patch("time.strftime", MagicMock(return_value="000"))
++ def test_get_custom_archive_name(self):
++ """
++ Test get custom archive name.
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ temp_name = support._get_archive_name(archname="Darth Wader")
++ assert temp_name == "/mnt/storage/c-3po-darthwader-000-000.bz2"
++ temp_name = support._get_archive_name(archname="Яйця з сіллю")
++ assert temp_name == "/mnt/storage/c-3po-support-000-000.bz2"
++ temp_name = support._get_archive_name(archname="!@#$%^&*()Fillip J. Fry")
++ assert temp_name == "/mnt/storage/c-3po-fillipjfry-000-000.bz2"
++
++ @patch(
++ "salt.cli.support.get_profiles",
++ MagicMock(return_value={"message": "Feature was not beta tested"}),
++ )
++ def test_profiles_format(self):
++ """
++ Test profiles format.
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ profiles = support.profiles()
++ assert "custom" in profiles
++ assert "standard" in profiles
++ assert "message" in profiles["standard"]
++ assert profiles["custom"] == []
++ assert profiles["standard"]["message"] == "Feature was not beta tested"
++
++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage"))
++ @patch(
++ "os.listdir",
++ MagicMock(
++ return_value=[
++ "one-support-000-000.bz2",
++ "two-support-111-111.bz2",
++ "trash.bz2",
++ "hostname-000-000.bz2",
++ "three-support-wrong222-222.bz2",
++ "000-support-000-000.bz2",
++ ]
++ ),
++ )
++ def test_get_existing_archives(self):
++ """
++ Get list of existing archives.
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ out = support.archives()
++ assert len(out) == 3
++ for name in [
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/000-support-000-000.bz2",
++ ]:
++ assert name in out
++
++ def test_last_archive(self):
++ """
++ Get last archive name
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ assert support.last_archive() == "/mnt/storage/three-support-222-222.bz2"
++
++ @patch("os.unlink", MagicMock(return_value=True))
++ def test_delete_all_archives_success(self):
++ """
++ Test delete archives
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ ret = support.delete_archives()
++ assert "files" in ret
++ assert "errors" in ret
++ assert not bool(ret["errors"])
++ assert bool(ret["files"])
++ assert isinstance(ret["errors"], dict)
++ assert isinstance(ret["files"], dict)
++
++ for arc in support.archives():
++ assert ret["files"][arc] == "removed"
++
++ @patch(
++ "os.unlink",
++ MagicMock(
++ return_value=False,
++ side_effect=[
++ OSError("Decreasing electron flux"),
++ OSError("Solar flares interference"),
++ None,
++ ],
++ ),
++ )
++ def test_delete_all_archives_failure(self):
++ """
++ Test delete archives failure
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ ret = support.delete_archives()
++ assert "files" in ret
++ assert "errors" in ret
++ assert bool(ret["errors"])
++ assert bool(ret["files"])
++ assert isinstance(ret["errors"], dict)
++ assert isinstance(ret["files"], dict)
++
++ assert ret["files"]["/mnt/storage/three-support-222-222.bz2"] == "removed"
++ assert ret["files"]["/mnt/storage/one-support-000-000.bz2"] == "left"
++ assert ret["files"]["/mnt/storage/two-support-111-111.bz2"] == "left"
++
++ assert len(ret["errors"]) == 2
++ assert (
++ ret["errors"]["/mnt/storage/one-support-000-000.bz2"]
++ == "Decreasing electron flux"
++ )
++ assert (
++ ret["errors"]["/mnt/storage/two-support-111-111.bz2"]
++ == "Solar flares interference"
++ )
++
++ def test_format_sync_stats(self):
++ """
++ Test that format_sync_stats preserves the ordering of the keys
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ stats = """
++robot: Bender
++cute: Leela
++weird: Zoidberg
++professor: Farnsworth
++ """
++ f_stats = support.format_sync_stats({"retcode": 0, "stdout": stats})
++ assert list(f_stats["transfer"].keys()) == [
++ "robot",
++ "cute",
++ "weird",
++ "professor",
++ ]
++ assert list(f_stats["transfer"].values()) == [
++ "Bender",
++ "Leela",
++ "Zoidberg",
++ "Farnsworth",
++ ]
++
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.close", MagicMock())
++ def test_sync_no_archives_failure(self):
++ """
++ Test sync failed when no archives specified.
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(return_value=[])
++
++ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
++ support.sync("group-name")
++ assert "No archives found to transfer" in str(err)
++
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
++ def test_sync_last_picked_archive_not_found_failure(self):
++ """
++ Test sync failed when archive was not found (last picked)
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++
++ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
++ support.sync("group-name")
++ assert (
++ ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found'
++ in str(err)
++ )
++
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
++ def test_sync_specified_archive_not_found_failure(self):
++ """
++ Test sync failed when the specified archive was not found
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++
++ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
++ support.sync("group-name", name="lost.bz2")
++ assert ' Support archive "lost.bz2" was not found' in str(err)
++
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=False))
++ @patch("os.close", MagicMock())
++ def test_sync_no_archive_to_transfer_failure(self):
++ """
++ Test sync failed when no archive was found to transfer
++
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(return_value=[])
++ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
++ support.sync("group-name", all=True)
++ assert "No archives found to transfer" in str(err)
++
++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
++ @patch("os.path.exists", MagicMock(return_value=True))
++ @patch("os.close", MagicMock())
++ @patch("os.write", MagicMock())
++ @patch("os.unlink", MagicMock())
++ @patch(
++ "salt.modules.saltsupport.__salt__", {"rsync.rsync": MagicMock(return_value={})}
++ )
++ def test_sync_archives(self):
++ """
++ Test sync archives
++ :return:
++ """
++ support = saltsupport.SaltSupportModule()
++ support.archives = MagicMock(
++ return_value=[
++ "/mnt/storage/one-support-000-000.bz2",
++ "/mnt/storage/two-support-111-111.bz2",
++ "/mnt/storage/three-support-222-222.bz2",
++ ]
++ )
++ out = support.sync("group-name", host="buzz", all=True, move=False)
++ assert "files" in out
++ for arc_name in out["files"]:
++ assert out["files"][arc_name] == "copied"
++ assert saltsupport.os.unlink.call_count == 1
++ assert saltsupport.os.unlink.call_args_list[0][0][0] == "dummy"
++ calls = []
++ for call in saltsupport.os.write.call_args_list:
++ assert len(call) == 2
++ calls.append(call[0])
++ assert calls == [
++ (0, b"one-support-000-000.bz2"),
++ (0, b"\n"),
++ (0, b"two-support-111-111.bz2"),
++ (0, b"\n"),
++ (0, b"three-support-222-222.bz2"),
++ (0, b"\n"),
++ ]
++
++ @patch("salt.modules.saltsupport.__pillar__", {})
++ @patch("salt.modules.saltsupport.SupportDataCollector", MagicMock())
++ def test_run_support(self):
++ """
++ Test run support
++ :return:
++ """
++ saltsupport.SupportDataCollector(None, None).archive_path = "dummy"
++ support = saltsupport.SaltSupportModule()
++ support.collect_internal_data = MagicMock()
++ support.collect_local_data = MagicMock()
++ out = support.run()
++
++ for section in ["messages", "archive"]:
++ assert section in out
++ assert out["archive"] == "dummy"
++ for section in ["warning", "error", "info"]:
++ assert section in out["messages"]
++ ld_call = support.collect_local_data.call_args_list[0][1]
++ assert "profile" in ld_call
++ assert ld_call["profile"] == "default"
++ assert "profile_source" in ld_call
++ assert ld_call["profile_source"] is None
++ assert support.collector.open.call_count == 1
++ assert support.collector.close.call_count == 1
++ assert support.collect_internal_data.call_count == 1
++
++
++@skipIf(not bool(pytest), "Pytest required")
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class LogCollectorTestCase(TestCase, LoaderModuleMockMixin):
++ """
++ Test cases for salt.modules.support::LogCollector
++ """
++
++ def setup_loader_modules(self):
++ return {saltsupport: {}}
++
++ def test_msg(self):
++ """
++ Test message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ msg = "Upgrading /dev/null device"
++ out = saltsupport.LogCollector()
++ out.msg(msg, title="Here")
++ assert saltsupport.LogCollector.INFO in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}: {}".format("Here", msg)
++ ]
++
++ def test_info_message(self):
++ """
++ Test info message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ msg = "SIMM crosstalk during tectonic stress"
++ out = saltsupport.LogCollector()
++ out.info(msg)
++ assert saltsupport.LogCollector.INFO in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
++
++ def test_put_message(self):
++ """
++ Test put message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ msg = "Webmaster kidnapped by evil cult"
++ out = saltsupport.LogCollector()
++ out.put(msg)
++ assert saltsupport.LogCollector.INFO in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
++
++ def test_warning_message(self):
++ """
++ Test warning message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ msg = "Your e-mail is now being delivered by USPS"
++ out = saltsupport.LogCollector()
++ out.warning(msg)
++ assert saltsupport.LogCollector.WARNING in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.WARNING])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.WARNING] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
++
++ def test_error_message(self):
++ """
++ Test error message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ msg = "Learning curve appears to be fractal"
++ out = saltsupport.LogCollector()
++ out.error(msg)
++ assert saltsupport.LogCollector.ERROR in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.ERROR])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.ERROR] == [
++ "00:00:00.000 - {}".format(msg)
++ ]
++
++ def test_hl_message(self):
++ """
++ Test highlighter message to the log collector.
++
++ :return:
++ """
++ utcmock = MagicMock()
++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))
++ with patch("datetime.datetime", utcmock):
++ out = saltsupport.LogCollector()
++ out.highlight("The {} TTYs became {} TTYs and vice versa", "real", "pseudo")
++ assert saltsupport.LogCollector.INFO in out.messages
++ assert (
++ type(out.messages[saltsupport.LogCollector.INFO])
++ == saltsupport.LogCollector.MessagesList
++ )
++ assert out.messages[saltsupport.LogCollector.INFO] == [
++ "00:00:00.000 - The real TTYs became " "pseudo TTYs and vice versa"
++ ]
+--
+2.39.2
+
+
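The unit tests above pin down the archive-name sanitizer: "Darth Wader" becomes
"darthwader", an all-Cyrillic name falls back to the default "support" label,
and punctuation is stripped from "!@#$%^&*()Fillip J. Fry". A minimal sketch
consistent with those assertions (illustrative only; the shipped implementation
is _get_archive_name() in salt/modules/saltsupport.py):

    import time

    def archive_name(fqdn, archname=None, tmpdir="/tmp"):
        # Keep ASCII alphanumerics only and lowercase them; if nothing
        # survives the filter, fall back to the default "support" label.
        label = "".join(
            c for c in (archname or "") if c.isascii() and c.isalnum()
        ).lower()
        date, clock = time.strftime("%Y%m%d"), time.strftime("%H%M%S")
        return "{}/{}-{}-{}-{}.bz2".format(
            tmpdir, fqdn, label or "support", date, clock
        )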
diff --git a/enable-keepalive-probes-for-salt-ssh-executions-bsc-.patch b/enable-keepalive-probes-for-salt-ssh-executions-bsc-.patch
new file mode 100644
index 0000000..8303a37
--- /dev/null
+++ b/enable-keepalive-probes-for-salt-ssh-executions-bsc-.patch
@@ -0,0 +1,346 @@
+From 5303cc612bcbdb1ec45ede397ca1e2ca12ba3bd3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+ <psuarezhernandez@suse.com>
+Date: Fri, 1 Dec 2023 10:59:30 +0000
+Subject: [PATCH] Enable "KeepAlive" probes for Salt SSH executions
+ (bsc#1211649) (#610)
+
+* Enable KeepAlive probes for Salt SSH connections (bsc#1211649)
+
+* Add tests for Salt SSH keepalive options
+
+* Add changelog file
+
+* Make changes suggested by pre-commit
+---
+ changelog/65488.added.md | 1 +
+ salt/client/ssh/__init__.py | 32 +++++++++---
+ salt/client/ssh/client.py | 13 ++++-
+ salt/client/ssh/shell.py | 12 +++++
+ salt/config/__init__.py | 6 +++
+ salt/utils/parsers.py | 19 +++++++
+ tests/pytests/unit/client/ssh/test_single.py | 55 ++++++++++++++++++++
+ tests/pytests/unit/client/ssh/test_ssh.py | 3 ++
+ 8 files changed, 133 insertions(+), 8 deletions(-)
+ create mode 100644 changelog/65488.added.md
+
+diff --git a/changelog/65488.added.md b/changelog/65488.added.md
+new file mode 100644
+index 0000000000..78476cec11
+--- /dev/null
++++ b/changelog/65488.added.md
+@@ -0,0 +1 @@
++Enable "KeepAlive" probes for Salt SSH executions
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index 1e143f9e30..1d8426b7c2 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -50,8 +50,8 @@ import salt.utils.thin
+ import salt.utils.url
+ import salt.utils.verify
+ from salt._logging import LOG_LEVELS
+-from salt._logging.mixins import MultiprocessingStateMixin
+ from salt._logging.impl import LOG_LOCK
++from salt._logging.mixins import MultiprocessingStateMixin
+ from salt.template import compile_template
+ from salt.utils.process import Process
+ from salt.utils.zeromq import zmq
+@@ -307,6 +307,18 @@ class SSH(MultiprocessingStateMixin):
+ "ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
+ )
+ + self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
++ "keepalive": self.opts.get(
++ "ssh_keepalive",
++ salt.config.DEFAULT_MASTER_OPTS["ssh_keepalive"],
++ ),
++ "keepalive_interval": self.opts.get(
++ "ssh_keepalive_interval",
++ salt.config.DEFAULT_MASTER_OPTS["ssh_keepalive_interval"],
++ ),
++ "keepalive_count_max": self.opts.get(
++ "ssh_keepalive_count_max",
++ salt.config.DEFAULT_MASTER_OPTS["ssh_keepalive_count_max"],
++ ),
+ "sudo": self.opts.get(
+ "ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
+ ),
+@@ -557,7 +569,7 @@ class SSH(MultiprocessingStateMixin):
+ mods=self.mods,
+ fsclient=self.fsclient,
+ thin=self.thin,
+- **target
++ **target,
+ )
+ if salt.utils.path.which("ssh-copy-id"):
+ # we have ssh-copy-id, use it!
+@@ -573,7 +585,7 @@ class SSH(MultiprocessingStateMixin):
+ mods=self.mods,
+ fsclient=self.fsclient,
+ thin=self.thin,
+- **target
++ **target,
+ )
+ stdout, stderr, retcode = single.cmd_block()
+ try:
+@@ -601,7 +613,7 @@ class SSH(MultiprocessingStateMixin):
+ fsclient=self.fsclient,
+ thin=self.thin,
+ mine=mine,
+- **target
++ **target,
+ )
+ ret = {"id": single.id}
+ stdout, stderr, retcode = single.run()
+@@ -1022,7 +1034,10 @@ class Single:
+ remote_port_forwards=None,
+ winrm=False,
+ ssh_options=None,
+- **kwargs
++ keepalive=True,
++ keepalive_interval=60,
++ keepalive_count_max=3,
++ **kwargs,
+ ):
+ # Get mine setting and mine_functions if defined in kwargs (from roster)
+ self.mine = mine
+@@ -1081,6 +1096,9 @@ class Single:
+ "priv": priv,
+ "priv_passwd": priv_passwd,
+ "timeout": timeout,
++ "keepalive": keepalive,
++ "keepalive_interval": keepalive_interval,
++ "keepalive_count_max": keepalive_count_max,
+ "sudo": sudo,
+ "tty": tty,
+ "mods": self.mods,
+@@ -1302,7 +1320,7 @@ class Single:
+ self.id,
+ fsclient=self.fsclient,
+ minion_opts=self.minion_opts,
+- **self.target
++ **self.target,
+ )
+
+ opts_pkg = pre_wrapper["test.opts_pkg"]() # pylint: disable=E1102
+@@ -1388,7 +1406,7 @@ class Single:
+ self.id,
+ fsclient=self.fsclient,
+ minion_opts=self.minion_opts,
+- **self.target
++ **self.target,
+ )
+ wrapper.fsclient.opts["cachedir"] = opts["cachedir"]
+ self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
+diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py
+index 0b67598fc6..a00f5de423 100644
+--- a/salt/client/ssh/client.py
++++ b/salt/client/ssh/client.py
+@@ -52,6 +52,9 @@ class SSHClient:
+ ("ssh_priv_passwd", str),
+ ("ssh_identities_only", bool),
+ ("ssh_remote_port_forwards", str),
++ ("ssh_keepalive", bool),
++ ("ssh_keepalive_interval", int),
++ ("ssh_keepalive_count_max", int),
+ ("ssh_options", list),
+ ("ssh_max_procs", int),
+ ("ssh_askpass", bool),
+@@ -108,7 +111,15 @@ class SSHClient:
+ return sane_kwargs
+
+ def _prep_ssh(
+- self, tgt, fun, arg=(), timeout=None, tgt_type="glob", kwarg=None, context=None, **kwargs
++ self,
++ tgt,
++ fun,
++ arg=(),
++ timeout=None,
++ tgt_type="glob",
++ kwarg=None,
++ context=None,
++ **kwargs
+ ):
+ """
+ Prepare the arguments
+diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py
+index bc1ad034df..182e2c19e3 100644
+--- a/salt/client/ssh/shell.py
++++ b/salt/client/ssh/shell.py
+@@ -85,6 +85,9 @@ class Shell:
+ remote_port_forwards=None,
+ winrm=False,
+ ssh_options=None,
++ keepalive=True,
++ keepalive_interval=None,
++ keepalive_count_max=None,
+ ):
+ self.opts = opts
++ self.keepalive = keepalive
++ self.keepalive_interval = keepalive_interval
++ self.keepalive_count_max = keepalive_count_max
+ if self.opts["_ssh_version"] > (4, 9):
+ options.append("GSSAPIAuthentication=no")
+ options.append("ConnectTimeout={}".format(self.timeout))
++ if self.keepalive:
++ options.append(f"ServerAliveInterval={self.keepalive_interval}")
++ options.append(f"ServerAliveCountMax={self.keepalive_count_max}")
+ if self.opts.get("ignore_host_keys"):
+ options.append("StrictHostKeyChecking=no")
+ if self.opts.get("no_host_keys"):
+@@ -165,6 +174,9 @@ class Shell:
+ if self.opts["_ssh_version"] > (4, 9):
+ options.append("GSSAPIAuthentication=no")
+ options.append("ConnectTimeout={}".format(self.timeout))
++ if self.keepalive:
++ options.append(f"ServerAliveInterval={self.keepalive_interval}")
++ options.append(f"ServerAliveCountMax={self.keepalive_count_max}")
+ if self.opts.get("ignore_host_keys"):
+ options.append("StrictHostKeyChecking=no")
+ if self.opts.get("no_host_keys"):
+diff --git a/salt/config/__init__.py b/salt/config/__init__.py
+index d8258a4dbc..68f2b0f674 100644
+--- a/salt/config/__init__.py
++++ b/salt/config/__init__.py
+@@ -822,6 +822,9 @@ VALID_OPTS = immutabletypes.freeze(
+ "ssh_scan_ports": str,
+ "ssh_scan_timeout": float,
+ "ssh_identities_only": bool,
++ "ssh_keepalive": bool,
++ "ssh_keepalive_interval": int,
++ "ssh_keepalive_count_max": int,
+ "ssh_log_file": str,
+ "ssh_config_file": str,
+ "ssh_merge_pillar": bool,
+@@ -1592,6 +1595,9 @@ DEFAULT_MASTER_OPTS = immutabletypes.freeze(
+ "ssh_scan_ports": "22",
+ "ssh_scan_timeout": 0.01,
+ "ssh_identities_only": False,
++ "ssh_keepalive": True,
++ "ssh_keepalive_interval": 60,
++ "ssh_keepalive_count_max": 3,
+ "ssh_log_file": os.path.join(salt.syspaths.LOGS_DIR, "ssh"),
+ "ssh_config_file": os.path.join(salt.syspaths.HOME_DIR, ".ssh", "config"),
+ "cluster_mode": False,
+diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
+index dc125de7d7..6c7f9f2f66 100644
+--- a/salt/utils/parsers.py
++++ b/salt/utils/parsers.py
+@@ -3383,6 +3383,25 @@ class SaltSSHOptionParser(
+ "-R parameters."
+ ),
+ )
++ ssh_group.add_option(
++ "--disable-keepalive",
++ default=True,
++ action="store_false",
++ dest="ssh_keepalive",
++ help=(
++ "Disable KeepAlive probes (ServerAliveInterval) for the SSH connection."
++ ),
++ )
++ ssh_group.add_option(
++ "--keepalive-interval",
++ dest="ssh_keepalive_interval",
++ help=("Define the value for ServerAliveInterval option."),
++ )
++ ssh_group.add_option(
++ "--keepalive-count-max",
++ dest="ssh_keepalive_count_max",
++ help=("Define the value for ServerAliveCountMax option."),
++ )
+ ssh_group.add_option(
+ "--ssh-option",
+ dest="ssh_options",
+diff --git a/tests/pytests/unit/client/ssh/test_single.py b/tests/pytests/unit/client/ssh/test_single.py
+index c88a1c2127..8d87da8700 100644
+--- a/tests/pytests/unit/client/ssh/test_single.py
++++ b/tests/pytests/unit/client/ssh/test_single.py
+@@ -63,6 +63,61 @@ def test_single_opts(opts, target):
+ **target,
+ )
+
++ assert single.shell._ssh_opts() == ""
++ expected_cmd = (
++ "ssh login1 "
++ "-o KbdInteractiveAuthentication=no -o "
++ "PasswordAuthentication=yes -o ConnectTimeout=65 -o ServerAliveInterval=60 "
++ "-o ServerAliveCountMax=3 -o Port=22 "
++ "-o IdentityFile=/etc/salt/pki/master/ssh/salt-ssh.rsa "
++ "-o User=root date +%s"
++ )
++ assert single.shell._cmd_str("date +%s") == expected_cmd
++
++
++def test_single_opts_custom_keepalive_options(opts, target):
++ """Sanity check for ssh.Single options with custom keepalive"""
++
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ keepalive_interval=15,
++ keepalive_count_max=5,
++ **target,
++ )
++
++ assert single.shell._ssh_opts() == ""
++ expected_cmd = (
++ "ssh login1 "
++ "-o KbdInteractiveAuthentication=no -o "
++ "PasswordAuthentication=yes -o ConnectTimeout=65 -o ServerAliveInterval=15 "
++ "-o ServerAliveCountMax=5 -o Port=22 "
++ "-o IdentityFile=/etc/salt/pki/master/ssh/salt-ssh.rsa "
++ "-o User=root date +%s"
++ )
++ assert single.shell._cmd_str("date +%s") == expected_cmd
++
++
++def test_single_opts_disable_keepalive(opts, target):
++ """Sanity check for ssh.Single options with custom keepalive"""
++
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ keepalive=False,
++ **target,
++ )
++
+ assert single.shell._ssh_opts() == ""
+ expected_cmd = (
+ "ssh login1 "
+diff --git a/tests/pytests/unit/client/ssh/test_ssh.py b/tests/pytests/unit/client/ssh/test_ssh.py
+index cece16026c..23223ba8ec 100644
+--- a/tests/pytests/unit/client/ssh/test_ssh.py
++++ b/tests/pytests/unit/client/ssh/test_ssh.py
+@@ -78,6 +78,9 @@ def roster():
+ ("ssh_scan_ports", "test", True),
+ ("ssh_scan_timeout", 1.0, True),
+ ("ssh_timeout", 1, False),
++ ("ssh_keepalive", True, True),
++ ("ssh_keepalive_interval", 30, True),
++ ("ssh_keepalive_count_max", 3, True),
+ ("ssh_log_file", "/tmp/test", True),
+ ("raw_shell", True, True),
+ ("refresh_cache", True, True),
+--
+2.42.0
+
+
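Taken together, the shell.py hunks mean every salt-ssh invocation now carries
ServerAlive options unless keepalive is disabled. A standalone sketch of the
resulting command-line fragment (the function name is illustrative; the shipped
logic lives in salt/client/ssh/shell.py above):

    def keepalive_opts(keepalive=True, interval=60, count_max=3):
        # With the defaults, the connection is torn down after 3 missed
        # probes, i.e. roughly 180 seconds of silence from the target.
        opts = []
        if keepalive:
            opts.append(f"ServerAliveInterval={interval}")
            opts.append(f"ServerAliveCountMax={count_max}")
        return " ".join(f"-o {opt}" for opt in opts)

    print(keepalive_opts())
    # -o ServerAliveInterval=60 -o ServerAliveCountMax=3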
diff --git a/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
new file mode 100644
index 0000000..c978774
--- /dev/null
+++ b/enable-passing-a-unix_socket-for-mysql-returners-bsc.patch
@@ -0,0 +1,68 @@
+From e9d52cb97d619a76355c5aa1d03b733c125c0f22 Mon Sep 17 00:00:00 2001
+From: Maximilian Meister <mmeister@suse.de>
+Date: Thu, 3 May 2018 15:52:23 +0200
+Subject: [PATCH] enable passing a unix_socket for mysql returners
+ (bsc#1091371)
+
+quick fix for:
+ https://bugzilla.suse.com/show_bug.cgi?id=1091371
+
+the upstream patch will go through some bigger refactoring of
+the mysql drivers to be cleaner
+
+this patch should only be temporary and can be dropped again once
+the refactor is done upstream
+
+Signed-off-by: Maximilian Meister <mmeister@suse.de>
+---
+ salt/returners/mysql.py | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py
+index 67b44004ac..a220f11465 100644
+--- a/salt/returners/mysql.py
++++ b/salt/returners/mysql.py
+@@ -17,6 +17,7 @@ config. These are the defaults:
+ mysql.pass: 'salt'
+ mysql.db: 'salt'
+ mysql.port: 3306
++ mysql.unix_socket: '/tmp/mysql.sock'
+
+ SSL is optional. The defaults are set to None. If you do not want to use SSL,
+ either exclude these options or set them to None.
+@@ -42,6 +43,7 @@ optional. The following ssl options are simply for illustration purposes:
+ alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem'
+ alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt'
+ alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key'
++ alternative.mysql.unix_socket: '/tmp/mysql.sock'
+
+ Should you wish the returner data to be cleaned out every so often, set
+ `keep_jobs_seconds` to the number of hours for the jobs to live in the
+@@ -197,6 +199,7 @@ def _get_options(ret=None):
+ "ssl_ca": None,
+ "ssl_cert": None,
+ "ssl_key": None,
++ "unix_socket": "/tmp/mysql.sock",
+ }
+
+ attrs = {
+@@ -208,6 +211,7 @@ def _get_options(ret=None):
+ "ssl_ca": "ssl_ca",
+ "ssl_cert": "ssl_cert",
+ "ssl_key": "ssl_key",
++ "unix_socket": "unix_socket",
+ }
+
+ _options = salt.returners.get_returner_options(
+@@ -266,6 +270,7 @@ def _get_serv(ret=None, commit=False):
+ db=_options.get("db"),
+ port=_options.get("port"),
+ ssl=ssl_options,
++ unix_socket=_options.get("unix_socket"),
+ )
+
+ try:
+--
+2.39.2
+
+
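For reference, the effect of the new option is simply an extra keyword passed
through to the MySQL client library. A hedged sketch of the connection call
after this patch, with illustrative values standing in for the configured ones:

    import MySQLdb  # the returner imports the same driver

    conn = MySQLdb.connect(
        host="salt",
        user="salt",
        passwd="salt",
        db="salt",
        port=3306,
        unix_socket="/tmp/mysql.sock",  # the knob this patch adds
    )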
diff --git a/enhance-cleanup-mechanism-after-salt-bundle-upgrade-.patch b/enhance-cleanup-mechanism-after-salt-bundle-upgrade-.patch
new file mode 100644
index 0000000..4c4e3a3
--- /dev/null
+++ b/enhance-cleanup-mechanism-after-salt-bundle-upgrade-.patch
@@ -0,0 +1,26 @@
+From 994ebca519945c86dc30f1510dff36b3261446c0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+ <psuarezhernandez@suse.com>
+Date: Thu, 10 Oct 2024 11:51:02 +0100
+Subject: [PATCH] Enhance cleanup mechanism after Salt Bundle upgrade
+ (bsc#1228690) (#685)
+
+---
+ pkg/common/venv-salt-minion-postinstall.service | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pkg/common/venv-salt-minion-postinstall.service b/pkg/common/venv-salt-minion-postinstall.service
+index b122d7d6ea..c9db270435 100644
+--- a/pkg/common/venv-salt-minion-postinstall.service
++++ b/pkg/common/venv-salt-minion-postinstall.service
+@@ -2,6 +2,6 @@
+ Description=Clean old environment for venv-salt-minion
+
+ [Service]
+-ExecStart=/bin/sh -c '/usr/lib/venv-salt-minion/bin/post_start_cleanup.sh || :'
++ExecStart=/bin/sh -c 'WAIT_IF_SALT_JOBS=1 /usr/lib/venv-salt-minion/bin/post_start_cleanup.sh || :'
+ Type=oneshot
+
+--
+2.46.1
+
diff --git a/enhance-find_json-garbage-filtering-bsc-1231605-688.patch b/enhance-find_json-garbage-filtering-bsc-1231605-688.patch
new file mode 100644
index 0000000..4dd65ef
--- /dev/null
+++ b/enhance-find_json-garbage-filtering-bsc-1231605-688.patch
@@ -0,0 +1,79 @@
+From a9505da8f4bb2f9a9ef4ee6832197f0749a2c2e6 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Thu, 23 Jan 2025 17:34:48 +0100
+Subject: [PATCH] Enhance find_json garbage filtering (bsc#1231605)
+ (#688)
+
+* Enhance find_json garbage filtering
+
+* Enhance error handling in transactional_update module
+---
+ salt/modules/transactional_update.py | 2 +-
+ salt/utils/json.py | 12 ++++++++++--
+ tests/unit/utils/test_json.py | 5 +++++
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py
+index d6915475f5..32e1eb9cc4 100644
+--- a/salt/modules/transactional_update.py
++++ b/salt/modules/transactional_update.py
+@@ -984,7 +984,7 @@ def call(function, *args, **kwargs):
+ return local.get("return", local)
+ else:
+ return local
+- except ValueError:
++ except (ValueError, AttributeError):
+ return {"result": False, "retcode": 1, "comment": ret_stdout}
+ finally:
+ # Check if reboot is needed
+diff --git a/salt/utils/json.py b/salt/utils/json.py
+index 0845b64694..26cb38cdbe 100644
+--- a/salt/utils/json.py
++++ b/salt/utils/json.py
+@@ -39,6 +39,7 @@ def find_json(raw):
+ # Search for possible starts and ends of the json fragments
+ for ind, _ in enumerate(lines):
+ line = lines[ind].lstrip()
++ line = line[0] if line else line
+ if line == "{" or line == "[":
+ starts.append((ind, line))
+ if line == "}" or line == "]":
+@@ -61,10 +62,17 @@ def find_json(raw):
+ working = "\n".join(lines[start : end + 1])
+ try:
+ ret = json.loads(working)
++ return ret
+ except ValueError:
+- continue
+- if ret:
++ pass
++ # Try filtering non-JSON text right after the last closing curly brace
++ end_str = lines[end].lstrip()[0]
++ working = "\n".join(lines[start : end]) + end_str
++ try:
++ ret = json.loads(working)
+ return ret
++ except ValueError:
++ continue
+
+ # Fall back to old implementation for backward compatibility
+ # expecting json after the text
+diff --git a/tests/unit/utils/test_json.py b/tests/unit/utils/test_json.py
+index b123e7e884..5ea409a705 100644
+--- a/tests/unit/utils/test_json.py
++++ b/tests/unit/utils/test_json.py
+@@ -109,6 +109,11 @@ class JSONTestCase(TestCase):
+ ret = salt.utils.json.find_json(garbage_prepend_json)
+ self.assertDictEqual(ret, expected_ret)
+
++ # Pre-pend garbage right after closing bracket of the JSON
++ garbage_prepend_json = "{}{}".format(test_sample_json.rstrip(), LOREM_IPSUM)
++ ret = salt.utils.json.find_json(garbage_prepend_json)
++ self.assertDictEqual(ret, expected_ret)
++
+ # Test to see if a ValueError is raised if no JSON is passed in
+ self.assertRaises(ValueError, salt.utils.json.find_json, LOREM_IPSUM)
+
+--
+2.47.0
+
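The newly handled case, in short: a closing brace with trailing garbage on the
same line. A quick illustration (assumes a tree with this patch applied on the
import path):

    import salt.utils.json

    raw = '{\n  "local": true\n}Lorem ipsum trailing noise'
    print(salt.utils.json.find_json(raw))
    # {'local': True} - previously this fell through to the legacy
    # scanner and ended in a ValueError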
diff --git a/enhance-openscap-module-add-xccdf_eval-call-386.patch b/enhance-openscap-module-add-xccdf_eval-call-386.patch
new file mode 100644
index 0000000..32e8cf2
--- /dev/null
+++ b/enhance-openscap-module-add-xccdf_eval-call-386.patch
@@ -0,0 +1,425 @@
+From 17452801e950b3f49a9ec7ef444e3d57862cd9bf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+ <psuarezhernandez@suse.com>
+Date: Wed, 7 Jul 2021 15:41:48 +0100
+Subject: [PATCH] Enhance openscap module: add "xccdf_eval" call (#386)
+
+* Enhance openscap module: add xccdf_eval call
+
+* Allow 'tailoring_file' and 'tailoring_id' parameters
+
+* Fix wrong reference to subprocess.PIPE in openscap unit tests
+
+* Add changes suggested by pre-commit
+
+Co-authored-by: Michael Calmer <mc@suse.de>
+
+Fix error handling in openscap module (bsc#1188647) (#409)
+---
+ changelog/59756.added | 1 +
+ salt/modules/openscap.py | 116 +++++++++++++-
+ tests/unit/modules/test_openscap.py | 234 ++++++++++++++++++++++++++++
+ 3 files changed, 350 insertions(+), 1 deletion(-)
+ create mode 100644 changelog/59756.added
+
+diff --git a/changelog/59756.added b/changelog/59756.added
+new file mode 100644
+index 0000000000..a59fb21eef
+--- /dev/null
++++ b/changelog/59756.added
+@@ -0,0 +1 @@
++adding new call for openscap xccdf eval supporting new parameters
+diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py
+index 770c8e7c04..216fd89eef 100644
+--- a/salt/modules/openscap.py
++++ b/salt/modules/openscap.py
+@@ -4,6 +4,7 @@ Module for OpenSCAP Management
+ """
+
+
++import os.path
+ import shlex
+ import shutil
+ import tempfile
+@@ -55,6 +56,117 @@ _OSCAP_EXIT_CODES_MAP = {
+ }
+
+
++def xccdf_eval(xccdffile, ovalfiles=None, **kwargs):
++ """
++ Run ``oscap xccdf eval`` commands on minions.
++ It uses cp.push_dir to upload the generated files to the salt master
++ in the master's minion files cachedir
++ (defaults to ``/var/cache/salt/master/minions/minion-id/files``)
++
++ It needs ``file_recv`` set to ``True`` in the master configuration file.
++
++ xccdffile
++ the path to the xccdf file to evaluate
++
++ ovalfiles
++ additional oval definition files
++
++ profile
++ the name of Profile to be evaluated
++
++ rule
++ the name of a single rule to be evaluated
++
++ oval_results
++ save OVAL results as well (True or False)
++
++ results
++ write XCCDF Results into given file
++
++ report
++ write HTML report into given file
++
++ fetch_remote_resources
++ download remote content referenced by XCCDF (True or False)
++
++ tailoring_file
++ use given XCCDF Tailoring file
++
++ tailoring_id
++ use given DS component as XCCDF Tailoring file
++
++ remediate
++ automatically execute XCCDF fix elements for failed rules.
++ Use of this option is always at your own risk. (True or False)
++
++ CLI Example:
++
++ .. code-block:: bash
++
++ salt '*' openscap.xccdf_eval /usr/share/openscap/scap-yast2sec-xccdf.xml profile=Default
++
++ """
++ success = True
++ error = None
++ upload_dir = None
++ returncode = None
++ if not ovalfiles:
++ ovalfiles = []
++
++ cmd_opts = ["oscap", "xccdf", "eval"]
++ if kwargs.get("oval_results"):
++ cmd_opts.append("--oval-results")
++ if "results" in kwargs:
++ cmd_opts.append("--results")
++ cmd_opts.append(kwargs["results"])
++ if "report" in kwargs:
++ cmd_opts.append("--report")
++ cmd_opts.append(kwargs["report"])
++ if "profile" in kwargs:
++ cmd_opts.append("--profile")
++ cmd_opts.append(kwargs["profile"])
++ if "rule" in kwargs:
++ cmd_opts.append("--rule")
++ cmd_opts.append(kwargs["rule"])
++ if "tailoring_file" in kwargs:
++ cmd_opts.append("--tailoring-file")
++ cmd_opts.append(kwargs["tailoring_file"])
++ if "tailoring_id" in kwargs:
++ cmd_opts.append("--tailoring-id")
++ cmd_opts.append(kwargs["tailoring_id"])
++ if kwargs.get("fetch_remote_resources"):
++ cmd_opts.append("--fetch-remote-resources")
++ if kwargs.get("remediate"):
++ cmd_opts.append("--remediate")
++ cmd_opts.append(xccdffile)
++ cmd_opts.extend(ovalfiles)
++
++ if not os.path.exists(xccdffile):
++ success = False
++ error = "XCCDF File '{}' does not exist".format(xccdffile)
++ for ofile in ovalfiles:
++ if success and not os.path.exists(ofile):
++ success = False
++ error = "Oval File '{}' does not exist".format(ofile)
++
++ if success:
++ tempdir = tempfile.mkdtemp()
++ proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir)
++ (stdoutdata, error) = proc.communicate()
++ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
++ if proc.returncode < 0:
++ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
++ returncode = proc.returncode
++ if success:
++ __salt__["cp.push_dir"](tempdir)
++ upload_dir = tempdir
++ shutil.rmtree(tempdir, ignore_errors=True)
++
++ return dict(
++ success=success, upload_dir=upload_dir, error=error, returncode=returncode
++ )
++
++
+ def xccdf(params):
+ """
+ Run ``oscap xccdf`` commands on minions.
+@@ -92,7 +204,9 @@ def xccdf(params):
+ tempdir = tempfile.mkdtemp()
+ proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
+ (stdoutdata, error) = proc.communicate()
+- success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
++ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False)
++ if proc.returncode < 0:
++ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii')
+ returncode = proc.returncode
+ if success:
+ __salt__["cp.push_dir"](tempdir)
+diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py
+index 045c37f7c9..301c1869ec 100644
+--- a/tests/unit/modules/test_openscap.py
++++ b/tests/unit/modules/test_openscap.py
+@@ -21,6 +21,7 @@ class OpenscapTestCase(TestCase):
+ "salt.modules.openscap.tempfile.mkdtemp",
+ Mock(return_value=self.random_temp_dir),
+ ),
++ patch("salt.modules.openscap.os.path.exists", Mock(return_value=True)),
+ ]
+ for patcher in patchers:
+ self.apply_patch(patcher)
+@@ -211,3 +212,236 @@ class OpenscapTestCase(TestCase):
+ "returncode": None,
+ },
+ )
++
++ def test_new_openscap_xccdf_eval_success(self):
++ with patch(
++ "salt.modules.openscap.Popen",
++ MagicMock(
++ return_value=Mock(
++ **{"returncode": 0, "communicate.return_value": ("", "")}
++ )
++ ),
++ ):
++ response = openscap.xccdf_eval(
++ self.policy_file,
++ profile="Default",
++ oval_results=True,
++ results="results.xml",
++ report="report.html",
++ )
++
++ self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
++ expected_cmd = [
++ "oscap",
++ "xccdf",
++ "eval",
++ "--oval-results",
++ "--results",
++ "results.xml",
++ "--report",
++ "report.html",
++ "--profile",
++ "Default",
++ self.policy_file,
++ ]
++ openscap.Popen.assert_called_once_with(
++ expected_cmd,
++ cwd=openscap.tempfile.mkdtemp.return_value,
++ stderr=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ )
++ openscap.__salt__["cp.push_dir"].assert_called_once_with(
++ self.random_temp_dir
++ )
++ self.assertEqual(openscap.shutil.rmtree.call_count, 1)
++ self.assertEqual(
++ response,
++ {
++ "upload_dir": self.random_temp_dir,
++ "error": "",
++ "success": True,
++ "returncode": 0,
++ },
++ )
++
++ def test_new_openscap_xccdf_eval_success_with_extra_ovalfiles(self):
++ with patch(
++ "salt.modules.openscap.Popen",
++ MagicMock(
++ return_value=Mock(
++ **{"returncode": 0, "communicate.return_value": ("", "")}
++ )
++ ),
++ ):
++ response = openscap.xccdf_eval(
++ self.policy_file,
++ ["/usr/share/xml/another-oval.xml", "/usr/share/xml/oval.xml"],
++ profile="Default",
++ oval_results=True,
++ results="results.xml",
++ report="report.html",
++ )
++
++ self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
++ expected_cmd = [
++ "oscap",
++ "xccdf",
++ "eval",
++ "--oval-results",
++ "--results",
++ "results.xml",
++ "--report",
++ "report.html",
++ "--profile",
++ "Default",
++ self.policy_file,
++ "/usr/share/xml/another-oval.xml",
++ "/usr/share/xml/oval.xml",
++ ]
++ openscap.Popen.assert_called_once_with(
++ expected_cmd,
++ cwd=openscap.tempfile.mkdtemp.return_value,
++ stderr=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ )
++ openscap.__salt__["cp.push_dir"].assert_called_once_with(
++ self.random_temp_dir
++ )
++ self.assertEqual(openscap.shutil.rmtree.call_count, 1)
++ self.assertEqual(
++ response,
++ {
++ "upload_dir": self.random_temp_dir,
++ "error": "",
++ "success": True,
++ "returncode": 0,
++ },
++ )
++
++ def test_new_openscap_xccdf_eval_success_with_failing_rules(self):
++ with patch(
++ "salt.modules.openscap.Popen",
++ MagicMock(
++ return_value=Mock(
++ **{"returncode": 2, "communicate.return_value": ("", "some error")}
++ )
++ ),
++ ):
++ response = openscap.xccdf_eval(
++ self.policy_file,
++ profile="Default",
++ oval_results=True,
++ results="results.xml",
++ report="report.html",
++ )
++
++ self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
++ expected_cmd = [
++ "oscap",
++ "xccdf",
++ "eval",
++ "--oval-results",
++ "--results",
++ "results.xml",
++ "--report",
++ "report.html",
++ "--profile",
++ "Default",
++ self.policy_file,
++ ]
++ openscap.Popen.assert_called_once_with(
++ expected_cmd,
++ cwd=openscap.tempfile.mkdtemp.return_value,
++ stderr=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ )
++ openscap.__salt__["cp.push_dir"].assert_called_once_with(
++ self.random_temp_dir
++ )
++ self.assertEqual(openscap.shutil.rmtree.call_count, 1)
++ self.assertEqual(
++ response,
++ {
++ "upload_dir": self.random_temp_dir,
++ "error": "some error",
++ "success": True,
++ "returncode": 2,
++ },
++ )
++
++ def test_new_openscap_xccdf_eval_success_ignore_unknown_params(self):
++ with patch(
++ "salt.modules.openscap.Popen",
++ MagicMock(
++ return_value=Mock(
++ **{"returncode": 2, "communicate.return_value": ("", "some error")}
++ )
++ ),
++ ):
++ response = openscap.xccdf_eval(
++ "/policy/file",
++ param="Default",
++ profile="Default",
++ oval_results=True,
++ results="results.xml",
++ report="report.html",
++ )
++
++ self.assertEqual(
++ response,
++ {
++ "upload_dir": self.random_temp_dir,
++ "error": "some error",
++ "success": True,
++ "returncode": 2,
++ },
++ )
++ expected_cmd = [
++ "oscap",
++ "xccdf",
++ "eval",
++ "--oval-results",
++ "--results",
++ "results.xml",
++ "--report",
++ "report.html",
++ "--profile",
++ "Default",
++ "/policy/file",
++ ]
++ openscap.Popen.assert_called_once_with(
++ expected_cmd,
++ cwd=openscap.tempfile.mkdtemp.return_value,
++ stderr=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ )
++
++ def test_new_openscap_xccdf_eval_evaluation_error(self):
++ with patch(
++ "salt.modules.openscap.Popen",
++ MagicMock(
++ return_value=Mock(
++ **{
++ "returncode": 1,
++ "communicate.return_value": ("", "evaluation error"),
++ }
++ )
++ ),
++ ):
++ response = openscap.xccdf_eval(
++ self.policy_file,
++ profile="Default",
++ oval_results=True,
++ results="results.xml",
++ report="report.html",
++ )
++
++ self.assertEqual(
++ response,
++ {
++ "upload_dir": None,
++ "error": "evaluation error",
++ "success": False,
++ "returncode": 1,
++ },
++ )
+--
+2.39.2
+
+
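The error-handling change is easiest to see in isolation: oscap exit codes are
now looked up with .get(), so an unmapped return code (negative when oscap is
killed by a signal) no longer raises KeyError. A behaviour sketch with an
illustrative copy of the module's exit-code map:

    _OSCAP_EXIT_CODES_MAP = {0: True, 1: False, 2: True}  # illustrative shape

    def interpret(returncode, error=b""):
        # Unknown codes now mean failure instead of a KeyError traceback.
        success = _OSCAP_EXIT_CODES_MAP.get(returncode, False)
        if returncode < 0:
            error += "\nKilled by signal {}\n".format(returncode).encode("ascii")
        return success, error

    print(interpret(2))   # (True, b'')
    print(interpret(-9))  # (False, b'\nKilled by signal -9\n')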
diff --git a/firewalld-normalize-new-rich-rules-before-comparing-.patch b/firewalld-normalize-new-rich-rules-before-comparing-.patch
new file mode 100644
index 0000000..24f9281
--- /dev/null
+++ b/firewalld-normalize-new-rich-rules-before-comparing-.patch
@@ -0,0 +1,180 @@
+From 522b2331e6584758aeaefbf2d41f0c18cd1113d9 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Tue, 23 Jul 2024 13:01:27 +0200
+Subject: [PATCH] firewalld: normalize new rich rules before comparing
+ to old (bsc#1222684) (#648)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Normalize new rich rules before comparing to old
+
+Firewallcmd rich rule output quotes each
+assigned part of the rich rule, for example:
+rule family="ipv4" source port port="161" ...
+The firewalld module must first normalize
+the user defined rich rules to match the
+firewallcmd output before comparison to
+ensure idempotency.
+
+* Add changelog entry
+
+* Enhance documentation for normalization function
+
+* Add unit tests to cover rich rules normalization
+
+---------
+
+Co-authored-by: Pablo Suárez Hernández <psuarezhernandez@suse.com>
+---
+ changelog/61235.fixed.md | 1 +
+ salt/states/firewalld.py | 38 +++++++++++-
+ tests/pytests/unit/states/test_firewalld.py | 64 +++++++++++++++++++++
+ 3 files changed, 102 insertions(+), 1 deletion(-)
+ create mode 100644 changelog/61235.fixed.md
+ create mode 100644 tests/pytests/unit/states/test_firewalld.py
+
+diff --git a/changelog/61235.fixed.md b/changelog/61235.fixed.md
+new file mode 100644
+index 00000000000..7ae9bb40800
+--- /dev/null
++++ b/changelog/61235.fixed.md
+@@ -0,0 +1 @@
++- firewalld: normalize new rich rules before comparing to old ones
+diff --git a/salt/states/firewalld.py b/salt/states/firewalld.py
+index 534b9dd62df..9ce0bfc61a8 100644
+--- a/salt/states/firewalld.py
++++ b/salt/states/firewalld.py
+@@ -204,7 +204,6 @@ def present(
+ rich_rules=None,
+ prune_rich_rules=False,
+ ):
+-
+ """
+ Ensure a zone has specific attributes.
+
+@@ -378,6 +377,42 @@ def service(name, ports=None, protocols=None):
+ return ret
+
+
++def _normalize_rich_rules(rich_rules):
++ """
++ Make sure rich rules are normalized and attributes
++ are quoted with double quotes so it matches the output
++ from firewall-cmd
++
++ Example:
++
++ rule family="ipv4" source address="192.168.0.0/16" port port=22 protocol=tcp accept
++ rule family="ipv4" source address="192.168.0.0/16" port port='22' protocol=tcp accept
++ rule family='ipv4' source address='192.168.0.0/16' port port='22' protocol=tcp accept
++
++ normalized to:
++
++ rule family="ipv4" source address="192.168.0.0/16" port port="22" protocol="tcp" accept
++ """
++ normalized_rules = []
++ for rich_rule in rich_rules:
++ normalized_rule = ""
++ for cmd in rich_rule.split(" "):
++ cmd_components = cmd.split("=", 1)
++ if len(cmd_components) == 2:
++ assigned_component = cmd_components[1]
++ if not assigned_component.startswith(
++ '"'
++ ) and not assigned_component.endswith('"'):
++ if assigned_component.startswith(
++ "'"
++ ) and assigned_component.endswith("'"):
++ assigned_component = assigned_component[1:-1]
++ cmd_components[1] = f'"{assigned_component}"'
++ normalized_rule = f"{normalized_rule} {'='.join(cmd_components)}"
++ normalized_rules.append(normalized_rule.lstrip())
++ return normalized_rules
++
++
+ def _present(
+ name,
+ block_icmp=None,
+@@ -761,6 +796,7 @@ def _present(
+
+ if rich_rules or prune_rich_rules:
+ rich_rules = rich_rules or []
++ rich_rules = _normalize_rich_rules(rich_rules)
+ try:
+ _current_rich_rules = __salt__["firewalld.get_rich_rules"](
+ name, permanent=True
+diff --git a/tests/pytests/unit/states/test_firewalld.py b/tests/pytests/unit/states/test_firewalld.py
+new file mode 100644
+index 00000000000..0cbc59633bf
+--- /dev/null
++++ b/tests/pytests/unit/states/test_firewalld.py
+@@ -0,0 +1,64 @@
++"""
++ :codeauthor: Hristo Voyvodov
++"""
++
++import pytest
++
++import salt.states.firewalld as firewalld
++from tests.support.mock import MagicMock, patch
++
++
++@pytest.fixture
++def configure_loader_modules():
++ return {firewalld: {"__opts__": {"test": False}}}
++
++
++@pytest.mark.parametrize(
++ "rich_rule",
++ [
++ (
++ [
++ 'rule family="ipv4" source address="192.168.0.0/16" port port=22 protocol=tcp accept'
++ ]
++ ),
++ (
++ [
++ 'rule family="ipv4" source address="192.168.0.0/16" port port=\'22\' protocol=tcp accept'
++ ]
++ ),
++ (
++ [
++ "rule family='ipv4' source address='192.168.0.0/16' port port='22' protocol=tcp accept"
++ ]
++ ),
++ ],
++)
++def test_present_rich_rules_normalized(rich_rule):
++ firewalld_reload_rules = MagicMock(return_value={})
++ firewalld_rich_rules = [
++ 'rule family="ipv4" source address="192.168.0.0/16" port port="22" protocol="tcp" accept',
++ ]
++
++ firewalld_get_zones = MagicMock(
++ return_value=[
++ "block",
++ "public",
++ ]
++ )
++ firewalld_get_masquerade = MagicMock(return_value=False)
++ firewalld_get_rich_rules = MagicMock(return_value=firewalld_rich_rules)
++
++ __salt__ = {
++ "firewalld.reload_rules": firewalld_reload_rules,
++ "firewalld.get_zones": firewalld_get_zones,
++ "firewalld.get_masquerade": firewalld_get_masquerade,
++ "firewalld.get_rich_rules": firewalld_get_rich_rules,
++ }
++ with patch.dict(firewalld.__dict__, {"__salt__": __salt__}):
++ ret = firewalld.present("public", rich_rules=rich_rule)
++ assert ret == {
++ "changes": {},
++ "result": True,
++ "comment": "'public' is already in the desired state.",
++ "name": "public",
++ }
+--
+2.45.2
+
+
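The normalization contract can be exercised standalone. A compact re-statement
of _normalize_rich_rules() for a single rule (illustrative; the shipped version
in salt/states/firewalld.py is the authoritative one):

    def normalize(rule):
        # Double-quote the value of every key=value token, converting
        # single quotes, so user input matches firewall-cmd output.
        tokens = []
        for token in rule.split(" "):
            key, sep, val = token.partition("=")
            if sep and not val.startswith('"') and not val.endswith('"'):
                if val.startswith("'") and val.endswith("'"):
                    val = val[1:-1]
                token = f'{key}="{val}"'
            tokens.append(token)
        return " ".join(tokens)

    print(normalize("rule family='ipv4' port port=22 protocol=tcp accept"))
    # rule family="ipv4" port port="22" protocol="tcp" accept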
diff --git a/fix-bsc-1065792.patch b/fix-bsc-1065792.patch
new file mode 100644
index 0000000..283b400
--- /dev/null
+++ b/fix-bsc-1065792.patch
@@ -0,0 +1,25 @@
+From 42a5e5d1a898d7b8bdb56a94decf525204ebccb8 Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk <bo@suse.de>
+Date: Thu, 14 Dec 2017 16:21:40 +0100
+Subject: [PATCH] Fix bsc#1065792
+
+---
+ salt/states/service.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/salt/states/service.py b/salt/states/service.py
+index 93c7c4fb07..0d8a4efa03 100644
+--- a/salt/states/service.py
++++ b/salt/states/service.py
+@@ -78,6 +78,7 @@ def __virtual__():
+ Only make these states available if a service provider has been detected or
+ assigned for this minion
+ """
++ __salt__._load_all()
+ if "service.start" in __salt__:
+ return __virtualname__
+ else:
+--
+2.39.2
+
+
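Context for the one-liner: __salt__ here is Salt's lazy loading dictionary, and
at __virtual__ time the service execution module may simply not have been
loaded yet, so the "service.start" membership test can miss it. _load_all()
forces eager loading first. A toy model of the idea (not Salt's actual
LazyLoader, which is considerably more involved):

    class LazyFuncs(dict):
        def __init__(self, loader):
            super().__init__()
            self._loader = loader

        def _load_all(self):
            # Populate everything the loader can find, eagerly.
            self.update(self._loader())

    funcs = LazyFuncs(lambda: {"service.start": lambda name: True})
    print("service.start" in funcs)  # False - nothing loaded yet
    funcs._load_all()
    print("service.start" in funcs)  # True - detection now works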
diff --git a/fix-calculation-of-sls-context-vars-when-trailing-do.patch b/fix-calculation-of-sls-context-vars-when-trailing-do.patch
new file mode 100644
index 0000000..690026a
--- /dev/null
+++ b/fix-calculation-of-sls-context-vars-when-trailing-do.patch
@@ -0,0 +1,69 @@
+From 3403a7391df785be31b6fbe401a8229c2007ac19 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+ <psuarezhernandez@suse.com>
+Date: Mon, 2 Oct 2023 10:44:05 +0100
+Subject: [PATCH] Fix calculation of SLS context vars when trailing dots
+ on targeted sls/state (bsc#1213518) (#598)
+
+* Fix calculation of SLS context vars when trailing dots on targeted state
+
+* Add changelog file
+---
+ changelog/63411.fixed.md | 1 +
+ salt/utils/templates.py | 5 +++--
+ tests/unit/utils/test_templates.py | 14 ++++++++++++++
+ 3 files changed, 18 insertions(+), 2 deletions(-)
+ create mode 100644 changelog/63411.fixed.md
+
+diff --git a/changelog/63411.fixed.md b/changelog/63411.fixed.md
+new file mode 100644
+index 0000000000..65340e3652
+--- /dev/null
++++ b/changelog/63411.fixed.md
+@@ -0,0 +1 @@
++Fix calculation of SLS context vars when trailing dots on targetted state
+diff --git a/salt/utils/templates.py b/salt/utils/templates.py
+index 4a8adf2a14..8639ea703e 100644
+--- a/salt/utils/templates.py
++++ b/salt/utils/templates.py
+@@ -113,8 +113,9 @@ def generate_sls_context(tmplpath, sls):
+
+ sls_context = {}
+
+- # Normalize SLS as path.
+- slspath = sls.replace(".", "/")
++ # Normalize SLS as path and remove possible trailing slashes
++ # to prevent matching issues and wrong vars calculation
++ slspath = sls.replace(".", "/").rstrip("/")
+
+ if tmplpath:
+ # Normalize template path
+diff --git a/tests/unit/utils/test_templates.py b/tests/unit/utils/test_templates.py
+index 4ba2f52d7b..264b4ae801 100644
+--- a/tests/unit/utils/test_templates.py
++++ b/tests/unit/utils/test_templates.py
+@@ -320,6 +320,20 @@ class WrapRenderTestCase(TestCase):
+ slspath="foo",
+ )
+
++ def test_generate_sls_context__one_level_init_implicit_with_trailing_dot(self):
++ """generate_sls_context - Basic one level with implicit init.sls with trailing dot"""
++ self._test_generated_sls_context(
++ "/tmp/foo/init.sls",
++ "foo.",
++ tplfile="foo/init.sls",
++ tpldir="foo",
++ tpldot="foo",
++ slsdotpath="foo",
++ slscolonpath="foo",
++ sls_path="foo",
++ slspath="foo",
++ )
++
+ def test_generate_sls_context__one_level_init_explicit(self):
+ """generate_sls_context - Basic one level with explicit init.sls"""
+ self._test_generated_sls_context(
+--
+2.42.0
+
+
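The fix itself is a one-liner worth spelling out: a trailing dot in the
targeted SLS name turned into a trailing slash during normalization and skewed
every derived context variable. After the patch:

    sls = "foo."
    slspath = sls.replace(".", "/").rstrip("/")
    print(slspath)  # "foo" - identical to the result for sls = "foo"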
diff --git a/fix-cve-2023-34049-bsc-1215157.patch b/fix-cve-2023-34049-bsc-1215157.patch
new file mode 100644
index 0000000..82d8736
--- /dev/null
+++ b/fix-cve-2023-34049-bsc-1215157.patch
@@ -0,0 +1,1163 @@
+From b2baafcc96a2807cf7d34374904e1710a4f58b9f Mon Sep 17 00:00:00 2001
+From: Alexander Graul <agraul@suse.com>
+Date: Tue, 31 Oct 2023 11:26:15 +0100
+Subject: [PATCH] Fix CVE-2023-34049 (bsc#1215157)
+
+Backport of https://github.com/saltstack/salt/pull/65482
+---
+ salt/client/ssh/__init__.py | 56 +++-
+ tests/integration/modules/test_ssh.py | 3 +-
+ tests/integration/ssh/test_pre_flight.py | 132 --------
+ .../integration/ssh/test_pre_flight.py | 315 ++++++++++++++++++
+ tests/pytests/unit/client/ssh/test_single.py | 296 +++++++++++++---
+ tests/pytests/unit/client/ssh/test_ssh.py | 110 ++++++
+ 6 files changed, 727 insertions(+), 185 deletions(-)
+ delete mode 100644 tests/integration/ssh/test_pre_flight.py
+ create mode 100644 tests/pytests/integration/ssh/test_pre_flight.py
+
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index b120e0002e8..1e143f9e30c 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -12,9 +12,11 @@ import hashlib
+ import logging
+ import multiprocessing
+ import os
++import pathlib
+ import queue
+ import re
+ import shlex
++import shutil
+ import subprocess
+ import sys
+ import tarfile
+@@ -515,7 +517,14 @@ class SSH(MultiprocessingStateMixin):
+ if target.get("passwd", False) or self.opts["ssh_passwd"]:
+ self._key_deploy_run(host, target, False)
+ return ret
+- if ret[host].get("stderr", "").count("Permission denied"):
++ stderr = ret[host].get("stderr", "")
++ # -failed to upload file- is detecting scp errors
++ # Errors to ignore when Permission denied is in the stderr. For example
++ # scp can get a permission denied on the target host, but they were
++ # able to accurately authenticate against the box
++ ignore_err = ["failed to upload file"]
++ check_err = [x for x in ignore_err if stderr.count(x)]
++ if "Permission denied" in stderr and not check_err:
+ target = self.targets[host]
+ # permission denied, attempt to auto deploy ssh key
+ print(
+@@ -1137,11 +1146,30 @@ class Single:
+ """
+ Run our pre_flight script before running any ssh commands
+ """
+- script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
+-
+- self.shell.send(self.ssh_pre_flight, script)
+-
+- return self.execute_script(script, script_args=self.ssh_pre_flight_args)
++ with tempfile.NamedTemporaryFile() as temp:
++ # ensure we use copyfile to not copy the file attributes
++ # we want to ensure we use the perms set by the secure
++ # NamedTemporaryFile
++ try:
++ shutil.copyfile(self.ssh_pre_flight, temp.name)
++ except OSError as err:
++ return (
++ "",
++ "Could not copy pre flight script to temporary path",
++ 1,
++ )
++ target_script = f".{pathlib.Path(temp.name).name}"
++ log.trace("Copying the pre flight script to target")
++ stdout, stderr, retcode = self.shell.send(temp.name, target_script)
++ if retcode != 0:
++ # We could not copy the script to the target
++ log.error("Could not copy the pre flight script to target")
++ return stdout, stderr, retcode
++
++ log.trace("Executing the pre flight script on target")
++ return self.execute_script(
++ target_script, script_args=self.ssh_pre_flight_args
++ )
+
+ def check_thin_dir(self):
+ """
+@@ -1531,18 +1559,20 @@ ARGS = {arguments}\n'''.format(
+ return self.shell.exec_cmd(cmd_str)
+
+ # Write the shim to a temporary file in the default temp directory
+- with tempfile.NamedTemporaryFile(
+- mode="w+b", prefix="shim_", delete=False
+- ) as shim_tmp_file:
++ with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as shim_tmp_file:
+ shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
+
+ # Copy shim to target system, under $HOME/.
+- target_shim_file = ".{}.{}".format(
+- binascii.hexlify(os.urandom(6)).decode("ascii"), extension
+- )
++ target_shim_file = f".{pathlib.Path(shim_tmp_file.name).name}"
++
+ if self.winrm:
+ target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
+- self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
++ stdout, stderr, retcode = self.shell.send(
++ shim_tmp_file.name, target_shim_file, makedirs=True
++ )
++ if retcode != 0:
++ log.error("Could not copy the shim script to target")
++ return stdout, stderr, retcode
+
+ # Remove our shim file
+ try:
+diff --git a/tests/integration/modules/test_ssh.py b/tests/integration/modules/test_ssh.py
+index 0817877c86b..55586211622 100644
+--- a/tests/integration/modules/test_ssh.py
++++ b/tests/integration/modules/test_ssh.py
+@@ -26,7 +26,8 @@ def check_status():
+ return False
+
+
+-@pytest.mark.windows_whitelisted
++# @pytest.mark.windows_whitelisted
++# De-whitelist windows since it's hanging on the newer windows golden images
+ @pytest.mark.skip_if_binaries_missing("ssh", "ssh-keygen", check_all=True)
+ class SSHModuleTest(ModuleCase):
+ """
+diff --git a/tests/integration/ssh/test_pre_flight.py b/tests/integration/ssh/test_pre_flight.py
+deleted file mode 100644
+index 1598b3d51b5..00000000000
+--- a/tests/integration/ssh/test_pre_flight.py
++++ /dev/null
+@@ -1,132 +0,0 @@
+-"""
+-Test for ssh_pre_flight roster option
+-"""
+-
+-import os
+-
+-import pytest
+-
+-import salt.utils.files
+-from tests.support.case import SSHCase
+-from tests.support.runtests import RUNTIME_VARS
+-
+-
+-class SSHPreFlightTest(SSHCase):
+- """
+- Test ssh_pre_flight roster option
+- """
+-
+- def setUp(self):
+- super().setUp()
+- self.roster = os.path.join(RUNTIME_VARS.TMP, "pre_flight_roster")
+- self.data = {
+- "ssh_pre_flight": os.path.join(RUNTIME_VARS.TMP, "ssh_pre_flight.sh")
+- }
+- self.test_script = os.path.join(
+- RUNTIME_VARS.TMP, "test-pre-flight-script-worked.txt"
+- )
+-
+- def _create_roster(self, pre_flight_script_args=None):
+- data = dict(self.data)
+- if pre_flight_script_args:
+- data["ssh_pre_flight_args"] = pre_flight_script_args
+-
+- self.custom_roster(self.roster, data)
+-
+- with salt.utils.files.fopen(data["ssh_pre_flight"], "w") as fp_:
+- fp_.write("touch {}".format(self.test_script))
+-
+- @pytest.mark.slow_test
+- def test_ssh_pre_flight(self):
+- """
+- test ssh when ssh_pre_flight is set
+- ensure the script runs successfully
+- """
+- self._create_roster()
+- assert self.run_function("test.ping", roster_file=self.roster)
+-
+- assert os.path.exists(self.test_script)
+-
+- @pytest.mark.slow_test
+- def test_ssh_run_pre_flight(self):
+- """
+- test ssh when --pre-flight is passed to salt-ssh
+- to ensure the script runs successfully
+- """
+- self._create_roster()
+- # make sure we previously ran a command so the thin dir exists
+- self.run_function("test.ping", wipe=False)
+- assert not os.path.exists(self.test_script)
+-
+- assert self.run_function(
+- "test.ping", ssh_opts="--pre-flight", roster_file=self.roster, wipe=False
+- )
+- assert os.path.exists(self.test_script)
+-
+- @pytest.mark.slow_test
+- def test_ssh_run_pre_flight_args(self):
+- """
+- test ssh when --pre-flight is passed to salt-ssh
+- to ensure the script runs successfully passing some args
+- """
+- self._create_roster(pre_flight_script_args="foobar test")
+- # make sure we previously ran a command so the thin dir exists
+- self.run_function("test.ping", wipe=False)
+- assert not os.path.exists(self.test_script)
+-
+- assert self.run_function(
+- "test.ping", ssh_opts="--pre-flight", roster_file=self.roster, wipe=False
+- )
+- assert os.path.exists(self.test_script)
+-
+- @pytest.mark.slow_test
+- def test_ssh_run_pre_flight_args_prevent_injection(self):
+- """
+- test ssh when --pre-flight is passed to salt-ssh
+- and evil arguments are used in order to produce shell injection
+- """
+- injected_file = os.path.join(RUNTIME_VARS.TMP, "injection")
+- self._create_roster(
+- pre_flight_script_args="foobar; echo injected > {}".format(injected_file)
+- )
+- # make sure we previously ran a command so the thin dir exists
+- self.run_function("test.ping", wipe=False)
+- assert not os.path.exists(self.test_script)
+- assert not os.path.isfile(injected_file)
+-
+- assert self.run_function(
+- "test.ping", ssh_opts="--pre-flight", roster_file=self.roster, wipe=False
+- )
+-
+- assert not os.path.isfile(
+- injected_file
+- ), "File injection suceeded. This shouldn't happend"
+-
+- @pytest.mark.slow_test
+- def test_ssh_run_pre_flight_failure(self):
+- """
+- test ssh_pre_flight when there is a failure
+- in the script.
+- """
+- self._create_roster()
+- with salt.utils.files.fopen(self.data["ssh_pre_flight"], "w") as fp_:
+- fp_.write("exit 2")
+-
+- ret = self.run_function(
+- "test.ping", ssh_opts="--pre-flight", roster_file=self.roster, wipe=False
+- )
+- assert ret["retcode"] == 2
+-
+- def tearDown(self):
+- """
+- make sure to clean up any old ssh directories
+- """
+- files = [
+- self.roster,
+- self.data["ssh_pre_flight"],
+- self.test_script,
+- os.path.join(RUNTIME_VARS.TMP, "injection"),
+- ]
+- for fp_ in files:
+- if os.path.exists(fp_):
+- os.remove(fp_)
+diff --git a/tests/pytests/integration/ssh/test_pre_flight.py b/tests/pytests/integration/ssh/test_pre_flight.py
+new file mode 100644
+index 00000000000..09c65d29430
+--- /dev/null
++++ b/tests/pytests/integration/ssh/test_pre_flight.py
+@@ -0,0 +1,315 @@
++"""
++Test for ssh_pre_flight roster option
++"""
++
++try:
++ import grp
++ import pwd
++except ImportError:
++ # windows stacktraces on import of these modules
++ pass
++import os
++import pathlib
++import shutil
++import subprocess
++
++import pytest
++import yaml
++from saltfactories.utils import random_string
++
++import salt.utils.files
++
++pytestmark = pytest.mark.skip_on_windows(reason="Salt-ssh not available on Windows")
++
++
++def _custom_roster(roster_file, roster_data):
++ with salt.utils.files.fopen(roster_file, "r") as fp:
++ data = salt.utils.yaml.safe_load(fp)
++ for key, item in roster_data.items():
++ data["localhost"][key] = item
++ with salt.utils.files.fopen(roster_file, "w") as fp:
++ yaml.safe_dump(data, fp)
++
++
++@pytest.fixture
++def _create_roster(salt_ssh_roster_file, tmp_path):
++ ret = {}
++ ret["roster"] = salt_ssh_roster_file
++ ret["data"] = {"ssh_pre_flight": str(tmp_path / "ssh_pre_flight.sh")}
++ ret["test_script"] = str(tmp_path / "test-pre-flight-script-worked.txt")
++ ret["thin_dir"] = tmp_path / "thin_dir"
++
++ with salt.utils.files.fopen(salt_ssh_roster_file, "r") as fp:
++ data = salt.utils.yaml.safe_load(fp)
++ pre_flight_script = ret["data"]["ssh_pre_flight"]
++ data["localhost"]["ssh_pre_flight"] = pre_flight_script
++ data["localhost"]["thin_dir"] = str(ret["thin_dir"])
++ with salt.utils.files.fopen(salt_ssh_roster_file, "w") as fp:
++ yaml.safe_dump(data, fp)
++
++ with salt.utils.files.fopen(pre_flight_script, "w") as fp:
++ fp.write("touch {}".format(ret["test_script"]))
++
++ yield ret
++ if ret["thin_dir"].exists():
++ shutil.rmtree(ret["thin_dir"])
++
++
++@pytest.mark.slow_test
++def test_ssh_pre_flight(salt_ssh_cli, caplog, _create_roster):
++ """
++ test ssh when ssh_pre_flight is set
++ ensure the script runs successfully
++ """
++ ret = salt_ssh_cli.run("test.ping")
++ assert ret.returncode == 0
++
++ assert pathlib.Path(_create_roster["test_script"]).exists()
++
++
++@pytest.mark.slow_test
++def test_ssh_run_pre_flight(salt_ssh_cli, _create_roster):
++ """
++ test ssh when --pre-flight is passed to salt-ssh
++ to ensure the script runs successfully
++ """
++ # make sure we previously ran a command so the thin dir exists
++ ret = salt_ssh_cli.run("test.ping")
++ assert pathlib.Path(_create_roster["test_script"]).exists()
++
++ # Now remove the script to ensure pre_flight doesn't run
++ # without --pre-flight
++ pathlib.Path(_create_roster["test_script"]).unlink()
++
++ assert salt_ssh_cli.run("test.ping").returncode == 0
++ assert not pathlib.Path(_create_roster["test_script"]).exists()
++
++ # Now ensure the pre flight script runs when --pre-flight is passed
++ ret = salt_ssh_cli.run(
++ "test.ping",
++ "--pre-flight",
++ )
++ assert ret.returncode == 0
++ assert pathlib.Path(_create_roster["test_script"]).exists()
++
++
++@pytest.mark.slow_test
++def test_ssh_run_pre_flight_args(salt_ssh_cli, _create_roster):
++ """
++ test ssh when --pre-flight is passed to salt-ssh
++ to ensure the script runs successfully passing some args
++ """
++ _custom_roster(salt_ssh_cli.roster_file, {"ssh_pre_flight_args": "foobar test"})
++ # Create pre_flight script that accepts args
++ test_script = _create_roster["test_script"]
++ test_script_1 = pathlib.Path(test_script + "-foobar")
++ test_script_2 = pathlib.Path(test_script + "-test")
++ with salt.utils.files.fopen(_create_roster["data"]["ssh_pre_flight"], "w") as fp:
++ fp.write(
++ f"""
++ touch {str(test_script)}-$1
++ touch {str(test_script)}-$2
++ """
++ )
++ ret = salt_ssh_cli.run("test.ping")
++ assert ret.returncode == 0
++ assert test_script_1.exists()
++ assert test_script_2.exists()
++ pathlib.Path(test_script_1).unlink()
++ pathlib.Path(test_script_2).unlink()
++
++ ret = salt_ssh_cli.run("test.ping")
++ assert ret.returncode == 0
++ assert not test_script_1.exists()
++ assert not test_script_2.exists()
++
++ ret = salt_ssh_cli.run(
++ "test.ping",
++ "--pre-flight",
++ )
++ assert ret.returncode == 0
++ assert test_script_1.exists()
++ assert test_script_2.exists()
++
++
++@pytest.mark.slow_test
++def test_ssh_run_pre_flight_args_prevent_injection(
++ salt_ssh_cli, _create_roster, tmp_path
++):
++ """
++ test ssh when --pre-flight is passed to salt-ssh
++ and evil arguments are used in order to produce shell injection
++ """
++ injected_file = tmp_path / "injection"
++ _custom_roster(
++ salt_ssh_cli.roster_file,
++ {"ssh_pre_flight_args": f"foobar; echo injected > {str(injected_file)}"},
++ )
++ # Create pre_flight script that accepts args
++ test_script = _create_roster["test_script"]
++ test_script_1 = pathlib.Path(test_script + "-echo")
++ test_script_2 = pathlib.Path(test_script + "-foobar;")
++ with salt.utils.files.fopen(_create_roster["data"]["ssh_pre_flight"], "w") as fp:
++ fp.write(
++ f"""
++ touch {str(test_script)}-$1
++ touch {str(test_script)}-$2
++ """
++ )
++
++ # make sure we previously ran a command so the thin dir exists
++ ret = salt_ssh_cli.run("test.ping")
++ assert ret.returncode == 0
++ assert test_script_1.exists()
++ assert test_script_2.exists()
++ test_script_1.unlink()
++ test_script_2.unlink()
++ assert not injected_file.is_file()
++
++ ret = salt_ssh_cli.run(
++ "test.ping",
++ "--pre-flight",
++ )
++ assert ret.returncode == 0
++
++ assert test_script_1.exists()
++ assert test_script_2.exists()
++ assert not pathlib.Path(
++ injected_file
++ ).is_file(), "File injection suceeded. This shouldn't happend"
++
++
++@pytest.mark.flaky(max_runs=4)
++@pytest.mark.slow_test
++def test_ssh_run_pre_flight_failure(salt_ssh_cli, _create_roster):
++ """
++ test ssh_pre_flight when there is a failure
++ in the script.
++ """
++ with salt.utils.files.fopen(_create_roster["data"]["ssh_pre_flight"], "w") as fp_:
++ fp_.write("exit 2")
++
++ ret = salt_ssh_cli.run(
++ "test.ping",
++ "--pre-flight",
++ )
++ assert ret.data["retcode"] == 2
++
++
++@pytest.fixture
++def account():
++ username = random_string("test-account-", uppercase=False)
++ with pytest.helpers.create_account(username=username) as account:
++ yield account
++
++
++@pytest.mark.slow_test
++def test_ssh_pre_flight_script(salt_ssh_cli, caplog, _create_roster, tmp_path, account):
++ """
++ Test to ensure user cannot create and run a script
++ with the expected pre_flight script path on target.
++ """
++ try:
++ script = pathlib.Path.home() / "hacked"
++ tmp_preflight = pathlib.Path("/tmp", "ssh_pre_flight.sh")
++ tmp_preflight.write_text(f"touch {script}")
++ os.chown(tmp_preflight, account.info.uid, account.info.gid)
++ ret = salt_ssh_cli.run("test.ping")
++ assert not script.is_file()
++ assert ret.returncode == 0
++ assert ret.stdout == '{\n"localhost": true\n}\n'
++ finally:
++ for _file in [script, tmp_preflight]:
++ if _file.is_file():
++ _file.unlink()
++
++
++def demote(user_uid, user_gid):
++ def result():
++ # os.setgid does not remove group membership, so we drop all groups here to make the process really non-root
++ os.setgroups([])
++ os.setgid(user_gid)
++ os.setuid(user_uid)
++
++ return result
++
++
++@pytest.mark.slow_test
++def test_ssh_pre_flight_perms(salt_ssh_cli, caplog, _create_roster, account):
++ """
++ Test to ensure standard user cannot run pre flight script
++ on target when user sets wrong permissions (777) on
++ ssh_pre_flight script.
++ """
++ try:
++ script = pathlib.Path("/tmp", "itworked")
++ preflight = pathlib.Path("/ssh_pre_flight.sh")
++ preflight.write_text(f"touch {str(script)}")
++ tmp_preflight = pathlib.Path("/tmp", preflight.name)
++
++ _custom_roster(salt_ssh_cli.roster_file, {"ssh_pre_flight": str(preflight)})
++ preflight.chmod(0o0777)
++ run_script = pathlib.Path("/run_script")
++ run_script.write_text(
++ f"""
++ x=1
++ while [ $x -le 200000 ]; do
++ SCRIPT=`bash {str(tmp_preflight)} 2> /dev/null; echo $?`
++ if [ ${{SCRIPT}} == 0 ]; then
++ break
++ fi
++ x=$(( $x + 1 ))
++ done
++ """
++ )
++ run_script.chmod(0o0777)
++ # pylint: disable=W1509
++ ret = subprocess.Popen(
++ ["sh", f"{run_script}"],
++ preexec_fn=demote(account.info.uid, account.info.gid),
++ stdout=None,
++ stderr=None,
++ stdin=None,
++ universal_newlines=True,
++ )
++ # pylint: enable=W1509
++ ret = salt_ssh_cli.run("test.ping")
++ assert ret.returncode == 0
++
++ # Let's make sure a user other than root
++ # didn't run the script
++ assert os.stat(script).st_uid != account.info.uid
++ assert script.is_file()
++ finally:
++ for _file in [script, preflight, tmp_preflight, run_script]:
++ if _file.is_file():
++ _file.unlink()
++
++
++@pytest.mark.slow_test
++def test_ssh_run_pre_flight_target_file_perms(salt_ssh_cli, _create_roster, tmp_path):
++ """
++ test ssh_pre_flight to ensure the target pre flight script
++ has the correct perms
++ """
++ perms_file = tmp_path / "perms"
++ with salt.utils.files.fopen(_create_roster["data"]["ssh_pre_flight"], "w") as fp_:
++ fp_.write(
++ f"""
++ SCRIPT_NAME=$0
++ stat -L -c "%a %G %U" $SCRIPT_NAME > {perms_file}
++ """
++ )
++
++ ret = salt_ssh_cli.run(
++ "test.ping",
++ "--pre-flight",
++ )
++ assert ret.returncode == 0
++ with salt.utils.files.fopen(perms_file) as fp:
++ data = fp.read()
++ assert data.split()[0] == "600"
++ uid = os.getuid()
++ gid = os.getgid()
++ assert data.split()[1] == grp.getgrgid(gid).gr_name
++ assert data.split()[2] == pwd.getpwuid(uid).pw_name
+diff --git a/tests/pytests/unit/client/ssh/test_single.py b/tests/pytests/unit/client/ssh/test_single.py
+index f97519d5cc2..c88a1c2127f 100644
+--- a/tests/pytests/unit/client/ssh/test_single.py
++++ b/tests/pytests/unit/client/ssh/test_single.py
+@@ -1,6 +1,5 @@
+-import os
++import logging
+ import re
+-import tempfile
+ from textwrap import dedent
+
+ import pytest
+@@ -16,6 +15,8 @@ import salt.utils.yaml
+ from salt.client import ssh
+ from tests.support.mock import MagicMock, call, patch
+
++log = logging.getLogger(__name__)
++
+
+ @pytest.fixture
+ def opts(tmp_path):
+@@ -59,7 +60,7 @@ def test_single_opts(opts, target):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ assert single.shell._ssh_opts() == ""
+@@ -87,7 +88,7 @@ def test_run_with_pre_flight(opts, target, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("Success", "", 0)
+@@ -122,7 +123,7 @@ def test_run_with_pre_flight_with_args(opts, target, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("Success", "foobar", 0)
+@@ -156,7 +157,7 @@ def test_run_with_pre_flight_stderr(opts, target, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("", "Error running script", 1)
+@@ -190,7 +191,7 @@ def test_run_with_pre_flight_script_doesnot_exist(opts, target, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("Success", "", 0)
+@@ -224,7 +225,7 @@ def test_run_with_pre_flight_thin_dir_exists(opts, target, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("", "", 0)
+@@ -242,6 +243,39 @@ def test_run_with_pre_flight_thin_dir_exists(opts, target, tmp_path):
+ assert ret == cmd_ret
+
+
++def test_run_ssh_pre_flight(opts, target, tmp_path):
++ """
++ test Single.run_ssh_pre_flight function
++ """
++ target["ssh_pre_flight"] = str(tmp_path / "script.sh")
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ **target,
++ )
++
++ cmd_ret = ("Success", "", 0)
++ mock_flight = MagicMock(return_value=cmd_ret)
++ mock_cmd = MagicMock(return_value=cmd_ret)
++ patch_flight = patch("salt.client.ssh.Single.run_ssh_pre_flight", mock_flight)
++ patch_cmd = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
++ patch_exec_cmd = patch(
++ "salt.client.ssh.shell.Shell.exec_cmd", return_value=("", "", 1)
++ )
++ patch_os = patch("os.path.exists", side_effect=[True])
++
++ with patch_os, patch_flight, patch_cmd, patch_exec_cmd:
++ ret = single.run()
++ mock_cmd.assert_called()
++ mock_flight.assert_called()
++ assert ret == cmd_ret
++
++
+ def test_execute_script(opts, target, tmp_path):
+ """
+ test Single.execute_script()
+@@ -255,7 +289,7 @@ def test_execute_script(opts, target, tmp_path):
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+ winrm=False,
+- **target
++ **target,
+ )
+
+ exp_ret = ("Success", "", 0)
+@@ -273,7 +307,7 @@ def test_execute_script(opts, target, tmp_path):
+ ] == mock_cmd.call_args_list
+
+
+-def test_shim_cmd(opts, target):
++def test_shim_cmd(opts, target, tmp_path):
+ """
+ test Single.shim_cmd()
+ """
+@@ -287,7 +321,7 @@ def test_shim_cmd(opts, target):
+ mine=False,
+ winrm=False,
+ tty=True,
+- **target
++ **target,
+ )
+
+ exp_ret = ("Success", "", 0)
+@@ -295,21 +329,24 @@ def test_shim_cmd(opts, target):
+ patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
+ patch_send = patch("salt.client.ssh.shell.Shell.send", return_value=("", "", 0))
+ patch_rand = patch("os.urandom", return_value=b"5\xd9l\xca\xc2\xff")
++ tmp_file = tmp_path / "tmp_file"
++ mock_tmp = MagicMock()
++ patch_tmp = patch("tempfile.NamedTemporaryFile", mock_tmp)
++ mock_tmp.return_value.__enter__.return_value.name = tmp_file
+
+- with patch_cmd, patch_rand, patch_send:
++ with patch_cmd, patch_tmp, patch_send:
+ ret = single.shim_cmd(cmd_str="echo test")
+ assert ret == exp_ret
+ assert [
+- call("/bin/sh '.35d96ccac2ff.py'"),
+- call("rm '.35d96ccac2ff.py'"),
++ call(f"/bin/sh '.{tmp_file.name}'"),
++ call(f"rm '.{tmp_file.name}'"),
+ ] == mock_cmd.call_args_list
+
+
+-def test_run_ssh_pre_flight(opts, target, tmp_path):
++def test_shim_cmd_copy_fails(opts, target, caplog):
+ """
+- test Single.run_ssh_pre_flight
++ test Single.shim_cmd() when copying the file fails
+ """
+- target["ssh_pre_flight"] = str(tmp_path / "script.sh")
+ single = ssh.Single(
+ opts,
+ opts["argv"],
+@@ -320,24 +357,202 @@ def test_run_ssh_pre_flight(opts, target, tmp_path):
+ mine=False,
+ winrm=False,
+ tty=True,
+- **target
++ **target,
+ )
+
+- exp_ret = ("Success", "", 0)
+- mock_cmd = MagicMock(return_value=exp_ret)
++ ret_cmd = ("Success", "", 0)
++ mock_cmd = MagicMock(return_value=ret_cmd)
+ patch_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_cmd)
+- patch_send = patch("salt.client.ssh.shell.Shell.send", return_value=exp_ret)
+- exp_tmp = os.path.join(
+- tempfile.gettempdir(), os.path.basename(target["ssh_pre_flight"])
++ ret_send = ("", "General error in file copy", 1)
++ patch_send = patch("salt.client.ssh.shell.Shell.send", return_value=ret_send)
++ patch_rand = patch("os.urandom", return_value=b"5\xd9l\xca\xc2\xff")
++
++ with patch_cmd, patch_rand, patch_send:
++ ret = single.shim_cmd(cmd_str="echo test")
++ assert ret == ret_send
++ assert "Could not copy the shim script to target" in caplog.text
++ mock_cmd.assert_not_called()
++
++
++def test_run_ssh_pre_flight_no_connect(opts, target, tmp_path, caplog):
++ """
++ test Single.run_ssh_pre_flight when you
++ cannot connect to the target
++ """
++ pre_flight = tmp_path / "script.sh"
++ pre_flight.write_text("")
++ target["ssh_pre_flight"] = str(pre_flight)
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ winrm=False,
++ tty=True,
++ **target,
+ )
++ mock_exec_cmd = MagicMock(return_value=("", "", 1))
++ patch_exec_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_exec_cmd)
++ tmp_file = tmp_path / "tmp_file"
++ mock_tmp = MagicMock()
++ patch_tmp = patch("tempfile.NamedTemporaryFile", mock_tmp)
++ mock_tmp.return_value.__enter__.return_value.name = tmp_file
++ ret_send = (
++ "",
++ "ssh: connect to host 192.168.1.186 port 22: No route to host\nscp: Connection closed\n",
++ 255,
++ )
++ send_mock = MagicMock(return_value=ret_send)
++ patch_send = patch("salt.client.ssh.shell.Shell.send", send_mock)
++
++ with caplog.at_level(logging.TRACE):
++ with patch_send, patch_exec_cmd, patch_tmp:
++ ret = single.run_ssh_pre_flight()
++ assert "Copying the pre flight script" in caplog.text
++ assert "Could not copy the pre flight script to target" in caplog.text
++ assert ret == ret_send
++ assert send_mock.call_args_list[0][0][0] == tmp_file
++ target_script = send_mock.call_args_list[0][0][1]
++ assert re.search(r".[a-z0-9]+", target_script)
++ mock_exec_cmd.assert_not_called()
++
++
++def test_run_ssh_pre_flight_permission_denied(opts, target, tmp_path):
++ """
++ test Single.run_ssh_pre_flight when you
++ cannot copy script to the target due to
++ a permission denied error
++ """
++ pre_flight = tmp_path / "script.sh"
++ pre_flight.write_text("")
++ target["ssh_pre_flight"] = str(pre_flight)
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ winrm=False,
++ tty=True,
++ **target,
++ )
++ mock_exec_cmd = MagicMock(return_value=("", "", 1))
++ patch_exec_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_exec_cmd)
++ tmp_file = tmp_path / "tmp_file"
++ mock_tmp = MagicMock()
++ patch_tmp = patch("tempfile.NamedTemporaryFile", mock_tmp)
++ mock_tmp.return_value.__enter__.return_value.name = tmp_file
++ ret_send = (
++ "",
++ 'scp: dest open "/tmp/preflight.sh": Permission denied\nscp: failed to upload file /etc/salt/preflight.sh to /tmp/preflight.sh\n',
++ 255,
++ )
++ send_mock = MagicMock(return_value=ret_send)
++ patch_send = patch("salt.client.ssh.shell.Shell.send", send_mock)
+
+- with patch_cmd, patch_send:
++ with patch_send, patch_exec_cmd, patch_tmp:
+ ret = single.run_ssh_pre_flight()
+- assert ret == exp_ret
+- assert [
+- call("/bin/sh '{}'".format(exp_tmp)),
+- call("rm '{}'".format(exp_tmp)),
+- ] == mock_cmd.call_args_list
++ assert ret == ret_send
++ assert send_mock.call_args_list[0][0][0] == tmp_file
++ target_script = send_mock.call_args_list[0][0][1]
++ assert re.search(r".[a-z0-9]+", target_script)
++ mock_exec_cmd.assert_not_called()
++
++
++def test_run_ssh_pre_flight_connect(opts, target, tmp_path, caplog):
++ """
++ test Single.run_ssh_pre_flight when you
++ can connect to the target
++ """
++ pre_flight = tmp_path / "script.sh"
++ pre_flight.write_text("")
++ target["ssh_pre_flight"] = str(pre_flight)
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ winrm=False,
++ tty=True,
++ **target,
++ )
++ ret_exec_cmd = ("", "", 1)
++ mock_exec_cmd = MagicMock(return_value=ret_exec_cmd)
++ patch_exec_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_exec_cmd)
++ tmp_file = tmp_path / "tmp_file"
++ mock_tmp = MagicMock()
++ patch_tmp = patch("tempfile.NamedTemporaryFile", mock_tmp)
++ mock_tmp.return_value.__enter__.return_value.name = tmp_file
++ ret_send = (
++ "",
++ "\rroot@192.168.1.187's password: \n\rpreflight.sh 0% 0 0.0KB/s --:-- ETA\rpreflight.sh 100% 20 2.7KB/s 00:00 \n",
++ 0,
++ )
++ send_mock = MagicMock(return_value=ret_send)
++ patch_send = patch("salt.client.ssh.shell.Shell.send", send_mock)
++
++ with caplog.at_level(logging.TRACE):
++ with patch_send, patch_exec_cmd, patch_tmp:
++ ret = single.run_ssh_pre_flight()
++
++ assert "Executing the pre flight script on target" in caplog.text
++ assert ret == ret_exec_cmd
++ assert send_mock.call_args_list[0][0][0] == tmp_file
++ target_script = send_mock.call_args_list[0][0][1]
++ assert re.search(r".[a-z0-9]+", target_script)
++ mock_exec_cmd.assert_called()
++
++
++def test_run_ssh_pre_flight_shutil_fails(opts, target, tmp_path):
++ """
++ test Single.run_ssh_pre_flight when the script cannot
++ be copied with shutil.copyfile
++ """
++ pre_flight = tmp_path / "script.sh"
++ pre_flight.write_text("")
++ target["ssh_pre_flight"] = str(pre_flight)
++ single = ssh.Single(
++ opts,
++ opts["argv"],
++ "localhost",
++ mods={},
++ fsclient=None,
++ thin=salt.utils.thin.thin_path(opts["cachedir"]),
++ mine=False,
++ winrm=False,
++ tty=True,
++ **target,
++ )
++ ret_exec_cmd = ("", "", 1)
++ mock_exec_cmd = MagicMock(return_value=ret_exec_cmd)
++ patch_exec_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_exec_cmd)
++ tmp_file = tmp_path / "tmp_file"
++ mock_tmp = MagicMock()
++ patch_tmp = patch("tempfile.NamedTemporaryFile", mock_tmp)
++ mock_tmp.return_value.__enter__.return_value.name = tmp_file
++ send_mock = MagicMock()
++ mock_shutil = MagicMock(side_effect=IOError("Permission Denied"))
++ patch_shutil = patch("shutil.copyfile", mock_shutil)
++ patch_send = patch("salt.client.ssh.shell.Shell.send", send_mock)
++
++ with patch_send, patch_exec_cmd, patch_tmp, patch_shutil:
++ ret = single.run_ssh_pre_flight()
++
++ assert ret == (
++ "",
++ "Could not copy pre flight script to temporary path",
++ 1,
++ )
++ mock_exec_cmd.assert_not_called()
++ send_mock.assert_not_called()
+
+
+ @pytest.mark.skip_on_windows(reason="SSH_PY_SHIM not set on windows")
+@@ -355,7 +570,7 @@ def test_cmd_run_set_path(opts, target):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ ret = single._cmd_str()
+@@ -376,7 +591,7 @@ def test_cmd_run_not_set_path(opts, target):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ ret = single._cmd_str()
+@@ -395,7 +610,7 @@ def test_cmd_block_python_version_error(opts, target):
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+ winrm=False,
+- **target
++ **target,
+ )
+ mock_shim = MagicMock(
+ return_value=(("", "ERROR: Unable to locate appropriate python command\n", 10))
+@@ -434,7 +649,9 @@ def test_run_with_pre_flight_args(opts, target, test_opts, tmp_path):
+ and script successfully runs
+ """
+ opts["ssh_run_pre_flight"] = True
+- target["ssh_pre_flight"] = str(tmp_path / "script.sh")
++ pre_flight_script = tmp_path / "script.sh"
++ pre_flight_script.write_text("")
++ target["ssh_pre_flight"] = str(pre_flight_script)
+
+ if test_opts[0] is not None:
+ target["ssh_pre_flight_args"] = test_opts[0]
+@@ -448,7 +665,7 @@ def test_run_with_pre_flight_args(opts, target, test_opts, tmp_path):
+ fsclient=None,
+ thin=salt.utils.thin.thin_path(opts["cachedir"]),
+ mine=False,
+- **target
++ **target,
+ )
+
+ cmd_ret = ("Success", "", 0)
+@@ -456,14 +673,15 @@ def test_run_with_pre_flight_args(opts, target, test_opts, tmp_path):
+ mock_exec_cmd = MagicMock(return_value=("", "", 0))
+ patch_cmd = patch("salt.client.ssh.Single.cmd_block", mock_cmd)
+ patch_exec_cmd = patch("salt.client.ssh.shell.Shell.exec_cmd", mock_exec_cmd)
+- patch_shell_send = patch("salt.client.ssh.shell.Shell.send", return_value=None)
++ patch_shell_send = patch(
++ "salt.client.ssh.shell.Shell.send", return_value=("", "", 0)
++ )
+ patch_os = patch("os.path.exists", side_effect=[True])
+
+ with patch_os, patch_cmd, patch_exec_cmd, patch_shell_send:
+- ret = single.run()
+- assert mock_exec_cmd.mock_calls[0].args[
+- 0
+- ] == "/bin/sh '/tmp/script.sh'{}".format(expected_args)
++ single.run()
++ script_args = mock_exec_cmd.mock_calls[0].args[0]
++ assert re.search(r"\/bin\/sh '.[a-z0-9]+", script_args)
+
+
+ @pytest.mark.slow_test
+diff --git a/tests/pytests/unit/client/ssh/test_ssh.py b/tests/pytests/unit/client/ssh/test_ssh.py
+index 377aad9998c..cece16026cf 100644
+--- a/tests/pytests/unit/client/ssh/test_ssh.py
++++ b/tests/pytests/unit/client/ssh/test_ssh.py
+@@ -339,3 +339,113 @@ def test_extra_filerefs(tmp_path, opts):
+ with patch("salt.roster.get_roster_file", MagicMock(return_value=roster)):
+ ssh_obj = client._prep_ssh(**ssh_opts)
+ assert ssh_obj.opts.get("extra_filerefs", None) == "salt://foobar"
++
++
++def test_key_deploy_permission_denied_scp(tmp_path, opts):
++ """
++ test "key_deploy" function when
++ permission denied authentication error
++ when attempting to use scp to copy file
++ to target
++ """
++ host = "localhost"
++ passwd = "password"
++ usr = "ssh-usr"
++ opts["ssh_user"] = usr
++ opts["tgt"] = host
++
++ ssh_ret = {
++ host: {
++ "stdout": "\rroot@192.168.1.187's password: \n\rroot@192.168.1.187's password: \n\rroot@192.168.1.187's password: \n",
++ "stderr": "Permission denied, please try again.\nPermission denied, please try again.\nroot@192.168.1.187: Permission denied (publickey,gssapi-keyex,gssapi-with-micimport pudb; pu.dbassword).\nscp: Connection closed\n",
++ "retcode": 255,
++ }
++ }
++ key_run_ret = {
++ "localhost": {
++ "jid": "20230922155652279959",
++ "return": "test",
++ "retcode": 0,
++ "id": "test",
++ "fun": "cmd.run",
++ "fun_args": ["echo test"],
++ }
++ }
++ patch_roster_file = patch("salt.roster.get_roster_file", MagicMock(return_value=""))
++ with patch_roster_file:
++ client = ssh.SSH(opts)
++ patch_input = patch("builtins.input", side_effect=["y"])
++ patch_getpass = patch("getpass.getpass", return_value=["password"])
++ mock_key_run = MagicMock(return_value=key_run_ret)
++ patch_key_run = patch("salt.client.ssh.SSH._key_deploy_run", mock_key_run)
++ with patch_input, patch_getpass, patch_key_run:
++ ret = client.key_deploy(host, ssh_ret)
++ assert mock_key_run.call_args_list[0][0] == (
++ host,
++ {"passwd": [passwd], "host": host, "user": usr},
++ True,
++ )
++ assert ret == key_run_ret
++ assert mock_key_run.call_count == 1
++
++
++def test_key_deploy_permission_denied_file_scp(tmp_path, opts):
++ """
++ test "key_deploy" function when permission denied
++ due to not having access to copy the file to the target
++ We do not want to deploy the key, because this is not
++ an authentication to the target error.
++ """
++ host = "localhost"
++ passwd = "password"
++ usr = "ssh-usr"
++ opts["ssh_user"] = usr
++ opts["tgt"] = host
++
++ mock_key_run = MagicMock(return_value=False)
++ patch_key_run = patch("salt.client.ssh.SSH._key_deploy_run", mock_key_run)
++
++ ssh_ret = {
++ "localhost": {
++ "stdout": "",
++ "stderr": 'scp: dest open "/tmp/preflight.sh": Permission denied\nscp: failed to upload file /etc/salt/preflight.sh to /tmp/preflight.sh\n',
++ "retcode": 1,
++ }
++ }
++ patch_roster_file = patch("salt.roster.get_roster_file", MagicMock(return_value=""))
++ with patch_roster_file:
++ client = ssh.SSH(opts)
++ ret = client.key_deploy(host, ssh_ret)
++ assert ret == ssh_ret
++ assert mock_key_run.call_count == 0
++
++
++def test_key_deploy_no_permission_denied(tmp_path, opts):
++ """
++ test "key_deploy" function when no permission denied
++ is returned
++ """
++ host = "localhost"
++ passwd = "password"
++ usr = "ssh-usr"
++ opts["ssh_user"] = usr
++ opts["tgt"] = host
++
++ mock_key_run = MagicMock(return_value=False)
++ patch_key_run = patch("salt.client.ssh.SSH._key_deploy_run", mock_key_run)
++ ssh_ret = {
++ "localhost": {
++ "jid": "20230922161937998385",
++ "return": "test",
++ "retcode": 0,
++ "id": "test",
++ "fun": "cmd.run",
++ "fun_args": ["echo test"],
++ }
++ }
++ patch_roster_file = patch("salt.roster.get_roster_file", MagicMock(return_value=""))
++ with patch_roster_file:
++ client = ssh.SSH(opts)
++ ret = client.key_deploy(host, ssh_ret)
++ assert ret == ssh_ret
++ assert mock_key_run.call_count == 0
+--
+2.42.0
+
diff --git a/fix-cve-2024-22231-and-cve-2024-22232-bsc-1219430-bs.patch b/fix-cve-2024-22231-and-cve-2024-22232-bsc-1219430-bs.patch
new file mode 100644
index 0000000..d08b5c1
--- /dev/null
+++ b/fix-cve-2024-22231-and-cve-2024-22232-bsc-1219430-bs.patch
@@ -0,0 +1,544 @@
+From 5710bc3ff3887762182f8326bd74f40d3872a69f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Thu, 1 Feb 2024 11:50:16 +0000
+Subject: [PATCH] Fix "CVE-2024-22231" and "CVE-2024-22232"
+ (bsc#1219430, bsc#1219431) (#621)
+
+* Fix CVE-2024-22231 and CVE-2024-22232
+
+* Add changelogs for CVE-2024-22231 and CVE-2024-22232
+
+* Fix linter issue
+
+* Add credit
+
+* Fix wart in patch
+
+* Clean up test fixtures
+
+* Fix test on windows
+
+* Update changelog file name
+
+* Fix fileroots tests
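+
+The common guard in both fixes: a path derived from user input must
+still resolve to a location under the intended root before it is
+used. A minimal sketch of the pattern, using the same
+salt.utils.verify.clean_path helper the patch relies on (safe_join
+and the example paths are illustrative, not part of the patch):
+
+    import os
+
+    import salt.utils.verify
+
+    def safe_join(root, user_supplied):
+        # Join first, then verify the result is still under root.
+        # clean_path() is truthy only when the resolved path stays
+        # inside root, so "../" tricks fall through to the error.
+        candidate = os.path.join(root, user_supplied)
+        if not salt.utils.verify.clean_path(root, candidate, subdir=True):
+            raise ValueError("path escapes the intended root")
+        return candidate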
+
+---------
+
+Co-authored-by: Daniel A. Wozniak
+---
+ changelog/565.security.md | 4 +
+ salt/fileserver/__init__.py | 9 +-
+ salt/fileserver/roots.py | 26 +++++
+ salt/master.py | 15 ++-
+ tests/pytests/unit/fileserver/test_roots.py | 58 +++++++--
+ tests/pytests/unit/test_fileserver.py | 123 ++++++++++++++++++++
+ tests/pytests/unit/test_master.py | 33 ++++++
+ tests/unit/test_fileserver.py | 79 -------------
+ 8 files changed, 250 insertions(+), 97 deletions(-)
+ create mode 100644 changelog/565.security.md
+ create mode 100644 tests/pytests/unit/test_fileserver.py
+ delete mode 100644 tests/unit/test_fileserver.py
+
+diff --git a/changelog/565.security.md b/changelog/565.security.md
+new file mode 100644
+index 00000000000..5d7ec8202ba
+--- /dev/null
++++ b/changelog/565.security.md
+@@ -0,0 +1,4 @@
++CVE-2024-22231 Prevent directory traversal when creating syndic cache directory on the master
++CVE-2024-22232 Prevent directory traversal attacks in the master's serve_file method.
++These vulnerabilities were discovered and reported by:
++Yudi Zhao (Huawei Nebula Security Lab), Chenwei Jiang (Huawei Nebula Security Lab)
+diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py
+index 99f12387f91..4eca98d14a4 100644
+--- a/salt/fileserver/__init__.py
++++ b/salt/fileserver/__init__.py
+@@ -568,11 +568,6 @@ class Fileserver:
+ saltenv = salt.utils.stringutils.to_unicode(saltenv)
+ back = self.backends(back)
+ kwargs = {}
+- fnd = {"path": "", "rel": ""}
+- if os.path.isabs(path):
+- return fnd
+- if "../" in path:
+- return fnd
+ if salt.utils.url.is_escaped(path):
+ # don't attempt to find URL query arguments in the path
+ path = salt.utils.url.unescape(path)
+@@ -588,6 +583,10 @@ class Fileserver:
+ args = comp.split("=", 1)
+ kwargs[args[0]] = args[1]
+
++ fnd = {"path": "", "rel": ""}
++ if os.path.isabs(path) or "../" in path:
++ return fnd
++
+ if "env" in kwargs:
+ # "env" is not supported; Use "saltenv".
+ kwargs.pop("env")
+diff --git a/salt/fileserver/roots.py b/salt/fileserver/roots.py
+index a02b597c6f8..e2ea92029c3 100644
+--- a/salt/fileserver/roots.py
++++ b/salt/fileserver/roots.py
+@@ -27,6 +27,7 @@ import salt.utils.hashutils
+ import salt.utils.path
+ import salt.utils.platform
+ import salt.utils.stringutils
++import salt.utils.verify
+ import salt.utils.versions
+
+ log = logging.getLogger(__name__)
+@@ -98,6 +99,11 @@ def find_file(path, saltenv="base", **kwargs):
+ if saltenv == "__env__":
+ root = root.replace("__env__", actual_saltenv)
+ full = os.path.join(root, path)
++
++ # Refuse to serve file that is not under the root.
++ if not salt.utils.verify.clean_path(root, full, subdir=True):
++ continue
++
+ if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
+ fnd["path"] = full
+ fnd["rel"] = path
+@@ -128,6 +134,26 @@ def serve_file(load, fnd):
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
++
++ actual_saltenv = saltenv = load["saltenv"]
++ if saltenv not in __opts__["file_roots"]:
++ if "__env__" in __opts__["file_roots"]:
++ log.debug(
++ "salt environment '%s' maps to __env__ file_roots directory", saltenv
++ )
++ saltenv = "__env__"
++ else:
++ return fnd
++ file_in_root = False
++ for root in __opts__["file_roots"][saltenv]:
++ if saltenv == "__env__":
++ root = root.replace("__env__", actual_saltenv)
++ # Refuse to serve file that is not under the root.
++ if salt.utils.verify.clean_path(root, fpath, subdir=True):
++ file_in_root = True
++ if not file_in_root:
++ return ret
++
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
+diff --git a/salt/master.py b/salt/master.py
+index 3d2ba1e29de..425b4121481 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -1038,7 +1038,10 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
+ """
+ key = payload["enc"]
+ load = payload["load"]
+- ret = {"aes": self._handle_aes, "clear": self._handle_clear}[key](load)
++ if key == "aes":
++ ret = self._handle_aes(load)
++ else:
++ ret = self._handle_clear(load)
+ raise salt.ext.tornado.gen.Return(ret)
+
+ def _post_stats(self, start, cmd):
+@@ -1213,7 +1216,7 @@ class AESFuncs(TransportMethods):
+ "_dir_list",
+ "_symlink_list",
+ "_file_envs",
+- "_ext_nodes", # To keep compatibility with old Salt minion versions
++ "_ext_nodes", # To keep compatibility with old Salt minion versions
+ )
+
+ def __init__(self, opts, context=None):
+@@ -1746,10 +1749,16 @@ class AESFuncs(TransportMethods):
+ self.mminion.returners[fstr](load["jid"], load["load"])
+
+ # Register the syndic
++
++ # We are creating a path using user supplied input. Use
++ # clean_path to prevent a directory traversal.
++ root = os.path.join(self.opts["cachedir"], "syndics")
+ syndic_cache_path = os.path.join(
+ self.opts["cachedir"], "syndics", load["id"]
+ )
+- if not os.path.exists(syndic_cache_path):
++ if salt.utils.verify.clean_path(
++ root, syndic_cache_path
++ ) and not os.path.exists(syndic_cache_path):
+ path_name = os.path.split(syndic_cache_path)[0]
+ if not os.path.exists(path_name):
+ os.makedirs(path_name)
+diff --git a/tests/pytests/unit/fileserver/test_roots.py b/tests/pytests/unit/fileserver/test_roots.py
+index 96bceb0fd3d..c1660280bc5 100644
+--- a/tests/pytests/unit/fileserver/test_roots.py
++++ b/tests/pytests/unit/fileserver/test_roots.py
+@@ -5,6 +5,7 @@
+ import copy
+ import pathlib
+ import shutil
++import sys
+ import textwrap
+
+ import pytest
+@@ -28,14 +29,14 @@ def unicode_dirname():
+ return "соль"
+
+
+-@pytest.fixture(autouse=True)
++@pytest.fixture
+ def testfile(tmp_path):
+ fp = tmp_path / "testfile"
+ fp.write_text("This is a testfile")
+ return fp
+
+
+-@pytest.fixture(autouse=True)
++@pytest.fixture
+ def tmp_state_tree(tmp_path, testfile, unicode_filename, unicode_dirname):
+ dirname = tmp_path / "roots_tmp_state_tree"
+ dirname.mkdir(parents=True, exist_ok=True)
+@@ -54,11 +55,15 @@ def tmp_state_tree(tmp_path, testfile, unicode_filename, unicode_dirname):
+
+
+ @pytest.fixture
+-def configure_loader_modules(tmp_state_tree, temp_salt_master):
+- opts = temp_salt_master.config.copy()
++def testfilepath(tmp_state_tree, testfile):
++ return tmp_state_tree / testfile.name
++
++
++@pytest.fixture
++def configure_loader_modules(tmp_state_tree, master_opts):
+ overrides = {"file_roots": {"base": [str(tmp_state_tree)]}}
+- opts.update(overrides)
+- return {roots: {"__opts__": opts}}
++ master_opts.update(overrides)
++ return {roots: {"__opts__": master_opts}}
+
+
+ def test_file_list(unicode_filename):
+@@ -75,17 +80,17 @@ def test_find_file(tmp_state_tree):
+ assert full_path_to_file == ret["path"]
+
+
+-def test_serve_file(testfile):
++def test_serve_file(testfilepath):
+ with patch.dict(roots.__opts__, {"file_buffer_size": 262144}):
+ load = {
+ "saltenv": "base",
+- "path": str(testfile),
++ "path": str(testfilepath),
+ "loc": 0,
+ }
+- fnd = {"path": str(testfile), "rel": "testfile"}
++ fnd = {"path": str(testfilepath), "rel": "testfile"}
+ ret = roots.serve_file(load, fnd)
+
+- with salt.utils.files.fopen(str(testfile), "rb") as fp_:
++ with salt.utils.files.fopen(str(testfilepath), "rb") as fp_:
+ data = fp_.read()
+
+ assert ret == {"data": data, "dest": "testfile"}
+@@ -277,3 +282,36 @@ def test_update_mtime_map_unicode_error(tmp_path):
+ },
+ "backend": "roots",
+ }
++
++
++def test_find_file_not_in_root(tmp_state_tree):
++ """
++ Fileroots should never 'find' a file that is outside of its root.
++ """
++ badfile = pathlib.Path(tmp_state_tree).parent / "bar"
++ badfile.write_text("Bad file")
++ badpath = f"../bar"
++ ret = roots.find_file(badpath)
++ assert ret == {"path": "", "rel": ""}
++ badpath = f"{tmp_state_tree / '..' / 'bar'}"
++ ret = roots.find_file(badpath)
++ assert ret == {"path": "", "rel": ""}
++
++
++def test_serve_file_not_in_root(tmp_state_tree):
++ """
++ Fileroots should never 'serve' a file that is outside of its root.
++ """
++ badfile = pathlib.Path(tmp_state_tree).parent / "bar"
++ badfile.write_text("Bad file")
++ badpath = f"../bar"
++ load = {"path": "salt://|..\\bar", "saltenv": "base", "loc": 0}
++ fnd = {
++ "path": f"{tmp_state_tree / '..' / 'bar'}",
++ "rel": f"{pathlib.Path('..') / 'bar'}",
++ }
++ ret = roots.serve_file(load, fnd)
++ if "win" in sys.platform:
++ assert ret == {"data": "", "dest": "..\\bar"}
++ else:
++ assert ret == {"data": "", "dest": "../bar"}
+diff --git a/tests/pytests/unit/test_fileserver.py b/tests/pytests/unit/test_fileserver.py
+new file mode 100644
+index 00000000000..8dd3ea0a27d
+--- /dev/null
++++ b/tests/pytests/unit/test_fileserver.py
+@@ -0,0 +1,123 @@
++import datetime
++import os
++import time
++
++import salt.fileserver
++import salt.utils.files
++
++
++def test_diff_with_diffent_keys():
++ """
++ Test that different maps are indeed reported different
++ """
++ map1 = {"file1": 1234}
++ map2 = {"file2": 1234}
++ assert salt.fileserver.diff_mtime_map(map1, map2) is True
++
++
++def test_diff_with_diffent_values():
++ """
++ Test that different maps are indeed reported different
++ """
++ map1 = {"file1": 12345}
++ map2 = {"file1": 1234}
++ assert salt.fileserver.diff_mtime_map(map1, map2) is True
++
++
++def test_whitelist():
++ opts = {
++ "fileserver_backend": ["roots", "git", "s3fs", "hgfs", "svn"],
++ "extension_modules": "",
++ }
++ fs = salt.fileserver.Fileserver(opts)
++ assert sorted(fs.servers.whitelist) == sorted(
++ ["git", "gitfs", "hg", "hgfs", "svn", "svnfs", "roots", "s3fs"]
++ ), fs.servers.whitelist
++
++
++def test_future_file_list_cache_file_ignored(tmp_path):
++ opts = {
++ "fileserver_backend": ["roots"],
++ "cachedir": tmp_path,
++ "extension_modules": "",
++ }
++
++ back_cachedir = os.path.join(tmp_path, "file_lists/roots")
++ os.makedirs(os.path.join(back_cachedir))
++
++ # Touch a couple files
++ for filename in ("base.p", "foo.txt"):
++ with salt.utils.files.fopen(os.path.join(back_cachedir, filename), "wb") as _f:
++ if filename == "base.p":
++ _f.write(b"\x80")
++
++ # Set modification time to file list cache file to 1 year in the future
++ now = datetime.datetime.utcnow()
++ future = now + datetime.timedelta(days=365)
++ mod_time = time.mktime(future.timetuple())
++ os.utime(os.path.join(back_cachedir, "base.p"), (mod_time, mod_time))
++
++ list_cache = os.path.join(back_cachedir, "base.p")
++ w_lock = os.path.join(back_cachedir, ".base.w")
++ ret = salt.fileserver.check_file_list_cache(opts, "files", list_cache, w_lock)
++ assert (
++ ret[1] is True
++ ), "Cache file list cache file is not refreshed when future modification time"
++
++
++def test_file_server_url_escape(tmp_path):
++ (tmp_path / "srv").mkdir()
++ (tmp_path / "srv" / "salt").mkdir()
++ (tmp_path / "foo").mkdir()
++ (tmp_path / "foo" / "bar").write_text("Bad file")
++ fileroot = str(tmp_path / "srv" / "salt")
++ badfile = str(tmp_path / "foo" / "bar")
++ opts = {
++ "fileserver_backend": ["roots"],
++ "extension_modules": "",
++ "optimization_order": [
++ 0,
++ ],
++ "file_roots": {
++ "base": [fileroot],
++ },
++ "file_ignore_regex": "",
++ "file_ignore_glob": "",
++ }
++ fs = salt.fileserver.Fileserver(opts)
++ ret = fs.find_file(
++ "salt://|..\\..\\..\\foo/bar",
++ "base",
++ )
++ assert ret == {"path": "", "rel": ""}
++
++
++def test_file_server_serve_url_escape(tmp_path):
++ (tmp_path / "srv").mkdir()
++ (tmp_path / "srv" / "salt").mkdir()
++ (tmp_path / "foo").mkdir()
++ (tmp_path / "foo" / "bar").write_text("Bad file")
++ fileroot = str(tmp_path / "srv" / "salt")
++ badfile = str(tmp_path / "foo" / "bar")
++ opts = {
++ "fileserver_backend": ["roots"],
++ "extension_modules": "",
++ "optimization_order": [
++ 0,
++ ],
++ "file_roots": {
++ "base": [fileroot],
++ },
++ "file_ignore_regex": "",
++ "file_ignore_glob": "",
++ "file_buffer_size": 2048,
++ }
++ fs = salt.fileserver.Fileserver(opts)
++ ret = fs.serve_file(
++ {
++ "path": "salt://|..\\..\\..\\foo/bar",
++ "saltenv": "base",
++ "loc": 0,
++ }
++ )
++ assert ret == {"data": "", "dest": ""}
+diff --git a/tests/pytests/unit/test_master.py b/tests/pytests/unit/test_master.py
+index 98c796912aa..d338307d1f8 100644
+--- a/tests/pytests/unit/test_master.py
++++ b/tests/pytests/unit/test_master.py
+@@ -1,3 +1,4 @@
++import pathlib
+ import time
+
+ import pytest
+@@ -249,3 +250,35 @@ def test_mworker_pass_context():
+ loadler_pillars_mock.call_args_list[0][1].get("pack").get("__context__")
+ == test_context
+ )
++
++
++def test_syndic_return_cache_dir_creation(encrypted_requests):
++ """master's cachedir for a syndic will be created by AESFuncs._syndic_return method"""
++ cachedir = pathlib.Path(encrypted_requests.opts["cachedir"])
++ assert not (cachedir / "syndics").exists()
++ encrypted_requests._syndic_return(
++ {
++ "id": "mamajama",
++ "jid": "",
++ "return": {},
++ }
++ )
++ assert (cachedir / "syndics").exists()
++ assert (cachedir / "syndics" / "mamajama").exists()
++
++
++def test_syndic_return_cache_dir_creation_traversal(encrypted_requests):
++ """
++ master's AESFuncs._syndic_return method cachedir creation is not vulnerable to a directory traversal
++ """
++ cachedir = pathlib.Path(encrypted_requests.opts["cachedir"])
++ assert not (cachedir / "syndics").exists()
++ encrypted_requests._syndic_return(
++ {
++ "id": "../mamajama",
++ "jid": "",
++ "return": {},
++ }
++ )
++ assert not (cachedir / "syndics").exists()
++ assert not (cachedir / "mamajama").exists()
+diff --git a/tests/unit/test_fileserver.py b/tests/unit/test_fileserver.py
+deleted file mode 100644
+index c290b16b7e4..00000000000
+--- a/tests/unit/test_fileserver.py
++++ /dev/null
+@@ -1,79 +0,0 @@
+-"""
+- :codeauthor: Joao Mesquita
+-"""
+-
+-
+-import datetime
+-import os
+-import time
+-
+-import salt.utils.files
+-from salt import fileserver
+-from tests.support.helpers import with_tempdir
+-from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.unit import TestCase
+-
+-
+-class MapDiffTestCase(TestCase):
+- def test_diff_with_diffent_keys(self):
+- """
+- Test that different maps are indeed reported different
+- """
+- map1 = {"file1": 1234}
+- map2 = {"file2": 1234}
+- assert fileserver.diff_mtime_map(map1, map2) is True
+-
+- def test_diff_with_diffent_values(self):
+- """
+- Test that different maps are indeed reported different
+- """
+- map1 = {"file1": 12345}
+- map2 = {"file1": 1234}
+- assert fileserver.diff_mtime_map(map1, map2) is True
+-
+-
+-class VCSBackendWhitelistCase(TestCase, LoaderModuleMockMixin):
+- def setup_loader_modules(self):
+- return {fileserver: {}}
+-
+- def test_whitelist(self):
+- opts = {
+- "fileserver_backend": ["roots", "git", "s3fs", "hgfs", "svn"],
+- "extension_modules": "",
+- }
+- fs = fileserver.Fileserver(opts)
+- assert sorted(fs.servers.whitelist) == sorted(
+- ["git", "gitfs", "hg", "hgfs", "svn", "svnfs", "roots", "s3fs"]
+- ), fs.servers.whitelist
+-
+- @with_tempdir()
+- def test_future_file_list_cache_file_ignored(self, cachedir):
+- opts = {
+- "fileserver_backend": ["roots"],
+- "cachedir": cachedir,
+- "extension_modules": "",
+- }
+-
+- back_cachedir = os.path.join(cachedir, "file_lists/roots")
+- os.makedirs(os.path.join(back_cachedir))
+-
+- # Touch a couple files
+- for filename in ("base.p", "foo.txt"):
+- with salt.utils.files.fopen(
+- os.path.join(back_cachedir, filename), "wb"
+- ) as _f:
+- if filename == "base.p":
+- _f.write(b"\x80")
+-
+- # Set modification time to file list cache file to 1 year in the future
+- now = datetime.datetime.utcnow()
+- future = now + datetime.timedelta(days=365)
+- mod_time = time.mktime(future.timetuple())
+- os.utime(os.path.join(back_cachedir, "base.p"), (mod_time, mod_time))
+-
+- list_cache = os.path.join(back_cachedir, "base.p")
+- w_lock = os.path.join(back_cachedir, ".base.w")
+- ret = fileserver.check_file_list_cache(opts, "files", list_cache, w_lock)
+- assert (
+- ret[1] is True
+- ), "Cache file list cache file is not refreshed when future modification time"
+--
+2.43.0
+
+
diff --git a/fix-deprecated-code-677.patch b/fix-deprecated-code-677.patch
new file mode 100644
index 0000000..b64f339
--- /dev/null
+++ b/fix-deprecated-code-677.patch
@@ -0,0 +1,166 @@
+From d5f3df07783d8aaf3a897ca2f209e662973b930c Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 4 Sep 2024 13:11:33 +0200
+Subject: [PATCH] Fix deprecated code (#677)
+
+Due to SUSE's extended support policy, we won't remove
+code from Salt until the next major release.
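+
+The mechanism is salt.utils.versions.warn_until_date: it emits a
+deprecation warning until the given date and, once that date has
+passed, raises to force cleanup, so bumping the dates to "20260101"
+keeps the deprecated code importable while still warning users. A
+minimal usage sketch (legacy_helper and the message text are
+illustrative):
+
+    from salt.utils.versions import warn_until_date
+
+    def legacy_helper():
+        # Warns until 2026-01-01; after that date warn_until_date
+        # raises, forcing this code path to finally be removed.
+        warn_until_date(
+            "20260101",
+            "legacy_helper() is deprecated and will be removed "
+            "after {date}.",
+        )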
+---
+ salt/_logging/handlers.py | 6 +++---
+ salt/log/__init__.py | 2 +-
+ salt/log/handlers/__init__.py | 2 +-
+ salt/log/mixins.py | 2 +-
+ salt/log/setup.py | 4 ++--
+ salt/modules/aptpkg.py | 2 +-
+ salt/modules/cassandra_mod.py | 2 +-
+ salt/returners/cassandra_return.py | 2 +-
+ salt/returners/django_return.py | 2 +-
+ 9 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/salt/_logging/handlers.py b/salt/_logging/handlers.py
+index f4b0b6fec3d..5a1a1613137 100644
+--- a/salt/_logging/handlers.py
++++ b/salt/_logging/handlers.py
+@@ -36,7 +36,7 @@ class TemporaryLoggingHandler(logging.NullHandler):
+
+ def __init__(self, level=logging.NOTSET, max_queue_size=10000):
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}.TemporaryLoggingHandler'. "
+ "'{name}.TemporaryLoggingHandler' will go away after "
+ "{{date}}.".format(name=__name__),
+@@ -225,7 +225,7 @@ if sys.version_info < (3, 7):
+ def __init__(self, queue): # pylint: disable=useless-super-delegation
+ super().__init__(queue)
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}.QueueHandler' and instead "
+ "use 'logging.handlers.QueueHandler'. "
+ "'{name}.QueueHandler' will go away after "
+@@ -283,7 +283,7 @@ else:
+ def __init__(self, queue): # pylint: disable=useless-super-delegation
+ super().__init__(queue)
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}.QueueHandler' and instead "
+ "use 'logging.handlers.QueueHandler'. "
+ "'{name}.QueueHandler' will go away after "
+diff --git a/salt/log/__init__.py b/salt/log/__init__.py
+index 3458474f2ca..69bfa8ed15b 100644
+--- a/salt/log/__init__.py
++++ b/salt/log/__init__.py
+@@ -24,7 +24,7 @@ from salt.log.setup import (
+ from salt.utils.versions import warn_until_date
+
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}' and instead use 'salt._logging'. "
+ "'{name}' will go away after {{date}}.".format(name=__name__),
+ stacklevel=3,
+diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py
+index 8bc740e20f1..55cf10cdb78 100644
+--- a/salt/log/handlers/__init__.py
++++ b/salt/log/handlers/__init__.py
+@@ -12,7 +12,7 @@ from salt._logging.handlers import (
+ from salt.utils.versions import warn_until_date
+
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}' and instead use 'salt._logging.handlers'. "
+ "'{name}' will go away after {{date}}.".format(name=__name__),
+ )
+diff --git a/salt/log/mixins.py b/salt/log/mixins.py
+index 6619b564198..65f5ed7f78a 100644
+--- a/salt/log/mixins.py
++++ b/salt/log/mixins.py
+@@ -11,7 +11,7 @@ from salt.utils.versions import warn_until_date
+ # pylint: enable=unused-import
+
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}' and instead use 'salt._logging.mixins'. "
+ "'{name}' will go away after {{date}}.".format(name=__name__),
+ )
+diff --git a/salt/log/setup.py b/salt/log/setup.py
+index 74bd7bbd3e1..f4c80b0f280 100644
+--- a/salt/log/setup.py
++++ b/salt/log/setup.py
+@@ -21,7 +21,7 @@ from salt._logging.impl import set_log_record_factory as setLogRecordFactory
+ from salt.utils.versions import warn_until_date
+
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using '{name}' and instead use 'salt._logging'. "
+ "'{name}' will go away after {{date}}. Do note however that "
+ "'salt._logging' is now considered a non public implementation "
+@@ -34,7 +34,7 @@ def _deprecated_warning(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "Please stop using 'salt.log.setup.{name}()' as it no longer does anything and "
+ "will go away after {{date}}.".format(name=func.__qualname__),
+ stacklevel=4,
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index ad5450c4151..cd40aea54f1 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -3128,7 +3128,7 @@ def expand_repo_def(**kwargs):
+ NOT USABLE IN THE CLI
+ """
+ warn_until_date(
+- "20250101",
++ "20260101",
+ "The pkg.expand_repo_def function is deprecated and set for removal "
+ "after {date}. This is only unsed internally by the apt pkg state "
+ "module. If that's not the case, please file an new issue requesting "
+diff --git a/salt/modules/cassandra_mod.py b/salt/modules/cassandra_mod.py
+index 029fd08fb9b..db9c8821920 100644
+--- a/salt/modules/cassandra_mod.py
++++ b/salt/modules/cassandra_mod.py
+@@ -45,7 +45,7 @@ def __virtual__():
+ )
+
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "The cassandra returner is broken and deprecated, and will be removed"
+ " after {date}. Use the cassandra_cql returner instead",
+ )
+diff --git a/salt/returners/cassandra_return.py b/salt/returners/cassandra_return.py
+index ac01a4e46cb..5fcc00ee8ce 100644
+--- a/salt/returners/cassandra_return.py
++++ b/salt/returners/cassandra_return.py
+@@ -53,7 +53,7 @@ def __virtual__():
+ if not HAS_PYCASSA:
+ return False, "Could not import cassandra returner; pycassa is not installed."
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "The cassandra returner is broken and deprecated, and will be removed"
+ " after {date}. Use the cassandra_cql returner instead",
+ )
+diff --git a/salt/returners/django_return.py b/salt/returners/django_return.py
+index 36386875552..474653f3831 100644
+--- a/salt/returners/django_return.py
++++ b/salt/returners/django_return.py
+@@ -57,7 +57,7 @@ __virtualname__ = "django"
+
+ def __virtual__():
+ warn_until_date(
+- "20240101",
++ "20260101",
+ "The django returner is broken and deprecated, and will be removed"
+ " after {date}.",
+ )
+--
+2.46.0
+
diff --git a/fix-failed-to-stat-root-.gitconfig-issue-on-gitfs-bs.patch b/fix-failed-to-stat-root-.gitconfig-issue-on-gitfs-bs.patch
new file mode 100644
index 0000000..6e271a0
--- /dev/null
+++ b/fix-failed-to-stat-root-.gitconfig-issue-on-gitfs-bs.patch
@@ -0,0 +1,73 @@
+From 0ef67b3a7ce03335f1bfc6545f851897e11f5795 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 29 Jan 2025 10:08:28 +0000
+Subject: [PATCH] Fix failed to stat '/root/.gitconfig' issue on gitfs
+ (bsc#1230944) (bsc#1234881) (#699)
+
+* Fix failed to stat '/root/.gitconfig' issue on gitfs (bsc#1230944) (bsc#1234881)
+
+This commit ensures the right HOME value is set during Pygit2 remote
+initialization; otherwise it may pick up a wrong value depending on
+the execution stack.
+
+* Add changelog entry file
+
+* Add test_checkout_pygit2_with_home_env_unset unit test
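+
+A minimal sketch of the initialization order the fix enforces
+(init_remote_env is an illustrative stand-in for the real
+salt.utils.gitfs.Pygit2.init_remote):
+
+    import os
+
+    import pygit2
+
+    def init_remote_env():
+        # Resolve HOME first and export it before configuring pygit2,
+        # so libgit2 does not fail with "error loading known_hosts"
+        # when the process environment lacks HOME.
+        home = os.path.expanduser("~")
+        os.environ.setdefault("HOME", home)
+        pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home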
+---
+ changelog/64121.fixed.md | 1 +
+ salt/utils/gitfs.py | 9 +++++----
+ tests/pytests/unit/utils/test_gitfs.py | 1 -
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+ create mode 100644 changelog/64121.fixed.md
+
+diff --git a/changelog/64121.fixed.md b/changelog/64121.fixed.md
+new file mode 100644
+index 0000000000..e78bbd5b7f
+--- /dev/null
++++ b/changelog/64121.fixed.md
+@@ -0,0 +1 @@
++Ensure the right HOME environment value is set during Pygit2 remote initialization.
+diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
+index 58fa611db8..6f691f3869 100644
+--- a/salt/utils/gitfs.py
++++ b/salt/utils/gitfs.py
+@@ -1889,7 +1889,12 @@ class Pygit2(GitProvider):
+ """
+ # https://github.com/libgit2/pygit2/issues/339
+ # https://github.com/libgit2/libgit2/issues/2122
++ # https://github.com/saltstack/salt/issues/64121
+ home = os.path.expanduser("~")
++ if "HOME" not in os.environ:
++ # Make sure $HOME env variable is set to prevent
++ # _pygit2.GitError: error loading known_hosts in some libgit2 versions.
++ os.environ["HOME"] = home
+ pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
+ new = False
+ if not os.listdir(self._cachedir):
+@@ -1994,10 +1999,6 @@ class Pygit2(GitProvider):
+ # pruning only available in pygit2 >= 0.26.2
+ pass
+ try:
+- # Make sure $HOME env variable is set to prevent
+- # _pygit2.GitError: error loading known_hosts in some libgit2 versions.
+- if "HOME" not in os.environ:
+- os.environ["HOME"] = salt.syspaths.HOME_DIR
+ fetch_results = origin.fetch(**fetch_kwargs)
+ except GitError as exc: # pylint: disable=broad-except
+ exc_str = get_error_message(exc).lower()
+diff --git a/tests/pytests/unit/utils/test_gitfs.py b/tests/pytests/unit/utils/test_gitfs.py
+index bd7d74cb2b..3c4a85a856 100644
+--- a/tests/pytests/unit/utils/test_gitfs.py
++++ b/tests/pytests/unit/utils/test_gitfs.py
+@@ -251,7 +251,6 @@ def test_checkout_pygit2_with_home_env_unset(_prepare_provider):
+ with patched_environ(__cleanup__=["HOME"]):
+ assert "HOME" not in os.environ
+ provider.init_remote()
+- provider.fetch()
+ assert "HOME" in os.environ
+
+
+--
+2.48.1
+
diff --git a/fix-for-suse-expanded-support-detection.patch b/fix-for-suse-expanded-support-detection.patch
new file mode 100644
index 0000000..acbfc28
--- /dev/null
+++ b/fix-for-suse-expanded-support-detection.patch
@@ -0,0 +1,39 @@
+From 7be26299bc7b6ec2065ab13857f088dc500ee882 Mon Sep 17 00:00:00 2001
+From: Jochen Breuer
+Date: Thu, 6 Sep 2018 17:15:18 +0200
+Subject: [PATCH] Fix for SUSE Expanded Support detection
+
+A SUSE ES installation has both the centos-release and redhat-release
+files. Since os_data only used the centos-release file to detect a
+CentOS installation, this led to SUSE ES being detected as CentOS.
+
+This change also adds a check for redhat-release and, when it matches,
+sets 'lsb_distrib_id' to RedHat.
+---
+ salt/grains/core.py | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/salt/grains/core.py b/salt/grains/core.py
+index 710c57f28f..1199ad274f 100644
+--- a/salt/grains/core.py
++++ b/salt/grains/core.py
+@@ -2279,6 +2279,15 @@ def _legacy_linux_distribution_data(grains, os_release, lsb_has_error):
+ log.trace("Parsing distrib info from /etc/centos-release")
+ # CentOS Linux
+ grains["lsb_distrib_id"] = "CentOS"
++ # Maybe CentOS Linux; could also be SUSE Expanded Support.
++            # SUSE ES has both centos-release and redhat-release.
++ if os.path.isfile("/etc/redhat-release"):
++ with salt.utils.files.fopen("/etc/redhat-release") as ifile:
++ for line in ifile:
++ if "red hat enterprise linux server" in line.lower():
++                        # This is a SUSE Expanded Support RHEL installation
++ grains["lsb_distrib_id"] = "RedHat"
++ break
+ with salt.utils.files.fopen("/etc/centos-release") as ifile:
+ for line in ifile:
+ # Need to pull out the version and codename
+--
+2.39.2
+
+
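Stripped of the grains plumbing, the rule the patch adds boils down to: when
both release files exist and redhat-release names RHEL Server, report RedHat
instead of CentOS. A standalone sketch of that rule (the helper name is made
up; the /etc file names are the real ones):

import os


def detect_lsb_distrib_id(etc="/etc"):
    # Hypothetical helper mirroring the patched branch of os_data():
    # SUSE Expanded Support ships both centos-release and redhat-release.
    if not os.path.isfile(os.path.join(etc, "centos-release")):
        return None
    distrib_id = "CentOS"
    redhat_release = os.path.join(etc, "redhat-release")
    if os.path.isfile(redhat_release):
        with open(redhat_release) as fh:
            for line in fh:
                if "red hat enterprise linux server" in line.lower():
                    distrib_id = "RedHat"
                    break
    return distrib_id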
diff --git a/fix-gitfs-__env__-and-improve-cache-cleaning-bsc-119.patch b/fix-gitfs-__env__-and-improve-cache-cleaning-bsc-119.patch
new file mode 100644
index 0000000..188743d
--- /dev/null
+++ b/fix-gitfs-__env__-and-improve-cache-cleaning-bsc-119.patch
@@ -0,0 +1,2024 @@
+From a7c98ce490833ff232946b9715909161b6ba5a46 Mon Sep 17 00:00:00 2001
+From: Pablo Suárez Hernández
+
+Date: Mon, 13 Nov 2023 11:24:09 +0000
+Subject: [PATCH] Fix gitfs "__env__" and improve cache cleaning
+ (bsc#1193948) (#608)
+
+* Fix __env__ and improve cache cleaning
+* Move gitfs locks out of cachedir/.git
+
+---------
+
+Co-authored-by: cmcmarrow
+---
+ changelog/65002.fixed.md | 1 +
+ changelog/65086.fixed.md | 1 +
+ salt/utils/cache.py | 32 ++
+ salt/utils/gitfs.py | 307 +++++++++------
+ .../functional/pillar/test_git_pillar.py | 262 +++++++++++++
+ tests/pytests/functional/utils/test_cache.py | 83 ++++
+ tests/pytests/functional/utils/test_gitfs.py | 275 +++++++++++++
+ tests/pytests/functional/utils/test_pillar.py | 365 ++++++++++++++++++
+ .../pytests/functional/utils/test_winrepo.py | 164 ++++++++
+ tests/pytests/unit/test_minion.py | 31 +-
+ tests/pytests/unit/utils/test_gitfs.py | 18 +-
+ tests/unit/utils/test_gitfs.py | 21 +-
+ 12 files changed, 1393 insertions(+), 167 deletions(-)
+ create mode 100644 changelog/65002.fixed.md
+ create mode 100644 changelog/65086.fixed.md
+ create mode 100644 tests/pytests/functional/pillar/test_git_pillar.py
+ create mode 100644 tests/pytests/functional/utils/test_cache.py
+ create mode 100644 tests/pytests/functional/utils/test_gitfs.py
+ create mode 100644 tests/pytests/functional/utils/test_pillar.py
+ create mode 100644 tests/pytests/functional/utils/test_winrepo.py
+
+diff --git a/changelog/65002.fixed.md b/changelog/65002.fixed.md
+new file mode 100644
+index 0000000000..86ed2d4bcc
+--- /dev/null
++++ b/changelog/65002.fixed.md
+@@ -0,0 +1 @@
++Fix __env__ and improve cache cleaning; see more info at pull #65017.
+diff --git a/changelog/65086.fixed.md b/changelog/65086.fixed.md
+new file mode 100644
+index 0000000000..292930f0fd
+--- /dev/null
++++ b/changelog/65086.fixed.md
+@@ -0,0 +1 @@
++Moved gitfs locks to salt working dir to avoid lock wipes
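As the two changelog entries hint, the patch below reshuffles the per-remote
on-disk layout: checkouts are keyed by a hash of the remote id (or by its
configured name), while locks and fetch markers move into a parallel "work"
tree so that wiping a checkout no longer wipes its locks. Illustrative layout
(directory names follow the code below; the exact cache root depends on the
cachedir option):

<cache_root>/<basehash>/<basename>/       repo checkout (the cachedir)
<cache_root>/work/<basehash>/<basename>/  *.lk lock files and fetch_request markers
<cache_root>/links/<basehash>/<basename>  git_pillar mountpoint links

Here <basename> is "_" for static remotes and the checkout target (the branch)
for "__env__" remotes, so several environments of one remote can coexist.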
+diff --git a/salt/utils/cache.py b/salt/utils/cache.py
+index a78a1f70fc..88e7fa2400 100644
+--- a/salt/utils/cache.py
++++ b/salt/utils/cache.py
+@@ -6,6 +6,7 @@ import functools
+ import logging
+ import os
+ import re
++import shutil
+ import time
+
+ import salt.config
+@@ -15,6 +16,8 @@ import salt.utils.data
+ import salt.utils.dictupdate
+ import salt.utils.files
+ import salt.utils.msgpack
++import salt.utils.path
++import salt.version
+ from salt.utils.zeromq import zmq
+
+ log = logging.getLogger(__name__)
+@@ -345,3 +348,32 @@ def context_cache(func):
+ return func(*args, **kwargs)
+
+ return context_cache_wrap
++
++
++def verify_cache_version(cache_path):
++ """
++ Check that the cached version matches the Salt version.
++ If the cached version does not match the Salt version, wipe the cache.
++
++ :return: ``True`` if cache version matches, otherwise ``False``
++ """
++ if not os.path.isdir(cache_path):
++ os.makedirs(cache_path)
++ with salt.utils.files.fopen(
++ salt.utils.path.join(cache_path, "cache_version"), "a+"
++ ) as file:
++ file.seek(0)
++ data = "\n".join(file.readlines())
++ if data != salt.version.__version__:
++ log.warning(f"Cache version mismatch clearing: {repr(cache_path)}")
++ file.truncate(0)
++ file.write(salt.version.__version__)
++ for item in os.listdir(cache_path):
++ if item != "cache_version":
++ item_path = salt.utils.path.join(cache_path, item)
++ if os.path.isfile(item_path):
++ os.remove(item_path)
++ else:
++ shutil.rmtree(item_path)
++ return False
++ return True
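A short usage sketch for the new helper, using a throwaway directory (behavior
as defined above: the first call creates cache_version and reports a mismatch,
repeat calls on the same Salt version leave the cache alone):

import os
import tempfile

import salt.utils.cache

cache_path = os.path.join(tempfile.mkdtemp(), "gitfs")
# Fresh directory: cache_version is written, mismatch reported, cache wiped.
assert salt.utils.cache.verify_cache_version(cache_path) is False
# Same Salt version afterwards: the cache is left untouched.
assert salt.utils.cache.verify_cache_version(cache_path) is True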
+diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
+index af61aa0dda..061647edac 100644
+--- a/salt/utils/gitfs.py
++++ b/salt/utils/gitfs.py
+@@ -17,7 +17,6 @@ import os
+ import shlex
+ import shutil
+ import stat
+-import string
+ import subprocess
+ import time
+ import weakref
+@@ -26,6 +25,7 @@ from datetime import datetime
+ import salt.ext.tornado.ioloop
+ import salt.fileserver
+ import salt.syspaths
++import salt.utils.cache
+ import salt.utils.configparser
+ import salt.utils.data
+ import salt.utils.files
+@@ -253,7 +253,6 @@ class GitProvider:
+ val_cb=lambda x, y: str(y),
+ )
+ self.conf = copy.deepcopy(per_remote_defaults)
+-
+ # Remove the 'salt://' from the beginning of any globally-defined
+ # per-saltenv mountpoints
+ for saltenv, saltenv_conf in self.global_saltenv.items():
+@@ -457,48 +456,38 @@ class GitProvider:
+ self.id,
+ )
+ failhard(self.role)
+-
+- hash_type = getattr(hashlib, self.opts.get("hash_type", "md5"))
+- # Generate full id.
+- # Full id helps decrease the chances of collections in the gitfs cache.
+- try:
+- target = str(self.get_checkout_target())
+- except AttributeError:
+- target = ""
+- self._full_id = "-".join(
+- [
+- getattr(self, "name", ""),
+- self.id,
+- getattr(self, "env", ""),
+- getattr(self, "_root", ""),
+- self.role,
+- getattr(self, "base", ""),
+- getattr(self, "branch", ""),
+- target,
+- ]
++ if hasattr(self, "name"):
++ self._cache_basehash = self.name
++ else:
++ hash_type = getattr(hashlib, self.opts.get("hash_type", "md5"))
++            # We loaded this data from YAML configuration files, so it's
++            # safe to use UTF-8
++ self._cache_basehash = str(
++ base64.b64encode(hash_type(self.id.encode("utf-8")).digest()),
++ encoding="ascii", # base64 only outputs ascii
++ ).replace(
++ "/", "_"
++ ) # replace "/" with "_" to not cause trouble with file system
++ self._cache_hash = salt.utils.path.join(cache_root, self._cache_basehash)
++ self._cache_basename = "_"
++ if self.id.startswith("__env__"):
++ try:
++ self._cache_basename = self.get_checkout_target()
++ except AttributeError:
++                log.critical(f"__env__ can't generate basename: {self.role} {self.id}")
++ failhard(self.role)
++ self._cache_full_basename = salt.utils.path.join(
++ self._cache_basehash, self._cache_basename
+ )
+- # We loaded this data from yaml configuration files, so, its safe
+- # to use UTF-8
+- base64_hash = str(
+- base64.b64encode(hash_type(self._full_id.encode("utf-8")).digest()),
+- encoding="ascii", # base64 only outputs ascii
+- ).replace(
+- "/", "_"
+- ) # replace "/" with "_" to not cause trouble with file system
+-
+- # limit name length to 19, so we don't eat up all the path length for windows
+- # this is due to pygit2 limitations
+- # replace any unknown char with "_" to not cause trouble with file system
+- name_chars = string.ascii_letters + string.digits + "-"
+- cache_name = "".join(
+- c if c in name_chars else "_" for c in getattr(self, "name", "")[:19]
++ self._cachedir = salt.utils.path.join(self._cache_hash, self._cache_basename)
++ self._salt_working_dir = salt.utils.path.join(
++ cache_root, "work", self._cache_full_basename
+ )
+-
+- self.cachedir_basename = f"{cache_name}-{base64_hash}"
+- self.cachedir = salt.utils.path.join(cache_root, self.cachedir_basename)
+- self.linkdir = salt.utils.path.join(cache_root, "links", self.cachedir_basename)
+- if not os.path.isdir(self.cachedir):
+- os.makedirs(self.cachedir)
++ self._linkdir = salt.utils.path.join(
++ cache_root, "links", self._cache_full_basename
++ )
++ if not os.path.isdir(self._cachedir):
++ os.makedirs(self._cachedir)
+
+ try:
+ self.new = self.init_remote()
+@@ -510,12 +499,32 @@ class GitProvider:
+ msg += " Perhaps git is not available."
+ log.critical(msg, exc_info=True)
+ failhard(self.role)
++ self.verify_auth()
++ self.setup_callbacks()
++ if not os.path.isdir(self._salt_working_dir):
++ os.makedirs(self._salt_working_dir)
++ self.fetch_request_check()
++
++ def get_cache_basehash(self):
++ return self._cache_basehash
++
++ def get_cache_hash(self):
++ return self._cache_hash
+
+- def full_id(self):
+- return self._full_id
++ def get_cache_basename(self):
++ return self._cache_basename
+
+- def get_cachedir_basename(self):
+- return self.cachedir_basename
++ def get_cache_full_basename(self):
++ return self._cache_full_basename
++
++ def get_cachedir(self):
++ return self._cachedir
++
++ def get_linkdir(self):
++ return self._linkdir
++
++ def get_salt_working_dir(self):
++ return self._salt_working_dir
+
+ def _get_envs_from_ref_paths(self, refs):
+ """
+@@ -557,7 +566,7 @@ class GitProvider:
+ return ret
+
+ def _get_lock_file(self, lock_type="update"):
+- return salt.utils.path.join(self.gitdir, lock_type + ".lk")
++ return salt.utils.path.join(self._salt_working_dir, lock_type + ".lk")
+
+ @classmethod
+ def add_conf_overlay(cls, name):
+@@ -644,7 +653,7 @@ class GitProvider:
+ # No need to pass an environment to self.root() here since per-saltenv
+ # configuration is a gitfs-only feature and check_root() is not used
+ # for gitfs.
+- root_dir = salt.utils.path.join(self.cachedir, self.root()).rstrip(os.sep)
++ root_dir = salt.utils.path.join(self._cachedir, self.root()).rstrip(os.sep)
+ if os.path.isdir(root_dir):
+ return root_dir
+ log.error(
+@@ -816,7 +825,7 @@ class GitProvider:
+ desired_refspecs,
+ )
+ if refspecs != desired_refspecs:
+- conf.set_multivar(remote_section, "fetch", self.refspecs)
++ conf.set_multivar(remote_section, "fetch", desired_refspecs)
+ log.debug(
+ "Refspecs for %s remote '%s' set to %s",
+ self.role,
+@@ -1069,7 +1078,7 @@ class GitProvider:
+ """
+ raise NotImplementedError()
+
+- def checkout(self):
++ def checkout(self, fetch_on_fail=True):
+ """
+ This function must be overridden in a sub-class
+ """
+@@ -1192,6 +1201,21 @@ class GitProvider:
+ else:
+ self.url = self.id
+
++ def fetch_request_check(self):
++ fetch_request = salt.utils.path.join(self._salt_working_dir, "fetch_request")
++ if os.path.isfile(fetch_request):
++ log.debug(f"Fetch request: {self._salt_working_dir}")
++ try:
++ os.remove(fetch_request)
++ except OSError as exc:
++ log.error(
++ f"Failed to remove Fetch request: {self._salt_working_dir} {exc}",
++ exc_info=True,
++ )
++ self.fetch()
++ return True
++ return False
++
+ @property
+ def linkdir_walk(self):
+ """
+@@ -1218,14 +1242,14 @@ class GitProvider:
+ dirs = []
+ self._linkdir_walk.append(
+ (
+- salt.utils.path.join(self.linkdir, *parts[: idx + 1]),
++ salt.utils.path.join(self._linkdir, *parts[: idx + 1]),
+ dirs,
+ [],
+ )
+ )
+ try:
+ # The linkdir itself goes at the beginning
+- self._linkdir_walk.insert(0, (self.linkdir, [parts[0]], []))
++ self._linkdir_walk.insert(0, (self._linkdir, [parts[0]], []))
+ except IndexError:
+ pass
+ return self._linkdir_walk
+@@ -1275,13 +1299,17 @@ class GitPython(GitProvider):
+ role,
+ )
+
+- def checkout(self):
++ def checkout(self, fetch_on_fail=True):
+ """
+ Checkout the configured branch/tag. We catch an "Exception" class here
+ instead of a specific exception class because the exceptions raised by
+ GitPython when running these functions vary in different versions of
+ GitPython.
++
++ fetch_on_fail
++            If checkout fails, perform a fetch, then try to checkout again.
+ """
++ self.fetch_request_check()
+ tgt_ref = self.get_checkout_target()
+ try:
+ head_sha = self.repo.rev_parse("HEAD").hexsha
+@@ -1345,6 +1373,15 @@ class GitPython(GitProvider):
+ except Exception: # pylint: disable=broad-except
+ continue
+ return self.check_root()
++ if fetch_on_fail:
++ log.debug(
++ "Failed to checkout %s from %s remote '%s': fetch and try again",
++ tgt_ref,
++ self.role,
++ self.id,
++ )
++ self.fetch()
++ return self.checkout(fetch_on_fail=False)
+ log.error(
+ "Failed to checkout %s from %s remote '%s': remote ref does not exist",
+ tgt_ref,
+@@ -1360,16 +1397,16 @@ class GitPython(GitProvider):
+ initialized by this function.
+ """
+ new = False
+- if not os.listdir(self.cachedir):
++ if not os.listdir(self._cachedir):
+ # Repo cachedir is empty, initialize a new repo there
+- self.repo = git.Repo.init(self.cachedir)
++ self.repo = git.Repo.init(self._cachedir)
+ new = True
+ else:
+ # Repo cachedir exists, try to attach
+ try:
+- self.repo = git.Repo(self.cachedir)
++ self.repo = git.Repo(self._cachedir)
+ except git.exc.InvalidGitRepositoryError:
+- log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
++ log.error(_INVALID_REPO, self._cachedir, self.url, self.role)
+ return new
+
+ self.gitdir = salt.utils.path.join(self.repo.working_dir, ".git")
+@@ -1603,10 +1640,14 @@ class Pygit2(GitProvider):
+ except AttributeError:
+ return obj.get_object()
+
+- def checkout(self):
++ def checkout(self, fetch_on_fail=True):
+ """
+ Checkout the configured branch/tag
++
++ fetch_on_fail
++            If checkout fails, perform a fetch, then try to checkout again.
+ """
++ self.fetch_request_check()
+ tgt_ref = self.get_checkout_target()
+ local_ref = "refs/heads/" + tgt_ref
+ remote_ref = "refs/remotes/origin/" + tgt_ref
+@@ -1796,6 +1837,15 @@ class Pygit2(GitProvider):
+ exc_info=True,
+ )
+ return None
++ if fetch_on_fail:
++ log.debug(
++ "Failed to checkout %s from %s remote '%s': fetch and try again",
++ tgt_ref,
++ self.role,
++ self.id,
++ )
++ self.fetch()
++ return self.checkout(fetch_on_fail=False)
+ log.error(
+ "Failed to checkout %s from %s remote '%s': remote ref does not exist",
+ tgt_ref,
+@@ -1837,16 +1887,16 @@ class Pygit2(GitProvider):
+ home = os.path.expanduser("~")
+ pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
+ new = False
+- if not os.listdir(self.cachedir):
++ if not os.listdir(self._cachedir):
+ # Repo cachedir is empty, initialize a new repo there
+- self.repo = pygit2.init_repository(self.cachedir)
++ self.repo = pygit2.init_repository(self._cachedir)
+ new = True
+ else:
+ # Repo cachedir exists, try to attach
+ try:
+- self.repo = pygit2.Repository(self.cachedir)
++ self.repo = pygit2.Repository(self._cachedir)
+ except KeyError:
+- log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
++ log.error(_INVALID_REPO, self._cachedir, self.url, self.role)
+ return new
+
+ self.gitdir = salt.utils.path.join(self.repo.workdir, ".git")
+@@ -2370,6 +2420,7 @@ class GitBase:
+ self.file_list_cachedir = salt.utils.path.join(
+ self.opts["cachedir"], "file_lists", self.role
+ )
++ salt.utils.cache.verify_cache_version(self.cache_root)
+ if init_remotes:
+ self.init_remotes(
+ remotes if remotes is not None else [],
+@@ -2442,8 +2493,6 @@ class GitBase:
+ )
+ if hasattr(repo_obj, "repo"):
+ # Sanity check and assign the credential parameter
+- repo_obj.verify_auth()
+- repo_obj.setup_callbacks()
+ if self.opts["__role"] == "minion" and repo_obj.new:
+ # Perform initial fetch on masterless minion
+ repo_obj.fetch()
+@@ -2492,7 +2541,7 @@ class GitBase:
+ # Don't allow collisions in cachedir naming
+ cachedir_map = {}
+ for repo in self.remotes:
+- cachedir_map.setdefault(repo.cachedir, []).append(repo.id)
++ cachedir_map.setdefault(repo.get_cachedir(), []).append(repo.id)
+
+ collisions = [x for x in cachedir_map if len(cachedir_map[x]) > 1]
+ if collisions:
+@@ -2509,48 +2558,42 @@ class GitBase:
+ if any(x.new for x in self.remotes):
+ self.write_remote_map()
+
++ def _remove_cache_dir(self, cache_dir):
++ try:
++ shutil.rmtree(cache_dir)
++ except OSError as exc:
++ log.error(
++ "Unable to remove old %s remote cachedir %s: %s",
++ self.role,
++ cache_dir,
++ exc,
++ )
++ return False
++ log.debug("%s removed old cachedir %s", self.role, cache_dir)
++ return True
++
++ def _iter_remote_hashes(self):
++ for item in os.listdir(self.cache_root):
++ if item in ("hash", "refs", "links", "work"):
++ continue
++ if os.path.isdir(salt.utils.path.join(self.cache_root, item)):
++ yield item
++
+ def clear_old_remotes(self):
+ """
+ Remove cache directories for remotes no longer configured
+ """
+- try:
+- cachedir_ls = os.listdir(self.cache_root)
+- except OSError:
+- cachedir_ls = []
+- # Remove actively-used remotes from list
+- for repo in self.remotes:
+- try:
+- cachedir_ls.remove(repo.cachedir_basename)
+- except ValueError:
+- pass
+- to_remove = []
+- for item in cachedir_ls:
+- if item in ("hash", "refs"):
+- continue
+- path = salt.utils.path.join(self.cache_root, item)
+- if os.path.isdir(path):
+- to_remove.append(path)
+- failed = []
+- if to_remove:
+- for rdir in to_remove:
+- try:
+- shutil.rmtree(rdir)
+- except OSError as exc:
+- log.error(
+- "Unable to remove old %s remote cachedir %s: %s",
+- self.role,
+- rdir,
+- exc,
+- )
+- failed.append(rdir)
+- else:
+- log.debug("%s removed old cachedir %s", self.role, rdir)
+- for fdir in failed:
+- to_remove.remove(fdir)
+- ret = bool(to_remove)
+- if ret:
++ change = False
++ # Remove all hash dirs not part of this group
++ remote_set = {r.get_cache_basehash() for r in self.remotes}
++ for item in self._iter_remote_hashes():
++ if item not in remote_set:
++            change = self._remove_cache_dir(
++                salt.utils.path.join(self.cache_root, item)
++            ) or change
++ if not change:
+ self.write_remote_map()
+- return ret
++ return change
+
+ def clear_cache(self):
+ """
+@@ -2609,6 +2652,27 @@ class GitBase:
+ name = getattr(repo, "name", None)
+ if not remotes or (repo.id, name) in remotes or name in remotes:
+ try:
++                    # Find and place a fetch_request file for all other branches of this repo
++ repo_work_hash = os.path.split(repo.get_salt_working_dir())[0]
++ for branch in os.listdir(repo_work_hash):
++ # Don't place fetch request in current branch being updated
++ if branch == repo.get_cache_basename():
++ continue
++ branch_salt_dir = salt.utils.path.join(repo_work_hash, branch)
++ fetch_path = salt.utils.path.join(
++ branch_salt_dir, "fetch_request"
++ )
++ if os.path.isdir(branch_salt_dir):
++ try:
++ with salt.utils.files.fopen(fetch_path, "w"):
++ pass
++ except OSError as exc: # pylint: disable=broad-except
++ log.error(
++ f"Failed to make fetch request: {fetch_path} {exc}",
++ exc_info=True,
++ )
++ else:
++ log.error(f"Failed to make fetch request: {fetch_path}")
+ if repo.fetch():
+ # We can't just use the return value from repo.fetch()
+ # because the data could still have changed if old
+@@ -2863,7 +2927,7 @@ class GitBase:
+ for repo in self.remotes:
+ fp_.write(
+ salt.utils.stringutils.to_str(
+- "{} = {}\n".format(repo.cachedir_basename, repo.id)
++ "{} = {}\n".format(repo.get_cache_basehash(), repo.id)
+ )
+ )
+ except OSError:
+@@ -2871,15 +2935,18 @@ class GitBase:
+ else:
+ log.info("Wrote new %s remote map to %s", self.role, remote_map)
+
+- def do_checkout(self, repo):
++ def do_checkout(self, repo, fetch_on_fail=True):
+ """
+ Common code for git_pillar/winrepo to handle locking and checking out
+ of a repo.
++
++ fetch_on_fail
++            If checkout fails, perform a fetch, then try to checkout again.
+ """
+ time_start = time.time()
+ while time.time() - time_start <= 5:
+ try:
+- return repo.checkout()
++ return repo.checkout(fetch_on_fail=fetch_on_fail)
+ except GitLockError as exc:
+ if exc.errno == errno.EEXIST:
+ time.sleep(0.1)
+@@ -3274,14 +3341,17 @@ class GitPillar(GitBase):
+
+ role = "git_pillar"
+
+- def checkout(self):
++ def checkout(self, fetch_on_fail=True):
+ """
+ Checkout the targeted branches/tags from the git_pillar remotes
++
++ fetch_on_fail
++            If checkout fails, perform a fetch, then try to checkout again.
+ """
+ self.pillar_dirs = OrderedDict()
+ self.pillar_linked_dirs = []
+ for repo in self.remotes:
+- cachedir = self.do_checkout(repo)
++ cachedir = self.do_checkout(repo, fetch_on_fail=fetch_on_fail)
+ if cachedir is not None:
+ # Figure out which environment this remote should be assigned
+ if repo.branch == "__env__" and hasattr(repo, "all_saltenvs"):
+@@ -3298,8 +3368,8 @@ class GitPillar(GitBase):
+ env = "base" if tgt == repo.base else tgt
+ if repo._mountpoint:
+ if self.link_mountpoint(repo):
+- self.pillar_dirs[repo.linkdir] = env
+- self.pillar_linked_dirs.append(repo.linkdir)
++ self.pillar_dirs[repo.get_linkdir()] = env
++ self.pillar_linked_dirs.append(repo.get_linkdir())
+ else:
+ self.pillar_dirs[cachedir] = env
+
+@@ -3308,17 +3378,19 @@ class GitPillar(GitBase):
+ Ensure that the mountpoint is present in the correct location and
+ points at the correct path
+ """
+- lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint)
+- lcachedest = salt.utils.path.join(repo.cachedir, repo.root()).rstrip(os.sep)
++ lcachelink = salt.utils.path.join(repo.get_linkdir(), repo._mountpoint)
++ lcachedest = salt.utils.path.join(repo.get_cachedir(), repo.root()).rstrip(
++ os.sep
++ )
+ wipe_linkdir = False
+ create_link = False
+ try:
+ with repo.gen_lock(lock_type="mountpoint", timeout=10):
+- walk_results = list(os.walk(repo.linkdir, followlinks=False))
++ walk_results = list(os.walk(repo.get_linkdir(), followlinks=False))
+ if walk_results != repo.linkdir_walk:
+ log.debug(
+ "Results of walking %s differ from expected results",
+- repo.linkdir,
++ repo.get_linkdir(),
+ )
+ log.debug("Walk results: %s", walk_results)
+ log.debug("Expected results: %s", repo.linkdir_walk)
+@@ -3379,7 +3451,7 @@ class GitPillar(GitBase):
+ # Wiping implies that we need to create the link
+ create_link = True
+ try:
+- shutil.rmtree(repo.linkdir)
++ shutil.rmtree(repo.get_linkdir())
+ except OSError:
+ pass
+ try:
+@@ -3431,6 +3503,9 @@ class GitPillar(GitBase):
+ class WinRepo(GitBase):
+ """
+ Functionality specific to the winrepo runner
++
++ fetch_on_fail
++        If checkout fails, perform a fetch, then try to checkout again.
+ """
+
+ role = "winrepo"
+@@ -3438,12 +3513,12 @@ class WinRepo(GitBase):
+ # out the repos.
+ winrepo_dirs = {}
+
+- def checkout(self):
++ def checkout(self, fetch_on_fail=True):
+ """
+ Checkout the targeted branches/tags from the winrepo remotes
+ """
+ self.winrepo_dirs = {}
+ for repo in self.remotes:
+- cachedir = self.do_checkout(repo)
++ cachedir = self.do_checkout(repo, fetch_on_fail=fetch_on_fail)
+ if cachedir is not None:
+ self.winrepo_dirs[repo.id] = cachedir
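The fetch_request plumbing added above is a small file-based protocol between
providers that share a repo but track different branches: the update run drops
an empty fetch_request marker into every sibling branch's work dir, and the
next checkout() on those providers consumes the marker and fetches first, even
with fetch_on_fail=False. A condensed sketch of the two halves (function names
follow the patch; error handling omitted):

import os


def place_fetch_requests(work_root, current_basename):
    # Update side: flag every other branch work dir of this repo.
    for branch in os.listdir(work_root):
        if branch == current_basename:
            continue
        branch_dir = os.path.join(work_root, branch)
        if os.path.isdir(branch_dir):
            with open(os.path.join(branch_dir, "fetch_request"), "w"):
                pass


def fetch_request_check(salt_working_dir, fetch):
    # Checkout side: consume the marker and fetch when it was present.
    marker = os.path.join(salt_working_dir, "fetch_request")
    if os.path.isfile(marker):
        os.remove(marker)
        fetch()
        return True
    return False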
+diff --git a/tests/pytests/functional/pillar/test_git_pillar.py b/tests/pytests/functional/pillar/test_git_pillar.py
+new file mode 100644
+index 0000000000..6fd3dee431
+--- /dev/null
++++ b/tests/pytests/functional/pillar/test_git_pillar.py
+@@ -0,0 +1,262 @@
++import pytest
++
++from salt.pillar.git_pillar import ext_pillar
++from salt.utils.immutabletypes import ImmutableDict, ImmutableList
++from tests.support.mock import patch
++
++pytestmark = [
++ pytest.mark.slow_test,
++]
++
++
++try:
++ import git # pylint: disable=unused-import
++
++ HAS_GITPYTHON = True
++except ImportError:
++ HAS_GITPYTHON = False
++
++
++try:
++ import pygit2 # pylint: disable=unused-import
++
++ HAS_PYGIT2 = True
++except ImportError:
++ HAS_PYGIT2 = False
++
++
++skipif_no_gitpython = pytest.mark.skipif(not HAS_GITPYTHON, reason="Missing gitpython")
++skipif_no_pygit2 = pytest.mark.skipif(not HAS_PYGIT2, reason="Missing pygit2")
++
++
++@pytest.fixture
++def git_pillar_opts(salt_master, tmp_path):
++ opts = dict(salt_master.config)
++ opts["cachedir"] = str(tmp_path)
++ for key, item in opts.items():
++ if isinstance(item, ImmutableDict):
++ opts[key] = dict(item)
++ elif isinstance(item, ImmutableList):
++ opts[key] = list(item)
++ return opts
++
++
++@pytest.fixture
++def gitpython_pillar_opts(git_pillar_opts):
++ git_pillar_opts["verified_git_pillar_provider"] = "gitpython"
++ return git_pillar_opts
++
++
++@pytest.fixture
++def pygit2_pillar_opts(git_pillar_opts):
++ git_pillar_opts["verified_git_pillar_provider"] = "pygit2"
++ return git_pillar_opts
++
++
++def _get_ext_pillar(minion, pillar_opts, grains, *repos):
++ with patch("salt.pillar.git_pillar.__opts__", pillar_opts, create=True):
++ with patch("salt.pillar.git_pillar.__grains__", grains, create=True):
++ return ext_pillar(minion, None, *repos)
++
++
++def _test_simple(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ assert data == {"key": "value"}
++
++
++@skipif_no_gitpython
++def test_gitpython_simple(gitpython_pillar_opts, grains):
++ _test_simple(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_simple(pygit2_pillar_opts, grains):
++ _test_simple(pygit2_pillar_opts, grains)
++
++
++def _test_missing_env(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ {
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git": [
++ {"env": "misssing"}
++ ]
++ },
++ )
++ assert data == {}
++
++
++@skipif_no_gitpython
++def test_gitpython_missing_env(gitpython_pillar_opts, grains):
++ _test_missing_env(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_missing_env(pygit2_pillar_opts, grains):
++ _test_missing_env(pygit2_pillar_opts, grains)
++
++
++def _test_env(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ {
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git": [
++ {"env": "other_env"}
++ ]
++ },
++ )
++ assert data == {"other": "env"}
++
++
++@skipif_no_gitpython
++def test_gitpython_env(gitpython_pillar_opts, grains):
++ _test_env(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_env(pygit2_pillar_opts, grains):
++ _test_env(pygit2_pillar_opts, grains)
++
++
++def _test_branch(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ assert data == {"key": "data"}
++
++
++@skipif_no_gitpython
++def test_gitpython_branch(gitpython_pillar_opts, grains):
++ _test_branch(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_branch(pygit2_pillar_opts, grains):
++ _test_branch(pygit2_pillar_opts, grains)
++
++
++def _test_simple_dynamic(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ assert data == {"key": "value"}
++
++
++@skipif_no_gitpython
++def test_gitpython_simple_dynamic(gitpython_pillar_opts, grains):
++ _test_simple_dynamic(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_simple_dynamic(pygit2_pillar_opts, grains):
++ _test_simple_dynamic(pygit2_pillar_opts, grains)
++
++
++def _test_missing_env_dynamic(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ {
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git": [
++ {"env": "misssing"}
++ ]
++ },
++ )
++ assert data == {}
++
++
++@skipif_no_gitpython
++def test_gitpython_missing_env_dynamic(gitpython_pillar_opts, grains):
++ _test_missing_env_dynamic(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_missing_env_dynamic(pygit2_pillar_opts, grains):
++ _test_missing_env_dynamic(pygit2_pillar_opts, grains)
++
++
++def _test_pillarenv_dynamic(pillar_opts, grains):
++ pillar_opts["pillarenv"] = "branch"
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ assert data == {"key": "data"}
++
++
++@skipif_no_gitpython
++def test_gitpython_pillarenv_dynamic(gitpython_pillar_opts, grains):
++ _test_pillarenv_dynamic(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_pillarenv_dynamic(pygit2_pillar_opts, grains):
++ _test_pillarenv_dynamic(pygit2_pillar_opts, grains)
++
++
++def _test_multiple(pillar_opts, grains):
++ pillar_opts["pillarenv"] = "branch"
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ assert data == {"key": "data"}
++
++
++@skipif_no_gitpython
++def test_gitpython_multiple(gitpython_pillar_opts, grains):
++ _test_multiple(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_multiple(pygit2_pillar_opts, grains):
++ _test_multiple(pygit2_pillar_opts, grains)
++
++
++def _test_multiple_2(pillar_opts, grains):
++ data = _get_ext_pillar(
++ "minion",
++ pillar_opts,
++ grains,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ assert data == {
++ "key": "value",
++ "key1": "value1",
++ "key2": "value2",
++ "key4": "value4",
++ "data1": "d",
++ "data2": "d2",
++ }
++
++
++@skipif_no_gitpython
++def test_gitpython_multiple_2(gitpython_pillar_opts, grains):
++ _test_multiple_2(gitpython_pillar_opts, grains)
++
++
++@skipif_no_pygit2
++def test_pygit2_multiple_2(pygit2_pillar_opts, grains):
++ _test_multiple_2(pygit2_pillar_opts, grains)
+diff --git a/tests/pytests/functional/utils/test_cache.py b/tests/pytests/functional/utils/test_cache.py
+new file mode 100644
+index 0000000000..d405b8246f
+--- /dev/null
++++ b/tests/pytests/functional/utils/test_cache.py
+@@ -0,0 +1,83 @@
++import os
++
++import pytest
++
++import salt.utils.cache
++import salt.utils.files
++import salt.utils.path
++import salt.version
++
++_DUMMY_FILES = (
++ "data.txt",
++ "foo.t2",
++ "bar.t3",
++ "nested/test",
++ "nested/cache.txt",
++ "n/n1/n2/n3/n4/n5",
++)
++
++
++def _make_dummy_files(tmp_path):
++ for full_path in _DUMMY_FILES:
++ full_path = salt.utils.path.join(tmp_path, full_path)
++ path, _ = os.path.split(full_path)
++ if not os.path.isdir(path):
++ os.makedirs(path)
++ with salt.utils.files.fopen(full_path, "w") as file:
++ file.write("data")
++
++
++def _dummy_files_exists(tmp_path):
++ """
++    True if all files exist
++    False if all files are missing
++    None if some files exist and others are missing
++ """
++ ret = None
++ for full_path in _DUMMY_FILES:
++ full_path = salt.utils.path.join(tmp_path, full_path)
++ is_file = os.path.isfile(full_path)
++ if ret is None:
++ ret = is_file
++ elif ret is not is_file:
++ return None # Some files are found and others are missing
++ return ret
++
++
++def test_verify_cache_version_bad_path():
++ with pytest.raises(ValueError):
++        # verify_cache_version should raise when given a bad path
++ salt.utils.cache.verify_cache_version("\0/bad/path")
++
++
++def test_verify_cache_version(tmp_path):
++    # verify_cache_version should create the dir if it does not exist
++ tmp_path = str(salt.utils.path.join(str(tmp_path), "work", "salt"))
++ cache_version = salt.utils.path.join(tmp_path, "cache_version")
++
++ # check that cache clears when no cache_version is present
++ _make_dummy_files(tmp_path)
++ assert salt.utils.cache.verify_cache_version(tmp_path) is False
++ assert _dummy_files_exists(tmp_path) is False
++
++ # check that cache_version has correct salt version
++ with salt.utils.files.fopen(cache_version, "r") as file:
++ assert "\n".join(file.readlines()) == salt.version.__version__
++
++    # check that cache does not get cleared when check is called multiple times
++ _make_dummy_files(tmp_path)
++ for _ in range(3):
++ assert salt.utils.cache.verify_cache_version(tmp_path) is True
++ assert _dummy_files_exists(tmp_path) is True
++
++ # check that cache clears when a different version is present
++ with salt.utils.files.fopen(cache_version, "w") as file:
++ file.write("-1")
++ assert salt.utils.cache.verify_cache_version(tmp_path) is False
++ assert _dummy_files_exists(tmp_path) is False
++
++    # check that cache does not get cleared when check is called multiple times
++ _make_dummy_files(tmp_path)
++ for _ in range(3):
++ assert salt.utils.cache.verify_cache_version(tmp_path) is True
++ assert _dummy_files_exists(tmp_path) is True
+diff --git a/tests/pytests/functional/utils/test_gitfs.py b/tests/pytests/functional/utils/test_gitfs.py
+new file mode 100644
+index 0000000000..30a5f147fa
+--- /dev/null
++++ b/tests/pytests/functional/utils/test_gitfs.py
+@@ -0,0 +1,275 @@
++import os.path
++
++import pytest
++
++from salt.fileserver.gitfs import PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
++from salt.utils.gitfs import GitFS, GitPython, Pygit2
++from salt.utils.immutabletypes import ImmutableDict, ImmutableList
++
++pytestmark = [
++ pytest.mark.slow_test,
++]
++
++
++try:
++ import git # pylint: disable=unused-import
++
++ HAS_GITPYTHON = True
++except ImportError:
++ HAS_GITPYTHON = False
++
++
++try:
++ import pygit2 # pylint: disable=unused-import
++
++ HAS_PYGIT2 = True
++except ImportError:
++ HAS_PYGIT2 = False
++
++
++skipif_no_gitpython = pytest.mark.skipif(not HAS_GITPYTHON, reason="Missing gitpython")
++skipif_no_pygit2 = pytest.mark.skipif(not HAS_PYGIT2, reason="Missing pygit2")
++
++
++@pytest.fixture
++def gitfs_opts(salt_factories, tmp_path):
++ config_defaults = {"cachedir": str(tmp_path)}
++ factory = salt_factories.salt_master_daemon(
++ "gitfs-functional-master", defaults=config_defaults
++ )
++ config_defaults = dict(factory.config)
++ for key, item in config_defaults.items():
++ if isinstance(item, ImmutableDict):
++ config_defaults[key] = dict(item)
++ elif isinstance(item, ImmutableList):
++ config_defaults[key] = list(item)
++ return config_defaults
++
++
++@pytest.fixture
++def gitpython_gitfs_opts(gitfs_opts):
++ gitfs_opts["verified_gitfs_provider"] = "gitpython"
++ GitFS.instance_map.clear() # wipe instance_map object map for clean run
++ return gitfs_opts
++
++
++@pytest.fixture
++def pygit2_gitfs_opts(gitfs_opts):
++ gitfs_opts["verified_gitfs_provider"] = "pygit2"
++ GitFS.instance_map.clear() # wipe instance_map object map for clean run
++ return gitfs_opts
++
++
++def _get_gitfs(opts, *remotes):
++ return GitFS(
++ opts,
++ remotes,
++ per_remote_overrides=PER_REMOTE_OVERRIDES,
++ per_remote_only=PER_REMOTE_ONLY,
++ )
++
++
++def _test_gitfs_simple(gitfs_opts):
++ g = _get_gitfs(
++ gitfs_opts,
++ {"https://github.com/saltstack/salt-test-pillar-gitfs.git": [{"name": "bob"}]},
++ )
++ g.fetch_remotes()
++ assert len(g.remotes) == 1
++ assert set(g.file_list({"saltenv": "main"})) == {".gitignore", "README.md"}
++
++
++@skipif_no_gitpython
++def test_gitpython_gitfs_simple(gitpython_gitfs_opts):
++ _test_gitfs_simple(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_gitfs_simple(pygit2_gitfs_opts):
++ _test_gitfs_simple(pygit2_gitfs_opts)
++
++
++def _test_gitfs_simple_base(gitfs_opts):
++ g = _get_gitfs(
++ gitfs_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ g.fetch_remotes()
++ assert len(g.remotes) == 1
++ assert set(g.file_list({"saltenv": "base"})) == {
++ ".gitignore",
++ "README.md",
++ "file.sls",
++ "top.sls",
++ }
++
++
++@skipif_no_gitpython
++def test_gitpython_gitfs_simple_base(gitpython_gitfs_opts):
++ _test_gitfs_simple_base(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_gitfs_simple_base(pygit2_gitfs_opts):
++ _test_gitfs_simple_base(pygit2_gitfs_opts)
++
++
++@skipif_no_gitpython
++def test_gitpython_gitfs_provider(gitpython_gitfs_opts):
++ g = _get_gitfs(
++ gitpython_gitfs_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(g.remotes) == 1
++ assert g.provider == "gitpython"
++ assert isinstance(g.remotes[0], GitPython)
++
++
++@skipif_no_pygit2
++def test_pygit2_gitfs_provider(pygit2_gitfs_opts):
++ g = _get_gitfs(
++ pygit2_gitfs_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(g.remotes) == 1
++ assert g.provider == "pygit2"
++ assert isinstance(g.remotes[0], Pygit2)
++
++
++def _test_gitfs_minion(gitfs_opts):
++ gitfs_opts["__role"] = "minion"
++ g = _get_gitfs(
++ gitfs_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ g.fetch_remotes()
++ assert len(g.remotes) == 1
++ assert set(g.file_list({"saltenv": "base"})) == {
++ ".gitignore",
++ "README.md",
++ "file.sls",
++ "top.sls",
++ }
++ assert set(g.file_list({"saltenv": "main"})) == {".gitignore", "README.md"}
++
++
++@skipif_no_gitpython
++def test_gitpython_gitfs_minion(gitpython_gitfs_opts):
++ _test_gitfs_minion(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_gitfs_minion(pygit2_gitfs_opts):
++ _test_gitfs_minion(pygit2_gitfs_opts)
++
++
++def _test_fetch_request_with_mountpoint(opts):
++ mpoint = [{"mountpoint": "salt/m"}]
++ p = _get_gitfs(
++ opts,
++ {"https://github.com/saltstack/salt-test-pillar-gitfs.git": mpoint},
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 1
++ repo = p.remotes[0]
++ assert repo.mountpoint("testmount") == "salt/m"
++ assert set(p.file_list({"saltenv": "testmount"})) == {
++ "salt/m/test_dir1/testfile3",
++ "salt/m/test_dir1/test_dir2/testfile2",
++ "salt/m/.gitignore",
++ "salt/m/README.md",
++ "salt/m/test_dir1/test_dir2/testfile1",
++ }
++
++
++@skipif_no_gitpython
++def test_gitpython_fetch_request_with_mountpoint(gitpython_gitfs_opts):
++ _test_fetch_request_with_mountpoint(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_fetch_request_with_mountpoint(pygit2_gitfs_opts):
++ _test_fetch_request_with_mountpoint(pygit2_gitfs_opts)
++
++
++def _test_name(opts):
++ p = _get_gitfs(
++ opts,
++ {
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git": [
++ {"name": "name1"}
++ ]
++ },
++ {
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git": [
++ {"name": "name2"}
++ ]
++ },
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 2
++ repo = p.remotes[0]
++ repo2 = p.remotes[1]
++ assert repo.get_cache_basehash() == "name1"
++ assert repo2.get_cache_basehash() == "name2"
++
++
++@skipif_no_gitpython
++def test_gitpython_name(gitpython_gitfs_opts):
++ _test_name(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_name(pygit2_gitfs_opts):
++ _test_name(pygit2_gitfs_opts)
++
++
++def _test_remote_map(opts):
++ p = _get_gitfs(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 1
++ assert os.path.isfile(os.path.join(opts["cachedir"], "gitfs", "remote_map.txt"))
++
++
++@skipif_no_gitpython
++def test_gitpython_remote_map(gitpython_gitfs_opts):
++ _test_remote_map(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_remote_map(pygit2_gitfs_opts):
++ _test_remote_map(pygit2_gitfs_opts)
++
++
++def _test_lock(opts):
++ g = _get_gitfs(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ g.fetch_remotes()
++ assert len(g.remotes) == 1
++ repo = g.remotes[0]
++ assert repo.get_salt_working_dir() in repo._get_lock_file()
++ assert repo.lock() == (
++ [
++ "Set update lock for gitfs remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert os.path.isfile(repo._get_lock_file())
++ assert repo.clear_lock() == (
++ [
++ "Removed update lock for gitfs remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert not os.path.isfile(repo._get_lock_file())
++
++
++@skipif_no_gitpython
++def test_gitpython_lock(gitpython_gitfs_opts):
++ _test_lock(gitpython_gitfs_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_lock(pygit2_gitfs_opts):
++ _test_lock(pygit2_gitfs_opts)
+diff --git a/tests/pytests/functional/utils/test_pillar.py b/tests/pytests/functional/utils/test_pillar.py
+new file mode 100644
+index 0000000000..143edbf6ff
+--- /dev/null
++++ b/tests/pytests/functional/utils/test_pillar.py
+@@ -0,0 +1,365 @@
++import os
++
++import pytest
++
++from salt.pillar.git_pillar import GLOBAL_ONLY, PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
++from salt.utils.gitfs import GitPillar, GitPython, Pygit2
++from salt.utils.immutabletypes import ImmutableDict, ImmutableList
++
++pytestmark = [
++ pytest.mark.slow_test,
++]
++
++
++try:
++ import git # pylint: disable=unused-import
++
++ HAS_GITPYTHON = True
++except ImportError:
++ HAS_GITPYTHON = False
++
++
++try:
++ import pygit2 # pylint: disable=unused-import
++
++ HAS_PYGIT2 = True
++except ImportError:
++ HAS_PYGIT2 = False
++
++
++skipif_no_gitpython = pytest.mark.skipif(not HAS_GITPYTHON, reason="Missing gitpython")
++skipif_no_pygit2 = pytest.mark.skipif(not HAS_PYGIT2, reason="Missing pygit2")
++
++
++@pytest.fixture
++def pillar_opts(salt_factories, tmp_path):
++ config_defaults = {"cachedir": str(tmp_path)}
++ factory = salt_factories.salt_master_daemon(
++ "pillar-functional-master", defaults=config_defaults
++ )
++ config_defaults = dict(factory.config)
++ for key, item in config_defaults.items():
++ if isinstance(item, ImmutableDict):
++ config_defaults[key] = dict(item)
++ elif isinstance(item, ImmutableList):
++ config_defaults[key] = list(item)
++ return config_defaults
++
++
++@pytest.fixture
++def gitpython_pillar_opts(pillar_opts):
++ pillar_opts["verified_git_pillar_provider"] = "gitpython"
++ return pillar_opts
++
++
++@pytest.fixture
++def pygit2_pillar_opts(pillar_opts):
++ pillar_opts["verified_git_pillar_provider"] = "pygit2"
++ return pillar_opts
++
++
++def _get_pillar(opts, *remotes):
++ return GitPillar(
++ opts,
++ remotes,
++ per_remote_overrides=PER_REMOTE_OVERRIDES,
++ per_remote_only=PER_REMOTE_ONLY,
++ global_only=GLOBAL_ONLY,
++ )
++
++
++@skipif_no_gitpython
++def test_gitpython_pillar_provider(gitpython_pillar_opts):
++ p = _get_pillar(
++ gitpython_pillar_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(p.remotes) == 1
++ assert p.provider == "gitpython"
++ assert isinstance(p.remotes[0], GitPython)
++
++
++@skipif_no_pygit2
++def test_pygit2_pillar_provider(pygit2_pillar_opts):
++ p = _get_pillar(
++ pygit2_pillar_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(p.remotes) == 1
++ assert p.provider == "pygit2"
++ assert isinstance(p.remotes[0], Pygit2)
++
++
++def _test_env(opts):
++ p = _get_pillar(
++ opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(p.remotes) == 1
++ p.checkout()
++ repo = p.remotes[0]
++ # test that two different pillarenvs can exist at the same time
++ files = set(os.listdir(repo.get_cachedir()))
++ for f in (".gitignore", "README.md", "file.sls", "top.sls"):
++ assert f in files
++ opts["pillarenv"] = "main"
++ p2 = _get_pillar(
++ opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++    assert len(p2.remotes) == 1
++ p2.checkout()
++ repo2 = p2.remotes[0]
++ files = set(os.listdir(repo2.get_cachedir()))
++ for f in (".gitignore", "README.md"):
++ assert f in files
++ for f in ("file.sls", "top.sls", "back.sls", "rooms.sls"):
++ assert f not in files
++ assert repo.get_cachedir() != repo2.get_cachedir()
++ files = set(os.listdir(repo.get_cachedir()))
++ for f in (".gitignore", "README.md", "file.sls", "top.sls"):
++ assert f in files
++
++ # double check cache paths
++ assert (
++ repo.get_cache_hash() == repo2.get_cache_hash()
++ ) # __env__ repos share same hash
++ assert repo.get_cache_basename() != repo2.get_cache_basename()
++ assert repo.get_linkdir() != repo2.get_linkdir()
++ assert repo.get_salt_working_dir() != repo2.get_salt_working_dir()
++ assert repo.get_cache_basename() == "master"
++ assert repo2.get_cache_basename() == "main"
++
++ assert repo.get_cache_basename() in repo.get_cachedir()
++ assert (
++ os.path.join(repo.get_cache_basehash(), repo.get_cache_basename())
++ == repo.get_cache_full_basename()
++ )
++ assert repo.get_linkdir() not in repo.get_cachedir()
++ assert repo.get_salt_working_dir() not in repo.get_cachedir()
++
++
++@skipif_no_gitpython
++def test_gitpython_env(gitpython_pillar_opts):
++ _test_env(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_env(pygit2_pillar_opts):
++ _test_env(pygit2_pillar_opts)
++
++
++def _test_checkout_fetch_on_fail(opts):
++ p = _get_pillar(opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git")
++ p.checkout(fetch_on_fail=False) # TODO write me
++
++
++@skipif_no_gitpython
++def test_gitpython_checkout_fetch_on_fail(gitpython_pillar_opts):
++ _test_checkout_fetch_on_fail(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_checkout_fetch_on_fail(pygit2_pillar_opts):
++ _test_checkout_fetch_on_fail(pygit2_pillar_opts)
++
++
++def _test_multiple_repos(opts):
++ p = _get_pillar(
++ opts,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ p.checkout()
++ assert len(p.remotes) == 5
++    # make sure the repos don't share cache or working dirs
++ assert len({r.get_cachedir() for r in p.remotes}) == 5
++ assert len({r.get_salt_working_dir() for r in p.remotes}) == 5
++
++ p2 = _get_pillar(
++ opts,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ p2.checkout()
++ assert len(p2.remotes) == 5
++    # make sure that matching repos are given the same cache dir
++ for repo, repo2 in zip(p.remotes, p2.remotes):
++ assert repo.get_cachedir() == repo2.get_cachedir()
++ assert repo.get_salt_working_dir() == repo2.get_salt_working_dir()
++ opts["pillarenv"] = "main"
++ p3 = _get_pillar(
++ opts,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "main https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "branch https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ p3.checkout()
++    # check that __env__ gets a different cache with a different pillarenv
++ assert p.remotes[0].get_cachedir() != p3.remotes[0].get_cachedir()
++ assert p.remotes[1].get_cachedir() == p3.remotes[1].get_cachedir()
++ assert p.remotes[2].get_cachedir() == p3.remotes[2].get_cachedir()
++ assert p.remotes[3].get_cachedir() != p3.remotes[3].get_cachedir()
++ assert p.remotes[4].get_cachedir() == p3.remotes[4].get_cachedir()
++
++ # check that other branch data is in cache
++ files = set(os.listdir(p.remotes[4].get_cachedir()))
++ for f in (".gitignore", "README.md", "file.sls", "top.sls", "other_env.sls"):
++ assert f in files
++
++
++@skipif_no_gitpython
++def test_gitpython_multiple_repos(gitpython_pillar_opts):
++ _test_multiple_repos(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_multiple_repos(pygit2_pillar_opts):
++ _test_multiple_repos(pygit2_pillar_opts)
++
++
++def _test_fetch_request(opts):
++ p = _get_pillar(
++ opts,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ frequest = os.path.join(p.remotes[0].get_salt_working_dir(), "fetch_request")
++ frequest_other = os.path.join(p.remotes[1].get_salt_working_dir(), "fetch_request")
++ opts["pillarenv"] = "main"
++ p2 = _get_pillar(
++ opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ frequest2 = os.path.join(p2.remotes[0].get_salt_working_dir(), "fetch_request")
++ assert frequest != frequest2
++ assert os.path.isfile(frequest) is False
++ assert os.path.isfile(frequest2) is False
++ assert os.path.isfile(frequest_other) is False
++ p.fetch_remotes()
++ assert os.path.isfile(frequest) is False
++ # fetch request was placed
++ assert os.path.isfile(frequest2) is True
++ p2.checkout()
++ # fetch request was found
++ assert os.path.isfile(frequest2) is False
++ p2.fetch_remotes()
++ assert os.path.isfile(frequest) is True
++ assert os.path.isfile(frequest2) is False
++ assert os.path.isfile(frequest_other) is False
++ for _ in range(3):
++ p2.fetch_remotes()
++ assert os.path.isfile(frequest) is True
++ assert os.path.isfile(frequest2) is False
++ assert os.path.isfile(frequest_other) is False
++ # fetch request should still be processed even on fetch_on_fail=False
++ p.checkout(fetch_on_fail=False)
++ assert os.path.isfile(frequest) is False
++ assert os.path.isfile(frequest2) is False
++ assert os.path.isfile(frequest_other) is False
++
++
++@skipif_no_gitpython
++def test_gitpython_fetch_request(gitpython_pillar_opts):
++ _test_fetch_request(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_fetch_request(pygit2_pillar_opts):
++ _test_fetch_request(pygit2_pillar_opts)
++
++
++def _test_clear_old_remotes(opts):
++ p = _get_pillar(
++ opts,
++ "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ "other https://github.com/saltstack/salt-test-pillar-gitfs-2.git",
++ )
++ repo = p.remotes[0]
++ repo2 = p.remotes[1]
++ opts["pillarenv"] = "main"
++ p2 = _get_pillar(
++ opts, "__env__ https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ repo3 = p2.remotes[0]
++ assert os.path.isdir(repo.get_cachedir()) is True
++ assert os.path.isdir(repo2.get_cachedir()) is True
++ assert os.path.isdir(repo3.get_cachedir()) is True
++ p.clear_old_remotes()
++ assert os.path.isdir(repo.get_cachedir()) is True
++ assert os.path.isdir(repo2.get_cachedir()) is True
++ assert os.path.isdir(repo3.get_cachedir()) is True
++ p2.clear_old_remotes()
++ assert os.path.isdir(repo.get_cachedir()) is True
++ assert os.path.isdir(repo2.get_cachedir()) is False
++ assert os.path.isdir(repo3.get_cachedir()) is True
++
++
++@skipif_no_gitpython
++def test_gitpython_clear_old_remotes(gitpython_pillar_opts):
++ _test_clear_old_remotes(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_clear_old_remotes(pygit2_pillar_opts):
++ _test_clear_old_remotes(pygit2_pillar_opts)
++
++
++def _test_remote_map(opts):
++ p = _get_pillar(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 1
++ assert os.path.isfile(
++ os.path.join(opts["cachedir"], "git_pillar", "remote_map.txt")
++ )
++
++
++@skipif_no_gitpython
++def test_gitpython_remote_map(gitpython_pillar_opts):
++ _test_remote_map(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_remote_map(pygit2_pillar_opts):
++ _test_remote_map(pygit2_pillar_opts)
++
++
++def _test_lock(opts):
++ p = _get_pillar(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 1
++ repo = p.remotes[0]
++ assert repo.get_salt_working_dir() in repo._get_lock_file()
++ assert repo.lock() == (
++ [
++ "Set update lock for git_pillar remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert os.path.isfile(repo._get_lock_file())
++ assert repo.clear_lock() == (
++ [
++ "Removed update lock for git_pillar remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert not os.path.isfile(repo._get_lock_file())
++
++
++@skipif_no_gitpython
++def test_gitpython_lock(gitpython_pillar_opts):
++ _test_lock(gitpython_pillar_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_lock(pygit2_pillar_opts):
++ _test_lock(pygit2_pillar_opts)
+diff --git a/tests/pytests/functional/utils/test_winrepo.py b/tests/pytests/functional/utils/test_winrepo.py
+new file mode 100644
+index 0000000000..117d995bba
+--- /dev/null
++++ b/tests/pytests/functional/utils/test_winrepo.py
+@@ -0,0 +1,164 @@
++import os
++
++import pytest
++
++from salt.runners.winrepo import GLOBAL_ONLY, PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
++from salt.utils.gitfs import GitPython, Pygit2, WinRepo
++from salt.utils.immutabletypes import ImmutableDict, ImmutableList
++
++pytestmark = [
++ pytest.mark.slow_test,
++]
++
++
++try:
++ import git # pylint: disable=unused-import
++
++ HAS_GITPYTHON = True
++except ImportError:
++ HAS_GITPYTHON = False
++
++
++try:
++ import pygit2 # pylint: disable=unused-import
++
++ HAS_PYGIT2 = True
++except ImportError:
++ HAS_PYGIT2 = False
++
++
++skipif_no_gitpython = pytest.mark.skipif(not HAS_GITPYTHON, reason="Missing gitpython")
++skipif_no_pygit2 = pytest.mark.skipif(not HAS_PYGIT2, reason="Missing pygit2")
++
++
++@pytest.fixture
++def winrepo_opts(salt_factories, tmp_path):
++ config_defaults = {"cachedir": str(tmp_path)}
++ factory = salt_factories.salt_master_daemon(
++ "winrepo-functional-master", defaults=config_defaults
++ )
++ config_defaults = dict(factory.config)
++ for key, item in config_defaults.items():
++ if isinstance(item, ImmutableDict):
++ config_defaults[key] = dict(item)
++ elif isinstance(item, ImmutableList):
++ config_defaults[key] = list(item)
++ return config_defaults
++
++
++@pytest.fixture
++def gitpython_winrepo_opts(winrepo_opts):
++ winrepo_opts["verified_winrepo_provider"] = "gitpython"
++ return winrepo_opts
++
++
++@pytest.fixture
++def pygit2_winrepo_opts(winrepo_opts):
++ winrepo_opts["verified_winrepo_provider"] = "pygit2"
++ return winrepo_opts
++
++
++def _get_winrepo(opts, *remotes):
++ return WinRepo(
++ opts,
++ remotes,
++ per_remote_overrides=PER_REMOTE_OVERRIDES,
++ per_remote_only=PER_REMOTE_ONLY,
++ global_only=GLOBAL_ONLY,
++ )
++
++
++@skipif_no_gitpython
++def test_gitpython_winrepo_provider(gitpython_winrepo_opts):
++ w = _get_winrepo(
++ gitpython_winrepo_opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ assert len(w.remotes) == 1
++ assert w.provider == "gitpython"
++ assert isinstance(w.remotes[0], GitPython)
++
++
++@skipif_no_pygit2
++def test_pygit2_winrepo_provider(pygit2_winrepo_opts):
++ w = _get_winrepo(
++ pygit2_winrepo_opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git"
++ )
++ assert len(w.remotes) == 1
++ assert w.provider == "pygit2"
++ assert isinstance(w.remotes[0], Pygit2)
++
++
++def _test_winrepo_simple(opts):
++ w = _get_winrepo(opts, "https://github.com/saltstack/salt-test-pillar-gitfs.git")
++ assert len(w.remotes) == 1
++ w.checkout()
++ repo = w.remotes[0]
++ files = set(os.listdir(repo.get_cachedir()))
++ for f in (".gitignore", "README.md", "file.sls", "top.sls"):
++ assert f in files
++
++
++@skipif_no_gitpython
++def test_gitpython_winrepo_simple(gitpython_winrepo_opts):
++ _test_winrepo_simple(gitpython_winrepo_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_winrepo_simple(pygit2_winrepo_opts):
++ _test_winrepo_simple(pygit2_winrepo_opts)
++
++
++def _test_remote_map(opts):
++ p = _get_winrepo(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ p.fetch_remotes()
++ assert len(p.remotes) == 1
++ assert os.path.isfile(os.path.join(opts["cachedir"], "winrepo", "remote_map.txt"))
++
++
++@skipif_no_gitpython
++def test_gitpython_remote_map(gitpython_winrepo_opts):
++ _test_remote_map(gitpython_winrepo_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_remote_map(pygit2_winrepo_opts):
++ _test_remote_map(pygit2_winrepo_opts)
++
++
++def _test_lock(opts):
++ w = _get_winrepo(
++ opts,
++ "https://github.com/saltstack/salt-test-pillar-gitfs.git",
++ )
++ w.fetch_remotes()
++ assert len(w.remotes) == 1
++ repo = w.remotes[0]
++ assert repo.get_salt_working_dir() in repo._get_lock_file()
++ assert repo.lock() == (
++ [
++ "Set update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert os.path.isfile(repo._get_lock_file())
++ assert repo.clear_lock() == (
++ [
++ "Removed update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
++ ],
++ [],
++ )
++ assert not os.path.isfile(repo._get_lock_file())
++
++
++@skipif_no_gitpython
++def test_gitpython_lock(gitpython_winrepo_opts):
++ _test_lock(gitpython_winrepo_opts)
++
++
++@skipif_no_pygit2
++def test_pygit2_lock(pygit2_winrepo_opts):
++ _test_lock(pygit2_winrepo_opts)
+diff --git a/tests/pytests/unit/test_minion.py b/tests/pytests/unit/test_minion.py
+index 4508eaee95..740743194e 100644
+--- a/tests/pytests/unit/test_minion.py
++++ b/tests/pytests/unit/test_minion.py
+@@ -21,35 +21,33 @@ from tests.support.mock import MagicMock, patch
+ log = logging.getLogger(__name__)
+
+
+-def test_minion_load_grains_false():
++def test_minion_load_grains_false(minion_opts):
+ """
+ Minion does not generate grains when load_grains is False
+ """
+- opts = {"random_startup_delay": 0, "grains": {"foo": "bar"}}
++ minion_opts["grains"] = {"foo": "bar"}
+ with patch("salt.loader.grains") as grainsfunc:
+- minion = salt.minion.Minion(opts, load_grains=False)
+- assert minion.opts["grains"] == opts["grains"]
++ minion = salt.minion.Minion(minion_opts, load_grains=False)
++ assert minion.opts["grains"] == minion_opts["grains"]
+ grainsfunc.assert_not_called()
+
+
+-def test_minion_load_grains_true():
++def test_minion_load_grains_true(minion_opts):
+ """
+ Minion generates grains when load_grains is True
+ """
+- opts = {"random_startup_delay": 0, "grains": {}}
+ with patch("salt.loader.grains") as grainsfunc:
+- minion = salt.minion.Minion(opts, load_grains=True)
++ minion = salt.minion.Minion(minion_opts, load_grains=True)
+ assert minion.opts["grains"] != {}
+ grainsfunc.assert_called()
+
+
+-def test_minion_load_grains_default():
++def test_minion_load_grains_default(minion_opts):
+ """
+ Minion load_grains defaults to True
+ """
+- opts = {"random_startup_delay": 0, "grains": {}}
+ with patch("salt.loader.grains") as grainsfunc:
+- minion = salt.minion.Minion(opts)
++ minion = salt.minion.Minion(minion_opts)
+ assert minion.opts["grains"] != {}
+ grainsfunc.assert_called()
+
+@@ -91,24 +89,17 @@ def test_send_req_tries(req_channel, minion_opts):
+
+ assert rtn == 30
+
+-
+-@patch("salt.channel.client.ReqChannel.factory")
+-def test_mine_send_tries(req_channel_factory):
++def test_mine_send_tries(minion_opts):
+ channel_enter = MagicMock()
+ channel_enter.send.side_effect = lambda load, timeout, tries: tries
+ channel = MagicMock()
+ channel.__enter__.return_value = channel_enter
+
+- opts = {
+- "random_startup_delay": 0,
+- "grains": {},
+- "return_retry_tries": 20,
+- "minion_sign_messages": False,
+- }
++ minion_opts["return_retry_tries"] = 20
+ with patch("salt.channel.client.ReqChannel.factory", return_value=channel), patch(
+ "salt.loader.grains"
+ ):
+- minion = salt.minion.Minion(opts)
++ minion = salt.minion.Minion(minion_opts)
+ minion.tok = "token"
+
+ data = {}
+diff --git a/tests/pytests/unit/utils/test_gitfs.py b/tests/pytests/unit/utils/test_gitfs.py
+index e9915de412..2bf627049f 100644
+--- a/tests/pytests/unit/utils/test_gitfs.py
++++ b/tests/pytests/unit/utils/test_gitfs.py
+@@ -1,5 +1,4 @@
+ import os
+-import string
+ import time
+
+ import pytest
+@@ -214,11 +213,11 @@ def test_checkout_pygit2(_prepare_provider):
+ provider.init_remote()
+ provider.fetch()
+ provider.branch = "master"
+- assert provider.cachedir in provider.checkout()
++ assert provider.get_cachedir() in provider.checkout()
+ provider.branch = "simple_tag"
+- assert provider.cachedir in provider.checkout()
++ assert provider.get_cachedir() in provider.checkout()
+ provider.branch = "annotated_tag"
+- assert provider.cachedir in provider.checkout()
++ assert provider.get_cachedir() in provider.checkout()
+ provider.branch = "does_not_exist"
+ assert provider.checkout() is None
+
+@@ -238,18 +237,9 @@ def test_checkout_pygit2_with_home_env_unset(_prepare_provider):
+ assert "HOME" in os.environ
+
+
+-def test_full_id_pygit2(_prepare_provider):
+- assert _prepare_provider.full_id().startswith("-")
+- assert _prepare_provider.full_id().endswith("/pygit2-repo---gitfs-master--")
+-
+-
+ @pytest.mark.skipif(not HAS_PYGIT2, reason="This host lacks proper pygit2 support")
+ @pytest.mark.skip_on_windows(
+ reason="Skip Pygit2 on windows, due to pygit2 access error on windows"
+ )
+ def test_get_cachedir_basename_pygit2(_prepare_provider):
+- basename = _prepare_provider.get_cachedir_basename()
+- assert len(basename) == 45
+- assert basename[0] == "-"
+- # check that a valid base64 is given '/' -> '_'
+- assert all(c in string.ascii_letters + string.digits + "+_=" for c in basename[1:])
++ assert "_" == _prepare_provider.get_cache_basename()
+diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py
+index 6d8e97a239..259ea056fc 100644
+--- a/tests/unit/utils/test_gitfs.py
++++ b/tests/unit/utils/test_gitfs.py
+@@ -114,27 +114,14 @@ class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
+ self.assertTrue(self.main_class.remotes[0].fetched)
+ self.assertFalse(self.main_class.remotes[1].fetched)
+
+- def test_full_id(self):
+- self.assertEqual(
+- self.main_class.remotes[0].full_id(), "-file://repo1.git---gitfs-master--"
+- )
+-
+- def test_full_id_with_name(self):
+- self.assertEqual(
+- self.main_class.remotes[1].full_id(),
+- "repo2-file://repo2.git---gitfs-master--",
+- )
+-
+ def test_get_cachedir_basename(self):
+ self.assertEqual(
+- self.main_class.remotes[0].get_cachedir_basename(),
+- "-jXhnbGDemchtZwTwaD2s6VOaVvs98a7w+AtiYlmOVb0=",
++ self.main_class.remotes[0].get_cache_basename(),
++ "_",
+ )
+-
+- def test_get_cachedir_base_with_name(self):
+ self.assertEqual(
+- self.main_class.remotes[1].get_cachedir_basename(),
+- "repo2-nuezpiDtjQRFC0ZJDByvi+F6Vb8ZhfoH41n_KFxTGsU=",
++ self.main_class.remotes[1].get_cache_basename(),
++ "_",
+ )
+
+ def test_git_provider_mp_lock(self):
+--
+2.42.0
+
+
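The winrepo_opts fixture in the patch above shows a pattern worth reusing: the config returned by a salt-factories master daemon contains immutable containers, so a test that wants to tweak options (here, verified_winrepo_provider) must first convert them back to plain dicts and lists. A minimal sketch of that conversion, assuming the container types come from salt.utils.immutabletypes as the test's imports suggest:

    from salt.utils.immutabletypes import ImmutableDict, ImmutableList

    def mutable_copy(config):
        # Factory configs may hold immutable containers; tests that
        # mutate options need plain dicts and lists instead.
        opts = dict(config)
        for key, item in opts.items():
            if isinstance(item, ImmutableDict):
                opts[key] = dict(item)
            elif isinstance(item, ImmutableList):
                opts[key] = list(item)
        return opts

Note this only converts the top level, which is all the fixture above needs.
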
diff --git a/fix-issue-2068-test.patch b/fix-issue-2068-test.patch
new file mode 100644
index 0000000..701457a
--- /dev/null
+++ b/fix-issue-2068-test.patch
@@ -0,0 +1,52 @@
+From b0e713d6946526b894837406c0760c262e4312a1 Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Wed, 9 Jan 2019 16:08:19 +0100
+Subject: [PATCH] Fix issue #2068 test
+
+Skip injecting `__call__` if the chunk is not a mapping.
+
+This also fixes `integration/modules/test_state.py:StateModuleTest.test_exclude`, which tests `include` and `exclude` state directives that contain only a list of strings.
+
+Minor update: a more correct mapping-type check.
+---
+ salt/state.py | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/salt/state.py b/salt/state.py
+index 8352a8defc..cb434a91e7 100644
+--- a/salt/state.py
++++ b/salt/state.py
+@@ -12,6 +12,7 @@ The data sent to the state calls is as follows:
+ """
+
+
++import collections.abc
+ import copy
+ import datetime
+ import fnmatch
+@@ -3507,16 +3508,18 @@ class State:
+ """
+ for chunk in high:
+ state = high[chunk]
++ if not isinstance(state, collections.abc.Mapping):
++ continue
+ for state_ref in state:
+ needs_default = True
++ if not isinstance(state[state_ref], list):
++ continue
+ for argset in state[state_ref]:
+ if isinstance(argset, str):
+ needs_default = False
+ break
+ if needs_default:
+- order = state[state_ref].pop(-1)
+- state[state_ref].append("__call__")
+- state[state_ref].append(order)
++ state[state_ref].insert(-1, "__call__")
+
+ def call_high(self, high, orchestration_jid=None):
+ """
+--
+2.39.2
+
+
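To see why the mapping guard above is needed: compiled high data mixes value shapes. `include` and `exclude` entries are plain lists of strings, while real state declarations map a module name to a list of argsets (plus string bookkeeping entries such as `__sls__`). A rough illustration of the shapes involved (hypothetical data, structure only):

    high = {
        "include": ["other.sls"],   # plain list: the new guard skips it
        "/tmp/x": {
            "__sls__": "demo",      # string value: skipped by the list check
            "file": [{"name": "/tmp/x"}, {"makedirs": True}, 10000],
        },
    }

Note that `state[state_ref].insert(-1, "__call__")` slots the marker in just before the trailing order value, which is exactly what the old pop/append/append sequence achieved.
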
diff --git a/fix-issues-that-break-salt-in-python-3.12-and-3.13-6.patch b/fix-issues-that-break-salt-in-python-3.12-and-3.13-6.patch
new file mode 100644
index 0000000..e20e9ee
--- /dev/null
+++ b/fix-issues-that-break-salt-in-python-3.12-and-3.13-6.patch
@@ -0,0 +1,71 @@
+From d7a6f923ed86be59183161590b60698ea2fd1a21 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 22 Jan 2025 13:01:09 +0000
+Subject: [PATCH] Fix issues that break Salt in Python 3.12 and 3.13
+ (#698)
+
+---
+ salt/ext/tornado/netutil.py | 22 +++++++++++++---------
+ salt/utils/url.py | 5 ++---
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+diff --git a/salt/ext/tornado/netutil.py b/salt/ext/tornado/netutil.py
+index f86b430674..3e7fa130b5 100644
+--- a/salt/ext/tornado/netutil.py
++++ b/salt/ext/tornado/netutil.py
+@@ -48,15 +48,19 @@ except ImportError:
+ if PY3:
+ xrange = range
+
+-if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
+- ssl_match_hostname = ssl.match_hostname
+- SSLCertificateError = ssl.CertificateError
+-elif ssl is None:
+- ssl_match_hostname = SSLCertificateError = None # type: ignore
+-else:
+- import backports.ssl_match_hostname
+- ssl_match_hostname = backports.ssl_match_hostname.match_hostname
+- SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore
++try:
++ from salt.ext.ssl_match_hostname import CertificateError as SSLCertificateError
++ from salt.ext.ssl_match_hostname import match_hostname as ssl_match_hostname
++except ImportError:
++ if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
++ ssl_match_hostname = ssl.match_hostname
++ SSLCertificateError = ssl.CertificateError
++ elif ssl is None:
++ ssl_match_hostname = SSLCertificateError = None # type: ignore
++ else:
++ import backports.ssl_match_hostname
++ ssl_match_hostname = backports.ssl_match_hostname.match_hostname
++ SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore
+
+ if hasattr(ssl, 'SSLContext'):
+ if hasattr(ssl, 'create_default_context'):
+diff --git a/salt/utils/url.py b/salt/utils/url.py
+index a30610394c..0a10e0e1b6 100644
+--- a/salt/utils/url.py
++++ b/salt/utils/url.py
+@@ -5,7 +5,7 @@ URL utils
+
+ import re
+ import sys
+-from urllib.parse import urlparse, urlunparse
++from urllib.parse import urlparse, urlunparse, urlunsplit
+
+ import salt.utils.data
+ import salt.utils.path
+@@ -47,8 +47,7 @@ def create(path, saltenv=None):
+ path = salt.utils.data.decode(path)
+
+ query = "saltenv={}".format(saltenv) if saltenv else ""
+- url = salt.utils.data.decode(urlunparse(("file", "", path, "", query, "")))
+- return "salt://{}".format(url[len("file:///") :])
++ return f'salt://{salt.utils.data.decode(urlunsplit(("", "", path, query, "")))}'
+
+
+ def is_escaped(url):
+--
+2.47.0
+
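The switch to `urlunsplit` above works because, with empty scheme and netloc, it simply joins path and query, leaving no `file:` prefix to strip off afterwards. A quick sketch of the behaviour the new code relies on (example paths only):

    from urllib.parse import urlunsplit

    # components: (scheme, netloc, path, query, fragment)
    print(urlunsplit(("", "", "apache/init.sls", "saltenv=base", "")))
    # apache/init.sls?saltenv=base
    print(urlunsplit(("", "", "apache/init.sls", "", "")))
    # apache/init.sls
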
diff --git a/fix-missing-minion-returns-in-batch-mode-360.patch b/fix-missing-minion-returns-in-batch-mode-360.patch
new file mode 100644
index 0000000..1566eb6
--- /dev/null
+++ b/fix-missing-minion-returns-in-batch-mode-360.patch
@@ -0,0 +1,30 @@
+From 5158ebce305d961a2d2e3cb3f889b0cde593c4a0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ond=C5=99ej=20Hole=C4=8Dek?=
+Date: Mon, 10 May 2021 16:23:19 +0200
+Subject: [PATCH] Fix missing minion returns in batch mode (#360)
+
+Don't close the pub channel if there are pending events; otherwise those
+events are lost, resulting in empty minion returns.
+
+Co-authored-by: Denis V. Meltsaykin
+---
+ salt/client/__init__.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/salt/client/__init__.py b/salt/client/__init__.py
+index bcda56c9b4..b2617e4554 100644
+--- a/salt/client/__init__.py
++++ b/salt/client/__init__.py
+@@ -976,7 +976,7 @@ class LocalClient:
+
+ self._clean_up_subscriptions(pub_data["jid"])
+ finally:
+- if not was_listening:
++ if not was_listening and not self.event.pending_events:
+ self.event.close_pub()
+
+ def cmd_full_return(
+--
+2.39.2
+
+
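The guard in the hunk above generalizes to any SaltEvent consumer: a pub connection that was opened just for this call may only be torn down once its queue has drained. A minimal sketch of the pattern (`was_listening` records whether the caller was already subscribed before this call; `pending_events` and `close_pub` are the SaltEvent attributes used in the hunk):

    try:
        pass  # fire the job and collect the returns here
    finally:
        # Calling close_pub() while events are still queued would drop
        # them, which surfaces as empty minion returns in batch mode.
        if not was_listening and not self.event.pending_events:
            self.event.close_pub()
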
diff --git a/fix-optimization_order-opt-to-prevent-test-fails.patch b/fix-optimization_order-opt-to-prevent-test-fails.patch
new file mode 100644
index 0000000..1b72186
--- /dev/null
+++ b/fix-optimization_order-opt-to-prevent-test-fails.patch
@@ -0,0 +1,62 @@
+From aaf593d17f51a517e0adb6e9ec1c0d768ab5f855 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Mon, 2 Oct 2023 14:24:27 +0200
+Subject: [PATCH] Fix optimization_order opt to prevent test failures
+
+---
+ tests/pytests/unit/grains/test_core.py | 4 ++--
+ tests/pytests/unit/loader/test_loader.py | 2 +-
+ tests/pytests/unit/test_config.py | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py
+index 993c723950..36545287b9 100644
+--- a/tests/pytests/unit/grains/test_core.py
++++ b/tests/pytests/unit/grains/test_core.py
+@@ -156,7 +156,7 @@ def test_network_grains_secondary_ip(tmp_path):
+ opts = {
+ "cachedir": str(cache_dir),
+ "extension_modules": str(extmods),
+- "optimization_order": [0],
++ "optimization_order": [0, 1, 2],
+ }
+ with patch("salt.utils.network.interfaces", side_effect=[data]):
+ grains = salt.loader.grain_funcs(opts)
+@@ -243,7 +243,7 @@ def test_network_grains_cache(tmp_path):
+ opts = {
+ "cachedir": str(cache_dir),
+ "extension_modules": str(extmods),
+- "optimization_order": [0],
++ "optimization_order": [0, 1, 2],
+ }
+ with patch(
+ "salt.utils.network.interfaces", side_effect=[call_1, call_2]
+diff --git a/tests/pytests/unit/loader/test_loader.py b/tests/pytests/unit/loader/test_loader.py
+index f4a4b51a58..86348749db 100644
+--- a/tests/pytests/unit/loader/test_loader.py
++++ b/tests/pytests/unit/loader/test_loader.py
+@@ -57,7 +57,7 @@ def test_raw_mod_functions():
+ "Ensure functions loaded by raw_mod are LoaderFunc instances"
+ opts = {
+ "extension_modules": "",
+- "optimization_order": [0],
++ "optimization_order": [0, 1, 2],
+ }
+ ret = salt.loader.raw_mod(opts, "grains", "get")
+ for k, v in ret.items():
+diff --git a/tests/pytests/unit/test_config.py b/tests/pytests/unit/test_config.py
+index cb343cb75e..76d5605360 100644
+--- a/tests/pytests/unit/test_config.py
++++ b/tests/pytests/unit/test_config.py
+@@ -16,7 +16,7 @@ def test_call_id_function(tmp_path):
+ "cachedir": str(cache_dir),
+ "extension_modules": str(extmods),
+ "grains": {"osfinger": "meh"},
+- "optimization_order": [0],
++ "optimization_order": [0, 1, 2],
+ }
+ ret = salt.config.call_id_function(opts)
+ assert ret == "meh"
+--
+2.42.0
+
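For context on the value being changed: `optimization_order` tells the Salt loader which Python bytecode optimization levels it may load, in order of preference. `[0, 1, 2]` (Salt's default) lets it fall back from plain `.pyc` through `.opt-1.pyc` to `.opt-2.pyc` suffixes, while a bare `[0]` can break where only optimized bytecode is shipped. A minimal opts sketch for loader-level tests, mirroring the hunks above (paths are placeholders):

    import salt.loader

    opts = {
        "cachedir": "/tmp/salt-test-cache",
        "extension_modules": "",
        # Accept all bytecode optimization levels, as Salt does by default.
        "optimization_order": [0, 1, 2],
    }
    funcs = salt.loader.raw_mod(opts, "grains", "get")
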
diff --git a/fix-ownership-of-salt-thin-directory-when-using-the-.patch b/fix-ownership-of-salt-thin-directory-when-using-the-.patch
new file mode 100644
index 0000000..2b9fdf5
--- /dev/null
+++ b/fix-ownership-of-salt-thin-directory-when-using-the-.patch
@@ -0,0 +1,50 @@
+From 5f6488ab9211927c421e3d87a4ee84fe659ceb8b Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Mon, 27 Jun 2022 18:03:49 +0300
+Subject: [PATCH] Fix ownership of salt thin directory when using the
+ Salt Bundle
+
+---
+ salt/client/ssh/ssh_py_shim.py | 25 ++++++++++++++++++++++++-
+ 1 file changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
+index 293ea1b7fa..95171f7aea 100644
+--- a/salt/client/ssh/ssh_py_shim.py
++++ b/salt/client/ssh/ssh_py_shim.py
+@@ -292,7 +292,30 @@ def main(argv): # pylint: disable=W0613
+ os.makedirs(OPTIONS.saltdir)
+ cache_dir = os.path.join(OPTIONS.saltdir, "running_data", "var", "cache")
+ os.makedirs(os.path.join(cache_dir, "salt"))
+- os.symlink("salt", os.path.relpath(os.path.join(cache_dir, "venv-salt-minion")))
++ os.symlink(
++ "salt", os.path.relpath(os.path.join(cache_dir, "venv-salt-minion"))
++ )
++ if os.path.exists(OPTIONS.saltdir) and (
++ "SUDO_UID" in os.environ or "SUDO_GID" in os.environ
++ ):
++ try:
++ sudo_uid = int(os.environ.get("SUDO_UID", -1))
++ except ValueError:
++ sudo_uid = -1
++ try:
++ sudo_gid = int(os.environ.get("SUDO_GID", -1))
++ except ValueError:
++ sudo_gid = -1
++ dstat = os.stat(OPTIONS.saltdir)
++ if (sudo_uid != -1 and dstat.st_uid != sudo_uid) or (
++ sudo_gid != -1 and dstat.st_gid != sudo_gid
++ ):
++ os.chown(OPTIONS.saltdir, sudo_uid, sudo_gid)
++ for dir_path, dir_names, file_names in os.walk(OPTIONS.saltdir):
++ for dir_name in dir_names:
++ os.lchown(os.path.join(dir_path, dir_name), sudo_uid, sudo_gid)
++ for file_name in file_names:
++ os.lchown(os.path.join(dir_path, file_name), sudo_uid, sudo_gid)
+
+ if venv_salt_call is None:
+ # Use Salt thin only if Salt Bundle (venv-salt-minion) is not available
+--
+2.39.2
+
+
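The shim change above follows a common pattern for sudo-invoked tools: if the process runs as root under sudo, hand ownership of anything it creates back to the invoking user. A standalone sketch of the same logic (the function name and path argument are illustrative):

    import os

    def chown_to_sudo_caller(path):
        # sudo exports SUDO_UID/SUDO_GID with the invoking user's ids.
        try:
            uid = int(os.environ.get("SUDO_UID", -1))
        except ValueError:
            uid = -1
        try:
            gid = int(os.environ.get("SUDO_GID", -1))
        except ValueError:
            gid = -1
        if uid == -1 and gid == -1:
            return
        os.chown(path, uid, gid)  # -1 leaves that id unchanged on POSIX
        for dirpath, dirnames, filenames in os.walk(path):
            for name in dirnames + filenames:
                # lchown so symlinks themselves are reowned, not targets
                os.lchown(os.path.join(dirpath, name), uid, gid)
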
diff --git a/fix-problematic-tests-and-allow-smooth-tests-executi.patch b/fix-problematic-tests-and-allow-smooth-tests-executi.patch
new file mode 100644
index 0000000..ecc3de8
--- /dev/null
+++ b/fix-problematic-tests-and-allow-smooth-tests-executi.patch
@@ -0,0 +1,2695 @@
+From 1b1bbc3e46ab2eed98f07a23368877fc068dbc06 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Mon, 26 Feb 2024 11:25:22 +0000
+Subject: [PATCH] Fix problematic tests and allow smooth test
+ executions on containers
+
+* Align boto imports in tests with Salt modules
+
+Some modules `import boto` to set log levels, but the related tests do
+not import `boto` themselves. This causes a problem when `boto` is not
+available.
+
+Tests are skipped when HAS_BOTO in test_boto_*.py is False. If the test
+file never tries to `import boto`, HAS_BOTO can stay True there even
+though HAS_BOTO is False on the application side. In that case the
+tests are not skipped and fail.
+
+* Fix mock order in test_dig (test_network.py)
+
+`salt.utils.path.which` should not be mocked before `network.__utils__`. The
+Salt loader calls `salt.utils.network.linux_interfaces`, which needs the real
+`salt.utils.path.which`.
+
+* Fix mock calls
+
+Signed-off-by: Pedro Algarvio
+(cherry picked from commit 3506e7fd0e84320b2873370f1fe527025c244dca)
+
+* Skip venafiapi test if vcert module not available
+
+The same HAS_VCERT check is done in the runner module.
+
+* Move tests/integration/modules/test_cmdmod.py to pytest (original author: Gareth J. Greenaway)
+
+(cherry picked from commit 2c1040b4c2885efaa86576fd88eb36bb550b5996)
+
+* The repo.saltproject.io `index.html` file changed its contents. Fix the tests.
+
+Signed-off-by: Pedro Algarvio
+(cherry picked from commit 672f6586d7c3cdb0e8c5ee42524895035aafcc23)
+
+* Skip hwclock test when executed inside a container
+
+* Skip git pillar tests when executed inside a container
+
+These tests require a Git repository container, which is hard to correctly set
+up when executing the tests in the container in GH Actions.
+
+Using --network host can help, but there was still an error (the git repos were
+empty) when I tried to set this up.
+
+* Skip test requiring systemd inside container
+
+* Exclude hgfs tests when hglib is missing
+
+* Skip and fix tests when running on containers
+
+* Fix some failing tests causing problems in SUSE environments
+
+* Skip more tests when running on containers
+
+* Use skipif instead of skip_if, as the two do not behave identically
+
+* Skip more tests that cannot run in a container
+
+* Remove SSH test which no longer makes sense after the lock mechanism change
+
+* Fix failing boto tests
+
+* Skip zcbuildout tests that are unmaintained upstream
+
+* Skip some tests that do not run well in GH Actions
+
+---------
+
+Co-authored-by: Pedro Algarvio
+Co-authored-by: Gareth J. Greenaway
+Co-authored-by: Alexander Graul
+---
+ .../integration/externalapi/test_venafiapi.py | 10 +-
+ tests/integration/modules/test_cmdmod.py | 634 ------------------
+ tests/integration/modules/test_cp.py | 24 +-
+ tests/integration/modules/test_timezone.py | 3 +
+ tests/integration/pillar/test_git_pillar.py | 3 +
+ tests/integration/ssh/test_state.py | 47 --
+ tests/pytests/functional/cache/test_consul.py | 4 +
+ tests/pytests/functional/cache/test_mysql.py | 4 +
+ .../functional/fileserver/hgfs/test_hgfs.py | 2 +
+ .../pytests/functional/modules/test_cmdmod.py | 561 ++++++++++++++++
+ .../functional/modules/test_dockermod.py | 4 +
+ .../pytests/functional/modules/test_swarm.py | 5 +
+ .../pytests/functional/modules/test_system.py | 3 +
+ .../pillar/hg_pillar/test_hg_pillar.py | 1 +
+ .../states/rabbitmq/test_cluster.py | 4 +
+ .../functional/states/rabbitmq/test_plugin.py | 4 +
+ .../functional/states/rabbitmq/test_policy.py | 4 +
+ .../states/rabbitmq/test_upstream.py | 4 +
+ .../functional/states/rabbitmq/test_user.py | 4 +
+ .../functional/states/rabbitmq/test_vhost.py | 4 +
+ .../functional/states/test_docker_network.py | 5 +
+ tests/pytests/functional/states/test_pkg.py | 6 +-
+ .../integration/cli/test_syndic_eauth.py | 3 +
+ .../integration/daemons/test_memory_leak.py | 4 +
+ .../integration/modules/test_cmdmod.py | 93 +++
+ .../pytests/integration/modules/test_virt.py | 4 +
+ tests/pytests/integration/ssh/test_log.py | 3 +
+ tests/pytests/integration/ssh/test_master.py | 5 +
+ .../integration/ssh/test_py_versions.py | 3 +
+ .../pytests/integration/ssh/test_ssh_setup.py | 2 +
+ .../scenarios/compat/test_with_versions.py | 4 +
+ .../multimaster/test_failover_master.py | 3 +
+ tests/pytests/scenarios/setup/test_install.py | 6 +
+ tests/pytests/unit/modules/test_aptpkg.py | 12 +-
+ .../pytests/unit/modules/test_linux_sysctl.py | 8 +-
+ tests/pytests/unit/modules/test_win_ip.py | 4 +-
+ tests/pytests/unit/test_master.py | 2 +-
+ tests/pytests/unit/test_minion.py | 4 +-
+ tests/pytests/unit/utils/event/test_event.py | 24 +-
+ tests/unit/modules/test_boto_apigateway.py | 1 +
+ .../unit/modules/test_boto_cognitoidentity.py | 1 +
+ .../modules/test_boto_elasticsearch_domain.py | 1 +
+ tests/unit/modules/test_boto_lambda.py | 1 +
+ tests/unit/modules/test_network.py | 6 +-
+ tests/unit/modules/test_nilrt_ip.py | 4 +-
+ tests/unit/modules/test_zcbuildout.py | 2 +
+ .../unit/netapi/rest_tornado/test_saltnado.py | 22 +-
+ tests/unit/states/test_boto_apigateway.py | 1 +
+ .../unit/states/test_boto_cognitoidentity.py | 1 +
+ tests/unit/states/test_zcbuildout.py | 1 +
+ 50 files changed, 824 insertions(+), 741 deletions(-)
+ delete mode 100644 tests/integration/modules/test_cmdmod.py
+ create mode 100644 tests/pytests/functional/modules/test_cmdmod.py
+
+diff --git a/tests/integration/externalapi/test_venafiapi.py b/tests/integration/externalapi/test_venafiapi.py
+index ad08605430f..3ae1e3392d8 100644
+--- a/tests/integration/externalapi/test_venafiapi.py
++++ b/tests/integration/externalapi/test_venafiapi.py
+@@ -13,6 +13,14 @@ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.x509.oid import NameOID
+
++try:
++ import vcert
++ from vcert.common import CertificateRequest
++
++ HAS_VCERT = True
++except ImportError:
++ HAS_VCERT = False
++
+ from tests.support.case import ShellCase
+
+
+@@ -36,6 +44,7 @@ def with_random_name(func):
+ return wrapper
+
+
++@pytest.mark.skipif(HAS_VCERT is False, reason="The vcert module must be installed.")
+ class VenafiTest(ShellCase):
+ """
+ Test the venafi runner
+@@ -86,7 +95,6 @@ class VenafiTest(ShellCase):
+ @with_random_name
+ @pytest.mark.slow_test
+ def test_sign(self, name):
+-
+ csr_pem = """-----BEGIN CERTIFICATE REQUEST-----
+ MIIFbDCCA1QCAQAwgbQxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDARVdGFoMRIwEAYD
+ VQQHDAlTYWx0IExha2UxFDASBgNVBAoMC1ZlbmFmaSBJbmMuMRQwEgYDVQQLDAtJ
+diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py
+deleted file mode 100644
+index 800111174f0..00000000000
+--- a/tests/integration/modules/test_cmdmod.py
++++ /dev/null
+@@ -1,634 +0,0 @@
+-import os
+-import random
+-import sys
+-import tempfile
+-from contextlib import contextmanager
+-
+-import pytest
+-
+-import salt.utils.path
+-import salt.utils.platform
+-import salt.utils.user
+-from tests.support.case import ModuleCase
+-from tests.support.helpers import SKIP_INITIAL_PHOTONOS_FAILURES, dedent
+-from tests.support.runtests import RUNTIME_VARS
+-
+-AVAILABLE_PYTHON_EXECUTABLE = salt.utils.path.which_bin(
+- ["python", "python2", "python2.6", "python2.7"]
+-)
+-
+-
+-@pytest.mark.windows_whitelisted
+-class CMDModuleTest(ModuleCase):
+- """
+- Validate the cmd module
+- """
+-
+- def setUp(self):
+- self.runas_usr = "nobody"
+- if salt.utils.platform.is_darwin():
+- self.runas_usr = "macsalttest"
+-
+- @contextmanager
+- def _ensure_user_exists(self, name):
+- if name in self.run_function("user.info", [name]).values():
+- # User already exists; don't touch
+- yield
+- else:
+- # Need to create user for test
+- self.run_function("user.add", [name])
+- try:
+- yield
+- finally:
+- self.run_function("user.delete", [name], remove=True)
+-
+- @pytest.mark.slow_test
+- @pytest.mark.skip_on_windows
+- def test_run(self):
+- """
+- cmd.run
+- """
+- shell = os.environ.get("SHELL")
+- if shell is None:
+- # Failed to get the SHELL var, don't run
+- self.skipTest("Unable to get the SHELL environment variable")
+-
+- self.assertTrue(self.run_function("cmd.run", ["echo $SHELL"]))
+- self.assertEqual(
+- self.run_function(
+- "cmd.run", ["echo $SHELL", "shell={}".format(shell)], python_shell=True
+- ).rstrip(),
+- shell,
+- )
+- self.assertEqual(
+- self.run_function("cmd.run", ["ls / | grep etc"], python_shell=True), "etc"
+- )
+- self.assertEqual(
+- self.run_function(
+- "cmd.run",
+- ['echo {{grains.id}} | awk "{print $1}"'],
+- template="jinja",
+- python_shell=True,
+- ),
+- "minion",
+- )
+- self.assertEqual(
+- self.run_function(
+- "cmd.run", ["grep f"], stdin="one\ntwo\nthree\nfour\nfive\n"
+- ),
+- "four\nfive",
+- )
+- self.assertEqual(
+- self.run_function(
+- "cmd.run", ['echo "a=b" | sed -e s/=/:/g'], python_shell=True
+- ),
+- "a:b",
+- )
+-
+- @pytest.mark.slow_test
+- def test_stdout(self):
+- """
+- cmd.run_stdout
+- """
+- self.assertEqual(
+- self.run_function("cmd.run_stdout", ['echo "cheese"']).rstrip(),
+- "cheese" if not salt.utils.platform.is_windows() else '"cheese"',
+- )
+-
+- @pytest.mark.slow_test
+- def test_stderr(self):
+- """
+- cmd.run_stderr
+- """
+- if sys.platform.startswith(("freebsd", "openbsd")):
+- shell = "/bin/sh"
+- else:
+- shell = "/bin/bash"
+-
+- self.assertEqual(
+- self.run_function(
+- "cmd.run_stderr",
+- ['echo "cheese" 1>&2', "shell={}".format(shell)],
+- python_shell=True,
+- ).rstrip(),
+- "cheese" if not salt.utils.platform.is_windows() else '"cheese"',
+- )
+-
+- @pytest.mark.slow_test
+- def test_run_all(self):
+- """
+- cmd.run_all
+- """
+- if sys.platform.startswith(("freebsd", "openbsd")):
+- shell = "/bin/sh"
+- else:
+- shell = "/bin/bash"
+-
+- ret = self.run_function(
+- "cmd.run_all",
+- ['echo "cheese" 1>&2', "shell={}".format(shell)],
+- python_shell=True,
+- )
+- self.assertTrue("pid" in ret)
+- self.assertTrue("retcode" in ret)
+- self.assertTrue("stdout" in ret)
+- self.assertTrue("stderr" in ret)
+- self.assertTrue(isinstance(ret.get("pid"), int))
+- self.assertTrue(isinstance(ret.get("retcode"), int))
+- self.assertTrue(isinstance(ret.get("stdout"), str))
+- self.assertTrue(isinstance(ret.get("stderr"), str))
+- self.assertEqual(
+- ret.get("stderr").rstrip(),
+- "cheese" if not salt.utils.platform.is_windows() else '"cheese"',
+- )
+-
+- @pytest.mark.slow_test
+- def test_retcode(self):
+- """
+- cmd.retcode
+- """
+- self.assertEqual(
+- self.run_function("cmd.retcode", ["exit 0"], python_shell=True), 0
+- )
+- self.assertEqual(
+- self.run_function("cmd.retcode", ["exit 1"], python_shell=True), 1
+- )
+-
+- @pytest.mark.slow_test
+- def test_run_all_with_success_retcodes(self):
+- """
+- cmd.run with success_retcodes
+- """
+- ret = self.run_function(
+- "cmd.run_all", ["exit 42"], success_retcodes=[42], python_shell=True
+- )
+-
+- self.assertTrue("retcode" in ret)
+- self.assertEqual(ret.get("retcode"), 0)
+-
+- @pytest.mark.slow_test
+- def test_retcode_with_success_retcodes(self):
+- """
+- cmd.run with success_retcodes
+- """
+- ret = self.run_function(
+- "cmd.retcode", ["exit 42"], success_retcodes=[42], python_shell=True
+- )
+-
+- self.assertEqual(ret, 0)
+-
+- @pytest.mark.slow_test
+- def test_run_all_with_success_stderr(self):
+- """
+- cmd.run with success_retcodes
+- """
+- random_file = "{}{}{}".format(
+- RUNTIME_VARS.TMP_ROOT_DIR, os.path.sep, random.random()
+- )
+-
+- if salt.utils.platform.is_windows():
+- func = "type"
+- expected_stderr = "cannot find the file specified"
+- else:
+- func = "cat"
+- expected_stderr = "No such file or directory"
+- ret = self.run_function(
+- "cmd.run_all",
+- ["{} {}".format(func, random_file)],
+- success_stderr=[expected_stderr],
+- python_shell=True,
+- )
+-
+- self.assertTrue("retcode" in ret)
+- self.assertEqual(ret.get("retcode"), 0)
+-
+- @pytest.mark.slow_test
+- def test_blacklist_glob(self):
+- """
+- cmd_blacklist_glob
+- """
+- self.assertEqual(
+- self.run_function("cmd.run", ["bad_command --foo"]).rstrip(),
+- 'ERROR: The shell command "bad_command --foo" is not permitted',
+- )
+-
+- @pytest.mark.slow_test
+- def test_script(self):
+- """
+- cmd.script
+- """
+- args = "saltines crackers biscuits=yes"
+- script = "salt://script.py"
+- ret = self.run_function("cmd.script", [script, args], saltenv="base")
+- self.assertEqual(ret["stdout"], args)
+-
+- @pytest.mark.slow_test
+- def test_script_query_string(self):
+- """
+- cmd.script
+- """
+- args = "saltines crackers biscuits=yes"
+- script = "salt://script.py?saltenv=base"
+- ret = self.run_function("cmd.script", [script, args], saltenv="base")
+- self.assertEqual(ret["stdout"], args)
+-
+- @pytest.mark.slow_test
+- def test_script_retcode(self):
+- """
+- cmd.script_retcode
+- """
+- script = "salt://script.py"
+- ret = self.run_function("cmd.script_retcode", [script], saltenv="base")
+- self.assertEqual(ret, 0)
+-
+- @pytest.mark.slow_test
+- def test_script_cwd(self):
+- """
+- cmd.script with cwd
+- """
+- tmp_cwd = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
+- args = "saltines crackers biscuits=yes"
+- script = "salt://script.py"
+- ret = self.run_function(
+- "cmd.script", [script, args], cwd=tmp_cwd, saltenv="base"
+- )
+- self.assertEqual(ret["stdout"], args)
+-
+- @pytest.mark.slow_test
+- def test_script_cwd_with_space(self):
+- """
+- cmd.script with cwd
+- """
+- tmp_cwd = "{}{}test 2".format(
+- tempfile.mkdtemp(dir=RUNTIME_VARS.TMP), os.path.sep
+- )
+- os.mkdir(tmp_cwd)
+-
+- args = "saltines crackers biscuits=yes"
+- script = "salt://script.py"
+- ret = self.run_function(
+- "cmd.script", [script, args], cwd=tmp_cwd, saltenv="base"
+- )
+- self.assertEqual(ret["stdout"], args)
+-
+- @pytest.mark.destructive_test
+- def test_tty(self):
+- """
+- cmd.tty
+- """
+- for tty in ("tty0", "pts3"):
+- if os.path.exists(os.path.join("/dev", tty)):
+- ret = self.run_function("cmd.tty", [tty, "apply salt liberally"])
+- self.assertTrue("Success" in ret)
+-
+- @pytest.mark.skip_on_windows
+- @pytest.mark.skip_if_binaries_missing("which")
+- def test_which(self):
+- """
+- cmd.which
+- """
+- cmd_which = self.run_function("cmd.which", ["cat"])
+- self.assertIsInstance(cmd_which, str)
+- cmd_run = self.run_function("cmd.run", ["which cat"])
+- self.assertIsInstance(cmd_run, str)
+- self.assertEqual(cmd_which.rstrip(), cmd_run.rstrip())
+-
+- @pytest.mark.skip_on_windows
+- @pytest.mark.skip_if_binaries_missing("which")
+- def test_which_bin(self):
+- """
+- cmd.which_bin
+- """
+- cmds = ["pip3", "pip2", "pip", "pip-python"]
+- ret = self.run_function("cmd.which_bin", [cmds])
+- self.assertTrue(os.path.split(ret)[1] in cmds)
+-
+- @pytest.mark.slow_test
+- def test_has_exec(self):
+- """
+- cmd.has_exec
+- """
+- self.assertTrue(
+- self.run_function("cmd.has_exec", [AVAILABLE_PYTHON_EXECUTABLE])
+- )
+- self.assertFalse(
+- self.run_function("cmd.has_exec", ["alllfsdfnwieulrrh9123857ygf"])
+- )
+-
+- @pytest.mark.slow_test
+- def test_exec_code(self):
+- """
+- cmd.exec_code
+- """
+- code = dedent(
+- """
+- import sys
+- sys.stdout.write('cheese')
+- """
+- )
+- self.assertEqual(
+- self.run_function(
+- "cmd.exec_code", [AVAILABLE_PYTHON_EXECUTABLE, code]
+- ).rstrip(),
+- "cheese",
+- )
+-
+- @pytest.mark.slow_test
+- def test_exec_code_with_single_arg(self):
+- """
+- cmd.exec_code
+- """
+- code = dedent(
+- """
+- import sys
+- sys.stdout.write(sys.argv[1])
+- """
+- )
+- arg = "cheese"
+- self.assertEqual(
+- self.run_function(
+- "cmd.exec_code", [AVAILABLE_PYTHON_EXECUTABLE, code], args=arg
+- ).rstrip(),
+- arg,
+- )
+-
+- @pytest.mark.slow_test
+- def test_exec_code_with_multiple_args(self):
+- """
+- cmd.exec_code
+- """
+- code = dedent(
+- """
+- import sys
+- sys.stdout.write(sys.argv[1])
+- """
+- )
+- arg = "cheese"
+- self.assertEqual(
+- self.run_function(
+- "cmd.exec_code", [AVAILABLE_PYTHON_EXECUTABLE, code], args=[arg, "test"]
+- ).rstrip(),
+- arg,
+- )
+-
+- @pytest.mark.slow_test
+- def test_quotes(self):
+- """
+- cmd.run with quoted command
+- """
+- cmd = """echo 'SELECT * FROM foo WHERE bar="baz"' """
+- expected_result = 'SELECT * FROM foo WHERE bar="baz"'
+- if salt.utils.platform.is_windows():
+- expected_result = "'SELECT * FROM foo WHERE bar=\"baz\"'"
+- result = self.run_function("cmd.run_stdout", [cmd]).strip()
+- self.assertEqual(result, expected_result)
+-
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.skip_on_windows(reason="Skip on Windows, requires password")
+- def test_quotes_runas(self):
+- """
+- cmd.run with quoted command
+- """
+- cmd = """echo 'SELECT * FROM foo WHERE bar="baz"' """
+- expected_result = 'SELECT * FROM foo WHERE bar="baz"'
+- result = self.run_function(
+- "cmd.run_all", [cmd], runas=RUNTIME_VARS.RUNNING_TESTS_USER
+- )
+- errmsg = "The command returned: {}".format(result)
+- self.assertEqual(result["retcode"], 0, errmsg)
+- self.assertEqual(result["stdout"], expected_result, errmsg)
+-
+- @pytest.mark.destructive_test
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.skip_on_windows(reason="Skip on Windows, uses unix commands")
+- @pytest.mark.slow_test
+- def test_avoid_injecting_shell_code_as_root(self):
+- """
+- cmd.run should execute the whole command as the "runas" user, not
+- running substitutions as root.
+- """
+- cmd = "echo $(id -u)"
+-
+- root_id = self.run_function("cmd.run_stdout", [cmd])
+- runas_root_id = self.run_function(
+- "cmd.run_stdout", [cmd], runas=RUNTIME_VARS.RUNNING_TESTS_USER
+- )
+- with self._ensure_user_exists(self.runas_usr):
+- user_id = self.run_function("cmd.run_stdout", [cmd], runas=self.runas_usr)
+-
+- self.assertNotEqual(user_id, root_id)
+- self.assertNotEqual(user_id, runas_root_id)
+- self.assertEqual(root_id, runas_root_id)
+-
+- @pytest.mark.destructive_test
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.skip_on_windows(reason="Skip on Windows, uses unix commands")
+- @pytest.mark.slow_test
+- def test_cwd_runas(self):
+- """
+- cmd.run should be able to change working directory correctly, whether
+- or not runas is in use.
+- """
+- cmd = "pwd"
+- tmp_cwd = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
+- os.chmod(tmp_cwd, 0o711)
+-
+- cwd_normal = self.run_function("cmd.run_stdout", [cmd], cwd=tmp_cwd).rstrip(
+- "\n"
+- )
+- self.assertEqual(tmp_cwd, cwd_normal)
+-
+- with self._ensure_user_exists(self.runas_usr):
+- cwd_runas = self.run_function(
+- "cmd.run_stdout", [cmd], cwd=tmp_cwd, runas=self.runas_usr
+- ).rstrip("\n")
+- self.assertEqual(tmp_cwd, cwd_runas)
+-
+- @pytest.mark.destructive_test
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.skip_unless_on_darwin(reason="Applicable to MacOS only")
+- @pytest.mark.slow_test
+- def test_runas_env(self):
+- """
+- cmd.run should be able to change working directory correctly, whether
+- or not runas is in use.
+- """
+- with self._ensure_user_exists(self.runas_usr):
+- user_path = self.run_function(
+- "cmd.run_stdout", ['printf %s "$PATH"'], runas=self.runas_usr
+- )
+- # XXX: Not sure of a better way. Environment starts out with
+- # /bin:/usr/bin and should be populated by path helper and the bash
+- # profile.
+- self.assertNotEqual("/bin:/usr/bin", user_path)
+-
+- @pytest.mark.destructive_test
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.skip_unless_on_darwin(reason="Applicable to MacOS only")
+- @pytest.mark.slow_test
+- def test_runas_complex_command_bad_cwd(self):
+- """
+- cmd.run should not accidentally run parts of a complex command when
+- given a cwd which cannot be used by the user the command is run as.
+-
+- Due to the need to use `su -l` to login to another user on MacOS, we
+- cannot cd into directories that the target user themselves does not
+- have execute permission for. To an extent, this test is testing that
+- buggy behaviour, but its purpose is to ensure that the greater bug of
+- running commands after failing to cd does not occur.
+- """
+- tmp_cwd = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
+- os.chmod(tmp_cwd, 0o700)
+-
+- with self._ensure_user_exists(self.runas_usr):
+- cmd_result = self.run_function(
+- "cmd.run_all",
+- ['pwd; pwd; : $(echo "You have failed the test" >&2)'],
+- cwd=tmp_cwd,
+- runas=self.runas_usr,
+- )
+-
+- self.assertEqual("", cmd_result["stdout"])
+- self.assertNotIn("You have failed the test", cmd_result["stderr"])
+- self.assertNotEqual(0, cmd_result["retcode"])
+-
+- @SKIP_INITIAL_PHOTONOS_FAILURES
+- @pytest.mark.skip_on_windows
+- @pytest.mark.skip_if_not_root
+- @pytest.mark.destructive_test
+- @pytest.mark.slow_test
+- def test_runas(self):
+- """
+- Ensure that the env is the runas user's
+- """
+- with self._ensure_user_exists(self.runas_usr):
+- out = self.run_function(
+- "cmd.run", ["env"], runas=self.runas_usr
+- ).splitlines()
+- self.assertIn("USER={}".format(self.runas_usr), out)
+-
+- @pytest.mark.skip_if_binaries_missing("sleep", reason="sleep cmd not installed")
+- def test_timeout(self):
+- """
+- cmd.run trigger timeout
+- """
+- out = self.run_function(
+- "cmd.run", ["sleep 2 && echo hello"], f_timeout=1, python_shell=True
+- )
+- self.assertTrue("Timed out" in out)
+-
+- @pytest.mark.skip_if_binaries_missing("sleep", reason="sleep cmd not installed")
+- def test_timeout_success(self):
+- """
+- cmd.run sufficient timeout to succeed
+- """
+- out = self.run_function(
+- "cmd.run", ["sleep 1 && echo hello"], f_timeout=2, python_shell=True
+- )
+- self.assertEqual(out, "hello")
+-
+- @pytest.mark.slow_test
+- def test_hide_output(self):
+- """
+- Test the hide_output argument
+- """
+- ls_command = (
+- ["ls", "/"] if not salt.utils.platform.is_windows() else ["dir", "c:\\"]
+- )
+-
+- error_command = ["thiscommanddoesnotexist"]
+-
+- # cmd.run
+- out = self.run_function("cmd.run", ls_command, hide_output=True)
+- self.assertEqual(out, "")
+-
+- # cmd.shell
+- out = self.run_function("cmd.shell", ls_command, hide_output=True)
+- self.assertEqual(out, "")
+-
+- # cmd.run_stdout
+- out = self.run_function("cmd.run_stdout", ls_command, hide_output=True)
+- self.assertEqual(out, "")
+-
+- # cmd.run_stderr
+- out = self.run_function("cmd.shell", error_command, hide_output=True)
+- self.assertEqual(out, "")
+-
+- # cmd.run_all (command should have produced stdout)
+- out = self.run_function("cmd.run_all", ls_command, hide_output=True)
+- self.assertEqual(out["stdout"], "")
+- self.assertEqual(out["stderr"], "")
+-
+- # cmd.run_all (command should have produced stderr)
+- out = self.run_function("cmd.run_all", error_command, hide_output=True)
+- self.assertEqual(out["stdout"], "")
+- self.assertEqual(out["stderr"], "")
+-
+- @pytest.mark.slow_test
+- def test_cmd_run_whoami(self):
+- """
+- test return of whoami
+- """
+- if not salt.utils.platform.is_windows():
+- user = RUNTIME_VARS.RUNTIME_CONFIGS["master"]["user"]
+- else:
+- user = salt.utils.user.get_specific_user()
+- if user.startswith("sudo_"):
+- user = user.replace("sudo_", "")
+- cmd = self.run_function("cmd.run", ["whoami"])
+- try:
+- self.assertEqual(user.lower(), cmd.lower())
+- except AssertionError as exc:
+- if not salt.utils.platform.is_windows():
+- raise exc from None
+- if "\\" in user:
+- user = user.split("\\")[-1]
+- self.assertEqual(user.lower(), cmd.lower())
+-
+- @pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
+- @pytest.mark.slow_test
+- def test_windows_env_handling(self):
+- """
+- Ensure that nt.environ is used properly with cmd.run*
+- """
+- out = self.run_function(
+- "cmd.run", ["set"], env={"abc": "123", "ABC": "456"}
+- ).splitlines()
+- self.assertIn("abc=123", out)
+- self.assertIn("ABC=456", out)
+-
+- @pytest.mark.slow_test
+- @pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
+- def test_windows_powershell_script_args(self):
+- """
+- Ensure that powershell processes inline script in args
+- """
+- val = "i like cheese"
+- args = (
+- '-SecureString (ConvertTo-SecureString -String "{}" -AsPlainText -Force)'
+- " -ErrorAction Stop".format(val)
+- )
+- script = "salt://issue-56195/test.ps1"
+- ret = self.run_function(
+- "cmd.script", [script], args=args, shell="powershell", saltenv="base"
+- )
+- self.assertEqual(ret["stdout"], val)
+-
+- @pytest.mark.slow_test
+- @pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
+- @pytest.mark.skip_if_binaries_missing("pwsh")
+- def test_windows_powershell_script_args_pwsh(self):
+- """
+- Ensure that powershell processes inline script in args with powershell
+- core
+- """
+- val = "i like cheese"
+- args = (
+- '-SecureString (ConvertTo-SecureString -String "{}" -AsPlainText -Force)'
+- " -ErrorAction Stop".format(val)
+- )
+- script = "salt://issue-56195/test.ps1"
+- ret = self.run_function(
+- "cmd.script", [script], args=args, shell="pwsh", saltenv="base"
+- )
+- self.assertEqual(ret["stdout"], val)
+diff --git a/tests/integration/modules/test_cp.py b/tests/integration/modules/test_cp.py
+index ad7538b4ba8..cd3e4c2f5ad 100644
+--- a/tests/integration/modules/test_cp.py
++++ b/tests/integration/modules/test_cp.py
+@@ -234,9 +234,9 @@ class CPModuleTest(ModuleCase):
+ self.run_function("cp.get_url", ["https://repo.saltproject.io/index.html", tgt])
+ with salt.utils.files.fopen(tgt, "r") as instructions:
+ data = salt.utils.stringutils.to_unicode(instructions.read())
+- self.assertIn("Bootstrap", data)
+- self.assertIn("Debian", data)
+- self.assertIn("Windows", data)
++ self.assertIn("Salt Project", data)
++ self.assertIn("Package", data)
++ self.assertIn("Repo", data)
+ self.assertNotIn("AYBABTU", data)
+
+ @pytest.mark.slow_test
+@@ -250,9 +250,9 @@ class CPModuleTest(ModuleCase):
+
+ with salt.utils.files.fopen(ret, "r") as instructions:
+ data = salt.utils.stringutils.to_unicode(instructions.read())
+- self.assertIn("Bootstrap", data)
+- self.assertIn("Debian", data)
+- self.assertIn("Windows", data)
++ self.assertIn("Salt Project", data)
++ self.assertIn("Package", data)
++ self.assertIn("Repo", data)
+ self.assertNotIn("AYBABTU", data)
+
+ @pytest.mark.slow_test
+@@ -273,9 +273,9 @@ class CPModuleTest(ModuleCase):
+ time.sleep(sleep)
+ if ret.find("HTTP 599") != -1:
+ raise Exception("https://repo.saltproject.io/index.html returned 599 error")
+- self.assertIn("Bootstrap", ret)
+- self.assertIn("Debian", ret)
+- self.assertIn("Windows", ret)
++ self.assertIn("Salt Project", ret)
++ self.assertIn("Package", ret)
++ self.assertIn("Repo", ret)
+ self.assertNotIn("AYBABTU", ret)
+
+ @pytest.mark.slow_test
+@@ -346,9 +346,9 @@ class CPModuleTest(ModuleCase):
+ """
+ src = "https://repo.saltproject.io/index.html"
+ ret = self.run_function("cp.get_file_str", [src])
+- self.assertIn("Bootstrap", ret)
+- self.assertIn("Debian", ret)
+- self.assertIn("Windows", ret)
++ self.assertIn("Salt Project", ret)
++ self.assertIn("Package", ret)
++ self.assertIn("Repo", ret)
+ self.assertNotIn("AYBABTU", ret)
+
+ @pytest.mark.slow_test
+diff --git a/tests/integration/modules/test_timezone.py b/tests/integration/modules/test_timezone.py
+index 8d7180cbd13..c1dc8a7b73d 100644
+--- a/tests/integration/modules/test_timezone.py
++++ b/tests/integration/modules/test_timezone.py
+@@ -4,6 +4,7 @@ Integration tests for timezone module
+ Linux and Solaris are supported
+ """
+ import pytest
++import os
+
+ from tests.support.case import ModuleCase
+
+@@ -15,6 +16,8 @@ except ImportError:
+ HAS_TZLOCAL = False
+
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++@pytest.mark.skipif(INSIDE_CONTAINER, reason="No hwclock in a container")
+ class TimezoneLinuxModuleTest(ModuleCase):
+ def setUp(self):
+ """
+diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
+index 68c14daaa15..5b4cbda95c9 100644
+--- a/tests/integration/pillar/test_git_pillar.py
++++ b/tests/integration/pillar/test_git_pillar.py
+@@ -63,6 +63,7 @@ https://github.com/git/git/commit/6bc0cb5
+ https://github.com/unbit/uwsgi/commit/ac1e354
+ """
+
++import os
+ import random
+ import string
+ import sys
+@@ -100,9 +101,11 @@ try:
+ except Exception: # pylint: disable=broad-except
+ HAS_PYGIT2 = False
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+ pytestmark = [
+ SKIP_INITIAL_PHOTONOS_FAILURES,
+ pytest.mark.skip_on_platforms(windows=True, darwin=True),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Communication problems between containers."),
+ ]
+
+
+diff --git a/tests/integration/ssh/test_state.py b/tests/integration/ssh/test_state.py
+index a9fd3e7f2d3..69245454e85 100644
+--- a/tests/integration/ssh/test_state.py
++++ b/tests/integration/ssh/test_state.py
+@@ -283,53 +283,6 @@ class SSHStateTest(SSHCase):
+ check_file = self.run_function("file.file_exists", [SSH_SLS_FILE], wipe=False)
+ self.assertTrue(check_file)
+
+- @pytest.mark.slow_test
+- def test_state_running(self):
+- """
+- test state.running with salt-ssh
+- """
+-
+- retval = []
+-
+- def _run_in_background():
+- retval.append(self.run_function("state.sls", ["running"], wipe=False))
+-
+- bg_thread = threading.Thread(target=_run_in_background)
+- bg_thread.start()
+-
+- expected = 'The function "state.pkg" is running as'
+- state_ret = []
+- for _ in range(30):
+- if not bg_thread.is_alive():
+- continue
+- get_sls = self.run_function("state.running", wipe=False)
+- state_ret.append(get_sls)
+- if expected in " ".join(get_sls):
+- # We found the expected return
+- break
+- time.sleep(1)
+- else:
+- if not bg_thread.is_alive():
+- bg_failed_msg = "Failed to return clean data"
+- if retval and bg_failed_msg in retval.pop().get("_error", ""):
+- pytest.skip("Background state run failed, skipping")
+- self.fail(
+- "Did not find '{}' in state.running return: {}".format(
+- expected, state_ret
+- )
+- )
+-
+- # make sure we wait until the earlier state is complete
+- future = time.time() + 120
+- while True:
+- if expected not in " ".join(self.run_function("state.running", wipe=False)):
+- break
+- if time.time() > future:
+- self.fail(
+- "state.pkg is still running overtime. Test did not clean up"
+- " correctly."
+- )
+-
+ def tearDown(self):
+ """
+ make sure to clean up any old ssh directories
+diff --git a/tests/pytests/functional/cache/test_consul.py b/tests/pytests/functional/cache/test_consul.py
+index 3a38e495a93..c6e16d2588e 100644
+--- a/tests/pytests/functional/cache/test_consul.py
++++ b/tests/pytests/functional/cache/test_consul.py
+@@ -1,4 +1,5 @@
+ import logging
++import os
+ import socket
+ import time
+
+@@ -13,9 +14,12 @@ docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/cache/test_mysql.py b/tests/pytests/functional/cache/test_mysql.py
+index c283872c08c..e15fc732a4a 100644
+--- a/tests/pytests/functional/cache/test_mysql.py
++++ b/tests/pytests/functional/cache/test_mysql.py
+@@ -1,4 +1,5 @@
+ import logging
++import os
+
+ import pytest
+
+@@ -11,9 +12,12 @@ docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/fileserver/hgfs/test_hgfs.py b/tests/pytests/functional/fileserver/hgfs/test_hgfs.py
+index 571fe75e403..bfd927fd0fe 100644
+--- a/tests/pytests/functional/fileserver/hgfs/test_hgfs.py
++++ b/tests/pytests/functional/fileserver/hgfs/test_hgfs.py
+@@ -16,6 +16,8 @@ try:
+ except ImportError:
+ HAS_HG = False
+
++pytestmark = [pytest.mark.skipif(not HAS_HG, reason="missing hglib library")]
++
+
+ @pytest.fixture(scope="module")
+ def configure_loader_modules(master_opts):
+diff --git a/tests/pytests/functional/modules/test_cmdmod.py b/tests/pytests/functional/modules/test_cmdmod.py
+new file mode 100644
+index 00000000000..d30b474c6d2
+--- /dev/null
++++ b/tests/pytests/functional/modules/test_cmdmod.py
+@@ -0,0 +1,561 @@
++import os
++import random
++import sys
++from contextlib import contextmanager
++
++import pytest
++
++import salt.config
++import salt.utils.path
++import salt.utils.platform
++import salt.utils.user
++from tests.support.helpers import SKIP_INITIAL_PHOTONOS_FAILURES, dedent
++
++pytestmark = [pytest.mark.windows_whitelisted]
++
++
++@pytest.fixture(scope="module")
++def cmdmod(modules):
++ return modules.cmd
++
++
++@pytest.fixture(scope="module")
++def usermod(modules):
++ return modules.user
++
++
++@pytest.fixture(scope="module")
++def available_python_executable():
++ yield salt.utils.path.which_bin(["python", "python3"])
++
++
++@pytest.fixture
++def runas_usr():
++ runas_usr = "nobody"
++ if salt.utils.platform.is_darwin():
++ runas_usr = "macsalttest"
++ yield runas_usr
++
++
++@pytest.fixture
++def running_username():
++ """
++ Return the username that is running the code.
++ """
++ return salt.utils.user.get_user()
++
++
++@pytest.fixture
++def script_contents(state_tree):
++ _contents = """
++ #!/usr/bin/env python3
++ import sys
++ print(" ".join(sys.argv[1:]))
++ """
++
++ with pytest.helpers.temp_file("script.py", _contents, state_tree):
++ yield
++
++
++@pytest.fixture
++def issue_56195_test_ps1(state_tree):
++ _contents = """
++ [CmdLetBinding()]
++ Param(
++ [SecureString] $SecureString
++ )
++ $Credential = New-Object System.Net.NetworkCredential("DummyId", $SecureString)
++ $Credential.Password
++ """
++
++ with pytest.helpers.temp_file("issue_56195_test.ps1", _contents, state_tree):
++ yield
++
++
++@contextmanager
++def _ensure_user_exists(name, usermod):
++ if name in usermod.info(name).values():
++ # User already exists; don't touch
++ yield
++ else:
++ # Need to create user for test
++ usermod.add(name)
++ try:
++ yield
++ finally:
++ usermod.delete(name, remove=True)
++
++
++@pytest.mark.slow_test
++def test_run(cmdmod):
++ """
++ cmd.run
++ """
++ shell = os.environ.get("SHELL")
++ if shell is None:
++ # Failed to get the SHELL var, don't run
++ pytest.skip("Unable to get the SHELL environment variable")
++
++ assert cmdmod.run("echo $SHELL")
++ assert cmdmod.run("echo $SHELL", shell=shell, python_shell=True).rstrip() == shell
++ assert cmdmod.run("ls / | grep etc", python_shell=True) == "etc"
++ assert (
++ cmdmod.run(
++ 'echo {{grains.id}} | awk "{print $1}"',
++ template="jinja",
++ python_shell=True,
++ )
++ == "func-tests-minion"
++ )
++ assert cmdmod.run("grep f", stdin="one\ntwo\nthree\nfour\nfive\n") == "four\nfive"
++ assert cmdmod.run('echo "a=b" | sed -e s/=/:/g', python_shell=True) == "a:b"
++
++
++@pytest.mark.slow_test
++def test_stdout(cmdmod):
++ """
++ cmd.run_stdout
++ """
++ assert cmdmod.run_stdout('echo "cheese"').rstrip() == (
++ "cheese"
++ if not salt.utils.platform.is_windows()
++ else '"cheese"'
++ )
++
++
++@pytest.mark.slow_test
++def test_stderr(cmdmod):
++ """
++ cmd.run_stderr
++ """
++ if sys.platform.startswith(("freebsd", "openbsd")):
++ shell = "/bin/sh"
++ else:
++ shell = "/bin/bash"
++
++ # On Windows the quotes survive echo, so compare the quoted form.
++ assert cmdmod.run_stderr(
++ 'echo "cheese" 1>&2',
++ shell=shell,
++ python_shell=True,
++ ).rstrip() == (
++ "cheese"
++ if not salt.utils.platform.is_windows()
++ else '"cheese"'
++ )
++
++
++@pytest.mark.slow_test
++def test_run_all(cmdmod):
++ """
++ cmd.run_all
++ """
++ if sys.platform.startswith(("freebsd", "openbsd")):
++ shell = "/bin/sh"
++ else:
++ shell = "/bin/bash"
++
++ ret = cmdmod.run_all(
++ 'echo "cheese" 1>&2',
++ shell=shell,
++ python_shell=True,
++ )
++ assert "pid" in ret
++ assert "retcode" in ret
++ assert "stdout" in ret
++ assert "stderr" in ret
++ assert isinstance(ret.get("pid"), int)
++ assert isinstance(ret.get("retcode"), int)
++ assert isinstance(ret.get("stdout"), str)
++ assert isinstance(ret.get("stderr"), str)
++ assert ret.get("stderr").rstrip() == (
++ "cheese"
++ if not salt.utils.platform.is_windows()
++ else '"cheese"'
++ )
++
++
++@pytest.mark.slow_test
++def test_retcode(cmdmod):
++ """
++ cmd.retcode
++ """
++ assert cmdmod.retcode("exit 0", python_shell=True) == 0
++ assert cmdmod.retcode("exit 1", python_shell=True) == 1
++
++
++@pytest.mark.slow_test
++def test_run_all_with_success_retcodes(cmdmod):
++ """
++ cmd.run with success_retcodes
++ """
++ ret = cmdmod.run_all("exit 42", success_retcodes=[42], python_shell=True)
++
++ assert "retcode" in ret
++ assert ret.get("retcode") == 0
++
++
++@pytest.mark.slow_test
++def test_retcode_with_success_retcodes(cmdmod):
++ """
++ cmd.run with success_retcodes
++ """
++ ret = cmdmod.retcode("exit 42", success_retcodes=[42], python_shell=True)
++
++ assert ret == 0
++
++
++@pytest.mark.slow_test
++def test_run_all_with_success_stderr(cmdmod, tmp_path):
++ """
++ cmd.run with success_retcodes
++ """
++ random_file = str(tmp_path / f"{random.random()}")
++
++ if salt.utils.platform.is_windows():
++ func = "type"
++ expected_stderr = "cannot find the file specified"
++ else:
++ func = "cat"
++ expected_stderr = "No such file or directory"
++ ret = cmdmod.run_all(
++ f"{func} {random_file}",
++ success_stderr=[expected_stderr],
++ python_shell=True,
++ )
++
++ assert "retcode" in ret
++ assert ret.get("retcode") == 0
++
++
++@pytest.mark.slow_test
++def test_script(cmdmod, script_contents):
++ """
++ cmd.script
++ """
++ args = "saltines crackers biscuits=yes"
++ script = "salt://script.py"
++ ret = cmdmod.script(script, args, saltenv="base")
++ assert ret["stdout"] == args
++
++
++@pytest.mark.slow_test
++def test_script_query_string(cmdmod, script_contents):
++ """
++ cmd.script
++ """
++ args = "saltines crackers biscuits=yes"
++ script = "salt://script.py?saltenv=base"
++ ret = cmdmod.script(script, args, saltenv="base")
++ assert ret["stdout"] == args
++
++
++@pytest.mark.slow_test
++def test_script_retcode(cmdmod, script_contents):
++ """
++ cmd.script_retcode
++ """
++ script = "salt://script.py"
++ ret = cmdmod.script_retcode(script, saltenv="base")
++ assert ret == 0
++
++
++@pytest.mark.slow_test
++def test_script_cwd(cmdmod, script_contents, tmp_path):
++ """
++ cmd.script with cwd
++ """
++ tmp_cwd = str(tmp_path)
++ args = "saltines crackers biscuits=yes"
++ script = "salt://script.py"
++ ret = cmdmod.script(script, args, cwd=tmp_cwd, saltenv="base")
++ assert ret["stdout"] == args
++
++
++@pytest.mark.slow_test
++def test_script_cwd_with_space(cmdmod, script_contents, tmp_path):
++ """
++ cmd.script with cwd
++ """
++ tmp_cwd = str(tmp_path / "test 2")
++ os.mkdir(tmp_cwd)
++
++ args = "saltines crackers biscuits=yes"
++ script = "salt://script.py"
++ ret = cmdmod.script(script, args, cwd=tmp_cwd, saltenv="base")
++ assert ret["stdout"] == args
++
++
++@pytest.mark.destructive_test
++def test_tty(cmdmod):
++ """
++ cmd.tty
++ """
++ for tty in ("tty0", "pts3"):
++ if os.path.exists(os.path.join("/dev", tty)):
++ ret = cmdmod.tty(tty, "apply salt liberally")
++ assert "Success" in ret
++
++
++@pytest.mark.skip_on_windows
++@pytest.mark.skip_if_binaries_missing("which")
++def test_which(cmdmod):
++ """
++ cmd.which
++ """
++ cmd_which = cmdmod.which("cat")
++ assert isinstance(cmd_which, str)
++ cmd_run = cmdmod.run("which cat")
++ assert isinstance(cmd_run, str)
++ assert cmd_which.rstrip() == cmd_run.rstrip()
++
++
++@pytest.mark.skip_on_windows
++@pytest.mark.skip_if_binaries_missing("which")
++def test_which_bin(cmdmod):
++ """
++ cmd.which_bin
++ """
++ cmds = ["pip3", "pip2", "pip", "pip-python"]
++ ret = cmdmod.which_bin(cmds)
++ assert os.path.split(ret)[1] in cmds
++
++
++@pytest.mark.slow_test
++def test_has_exec(cmdmod, available_python_executable):
++ """
++ cmd.has_exec
++ """
++ assert cmdmod.has_exec(available_python_executable)
++ assert not cmdmod.has_exec("alllfsdfnwieulrrh9123857ygf")
++
++
++@pytest.mark.slow_test
++def test_exec_code(cmdmod, available_python_executable):
++ """
++ cmd.exec_code
++ """
++ code = dedent(
++ """
++ import sys
++ sys.stdout.write('cheese')
++ """
++ )
++ assert cmdmod.exec_code(available_python_executable, code).rstrip() == "cheese"
++
++
++@pytest.mark.slow_test
++def test_exec_code_with_single_arg(cmdmod, available_python_executable):
++ """
++ cmd.exec_code
++ """
++ code = dedent(
++ """
++ import sys
++ sys.stdout.write(sys.argv[1])
++ """
++ )
++ arg = "cheese"
++ assert cmdmod.exec_code(available_python_executable, code, args=arg).rstrip() == arg
++
++
++@pytest.mark.slow_test
++def test_exec_code_with_multiple_args(cmdmod, available_python_executable):
++ """
++ cmd.exec_code
++ """
++ code = dedent(
++ """
++ import sys
++ sys.stdout.write(sys.argv[1])
++ """
++ )
++ arg = "cheese"
++ assert (
++ cmdmod.exec_code(available_python_executable, code, args=[arg, "test"]).rstrip()
++ == arg
++ )
++
++
++@pytest.mark.slow_test
++def test_quotes(cmdmod):
++ """
++ cmd.run with quoted command
++ """
++ cmd = """echo 'SELECT * FROM foo WHERE bar="baz"' """
++ expected_result = 'SELECT * FROM foo WHERE bar="baz"'
++ result = cmdmod.run_stdout(cmd).strip()
++ assert result == expected_result
++
++
++@pytest.mark.skip_if_not_root
++@pytest.mark.skip_on_windows(reason="Skip on Windows, requires password")
++def test_quotes_runas(cmdmod, running_username):
++ """
++ cmd.run with quoted command
++ """
++ cmd = """echo 'SELECT * FROM foo WHERE bar="baz"' """
++ expected_result = 'SELECT * FROM foo WHERE bar="baz"'
++ result = cmdmod.run_all(cmd, runas=running_username)
++ errmsg = f"The command returned: {result}"
++ assert result["retcode"] == 0, errmsg
++ assert result["stdout"] == expected_result, errmsg
++
++
++@pytest.mark.destructive_test
++@pytest.mark.skip_if_not_root
++@pytest.mark.skip_on_windows(reason="Skip on Windows, uses unix commands")
++@pytest.mark.slow_test
++def test_cwd_runas(cmdmod, usermod, runas_usr, tmp_path):
++ """
++ cmd.run should be able to change working directory correctly, whether
++ or not runas is in use.
++ """
++ cmd = "pwd"
++ tmp_cwd = str(tmp_path)
++ os.chmod(tmp_cwd, 0o711)
++
++ cwd_normal = cmdmod.run_stdout(cmd, cwd=tmp_cwd).rstrip("\n")
++ assert tmp_cwd == cwd_normal
++
++ with _ensure_user_exists(runas_usr, usermod):
++ cwd_runas = cmdmod.run_stdout(cmd, cwd=tmp_cwd, runas=runas_usr).rstrip("\n")
++ assert tmp_cwd == cwd_runas
++
++
++@pytest.mark.destructive_test
++@pytest.mark.skip_if_not_root
++@pytest.mark.skip_unless_on_darwin(reason="Applicable to MacOS only")
++@pytest.mark.slow_test
++def test_runas_env(cmdmod, usermod, runas_usr):
++ """
++ cmd.run should be able to change working directory correctly, whether
++ or not runas is in use.
++ """
++ with _ensure_user_exists(runas_usr, usermod):
++ user_path = cmdmod.run_stdout('printf %s "$PATH"', runas=runas_usr)
++ # XXX: Not sure of a better way. Environment starts out with
++ # /bin:/usr/bin and should be populated by path helper and the bash
++ # profile.
++ assert "/bin:/usr/bin" != user_path
++
++
++@pytest.mark.destructive_test
++@pytest.mark.skip_if_not_root
++@pytest.mark.skip_unless_on_darwin(reason="Applicable to MacOS only")
++@pytest.mark.slow_test
++def test_runas_complex_command_bad_cwd(cmdmod, usermod, runas_usr, tmp_path):
++ """
++ cmd.run should not accidentally run parts of a complex command when
++ given a cwd which cannot be used by the user the command is run as.
++ Due to the need to use `su -l` to login to another user on MacOS, we
++ cannot cd into directories that the target user themselves does not
++ have execute permission for. To an extent, this test is testing that
++ buggy behaviour, but its purpose is to ensure that the greater bug of
++ running commands after failing to cd does not occur.
++ """
++ tmp_cwd = str(tmp_path)
++ os.chmod(tmp_cwd, 0o700)
++
++ with _ensure_user_exists(runas_usr, usermod):
++ cmd_result = cmdmod.run_all(
++ 'pwd; pwd; : $(echo "You have failed the test" >&2)',
++ cwd=tmp_cwd,
++ runas=runas_usr,
++ )
++
++ assert "" == cmd_result["stdout"]
++ assert "You have failed the test" not in cmd_result["stderr"]
++ assert 0 != cmd_result["retcode"]
++
++
++@SKIP_INITIAL_PHOTONOS_FAILURES
++@pytest.mark.skip_on_windows
++@pytest.mark.skip_if_not_root
++@pytest.mark.destructive_test
++@pytest.mark.slow_test
++def test_runas(cmdmod, usermod, runas_usr):
++ """
++ Ensure that the env is the runas user's
++ """
++ with _ensure_user_exists(runas_usr, usermod):
++ out = cmdmod.run("env", runas=runas_usr).splitlines()
++ assert f"USER={runas_usr}" in out
++
++
++@pytest.mark.skip_if_binaries_missing("sleep", reason="sleep cmd not installed")
++def test_timeout(cmdmod):
++ """
++ cmd.run trigger timeout
++ """
++ out = cmdmod.run("sleep 2 && echo hello", timeout=1, python_shell=True)
++ assert "Timed out" in out
++
++
++@pytest.mark.skip_if_binaries_missing("sleep", reason="sleep cmd not installed")
++def test_timeout_success(cmdmod):
++ """
++ cmd.run sufficient timeout to succeed
++ """
++ out = cmdmod.run("sleep 1 && echo hello", timeout=2, python_shell=True)
++ assert out == "hello"
++
++
++@pytest.mark.slow_test
++def test_cmd_run_whoami(cmdmod, running_username):
++ """
++ test return of whoami
++ """
++ if not salt.utils.platform.is_windows():
++ user = running_username
++ else:
++ user = salt.utils.user.get_specific_user()
++ if user.startswith("sudo_"):
++ user = user.replace("sudo_", "")
++ cmd = cmdmod.run("whoami")
++ assert user.lower() == cmd.lower()
++
++
++@pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
++@pytest.mark.slow_test
++def test_windows_env_handling(cmdmod):
++ """
++ Ensure that nt.environ is used properly with cmd.run*
++ """
++ out = cmdmod.run("set", env={"abc": "123", "ABC": "456"}).splitlines()
++ assert "abc=123" in out
++ assert "ABC=456" in out
++
++
++@pytest.mark.slow_test
++@pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
++def test_windows_powershell_script_args(cmdmod, issue_56195_test_ps1):
++ """
++ Ensure that powershell processes inline script in args
++ """
++ val = "i like cheese"
++ args = (
++ '-SecureString (ConvertTo-SecureString -String "{}" -AsPlainText -Force)'
++ " -ErrorAction Stop".format(val)
++ )
++ script = "salt://issue_56195_test.ps1"
++ ret = cmdmod.script(script, args=args, shell="powershell", saltenv="base")
++ assert ret["stdout"] == val
++
++
++@pytest.mark.slow_test
++@pytest.mark.skip_unless_on_windows(reason="Minion is not Windows")
++@pytest.mark.skip_if_binaries_missing("pwsh")
++def test_windows_powershell_script_args_pwsh(cmdmod, issue_56195_test_ps1):
++ """
++ Ensure that powershell processes inline script in args with powershell
++ core
++ """
++ val = "i like cheese"
++ args = (
++ '-SecureString (ConvertTo-SecureString -String "{}" -AsPlainText -Force)'
++ " -ErrorAction Stop".format(val)
++ )
++ script = "salt://issue_56195_test.ps1"
++ ret = cmdmod.script(script, args=args, shell="pwsh", saltenv="base")
++ assert ret["stdout"] == val
+diff --git a/tests/pytests/functional/modules/test_dockermod.py b/tests/pytests/functional/modules/test_dockermod.py
+index 3c7bb25e461..a5b40869352 100644
+--- a/tests/pytests/functional/modules/test_dockermod.py
++++ b/tests/pytests/functional/modules/test_dockermod.py
+@@ -2,6 +2,7 @@
+ Integration tests for the docker_container states
+ """
+ import logging
++import os
+
+ import pytest
+ from saltfactories.utils import random_string
+@@ -11,9 +12,12 @@ pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run inside a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/modules/test_swarm.py b/tests/pytests/functional/modules/test_swarm.py
+index 8c0ce8cbd93..9dc70f5b3dc 100644
+--- a/tests/pytests/functional/modules/test_swarm.py
++++ b/tests/pytests/functional/modules/test_swarm.py
+@@ -1,10 +1,15 @@
++import os
++
+ import pytest
+
+ import salt.utils.versions
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="No hwclock in a container"),
+ ]
+
+ # The swarm module need the docker-py library installed
+diff --git a/tests/pytests/functional/modules/test_system.py b/tests/pytests/functional/modules/test_system.py
+index 2dabaaebfad..3b669c46afd 100644
+--- a/tests/pytests/functional/modules/test_system.py
++++ b/tests/pytests/functional/modules/test_system.py
+@@ -9,9 +9,12 @@ import pytest
+
+ import salt.utils.files
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.skip_unless_on_linux,
+ pytest.mark.slow_test,
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="No systemd in container."),
+ ]
+
+ log = logging.getLogger(__name__)
+diff --git a/tests/pytests/functional/pillar/hg_pillar/test_hg_pillar.py b/tests/pytests/functional/pillar/hg_pillar/test_hg_pillar.py
+index 183b002d8b2..44603d96f1d 100644
+--- a/tests/pytests/functional/pillar/hg_pillar/test_hg_pillar.py
++++ b/tests/pytests/functional/pillar/hg_pillar/test_hg_pillar.py
+@@ -60,6 +60,7 @@ def hg_setup_and_teardown():
+ @pytest.mark.skip_on_windows(
+ reason="just testing if this or hgfs causes the issue with total crash"
+ )
++@pytest.mark.skipif(not HAS_HG, reason="missing hglib library")
+ def test_ext_pillar(hg_setup_and_teardown):
+ data = hg_pillar.ext_pillar("*", None, hg_setup_and_teardown)
+ assert data == {"testinfo": "info", "testinfo2": "info"}
+diff --git a/tests/pytests/functional/states/rabbitmq/test_cluster.py b/tests/pytests/functional/states/rabbitmq/test_cluster.py
+index f8b4bdc225e..210b22a2360 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_cluster.py
++++ b/tests/pytests/functional/states/rabbitmq/test_cluster.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_cluster states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -13,11 +14,14 @@ pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_plugin.py b/tests/pytests/functional/states/rabbitmq/test_plugin.py
+index e1b686e3365..f1191490536 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_plugin.py
++++ b/tests/pytests/functional/states/rabbitmq/test_plugin.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_plugin states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -14,11 +15,14 @@ log = logging.getLogger(__name__)
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_policy.py b/tests/pytests/functional/states/rabbitmq/test_policy.py
+index e5cee97cbc8..7ccf6a522e0 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_policy.py
++++ b/tests/pytests/functional/states/rabbitmq/test_policy.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_policy states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -14,11 +15,14 @@ log = logging.getLogger(__name__)
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_upstream.py b/tests/pytests/functional/states/rabbitmq/test_upstream.py
+index cfdad35aba6..c7bcf3b0d44 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_upstream.py
++++ b/tests/pytests/functional/states/rabbitmq/test_upstream.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_user states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -13,11 +14,14 @@ log = logging.getLogger(__name__)
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_user.py b/tests/pytests/functional/states/rabbitmq/test_user.py
+index 2f9b22d28d2..31723df7be8 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_user.py
++++ b/tests/pytests/functional/states/rabbitmq/test_user.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_user states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -13,11 +14,14 @@ log = logging.getLogger(__name__)
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_vhost.py b/tests/pytests/functional/states/rabbitmq/test_vhost.py
+index a648d41854f..d6ac6901a25 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_vhost.py
++++ b/tests/pytests/functional/states/rabbitmq/test_vhost.py
+@@ -3,6 +3,7 @@ Integration tests for the rabbitmq_user states
+ """
+
+ import logging
++import os
+
+ import pytest
+
+@@ -13,11 +14,14 @@ log = logging.getLogger(__name__)
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing(
+ "docker", "dockerd", reason="Docker not installed"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/test_docker_network.py b/tests/pytests/functional/states/test_docker_network.py
+index 16a78b13a4a..0da01ed8bac 100644
+--- a/tests/pytests/functional/states/test_docker_network.py
++++ b/tests/pytests/functional/states/test_docker_network.py
+@@ -1,5 +1,6 @@
+ import functools
+ import logging
++import os
+ import random
+
+ import pytest
+@@ -15,9 +16,13 @@ pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/test_pkg.py b/tests/pytests/functional/states/test_pkg.py
+index 0e82dc608ba..12318c996d1 100644
+--- a/tests/pytests/functional/states/test_pkg.py
++++ b/tests/pytests/functional/states/test_pkg.py
+@@ -64,7 +64,7 @@ def PKG_CAP_TARGETS(grains):
+ _PKG_CAP_TARGETS = []
+ if grains["os_family"] == "Suse":
+ if grains["os"] == "SUSE":
+- _PKG_CAP_TARGETS = [("perl(ZNC)", "znc-perl")]
++ _PKG_CAP_TARGETS = [("perl(Error)", "perl-Error")]
+ if not _PKG_CAP_TARGETS:
+ pytest.skip("Capability not provided")
+ return _PKG_CAP_TARGETS
+@@ -856,8 +856,8 @@ def test_pkg_cap_003_installed_multipkg_with_version(
+ This is a destructive test as it installs and then removes two packages
+ """
+ target, realpkg = PKG_CAP_TARGETS[0]
+- version = latest_version(target)
+- realver = latest_version(realpkg)
++ version = modules.pkg.version(target)
++ realver = modules.pkg.version(realpkg)
+
+ # If this condition is False, we need to find new targets.
+ # This needs to be able to test successful installation of packages.
+diff --git a/tests/pytests/integration/cli/test_syndic_eauth.py b/tests/pytests/integration/cli/test_syndic_eauth.py
+index 57e9c0a467a..218022b9e3c 100644
+--- a/tests/pytests/integration/cli/test_syndic_eauth.py
++++ b/tests/pytests/integration/cli/test_syndic_eauth.py
+@@ -1,4 +1,5 @@
+ import json
++import os
+ import pathlib
+ import tempfile
+ import time
+@@ -7,9 +8,11 @@ import pytest
+
+ docker = pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+ pytestmark = [
+ pytest.mark.core_test,
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/integration/daemons/test_memory_leak.py b/tests/pytests/integration/daemons/test_memory_leak.py
+index 1b782760418..8157091c44e 100644
+--- a/tests/pytests/integration/daemons/test_memory_leak.py
++++ b/tests/pytests/integration/daemons/test_memory_leak.py
+@@ -1,3 +1,4 @@
++import os
+ import time
+ from multiprocessing import Manager, Process
+
+@@ -8,6 +9,8 @@ pytestmark = [
+ pytest.mark.slow_test,
+ ]
+
++GITHUB_ACTIONS = bool(os.getenv("GITHUB_ACTIONS", False))
++
+
+ @pytest.fixture
+ def testfile_path(tmp_path):
+@@ -45,6 +48,7 @@ def file_add_delete_sls(testfile_path, base_env_state_tree_root_dir):
+
+
+ @pytest.mark.skip_on_darwin(reason="MacOS is a spawning platform, won't work")
++@pytest.mark.skipif(GITHUB_ACTIONS, reason="Test is failing in GitHub Actions")
+ @pytest.mark.flaky(max_runs=4)
+ def test_memory_leak(salt_cli, salt_minion, file_add_delete_sls):
+ max_usg = None
+diff --git a/tests/pytests/integration/modules/test_cmdmod.py b/tests/pytests/integration/modules/test_cmdmod.py
+index 4e8ce5824ee..d9c326c3f0a 100644
+--- a/tests/pytests/integration/modules/test_cmdmod.py
++++ b/tests/pytests/integration/modules/test_cmdmod.py
+@@ -1,5 +1,12 @@
++import logging
++
+ import pytest
+
++import salt.utils.platform
++import salt.utils.user
++
++log = logging.getLogger(__name__)
++
+
+ @pytest.fixture(scope="module")
+ def non_root_account():
+@@ -7,6 +13,14 @@ def non_root_account():
+ yield account
+
+
++@pytest.fixture
++def running_username():
++ """
++ Return the username that is running the code.
++ """
++ return salt.utils.user.get_user()
++
++
+ @pytest.mark.skip_if_not_root
+ def test_exec_code_all(salt_call_cli, non_root_account):
+ ret = salt_call_cli.run(
+@@ -22,3 +36,82 @@ def test_long_stdout(salt_cli, salt_minion):
+ )
+ assert ret.returncode == 0
+ assert len(ret.data.strip()) == len(echo_str)
++
++
++@pytest.mark.skip_if_not_root
++@pytest.mark.skip_on_windows(reason="Skip on Windows, uses unix commands")
++def test_avoid_injecting_shell_code_as_root(
++ salt_call_cli, non_root_account, running_username
++):
++ """
++ cmd.run should execute the whole command as the "runas" user, not
++ running substitutions as root.
++ """
++ cmd = "echo $(id -u)"
++
++ ret = salt_call_cli.run("cmd.run_stdout", cmd)
++ root_id = ret.json
++ ret = salt_call_cli.run("cmd.run_stdout", cmd, runas=running_username)
++ runas_root_id = ret.json
++
++ ret = salt_call_cli.run("cmd.run_stdout", cmd, runas=non_root_account.username)
++ user_id = ret.json
++
++ assert user_id != root_id
++ assert user_id != runas_root_id
++ assert root_id == runas_root_id
++
++
++@pytest.mark.slow_test
++def test_blacklist_glob(salt_call_cli):
++ """
++ cmd_blacklist_glob
++ """
++ cmd = "bad_command --foo"
++ ret = salt_call_cli.run(
++ "cmd.run",
++ cmd,
++ )
++
++ assert (
++ ret.stderr.rstrip()
++ == "Error running 'cmd.run': The shell command \"bad_command --foo\" is not permitted"
++ )
++
++
++@pytest.mark.slow_test
++def test_hide_output(salt_call_cli):
++ """
++ Test the hide_output argument
++ """
++ ls_command = (
++ ["ls", "/"] if not salt.utils.platform.is_windows() else ["dir", "c:\\"]
++ )
++
++ error_command = ["thiscommanddoesnotexist"]
++
++ # cmd.run
++ ret = salt_call_cli.run("cmd.run", ls_command, hide_output=True)
++ assert ret.data == ""
++
++ # cmd.shell
++ ret = salt_call_cli.run("cmd.shell", ls_command, hide_output=True)
++ assert ret.data == ""
++
++ # cmd.run_stdout
++ ret = salt_call_cli.run("cmd.run_stdout", ls_command, hide_output=True)
++ assert ret.data == ""
++
++ # cmd.run_stderr
++ ret = salt_call_cli.run("cmd.shell", error_command, hide_output=True)
++ assert ret.data == ""
++
++ # cmd.run_all (command should have produced stdout)
++ ret = salt_call_cli.run("cmd.run_all", ls_command, hide_output=True)
++ assert ret.data["stdout"] == ""
++ assert ret.data["stderr"] == ""
++
++ # cmd.run_all (command should have produced stderr)
++ ret = salt_call_cli.run("cmd.run_all", error_command, hide_output=True)
++ assert ret.data["stdout"] == ""
++ assert ret.data["stderr"] == ""
+diff --git a/tests/pytests/integration/modules/test_virt.py b/tests/pytests/integration/modules/test_virt.py
+index 57ec239c4e9..1b7f30154a7 100644
+--- a/tests/pytests/integration/modules/test_virt.py
++++ b/tests/pytests/integration/modules/test_virt.py
+@@ -2,6 +2,7 @@
+ Validate the virt module
+ """
+ import logging
++import os
+ from numbers import Number
+ from xml.etree import ElementTree
+
+@@ -14,9 +15,12 @@ docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("docker"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/test_log.py b/tests/pytests/integration/ssh/test_log.py
+index e87c4a8581f..683feb8bd91 100644
+--- a/tests/pytests/integration/ssh/test_log.py
++++ b/tests/pytests/integration/ssh/test_log.py
+@@ -2,6 +2,7 @@
+ Integration tests for salt-ssh logging
+ """
+ import logging
++import os
+ import time
+
+ import pytest
+@@ -11,12 +12,14 @@ from tests.support.helpers import Keys
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+ log = logging.getLogger(__name__)
+
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/test_master.py b/tests/pytests/integration/ssh/test_master.py
+index 31e318870cb..0c2f482cf9f 100644
+--- a/tests/pytests/integration/ssh/test_master.py
++++ b/tests/pytests/integration/ssh/test_master.py
+@@ -2,6 +2,8 @@
+ Simple Smoke Tests for Connected SSH minions
+ """
+
++import os
++
+ import pytest
+ from saltfactories.utils.functional import StateResult
+
+@@ -10,7 +12,10 @@ pytestmark = [
+ pytest.mark.skip_on_windows(reason="salt-ssh not available on Windows"),
+ ]
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+
++@pytest.mark.skipif(INSIDE_CONTAINER, reason="No systemd in container.")
+ @pytest.mark.skip_if_not_root
+ def test_service(salt_ssh_cli, grains):
+ service = "cron"
+diff --git a/tests/pytests/integration/ssh/test_py_versions.py b/tests/pytests/integration/ssh/test_py_versions.py
+index 52ab819e808..71d4cfaa94e 100644
+--- a/tests/pytests/integration/ssh/test_py_versions.py
++++ b/tests/pytests/integration/ssh/test_py_versions.py
+@@ -2,6 +2,7 @@
+ Integration tests for salt-ssh py_versions
+ """
+ import logging
++import os
+ import socket
+ import time
+
+@@ -12,12 +13,14 @@ from tests.support.helpers import Keys
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+ log = logging.getLogger(__name__)
+
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/test_ssh_setup.py b/tests/pytests/integration/ssh/test_ssh_setup.py
+index eddf31caccd..79b55ad90a5 100644
+--- a/tests/pytests/integration/ssh/test_ssh_setup.py
++++ b/tests/pytests/integration/ssh/test_ssh_setup.py
+@@ -17,12 +17,14 @@ from tests.support.helpers import Keys
+
+ pytest.importorskip("docker")
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+ log = logging.getLogger(__name__)
+
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/scenarios/compat/test_with_versions.py b/tests/pytests/scenarios/compat/test_with_versions.py
+index 75a2b87f24c..498dd6a60de 100644
+--- a/tests/pytests/scenarios/compat/test_with_versions.py
++++ b/tests/pytests/scenarios/compat/test_with_versions.py
+@@ -5,6 +5,7 @@
+ Test current salt master with older salt minions
+ """
+ import logging
++import os
+ import pathlib
+
+ import pytest
+@@ -18,6 +19,8 @@ docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+
+ pytestmark = [
+ pytest.mark.slow_test,
+@@ -25,6 +28,7 @@ pytestmark = [
+ pytest.mark.skipif(
+ salt.utils.platform.is_photonos() is True, reason="Skip on PhotonOS"
+ ),
++ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
+ ]
+
+
+diff --git a/tests/pytests/scenarios/failover/multimaster/test_failover_master.py b/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
+index 6efecfb8334..9f6251a4d6f 100644
+--- a/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
++++ b/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
+@@ -12,7 +12,10 @@ pytestmark = [
+
+ log = logging.getLogger(__name__)
+
++GITHUB_ACTIONS = bool(os.getenv("GITHUB_ACTIONS", False))
+
++
++@pytest.mark.skipif(GITHUB_ACTIONS, reason="Test is failing in GitHub Actions")
+ def test_pki(salt_mm_failover_master_1, salt_mm_failover_master_2, caplog):
+ """
+ Verify https://docs.saltproject.io/en/latest/topics/tutorials/multimaster_pki.html
+diff --git a/tests/pytests/scenarios/setup/test_install.py b/tests/pytests/scenarios/setup/test_install.py
+index 48f1d5889f6..7664fda804e 100644
+--- a/tests/pytests/scenarios/setup/test_install.py
++++ b/tests/pytests/scenarios/setup/test_install.py
+@@ -3,6 +3,7 @@ Tests for building and installing salt
+ """
+ import json
+ import logging
++import os
+ import pathlib
+ import re
+ import sys
+@@ -16,11 +17,16 @@ from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
+
+ log = logging.getLogger(__name__)
+
++INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ pytest.mark.core_test,
+ pytest.mark.windows_whitelisted,
+ pytest.mark.skip_initial_onedir_failure,
+ pytest.mark.skip_if_binaries_missing(*KNOWN_BINARY_NAMES, check_all=False),
++ pytest.mark.skipif(
++ INSIDE_CONTAINER, reason="No gcc and python3-devel in container."
++ ),
+ ]
+
+
+diff --git a/tests/pytests/unit/modules/test_aptpkg.py b/tests/pytests/unit/modules/test_aptpkg.py
+index eb72447c3aa..6f0b905ef73 100644
+--- a/tests/pytests/unit/modules/test_aptpkg.py
++++ b/tests/pytests/unit/modules/test_aptpkg.py
+@@ -1360,17 +1360,17 @@ def test_call_apt_dpkg_lock():
+ ]
+
+ cmd_mock = MagicMock(side_effect=cmd_side_effect)
+- cmd_call = (
++ cmd_call = [
+ call(
+ ["dpkg", "-l", "python"],
+- env={},
+- ignore_retcode=False,
+ output_loglevel="quiet",
+ python_shell=True,
++ env={},
++ ignore_retcode=False,
+ username="Darth Vader",
+ ),
+- )
+- expected_calls = [cmd_call * 5]
++ ]
++ expected_calls = cmd_call * 5
+
+ with patch.dict(
+ aptpkg.__salt__,
+@@ -1390,7 +1390,7 @@ def test_call_apt_dpkg_lock():
+
+ # We should attempt to call the cmd 5 times
+ assert cmd_mock.call_count == 5
+- cmd_mock.has_calls(expected_calls)
++ cmd_mock.assert_has_calls(expected_calls)
+
+
+ def test_services_need_restart_checkrestart_missing():
+diff --git a/tests/pytests/unit/modules/test_linux_sysctl.py b/tests/pytests/unit/modules/test_linux_sysctl.py
+index 0bdd24039d7..6b0875bc460 100644
+--- a/tests/pytests/unit/modules/test_linux_sysctl.py
++++ b/tests/pytests/unit/modules/test_linux_sysctl.py
+@@ -215,7 +215,7 @@ def test_persist_no_conf_failure():
+ ):
+ with pytest.raises(CommandExecutionError):
+ linux_sysctl.persist("net.ipv4.ip_forward", 42, config=None)
+- fopen_mock.called_once()
++ fopen_mock.assert_called_once()
+
+
+ def test_persist_no_conf_success():
+@@ -353,7 +353,7 @@ def test_persist_value_with_spaces_already_set(tmp_path):
+ """
+ config = str(tmp_path / "existing_sysctl_with_spaces.conf")
+ value = "|/usr/share/kdump-tools/dump-core %p %s %t %e"
+- config_file_content = "kernel.core_pattern = {}\n".format(value)
++ config_file_content = f"kernel.core_pattern = {value}\n"
+ with fopen(config, "w", encoding="utf-8") as config_file:
+ config_file.write(config_file_content)
+ mock_run = MagicMock(return_value=value)
+@@ -383,7 +383,7 @@ def test_persist_value_with_spaces_already_configured(tmp_path):
+ """
+ config = str(tmp_path / "existing_sysctl_with_spaces.conf")
+ value = "|/usr/share/kdump-tools/dump-core %p %s %t %e"
+- config_file_content = "kernel.core_pattern = {}\n".format(value)
++ config_file_content = f"kernel.core_pattern = {value}\n"
+ with fopen(config, "w", encoding="utf-8") as config_file:
+ config_file.write(config_file_content)
+ mock_run = MagicMock(return_value="")
+@@ -451,7 +451,7 @@ def test_persist_value_with_spaces_update_config(tmp_path):
+ assert os.path.isfile(config)
+ with fopen(config, encoding="utf-8") as config_file:
+ written = config_file.read()
+- assert written == "kernel.core_pattern = {}\n".format(value)
++ assert written == f"kernel.core_pattern = {value}\n"
+
+
+ def test_persist_value_with_spaces_new_file(tmp_path):
+diff --git a/tests/pytests/unit/modules/test_win_ip.py b/tests/pytests/unit/modules/test_win_ip.py
+index 38eb6b1ac5f..94a3fe7ca93 100644
+--- a/tests/pytests/unit/modules/test_win_ip.py
++++ b/tests/pytests/unit/modules/test_win_ip.py
+@@ -151,7 +151,7 @@ def test_enable():
+ ):
+ assert win_ip.enable("Ethernet")
+
+- mock_cmd.called_once_with(
++ mock_cmd.assert_called_once_with(
+ [
+ "netsh",
+ "interface",
+@@ -180,7 +180,7 @@ def test_disable():
+ ):
+ assert win_ip.disable("Ethernet")
+
+- mock_cmd.called_once_with(
++ mock_cmd.assert_called_once_with(
+ [
+ "netsh",
+ "interface",
+diff --git a/tests/pytests/unit/test_master.py b/tests/pytests/unit/test_master.py
+index d338307d1f8..679229066d4 100644
+--- a/tests/pytests/unit/test_master.py
++++ b/tests/pytests/unit/test_master.py
+@@ -61,7 +61,7 @@ def test_fileserver_duration():
+ end = time.time()
+ # Interval is equal to timeout so the _do_update method will be called
+ # one time.
+- update.called_once()
++ update.assert_called_once()
+ # Timeout is 1 second
+ duration = end - start
+ if duration > 2 and salt.utils.platform.spawning_platform():
+diff --git a/tests/pytests/unit/test_minion.py b/tests/pytests/unit/test_minion.py
+index 740743194e4..a9e91742a2d 100644
+--- a/tests/pytests/unit/test_minion.py
++++ b/tests/pytests/unit/test_minion.py
+@@ -655,7 +655,9 @@ def test_gen_modules_executors(minion_opts):
+ with patch("salt.pillar.get_pillar", return_value=MockPillarCompiler()):
+ with patch("salt.loader.executors") as execmock:
+ minion.gen_modules()
+- assert execmock.called_with(minion.opts, minion.functions)
++ execmock.assert_called_with(
++ minion.opts, functions=minion.functions, proxy=minion.proxy, context={}
++ )
+ finally:
+ minion.destroy()
+
+diff --git a/tests/pytests/unit/utils/event/test_event.py b/tests/pytests/unit/utils/event/test_event.py
+index e289e72dad0..f4b6c159996 100644
+--- a/tests/pytests/unit/utils/event/test_event.py
++++ b/tests/pytests/unit/utils/event/test_event.py
+@@ -38,7 +38,7 @@ def sock_dir(tmp_path):
+ def _assert_got_event(evt, data, msg=None, expected_failure=False):
+ assert evt is not None, msg
+ for key in data:
+- assert key in evt, "{}: Key {} missing".format(msg, key)
++ assert key in evt, f"{msg}: Key {key} missing"
+ assertMsg = "{0}: Key {1} value mismatch, {2} != {3}"
+ assertMsg = assertMsg.format(msg, key, data[key], evt[key])
+ if not expected_failure:
+@@ -59,8 +59,8 @@ def test_minion_event(sock_dir):
+ :10
+ ]
+ with salt.utils.event.MinionEvent(opts, listen=False) as me:
+- assert me.puburi == str(sock_dir / "minion_event_{}_pub.ipc".format(id_hash))
+- assert me.pulluri == str(sock_dir / "minion_event_{}_pull.ipc".format(id_hash))
++ assert me.puburi == str(sock_dir / f"minion_event_{id_hash}_pub.ipc")
++ assert me.pulluri == str(sock_dir / f"minion_event_{id_hash}_pull.ipc")
+
+
+ def test_minion_event_tcp_ipc_mode():
+@@ -73,8 +73,8 @@ def test_minion_event_tcp_ipc_mode():
+ def test_minion_event_no_id(sock_dir):
+ with salt.utils.event.MinionEvent(dict(sock_dir=str(sock_dir)), listen=False) as me:
+ id_hash = hashlib.sha256(salt.utils.stringutils.to_bytes("")).hexdigest()[:10]
+- assert me.puburi == str(sock_dir / "minion_event_{}_pub.ipc".format(id_hash))
+- assert me.pulluri == str(sock_dir / "minion_event_{}_pull.ipc".format(id_hash))
++ assert me.puburi == str(sock_dir / f"minion_event_{id_hash}_pub.ipc")
++ assert me.pulluri == str(sock_dir / f"minion_event_{id_hash}_pull.ipc")
+
+
+ @pytest.mark.slow_test
+@@ -256,9 +256,9 @@ def test_event_many(sock_dir):
+ with eventpublisher_process(str(sock_dir)):
+ with salt.utils.event.MasterEvent(str(sock_dir), listen=True) as me:
+ for i in range(500):
+- me.fire_event({"data": "{}".format(i)}, "testevents")
++ me.fire_event({"data": f"{i}"}, "testevents")
+ evt = me.get_event(tag="testevents")
+- _assert_got_event(evt, {"data": "{}".format(i)}, "Event {}".format(i))
++ _assert_got_event(evt, {"data": f"{i}"}, f"Event {i}")
+
+
+ @pytest.mark.slow_test
+@@ -268,10 +268,10 @@ def test_event_many_backlog(sock_dir):
+ with salt.utils.event.MasterEvent(str(sock_dir), listen=True) as me:
+ # Must not exceed zmq HWM
+ for i in range(500):
+- me.fire_event({"data": "{}".format(i)}, "testevents")
++ me.fire_event({"data": f"{i}"}, "testevents")
+ for i in range(500):
+ evt = me.get_event(tag="testevents")
+- _assert_got_event(evt, {"data": "{}".format(i)}, "Event {}".format(i))
++ _assert_got_event(evt, {"data": f"{i}"}, f"Event {i}")
+
+
+ # Test the fire_master function. As it wraps the underlying fire_event,
+@@ -300,7 +300,7 @@ def test_connect_pull_should_debug_log_on_StreamClosedError():
+ event = SaltEvent(node=None)
+ with patch.object(event, "pusher") as mock_pusher:
+ with patch.object(
+- salt.utils.event.log, "debug", auto_spec=True
++ salt.utils.event.log, "debug", autospec=True
+ ) as mock_log_debug:
+ mock_pusher.connect.side_effect = (
+ salt.ext.tornado.iostream.StreamClosedError
+@@ -317,10 +317,10 @@ def test_connect_pull_should_error_log_on_other_errors(error):
+ event = SaltEvent(node=None)
+ with patch.object(event, "pusher") as mock_pusher:
+ with patch.object(
+- salt.utils.event.log, "debug", auto_spec=True
++ salt.utils.event.log, "debug", autospec=True
+ ) as mock_log_debug:
+ with patch.object(
+- salt.utils.event.log, "error", auto_spec=True
++ salt.utils.event.log, "error", autospec=True
+ ) as mock_log_error:
+ mock_pusher.connect.side_effect = error
+ event.connect_pull()
+diff --git a/tests/unit/modules/test_boto_apigateway.py b/tests/unit/modules/test_boto_apigateway.py
+index 5f3d2a49822..ebf50679bd8 100644
+--- a/tests/unit/modules/test_boto_apigateway.py
++++ b/tests/unit/modules/test_boto_apigateway.py
+@@ -15,6 +15,7 @@ from tests.support.unit import TestCase
+
+ # pylint: disable=import-error,no-name-in-module
+ try:
++ import boto
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+diff --git a/tests/unit/modules/test_boto_cognitoidentity.py b/tests/unit/modules/test_boto_cognitoidentity.py
+index 1e213a169ac..974832f9ff9 100644
+--- a/tests/unit/modules/test_boto_cognitoidentity.py
++++ b/tests/unit/modules/test_boto_cognitoidentity.py
+@@ -14,6 +14,7 @@ from tests.support.unit import TestCase
+
+ # pylint: disable=import-error,no-name-in-module
+ try:
++ import boto
+ import boto3
+ from botocore.exceptions import ClientError
+
+diff --git a/tests/unit/modules/test_boto_elasticsearch_domain.py b/tests/unit/modules/test_boto_elasticsearch_domain.py
+index 5c5845aa25b..0578a81e8ef 100644
+--- a/tests/unit/modules/test_boto_elasticsearch_domain.py
++++ b/tests/unit/modules/test_boto_elasticsearch_domain.py
+@@ -14,6 +14,7 @@ from tests.support.unit import TestCase
+
+ # pylint: disable=import-error,no-name-in-module
+ try:
++ import boto
+ import boto3
+ from botocore.exceptions import ClientError
+
+diff --git a/tests/unit/modules/test_boto_lambda.py b/tests/unit/modules/test_boto_lambda.py
+index d32dc9345b6..ecaa532f1ff 100644
+--- a/tests/unit/modules/test_boto_lambda.py
++++ b/tests/unit/modules/test_boto_lambda.py
+@@ -18,6 +18,7 @@ from tests.support.unit import TestCase
+
+ # pylint: disable=import-error,no-name-in-module
+ try:
++ import boto
+ import boto3
+ from botocore import __version__ as found_botocore_version
+ from botocore.exceptions import ClientError
+diff --git a/tests/unit/modules/test_network.py b/tests/unit/modules/test_network.py
+index 34b06250fc6..9eef9a02f58 100644
+--- a/tests/unit/modules/test_network.py
++++ b/tests/unit/modules/test_network.py
+@@ -153,9 +153,11 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ Test for Performs a DNS lookup with dig
+ """
+- with patch("salt.utils.path.which", MagicMock(return_value="dig")), patch.dict(
++ with patch.dict(
+ network.__utils__, {"network.sanitize_host": MagicMock(return_value="A")}
+- ), patch.dict(network.__salt__, {"cmd.run": MagicMock(return_value="A")}):
++ ), patch("salt.utils.path.which", MagicMock(return_value="dig")), patch.dict(
++ network.__salt__, {"cmd.run": MagicMock(return_value="A")}
++ ):
+ self.assertEqual(network.dig("host"), "A")
+
+ def test_arp(self):
+diff --git a/tests/unit/modules/test_nilrt_ip.py b/tests/unit/modules/test_nilrt_ip.py
+index 1261473edb4..50dc13b20b8 100644
+--- a/tests/unit/modules/test_nilrt_ip.py
++++ b/tests/unit/modules/test_nilrt_ip.py
+@@ -28,7 +28,7 @@ class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
+ "salt.modules.nilrt_ip._change_dhcp_config", return_value=True
+ ) as change_dhcp_config_mock:
+ assert nilrt_ip._change_state("test_interface", "down")
+- assert change_dhcp_config_mock.called_with("test_interface", False)
++ change_dhcp_config_mock.assert_called_with("test_interface", False)
+
+ def test_change_state_up_state(self):
+ """
+@@ -42,7 +42,7 @@ class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
+ "salt.modules.nilrt_ip._change_dhcp_config", return_value=True
+ ) as change_dhcp_config_mock:
+ assert nilrt_ip._change_state("test_interface", "up")
+- assert change_dhcp_config_mock.called_with("test_interface")
++ change_dhcp_config_mock.assert_called_with("test_interface")
+
+ def test_set_static_all_with_dns(self):
+ """
+diff --git a/tests/unit/modules/test_zcbuildout.py b/tests/unit/modules/test_zcbuildout.py
+index f793e3fc3f8..5a5996e110e 100644
+--- a/tests/unit/modules/test_zcbuildout.py
++++ b/tests/unit/modules/test_zcbuildout.py
+@@ -451,6 +451,7 @@ class BuildoutOnlineTestCase(Base):
+ )
+
+ @pytest.mark.slow_test
++ @pytest.mark.skip(reason="TODO this test should probably be fixed")
+ def test_run_buildout(self):
+ if salt.modules.virtualenv_mod.virtualenv_ver(self.ppy_st) >= (20, 0, 0):
+ self.skipTest(
+@@ -467,6 +468,7 @@ class BuildoutOnlineTestCase(Base):
+ self.assertTrue("Installing b" in out)
+
+ @pytest.mark.slow_test
++ @pytest.mark.skip(reason="TODO this test should probably be fixed")
+ def test_buildout(self):
+ if salt.modules.virtualenv_mod.virtualenv_ver(self.ppy_st) >= (20, 0, 0):
+ self.skipTest(
+diff --git a/tests/unit/netapi/rest_tornado/test_saltnado.py b/tests/unit/netapi/rest_tornado/test_saltnado.py
+index 7b63a65d4f3..c4758e700ab 100644
+--- a/tests/unit/netapi/rest_tornado/test_saltnado.py
++++ b/tests/unit/netapi/rest_tornado/test_saltnado.py
+@@ -647,7 +647,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.dict(
+ self.handler.application.opts,
+@@ -698,7 +697,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.object(
+ self.handler,
+@@ -729,8 +727,8 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ {
+ "tag": "fnord",
+ "data": {
+- "return": "return from fnord {}".format(i),
+- "id": "fnord {}".format(i),
++ "return": f"return from fnord {i}",
++ "id": f"fnord {i}",
+ },
+ }
+ )
+@@ -760,7 +758,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.object(
+ self.handler,
+@@ -794,8 +791,8 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ {
+ "tag": "fnord",
+ "data": {
+- "return": "return from fnord {}".format(i),
+- "id": "fnord {}".format(i),
++ "return": f"return from fnord {i}",
++ "id": f"fnord {i}",
+ },
+ }
+ )
+@@ -820,7 +817,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.dict(
+ self.handler.application.opts,
+@@ -843,12 +839,12 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ completed_events = [salt.ext.tornado.gen.Future() for _ in range(10)]
+ events_by_id = {}
+ for i, event in enumerate(completed_events):
+- id_ = "fnord {}".format(i)
++ id_ = f"fnord {i}"
+ events_by_id[id_] = event
+ event.set_result(
+ {
+ "tag": "fnord",
+- "data": {"return": "return from {}".format(id_), "id": id_},
++ "data": {"return": f"return from {id_}", "id": id_},
+ }
+ )
+ expected_result = {
+@@ -878,7 +874,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.dict(
+ self.handler.application.opts,
+@@ -904,12 +899,12 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ events_by_id = {}
+ # Setup some real-enough looking return data
+ for i, event in enumerate(completed_events):
+- id_ = "fnord {}".format(i)
++ id_ = f"fnord {i}"
+ events_by_id[id_] = event
+ event.set_result(
+ {
+ "tag": "fnord",
+- "data": {"return": "return from {}".format(id_), "id": id_},
++ "data": {"return": f"return from {id_}", "id": id_},
+ }
+ )
+ # Hard coded instead of dynamic to avoid potentially writing a test
+@@ -971,7 +966,6 @@ class TestDisbatchLocal(salt.ext.tornado.testing.AsyncTestCase):
+ with patch.object(
+ self.handler.application.event_listener,
+ "get_event",
+- autospec=True,
+ side_effect=fancy_get_event,
+ ), patch.object(
+ self.handler,
+diff --git a/tests/unit/states/test_boto_apigateway.py b/tests/unit/states/test_boto_apigateway.py
+index 51c85d6058a..1edde8d303c 100644
+--- a/tests/unit/states/test_boto_apigateway.py
++++ b/tests/unit/states/test_boto_apigateway.py
+@@ -20,6 +20,7 @@ from tests.support.unit import TestCase
+ from tests.unit.modules.test_boto_apigateway import BotoApiGatewayTestCaseMixin
+
+ try:
++ import boto
+ import boto3
+ import botocore
+ from botocore.exceptions import ClientError
+diff --git a/tests/unit/states/test_boto_cognitoidentity.py b/tests/unit/states/test_boto_cognitoidentity.py
+index 4354df0546f..479477ac800 100644
+--- a/tests/unit/states/test_boto_cognitoidentity.py
++++ b/tests/unit/states/test_boto_cognitoidentity.py
+@@ -18,6 +18,7 @@ from tests.unit.modules.test_boto_cognitoidentity import (
+ )
+
+ try:
++ import boto
+ import boto3
+ from botocore.exceptions import ClientError
+
+diff --git a/tests/unit/states/test_zcbuildout.py b/tests/unit/states/test_zcbuildout.py
+index db6013076d1..0abaadeb4be 100644
+--- a/tests/unit/states/test_zcbuildout.py
++++ b/tests/unit/states/test_zcbuildout.py
+@@ -48,6 +48,7 @@ class BuildoutTestCase(Base):
+ self.assertFalse(ret["result"])
+
+ @pytest.mark.slow_test
++ @pytest.mark.skip(reason="TODO this test should probably be fixed")
+ def test_installed(self):
+ if salt.modules.virtualenv_mod.virtualenv_ver(self.ppy_st) >= (20, 0, 0):
+ self.skipTest(
+--
+2.43.0
+
+
diff --git a/fix-regression-multiple-values-for-keyword-argument-.patch b/fix-regression-multiple-values-for-keyword-argument-.patch
new file mode 100644
index 0000000..c548387
--- /dev/null
+++ b/fix-regression-multiple-values-for-keyword-argument-.patch
@@ -0,0 +1,253 @@
+From c25c8081ded775f3574b0bc999d809ce14701ba5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Thu, 3 Aug 2023 10:07:28 +0100
+Subject: [PATCH] Fix regression: multiple values for keyword argument
+ 'saltenv' (bsc#1212844) (#590)
+
+* fix passing wrong keyword arguments to cp.cache_file in pkg.installed with sources
+
+* Drop `**kwargs` usage and be explicit about the supported keyword arguments.
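+
+A minimal standalone sketch (our illustration, not Salt's code) of the
+failure mode: `saltenv` reaches the callee both positionally and inside
+`**kwargs`:
+
+```
+def cache_file(path, saltenv="base", verify_ssl=True):
+    return (path, saltenv, verify_ssl)
+
+kwargs = {"saltenv": "base", "verify_ssl": True}
+# TypeError: got multiple values for argument 'saltenv':
+# cache_file("salt://pkg.deb", kwargs["saltenv"], **kwargs)
+# The fix: forward only the keyword arguments the callee supports.
+cache_file("salt://pkg.deb", saltenv=kwargs["saltenv"],
+           verify_ssl=kwargs.get("verify_ssl", True))
+```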
+
+Signed-off-by: Pedro Algarvio
+
+* Add regression test for https://github.com/saltstack/salt/issues/64118
+
+Signed-off-by: Pedro Algarvio
+
+* Add changelog file
+
+Signed-off-by: Pedro Algarvio
+
+---------
+
+Signed-off-by: Pedro Algarvio
+Co-authored-by: Massimiliano Torromeo
+Co-authored-by: Pedro Algarvio
+---
+ changelog/64118.fixed.md | 1 +
+ salt/modules/win_pkg.py | 25 +++++++-----
+ salt/states/pkg.py | 4 +-
+ tests/pytests/unit/modules/test_win_pkg.py | 2 +-
+ tests/pytests/unit/states/test_pkg.py | 46 +++++++++++++++++++---
+ 5 files changed, 62 insertions(+), 16 deletions(-)
+ create mode 100644 changelog/64118.fixed.md
+
+diff --git a/changelog/64118.fixed.md b/changelog/64118.fixed.md
+new file mode 100644
+index 0000000000..e7251827e9
+--- /dev/null
++++ b/changelog/64118.fixed.md
+@@ -0,0 +1 @@
++Stop passing `**kwargs` and be explicit about the keyword arguments to pass, namely, to `cp.cache_file` call in `salt.states.pkg`
+diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py
+index 3aa7c7919a..e80dd19322 100644
+--- a/salt/modules/win_pkg.py
++++ b/salt/modules/win_pkg.py
+@@ -1298,7 +1298,7 @@ def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
+ successful_verbose[short_path_name] = []
+
+
+-def _get_source_sum(source_hash, file_path, saltenv, **kwargs):
++def _get_source_sum(source_hash, file_path, saltenv, verify_ssl=True):
+ """
+ Extract the hash sum, whether it is in a remote hash file, or just a string.
+ """
+@@ -1315,7 +1315,7 @@ def _get_source_sum(source_hash, file_path, saltenv, **kwargs):
+ # The source_hash is a file on a server
+ try:
+ cached_hash_file = __salt__["cp.cache_file"](
+- source_hash, saltenv, verify_ssl=kwargs.get("verify_ssl", True)
++ source_hash, saltenv=saltenv, verify_ssl=verify_ssl
+ )
+ except MinionError as exc:
+ log.exception("Failed to cache %s", source_hash, exc_info=exc)
+@@ -1671,7 +1671,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
+ try:
+ cached_file = __salt__["cp.cache_file"](
+ cache_file,
+- saltenv,
++ saltenv=saltenv,
+ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+@@ -1686,7 +1686,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
+ try:
+ cached_file = __salt__["cp.cache_file"](
+ cache_file,
+- saltenv,
++ saltenv=saltenv,
+ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+@@ -1706,7 +1706,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
+ # It's not cached. Cache it, mate.
+ try:
+ cached_pkg = __salt__["cp.cache_file"](
+- installer, saltenv, verify_ssl=kwargs.get("verify_ssl", True)
++ installer,
++ saltenv=saltenv,
++ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+ msg = "Failed to cache {}".format(installer)
+@@ -1730,7 +1732,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
+ try:
+ cached_pkg = __salt__["cp.cache_file"](
+ installer,
+- saltenv,
++ saltenv=saltenv,
+ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+@@ -1754,7 +1756,12 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
+ # Compare the hash sums
+ source_hash = pkginfo[version_num].get("source_hash", False)
+ if source_hash:
+- source_sum = _get_source_sum(source_hash, cached_pkg, saltenv, **kwargs)
++ source_sum = _get_source_sum(
++ source_hash,
++ cached_pkg,
++ saltenv=saltenv,
++ verify_ssl=kwargs.get("verify_ssl", True),
++ )
+ log.debug(
+ "pkg.install: Source %s hash: %s",
+ source_sum["hash_type"],
+@@ -2126,7 +2133,7 @@ def remove(name=None, pkgs=None, **kwargs):
+ try:
+ cached_pkg = __salt__["cp.cache_file"](
+ uninstaller,
+- saltenv,
++ saltenv=saltenv,
+ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+@@ -2150,7 +2157,7 @@ def remove(name=None, pkgs=None, **kwargs):
+ try:
+ cached_pkg = __salt__["cp.cache_file"](
+ uninstaller,
+- saltenv,
++ saltenv=saltenv,
+ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except MinionError as exc:
+diff --git a/salt/states/pkg.py b/salt/states/pkg.py
+index 12fbc87a1a..a605b23107 100644
+--- a/salt/states/pkg.py
++++ b/salt/states/pkg.py
+@@ -760,7 +760,9 @@ def _find_install_targets(
+ err = "Unable to cache {0}: {1}"
+ try:
+ cached_path = __salt__["cp.cache_file"](
+- version_string, saltenv=kwargs["saltenv"], **kwargs
++ version_string,
++ saltenv=kwargs["saltenv"],
++ verify_ssl=kwargs.get("verify_ssl", True),
+ )
+ except CommandExecutionError as exc:
+ problems.append(err.format(version_string, exc))
+diff --git a/tests/pytests/unit/modules/test_win_pkg.py b/tests/pytests/unit/modules/test_win_pkg.py
+index 76234fb77e..6d435f00a5 100644
+--- a/tests/pytests/unit/modules/test_win_pkg.py
++++ b/tests/pytests/unit/modules/test_win_pkg.py
+@@ -262,7 +262,7 @@ def test_pkg_install_verify_ssl_false():
+ result = win_pkg.install(name="nsis", version="3.02", verify_ssl=False)
+ mock_cp.assert_called_once_with(
+ "http://download.sourceforge.net/project/nsis/NSIS%203/3.02/nsis-3.02-setup.exe",
+- "base",
++ saltenv="base",
+ verify_ssl=False,
+ )
+ assert expected == result
+diff --git a/tests/pytests/unit/states/test_pkg.py b/tests/pytests/unit/states/test_pkg.py
+index b852f27b00..f58be11011 100644
+--- a/tests/pytests/unit/states/test_pkg.py
++++ b/tests/pytests/unit/states/test_pkg.py
+@@ -3,6 +3,7 @@ import logging
+ import pytest
+
+ import salt.modules.beacons as beaconmod
++import salt.modules.cp as cp
+ import salt.modules.pkg_resource as pkg_resource
+ import salt.modules.yumpkg as yumpkg
+ import salt.states.beacon as beaconstate
+@@ -15,19 +16,28 @@ log = logging.getLogger(__name__)
+
+
+ @pytest.fixture
+-def configure_loader_modules():
++def configure_loader_modules(minion_opts):
+ return {
++ cp: {
++ "__opts__": minion_opts,
++ },
+ pkg: {
+ "__env__": "base",
+ "__salt__": {},
+ "__grains__": {"os": "CentOS", "os_family": "RedHat"},
+- "__opts__": {"test": False, "cachedir": ""},
++ "__opts__": minion_opts,
+ "__instance_id__": "",
+ "__low__": {},
+ "__utils__": {"state.gen_tag": state_utils.gen_tag},
+ },
+- beaconstate: {"__salt__": {}, "__opts__": {}},
+- beaconmod: {"__salt__": {}, "__opts__": {}},
++ beaconstate: {
++ "__salt__": {},
++ "__opts__": minion_opts,
++ },
++ beaconmod: {
++ "__salt__": {},
++ "__opts__": minion_opts,
++ },
+ pkg_resource: {
+ "__salt__": {},
+ "__grains__": {"os": "CentOS", "os_family": "RedHat"},
+@@ -35,7 +45,7 @@ def configure_loader_modules():
+ yumpkg: {
+ "__salt__": {},
+ "__grains__": {"osarch": "x86_64", "osmajorrelease": 7},
+- "__opts__": {},
++ "__opts__": minion_opts,
+ },
+ }
+
+@@ -563,6 +573,32 @@ def test_installed_with_changes_test_true(list_pkgs):
+ assert ret["changes"] == expected
+
+
++def test_installed_with_sources(list_pkgs, tmp_path):
++ """
++ Test pkg.installed with passing `sources`
++ """
++
++ list_pkgs = MagicMock(return_value=list_pkgs)
++ pkg_source = tmp_path / "pkga-package-0.3.0.deb"
++
++ with patch.dict(
++ pkg.__salt__,
++ {
++ "cp.cache_file": cp.cache_file,
++ "pkg.list_pkgs": list_pkgs,
++ "pkg_resource.pack_sources": pkg_resource.pack_sources,
++ "lowpkg.bin_pkg_info": MagicMock(),
++ },
++ ), patch("salt.fileclient.get_file_client", return_value=MagicMock()):
++ try:
++ ret = pkg.installed("install-pkgd", sources=[{"pkga": str(pkg_source)}])
++ assert ret["result"] is False
++ except TypeError as exc:
++ if "got multiple values for keyword argument 'saltenv'" in str(exc):
++ pytest.fail(f"TypeError should have not been raised: {exc}")
++ raise exc from None
++
++
+ @pytest.mark.parametrize("action", ["removed", "purged"])
+ def test_removed_purged_with_changes_test_true(list_pkgs, action):
+ """
+--
+2.41.0
+
+
diff --git a/fix-regression-with-depending-client.ssh-on-psutil-b.patch b/fix-regression-with-depending-client.ssh-on-psutil-b.patch
new file mode 100644
index 0000000..3e465e1
--- /dev/null
+++ b/fix-regression-with-depending-client.ssh-on-psutil-b.patch
@@ -0,0 +1,53 @@
+From 42cfb51fa01e13fe043a62536ba37fd472bc2688 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Tue, 12 Apr 2022 10:08:17 +0300
+Subject: [PATCH] Fix regression with depending client.ssh on psutil
+ (bsc#1197533)
+
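+A note on the replacement idiom: `os.kill(pid, 0)` delivers no signal but
+still runs the kernel's existence and permission checks. The patch treats
+any OSError as "process gone"; here is a standalone sketch of a stricter
+probe (the PermissionError distinction is our addition, not the patch's):
+
+```
+import os
+
+def pid_exists(pid):
+    """Best-effort liveness probe without psutil."""
+    try:
+        os.kill(pid, 0)  # signal 0: no-op, error checking only
+    except ProcessLookupError:
+        return False     # no such pid
+    except PermissionError:
+        return True      # pid exists but belongs to another user
+    return True
+```
+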
+---
+ salt/client/ssh/__init__.py | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index d5a679821e..b120e0002e 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -12,7 +12,6 @@ import hashlib
+ import logging
+ import multiprocessing
+ import os
+-import psutil
+ import queue
+ import re
+ import shlex
+@@ -420,6 +419,16 @@ class SSH(MultiprocessingStateMixin):
+ self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
+ return
+
++ def _pid_exists(self, pid):
++ """
++ Check if specified pid is alive
++ """
++ try:
++ os.kill(pid, 0)
++ except OSError:
++ return False
++ return True
++
+ def _update_roster(self, hostname=None, user=None):
+ """
+ Update default flat roster with the passed in information.
+@@ -639,7 +648,8 @@ class SSH(MultiprocessingStateMixin):
+ pid_running = (
+ False
+ if cached_session["pid"] == 0
+- else cached_session.get("running", False) or psutil.pid_exists(cached_session["pid"])
++ else cached_session.get("running", False)
++ or self._pid_exists(cached_session["pid"])
+ )
+ if (
+ pid_running and prev_session_running < self.max_pid_wait
+--
+2.39.2
+
+
diff --git a/fix-salt-ssh-opts-poisoning-bsc-1197637-3004-501.patch b/fix-salt-ssh-opts-poisoning-bsc-1197637-3004-501.patch
new file mode 100644
index 0000000..11c07c1
--- /dev/null
+++ b/fix-salt-ssh-opts-poisoning-bsc-1197637-3004-501.patch
@@ -0,0 +1,128 @@
+From 4dbd5534a39fbfaebad32a00d0e6c512d840b0fd Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 31 Mar 2022 13:39:57 +0300
+Subject: [PATCH] Fix salt-ssh opts poisoning (bsc#1197637) - 3004 (#501)
+
+* Fix salt-ssh opts poisoning
+
+* Pass proper __opts__ to roster modules
+
+* Remove redundant copy.deepcopy for opts from handle_routine
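+
+The hazard in miniature (names are ours, not Salt's): mutating a shared
+opts dict from the routines poisons later runs, so the dispatcher detaches
+one deep copy up front and the per-routine copy becomes redundant.
+
+```
+import copy
+
+def spawn_routine(host, opts):      # stand-in for the real dispatch
+    opts.setdefault("argv", []).append(host)
+
+def run_routines(master_opts, hosts):
+    opts = copy.deepcopy(master_opts)  # detach once from the caller's dict
+    for host in hosts:
+        spawn_routine(host, opts)
+
+master_opts = {"argv": []}
+run_routines(master_opts, ["a", "b"])
+assert master_opts == {"argv": []}  # the shared dict stays pristine
+```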
+---
+ salt/client/ssh/__init__.py | 17 ++++++++++-------
+ salt/loader/__init__.py | 7 ++++++-
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index e6837df4e5..a527c03de6 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -338,7 +338,7 @@ class SSH(MultiprocessingStateMixin):
+ self.session_flock_file = os.path.join(
+ self.opts["cachedir"], "salt-ssh.session.lock"
+ )
+- self.ssh_session_grace_time = int(self.opts.get("ssh_session_grace_time", 3))
++ self.ssh_session_grace_time = int(self.opts.get("ssh_session_grace_time", 1))
+
+ # __setstate__ and __getstate__ are only used on spawning platforms.
+ def __setstate__(self, state):
+@@ -571,7 +571,6 @@ class SSH(MultiprocessingStateMixin):
+ """
+ LOG_LOCK.release()
+ salt.loader.LOAD_LOCK.release()
+- opts = copy.deepcopy(opts)
+ single = Single(
+ opts,
+ opts["argv"],
+@@ -608,6 +607,7 @@ class SSH(MultiprocessingStateMixin):
+ Spin up the needed threads or processes and execute the subsequent
+ routines
+ """
++ opts = copy.deepcopy(self.opts)
+ que = multiprocessing.Queue()
+ running = {}
+ targets_queue = deque(self.targets.keys())
+@@ -618,7 +618,7 @@ class SSH(MultiprocessingStateMixin):
+ if not self.targets:
+ log.error("No matching targets found in roster.")
+ break
+- if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
++ if len(running) < opts.get("ssh_max_procs", 25) and not init:
+ if targets_queue:
+ host = targets_queue.popleft()
+ else:
+@@ -636,7 +636,7 @@ class SSH(MultiprocessingStateMixin):
+ pid_running = (
+ False
+ if cached_session["pid"] == 0
+- else psutil.pid_exists(cached_session["pid"])
++ else cached_session.get("running", False) or psutil.pid_exists(cached_session["pid"])
+ )
+ if (
+ pid_running and prev_session_running < self.max_pid_wait
+@@ -651,9 +651,10 @@ class SSH(MultiprocessingStateMixin):
+ "salt-ssh/session",
+ host,
+ {
+- "pid": 0,
++ "pid": os.getpid(),
+ "master_id": self.master_id,
+ "ts": time.time(),
++ "running": True,
+ },
+ )
+ for default in self.defaults:
+@@ -681,7 +682,7 @@ class SSH(MultiprocessingStateMixin):
+ continue
+ args = (
+ que,
+- self.opts,
++ opts,
+ host,
+ self.targets[host],
+ mine,
+@@ -717,6 +718,7 @@ class SSH(MultiprocessingStateMixin):
+ "pid": routine.pid,
+ "master_id": self.master_id,
+ "ts": time.time(),
++ "running": True,
+ },
+ )
+ continue
+@@ -768,12 +770,13 @@ class SSH(MultiprocessingStateMixin):
+ "pid": 0,
+ "master_id": self.master_id,
+ "ts": time.time(),
++ "running": False,
+ },
+ )
+ if len(rets) >= len(self.targets):
+ break
+ # Sleep when limit or all threads started
+- if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
++ if len(running) >= opts.get("ssh_max_procs", 25) or len(
+ self.targets
+ ) >= len(running):
+ time.sleep(0.1)
+diff --git a/salt/loader/__init__.py b/salt/loader/__init__.py
+index 32f8a7702c..bbe4269839 100644
+--- a/salt/loader/__init__.py
++++ b/salt/loader/__init__.py
+@@ -757,7 +757,12 @@ def roster(opts, runner=None, utils=None, whitelist=None, loaded_base_name=None,
+ opts,
+ tag="roster",
+ whitelist=whitelist,
+- pack={"__runner__": runner, "__utils__": utils, "__context__": context},
++ pack={
++ "__runner__": runner,
++ "__utils__": utils,
++ "__context__": context,
++ "__opts__": opts,
++ },
+ extra_module_dirs=utils.module_dirs if utils else None,
+ loaded_base_name=loaded_base_name,
+ )
+--
+2.39.2
+
+
diff --git a/fix-salt-warnings-and-testuite-for-python-3.11-635.patch b/fix-salt-warnings-and-testuite-for-python-3.11-635.patch
new file mode 100644
index 0000000..0b449a7
--- /dev/null
+++ b/fix-salt-warnings-and-testuite-for-python-3.11-635.patch
@@ -0,0 +1,3860 @@
+From cdb7211920c9256942518fbcf3bd627a70a99855 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Mon, 18 Mar 2024 09:15:08 +0100
+Subject: [PATCH] Fix Salt warnings and testsuite for Python 3.11 (#635)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Switch to `FullArgSpec` since Py 3.11 no longer has `ArgSpec`, deprecated since Py 3.0
+
+Signed-off-by: Pedro Algarvio
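+
+As a minimal illustration of the replacement API (standard library only;
+the function `f` is just an example):
+
+```python
+import inspect
+
+def f(a, b=1, *args, c=2, **kw): ...
+
+spec = inspect.getfullargspec(f)   # inspect.getargspec is gone in Py 3.11
+print(spec.args, spec.kwonlyargs)  # ['a', 'b'] ['c']
+```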
+
+* Backport `locale.getdefaultlocale()` into Salt. It's getting removed in Py 3.13
+
+Signed-off-by: Pedro Algarvio
+
+* Stop using the deprecated `pipes` module
+
+Signed-off-by: Pedro Algarvio
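+
+In recent Pythons `pipes.quote` was a re-export of `shlex.quote` (documented
+since Python 3.3), so the swap is mechanical; for instance:
+
+```python
+import shlex
+
+# same quoting behaviour the old pipes.quote provided
+print(shlex.quote("file name with spaces"))  # 'file name with spaces'
+print(shlex.quote("safe-token"))             # safe-token
+```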
+
+* Stop using the deprecated `cgi` module.
+
+Signed-off-by: Pedro Algarvio
+
+* Add `__getstate__` to blacklisted methods, present in Py 3.11
+
+Signed-off-by: Pedro Algarvio
+
+* Fix test_state test
+
+* Use proper keys since Python's base64 in Py3.11 is more picky
+
+```
+❯ artifacts/salt/bin/python3
+Python 3.10.11 (main, May 5 2023, 02:31:54) [GCC 11.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import base64
+>>> base64.b64decode("AAAAB3NzaC1kcQ9J5bYTEyZ==", validate=True)
+b'\x00\x00\x00\x07ssh-dq\x0fI\xe5\xb6\x13\x13&'
+```
+```
+$ artifacts/salt/bin/python3
+Python 3.11.3 (main, May 5 2023, 02:31:40) [GCC 11.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import base64
+>>> base64.b64decode("AAAAB3NzaC1kcQ9J5bYTEyZ==", validate=True)
+Traceback (most recent call last):
+ File "", line 1, in
+ File "/tmp/testing/artifacts/salt/lib/python3.11/base64.py", line 88, in b64decode
+ return binascii.a2b_base64(s, strict_mode=validate)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+binascii.Error: Excess data after padding
+```
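+
+The test fix is therefore to use canonically padded key material; a
+structurally valid stand-in can be generated like this (illustrative, not
+the actual test data):
+
+```python
+import base64, os
+
+key = base64.b64encode(os.urandom(18)).decode()  # 24 chars, no excess bits
+base64.b64decode(key, validate=True)             # passes Py 3.11 strict mode
+```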
+
+Signed-off-by: Pedro Algarvio
+
+---------
+
+Signed-off-by: Pedro Algarvio
+Co-authored-by: Pedro Algarvio
+Co-authored-by: Marek Czernek
+---
+ salt/__init__.py | 43 ++-
+ salt/grains/core.py | 108 +++----
+ salt/modules/container_resource.py | 74 ++---
+ salt/modules/deb_postgres.py | 16 +-
+ salt/modules/dockermod.py | 178 +++++------
+ salt/modules/lxc.py | 50 +--
+ salt/modules/mac_keychain.py | 32 +-
+ salt/modules/macpackage.py | 45 +--
+ salt/modules/openstack_config.py | 41 +--
+ salt/modules/postgres.py | 116 +++----
+ salt/utils/cloud.py | 300 ++++++++----------
+ salt/utils/http.py | 48 ++-
+ salt/utils/jinja.py | 25 +-
+ salt/utils/locales.py | 39 ++-
+ tests/integration/states/test_ssh_auth.py | 50 ++-
+ .../pytests/unit/modules/state/test_state.py | 2 +-
+ tests/unit/states/test_module.py | 56 ++--
+ tests/unit/test_master.py | 8 +-
+ 18 files changed, 586 insertions(+), 645 deletions(-)
+
+diff --git a/salt/__init__.py b/salt/__init__.py
+index e06b8ad7127..b5fe3677c22 100644
+--- a/salt/__init__.py
++++ b/salt/__init__.py
+@@ -72,6 +72,44 @@ warnings.filterwarnings(
+ )
+
+
++def __getdefaultlocale(envvars=("LC_ALL", "LC_CTYPE", "LANG", "LANGUAGE")):
++ """
++ This function was backported from Py3.11 which started triggering a
++ deprecation warning about its removal in 3.13.
++ """
++ import locale
++
++ try:
++ # check if it's supported by the _locale module
++ import _locale
++
++ code, encoding = _locale._getdefaultlocale()
++ except (ImportError, AttributeError):
++ pass
++ else:
++ # make sure the code/encoding values are valid
++ if sys.platform == "win32" and code and code[:2] == "0x":
++ # map windows language identifier to language name
++ code = locale.windows_locale.get(int(code, 0))
++ # ...add other platform-specific processing here, if
++ # necessary...
++ return code, encoding
++
++ # fall back on POSIX behaviour
++ import os
++
++ lookup = os.environ.get
++ for variable in envvars:
++ localename = lookup(variable, None)
++ if localename:
++ if variable == "LANGUAGE":
++ localename = localename.split(":")[0]
++ break
++ else:
++ localename = "C"
++ return locale._parse_localename(localename)
++
++
+ def __define_global_system_encoding_variable__():
+ import sys
+
+@@ -90,17 +128,14 @@ def __define_global_system_encoding_variable__():
+ # If the system is properly configured this should return a valid
+ # encoding. MS Windows has problems with this and reports the wrong
+ # encoding
+- import locale
+
+ try:
+- encoding = locale.getdefaultlocale()[-1]
++ encoding = __getdefaultlocale()[-1]
+ except ValueError:
+ # A bad locale setting was most likely found:
+ # https://github.com/saltstack/salt/issues/26063
+ pass
+
+- # This is now garbage collectable
+- del locale
+ if not encoding:
+ # This is most likely ascii which is not the best but we were
+ # unable to find a better encoding. If this fails, we fall all
+diff --git a/salt/grains/core.py b/salt/grains/core.py
+index 5c125563461..4454c303fed 100644
+--- a/salt/grains/core.py
++++ b/salt/grains/core.py
+@@ -11,7 +11,6 @@ as those returned here
+
+ import datetime
+ import hashlib
+-import locale
+ import logging
+ import os
+ import platform
+@@ -34,6 +33,7 @@ import salt.modules.smbios
+ import salt.utils.args
+ import salt.utils.dns
+ import salt.utils.files
++import salt.utils.locales
+ import salt.utils.network
+ import salt.utils.path
+ import salt.utils.pkg.rpm
+@@ -290,7 +290,7 @@ def _linux_gpu_data():
+
+ devs = []
+ try:
+- lspci_out = __salt__["cmd.run"]("{} -vmm".format(lspci))
++ lspci_out = __salt__["cmd.run"](f"{lspci} -vmm")
+
+ cur_dev = {}
+ error = False
+@@ -364,7 +364,7 @@ def _netbsd_gpu_data():
+ for line in pcictl_out.splitlines():
+ for vendor in known_vendors:
+ vendor_match = re.match(
+- r"[0-9:]+ ({}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE
++ rf"[0-9:]+ ({vendor}) (.+) \(VGA .+\)", line, re.IGNORECASE
+ )
+ if vendor_match:
+ gpus.append(
+@@ -426,18 +426,18 @@ def _bsd_cpudata(osdata):
+ if sysctl:
+ cmds.update(
+ {
+- "num_cpus": "{} -n hw.ncpu".format(sysctl),
+- "cpuarch": "{} -n hw.machine".format(sysctl),
+- "cpu_model": "{} -n hw.model".format(sysctl),
++ "num_cpus": f"{sysctl} -n hw.ncpu",
++ "cpuarch": f"{sysctl} -n hw.machine",
++ "cpu_model": f"{sysctl} -n hw.model",
+ }
+ )
+
+ if arch and osdata["kernel"] == "OpenBSD":
+- cmds["cpuarch"] = "{} -s".format(arch)
++ cmds["cpuarch"] = f"{arch} -s"
+
+ if osdata["kernel"] == "Darwin":
+- cmds["cpu_model"] = "{} -n machdep.cpu.brand_string".format(sysctl)
+- cmds["cpu_flags"] = "{} -n machdep.cpu.features".format(sysctl)
++ cmds["cpu_model"] = f"{sysctl} -n machdep.cpu.brand_string"
++ cmds["cpu_flags"] = f"{sysctl} -n machdep.cpu.features"
+
+ grains = {k: __salt__["cmd.run"](v) for k, v in cmds.items()}
+
+@@ -522,7 +522,7 @@ def _aix_cpudata():
+ grains = {}
+ cmd = salt.utils.path.which("prtconf")
+ if cmd:
+- data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep
++ data = __salt__["cmd.run"](f"{cmd}") + os.linesep
+ for dest, regstring in (
+ ("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
+ ("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
+@@ -568,9 +568,9 @@ def _osx_memdata():
+
+ sysctl = salt.utils.path.which("sysctl")
+ if sysctl:
+- mem = __salt__["cmd.run"]("{} -n hw.memsize".format(sysctl))
++ mem = __salt__["cmd.run"](f"{sysctl} -n hw.memsize")
+ swap_total = (
+- __salt__["cmd.run"]("{} -n vm.swapusage".format(sysctl))
++ __salt__["cmd.run"](f"{sysctl} -n vm.swapusage")
+ .split()[2]
+ .replace(",", ".")
+ )
+@@ -595,20 +595,20 @@ def _bsd_memdata(osdata):
+
+ sysctl = salt.utils.path.which("sysctl")
+ if sysctl:
+- mem = __salt__["cmd.run"]("{} -n hw.physmem".format(sysctl))
++ mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem")
+ if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
+- mem = __salt__["cmd.run"]("{} -n hw.physmem64".format(sysctl))
++ mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem64")
+ grains["mem_total"] = int(mem) // 1024 // 1024
+
+ if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
+ swapctl = salt.utils.path.which("swapctl")
+- swap_data = __salt__["cmd.run"]("{} -sk".format(swapctl))
++ swap_data = __salt__["cmd.run"](f"{swapctl} -sk")
+ if swap_data == "no swap devices configured":
+ swap_total = 0
+ else:
+ swap_total = swap_data.split(" ")[1]
+ else:
+- swap_total = __salt__["cmd.run"]("{} -n vm.swap_total".format(sysctl))
++ swap_total = __salt__["cmd.run"](f"{sysctl} -n vm.swap_total")
+ grains["swap_total"] = int(swap_total) // 1024 // 1024
+ return grains
+
+@@ -626,7 +626,7 @@ def _sunos_memdata():
+ grains["mem_total"] = int(comps[2].strip())
+
+ swap_cmd = salt.utils.path.which("swap")
+- swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split()
++ swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
+ try:
+ swap_avail = int(swap_data[-2][:-1])
+ swap_used = int(swap_data[-4][:-1])
+@@ -654,7 +654,7 @@ def _aix_memdata():
+
+ swap_cmd = salt.utils.path.which("swap")
+ if swap_cmd:
+- swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split()
++ swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
+ try:
+ swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
+ except ValueError:
+@@ -707,7 +707,7 @@ def _aix_get_machine_id():
+ grains = {}
+ cmd = salt.utils.path.which("lsattr")
+ if cmd:
+- data = __salt__["cmd.run"]("{} -El sys0".format(cmd)) + os.linesep
++ data = __salt__["cmd.run"](f"{cmd} -El sys0") + os.linesep
+ uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
+ for regex in uuid_regexes:
+ res = regex.search(data)
+@@ -1018,7 +1018,7 @@ def _virtual(osdata):
+ subtype_cmd = "{} -c current get -H -o value {}-role".format(
+ command, role
+ )
+- ret = __salt__["cmd.run"]("{}".format(subtype_cmd))
++ ret = __salt__["cmd.run"](f"{subtype_cmd}")
+ if ret == "true":
+ roles.append(role)
+ if roles:
+@@ -1164,14 +1164,14 @@ def _virtual(osdata):
+ elif osdata["kernel"] == "FreeBSD":
+ kenv = salt.utils.path.which("kenv")
+ if kenv:
+- product = __salt__["cmd.run"]("{} smbios.system.product".format(kenv))
+- maker = __salt__["cmd.run"]("{} smbios.system.maker".format(kenv))
++ product = __salt__["cmd.run"](f"{kenv} smbios.system.product")
++ maker = __salt__["cmd.run"](f"{kenv} smbios.system.maker")
+ if product.startswith("VMware"):
+ grains["virtual"] = "VMware"
+ if product.startswith("VirtualBox"):
+ grains["virtual"] = "VirtualBox"
+ if maker.startswith("Xen"):
+- grains["virtual_subtype"] = "{} {}".format(maker, product)
++ grains["virtual_subtype"] = f"{maker} {product}"
+ grains["virtual"] = "xen"
+ if maker.startswith("Microsoft") and product.startswith("Virtual"):
+ grains["virtual"] = "VirtualPC"
+@@ -1182,9 +1182,9 @@ def _virtual(osdata):
+ if maker.startswith("Amazon EC2"):
+ grains["virtual"] = "Nitro"
+ if sysctl:
+- hv_vendor = __salt__["cmd.run"]("{} -n hw.hv_vendor".format(sysctl))
+- model = __salt__["cmd.run"]("{} -n hw.model".format(sysctl))
+- jail = __salt__["cmd.run"]("{} -n security.jail.jailed".format(sysctl))
++ hv_vendor = __salt__["cmd.run"](f"{sysctl} -n hw.hv_vendor")
++ model = __salt__["cmd.run"](f"{sysctl} -n hw.model")
++ jail = __salt__["cmd.run"](f"{sysctl} -n security.jail.jailed")
+ if "bhyve" in hv_vendor:
+ grains["virtual"] = "bhyve"
+ elif "QEMU Virtual CPU" in model:
+@@ -1200,22 +1200,19 @@ def _virtual(osdata):
+ elif osdata["kernel"] == "NetBSD":
+ if sysctl:
+ if "QEMU Virtual CPU" in __salt__["cmd.run"](
+- "{} -n machdep.cpu_brand".format(sysctl)
++ f"{sysctl} -n machdep.cpu_brand"
+ ):
+ grains["virtual"] = "kvm"
+ elif "invalid" not in __salt__["cmd.run"](
+- "{} -n machdep.xen.suspend".format(sysctl)
++ f"{sysctl} -n machdep.xen.suspend"
+ ):
+ grains["virtual"] = "Xen PV DomU"
+ elif "VMware" in __salt__["cmd.run"](
+- "{} -n machdep.dmi.system-vendor".format(sysctl)
++ f"{sysctl} -n machdep.dmi.system-vendor"
+ ):
+ grains["virtual"] = "VMware"
+ # NetBSD has Xen dom0 support
+- elif (
+- __salt__["cmd.run"]("{} -n machdep.idle-mechanism".format(sysctl))
+- == "xen"
+- ):
++ elif __salt__["cmd.run"](f"{sysctl} -n machdep.idle-mechanism") == "xen":
+ if os.path.isfile("/var/run/xenconsoled.pid"):
+ grains["virtual_subtype"] = "Xen Dom0"
+ elif osdata["kernel"] == "SunOS":
+@@ -1223,7 +1220,7 @@ def _virtual(osdata):
+ # check the zonename here as fallback
+ zonename = salt.utils.path.which("zonename")
+ if zonename:
+- zone = __salt__["cmd.run"]("{}".format(zonename))
++ zone = __salt__["cmd.run"](f"{zonename}")
+ if zone != "global":
+ grains["virtual"] = "zone"
+
+@@ -1252,7 +1249,7 @@ def _virtual(osdata):
+ r".*Product Name: ([^\r\n]*).*", output, flags=re.DOTALL
+ )
+ if product:
+- grains["virtual_subtype"] = "Amazon EC2 ({})".format(product[1])
++ grains["virtual_subtype"] = f"Amazon EC2 ({product[1]})"
+ elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL):
+ grains["virtual_subtype"] = "Amazon EC2"
+
+@@ -1284,9 +1281,7 @@ def _virtual_hv(osdata):
+ try:
+ version = {}
+ for fn in ("major", "minor", "extra"):
+- with salt.utils.files.fopen(
+- "/sys/hypervisor/version/{}".format(fn), "r"
+- ) as fhr:
++ with salt.utils.files.fopen(f"/sys/hypervisor/version/{fn}", "r") as fhr:
+ version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
+ grains["virtual_hv_version"] = "{}.{}{}".format(
+ version["major"], version["minor"], version["extra"]
+@@ -1442,7 +1437,7 @@ def _windows_os_release_grain(caption, product_type):
+ # ie: R2
+ if re.match(r"^R\d+$", item):
+ release = item
+- os_release = "{}Server{}".format(version, release)
++ os_release = f"{version}Server{release}"
+ else:
+ for item in caption.split(" "):
+ # If it's a number, decimal number, Thin or Vista, then it's the
+@@ -1633,7 +1628,7 @@ def _linux_devicetree_platform_data():
+ try:
+ # /proc/device-tree should be used instead of /sys/firmware/devicetree/base
+ # see https://github.com/torvalds/linux/blob/v5.13/Documentation/ABI/testing/sysfs-firmware-ofw#L14
+- loc = "/proc/device-tree/{}".format(path)
++ loc = f"/proc/device-tree/{path}"
+ if os.path.isfile(loc):
+ with salt.utils.files.fopen(loc, mode="r") as f:
+ return f.read().rstrip("\x00") # all strings are null-terminated
+@@ -1872,18 +1867,13 @@ def _linux_bin_exists(binary):
+ """
+ for search_cmd in ("which", "type -ap"):
+ try:
+- return __salt__["cmd.retcode"]("{} {}".format(search_cmd, binary)) == 0
++ return __salt__["cmd.retcode"](f"{search_cmd} {binary}") == 0
+ except salt.exceptions.CommandExecutionError:
+ pass
+
+ try:
+ return (
+- len(
+- __salt__["cmd.run_all"]("whereis -b {}".format(binary))[
+- "stdout"
+- ].split()
+- )
+- > 1
++ len(__salt__["cmd.run_all"](f"whereis -b {binary}")["stdout"].split()) > 1
+ )
+ except salt.exceptions.CommandExecutionError:
+ return False
+@@ -1901,7 +1891,7 @@ def _parse_lsb_release():
+ pass
+ else:
+ # Adds lsb_distrib_{id,release,codename,description}
+- ret["lsb_{}".format(key.lower())] = value.rstrip()
++ ret[f"lsb_{key.lower()}"] = value.rstrip()
+ except OSError as exc:
+ log.trace("Failed to parse /etc/lsb-release: %s", exc)
+ return ret
+@@ -2634,7 +2624,7 @@ def os_data():
+ osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
+ grains["os"] = "MacOS"
+ grains["os_family"] = "MacOS"
+- grains["osfullname"] = "{} {}".format(osname, osrelease)
++ grains["osfullname"] = f"{osname} {osrelease}"
+ grains["osrelease"] = osrelease
+ grains["osbuild"] = osbuild
+ grains["init"] = "launchd"
+@@ -2708,7 +2698,7 @@ def locale_info():
+ (
+ grains["locale_info"]["defaultlanguage"],
+ grains["locale_info"]["defaultencoding"],
+- ) = locale.getdefaultlocale()
++ ) = salt.utils.locales.getdefaultlocale()
+ except Exception: # pylint: disable=broad-except
+ # locale.getdefaultlocale can ValueError!! Catch anything else it
+ # might do, per #2205
+@@ -3175,7 +3165,7 @@ def _hw_data(osdata):
+ "productname": "DeviceDesc",
+ }
+ for grain_name, cmd_key in hwdata.items():
+- result = __salt__["cmd.run_all"]("fw_printenv {}".format(cmd_key))
++ result = __salt__["cmd.run_all"](f"fw_printenv {cmd_key}")
+ if result["retcode"] == 0:
+ uboot_keyval = result["stdout"].split("=")
+ grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
+@@ -3195,7 +3185,7 @@ def _hw_data(osdata):
+ "uuid": "smbios.system.uuid",
+ }
+ for key, val in fbsd_hwdata.items():
+- value = __salt__["cmd.run"]("{} {}".format(kenv, val))
++ value = __salt__["cmd.run"](f"{kenv} {val}")
+ grains[key] = _clean_value(key, value)
+ elif osdata["kernel"] == "OpenBSD":
+ sysctl = salt.utils.path.which("sysctl")
+@@ -3207,7 +3197,7 @@ def _hw_data(osdata):
+ "uuid": "hw.uuid",
+ }
+ for key, oid in hwdata.items():
+- value = __salt__["cmd.run"]("{} -n {}".format(sysctl, oid))
++ value = __salt__["cmd.run"](f"{sysctl} -n {oid}")
+ if not value.endswith(" value is not available"):
+ grains[key] = _clean_value(key, value)
+ elif osdata["kernel"] == "NetBSD":
+@@ -3222,7 +3212,7 @@ def _hw_data(osdata):
+ "uuid": "machdep.dmi.system-uuid",
+ }
+ for key, oid in nbsd_hwdata.items():
+- result = __salt__["cmd.run_all"]("{} -n {}".format(sysctl, oid))
++ result = __salt__["cmd.run_all"](f"{sysctl} -n {oid}")
+ if result["retcode"] == 0:
+ grains[key] = _clean_value(key, result["stdout"])
+ elif osdata["kernel"] == "Darwin":
+@@ -3230,7 +3220,7 @@ def _hw_data(osdata):
+ sysctl = salt.utils.path.which("sysctl")
+ hwdata = {"productname": "hw.model"}
+ for key, oid in hwdata.items():
+- value = __salt__["cmd.run"]("{} -b {}".format(sysctl, oid))
++ value = __salt__["cmd.run"](f"{sysctl} -b {oid}")
+ if not value.endswith(" is invalid"):
+ grains[key] = _clean_value(key, value)
+ elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):
+@@ -3244,7 +3234,7 @@ def _hw_data(osdata):
+ ("/usr/sbin/virtinfo", "-a"),
+ ):
+ if salt.utils.path.which(cmd): # Also verifies that cmd is executable
+- data += __salt__["cmd.run"]("{} {}".format(cmd, args))
++ data += __salt__["cmd.run"](f"{cmd} {args}")
+ data += "\n"
+
+ sn_regexes = [
+@@ -3359,7 +3349,7 @@ def _hw_data(osdata):
+ elif osdata["kernel"] == "AIX":
+ cmd = salt.utils.path.which("prtconf")
+ if cmd:
+- data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep
++ data = __salt__["cmd.run"](f"{cmd}") + os.linesep
+ for dest, regstring in (
+ ("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
+ ("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),
+@@ -3480,14 +3470,14 @@ def default_gateway():
+ for line in out.splitlines():
+ if line.startswith("default"):
+ grains["ip_gw"] = True
+- grains["ip{}_gw".format(ip_version)] = True
++ grains[f"ip{ip_version}_gw"] = True
+ try:
+ via, gw_ip = line.split()[1:3]
+ except ValueError:
+ pass
+ else:
+ if via == "via":
+- grains["ip{}_gw".format(ip_version)] = gw_ip
++ grains[f"ip{ip_version}_gw"] = gw_ip
+ break
+ except Exception: # pylint: disable=broad-except
+ continue
+diff --git a/salt/modules/container_resource.py b/salt/modules/container_resource.py
+index a29cba2e468..ceec72a7b20 100644
+--- a/salt/modules/container_resource.py
++++ b/salt/modules/container_resource.py
+@@ -8,13 +8,11 @@ These functions are not designed to be called directly, but instead from the
+ :mod:`docker <salt.modules.dockermod>` execution modules. They provide for
+ common logic to be re-used for common actions.
+ """
+-
+-
+ import copy
+ import functools
+ import logging
+ import os
+-import pipes
++import shlex
+ import time
+ import traceback
+
+@@ -68,14 +66,14 @@ def _nsenter(pid):
+ """
+ Return the nsenter command to attach to the named container
+ """
+- return "nsenter --target {} --mount --uts --ipc --net --pid".format(pid)
++ return f"nsenter --target {pid} --mount --uts --ipc --net --pid"
+
+
+ def _get_md5(name, path, run_func):
+ """
+ Get the MD5 checksum of a file from a container
+ """
+- output = run_func(name, "md5sum {}".format(pipes.quote(path)), ignore_retcode=True)[
++ output = run_func(name, f"md5sum {shlex.quote(path)}", ignore_retcode=True)[
+ "stdout"
+ ]
+ try:
+@@ -102,10 +100,10 @@ def cache_file(source):
+ if source.startswith("salt://"):
+ cached_source = __salt__["cp.cache_file"](source)
+ if not cached_source:
+- raise CommandExecutionError("Unable to cache {}".format(source))
++ raise CommandExecutionError(f"Unable to cache {source}")
+ return cached_source
+ except AttributeError:
+- raise SaltInvocationError("Invalid source file {}".format(source))
++ raise SaltInvocationError(f"Invalid source file {source}")
+ return source
+
+
+@@ -164,55 +162,47 @@ def run(
+ if exec_driver == "lxc-attach":
+ full_cmd = "lxc-attach "
+ if path:
+- full_cmd += "-P {} ".format(pipes.quote(path))
++ full_cmd += f"-P {shlex.quote(path)} "
+ if keep_env is not True:
+ full_cmd += "--clear-env "
+ if "PATH" not in to_keep:
+- full_cmd += "--set-var {} ".format(PATH)
++ full_cmd += f"--set-var {PATH} "
+ # --clear-env results in a very restrictive PATH
+ # (/bin:/usr/bin), use a good fallback.
+ full_cmd += " ".join(
+ [
+- "--set-var {}={}".format(x, pipes.quote(os.environ[x]))
++ f"--set-var {x}={shlex.quote(os.environ[x])}"
+ for x in to_keep
+ if x in os.environ
+ ]
+ )
+- full_cmd += " -n {} -- {}".format(pipes.quote(name), cmd)
++ full_cmd += f" -n {shlex.quote(name)} -- {cmd}"
+ elif exec_driver == "nsenter":
+- pid = __salt__["{}.pid".format(container_type)](name)
+- full_cmd = "nsenter --target {} --mount --uts --ipc --net --pid -- ".format(pid)
++ pid = __salt__[f"{container_type}.pid"](name)
++ full_cmd = f"nsenter --target {pid} --mount --uts --ipc --net --pid -- "
+ if keep_env is not True:
+ full_cmd += "env -i "
+ if "PATH" not in to_keep:
+- full_cmd += "{} ".format(PATH)
++ full_cmd += f"{PATH} "
+ full_cmd += " ".join(
+- [
+- "{}={}".format(x, pipes.quote(os.environ[x]))
+- for x in to_keep
+- if x in os.environ
+- ]
++ [f"{x}={shlex.quote(os.environ[x])}" for x in to_keep if x in os.environ]
+ )
+- full_cmd += " {}".format(cmd)
++ full_cmd += f" {cmd}"
+ elif exec_driver == "docker-exec":
+ # We're using docker exec on the CLI as opposed to via docker-py, since
+ # the Docker API doesn't return stdout and stderr separately.
+ full_cmd = "docker exec "
+ if stdin:
+ full_cmd += "-i "
+- full_cmd += "{} ".format(name)
++ full_cmd += f"{name} "
+ if keep_env is not True:
+ full_cmd += "env -i "
+ if "PATH" not in to_keep:
+- full_cmd += "{} ".format(PATH)
++ full_cmd += f"{PATH} "
+ full_cmd += " ".join(
+- [
+- "{}={}".format(x, pipes.quote(os.environ[x]))
+- for x in to_keep
+- if x in os.environ
+- ]
++ [f"{x}={shlex.quote(os.environ[x])}" for x in to_keep if x in os.environ]
+ )
+- full_cmd += " {}".format(cmd)
++ full_cmd += f" {cmd}"
+
+ if not use_vt:
+ ret = __salt__[cmd_func](
+@@ -299,13 +289,13 @@ def copy_to(
+ salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
+ """
+ # Get the appropriate functions
+- state = __salt__["{}.state".format(container_type)]
++ state = __salt__[f"{container_type}.state"]
+
+ def run_all(*args, **akwargs):
+ akwargs = copy.deepcopy(akwargs)
+ if container_type in ["lxc"] and "path" not in akwargs:
+ akwargs["path"] = path
+- return __salt__["{}.run_all".format(container_type)](*args, **akwargs)
++ return __salt__[f"{container_type}.run_all"](*args, **akwargs)
+
+ state_kwargs = {}
+ cmd_kwargs = {"ignore_retcode": True}
+@@ -321,7 +311,7 @@ def copy_to(
+
+ c_state = _state(name)
+ if c_state != "running":
+- raise CommandExecutionError("Container '{}' is not running".format(name))
++ raise CommandExecutionError(f"Container '{name}' is not running")
+
+ local_file = cache_file(source)
+ source_dir, source_name = os.path.split(local_file)
+@@ -330,17 +320,14 @@ def copy_to(
+ if not os.path.isabs(local_file):
+ raise SaltInvocationError("Source path must be absolute")
+ elif not os.path.exists(local_file):
+- raise SaltInvocationError("Source file {} does not exist".format(local_file))
++ raise SaltInvocationError(f"Source file {local_file} does not exist")
+ elif not os.path.isfile(local_file):
+ raise SaltInvocationError("Source must be a regular file")
+
+ # Destination file sanity checks
+ if not os.path.isabs(dest):
+ raise SaltInvocationError("Destination path must be absolute")
+- if (
+- run_all(name, "test -d {}".format(pipes.quote(dest)), **cmd_kwargs)["retcode"]
+- == 0
+- ):
++ if run_all(name, f"test -d {shlex.quote(dest)}", **cmd_kwargs)["retcode"] == 0:
+ # Destination is a directory, full path to dest file will include the
+ # basename of the source file.
+ dest = os.path.join(dest, source_name)
+@@ -350,14 +337,12 @@ def copy_to(
+ # parent directory.
+ dest_dir, dest_name = os.path.split(dest)
+ if (
+- run_all(name, "test -d {}".format(pipes.quote(dest_dir)), **cmd_kwargs)[
+- "retcode"
+- ]
++ run_all(name, f"test -d {shlex.quote(dest_dir)}", **cmd_kwargs)["retcode"]
+ != 0
+ ):
+ if makedirs:
+ result = run_all(
+- name, "mkdir -p {}".format(pipes.quote(dest_dir)), **cmd_kwargs
++ name, f"mkdir -p {shlex.quote(dest_dir)}", **cmd_kwargs
+ )
+ if result["retcode"] != 0:
+ error = (
+@@ -375,10 +360,7 @@ def copy_to(
+ )
+ if (
+ not overwrite
+- and run_all(name, "test -e {}".format(pipes.quote(dest)), **cmd_kwargs)[
+- "retcode"
+- ]
+- == 0
++ and run_all(name, f"test -e {shlex.quote(dest)}", **cmd_kwargs)["retcode"] == 0
+ ):
+ raise CommandExecutionError(
+ "Destination path {} already exists. Use overwrite=True to "
+@@ -401,14 +383,14 @@ def copy_to(
+ if exec_driver == "lxc-attach":
+ lxcattach = "lxc-attach"
+ if path:
+- lxcattach += " -P {}".format(pipes.quote(path))
++ lxcattach += f" -P {shlex.quote(path)}"
+ copy_cmd = (
+ 'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- tee "{3}"'.format(
+ local_file, PATH, name, dest, lxcattach
+ )
+ )
+ elif exec_driver == "nsenter":
+- pid = __salt__["{}.pid".format(container_type)](name)
++ pid = __salt__[f"{container_type}.pid"](name)
+ copy_cmd = 'cat "{}" | {} env -i {} tee "{}"'.format(
+ local_file, _nsenter(pid), PATH, dest
+ )
+diff --git a/salt/modules/deb_postgres.py b/salt/modules/deb_postgres.py
+index 3ecd4a8ba49..d92859562d4 100644
+--- a/salt/modules/deb_postgres.py
++++ b/salt/modules/deb_postgres.py
+@@ -2,10 +2,8 @@
+ Module to provide Postgres compatibility to salt for debian family specific tools.
+
+ """
+-
+-
+ import logging
+-import pipes
++import shlex
+
+ import salt.utils.path
+
+@@ -76,7 +74,7 @@ def cluster_create(
+ cmd += ["--data-checksums"]
+ if wal_segsize:
+ cmd += ["--wal-segsize", wal_segsize]
+- cmdstr = " ".join([pipes.quote(c) for c in cmd])
++ cmdstr = " ".join([shlex.quote(c) for c in cmd])
+ ret = __salt__["cmd.run_all"](cmdstr, python_shell=False)
+ if ret.get("retcode", 0) != 0:
+ log.error("Error creating a Postgresql cluster %s/%s", version, name)
+@@ -97,7 +95,7 @@ def cluster_list(verbose=False):
+ salt '*' postgres.cluster_list verbose=True
+ """
+ cmd = [salt.utils.path.which("pg_lsclusters"), "--no-header"]
+- ret = __salt__["cmd.run_all"](" ".join([pipes.quote(c) for c in cmd]))
++ ret = __salt__["cmd.run_all"](" ".join([shlex.quote(c) for c in cmd]))
+ if ret.get("retcode", 0) != 0:
+ log.error("Error listing clusters")
+ cluster_dict = _parse_pg_lscluster(ret["stdout"])
+@@ -118,7 +116,7 @@ def cluster_exists(version, name="main"):
+
+ salt '*' postgres.cluster_exists '9.3' 'main'
+ """
+- return "{}/{}".format(version, name) in cluster_list()
++ return f"{version}/{name}" in cluster_list()
+
+
+ def cluster_remove(version, name="main", stop=False):
+@@ -141,13 +139,13 @@ def cluster_remove(version, name="main", stop=False):
+ if stop:
+ cmd += ["--stop"]
+ cmd += [str(version), name]
+- cmdstr = " ".join([pipes.quote(c) for c in cmd])
++ cmdstr = " ".join([shlex.quote(c) for c in cmd])
+ ret = __salt__["cmd.run_all"](cmdstr, python_shell=False)
+ # FIXME - return Boolean ?
+ if ret.get("retcode", 0) != 0:
+ log.error("Error removing a Postgresql cluster %s/%s", version, name)
+ else:
+- ret["changes"] = "Successfully removed cluster {}/{}".format(version, name)
++ ret["changes"] = f"Successfully removed cluster {version}/{name}"
+ return ret
+
+
+@@ -158,7 +156,7 @@ def _parse_pg_lscluster(output):
+ cluster_dict = {}
+ for line in output.splitlines():
+ version, name, port, status, user, datadir, log = line.split()
+- cluster_dict["{}/{}".format(version, name)] = {
++ cluster_dict[f"{version}/{name}"] = {
+ "port": int(port),
+ "status": status,
+ "user": user,
+diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
+index 69b722f0c95..331b6bb7482 100644
+--- a/salt/modules/dockermod.py
++++ b/salt/modules/dockermod.py
+@@ -206,8 +206,8 @@ import json
+ import logging
+ import os
+ import pathlib
+-import pipes
+ import re
++import shlex
+ import shutil
+ import string
+ import subprocess
+@@ -257,7 +257,6 @@ except ImportError:
+
+ HAS_NSENTER = bool(salt.utils.path.which("nsenter"))
+
+-# Set up logging
+ log = logging.getLogger(__name__)
+
+ # Don't shadow built-in's.
+@@ -397,7 +396,7 @@ def _get_client(timeout=NOTSET, **kwargs):
+ )
+ except Exception as exc: # pylint: disable=broad-except
+ raise CommandExecutionError(
+- "Docker machine {} failed: {}".format(docker_machine, exc)
++ f"Docker machine {docker_machine} failed: {exc}"
+ )
+ try:
+ # docker-py 2.0 renamed this client attribute
+@@ -497,7 +496,7 @@ def _change_state(name, action, expected, *args, **kwargs):
+ return {
+ "result": False,
+ "state": {"old": expected, "new": expected},
+- "comment": "Container '{}' already {}".format(name, expected),
++ "comment": f"Container '{name}' already {expected}",
+ }
+ _client_wrapper(action, name, *args, **kwargs)
+ _clear_context()
+@@ -535,9 +534,7 @@ def _get_md5(name, path):
+ """
+ Get the MD5 checksum of a file from a container
+ """
+- output = run_stdout(
+- name, "md5sum {}".format(pipes.quote(path)), ignore_retcode=True
+- )
++ output = run_stdout(name, f"md5sum {shlex.quote(path)}", ignore_retcode=True)
+ try:
+ return output.split()[0]
+ except IndexError:
+@@ -616,7 +613,7 @@ def _scrub_links(links, name):
+ if isinstance(links, list):
+ ret = []
+ for l in links:
+- ret.append(l.replace("/{}/".format(name), "/", 1))
++ ret.append(l.replace(f"/{name}/", "/", 1))
+ else:
+ ret = links
+
+@@ -639,11 +636,11 @@ def _size_fmt(num):
+ try:
+ num = int(num)
+ if num < 1024:
+- return "{} bytes".format(num)
++ return f"{num} bytes"
+ num /= 1024.0
+ for unit in ("KiB", "MiB", "GiB", "TiB", "PiB"):
+ if num < 1024.0:
+- return "{:3.1f} {}".format(num, unit)
++ return f"{num:3.1f} {unit}"
+ num /= 1024.0
+ except Exception: # pylint: disable=broad-except
+ log.error("Unable to format file size for '%s'", num)
+@@ -658,7 +655,7 @@ def _client_wrapper(attr, *args, **kwargs):
+ catch_api_errors = kwargs.pop("catch_api_errors", True)
+ func = getattr(__context__["docker.client"], attr, None)
+ if func is None or not hasattr(func, "__call__"):
+- raise SaltInvocationError("Invalid client action '{}'".format(attr))
++ raise SaltInvocationError(f"Invalid client action '{attr}'")
+ if attr in ("push", "pull"):
+ try:
+ # Refresh auth config from config.json
+@@ -678,7 +675,7 @@ def _client_wrapper(attr, *args, **kwargs):
+ if catch_api_errors:
+ # Generic handling of Docker API errors
+ raise CommandExecutionError(
+- "Error {}: {}".format(exc.response.status_code, exc.explanation)
++ f"Error {exc.response.status_code}: {exc.explanation}"
+ )
+ else:
+ # Allow API errors to be caught further up the stack
+@@ -693,9 +690,9 @@ def _client_wrapper(attr, *args, **kwargs):
+
+ # If we're here, it's because an exception was caught earlier, and the
+ # API command failed.
+- msg = "Unable to perform {}".format(attr)
++ msg = f"Unable to perform {attr}"
+ if err:
+- msg += ": {}".format(err)
++ msg += f": {err}"
+ raise CommandExecutionError(msg)
+
+
+@@ -722,7 +719,7 @@ def _import_status(data, item, repo_name, repo_tag):
+ return
+ elif all(x in string.hexdigits for x in status):
+ # Status is an image ID
+- data["Image"] = "{}:{}".format(repo_name, repo_tag)
++ data["Image"] = f"{repo_name}:{repo_tag}"
+ data["Id"] = status
+ except (AttributeError, TypeError):
+ pass
+@@ -881,7 +878,7 @@ def _get_create_kwargs(
+ ignore_collisions=False,
+ validate_ip_addrs=True,
+ client_args=None,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Take input kwargs and return a kwargs dict to pass to docker-py's
+@@ -899,7 +896,7 @@ def _get_create_kwargs(
+ skip_translate=skip_translate,
+ ignore_collisions=ignore_collisions,
+ validate_ip_addrs=validate_ip_addrs,
+- **__utils__["args.clean_kwargs"](**kwargs)
++ **__utils__["args.clean_kwargs"](**kwargs),
+ )
+
+ if networks:
+@@ -912,7 +909,7 @@ def _get_create_kwargs(
+ log.error(
+ "docker.create: Error getting client args: '%s'", exc, exc_info=True
+ )
+- raise CommandExecutionError("Failed to get client args: {}".format(exc))
++ raise CommandExecutionError(f"Failed to get client args: {exc}")
+
+ full_host_config = {}
+ host_kwargs = {}
+@@ -1473,15 +1470,15 @@ def login(*registries):
+ results = ret.setdefault("Results", {})
+ for registry in registries:
+ if registry not in registry_auth:
+- errors.append("No match found for registry '{}'".format(registry))
++ errors.append(f"No match found for registry '{registry}'")
+ continue
+ try:
+ username = registry_auth[registry]["username"]
+ password = registry_auth[registry]["password"]
+ except TypeError:
+- errors.append("Invalid configuration for registry '{}'".format(registry))
++ errors.append(f"Invalid configuration for registry '{registry}'")
+ except KeyError as exc:
+- errors.append("Missing {} for registry '{}'".format(exc, registry))
++ errors.append(f"Missing {exc} for registry '{registry}'")
+ else:
+ cmd = ["docker", "login", "-u", username, "-p", password]
+ if registry.lower() != "hub":
+@@ -1567,7 +1564,7 @@ def logout(*registries):
+ results = ret.setdefault("Results", {})
+ for registry in registries:
+ if registry not in registry_auth:
+- errors.append("No match found for registry '{}'".format(registry))
++ errors.append(f"No match found for registry '{registry}'")
+ continue
+ else:
+ cmd = ["docker", "logout"]
+@@ -1689,7 +1686,7 @@ def exists(name):
+
+ salt myminion docker.exists mycontainer
+ """
+- contextkey = "docker.exists.{}".format(name)
++ contextkey = f"docker.exists.{name}"
+ if contextkey in __context__:
+ return __context__[contextkey]
+ try:
+@@ -1780,7 +1777,7 @@ def history(name, quiet=False):
+ )
+ for param in ("Size",):
+ if param in step:
+- step["{}_Human".format(param)] = _size_fmt(step[param])
++ step[f"{param}_Human"] = _size_fmt(step[param])
+ ret.append(copy.deepcopy(step))
+ if quiet:
+ return [x.get("Command") for x in ret]
+@@ -1842,9 +1839,7 @@ def images(verbose=False, **kwargs):
+ )
+ for param in ("Size", "VirtualSize"):
+ if param in bucket.get(img_id, {}):
+- bucket[img_id]["{}_Human".format(param)] = _size_fmt(
+- bucket[img_id][param]
+- )
++ bucket[img_id][f"{param}_Human"] = _size_fmt(bucket[img_id][param])
+
+ context_data = __context__.get("docker.images", {})
+ ret = copy.deepcopy(context_data.get("tagged", {}))
+@@ -1927,7 +1922,7 @@ def inspect(name):
+ raise
+
+ raise CommandExecutionError(
+- "Error 404: No such image/container/volume/network: {}".format(name)
++ f"Error 404: No such image/container/volume/network: {name}"
+ )
+
+
+@@ -1983,7 +1978,7 @@ def inspect_image(name):
+ ret = _client_wrapper("inspect_image", name)
+ for param in ("Size", "VirtualSize"):
+ if param in ret:
+- ret["{}_Human".format(param)] = _size_fmt(ret[param])
++ ret[f"{param}_Human"] = _size_fmt(ret[param])
+ return ret
+
+
+@@ -2277,7 +2272,7 @@ def port(name, private_port=None):
+ else:
+ # Sanity checks
+ if isinstance(private_port, int):
+- pattern = "{}/*".format(private_port)
++ pattern = f"{private_port}/*"
+ else:
+ err = (
+ "Invalid private_port '{}'. Must either be a port number, "
+@@ -2398,7 +2393,7 @@ def state(name):
+
+ salt myminion docker.state mycontainer
+ """
+- contextkey = "docker.state.{}".format(name)
++ contextkey = f"docker.state.{name}"
+ if contextkey in __context__:
+ return __context__[contextkey]
+ __context__[contextkey] = _get_state(inspect_container(name))
+@@ -2438,9 +2433,7 @@ def search(name, official=False, trusted=False):
+ """
+ response = _client_wrapper("search", name)
+ if not response:
+- raise CommandExecutionError(
+- "No images matched the search string '{}'".format(name)
+- )
++ raise CommandExecutionError(f"No images matched the search string '{name}'")
+
+ key_map = {
+ "description": "Description",
+@@ -2555,7 +2548,7 @@ def create(
+ ignore_collisions=False,
+ validate_ip_addrs=True,
+ client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Create a new container
+@@ -3281,7 +3274,7 @@ def create(
+ skip_translate=skip_translate,
+ ignore_collisions=ignore_collisions,
+ validate_ip_addrs=validate_ip_addrs,
+- **kwargs
++ **kwargs,
+ )
+
+ if unused_kwargs:
+@@ -3293,7 +3286,7 @@ def create(
+
+ log.debug(
+ "docker.create: creating container %susing the following arguments: %s",
+- "with name '{}' ".format(name) if name is not None else "",
++ f"with name '{name}' " if name is not None else "",
+ kwargs,
+ )
+ time_started = time.time()
+@@ -3331,7 +3324,7 @@ def run_container(
+ replace=False,
+ force=False,
+ networks=None,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ .. versionadded:: 2018.3.0
+@@ -3433,7 +3426,7 @@ def run_container(
+ skip_translate=skip_translate,
+ ignore_collisions=ignore_collisions,
+ validate_ip_addrs=validate_ip_addrs,
+- **kwargs
++ **kwargs,
+ )
+
+ # _get_create_kwargs() will have processed auto_remove and put it into the
+@@ -3458,7 +3451,7 @@ def run_container(
+
+ log.debug(
+ "docker.create: creating container %susing the following arguments: %s",
+- "with name '{}' ".format(name) if name is not None else "",
++ f"with name '{name}' " if name is not None else "",
+ kwargs,
+ )
+
+@@ -3498,7 +3491,7 @@ def run_container(
+ rm_(name)
+ except CommandExecutionError as rm_exc:
+ exc_info.setdefault("other_errors", []).append(
+- "Failed to auto_remove container: {}".format(rm_exc)
++ f"Failed to auto_remove container: {rm_exc}"
+ )
+ # Raise original exception with additional info
+ raise CommandExecutionError(exc.__str__(), info=exc_info)
+@@ -3593,7 +3586,7 @@ def copy_from(name, source, dest, overwrite=False, makedirs=False):
+ """
+ c_state = state(name)
+ if c_state != "running":
+- raise CommandExecutionError("Container '{}' is not running".format(name))
++ raise CommandExecutionError(f"Container '{name}' is not running")
+
+ # Destination file sanity checks
+ if not os.path.isabs(dest):
+@@ -3619,9 +3612,7 @@ def copy_from(name, source, dest, overwrite=False, makedirs=False):
+ )
+ )
+ else:
+- raise SaltInvocationError(
+- "Directory {} does not exist".format(dest_dir)
+- )
++ raise SaltInvocationError(f"Directory {dest_dir} does not exist")
+ if not overwrite and os.path.exists(dest):
+ raise CommandExecutionError(
+ "Destination path {} already exists. Use overwrite=True to "
+@@ -3632,19 +3623,14 @@ def copy_from(name, source, dest, overwrite=False, makedirs=False):
+ if not os.path.isabs(source):
+ raise SaltInvocationError("Source path must be absolute")
+ else:
+- if (
+- retcode(name, "test -e {}".format(pipes.quote(source)), ignore_retcode=True)
+- == 0
+- ):
++ if retcode(name, f"test -e {shlex.quote(source)}", ignore_retcode=True) == 0:
+ if (
+- retcode(
+- name, "test -f {}".format(pipes.quote(source)), ignore_retcode=True
+- )
++ retcode(name, f"test -f {shlex.quote(source)}", ignore_retcode=True)
+ != 0
+ ):
+ raise SaltInvocationError("Source must be a regular file")
+ else:
+- raise SaltInvocationError("Source file {} does not exist".format(source))
++ raise SaltInvocationError(f"Source file {source} does not exist")
+
+ # Before we try to replace the file, compare checksums.
+ source_md5 = _get_md5(name, source)
+@@ -3657,7 +3643,7 @@ def copy_from(name, source, dest, overwrite=False, makedirs=False):
+ try:
+ src_path = ":".join((name, source))
+ except TypeError:
+- src_path = "{}:{}".format(name, source)
++ src_path = f"{name}:{source}"
+ cmd = ["docker", "cp", src_path, dest_dir]
+ __salt__["cmd.run"](cmd, python_shell=False)
+ return source_md5 == __salt__["file.get_sum"](dest, "md5")
+@@ -3784,7 +3770,7 @@ def export(name, path, overwrite=False, makedirs=False, compression=None, **kwar
+ salt myminion docker.export mycontainer /tmp/mycontainer.tar
+ salt myminion docker.export mycontainer /tmp/mycontainer.tar.xz push=True
+ """
+- err = "Path '{}' is not absolute".format(path)
++ err = f"Path '{path}' is not absolute"
+ try:
+ if not os.path.isabs(path):
+ raise SaltInvocationError(err)
+@@ -3792,7 +3778,7 @@ def export(name, path, overwrite=False, makedirs=False, compression=None, **kwar
+ raise SaltInvocationError(err)
+
+ if os.path.exists(path) and not overwrite:
+- raise CommandExecutionError("{} already exists".format(path))
++ raise CommandExecutionError(f"{path} already exists")
+
+ if compression is None:
+ if path.endswith(".tar.gz") or path.endswith(".tgz"):
+@@ -3815,7 +3801,7 @@ def export(name, path, overwrite=False, makedirs=False, compression=None, **kwar
+ compression = "xz"
+
+ if compression and compression not in ("gzip", "bzip2", "xz"):
+- raise SaltInvocationError("Invalid compression type '{}'".format(compression))
++ raise SaltInvocationError(f"Invalid compression type '{compression}'")
+
+ parent_dir = os.path.dirname(path)
+ if not os.path.isdir(parent_dir):
+@@ -3828,16 +3814,14 @@ def export(name, path, overwrite=False, makedirs=False, compression=None, **kwar
+ os.makedirs(parent_dir)
+ except OSError as exc:
+ raise CommandExecutionError(
+- "Unable to make parent dir {}: {}".format(parent_dir, exc)
++ f"Unable to make parent dir {parent_dir}: {exc}"
+ )
+
+ if compression == "gzip":
+ try:
+ out = gzip.open(path, "wb")
+ except OSError as exc:
+- raise CommandExecutionError(
+- "Unable to open {} for writing: {}".format(path, exc)
+- )
++ raise CommandExecutionError(f"Unable to open {path} for writing: {exc}")
+ elif compression == "bzip2":
+ compressor = bz2.BZ2Compressor()
+ elif compression == "xz":
+@@ -3875,9 +3859,7 @@ def export(name, path, overwrite=False, makedirs=False, compression=None, **kwar
+ os.remove(path)
+ except OSError:
+ pass
+- raise CommandExecutionError(
+- "Error occurred during container export: {}".format(exc)
+- )
++ raise CommandExecutionError(f"Error occurred during container export: {exc}")
+ finally:
+ out.close()
+ ret = {"Time_Elapsed": time.time() - time_started}
+@@ -4112,7 +4094,7 @@ def build(
+ # For the build function in the low-level API, the "tag" refers to the full
+ # tag (e.g. myuser/myimage:mytag). This is different than in other
+ # functions, where the repo and tag are passed separately.
+- image_tag = "{}:{}".format(repository, tag) if repository and tag else None
++ image_tag = f"{repository}:{tag}" if repository and tag else None
+
+ time_started = time.time()
+ response = _client_wrapper(
+@@ -4131,7 +4113,7 @@ def build(
+
+ if not response:
+ raise CommandExecutionError(
+- "Build failed for {}, no response returned from Docker API".format(path)
++ f"Build failed for {path}, no response returned from Docker API"
+ )
+
+ stream_data = []
+@@ -4168,7 +4150,7 @@ def build(
+ if "Id" not in ret:
+ # API returned information, but there was no confirmation of a
+ # successful build.
+- msg = "Build failed for {}".format(path)
++ msg = f"Build failed for {path}"
+ log.error(msg)
+ log.error(stream_data)
+ if errors:
+@@ -4179,7 +4161,7 @@ def build(
+ if resolved_tag:
+ ret["Image"] = resolved_tag
+ else:
+- ret["Warning"] = "Failed to tag image as {}".format(image_tag)
++ ret["Warning"] = f"Failed to tag image as {image_tag}"
+
+ if api_response:
+ ret["API_Response"] = stream_data
+@@ -4386,7 +4368,7 @@ def import_(source, repository, tag="latest", api_response=False):
+
+ if not response:
+ raise CommandExecutionError(
+- "Import failed for {}, no response returned from Docker API".format(source)
++ f"Import failed for {source}, no response returned from Docker API"
+ )
+ elif api_response:
+ ret["API_Response"] = response
+@@ -4406,7 +4388,7 @@ def import_(source, repository, tag="latest", api_response=False):
+ if "Id" not in ret:
+ # API returned information, but there was no confirmation of a
+ # successful push.
+- msg = "Import failed for {}".format(source)
++ msg = f"Import failed for {source}"
+ if errors:
+ msg += ". Error(s) follow:\n\n{}".format("\n\n".join(errors))
+ raise CommandExecutionError(msg)
+@@ -4481,7 +4463,7 @@ def load(path, repository=None, tag=None):
+
+ local_path = __salt__["container_resource.cache_file"](path)
+ if not os.path.isfile(local_path):
+- raise CommandExecutionError("Source file {} does not exist".format(path))
++ raise CommandExecutionError(f"Source file {path} does not exist")
+
+ pre = images(all=True)
+ cmd = ["docker", "load", "-i", local_path]
+@@ -4491,7 +4473,7 @@ def load(path, repository=None, tag=None):
+ _clear_context()
+ post = images(all=True)
+ if result["retcode"] != 0:
+- msg = "Failed to load image(s) from {}".format(path)
++ msg = f"Failed to load image(s) from {path}"
+ if result["stderr"]:
+ msg += ": {}".format(result["stderr"])
+ raise CommandExecutionError(msg)
+@@ -4512,7 +4494,7 @@ def load(path, repository=None, tag=None):
+ # strings when passed (e.g. a numeric tag would be loaded as an int
+ # or float), and because the tag_ function will stringify them if
+ # need be, a str.format is the correct thing to do here.
+- tagged_image = "{}:{}".format(repository, tag)
++ tagged_image = f"{repository}:{tag}"
+ try:
+ result = tag_(top_level_images[0], repository=repository, tag=tag)
+ ret["Image"] = tagged_image
+@@ -4549,7 +4531,7 @@ def layers(name):
+ ):
+ ret.append(line)
+ if not ret:
+- raise CommandExecutionError("Image '{}' not found".format(name))
++ raise CommandExecutionError(f"Image '{name}' not found")
+ return ret
+
+
+@@ -4620,7 +4602,7 @@ def pull(
+
+ if not response:
+ raise CommandExecutionError(
+- "Pull failed for {}, no response returned from Docker API".format(image)
++ f"Pull failed for {image}, no response returned from Docker API"
+ )
+ elif api_response:
+ ret["API_Response"] = response
+@@ -4633,7 +4615,7 @@ def pull(
+ event = salt.utils.json.loads(event)
+ except Exception as exc: # pylint: disable=broad-except
+ raise CommandExecutionError(
+- "Unable to interpret API event: '{}'".format(event),
++ f"Unable to interpret API event: '{event}'",
+ info={"Error": exc.__str__()},
+ )
+ try:
+@@ -4715,7 +4697,7 @@ def push(
+
+ if not response:
+ raise CommandExecutionError(
+- "Push failed for {}, no response returned from Docker API".format(image)
++ f"Push failed for {image}, no response returned from Docker API"
+ )
+ elif api_response:
+ ret["API_Response"] = response
+@@ -4727,7 +4709,7 @@ def push(
+ event = salt.utils.json.loads(event)
+ except Exception as exc: # pylint: disable=broad-except
+ raise CommandExecutionError(
+- "Unable to interpret API event: '{}'".format(event),
++ f"Unable to interpret API event: '{event}'",
+ info={"Error": exc.__str__()},
+ )
+ try:
+@@ -4807,9 +4789,7 @@ def rmi(*names, **kwargs):
+ err += "image(s): {}".format(", ".join(deps["Images"]))
+ errors.append(err)
+ else:
+- errors.append(
+- "Error {}: {}".format(exc.response.status_code, exc.explanation)
+- )
++ errors.append(f"Error {exc.response.status_code}: {exc.explanation}")
+
+ _clear_context()
+ ret = {
+@@ -4897,7 +4877,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ salt myminion docker.save centos:7 /tmp/cent7.tar
+ salt myminion docker.save 0123456789ab cdef01234567 /tmp/saved.tar
+ """
+- err = "Path '{}' is not absolute".format(path)
++ err = f"Path '{path}' is not absolute"
+ try:
+ if not os.path.isabs(path):
+ raise SaltInvocationError(err)
+@@ -4905,7 +4885,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ raise SaltInvocationError(err)
+
+ if os.path.exists(path) and not overwrite:
+- raise CommandExecutionError("{} already exists".format(path))
++ raise CommandExecutionError(f"{path} already exists")
+
+ if compression is None:
+ if path.endswith(".tar.gz") or path.endswith(".tgz"):
+@@ -4928,7 +4908,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ compression = "xz"
+
+ if compression and compression not in ("gzip", "bzip2", "xz"):
+- raise SaltInvocationError("Invalid compression type '{}'".format(compression))
++ raise SaltInvocationError(f"Invalid compression type '{compression}'")
+
+ parent_dir = os.path.dirname(path)
+ if not os.path.isdir(parent_dir):
+@@ -4950,7 +4930,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ time_started = time.time()
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if result["retcode"] != 0:
+- err = "Failed to save image(s) to {}".format(path)
++ err = f"Failed to save image(s) to {path}"
+ if result["stderr"]:
+ err += ": {}".format(result["stderr"])
+ raise CommandExecutionError(err)
+@@ -4960,9 +4940,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ try:
+ out = gzip.open(path, "wb")
+ except OSError as exc:
+- raise CommandExecutionError(
+- "Unable to open {} for writing: {}".format(path, exc)
+- )
++ raise CommandExecutionError(f"Unable to open {path} for writing: {exc}")
+ elif compression == "bzip2":
+ compressor = bz2.BZ2Compressor()
+ elif compression == "xz":
+@@ -4998,9 +4976,7 @@ def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs
+ os.remove(path)
+ except OSError:
+ pass
+- raise CommandExecutionError(
+- "Error occurred during image save: {}".format(exc)
+- )
++ raise CommandExecutionError(f"Error occurred during image save: {exc}")
+ finally:
+ try:
+ # Clean up temp file
+@@ -5120,7 +5096,7 @@ def create_network(
+ ignore_collisions=False,
+ validate_ip_addrs=True,
+ client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ .. versionchanged:: 2018.3.0
+@@ -5360,7 +5336,7 @@ def create_network(
+ skip_translate=skip_translate,
+ ignore_collisions=ignore_collisions,
+ validate_ip_addrs=validate_ip_addrs,
+- **__utils__["args.clean_kwargs"](**kwargs)
++ **__utils__["args.clean_kwargs"](**kwargs),
+ )
+
+ if "ipam" not in kwargs:
+@@ -5692,7 +5668,7 @@ def pause(name):
+ return {
+ "result": False,
+ "state": {"old": orig_state, "new": orig_state},
+- "comment": "Container '{}' is stopped, cannot pause".format(name),
++ "comment": f"Container '{name}' is stopped, cannot pause",
+ }
+ return _change_state(name, "pause", "paused")
+
+@@ -5791,7 +5767,7 @@ def start_(name):
+ return {
+ "result": False,
+ "state": {"old": orig_state, "new": orig_state},
+- "comment": "Container '{}' is paused, cannot start".format(name),
++ "comment": f"Container '{name}' is paused, cannot start",
+ }
+
+ return _change_state(name, "start", "running")
+@@ -5896,7 +5872,7 @@ def unpause(name):
+ return {
+ "result": False,
+ "state": {"old": orig_state, "new": orig_state},
+- "comment": "Container '{}' is stopped, cannot unpause".format(name),
++ "comment": f"Container '{name}' is stopped, cannot unpause",
+ }
+ return _change_state(name, "unpause", "running")
+
+@@ -5945,7 +5921,7 @@ def wait(name, ignore_already_stopped=False, fail_on_exit_status=False):
+ # Container doesn't exist anymore
+ return {
+ "result": ignore_already_stopped,
+- "comment": "Container '{}' absent".format(name),
++ "comment": f"Container '{name}' absent",
+ }
+ already_stopped = pre == "stopped"
+ response = _client_wrapper("wait", name)
+@@ -5969,7 +5945,7 @@ def wait(name, ignore_already_stopped=False, fail_on_exit_status=False):
+ "exit_status": response,
+ }
+ if already_stopped:
+- result["comment"] = "Container '{}' already stopped".format(name)
++ result["comment"] = f"Container '{name}' already stopped"
+ if fail_on_exit_status and result["result"]:
+ result["result"] = result["exit_status"] == 0
+ return result
+@@ -5982,7 +5958,7 @@ def prune(
+ build=False,
+ volumes=False,
+ system=None,
+- **filters
++ **filters,
+ ):
+ """
+ .. versionadded:: 2019.2.0
+@@ -6668,7 +6644,7 @@ def script_retcode(
+
+
+ def _generate_tmp_path():
+- return os.path.join("/tmp", "salt.docker.{}".format(uuid.uuid4().hex[:6]))
++ return os.path.join("/tmp", f"salt.docker.{uuid.uuid4().hex[:6]}")
+
+
+ def _prepare_trans_tar(name, sls_opts, mods=None, pillar=None, extra_filerefs=""):
+@@ -6929,7 +6905,7 @@ def call(name, function, *args, **kwargs):
+ ]
+ + list(args)
+ + [
+- "{}={}".format(key, value)
++ f"{key}={value}"
+ for (key, value) in kwargs.items()
+ if not key.startswith("__")
+ ]
+diff --git a/salt/modules/lxc.py b/salt/modules/lxc.py
+index bea6445db98..d2c1e66491e 100644
+--- a/salt/modules/lxc.py
++++ b/salt/modules/lxc.py
+@@ -12,9 +12,9 @@ import datetime
+ import difflib
+ import logging
+ import os
+-import pipes
+ import random
+ import re
++import shlex
+ import shutil
+ import string
+ import tempfile
+@@ -1834,8 +1834,8 @@ def _after_ignition_network_profile(cmd, ret, name, network_profile, path, nic_o
+ # destroy the container if it was partially created
+ cmd = "lxc-destroy"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {}".format(name)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {name}"
+ __salt__["cmd.retcode"](cmd, python_shell=False)
+ raise CommandExecutionError(
+ "Container could not be created with cmd '{}': {}".format(
+@@ -1997,7 +1997,7 @@ def create(
+ )
+ options["imgtar"] = img_tar
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
++ cmd += f" -P {shlex.quote(path)}"
+ if not os.path.exists(path):
+ os.makedirs(path)
+ if config:
+@@ -2138,7 +2138,7 @@ def clone(name, orig, profile=None, network_profile=None, nic_opts=None, **kwarg
+ cmd = "lxc-clone"
+ cmd += " {} -o {} -n {}".format(snapshot, orig, name)
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
++ cmd += f" -P {shlex.quote(path)}"
+ if not os.path.exists(path):
+ os.makedirs(path)
+ if backing:
+@@ -2186,7 +2186,7 @@ def ls_(active=None, cache=True, path=None):
+ ret = []
+ cmd = "lxc-ls"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
++ cmd += f" -P {shlex.quote(path)}"
+ if active:
+ cmd += " --active"
+ output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
+@@ -2242,8 +2242,8 @@ def list_(extra=False, limit=None, path=None):
+ for container in ctnrs:
+ cmd = "lxc-info"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {}".format(container)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {container}"
+ c_info = __salt__["cmd.run"](cmd, python_shell=False, output_loglevel="debug")
+ c_state = None
+ for line in c_info.splitlines():
+@@ -2301,13 +2301,13 @@ def _change_state(
+ # Kill the container first
+ scmd = "lxc-stop"
+ if path:
+- scmd += " -P {}".format(pipes.quote(path))
+- scmd += " -k -n {}".format(name)
++ scmd += f" -P {shlex.quote(path)}"
++ scmd += f" -k -n {name}"
+ __salt__["cmd.run"](scmd, python_shell=False)
+
+ if path and " -P " not in cmd:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {}".format(name)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {name}"
+
+ # certain lxc commands need to be taken with care (lxc-start)
+ # as te command itself mess with double forks; we must not
+@@ -2337,8 +2337,8 @@ def _change_state(
+ # some commands do not wait, so we will
+ rcmd = "lxc-wait"
+ if path:
+- rcmd += " -P {}".format(pipes.quote(path))
+- rcmd += " -n {} -s {}".format(name, expected.upper())
++ rcmd += f" -P {shlex.quote(path)}"
++ rcmd += f" -n {name} -s {expected.upper()}"
+ __salt__["cmd.run"](rcmd, python_shell=False, timeout=30)
+ _clear_context()
+ post = state(name, path=path)
+@@ -2459,7 +2459,7 @@ def start(name, **kwargs):
+ lxc_config = os.path.join(cpath, name, "config")
+ # we try to start, even without config, if global opts are there
+ if os.path.exists(lxc_config):
+- cmd += " -f {}".format(pipes.quote(lxc_config))
++ cmd += f" -f {shlex.quote(lxc_config)}"
+ cmd += " -d"
+ _ensure_exists(name, path=path)
+ if state(name, path=path) == "frozen":
+@@ -2564,7 +2564,7 @@ def freeze(name, **kwargs):
+ start(name, path=path)
+ cmd = "lxc-freeze"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
++ cmd += f" -P {shlex.quote(path)}"
+ ret = _change_state(cmd, name, "frozen", use_vt=use_vt, path=path)
+ if orig_state == "stopped" and start_:
+ ret["state"]["old"] = orig_state
+@@ -2599,7 +2599,7 @@ def unfreeze(name, path=None, use_vt=None):
+ raise CommandExecutionError("Container '{}' is stopped".format(name))
+ cmd = "lxc-unfreeze"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
++ cmd += f" -P {shlex.quote(path)}"
+ return _change_state(cmd, name, "running", path=path, use_vt=use_vt)
+
+
+@@ -2693,8 +2693,8 @@ def state(name, path=None):
+ else:
+ cmd = "lxc-info"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {}".format(name)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {name}"
+ ret = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if ret["retcode"] != 0:
+ _clear_context()
+@@ -2731,8 +2731,8 @@ def get_parameter(name, parameter, path=None):
+ _ensure_exists(name, path=path)
+ cmd = "lxc-cgroup"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {} {}".format(name, parameter)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {name} {parameter}"
+ ret = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if ret["retcode"] != 0:
+ raise CommandExecutionError(
+@@ -2762,8 +2762,8 @@ def set_parameter(name, parameter, value, path=None):
+
+ cmd = "lxc-cgroup"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " -n {} {} {}".format(name, parameter, value)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" -n {name} {parameter} {value}"
+ ret = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if ret["retcode"] != 0:
+ return False
+@@ -3662,8 +3662,8 @@ def attachable(name, path=None):
+ log.debug("Checking if LXC container %s is attachable", name)
+ cmd = "lxc-attach"
+ if path:
+- cmd += " -P {}".format(pipes.quote(path))
+- cmd += " --clear-env -n {} -- /usr/bin/env".format(name)
++ cmd += f" -P {shlex.quote(path)}"
++ cmd += f" --clear-env -n {name} -- /usr/bin/env"
+ result = (
+ __salt__["cmd.retcode"](
+ cmd, python_shell=False, output_loglevel="quiet", ignore_retcode=True
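The lxc.py changes swap `pipes.quote` for `shlex.quote` throughout. `pipes` was deprecated by PEP 594 and is removed in Python 3.13; since Python 3.3 `pipes.quote` has been an alias for `shlex.quote`, so the substitution is a drop-in replacement. A self-contained illustration of what the quoting protects against in the `-P` path handling:

```python
import shlex

# hypothetical untrusted container path, as passed to lxc-* via "-P"
path = "/var/lib/lxc/prod; rm -rf /"
cmd = "lxc-ls"
cmd += f" -P {shlex.quote(path)}"
print(cmd)
# lxc-ls -P '/var/lib/lxc/prod; rm -rf /'
```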
+diff --git a/salt/modules/mac_keychain.py b/salt/modules/mac_keychain.py
+index a823c428b76..7fdc162b9aa 100644
+--- a/salt/modules/mac_keychain.py
++++ b/salt/modules/mac_keychain.py
+@@ -11,20 +11,6 @@ import shlex
+
+ import salt.utils.platform
+
+-try:
+- import pipes
+-
+- HAS_DEPS = True
+-except ImportError:
+- HAS_DEPS = False
+-
+-if hasattr(shlex, "quote"):
+- _quote = shlex.quote
+-elif HAS_DEPS and hasattr(pipes, "quote"):
+- _quote = pipes.quote
+-else:
+- _quote = None
+-
+ log = logging.getLogger(__name__)
+
+ __virtualname__ = "keychain"
+@@ -34,7 +20,7 @@ def __virtual__():
+ """
+ Only work on Mac OS
+ """
+- if salt.utils.platform.is_darwin() and _quote is not None:
++ if salt.utils.platform.is_darwin():
+ return __virtualname__
+ return (False, "Only available on Mac OS systems with pipes")
+
+@@ -82,7 +68,7 @@ def install(
+ if keychain_password is not None:
+ unlock_keychain(keychain, keychain_password)
+
+- cmd = "security import {} -P {} -k {}".format(cert, password, keychain)
++ cmd = f"security import {cert} -P {password} -k {keychain}"
+ if allow_any:
+ cmd += " -A"
+ return __salt__["cmd.run"](cmd)
+@@ -117,7 +103,7 @@ def uninstall(
+ if keychain_password is not None:
+ unlock_keychain(keychain, keychain_password)
+
+- cmd = 'security delete-certificate -c "{}" {}'.format(cert_name, keychain)
++ cmd = f'security delete-certificate -c "{cert_name}" {keychain}'
+ return __salt__["cmd.run"](cmd)
+
+
+@@ -137,7 +123,7 @@ def list_certs(keychain="/Library/Keychains/System.keychain"):
+ """
+ cmd = (
+ 'security find-certificate -a {} | grep -o "alis".*\\" | '
+- "grep -o '\\\"[-A-Za-z0-9.:() ]*\\\"'".format(_quote(keychain))
++ "grep -o '\\\"[-A-Za-z0-9.:() ]*\\\"'".format(shlex.quote(keychain))
+ )
+ out = __salt__["cmd.run"](cmd, python_shell=True)
+ return out.replace('"', "").split("\n")
+@@ -165,7 +151,7 @@ def get_friendly_name(cert, password):
+ """
+ cmd = (
+ "openssl pkcs12 -in {} -passin pass:{} -info -nodes -nokeys 2> /dev/null | "
+- "grep friendlyName:".format(_quote(cert), _quote(password))
++ "grep friendlyName:".format(shlex.quote(cert), shlex.quote(password))
+ )
+ out = __salt__["cmd.run"](cmd, python_shell=True)
+ return out.replace("friendlyName: ", "").strip()
+@@ -187,7 +173,7 @@ def get_default_keychain(user=None, domain="user"):
+
+ salt '*' keychain.get_default_keychain
+ """
+- cmd = "security default-keychain -d {}".format(domain)
++ cmd = f"security default-keychain -d {domain}"
+ return __salt__["cmd.run"](cmd, runas=user)
+
+
+@@ -210,7 +196,7 @@ def set_default_keychain(keychain, domain="user", user=None):
+
+ salt '*' keychain.set_keychain /Users/fred/Library/Keychains/login.keychain
+ """
+- cmd = "security default-keychain -d {} -s {}".format(domain, keychain)
++ cmd = f"security default-keychain -d {domain} -s {keychain}"
+ return __salt__["cmd.run"](cmd, runas=user)
+
+
+@@ -233,7 +219,7 @@ def unlock_keychain(keychain, password):
+
+ salt '*' keychain.unlock_keychain /tmp/test.p12 test123
+ """
+- cmd = "security unlock-keychain -p {} {}".format(password, keychain)
++ cmd = f"security unlock-keychain -p {password} {keychain}"
+ __salt__["cmd.run"](cmd)
+
+
+@@ -261,7 +247,7 @@ def get_hash(name, password=None):
+ name, password
+ )
+ else:
+- cmd = 'security find-certificate -c "{}" -m -p'.format(name)
++ cmd = f'security find-certificate -c "{name}" -m -p'
+
+ out = __salt__["cmd.run"](cmd)
+ matches = re.search(
+diff --git a/salt/modules/macpackage.py b/salt/modules/macpackage.py
+index faf5810d4fc..f9a6b7bb95c 100644
+--- a/salt/modules/macpackage.py
++++ b/salt/modules/macpackage.py
+@@ -9,31 +9,16 @@ import shlex
+
+ import salt.utils.platform
+
+-try:
+- import pipes
+-
+- HAS_DEPS = True
+-except ImportError:
+- HAS_DEPS = False
+-
+-
+ log = logging.getLogger(__name__)
+-__virtualname__ = "macpackage"
+-
+
+-if hasattr(shlex, "quote"):
+- _quote = shlex.quote
+-elif HAS_DEPS and hasattr(pipes, "quote"):
+- _quote = pipes.quote
+-else:
+- _quote = None
++__virtualname__ = "macpackage"
+
+
+ def __virtual__():
+ """
+ Only work on Mac OS
+ """
+- if salt.utils.platform.is_darwin() and _quote is not None:
++ if salt.utils.platform.is_darwin():
+ return __virtualname__
+ return (False, "Only available on Mac OS systems with pipes")
+
+@@ -60,11 +45,11 @@ def install(pkg, target="LocalSystem", store=False, allow_untrusted=False):
+ """
+ if "*." not in pkg:
+ # If we use wildcards, we cannot use quotes
+- pkg = _quote(pkg)
++ pkg = shlex.quote(pkg)
+
+- target = _quote(target)
++ target = shlex.quote(target)
+
+- cmd = "installer -pkg {} -target {}".format(pkg, target)
++ cmd = f"installer -pkg {pkg} -target {target}"
+ if store:
+ cmd += " -store"
+ if allow_untrusted:
+@@ -109,7 +94,7 @@ def install_app(app, target="/Applications/"):
+ if not app[-1] == "/":
+ app += "/"
+
+- cmd = 'rsync -a --delete "{}" "{}"'.format(app, target)
++ cmd = f'rsync -a --delete "{app}" "{target}"'
+ return __salt__["cmd.run"](cmd)
+
+
+@@ -154,7 +139,7 @@ def mount(dmg):
+
+ temp_dir = __salt__["temp.dir"](prefix="dmg-")
+
+- cmd = 'hdiutil attach -readonly -nobrowse -mountpoint {} "{}"'.format(temp_dir, dmg)
++ cmd = f'hdiutil attach -readonly -nobrowse -mountpoint {temp_dir} "{dmg}"'
+
+ return __salt__["cmd.run"](cmd), temp_dir
+
+@@ -176,7 +161,7 @@ def unmount(mountpoint):
+ salt '*' macpackage.unmount /dev/disk2
+ """
+
+- cmd = 'hdiutil detach "{}"'.format(mountpoint)
++ cmd = f'hdiutil detach "{mountpoint}"'
+
+ return __salt__["cmd.run"](cmd)
+
+@@ -216,7 +201,7 @@ def get_pkg_id(pkg):
+
+ salt '*' macpackage.get_pkg_id /tmp/test.pkg
+ """
+- pkg = _quote(pkg)
++ pkg = shlex.quote(pkg)
+ package_ids = []
+
+ # Create temp directory
+@@ -224,7 +209,7 @@ def get_pkg_id(pkg):
+
+ try:
+ # List all of the PackageInfo files
+- cmd = "xar -t -f {} | grep PackageInfo".format(pkg)
++ cmd = f"xar -t -f {pkg} | grep PackageInfo"
+ out = __salt__["cmd.run"](cmd, python_shell=True, output_loglevel="quiet")
+ files = out.split("\n")
+
+@@ -264,12 +249,12 @@ def get_mpkg_ids(mpkg):
+
+ salt '*' macpackage.get_mpkg_ids /dev/disk2
+ """
+- mpkg = _quote(mpkg)
++ mpkg = shlex.quote(mpkg)
+ package_infos = []
+ base_path = os.path.dirname(mpkg)
+
+ # List all of the .pkg files
+- cmd = "find {} -name *.pkg".format(base_path)
++ cmd = f"find {base_path} -name *.pkg"
+ out = __salt__["cmd.run"](cmd, python_shell=True)
+
+ pkg_files = out.split("\n")
+@@ -281,7 +266,7 @@ def get_mpkg_ids(mpkg):
+
+ def _get_pkg_id_from_pkginfo(pkginfo):
+ # Find our identifiers
+- pkginfo = _quote(pkginfo)
++ pkginfo = shlex.quote(pkginfo)
+ cmd = "cat {} | grep -Eo 'identifier=\"[a-zA-Z.0-9\\-]*\"' | cut -c 13- | tr -d '\"'".format(
+ pkginfo
+ )
+@@ -294,8 +279,8 @@ def _get_pkg_id_from_pkginfo(pkginfo):
+
+
+ def _get_pkg_id_dir(path):
+- path = _quote(os.path.join(path, "Contents/Info.plist"))
+- cmd = '/usr/libexec/PlistBuddy -c "print :CFBundleIdentifier" {}'.format(path)
++ path = shlex.quote(os.path.join(path, "Contents/Info.plist"))
++ cmd = f'/usr/libexec/PlistBuddy -c "print :CFBundleIdentifier" {path}'
+
+ # We can only use wildcards in python_shell which is
+ # sent by the macpackage state
+diff --git a/salt/modules/openstack_config.py b/salt/modules/openstack_config.py
+index 823afbf1c60..937c10da61a 100644
+--- a/salt/modules/openstack_config.py
++++ b/salt/modules/openstack_config.py
+@@ -13,28 +13,11 @@ import shlex
+ import salt.exceptions
+ import salt.utils.decorators.path
+
+-try:
+- import pipes
+-
+- HAS_DEPS = True
+-except ImportError:
+- HAS_DEPS = False
+-
+-if hasattr(shlex, "quote"):
+- _quote = shlex.quote
+-elif HAS_DEPS and hasattr(pipes, "quote"):
+- _quote = pipes.quote
+-else:
+- _quote = None
+-
+-
+ # Don't shadow built-in's.
+ __func_alias__ = {"set_": "set"}
+
+
+ def __virtual__():
+- if _quote is None and not HAS_DEPS:
+- return (False, "Missing dependencies")
+ return True
+
+
+@@ -69,10 +52,10 @@ def set_(filename, section, parameter, value):
+ salt-call openstack_config.set /etc/keystone/keystone.conf sql connection foo
+ """
+
+- filename = _quote(filename)
+- section = _quote(section)
+- parameter = _quote(parameter)
+- value = _quote(str(value))
++ filename = shlex.quote(filename)
++ section = shlex.quote(section)
++ parameter = shlex.quote(parameter)
++ value = shlex.quote(str(value))
+
+ result = __salt__["cmd.run_all"](
+ "openstack-config --set {} {} {} {}".format(
+@@ -109,12 +92,12 @@ def get(filename, section, parameter):
+
+ """
+
+- filename = _quote(filename)
+- section = _quote(section)
+- parameter = _quote(parameter)
++ filename = shlex.quote(filename)
++ section = shlex.quote(section)
++ parameter = shlex.quote(parameter)
+
+ result = __salt__["cmd.run_all"](
+- "openstack-config --get {} {} {}".format(filename, section, parameter),
++ f"openstack-config --get {filename} {section} {parameter}",
+ python_shell=False,
+ )
+
+@@ -145,12 +128,12 @@ def delete(filename, section, parameter):
+ salt-call openstack_config.delete /etc/keystone/keystone.conf sql connection
+ """
+
+- filename = _quote(filename)
+- section = _quote(section)
+- parameter = _quote(parameter)
++ filename = shlex.quote(filename)
++ section = shlex.quote(section)
++ parameter = shlex.quote(parameter)
+
+ result = __salt__["cmd.run_all"](
+- "openstack-config --del {} {} {}".format(filename, section, parameter),
++ f"openstack-config --del {filename} {section} {parameter}",
+ python_shell=False,
+ )
+
+diff --git a/salt/modules/postgres.py b/salt/modules/postgres.py
+index 25a72f1063c..f73959a92ed 100644
+--- a/salt/modules/postgres.py
++++ b/salt/modules/postgres.py
+@@ -46,8 +46,8 @@ import hmac
+ import io
+ import logging
+ import os
+-import pipes
+ import re
++import shlex
+ import tempfile
+
+ import salt.utils.files
+@@ -136,7 +136,7 @@ def __virtual__():
+ for util in utils:
+ if not salt.utils.path.which(util):
+ if not _find_pg_binary(util):
+- return (False, "{} was not found".format(util))
++ return (False, f"{util} was not found")
+ return True
+
+
+@@ -241,14 +241,14 @@ def _run_initdb(
+ raise CommandExecutionError("initdb executable not found.")
+ cmd = [
+ _INITDB_BIN,
+- "--pgdata={}".format(name),
+- "--username={}".format(user),
+- "--auth={}".format(auth),
+- "--encoding={}".format(encoding),
++ f"--pgdata={name}",
++ f"--username={user}",
++ f"--auth={auth}",
++ f"--encoding={encoding}",
+ ]
+
+ if locale is not None:
+- cmd.append("--locale={}".format(locale))
++ cmd.append(f"--locale={locale}")
+
+ # intentionally use short option, as the long option name has been
+ # renamed from "xlogdir" to "waldir" in PostgreSQL 10
+@@ -262,9 +262,9 @@ def _run_initdb(
+ if password is not None:
+ pgpassfile = salt.utils.files.mkstemp(text=True)
+ with salt.utils.files.fopen(pgpassfile, "w") as fp_:
+- fp_.write(salt.utils.stringutils.to_str("{}".format(password)))
++ fp_.write(salt.utils.stringutils.to_str(f"{password}"))
+ __salt__["file.chown"](pgpassfile, runas, "")
+- cmd.extend(["--pwfile={}".format(pgpassfile)])
++ cmd.extend([f"--pwfile={pgpassfile}"])
+
+ kwargs = dict(
+ runas=runas,
+@@ -273,7 +273,7 @@ def _run_initdb(
+ "postgres.timeout", default=_DEFAULT_COMMAND_TIMEOUT_SECS
+ ),
+ )
+- cmdstr = " ".join([pipes.quote(c) for c in cmd])
++ cmdstr = " ".join([shlex.quote(c) for c in cmd])
+ ret = __salt__["cmd.run_all"](cmdstr, python_shell=False, **kwargs)
+
+ if ret.get("retcode", 0) != 0:
+@@ -582,9 +582,7 @@ def _quote_ddl_value(value, quote="'"):
+ if value is None:
+ return None
+ if quote in value: # detect trivial sqli
+- raise SaltInvocationError(
+- "Unsupported character {} in value: {}".format(quote, value)
+- )
++ raise SaltInvocationError(f"Unsupported character {quote} in value: {value}")
+ return "{quote}{value}{quote}".format(quote=quote, value=value)
+
+
+@@ -617,7 +615,7 @@ def db_create(
+ """
+
+ # Base query to create a database
+- query = 'CREATE DATABASE "{}"'.format(name)
++ query = f'CREATE DATABASE "{name}"'
+
+ # "With"-options to create a database
+ with_args = salt.utils.odict.OrderedDict(
+@@ -685,11 +683,9 @@ def db_alter(
+ else:
+ queries = []
+ if owner:
+- queries.append('ALTER DATABASE "{}" OWNER TO "{}"'.format(name, owner))
++ queries.append(f'ALTER DATABASE "{name}" OWNER TO "{owner}"')
+ if tablespace:
+- queries.append(
+- 'ALTER DATABASE "{}" SET TABLESPACE "{}"'.format(name, tablespace)
+- )
++ queries.append(f'ALTER DATABASE "{name}" SET TABLESPACE "{tablespace}"')
+ for query in queries:
+ ret = _psql_prepare_and_run(
+ ["-c", query],
+@@ -726,10 +722,10 @@ def db_remove(
+ salt '*' postgres.db_remove 'dbname'
+ """
+ for query in [
+- 'REVOKE CONNECT ON DATABASE "{db}" FROM public;'.format(db=name),
++ f'REVOKE CONNECT ON DATABASE "{name}" FROM public;',
+ "SELECT pid, pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname ="
+ " '{db}' AND pid <> pg_backend_pid();".format(db=name),
+- 'DROP DATABASE "{db}";'.format(db=name),
++ f'DROP DATABASE "{name}";',
+ ]:
+ ret = _psql_prepare_and_run(
+ ["-c", query],
+@@ -741,7 +737,7 @@ def db_remove(
+ password=password,
+ )
+ if ret["retcode"] != 0:
+- raise Exception("Failed: ret={}".format(ret))
++ raise Exception(f"Failed: ret={ret}")
+ return True
+
+
+@@ -846,10 +842,10 @@ def tablespace_create(
+ owner_query = ""
+ options_query = ""
+ if owner:
+- owner_query = 'OWNER "{}"'.format(owner)
++ owner_query = f'OWNER "{owner}"'
+ # should come out looking like: 'OWNER postgres'
+ if options:
+- optionstext = ["{} = {}".format(k, v) for k, v in options.items()]
++ optionstext = [f"{k} = {v}" for k, v in options.items()]
+ options_query = "WITH ( {} )".format(", ".join(optionstext))
+ # should come out looking like: 'WITH ( opt1 = 1.0, opt2 = 4.0 )'
+ query = "CREATE TABLESPACE \"{}\" {} LOCATION '{}' {}".format(
+@@ -902,9 +898,9 @@ def tablespace_alter(
+ queries = []
+
+ if new_name:
+- queries.append('ALTER TABLESPACE "{}" RENAME TO "{}"'.format(name, new_name))
++ queries.append(f'ALTER TABLESPACE "{name}" RENAME TO "{new_name}"')
+ if new_owner:
+- queries.append('ALTER TABLESPACE "{}" OWNER TO "{}"'.format(name, new_owner))
++ queries.append(f'ALTER TABLESPACE "{name}" OWNER TO "{new_owner}"')
+ if set_option:
+ queries.append(
+ 'ALTER TABLESPACE "{}" SET ({} = {})'.format(
+@@ -912,7 +908,7 @@ def tablespace_alter(
+ )
+ )
+ if reset_option:
+- queries.append('ALTER TABLESPACE "{}" RESET ({})'.format(name, reset_option))
++ queries.append(f'ALTER TABLESPACE "{name}" RESET ({reset_option})')
+
+ for query in queries:
+ ret = _psql_prepare_and_run(
+@@ -950,7 +946,7 @@ def tablespace_remove(
+
+ .. versionadded:: 2015.8.0
+ """
+- query = 'DROP TABLESPACE "{}"'.format(name)
++ query = f'DROP TABLESPACE "{name}"'
+ ret = _psql_prepare_and_run(
+ ["-c", query],
+ user=user,
+@@ -1158,11 +1154,11 @@ def _add_role_flag(string, test, flag, cond=None, prefix="NO", addtxt="", skip=F
+ cond = test
+ if test is not None:
+ if cond:
+- string = "{} {}".format(string, flag)
++ string = f"{string} {flag}"
+ else:
+- string = "{0} {2}{1}".format(string, flag, prefix)
++ string = f"{string} {prefix}{flag}"
+ if addtxt:
+- string = "{} {}".format(string, addtxt)
++ string = f"{string} {addtxt}"
+ return string
+
+
+@@ -1224,7 +1220,7 @@ def _verify_password(role, password, verifier, method):
+ def _md5_password(role, password):
+ return "md5{}".format(
+ hashlib.md5( # nosec
+- salt.utils.stringutils.to_bytes("{}{}".format(password, role))
++ salt.utils.stringutils.to_bytes(f"{password}{role}")
+ ).hexdigest()
+ )
+
+@@ -1343,7 +1339,7 @@ def _role_cmd_args(
+ if isinstance(groups, list):
+ groups = ",".join(groups)
+ for group in groups.split(","):
+- sub_cmd = '{}; GRANT "{}" TO "{}"'.format(sub_cmd, group, name)
++ sub_cmd = f'{sub_cmd}; GRANT "{group}" TO "{name}"'
+ return sub_cmd
+
+
+@@ -1380,7 +1376,7 @@ def _role_create(
+ log.info("%s '%s' already exists", typ_.capitalize(), name)
+ return False
+
+- sub_cmd = 'CREATE ROLE "{}" WITH'.format(name)
++ sub_cmd = f'CREATE ROLE "{name}" WITH'
+ sub_cmd = "{} {}".format(
+ sub_cmd,
+ _role_cmd_args(
+@@ -1506,7 +1502,7 @@ def _role_update(
+ log.info("%s '%s' could not be found", typ_.capitalize(), name)
+ return False
+
+- sub_cmd = 'ALTER ROLE "{}" WITH'.format(name)
++ sub_cmd = f'ALTER ROLE "{name}" WITH'
+ sub_cmd = "{} {}".format(
+ sub_cmd,
+ _role_cmd_args(
+@@ -1613,7 +1609,7 @@ def _role_remove(
+ return False
+
+ # user exists, proceed
+- sub_cmd = 'DROP ROLE "{}"'.format(name)
++ sub_cmd = f'DROP ROLE "{name}"'
+ _psql_prepare_and_run(
+ ["-c", sub_cmd],
+ runas=runas,
+@@ -1995,14 +1991,14 @@ def create_extension(
+ args = ["CREATE EXTENSION"]
+ if if_not_exists:
+ args.append("IF NOT EXISTS")
+- args.append('"{}"'.format(name))
++ args.append(f'"{name}"')
+ sargs = []
+ if schema:
+- sargs.append('SCHEMA "{}"'.format(schema))
++ sargs.append(f'SCHEMA "{schema}"')
+ if ext_version:
+- sargs.append("VERSION {}".format(ext_version))
++ sargs.append(f"VERSION {ext_version}")
+ if from_version:
+- sargs.append("FROM {}".format(from_version))
++ sargs.append(f"FROM {from_version}")
+ if sargs:
+ args.append("WITH")
+ args.extend(sargs)
+@@ -2011,13 +2007,9 @@ def create_extension(
+ else:
+ args = []
+ if schema and _EXTENSION_TO_MOVE in mtdata:
+- args.append(
+- 'ALTER EXTENSION "{}" SET SCHEMA "{}";'.format(name, schema)
+- )
++ args.append(f'ALTER EXTENSION "{name}" SET SCHEMA "{schema}";')
+ if ext_version and _EXTENSION_TO_UPGRADE in mtdata:
+- args.append(
+- 'ALTER EXTENSION "{}" UPDATE TO {};'.format(name, ext_version)
+- )
++ args.append(f'ALTER EXTENSION "{name}" UPDATE TO {ext_version};')
+ cmd = " ".join(args).strip()
+ if cmd:
+ _psql_prepare_and_run(
+@@ -2227,7 +2219,7 @@ def owner_to(
+
+ sqlfile = tempfile.NamedTemporaryFile()
+ sqlfile.write("begin;\n")
+- sqlfile.write('alter database "{}" owner to "{}";\n'.format(dbname, ownername))
++ sqlfile.write(f'alter database "{dbname}" owner to "{ownername}";\n')
+
+ queries = (
+ # schemas
+@@ -2335,9 +2327,9 @@ def schema_create(
+ log.info("'%s' already exists in '%s'", name, dbname)
+ return False
+
+- sub_cmd = 'CREATE SCHEMA "{}"'.format(name)
++ sub_cmd = f'CREATE SCHEMA "{name}"'
+ if owner is not None:
+- sub_cmd = '{} AUTHORIZATION "{}"'.format(sub_cmd, owner)
++ sub_cmd = f'{sub_cmd} AUTHORIZATION "{owner}"'
+
+ ret = _psql_prepare_and_run(
+ ["-c", sub_cmd],
+@@ -2401,7 +2393,7 @@ def schema_remove(
+ return False
+
+ # schema exists, proceed
+- sub_cmd = 'DROP SCHEMA "{}"'.format(name)
++ sub_cmd = f'DROP SCHEMA "{name}"'
+ _psql_prepare_and_run(
+ ["-c", sub_cmd],
+ runas=user,
+@@ -2721,7 +2713,7 @@ def language_create(
+ log.info("Language %s already exists in %s", name, maintenance_db)
+ return False
+
+- query = "CREATE LANGUAGE {}".format(name)
++ query = f"CREATE LANGUAGE {name}"
+
+ ret = _psql_prepare_and_run(
+ ["-c", query],
+@@ -2776,7 +2768,7 @@ def language_remove(
+ log.info("Language %s does not exist in %s", name, maintenance_db)
+ return False
+
+- query = "DROP LANGUAGE {}".format(name)
++ query = f"DROP LANGUAGE {name}"
+
+ ret = _psql_prepare_and_run(
+ ["-c", query],
+@@ -3035,9 +3027,7 @@ def _validate_privileges(object_type, privs, privileges):
+ _perms.append("ALL")
+
+ if object_type not in _PRIVILEGES_OBJECTS:
+- raise SaltInvocationError(
+- "Invalid object_type: {} provided".format(object_type)
+- )
++ raise SaltInvocationError(f"Invalid object_type: {object_type} provided")
+
+ if not set(privs).issubset(set(_perms)):
+ raise SaltInvocationError(
+@@ -3145,9 +3135,7 @@ def privileges_list(
+ query = _make_privileges_list_query(name, object_type, prepend)
+
+ if object_type not in _PRIVILEGES_OBJECTS:
+- raise SaltInvocationError(
+- "Invalid object_type: {} provided".format(object_type)
+- )
++ raise SaltInvocationError(f"Invalid object_type: {object_type} provided")
+
+ rows = psql_query(
+ query,
+@@ -3439,15 +3427,15 @@ def privileges_grant(
+ _grants = ",".join(_privs)
+
+ if object_type in ["table", "sequence"]:
+- on_part = '{}."{}"'.format(prepend, object_name)
++ on_part = f'{prepend}."{object_name}"'
+ elif object_type == "function":
+- on_part = "{}".format(object_name)
++ on_part = f"{object_name}"
+ else:
+- on_part = '"{}"'.format(object_name)
++ on_part = f'"{object_name}"'
+
+ if grant_option:
+ if object_type == "group":
+- query = 'GRANT {} TO "{}" WITH ADMIN OPTION'.format(object_name, name)
++ query = f'GRANT {object_name} TO "{name}" WITH ADMIN OPTION'
+ elif object_type in ("table", "sequence") and object_name.upper() == "ALL":
+ query = 'GRANT {} ON ALL {}S IN SCHEMA {} TO "{}" WITH GRANT OPTION'.format(
+ _grants, object_type.upper(), prepend, name
+@@ -3458,7 +3446,7 @@ def privileges_grant(
+ )
+ else:
+ if object_type == "group":
+- query = 'GRANT {} TO "{}"'.format(object_name, name)
++ query = f'GRANT {object_name} TO "{name}"'
+ elif object_type in ("table", "sequence") and object_name.upper() == "ALL":
+ query = 'GRANT {} ON ALL {}S IN SCHEMA {} TO "{}"'.format(
+ _grants, object_type.upper(), prepend, name
+@@ -3587,12 +3575,12 @@ def privileges_revoke(
+ _grants = ",".join(_privs)
+
+ if object_type in ["table", "sequence"]:
+- on_part = "{}.{}".format(prepend, object_name)
++ on_part = f"{prepend}.{object_name}"
+ else:
+ on_part = object_name
+
+ if object_type == "group":
+- query = "REVOKE {} FROM {}".format(object_name, name)
++ query = f"REVOKE {object_name} FROM {name}"
+ else:
+ query = "REVOKE {} ON {} {} FROM {}".format(
+ _grants, object_type.upper(), on_part, name
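In postgres.py the one functional change is again `pipes.quote` → `shlex.quote`, used when `_run_initdb` flattens its argument list into a single command string; everything else is f-string churn. As an aside, Python 3.8+ offers a stdlib spelling of that join-with-quoting idiom (the patch keeps the explicit comprehension):

```python
import shlex

cmd = ["initdb", "--pgdata=/var/lib/pgsql/data", "--locale=C.UTF-8"]

# what the patched _run_initdb does:
cmdstr = " ".join([shlex.quote(c) for c in cmd])

# equivalent stdlib helper since Python 3.8:
assert cmdstr == shlex.join(cmd)
```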
+diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py
+index 9edf006c299..b7208dc4a64 100644
+--- a/salt/utils/cloud.py
++++ b/salt/utils/cloud.py
+@@ -10,8 +10,8 @@ import hashlib
+ import logging
+ import multiprocessing
+ import os
+-import pipes
+ import re
++import shlex
+ import shutil
+ import socket
+ import stat
+@@ -199,7 +199,7 @@ def __ssh_gateway_arguments(kwargs):
+ "-oUserKnownHostsFile=/dev/null",
+ "-oControlPath=none",
+ str(ssh_gateway_key),
+- "{}@{}".format(ssh_gateway_user, ssh_gateway),
++ f"{ssh_gateway_user}@{ssh_gateway}",
+ "-p",
+ str(ssh_gateway_port),
+ str(ssh_gateway_command),
+@@ -228,18 +228,18 @@ def os_script(os_, vm_=None, opts=None, minion=""):
+ # The user provided an absolute path to the deploy script, let's use it
+ return __render_script(os_, vm_, opts, minion)
+
+- if os.path.isabs("{}.sh".format(os_)):
++ if os.path.isabs(f"{os_}.sh"):
+ # The user provided an absolute path to the deploy script, although no
+ # extension was provided. Let's use it anyway.
+- return __render_script("{}.sh".format(os_), vm_, opts, minion)
++ return __render_script(f"{os_}.sh", vm_, opts, minion)
+
+ for search_path in opts["deploy_scripts_search_path"]:
+ if os.path.isfile(os.path.join(search_path, os_)):
+ return __render_script(os.path.join(search_path, os_), vm_, opts, minion)
+
+- if os.path.isfile(os.path.join(search_path, "{}.sh".format(os_))):
++ if os.path.isfile(os.path.join(search_path, f"{os_}.sh")):
+ return __render_script(
+- os.path.join(search_path, "{}.sh".format(os_)), vm_, opts, minion
++ os.path.join(search_path, f"{os_}.sh"), vm_, opts, minion
+ )
+ # No deploy script was found, return an empty string
+ return ""
+@@ -416,7 +416,7 @@ def bootstrap(vm_, opts=None):
+ )
+ if key_filename is not None and not os.path.isfile(key_filename):
+ raise SaltCloudConfigError(
+- "The defined ssh_keyfile '{}' does not exist".format(key_filename)
++ f"The defined ssh_keyfile '{key_filename}' does not exist"
+ )
+ has_ssh_agent = False
+ if (
+@@ -782,8 +782,8 @@ def wait_for_port(
+ # Don't add new hosts to the host key database
+ "-oStrictHostKeyChecking=no",
+ # make sure ssh can time out on connection lose
+- "-oServerAliveInterval={}".format(server_alive_interval),
+- "-oServerAliveCountMax={}".format(server_alive_count_max),
++ f"-oServerAliveInterval={server_alive_interval}",
++ f"-oServerAliveCountMax={server_alive_count_max}",
+ # Set hosts key database path to /dev/null, i.e., non-existing
+ "-oUserKnownHostsFile=/dev/null",
+ # Don't re-use the SSH connection. Less failures.
+@@ -808,21 +808,21 @@ def wait_for_port(
+ ]
+ )
+ # Netcat command testing remote port
+- command = "nc -z -w5 -q0 {} {}".format(host, port)
++ command = f"nc -z -w5 -q0 {host} {port}"
+ # SSH command
+ pcmd = "ssh {} {}@{} -p {} {}".format(
+ " ".join(ssh_args),
+ gateway["ssh_gateway_user"],
+ ssh_gateway,
+ ssh_gateway_port,
+- pipes.quote("date"),
++ shlex.quote("date"),
+ )
+ cmd = "ssh {} {}@{} -p {} {}".format(
+ " ".join(ssh_args),
+ gateway["ssh_gateway_user"],
+ ssh_gateway,
+ ssh_gateway_port,
+- pipes.quote(command),
++ shlex.quote(command),
+ )
+ log.debug("SSH command: '%s'", cmd)
+
+@@ -893,7 +893,7 @@ class Client:
+ service_name=None,
+ ):
+ self.service_name = service_name
+- self._exe_file = "{}.exe".format(self.service_name)
++ self._exe_file = f"{self.service_name}.exe"
+ self._client = PsExecClient(server, username, password, port, encrypt)
+ self._client._service = ScmrService(self.service_name, self._client.session)
+
+@@ -943,7 +943,7 @@ class Client:
+ # delete the PAExec executable
+ smb_tree = TreeConnect(
+ self._client.session,
+- r"\\{}\ADMIN$".format(self._client.connection.server_name),
++ rf"\\{self._client.connection.server_name}\ADMIN$",
+ )
+ log.info("Connecting to SMB Tree %s", smb_tree.share_name)
+ smb_tree.connect()
+@@ -968,10 +968,10 @@ def run_winexe_command(cmd, args, host, username, password, port=445):
+ """
+ Run a command remotely via the winexe executable
+ """
+- creds = "-U '{}%{}' //{}".format(username, password, host)
+- logging_creds = "-U '{}%XXX-REDACTED-XXX' //{}".format(username, host)
+- cmd = "winexe {} {} {}".format(creds, cmd, args)
+- logging_cmd = "winexe {} {} {}".format(logging_creds, cmd, args)
++ creds = f"-U '{username}%{password}' //{host}"
++ logging_creds = f"-U '{username}%XXX-REDACTED-XXX' //{host}"
++ cmd = f"winexe {creds} {cmd} {args}"
++ logging_cmd = f"winexe {logging_creds} {cmd} {args}"
+ return win_cmd(cmd, logging_command=logging_cmd)
+
+
+@@ -979,7 +979,7 @@ def run_psexec_command(cmd, args, host, username, password, port=445):
+ """
+ Run a command remotely using the psexec protocol
+ """
+- service_name = "PS-Exec-{}".format(uuid.uuid4())
++ service_name = f"PS-Exec-{uuid.uuid4()}"
+ with Client(
+ host, username, password, port=port, encrypt=False, service_name=service_name
+ ) as client:
+@@ -1098,7 +1098,7 @@ def validate_windows_cred_winexe(
+ """
+ Check if the windows credentials are valid
+ """
+- cmd = "winexe -U '{}%{}' //{} \"hostname\"".format(username, password, host)
++ cmd = f"winexe -U '{username}%{password}' //{host} \"hostname\""
+ logging_cmd = "winexe -U '{}%XXX-REDACTED-XXX' //{} \"hostname\"".format(
+ username, host
+ )
+@@ -1230,7 +1230,7 @@ def deploy_windows(
+ winrm_port=5986,
+ winrm_use_ssl=True,
+ winrm_verify_ssl=True,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Copy the install files to a remote Windows box, and execute them
+@@ -1289,20 +1289,20 @@ def deploy_windows(
+
+ salt.utils.smb.mkdirs("salttemp", conn=smb_conn)
+ root_dir = "ProgramData/Salt Project/Salt"
+- salt.utils.smb.mkdirs("{}/conf/pki/minion".format(root_dir), conn=smb_conn)
++ salt.utils.smb.mkdirs(f"{root_dir}/conf/pki/minion", conn=smb_conn)
+ root_dir = "ProgramData\\Salt Project\\Salt"
+
+ if minion_pub:
+ salt.utils.smb.put_str(
+ minion_pub,
+- "{}\\conf\\pki\\minion\\minion.pub".format(root_dir),
++ f"{root_dir}\\conf\\pki\\minion\\minion.pub",
+ conn=smb_conn,
+ )
+
+ if minion_pem:
+ salt.utils.smb.put_str(
+ minion_pem,
+- "{}\\conf\\pki\\minion\\minion.pem".format(root_dir),
++ f"{root_dir}\\conf\\pki\\minion\\minion.pem",
+ conn=smb_conn,
+ )
+
+@@ -1314,7 +1314,7 @@ def deploy_windows(
+ try:
+ salt.utils.smb.put_file(
+ master_sign_pub_file,
+- "{}\\conf\\pki\\minion\\master_sign.pub".format(root_dir),
++ f"{root_dir}\\conf\\pki\\minion\\master_sign.pub",
+ conn=smb_conn,
+ )
+ except Exception as e: # pylint: disable=broad-except
+@@ -1332,26 +1332,27 @@ def deploy_windows(
+ installer = comps[-1]
+ salt.utils.smb.put_file(
+ win_installer,
+- "salttemp\\{}".format(installer),
++ f"salttemp\\{installer}",
+ "C$",
+ conn=smb_conn,
+ )
+
++ cmd = f"c:\\salttemp\\{installer}"
++ args = [
++ "/S",
++ f"/master={_format_master_param(master)}",
++ f"/minion-name={name}",
++ ]
++
+ if use_winrm:
+- winrm_cmd(
+- winrm_session,
+- "c:\\salttemp\\{}".format(installer),
+- ["/S", "/master={}".format(master), "/minion-name={}".format(name)],
+- )
++ winrm_cmd(winrm_session, cmd, args)
+ else:
+- cmd = "c:\\salttemp\\{}".format(installer)
+- args = "/S /master={} /minion-name={}".format(master, name)
+ stdout, stderr, ret_code = run_psexec_command(
+- cmd, args, host, username, password
++ cmd, " ".join(args), host, username, password
+ )
+
+ if ret_code != 0:
+- raise Exception("Fail installer {}".format(ret_code))
++ raise Exception(f"Fail installer {ret_code}")
+
+ # Copy over minion_conf
+ if minion_conf:
+@@ -1367,7 +1368,7 @@ def deploy_windows(
+ if minion_grains:
+ salt.utils.smb.put_str(
+ salt_config_to_yaml(minion_grains, line_break="\r\n"),
+- "{}\\conf\\grains".format(root_dir),
++ f"{root_dir}\\conf\\grains",
+ conn=smb_conn,
+ )
+ # Add special windows minion configuration
+@@ -1384,7 +1385,7 @@ def deploy_windows(
+ minion_conf = dict(minion_conf, **windows_minion_conf)
+ salt.utils.smb.put_str(
+ salt_config_to_yaml(minion_conf, line_break="\r\n"),
+- "{}\\conf\\minion".format(root_dir),
++ f"{root_dir}\\conf\\minion",
+ conn=smb_conn,
+ )
+ # Delete C:\salttmp\ and installer file
+@@ -1394,7 +1395,7 @@ def deploy_windows(
+ winrm_cmd(winrm_session, "rmdir", ["/Q", "/S", "C:\\salttemp\\"])
+ else:
+ salt.utils.smb.delete_file(
+- "salttemp\\{}".format(installer), "C$", conn=smb_conn
++ f"salttemp\\{installer}", "C$", conn=smb_conn
+ )
+ salt.utils.smb.delete_directory("salttemp", "C$", conn=smb_conn)
+ # Shell out to psexec to ensure salt-minion service started
+@@ -1418,8 +1419,8 @@ def deploy_windows(
+ # Fire deploy action
+ fire_event(
+ "event",
+- "{} has been deployed at {}".format(name, host),
+- "salt/cloud/{}/deploy_windows".format(name),
++ f"{name} has been deployed at {host}",
++ f"salt/cloud/{name}/deploy_windows",
+ args={"name": name},
+ sock_dir=opts.get("sock_dir", os.path.join(__opts__["sock_dir"], "master")),
+ transport=opts.get("transport", "zeromq"),
+@@ -1469,7 +1470,7 @@ def deploy_script(
+ master_sign_pub_file=None,
+ cloud_grains=None,
+ force_minion_config=False,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Copy a deploy script to a remote server, execute it, and remove it
+@@ -1485,7 +1486,7 @@ def deploy_script(
+ )
+ if key_filename is not None and not os.path.isfile(key_filename):
+ raise SaltCloudConfigError(
+- "The defined key_filename '{}' does not exist".format(key_filename)
++ f"The defined key_filename '{key_filename}' does not exist"
+ )
+
+ gateway = None
+@@ -1532,35 +1533,28 @@ def deploy_script(
+ ssh_kwargs["password"] = password
+
+ if root_cmd(
+- "test -e '{}'".format(tmp_dir),
+- tty,
+- sudo,
+- allow_failure=True,
+- **ssh_kwargs
++ f"test -e '{tmp_dir}'", tty, sudo, allow_failure=True, **ssh_kwargs
+ ):
+ ret = root_cmd(
+- "sh -c \"( mkdir -p -m 700 '{}' )\"".format(tmp_dir),
++ f"sh -c \"( mkdir -p -m 700 '{tmp_dir}' )\"",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+- "Can't create temporary directory in {} !".format(tmp_dir)
++ f"Can't create temporary directory in {tmp_dir} !"
+ )
+ if sudo:
+ comps = tmp_dir.lstrip("/").rstrip("/").split("/")
+ if comps:
+ if len(comps) > 1 or comps[0] != "tmp":
+ ret = root_cmd(
+- 'chown {} "{}"'.format(username, tmp_dir),
+- tty,
+- sudo,
+- **ssh_kwargs
++ f'chown {username} "{tmp_dir}"', tty, sudo, **ssh_kwargs
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+- "Cant set {} ownership on {}".format(username, tmp_dir)
++ f"Cant set {username} ownership on {tmp_dir}"
+ )
+
+ if not isinstance(file_map, dict):
+@@ -1590,15 +1584,13 @@ def deploy_script(
+ remote_dir = os.path.dirname(remote_file)
+
+ if remote_dir not in remote_dirs:
+- root_cmd(
+- "mkdir -p '{}'".format(remote_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"mkdir -p '{remote_dir}'", tty, sudo, **ssh_kwargs)
+ if ssh_kwargs["username"] != "root":
+ root_cmd(
+ "chown {} '{}'".format(ssh_kwargs["username"], remote_dir),
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ remote_dirs.append(remote_dir)
+ ssh_file(opts, remote_file, kwargs=ssh_kwargs, local_file=local_file)
+@@ -1606,21 +1598,21 @@ def deploy_script(
+
+ # Minion configuration
+ if minion_pem:
+- ssh_file(opts, "{}/minion.pem".format(tmp_dir), minion_pem, ssh_kwargs)
++ ssh_file(opts, f"{tmp_dir}/minion.pem", minion_pem, ssh_kwargs)
+ ret = root_cmd(
+- "chmod 600 '{}/minion.pem'".format(tmp_dir), tty, sudo, **ssh_kwargs
++ f"chmod 600 '{tmp_dir}/minion.pem'", tty, sudo, **ssh_kwargs
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+- "Can't set perms on {}/minion.pem".format(tmp_dir)
++ f"Can't set perms on {tmp_dir}/minion.pem"
+ )
+ if minion_pub:
+- ssh_file(opts, "{}/minion.pub".format(tmp_dir), minion_pub, ssh_kwargs)
++ ssh_file(opts, f"{tmp_dir}/minion.pub", minion_pub, ssh_kwargs)
+
+ if master_sign_pub_file:
+ ssh_file(
+ opts,
+- "{}/master_sign.pub".format(tmp_dir),
++ f"{tmp_dir}/master_sign.pub",
+ kwargs=ssh_kwargs,
+ local_file=master_sign_pub_file,
+ )
+@@ -1638,7 +1630,7 @@ def deploy_script(
+ if minion_grains:
+ ssh_file(
+ opts,
+- "{}/grains".format(tmp_dir),
++ f"{tmp_dir}/grains",
+ salt_config_to_yaml(minion_grains),
+ ssh_kwargs,
+ )
+@@ -1646,24 +1638,22 @@ def deploy_script(
+ minion_conf["grains"] = {"salt-cloud": cloud_grains}
+ ssh_file(
+ opts,
+- "{}/minion".format(tmp_dir),
++ f"{tmp_dir}/minion",
+ salt_config_to_yaml(minion_conf),
+ ssh_kwargs,
+ )
+
+ # Master configuration
+ if master_pem:
+- ssh_file(opts, "{}/master.pem".format(tmp_dir), master_pem, ssh_kwargs)
++ ssh_file(opts, f"{tmp_dir}/master.pem", master_pem, ssh_kwargs)
+ ret = root_cmd(
+- "chmod 600 '{}/master.pem'".format(tmp_dir), tty, sudo, **ssh_kwargs
++ f"chmod 600 '{tmp_dir}/master.pem'", tty, sudo, **ssh_kwargs
+ )
+ if ret:
+- raise SaltCloudSystemExit(
+- "Cant set perms on {}/master.pem".format(tmp_dir)
+- )
++ raise SaltCloudSystemExit(f"Cant set perms on {tmp_dir}/master.pem")
+
+ if master_pub:
+- ssh_file(opts, "{}/master.pub".format(tmp_dir), master_pub, ssh_kwargs)
++ ssh_file(opts, f"{tmp_dir}/master.pub", master_pub, ssh_kwargs)
+
+ if master_conf:
+ if not isinstance(master_conf, dict):
+@@ -1677,34 +1667,31 @@ def deploy_script(
+
+ ssh_file(
+ opts,
+- "{}/master".format(tmp_dir),
++ f"{tmp_dir}/master",
+ salt_config_to_yaml(master_conf),
+ ssh_kwargs,
+ )
+
+ # XXX: We need to make these paths configurable
+- preseed_minion_keys_tempdir = "{}/preseed-minion-keys".format(tmp_dir)
++ preseed_minion_keys_tempdir = f"{tmp_dir}/preseed-minion-keys"
+ if preseed_minion_keys is not None:
+ # Create remote temp dir
+ ret = root_cmd(
+- "mkdir '{}'".format(preseed_minion_keys_tempdir),
+- tty,
+- sudo,
+- **ssh_kwargs
++ f"mkdir '{preseed_minion_keys_tempdir}'", tty, sudo, **ssh_kwargs
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+- "Cant create {}".format(preseed_minion_keys_tempdir)
++ f"Cant create {preseed_minion_keys_tempdir}"
+ )
+ ret = root_cmd(
+- "chmod 700 '{}'".format(preseed_minion_keys_tempdir),
++ f"chmod 700 '{preseed_minion_keys_tempdir}'",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+- "Can't set perms on {}".format(preseed_minion_keys_tempdir)
++ f"Can't set perms on {preseed_minion_keys_tempdir}"
+ )
+ if ssh_kwargs["username"] != "root":
+ root_cmd(
+@@ -1713,7 +1700,7 @@ def deploy_script(
+ ),
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+
+ # Copy pre-seed minion keys
+@@ -1723,10 +1710,10 @@ def deploy_script(
+
+ if ssh_kwargs["username"] != "root":
+ root_cmd(
+- "chown -R root '{}'".format(preseed_minion_keys_tempdir),
++ f"chown -R root '{preseed_minion_keys_tempdir}'",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ if ret:
+ raise SaltCloudSystemExit(
+@@ -1740,25 +1727,21 @@ def deploy_script(
+ for command in preflight_cmds:
+ cmd_ret = root_cmd(command, tty, sudo, **ssh_kwargs)
+ if cmd_ret:
+- raise SaltCloudSystemExit(
+- "Pre-flight command failed: '{}'".format(command)
+- )
++ raise SaltCloudSystemExit(f"Pre-flight command failed: '{command}'")
+
+ # The actual deploy script
+ if script:
+ # got strange escaping issues with sudoer, going onto a
+ # subshell fixes that
+- ssh_file(opts, "{}/deploy.sh".format(tmp_dir), script, ssh_kwargs)
++ ssh_file(opts, f"{tmp_dir}/deploy.sh", script, ssh_kwargs)
+ ret = root_cmd(
+- "sh -c \"( chmod +x '{}/deploy.sh' )\";exit $?".format(tmp_dir),
++ f"sh -c \"( chmod +x '{tmp_dir}/deploy.sh' )\";exit $?",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ if ret:
+- raise SaltCloudSystemExit(
+- "Can't set perms on {}/deploy.sh".format(tmp_dir)
+- )
++ raise SaltCloudSystemExit(f"Can't set perms on {tmp_dir}/deploy.sh")
+
+ time_used = time.mktime(time.localtime()) - time.mktime(starttime)
+ newtimeout = timeout - time_used
+@@ -1774,7 +1757,7 @@ def deploy_script(
+ kwargs=dict(
+ name=name, sock_dir=sock_dir, timeout=newtimeout, queue=queue
+ ),
+- name="DeployScriptCheckAuth({})".format(name),
++ name=f"DeployScriptCheckAuth({name})",
+ )
+ log.debug("Starting new process to wait for salt-minion")
+ process.start()
+@@ -1782,7 +1765,7 @@ def deploy_script(
+ # Run the deploy script
+ if script:
+ if "bootstrap-salt" in script:
+- deploy_command += " -c '{}'".format(tmp_dir)
++ deploy_command += f" -c '{tmp_dir}'"
+ if force_minion_config:
+ deploy_command += " -F"
+ if make_syndic is True:
+@@ -1794,9 +1777,9 @@ def deploy_script(
+ if keep_tmp is True:
+ deploy_command += " -K"
+ if preseed_minion_keys is not None:
+- deploy_command += " -k '{}'".format(preseed_minion_keys_tempdir)
++ deploy_command += f" -k '{preseed_minion_keys_tempdir}'"
+ if script_args:
+- deploy_command += " {}".format(script_args)
++ deploy_command += f" {script_args}"
+
+ if script_env:
+ if not isinstance(script_env, dict):
+@@ -1815,15 +1798,15 @@ def deploy_script(
+ # Upload our environ setter wrapper
+ ssh_file(
+ opts,
+- "{}/environ-deploy-wrapper.sh".format(tmp_dir),
++ f"{tmp_dir}/environ-deploy-wrapper.sh",
+ "\n".join(environ_script_contents),
+ ssh_kwargs,
+ )
+ root_cmd(
+- "chmod +x '{}/environ-deploy-wrapper.sh'".format(tmp_dir),
++ f"chmod +x '{tmp_dir}/environ-deploy-wrapper.sh'",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ # The deploy command is now our wrapper
+ deploy_command = "'{}/environ-deploy-wrapper.sh'".format(
+@@ -1831,22 +1814,20 @@ def deploy_script(
+ )
+ if root_cmd(deploy_command, tty, sudo, **ssh_kwargs) != 0:
+ raise SaltCloudSystemExit(
+- "Executing the command '{}' failed".format(deploy_command)
++ f"Executing the command '{deploy_command}' failed"
+ )
+ log.debug("Executed command '%s'", deploy_command)
+
+ # Remove the deploy script
+ if not keep_tmp:
+- root_cmd(
+- "rm -f '{}/deploy.sh'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/deploy.sh'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/deploy.sh", tmp_dir)
+ if script_env:
+ root_cmd(
+- "rm -f '{}/environ-deploy-wrapper.sh'".format(tmp_dir),
++ f"rm -f '{tmp_dir}/environ-deploy-wrapper.sh'",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ log.debug("Removed %s/environ-deploy-wrapper.sh", tmp_dir)
+
+@@ -1855,57 +1836,40 @@ def deploy_script(
+ else:
+ # Remove minion configuration
+ if minion_pub:
+- root_cmd(
+- "rm -f '{}/minion.pub'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/minion.pub'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/minion.pub", tmp_dir)
+ if minion_pem:
+- root_cmd(
+- "rm -f '{}/minion.pem'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/minion.pem'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/minion.pem", tmp_dir)
+ if minion_conf:
+- root_cmd(
+- "rm -f '{}/grains'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/grains'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/grains", tmp_dir)
+- root_cmd(
+- "rm -f '{}/minion'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/minion'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/minion", tmp_dir)
+ if master_sign_pub_file:
+ root_cmd(
+- "rm -f {}/master_sign.pub".format(tmp_dir),
+- tty,
+- sudo,
+- **ssh_kwargs
++ f"rm -f {tmp_dir}/master_sign.pub", tty, sudo, **ssh_kwargs
+ )
+ log.debug("Removed %s/master_sign.pub", tmp_dir)
+
+ # Remove master configuration
+ if master_pub:
+- root_cmd(
+- "rm -f '{}/master.pub'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/master.pub'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/master.pub", tmp_dir)
+ if master_pem:
+- root_cmd(
+- "rm -f '{}/master.pem'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/master.pem'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/master.pem", tmp_dir)
+ if master_conf:
+- root_cmd(
+- "rm -f '{}/master'".format(tmp_dir), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"rm -f '{tmp_dir}/master'", tty, sudo, **ssh_kwargs)
+ log.debug("Removed %s/master", tmp_dir)
+
+ # Remove pre-seed keys directory
+ if preseed_minion_keys is not None:
+ root_cmd(
+- "rm -rf '{}'".format(preseed_minion_keys_tempdir),
++ f"rm -rf '{preseed_minion_keys_tempdir}'",
+ tty,
+ sudo,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ log.debug("Removed %s", preseed_minion_keys_tempdir)
+
+@@ -1920,15 +1884,13 @@ def deploy_script(
+ # for line in output:
+ # print(line)
+ log.info("Executing %s on the salt-minion", start_action)
+- root_cmd(
+- "salt-call {}".format(start_action), tty, sudo, **ssh_kwargs
+- )
++ root_cmd(f"salt-call {start_action}", tty, sudo, **ssh_kwargs)
+ log.info("Finished executing %s on the salt-minion", start_action)
+ # Fire deploy action
+ fire_event(
+ "event",
+- "{} has been deployed at {}".format(name, host),
+- "salt/cloud/{}/deploy_script".format(name),
++ f"{name} has been deployed at {host}",
++ f"salt/cloud/{name}/deploy_script",
+ args={"name": name, "host": host},
+ sock_dir=opts.get(
+ "sock_dir", os.path.join(__opts__["sock_dir"], "master")
+@@ -1961,7 +1923,7 @@ def run_inline_script(
+ tty=None,
+ opts=None,
+ tmp_dir="/tmp/.saltcloud-inline_script",
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Run the inline script commands, one by one
+@@ -2018,11 +1980,11 @@ def run_inline_script(
+ # TODO: check edge cases (e.g. ssh gateways, salt deploy disabled, etc.)
+ if (
+ root_cmd(
+- 'test -e \\"{}\\"'.format(tmp_dir),
++ f'test -e \\"{tmp_dir}\\"',
+ tty,
+ sudo,
+ allow_failure=True,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ and inline_script
+ ):
+@@ -2030,11 +1992,11 @@ def run_inline_script(
+ for cmd_line in inline_script:
+ log.info("Executing inline command: %s", cmd_line)
+ ret = root_cmd(
+- 'sh -c "( {} )"'.format(cmd_line),
++ f'sh -c "( {cmd_line} )"',
+ tty,
+ sudo,
+ allow_failure=True,
+- **ssh_kwargs
++ **ssh_kwargs,
+ )
+ if ret:
+ log.info("[%s] Output: %s", cmd_line, ret)
+@@ -2138,7 +2100,7 @@ def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs):
+ time.sleep(0.5)
+ if proc.exitstatus != 0 and allow_failure is False:
+ raise SaltCloudSystemExit(
+- "Command '{}' failed. Exit code: {}".format(cmd, proc.exitstatus)
++ f"Command '{cmd}' failed. Exit code: {proc.exitstatus}"
+ )
+ return proc.exitstatus
+ except salt.utils.vt.TerminalException as err:
+@@ -2241,7 +2203,7 @@ def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
+ cmd,
+ error_msg="Failed to upload file '{0}': {1}\n{2}",
+ password_retries=3,
+- **kwargs
++ **kwargs,
+ )
+ finally:
+ if contents is not None:
+@@ -2359,7 +2321,7 @@ def sftp_file(dest_path, contents=None, kwargs=None, local_file=None):
+ cmd,
+ error_msg="Failed to upload file '{0}': {1}\n{2}",
+ password_retries=3,
+- **kwargs
++ **kwargs,
+ )
+ finally:
+ if contents is not None:
+@@ -2419,11 +2381,11 @@ def root_cmd(command, tty, sudo, allow_failure=False, **kwargs):
+
+ if sudo:
+ if sudo_password is None:
+- command = "sudo {}".format(command)
++ command = f"sudo {command}"
+ logging_command = command
+ else:
+- logging_command = 'sudo -S "XXX-REDACTED-XXX" {}'.format(command)
+- command = "sudo -S {}".format(command)
++ logging_command = f'sudo -S "XXX-REDACTED-XXX" {command}'
++ command = f"sudo -S {command}"
+
+ log.debug("Using sudo to run command %s", logging_command)
+
+@@ -2442,9 +2404,9 @@ def root_cmd(command, tty, sudo, allow_failure=False, **kwargs):
+ ssh_args.extend(
+ [
+ # Don't add new hosts to the host key database
+- "-oStrictHostKeyChecking={}".format(host_key_checking),
++ f"-oStrictHostKeyChecking={host_key_checking}",
+ # Set hosts key database path to /dev/null, i.e., non-existing
+- "-oUserKnownHostsFile={}".format(known_hosts_file),
++ f"-oUserKnownHostsFile={known_hosts_file}",
+ # Don't re-use the SSH connection. Less failures.
+ "-oControlPath=none",
+ ]
+@@ -2477,12 +2439,12 @@ def root_cmd(command, tty, sudo, allow_failure=False, **kwargs):
+
+ cmd = "ssh {0} {1[username]}@{1[hostname]} ".format(" ".join(ssh_args), kwargs)
+ logging_command = cmd + logging_command
+- cmd = cmd + pipes.quote(command)
++ cmd = cmd + shlex.quote(command)
+
+ hard_timeout = kwargs.get("hard_timeout")
+ if hard_timeout is not None:
+- logging_command = "timeout {} {}".format(hard_timeout, logging_command)
+- cmd = "timeout {} {}".format(hard_timeout, cmd)
++ logging_command = f"timeout {hard_timeout} {logging_command}"
++ cmd = f"timeout {hard_timeout} {cmd}"
+
+ log.debug("SSH command: '%s'", logging_command)
+
+@@ -2504,7 +2466,7 @@ def check_auth(name, sock_dir=None, queue=None, timeout=300):
+ ret = event.get_event(full=True)
+ if ret is None:
+ continue
+- if ret["tag"] == "salt/minion/{}/start".format(name):
++ if ret["tag"] == f"salt/minion/{name}/start":
+ queue.put(name)
+ newtimeout = 0
+ log.debug("Minion %s is ready to receive commands", name)
+@@ -2550,7 +2512,7 @@ def check_name(name, safe_chars):
+ """
+ Check whether the specified name contains invalid characters
+ """
+- regexp = re.compile("[^{}]".format(safe_chars))
++ regexp = re.compile(f"[^{safe_chars}]")
+ if regexp.search(name):
+ raise SaltCloudException(
+ "{} contains characters not supported by this cloud provider. "
+@@ -2844,7 +2806,7 @@ def request_minion_cachedir(
+ "provider": provider,
+ }
+
+- fname = "{}.p".format(minion_id)
++ fname = f"{minion_id}.p"
+ path = os.path.join(base, "requested", fname)
+ with salt.utils.files.fopen(path, "wb") as fh_:
+ salt.utils.msgpack.dump(data, fh_, encoding=MSGPACK_ENCODING)
+@@ -2875,7 +2837,7 @@ def change_minion_cachedir(
+ if base is None:
+ base = __opts__["cachedir"]
+
+- fname = "{}.p".format(minion_id)
++ fname = f"{minion_id}.p"
+ path = os.path.join(base, cachedir, fname)
+
+ with salt.utils.files.fopen(path, "r") as fh_:
+@@ -2898,7 +2860,7 @@ def activate_minion_cachedir(minion_id, base=None):
+ if base is None:
+ base = __opts__["cachedir"]
+
+- fname = "{}.p".format(minion_id)
++ fname = f"{minion_id}.p"
+ src = os.path.join(base, "requested", fname)
+ dst = os.path.join(base, "active")
+ shutil.move(src, dst)
+@@ -2920,7 +2882,7 @@ def delete_minion_cachedir(minion_id, provider, opts, base=None):
+ base = __opts__["cachedir"]
+
+ driver = next(iter(__opts__["providers"][provider].keys()))
+- fname = "{}.p".format(minion_id)
++ fname = f"{minion_id}.p"
+ for cachedir in "requested", "active":
+ path = os.path.join(base, cachedir, driver, provider, fname)
+ log.debug("path: %s", path)
+@@ -3013,7 +2975,7 @@ def update_bootstrap(config, url=None):
+ # in last case, assuming we got a script content
+ else:
+ script_content = url
+- script_name = "{}.sh".format(hashlib.sha1(script_content).hexdigest())
++ script_name = f"{hashlib.sha1(script_content).hexdigest()}.sh"
+
+ if not script_content:
+ raise ValueError("No content in bootstrap script !")
+@@ -3107,7 +3069,7 @@ def cache_node_list(nodes, provider, opts):
+
+ for node in nodes:
+ diff_node_cache(prov_dir, node, nodes[node], opts)
+- path = os.path.join(prov_dir, "{}.p".format(node))
++ path = os.path.join(prov_dir, f"{node}.p")
+ with salt.utils.files.fopen(path, "wb") as fh_:
+ salt.utils.msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING)
+
+@@ -3162,7 +3124,7 @@ def missing_node_cache(prov_dir, node_list, provider, opts):
+ fire_event(
+ "event",
+ "cached node missing from provider",
+- "salt/cloud/{}/cache_node_missing".format(node),
++ f"salt/cloud/{node}/cache_node_missing",
+ args={"missing node": node},
+ sock_dir=opts.get(
+ "sock_dir", os.path.join(__opts__["sock_dir"], "master")
+@@ -3190,7 +3152,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
+
+ if node is None:
+ return
+- path = "{}.p".format(os.path.join(prov_dir, node))
++ path = f"{os.path.join(prov_dir, node)}.p"
+
+ if not os.path.exists(path):
+ event_data = _strip_cache_events(new_data, opts)
+@@ -3198,7 +3160,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
+ fire_event(
+ "event",
+ "new node found",
+- "salt/cloud/{}/cache_node_new".format(node),
++ f"salt/cloud/{node}/cache_node_new",
+ args={"new_data": event_data},
+ sock_dir=opts.get("sock_dir", os.path.join(__opts__["sock_dir"], "master")),
+ transport=opts.get("transport", "zeromq"),
+@@ -3222,7 +3184,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
+ fire_event(
+ "event",
+ "node data differs",
+- "salt/cloud/{}/cache_node_diff".format(node),
++ f"salt/cloud/{node}/cache_node_diff",
+ args={
+ "new_data": _strip_cache_events(new_data, opts),
+ "cache_data": _strip_cache_events(cache_data, opts),
+@@ -3266,7 +3228,7 @@ def _salt_cloud_force_ascii(exc):
+ errors.
+ """
+ if not isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
+- raise TypeError("Can't handle {}".format(exc))
++ raise TypeError(f"Can't handle {exc}")
+
+ unicode_trans = {
+ # Convert non-breaking space to space
+@@ -3326,7 +3288,7 @@ def store_password_in_keyring(credential_id, username, password=None):
+
+ # pylint: enable=import-error
+ if password is None:
+- prompt = "Please enter password for {}: ".format(credential_id)
++ prompt = f"Please enter password for {credential_id}: "
+ try:
+ password = getpass.getpass(prompt)
+ except EOFError:
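Beyond the f-string and trailing-comma churn, the cloud.py hunks preserve an important pattern: every privileged command is built twice, a real variant that is executed and a redacted twin that is the only thing ever logged. A condensed sketch of the `root_cmd` logic (the function name and signature here are illustrative, not the Salt API):

```python
def build_sudo_command(command, sudo_password=None):
    # condensed from root_cmd() above: the real form is executed,
    # the redacted twin is the only one that reaches the logs
    if sudo_password is None:
        real = logged = f"sudo {command}"
    else:
        real = f"sudo -S {command}"  # password arrives on stdin
        logged = f'sudo -S "XXX-REDACTED-XXX" {command}'
    return real, logged
```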
+diff --git a/salt/utils/http.py b/salt/utils/http.py
+index 91c5cbf08ed..26f2e85c2ee 100644
+--- a/salt/utils/http.py
++++ b/salt/utils/http.py
+@@ -5,7 +5,7 @@ and the like, but also useful for basic HTTP testing.
+ .. versionadded:: 2015.5.0
+ """
+
+-import cgi
++import email.message
+ import gzip
+ import http.client
+ import http.cookiejar
+@@ -84,7 +84,7 @@ except ImportError:
+ HAS_CERTIFI = False
+
+ log = logging.getLogger(__name__)
+-USERAGENT = "Salt/{}".format(salt.version.__version__)
++USERAGENT = f"Salt/{salt.version.__version__}"
+
+
+ def __decompressContent(coding, pgctnt):
+@@ -170,7 +170,7 @@ def query(
+ formdata_fieldname=None,
+ formdata_filename=None,
+ decode_body=True,
+- **kwargs
++ **kwargs,
+ ):
+ """
+ Query a resource, and decode the return data
+@@ -295,7 +295,7 @@ def query(
+ auth = (username, password)
+
+ if agent == USERAGENT:
+- agent = "{} http.query()".format(agent)
++ agent = f"{agent} http.query()"
+ header_dict["User-agent"] = agent
+
+ if backend == "requests":
+@@ -360,14 +360,14 @@ def query(
+ url,
+ params=params,
+ files={formdata_fieldname: (formdata_filename, io.StringIO(data))},
+- **req_kwargs
++ **req_kwargs,
+ )
+ else:
+ result = sess.request(method, url, params=params, data=data, **req_kwargs)
+ result.raise_for_status()
+ if stream is True:
+ # fake a HTTP response header
+- header_callback("HTTP/1.0 {} MESSAGE".format(result.status_code))
++ header_callback(f"HTTP/1.0 {result.status_code} MESSAGE")
+ # fake streaming the content
+ streaming_callback(result.content)
+ return {
+@@ -483,15 +483,12 @@ def query(
+ result_headers = dict(result.info())
+ result_text = result.read()
+ if "Content-Type" in result_headers:
+- res_content_type, res_params = cgi.parse_header(
+- result_headers["Content-Type"]
+- )
+- if (
+- res_content_type.startswith("text/")
+- and "charset" in res_params
+- and not isinstance(result_text, str)
+- ):
+- result_text = result_text.decode(res_params["charset"])
++ msg = email.message.EmailMessage()
++ msg.add_header("Content-Type", result_headers["Content-Type"])
++ if msg.get_content_type().startswith("text/"):
++ content_charset = msg.get_content_charset()
++ if content_charset and not isinstance(result_text, str):
++ result_text = result_text.decode(content_charset)
+ if isinstance(result_text, bytes) and decode_body:
+ result_text = result_text.decode("utf-8")
+ ret["body"] = result_text
+@@ -636,15 +633,12 @@ def query(
+ result_headers = result.headers
+ result_text = result.body
+ if "Content-Type" in result_headers:
+- res_content_type, res_params = cgi.parse_header(
+- result_headers["Content-Type"]
+- )
+- if (
+- res_content_type.startswith("text/")
+- and "charset" in res_params
+- and not isinstance(result_text, str)
+- ):
+- result_text = result_text.decode(res_params["charset"])
++ msg = email.message.EmailMessage()
++ msg.add_header("Content-Type", result_headers["Content-Type"])
++ if msg.get_content_type().startswith("text/"):
++ content_charset = msg.get_content_charset()
++ if content_charset and not isinstance(result_text, str):
++ result_text = result_text.decode(content_charset)
+ if isinstance(result_text, bytes) and decode_body:
+ result_text = result_text.decode("utf-8")
+ ret["body"] = result_text
+@@ -1038,12 +1032,12 @@ def _sanitize_url_components(comp_list, field):
+ """
+ if not comp_list:
+ return ""
+- elif comp_list[0].startswith("{}=".format(field)):
+- ret = "{}=XXXXXXXXXX&".format(field)
++ elif comp_list[0].startswith(f"{field}="):
++ ret = f"{field}=XXXXXXXXXX&"
+ comp_list.remove(comp_list[0])
+ return ret + _sanitize_url_components(comp_list, field)
+ else:
+- ret = "{}&".format(comp_list[0])
++ ret = f"{comp_list[0]}&"
+ comp_list.remove(comp_list[0])
+ return ret + _sanitize_url_components(comp_list, field)
+
+diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py
+index a6a8a279605..d90957a0087 100644
+--- a/salt/utils/jinja.py
++++ b/salt/utils/jinja.py
+@@ -2,13 +2,12 @@
+ Jinja loading utils to enable a more powerful backend for jinja templates
+ """
+
+-
+ import itertools
+ import logging
+ import os.path
+-import pipes
+ import pprint
+ import re
++import shlex
+ import time
+ import uuid
+ import warnings
+@@ -242,11 +241,11 @@ class PrintableDict(OrderedDict):
+ if isinstance(value, str):
+ # keeps quotes around strings
+ # pylint: disable=repr-flag-used-in-string
+- output.append("{!r}: {!r}".format(key, value))
++ output.append(f"{key!r}: {value!r}")
+ # pylint: enable=repr-flag-used-in-string
+ else:
+ # let default output
+- output.append("{!r}: {!s}".format(key, value))
++ output.append(f"{key!r}: {value!s}")
+ return "{" + ", ".join(output) + "}"
+
+ def __repr__(self): # pylint: disable=W0221
+@@ -255,7 +254,7 @@ class PrintableDict(OrderedDict):
+ # Raw string formatter required here because this is a repr
+ # function.
+ # pylint: disable=repr-flag-used-in-string
+- output.append("{!r}: {!r}".format(key, value))
++ output.append(f"{key!r}: {value!r}")
+ # pylint: enable=repr-flag-used-in-string
+ return "{" + ", ".join(output) + "}"
+
+@@ -441,7 +440,7 @@ def quote(txt):
+
+ 'my_text'
+ """
+- return pipes.quote(txt)
++ return shlex.quote(txt)
+
+
+ @jinja_filter()
+@@ -1095,13 +1094,13 @@ class SerializerExtension(Extension):
+ # to the stringified version of the exception.
+ msg += str(exc)
+ else:
+- msg += "{}\n".format(problem)
++ msg += f"{problem}\n"
+ msg += salt.utils.stringutils.get_context(
+ buf, line, marker=" <======================"
+ )
+ raise TemplateRuntimeError(msg)
+ except AttributeError:
+- raise TemplateRuntimeError("Unable to load yaml from {}".format(value))
++ raise TemplateRuntimeError(f"Unable to load yaml from {value}")
+
+ def load_json(self, value):
+ if isinstance(value, TemplateModule):
+@@ -1109,7 +1108,7 @@ class SerializerExtension(Extension):
+ try:
+ return salt.utils.json.loads(value)
+ except (ValueError, TypeError, AttributeError):
+- raise TemplateRuntimeError("Unable to load json from {}".format(value))
++ raise TemplateRuntimeError(f"Unable to load json from {value}")
+
+ def load_text(self, value):
+ if isinstance(value, TemplateModule):
+@@ -1144,7 +1143,7 @@ class SerializerExtension(Extension):
+ return self._parse_profile_block(parser, label, "profile block", body, lineno)
+
+ def _create_profile_id(self, parser):
+- return "_salt_profile_{}".format(parser.free_identifier().name)
++ return f"_salt_profile_{parser.free_identifier().name}"
+
+ def _profile_start(self, label, source):
+ return (label, source, time.time())
+@@ -1186,7 +1185,7 @@ class SerializerExtension(Extension):
+ filter_name = parser.stream.current.value
+ lineno = next(parser.stream).lineno
+ if filter_name not in self.environment.filters:
+- parser.fail("Unable to parse {}".format(filter_name), lineno)
++ parser.fail(f"Unable to parse {filter_name}", lineno)
+
+ parser.stream.expect("name:as")
+ target = parser.parse_assign_target()
+@@ -1225,7 +1224,7 @@ class SerializerExtension(Extension):
+ nodes.Name(target, "store").set_lineno(lineno),
+ nodes.Filter(
+ nodes.Name(target, "load").set_lineno(lineno),
+- "load_{}".format(converter),
++ f"load_{converter}",
+ [],
+ [],
+ None,
+@@ -1234,7 +1233,7 @@ class SerializerExtension(Extension):
+ ).set_lineno(lineno),
+ ]
+ return self._parse_profile_block(
+- parser, import_node.template, "import_{}".format(converter), body, lineno
++ parser, import_node.template, f"import_{converter}", body, lineno
+ )
+
+ def dict_to_sls_yaml_params(self, value, flow_style=False):
+diff --git a/salt/utils/locales.py b/salt/utils/locales.py
+index 8017958d5de..a380ddbe7a2 100644
+--- a/salt/utils/locales.py
++++ b/salt/utils/locales.py
+@@ -1,8 +1,7 @@
+ """
+ the locale utils used by salt
+ """
+-
+-
++import locale
+ import sys
+
+ from salt.utils.decorators import memoize as real_memoize
+@@ -83,3 +82,39 @@ def normalize_locale(loc):
+ comps["codeset"] = comps["codeset"].lower().replace("-", "")
+ comps["charmap"] = ""
+ return join_locale(comps)
++
++
++def getdefaultlocale(envvars=("LC_ALL", "LC_CTYPE", "LANG", "LANGUAGE")):
++ """
++    This function was backported from Python 3.11, which started triggering a
++    deprecation warning about its removal in 3.13.
++ """
++ try:
++ # check if it's supported by the _locale module
++ import _locale
++
++ code, encoding = _locale._getdefaultlocale()
++ except (ImportError, AttributeError):
++ pass
++ else:
++ # make sure the code/encoding values are valid
++ if sys.platform == "win32" and code and code[:2] == "0x":
++ # map windows language identifier to language name
++ code = locale.windows_locale.get(int(code, 0))
++ # ...add other platform-specific processing here, if
++ # necessary...
++ return code, encoding
++
++ # fall back on POSIX behaviour
++ import os
++
++ lookup = os.environ.get
++ for variable in envvars:
++ localename = lookup(variable, None)
++ if localename:
++ if variable == "LANGUAGE":
++ localename = localename.split(":")[0]
++ break
++ else:
++ localename = "C"
++ return locale._parse_localename(localename)
+diff --git a/tests/integration/states/test_ssh_auth.py b/tests/integration/states/test_ssh_auth.py
+index 660c3f62d6a..46ffc9b4115 100644
+--- a/tests/integration/states/test_ssh_auth.py
++++ b/tests/integration/states/test_ssh_auth.py
+@@ -24,6 +24,20 @@ class SSHAuthStateTests(ModuleCase, SaltReturnAssertsMixin):
+ user_ssh_dir = os.path.join(userdetails["home"], ".ssh")
+ authorized_keys_file = os.path.join(user_ssh_dir, "authorized_keys")
+
++ key1 = (
++ # Explicit no ending line break
++ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC3dd5ACsvJhnIOrn6bSOkX5"
++ "KyVDpTYsVAaJj3AmEo6Fr5cHXJFJoJS+Ld8K5vCscPzuXashdYUdrhL1E5Liz"
++ "bza+zneQ5AkJ7sn2NXymD6Bbra+infO4NgnQXbGMp/NyY65jbQGqJeQ081iEV"
++ f"YbDP2zXp6fmrqqmFCaakZfGRbVw== root"
++ )
++ key2 = (
++ "AAAAB3NzaC1yc2EAAAADAQABAAAAgQC7h77HyBPCUDONCs5bI/PrrPwyYJegl0"
++ "f9YWLaBofVYOUl/uSv1ux8zjIoLVs4kguY1ihtIoK2kho4YsjNtIaAd6twdua9"
++ "oqCg2g/54cIK/8WbIjwnb3LFRgyTG5DFuj+7526EdJycAZvhSzIZYui3RUj4Vp"
++ "eMoF7mcB6TIK2/2w=="
++ )
++
+ ret = self.run_state(
+ "file.managed",
+ name=authorized_keys_file,
+@@ -31,23 +45,22 @@ class SSHAuthStateTests(ModuleCase, SaltReturnAssertsMixin):
+ makedirs=True,
+ contents_newline=False,
+ # Explicit no ending line break
+- contents="ssh-rsa AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY== root",
++ contents=key1,
+ )
+
+ ret = self.run_state(
+ "ssh_auth.present",
+- name="AAAAB3NzaC1kcQ9J5bYTEyZ==",
++ name=key2,
+ enc="ssh-rsa",
+ user=username,
+ comment=username,
+ )
+ self.assertSaltTrueReturn(ret)
+- self.assertSaltStateChangesEqual(ret, {"AAAAB3NzaC1kcQ9J5bYTEyZ==": "New"})
++ self.assertSaltStateChangesEqual(ret, {key2: "New"})
+ with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
+ self.assertEqual(
+ fhr.read(),
+- "ssh-rsa AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY== root\n"
+- "ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {}\n".format(username),
++ f"{key1}\nssh-rsa {key2} {username}\n",
+ )
+
+ @pytest.mark.destructive_test
+@@ -60,39 +73,48 @@ class SSHAuthStateTests(ModuleCase, SaltReturnAssertsMixin):
+ authorized_keys_file = os.path.join(user_ssh_dir, "authorized_keys")
+
+ key_fname = "issue_10198.id_rsa.pub"
++ key_contents = (
++ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC3dd5ACsvJhnIOrn6bSOkX5"
++ "KyVDpTYsVAaJj3AmEo6Fr5cHXJFJoJS+Ld8K5vCscPzuXashdYUdrhL1E5Liz"
++ "bza+zneQ5AkJ7sn2NXymD6Bbra+infO4NgnQXbGMp/NyY65jbQGqJeQ081iEV"
++ f"YbDP2zXp6fmrqqmFCaakZfGRbVw== {username}\n"
++ )
+
+ # Create the keyfile that we expect to get back on the state call
+ with salt.utils.files.fopen(
+ os.path.join(RUNTIME_VARS.TMP_PRODENV_STATE_TREE, key_fname), "w"
+ ) as kfh:
+- kfh.write("ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {}\n".format(username))
++ kfh.write(key_contents)
+
+ # Create a bogus key file on base environment
+ with salt.utils.files.fopen(
+ os.path.join(RUNTIME_VARS.TMP_STATE_TREE, key_fname), "w"
+ ) as kfh:
+- kfh.write("ssh-rsa BAAAB3NzaC1kcQ9J5bYTEyZ== {}\n".format(username))
++ kfh.write(
++ "ssh-rsa A!AAB3NzaC1yc2EAAAADAQABAAAAgQC3dd5ACsvJhnIOrn6bSOkX5"
++ "KyVDpTYsVAaJj3AmEo6Fr5cHXJFJoJS+Ld8K5vCscPzuXashdYUdrhL1E5Liz"
++ "bza+zneQ5AkJ7sn2NXymD6Bbra+infO4NgnQXbGMp/NyY65jbQGqJeQ081iEV"
++ f"YbDP2zXp6fmrqqmFCaakZfGRbVw== {username}\n"
++ )
+
+ ret = self.run_state(
+ "ssh_auth.present",
+ name="Setup Keys",
+- source="salt://{}?saltenv=prod".format(key_fname),
++ source=f"salt://{key_fname}?saltenv=prod",
+ enc="ssh-rsa",
+ user=username,
+ comment=username,
+ )
+ self.assertSaltTrueReturn(ret)
+ with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
+- self.assertEqual(
+- fhr.read(), "ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {}\n".format(username)
+- )
++ self.assertEqual(fhr.read(), key_contents)
+
+ os.unlink(authorized_keys_file)
+
+ ret = self.run_state(
+ "ssh_auth.present",
+ name="Setup Keys",
+- source="salt://{}".format(key_fname),
++ source=f"salt://{key_fname}",
+ enc="ssh-rsa",
+ user=username,
+ comment=username,
+@@ -100,6 +122,4 @@ class SSHAuthStateTests(ModuleCase, SaltReturnAssertsMixin):
+ )
+ self.assertSaltTrueReturn(ret)
+ with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
+- self.assertEqual(
+- fhr.read(), "ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {}\n".format(username)
+- )
++ self.assertEqual(fhr.read(), key_contents)
+diff --git a/tests/pytests/unit/modules/state/test_state.py b/tests/pytests/unit/modules/state/test_state.py
+index 7c42646bcf7..cff66defa9d 100644
+--- a/tests/pytests/unit/modules/state/test_state.py
++++ b/tests/pytests/unit/modules/state/test_state.py
+@@ -610,7 +610,7 @@ def test_show_states_missing_sls():
+ chunks_mock = MagicMock(side_effect=[msg])
+ mock = MagicMock(side_effect=["A", None])
+ with patch.object(state, "_check_queue", mock), patch(
+- "salt.state.HighState.compile_low_chunks", chunks_mock
++ "salt.modules.state.salt.state.HighState.compile_low_chunks", chunks_mock
+ ):
+ assert state.show_low_sls("foo") == "A"
+ assert state.show_states("foo") == [msg[0]]
+diff --git a/tests/unit/states/test_module.py b/tests/unit/states/test_module.py
+index a705bd30285..4853c24ca07 100644
+--- a/tests/unit/states/test_module.py
++++ b/tests/unit/states/test_module.py
+@@ -4,7 +4,7 @@
+
+
+ import logging
+-from inspect import ArgSpec
++from inspect import FullArgSpec
+
+ import salt.states.module as module
+ from tests.support.mixins import LoaderModuleMockMixin
+@@ -117,11 +117,25 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+
+ @classmethod
+ def setUpClass(cls):
+- cls.aspec = ArgSpec(
+- args=["hello", "world"], varargs=None, keywords=None, defaults=False
++ cls.aspec = FullArgSpec(
++ args=["hello", "world"],
++ varargs=None,
++ varkw=None,
++ defaults=False,
++            kwonlyargs=[],
++ kwonlydefaults=None,
++ annotations=None,
+ )
+
+- cls.bspec = ArgSpec(args=[], varargs="names", keywords="kwargs", defaults=None)
++ cls.bspec = FullArgSpec(
++ args=[],
++ varargs="names",
++            varkw="kwargs",
++            defaults=None,
++            kwonlyargs=[],
++ kwonlydefaults=None,
++ annotations=None,
++ )
+
+ @classmethod
+ def tearDownClass(cls):
+@@ -137,8 +151,8 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ module.__opts__, {"use_superseded": ["module.run"]}
+ ):
+ ret = module.run(**{CMD: None})
+- if ret["comment"] != "Unavailable function: {}.".format(CMD) or ret["result"]:
+- self.fail("module.run did not fail as expected: {}".format(ret))
++ if ret["comment"] != f"Unavailable function: {CMD}." or ret["result"]:
++ self.fail(f"module.run did not fail as expected: {ret}")
+
+ def test_run_module_not_available_testmode(self):
+ """
+@@ -151,10 +165,10 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ ):
+ ret = module.run(**{CMD: None})
+ if (
+- ret["comment"] != "Unavailable function: {}.".format(CMD)
++ ret["comment"] != f"Unavailable function: {CMD}."
+ or ret["result"] is not False
+ ):
+- self.fail("module.run did not fail as expected: {}".format(ret))
++ self.fail(f"module.run did not fail as expected: {ret}")
+
+ def test_run_module_noop(self):
+ """
+@@ -166,7 +180,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ ):
+ ret = module.run()
+ if ret["comment"] != "No function provided." or ret["result"] is not False:
+- self.fail("module.run did not fail as expected: {}".format(ret))
++ self.fail(f"module.run did not fail as expected: {ret}")
+
+ def test_module_run_hidden_varargs(self):
+ """
+@@ -189,10 +203,10 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ ):
+ ret = module.run(**{CMD: None})
+ if (
+- ret["comment"] != "Function {} to be executed.".format(CMD)
++ ret["comment"] != f"Function {CMD} to be executed."
+ or ret["result"] is not None
+ ):
+- self.fail("module.run failed: {}".format(ret))
++ self.fail(f"module.run failed: {ret}")
+
+ def test_run_missing_arg(self):
+ """
+@@ -203,9 +217,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ module.__opts__, {"use_superseded": ["module.run"]}
+ ):
+ ret = module.run(**{CMD: None})
+- self.assertEqual(
+- ret["comment"], "'{}' failed: Missing arguments: name".format(CMD)
+- )
++ self.assertEqual(ret["comment"], f"'{CMD}' failed: Missing arguments: name")
+
+ def test_run_correct_arg(self):
+ """
+@@ -216,8 +228,8 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ module.__opts__, {"use_superseded": ["module.run"]}
+ ):
+ ret = module.run(**{CMD: ["Fred"]})
+- if ret["comment"] != "{}: Success".format(CMD) or not ret["result"]:
+- self.fail("module.run failed: {}".format(ret))
++ if ret["comment"] != f"{CMD}: Success" or not ret["result"]:
++ self.fail(f"module.run failed: {ret}")
+
+ def test_run_state_apply_result_false(self):
+ """
+@@ -294,9 +306,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ ):
+ ret = module.run(**{CMD: ["bla", {"example": "bla"}]})
+ self.assertFalse(ret["result"])
+- self.assertEqual(
+- ret["comment"], "'{}' failed: Missing arguments: arg2".format(CMD)
+- )
++ self.assertEqual(ret["comment"], f"'{CMD}' failed: Missing arguments: arg2")
+
+ def test_run_42270_kwargs_to_args(self):
+ """
+@@ -390,9 +400,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ with patch.dict(module.__salt__, {}, clear=True):
+ ret = module._legacy_run(CMD)
+ self.assertFalse(ret["result"])
+- self.assertEqual(
+- ret["comment"], "Module function {} is not available".format(CMD)
+- )
++ self.assertEqual(ret["comment"], f"Module function {CMD} is not available")
+
+ def test_module_run_test_true(self):
+ """
+@@ -400,9 +408,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
+ """
+ with patch.dict(module.__opts__, {"test": True}):
+ ret = module._legacy_run(CMD)
+- self.assertEqual(
+- ret["comment"], "Module function {} is set to execute".format(CMD)
+- )
++ self.assertEqual(ret["comment"], f"Module function {CMD} is set to execute")
+
+ def test_module_run_missing_arg(self):
+ """
+diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py
+index b454882f06c..96fe2a54595 100644
+--- a/tests/unit/test_master.py
++++ b/tests/unit/test_master.py
+@@ -56,6 +56,7 @@ class TransportMethodsTest(TestCase):
+ "__format__",
+ "__ge__",
+ "__getattribute__",
++ "__getstate__",
+ "__gt__",
+ "__hash__",
+ "__init__",
+@@ -71,9 +72,9 @@ class TransportMethodsTest(TestCase):
+ "__sizeof__",
+ "__str__",
+ "__subclasshook__",
++ "destroy",
+ "get_method",
+ "run_func",
+- "destroy",
+ ]
+ for name in dir(aes_funcs):
+ if name in aes_funcs.expose_methods:
+@@ -108,6 +109,7 @@ class TransportMethodsTest(TestCase):
+ "__format__",
+ "__ge__",
+ "__getattribute__",
++ "__getstate__",
+ "__gt__",
+ "__hash__",
+ "__init__",
+@@ -128,9 +130,9 @@ class TransportMethodsTest(TestCase):
+ "_prep_pub",
+ "_send_pub",
+ "_send_ssh_pub",
+- "get_method",
+- "destroy",
+ "connect",
++ "destroy",
++ "get_method",
+ ]
+ for name in dir(clear_funcs):
+ if name in clear_funcs.expose_methods:
+--
+2.44.0
+
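Note on the cgi removal above: the http.py hunks port charset detection from cgi.parse_header (removed in Python 3.13) to email.message.EmailMessage. A minimal standalone sketch of the same technique, with an illustrative header value:

    import email.message

    def charset_from_content_type(header_value):
        # EmailMessage parses the MIME type and its parameters,
        # covering what cgi.parse_header used to do here.
        msg = email.message.EmailMessage()
        msg.add_header("Content-Type", header_value)
        if msg.get_content_type().startswith("text/"):
            return msg.get_content_charset()  # None when no charset param
        return None

    body = b"caf\xc3\xa9"
    charset = charset_from_content_type("text/html; charset=utf-8")
    if charset and not isinstance(body, str):
        body = body.decode(charset)  # -> "café"
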
diff --git a/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch b/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
new file mode 100644
index 0000000..ea63d91
--- /dev/null
+++ b/fix-salt.utils.stringutils.to_str-calls-to-make-it-w.patch
@@ -0,0 +1,141 @@
+From b4b2c59bfd479d59faeaf0e4d26d672828a519c8 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 25 Nov 2020 15:09:41 +0300
+Subject: [PATCH] Fix salt.utils.stringutils.to_str calls to make it
+ work with numeric uid/gid
+
+Fix upstream tests to work with 3006.
+---
+ salt/modules/file.py | 22 ++++++++++++-------
+ salt/states/file.py | 11 ++++++++--
+ .../unit/modules/file/test_file_check.py | 10 ++++-----
+ 3 files changed, 28 insertions(+), 15 deletions(-)
+
+diff --git a/salt/modules/file.py b/salt/modules/file.py
+index 4612d65511..55b236fe41 100644
+--- a/salt/modules/file.py
++++ b/salt/modules/file.py
+@@ -5127,14 +5127,20 @@ def check_perms(
+ is_dir = os.path.isdir(name)
+ is_link = os.path.islink(name)
+
++ def __safe_to_str(s):
++ try:
++ return salt.utils.stringutils.to_str(s)
++        except Exception:  # pylint: disable=broad-except
++ return salt.utils.stringutils.to_str(str(s))
++
+ # Check and make user/group/mode changes, then verify they were successful
+ if user:
+ if (
+ salt.utils.platform.is_windows() and not user_to_uid(user) == cur["uid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not salt.utils.stringutils.to_str(user) == cur["user"]
+- and not salt.utils.stringutils.to_str(user) == cur["uid"]
++ and not __safe_to_str(user) == cur["user"]
++ and not user == cur["uid"]
+ ):
+ perms["cuser"] = user
+
+@@ -5143,8 +5149,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not group_to_gid(group) == cur["gid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not salt.utils.stringutils.to_str(group) == cur["group"]
+- and not salt.utils.stringutils.to_str(group) == cur["gid"]
++ and not __safe_to_str(group) == cur["group"]
++ and not group == cur["gid"]
+ ):
+ perms["cgroup"] = group
+
+@@ -5188,8 +5194,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not user_to_uid(user) == post["uid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not salt.utils.stringutils.to_str(user) == post["user"]
+- and not salt.utils.stringutils.to_str(user) == post["uid"]
++ and not __safe_to_str(user) == post["user"]
++ and not user == post["uid"]
+ ):
+ if __opts__["test"] is True:
+ ret["changes"]["user"] = user
+@@ -5204,8 +5210,8 @@ def check_perms(
+ salt.utils.platform.is_windows() and not group_to_gid(group) == post["gid"]
+ ) or (
+ not salt.utils.platform.is_windows()
+- and not salt.utils.stringutils.to_str(group) == post["group"]
+- and not salt.utils.stringutils.to_str(group) == post["gid"]
++ and not __safe_to_str(group) == post["group"]
++ and not group == post["gid"]
+ ):
+ if __opts__["test"] is True:
+ ret["changes"]["group"] = group
+diff --git a/salt/states/file.py b/salt/states/file.py
+index 024e5e34ce..9630ff7096 100644
+--- a/salt/states/file.py
++++ b/salt/states/file.py
+@@ -864,15 +864,22 @@ def _check_dir_meta(name, user, group, mode, follow_symlinks=False):
+ if not stats:
+ changes["directory"] = "new"
+ return changes
++
++ def __safe_to_str(s):
++ try:
++ return salt.utils.stringutils.to_str(s)
++        except Exception:  # pylint: disable=broad-except
++ return salt.utils.stringutils.to_str(str(s))
++
+ if (
+ user is not None
+- and salt.utils.stringutils.to_str(user) != stats["user"]
++ and __safe_to_str(user) != stats["user"]
+ and user != stats.get("uid")
+ ):
+ changes["user"] = user
+ if (
+ group is not None
+- and salt.utils.stringutils.to_str(group) != stats["group"]
++ and __safe_to_str(group) != stats["group"]
+ and group != stats.get("gid")
+ ):
+ changes["group"] = group
+diff --git a/tests/pytests/unit/modules/file/test_file_check.py b/tests/pytests/unit/modules/file/test_file_check.py
+index ce86acd7fc..2294e6760b 100644
+--- a/tests/pytests/unit/modules/file/test_file_check.py
++++ b/tests/pytests/unit/modules/file/test_file_check.py
+@@ -17,7 +17,7 @@ def configure_loader_modules():
+ return {
+ filemod: {
+ "__context__": {},
+- "__opts__": {"test": False},
++ "__opts__": {"test": True},
+ }
+ }
+
+@@ -172,7 +172,7 @@ def test_check_managed_changes_follow_symlinks(a_link, tfile):
+ ),
+ # no user/group changes needed by id
+ (
+- {"user": 3001, "group": 4001},
++ {"user": 2001, "group": 1001},
+ {},
+ ),
+ ],
+@@ -184,9 +184,9 @@ def test_check_perms_user_group_name_and_id(input, expected):
+ stat_out = {
+ "user": "luser",
+ "group": "lgroup",
+- "uid": 3001,
+- "gid": 4001,
+- "mode": "123",
++ "uid": 2001,
++ "gid": 1001,
++ "mode": "0123",
+ }
+
+ patch_stats = patch(
+--
+2.39.2
+
+
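The __safe_to_str helper added in both hunks exists because salt.utils.stringutils.to_str rejects non-string input such as a numeric uid/gid. A rough standalone equivalent of the fallback; the inner to_str here is a simplified stand-in for the Salt util:

    def to_str(s):
        # Simplified stand-in for salt.utils.stringutils.to_str, which
        # accepts only str/bytes/bytearray and raises TypeError otherwise.
        if isinstance(s, str):
            return s
        if isinstance(s, (bytes, bytearray)):
            return s.decode("utf-8")
        raise TypeError(f"expected str of {s!r}")

    def safe_to_str(s):
        # Fall back to str() so numeric uid/gid values such as 1000
        # compare cleanly against the "user"/"group" strings from stat.
        try:
            return to_str(s)
        except Exception:
            return to_str(str(s))

    assert safe_to_str(1000) == "1000"
    assert safe_to_str("root") == "root"
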
diff --git a/fix-some-issues-detected-in-salt-support-cli-module-.patch b/fix-some-issues-detected-in-salt-support-cli-module-.patch
new file mode 100644
index 0000000..d0de813
--- /dev/null
+++ b/fix-some-issues-detected-in-salt-support-cli-module-.patch
@@ -0,0 +1,118 @@
+From 38de9af6bd243d35464713e0ee790255d3b40a7e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Fri, 23 Jun 2023 13:02:51 +0100
+Subject: [PATCH] Fix some issues detected in "salt-support" CLI, module
+ and tests (bsc#1211591) (#580)
+
+* saltsupport: avoid debug traceback due to a missing import
+
+* Use yaml and json wrappers provided by Salt utils
+
+* Remove unnecessary call to deprecated setup_logfile_logger
+
+* Move unittest saltsupport tests to proper place
+
+* Fix test assertion error due to wrong capturing of the message
+---
+ salt/cli/support/__init__.py | 4 ++--
+ salt/cli/support/collector.py | 6 ++----
+ tests/{pytests => }/unit/cli/test_support.py | 0
+ tests/unit/modules/test_saltsupport.py | 6 +++---
+ 4 files changed, 7 insertions(+), 9 deletions(-)
+ rename tests/{pytests => }/unit/cli/test_support.py (100%)
+
+diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py
+index 59c2609e07..0a7da72e93 100644
+--- a/salt/cli/support/__init__.py
++++ b/salt/cli/support/__init__.py
+@@ -6,7 +6,7 @@ import os
+
+ import jinja2
+ import salt.exceptions
+-import yaml
++import salt.utils.yaml
+
+ log = logging.getLogger(__name__)
+
+@@ -48,7 +48,7 @@ def get_profile(profile, caller, runner):
+ try:
+ rendered_template = _render_profile(profile_path, caller, runner)
+ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template))
+- data.update(yaml.load(rendered_template))
++ data.update(salt.utils.yaml.load(rendered_template))
+ except Exception as ex:
+ log.debug(ex, exc_info=True)
+ raise salt.exceptions.SaltException(
+diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py
+index 1879cc5220..0ba987580c 100644
+--- a/salt/cli/support/collector.py
++++ b/salt/cli/support/collector.py
+@@ -1,6 +1,5 @@
+ import builtins as exceptions
+ import copy
+-import json
+ import logging
+ import os
+ import sys
+@@ -16,10 +15,10 @@ import salt.cli.support.intfunc
+ import salt.cli.support.localrunner
+ import salt.defaults.exitcodes
+ import salt.exceptions
+-import salt.ext.six as six
+ import salt.output.table_out
+ import salt.runner
+ import salt.utils.files
++import salt.utils.json
+ import salt.utils.parsers
+ import salt.utils.platform
+ import salt.utils.process
+@@ -169,7 +168,7 @@ class SupportDataCollector:
+ content = None
+
+ if content is None:
+- data = json.loads(json.dumps(data))
++ data = salt.utils.json.loads(salt.utils.json.dumps(data))
+ if isinstance(data, dict) and data.get("return"):
+ data = data.get("return")
+ content = yaml.safe_dump(data, default_flow_style=False, indent=4)
+@@ -506,7 +505,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
+ self.out.error(ex)
+ else:
+ if self.config["log_level"] not in ("quiet",):
+- self.setup_logfile_logger()
+ salt.utils.verify.verify_log(self.config)
+ salt.cli.support.log = log # Pass update logger so trace is available
+
+diff --git a/tests/pytests/unit/cli/test_support.py b/tests/unit/cli/test_support.py
+similarity index 100%
+rename from tests/pytests/unit/cli/test_support.py
+rename to tests/unit/cli/test_support.py
+diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py
+index 4ef04246b9..2afdd69b3e 100644
+--- a/tests/unit/modules/test_saltsupport.py
++++ b/tests/unit/modules/test_saltsupport.py
+@@ -251,8 +251,8 @@ professor: Farnsworth
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
+ support.sync("group-name")
+ assert (
+- ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found'
+- in str(err)
++ 'Support archive "/mnt/storage/three-support-222-222.bz2" was not found'
++ in str(err.value)
+ )
+
+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
+@@ -274,7 +274,7 @@ professor: Farnsworth
+
+ with pytest.raises(salt.exceptions.SaltInvocationError) as err:
+ support.sync("group-name", name="lost.bz2")
+- assert ' Support archive "lost.bz2" was not found' in str(err)
++ assert 'Support archive "lost.bz2" was not found' in str(err.value)
+
+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy")))
+ @patch("os.path.exists", MagicMock(return_value=False))
+--
+2.41.0
+
+
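The assertion fixes at the end of this patch hinge on pytest.raises yielding an ExceptionInfo wrapper: str(err) includes extra location text, while str(err.value) is exactly the exception message. A small illustration; the test name and message are illustrative:

    import pytest

    def test_message_lives_on_err_value():
        with pytest.raises(ValueError) as err:
            raise ValueError('Support archive "lost.bz2" was not found')
        # err is an ExceptionInfo; err.value is the ValueError itself.
        assert 'Support archive "lost.bz2" was not found' in str(err.value)
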
diff --git a/fix-status.diskusage-and-exclude-some-tests-to-run-w.patch b/fix-status.diskusage-and-exclude-some-tests-to-run-w.patch
new file mode 100644
index 0000000..604d58c
--- /dev/null
+++ b/fix-status.diskusage-and-exclude-some-tests-to-run-w.patch
@@ -0,0 +1,243 @@
+From 4555f215614c2f2d5c4b5c376264df9b3f23a55b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 18 Jun 2024 15:55:31 +0100
+Subject: [PATCH] Fix "status.diskusage" and exclude some tests to run
+ when testing Salt Bundle (#659)
+
+* Show warning instead of crashing when stats cannot be fetched
+
+* Skip tests that are not compatible with Salt Bundle
+
+* test_syndic_eauth: do not produce error if docker service is not running
+
+* test_cmdmod: assert properly in case of DeprecationWarnings
+
+* Include path as part of output in case of errors
+
+Co-authored-by: Marek Czernek
+
+---------
+
+Co-authored-by: Marek Czernek
+---
+ salt/modules/status.py | 14 +++++++++-----
+ tests/integration/modules/test_pip.py | 5 +++++
+ tests/integration/ssh/test_state.py | 5 +++++
+ tests/pytests/functional/modules/test_pip.py | 4 ++++
+ .../functional/modules/test_virtualenv_mod.py | 5 +++++
+ tests/pytests/functional/states/test_pip_state.py | 4 ++++
+ tests/pytests/integration/cli/test_syndic_eauth.py | 3 +++
+ tests/pytests/integration/modules/test_cmdmod.py | 4 +++-
+ .../pytests/integration/netapi/test_ssh_client.py | 6 ++++++
+ tests/pytests/integration/ssh/conftest.py | 9 +++++++++
+ tests/unit/utils/test_thin.py | 4 ++++
+ 11 files changed, 57 insertions(+), 6 deletions(-)
+ create mode 100644 tests/pytests/integration/ssh/conftest.py
+
+diff --git a/salt/modules/status.py b/salt/modules/status.py
+index 33e5d7b8df5..8d6241a9dce 100644
+--- a/salt/modules/status.py
++++ b/salt/modules/status.py
+@@ -1053,11 +1053,15 @@ def diskusage(*args):
+ ret = {}
+ for path in selected:
+ if os.path.exists(path):
+- fsstats = os.statvfs(path)
+- blksz = fsstats.f_bsize
+- available = fsstats.f_bavail * blksz
+- total = fsstats.f_blocks * blksz
+- ret[path] = {"available": available, "total": total}
++ try:
++ fsstats = os.statvfs(path)
++ blksz = fsstats.f_bsize
++ available = fsstats.f_bavail * blksz
++ total = fsstats.f_blocks * blksz
++ ret[path] = {"available": available, "total": total}
++ except OSError as exc:
++                log.warning("Cannot get stats from '%s': %s", path, exc)
++ ret[path] = {"available": None, "total": None}
+ return ret
+
+
+diff --git a/tests/integration/modules/test_pip.py b/tests/integration/modules/test_pip.py
+index d57e9cd2aea..85045dec90b 100644
+--- a/tests/integration/modules/test_pip.py
++++ b/tests/integration/modules/test_pip.py
+@@ -2,6 +2,7 @@ import os
+ import pprint
+ import re
+ import shutil
++import sys
+ import tempfile
+
+ import pytest
+@@ -16,6 +17,10 @@ from tests.support.runtests import RUNTIME_VARS
+
+
+ @pytest.mark.skip_if_binaries_missing(*KNOWN_BINARY_NAMES, check_all=False)
++@pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++)
+ @pytest.mark.windows_whitelisted
+ class PipModuleTest(ModuleCase):
+ def setUp(self):
+diff --git a/tests/integration/ssh/test_state.py b/tests/integration/ssh/test_state.py
+index 69245454e85..daa478b45be 100644
+--- a/tests/integration/ssh/test_state.py
++++ b/tests/integration/ssh/test_state.py
+@@ -2,6 +2,7 @@ import glob
+ import logging
+ import os
+ import shutil
++import sys
+ import threading
+ import time
+
+@@ -18,6 +19,10 @@ log = logging.getLogger(__name__)
+
+
+ @pytest.mark.slow_test
++@pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++)
+ class SSHStateTest(SSHCase):
+ """
+ testing the state system with salt-ssh
+diff --git a/tests/pytests/functional/modules/test_pip.py b/tests/pytests/functional/modules/test_pip.py
+index e04baa7c43f..1f0104e3e6d 100644
+--- a/tests/pytests/functional/modules/test_pip.py
++++ b/tests/pytests/functional/modules/test_pip.py
+@@ -23,6 +23,10 @@ from tests.support.helpers import VirtualEnv
+ @pytest.mark.requires_network
+ @pytest.mark.slow_test
+ @pytest.mark.skip_if_binaries_missing("virtualenv", reason="Needs virtualenv binary")
++@pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++)
+ def test_list_available_packages(modules, pip_version, tmp_path):
+ with VirtualEnv(venv_dir=tmp_path, pip_requirement=pip_version) as virtualenv:
+ virtualenv.install("-U", pip_version)
+diff --git a/tests/pytests/functional/modules/test_virtualenv_mod.py b/tests/pytests/functional/modules/test_virtualenv_mod.py
+index 2b6abf91e23..69e1866c6e3 100644
+--- a/tests/pytests/functional/modules/test_virtualenv_mod.py
++++ b/tests/pytests/functional/modules/test_virtualenv_mod.py
+@@ -1,4 +1,5 @@
+ import shutil
++import sys
+
+ import pytest
+
+@@ -68,6 +69,10 @@ def test_clear(virtualenv, venv_dir, modules):
+ bool(salt.utils.path.which("transactional-update")),
+ reason="Skipping on transactional systems",
+ )
++@pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++)
+ def test_virtualenv_ver(virtualenv, venv_dir):
+ ret = virtualenv.create(str(venv_dir))
+ assert ret
+diff --git a/tests/pytests/functional/states/test_pip_state.py b/tests/pytests/functional/states/test_pip_state.py
+index 1f2080f1f86..28c1f9fd1f3 100644
+--- a/tests/pytests/functional/states/test_pip_state.py
++++ b/tests/pytests/functional/states/test_pip_state.py
+@@ -84,6 +84,10 @@ def create_virtualenv(modules):
+ bool(salt.utils.path.which("transactional-update")),
+ reason="Skipping on transactional systems",
+ )
++@pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++)
+ def test_pip_installed_removed(modules, states):
+ """
+ Tests installed and removed states
+diff --git a/tests/pytests/integration/cli/test_syndic_eauth.py b/tests/pytests/integration/cli/test_syndic_eauth.py
+index dde4c25bc91..f2d36c13abb 100644
+--- a/tests/pytests/integration/cli/test_syndic_eauth.py
++++ b/tests/pytests/integration/cli/test_syndic_eauth.py
+@@ -68,6 +68,9 @@ def syndic_network():
+ try:
+ network = client.networks.create(name="syndic_test_net", ipam=ipam_config)
+ yield network.name
++ except Exception as e:
++        # Docker failed; it's likely an environment issue, so just skip
++ pytest.skip(f"Docker failed with error {e}")
+ finally:
+ if network is not None:
+ network.remove()
+diff --git a/tests/pytests/integration/modules/test_cmdmod.py b/tests/pytests/integration/modules/test_cmdmod.py
+index d0b993ddbcf..20a6f808933 100644
+--- a/tests/pytests/integration/modules/test_cmdmod.py
++++ b/tests/pytests/integration/modules/test_cmdmod.py
+@@ -75,7 +75,9 @@ def test_blacklist_glob(salt_call_cli):
+ )
+
+ assert (
+- ret.stderr.rstrip()
++ ret.stderr.rstrip().split("\n")[
++ -1
++ ] # Taking only the last line in case of DeprecationWarnings
+ == "Error running 'cmd.run': The shell command \"bad_command --foo\" is not permitted"
+ )
+
+diff --git a/tests/pytests/integration/netapi/test_ssh_client.py b/tests/pytests/integration/netapi/test_ssh_client.py
+index 42db6d0eacd..457c151c94f 100644
+--- a/tests/pytests/integration/netapi/test_ssh_client.py
++++ b/tests/pytests/integration/netapi/test_ssh_client.py
+@@ -1,3 +1,5 @@
++import sys
++
+ import pytest
+
+ import salt.netapi
+@@ -8,6 +10,10 @@ from tests.support.mock import patch
+ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.requires_sshd_server,
++ pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/conftest.py b/tests/pytests/integration/ssh/conftest.py
+new file mode 100644
+index 00000000000..ba6e5f2773a
+--- /dev/null
++++ b/tests/pytests/integration/ssh/conftest.py
+@@ -0,0 +1,9 @@
++import sys
++
++import pytest
++
++
++@pytest.fixture(scope="package", autouse=True)
++def _auto_skip_on_salt_bundle():
++ if "venv-salt-minion" in sys.executable:
++ pytest.skip("Skipping for Salt Bundle (tests are not compatible)")
+diff --git a/tests/unit/utils/test_thin.py b/tests/unit/utils/test_thin.py
+index c4e9c3b3bef..b31199976c8 100644
+--- a/tests/unit/utils/test_thin.py
++++ b/tests/unit/utils/test_thin.py
+@@ -1383,6 +1383,10 @@ class SSHThinTestCase(TestCase):
+ "virtualenv", reason="Needs virtualenv binary"
+ )
+ @pytest.mark.skip_on_windows(reason="salt-ssh does not deploy to/from windows")
++ @pytest.mark.skipif(
++ "venv-salt-minion" in sys.executable,
++ reason="Skipping for Salt Bundle (tests are not compatible)",
++ )
+ def test_thin_dir(self):
+ """
+ Test the thin dir to make sure salt-call can run
+--
+2.44.0
+
+
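The status.diskusage change above wraps os.statvfs in a try/except because a path can pass os.path.exists yet still fail to stat (for example a stale NFS mount or a restricted FUSE mount). A trimmed-down sketch of the guarded loop, outside the Salt module:

    import logging
    import os

    log = logging.getLogger(__name__)

    def diskusage(*paths):
        ret = {}
        for path in paths:
            if not os.path.exists(path):
                continue
            try:
                fsstats = os.statvfs(path)
            except OSError as exc:
                # Report the failure instead of crashing the whole call.
                log.warning("Cannot get stats from '%s': %s", path, exc)
                ret[path] = {"available": None, "total": None}
                continue
            ret[path] = {
                "available": fsstats.f_bavail * fsstats.f_bsize,
                "total": fsstats.f_blocks * fsstats.f_bsize,
            }
        return ret
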
diff --git a/fix-test_debian-to-work-in-our-infrastructure-676.patch b/fix-test_debian-to-work-in-our-infrastructure-676.patch
new file mode 100644
index 0000000..d639188
--- /dev/null
+++ b/fix-test_debian-to-work-in-our-infrastructure-676.patch
@@ -0,0 +1,25 @@
+From a6d27a6f50bbbea539ec64bf96a5b9755e32bf69 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 4 Sep 2024 13:11:05 +0200
+Subject: [PATCH] Fix test_debian to work in our infrastructure (#676)
+
+---
+ tests/pytests/functional/states/pkgrepo/test_debian.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/pytests/functional/states/pkgrepo/test_debian.py b/tests/pytests/functional/states/pkgrepo/test_debian.py
+index 87716706d5e..7bda100b634 100644
+--- a/tests/pytests/functional/states/pkgrepo/test_debian.py
++++ b/tests/pytests/functional/states/pkgrepo/test_debian.py
+@@ -205,7 +205,7 @@ def ubuntu_state_tree(system_aptsources, state_tree, grains):
+ - dist: {{ codename }}
+ - file: /etc/apt/sources.list.d/firefox-beta.list
+ - keyid: CE49EC21
+- - keyserver: keyserver.ubuntu.com
++ - keyserver: hkp://keyserver.ubuntu.com:80
+ {%- endif %}
+
+ {%- if backports %}{%- do ubuntu_repos.append('kubuntu-ppa') %}
+--
+2.46.0
+
diff --git a/fix-test_system-flaky-setup_teardown-fn.patch b/fix-test_system-flaky-setup_teardown-fn.patch
new file mode 100644
index 0000000..b3cbcd0
--- /dev/null
+++ b/fix-test_system-flaky-setup_teardown-fn.patch
@@ -0,0 +1,44 @@
+From 5567f2bd51d66b7797c986cf64f79f71ca57eb63 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 4 Sep 2024 13:10:44 +0200
+Subject: [PATCH] Fix test_system flaky setup_teardown fn
+
+---
+ tests/pytests/functional/modules/test_system.py | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/tests/pytests/functional/modules/test_system.py b/tests/pytests/functional/modules/test_system.py
+index 2cd03a3a3e4..270aafbe2cd 100644
+--- a/tests/pytests/functional/modules/test_system.py
++++ b/tests/pytests/functional/modules/test_system.py
+@@ -4,10 +4,12 @@ import os
+ import signal
+ import subprocess
+ import textwrap
++import time
+
+ import pytest
+
+ import salt.utils.files
++from salt.exceptions import CommandExecutionError
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -80,7 +82,13 @@ def setup_teardown_vars(file, service, system):
+ file.remove("/etc/machine-info")
+
+ if _systemd_timesyncd_available_:
+- res = service.start("systemd-timesyncd")
++ try:
++ res = service.start("systemd-timesyncd")
++ except CommandExecutionError:
++ # We possibly did too many restarts in too short time
++ # Wait 10s (default systemd timeout) and try again
++ time.sleep(10)
++ res = service.start("systemd-timesyncd")
+ assert res
+
+
+--
+2.46.0
+
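The fixture fix above retries a unit start once after sleeping out systemd's default 10-second start rate-limit window. The same retry shape detached from the test fixture, using systemctl directly rather than the Salt service module:

    import subprocess
    import time

    def start_unit(unit, retry_delay=10):
        # "start request repeated too quickly" means the unit hit its
        # start rate limit; waiting out the default 10 s interval and
        # retrying once usually clears it.
        try:
            subprocess.run(["systemctl", "start", unit], check=True)
        except subprocess.CalledProcessError:
            time.sleep(retry_delay)
            subprocess.run(["systemctl", "start", unit], check=True)

    start_unit("systemd-timesyncd")
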
diff --git a/fix-tests-failures-after-repo.saltproject.io-depreca.patch b/fix-tests-failures-after-repo.saltproject.io-depreca.patch
new file mode 100644
index 0000000..8cdad99
--- /dev/null
+++ b/fix-tests-failures-after-repo.saltproject.io-depreca.patch
@@ -0,0 +1,518 @@
+From a630c6a707a1d5227b4a1fa8f0f751fefd3ef47f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 19 Feb 2025 13:56:01 +0000
+Subject: [PATCH] Fix tests failures after "repo.saltproject.io"
+ deprecation (#704)
+
+* Use broadcom.com instead of repo.saltproject.io for test_cp
+
+* Change repo.saltproject.io to new url
+
+---------
+
+Co-authored-by: Daniel A. Wozniak
+Co-authored-by: twangboy
+---
+ README.rst | 4 +-
+ doc/_themes/saltstack2/layout.html | 15 +--
+ doc/conf.py | 12 +-
+ doc/ref/configuration/delta_proxy.rst | 6 +-
+ doc/topics/cloud/windows.rst | 2 +-
+ pkg/tests/support/helpers.py | 115 ++++++++++++------
+ salt/modules/saltutil.py | 4 +-
+ salt/runners/manage.py | 5 +-
+ salt/states/pkgrepo.py | 6 +-
+ tests/integration/modules/test_cp.py | 41 ++++---
+ tests/pytests/functional/modules/test_pkg.py | 8 +-
+ .../functional/states/pkgrepo/test_debian.py | 6 +-
+ .../integration/netapi/test_ssh_client.py | 3 +-
+ tests/support/win_installer.py | 1 +
+ 14 files changed, 135 insertions(+), 93 deletions(-)
+
+diff --git a/README.rst b/README.rst
+index f5121f1a74..77806aa14a 100644
+--- a/README.rst
++++ b/README.rst
+@@ -93,7 +93,9 @@ for more information.
+
+ To download and install Salt, see:
+ * `The Salt install guide `_
+-* `Salt Project repository `_
++* `Salt Project repository `_
++* `Salt Project debian repository `_
++* `Salt Project redhat repository `_
+
+
+ Technical support
+diff --git a/doc/_themes/saltstack2/layout.html b/doc/_themes/saltstack2/layout.html
+index 04bff89e1f..83918a7fb3 100644
+--- a/doc/_themes/saltstack2/layout.html
++++ b/doc/_themes/saltstack2/layout.html
+@@ -157,16 +157,11 @@
+
+
+
+diff --git a/doc/conf.py b/doc/conf.py
+index 653d912c20..24420d402e 100644
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -178,17 +178,17 @@ rst_prolog = """\
+ .. |windownload| raw:: html
+
+ Python3 x86: Salt-Minion-{release}-x86-Setup.exe
+- | md5
++ href="https://packages.broadcom.com/artifactory/saltproject-generic/windows/{release}/Salt-Minion-{release}-Py3-x86-Setup.exe">Salt-Minion-{release}-x86-Setup.exe
++ | md5
+
+ Python3 AMD64: Salt-Minion-{release}-AMD64-Setup.exe
+- | md5
++ href="https://packages.broadcom.com/artifactory/saltproject-generic/windows/{release}/Salt-Minion-{release}-Py3-AMD64-Setup.exe">Salt-Minion-{release}-AMD64-Setup.exe
++ | md5
+
+ .. |osxdownloadpy3| raw:: html
+
+- x86_64: salt-{release}-py3-x86_64.pkg
+- | md5
++ x86_64: salt-{release}-py3-x86_64.pkg
++ | md5
+
+ """.format(
+ release=stripped_release
+diff --git a/doc/ref/configuration/delta_proxy.rst b/doc/ref/configuration/delta_proxy.rst
+index be1831da39..bce5f821c9 100644
+--- a/doc/ref/configuration/delta_proxy.rst
++++ b/doc/ref/configuration/delta_proxy.rst
+@@ -146,10 +146,8 @@ Before installing the delta proxy minion, ensure that:
+ Install or upgrade Salt
+ -----------------------
+ Ensure your Salt masters are running at least Salt version 3004. For instructions
+-on installing or upgrading Salt, see `repo.saltproject.io
+-`_. For RedHat systems, see `Install or Upgrade Salt
+-`_.
+-
++on installing or upgrading Salt, see the `Salt install guide
++`_.
+
+
+ .. _delta-proxy-install:
+diff --git a/doc/topics/cloud/windows.rst b/doc/topics/cloud/windows.rst
+index 9dfdde6db5..79d6665a5a 100644
+--- a/doc/topics/cloud/windows.rst
++++ b/doc/topics/cloud/windows.rst
+@@ -62,7 +62,7 @@ from saltstack.com:
+
+ * `SaltStack Download Area`__
+
+-.. __: https://repo.saltproject.io/windows/
++.. __: https://packages.broadcom.com/artifactory/saltproject-generic/windows/
+
+ .. _new-pywinrm:
+
+diff --git a/pkg/tests/support/helpers.py b/pkg/tests/support/helpers.py
+index 90abf8b88e..ce23f699b6 100644
+--- a/pkg/tests/support/helpers.py
++++ b/pkg/tests/support/helpers.py
+@@ -636,8 +636,7 @@ class SaltPkgInstall:
+
+ def install_previous(self):
+ """
+- Install previous version. This is used for
+- upgrade tests.
++ Install previous version. This is used for upgrade tests.
+ """
+ major_ver = self.major
+ minor_ver = self.minor
+@@ -648,16 +647,12 @@ class SaltPkgInstall:
+ distro_name = self.distro_name
+ if distro_name == "centos" or distro_name == "fedora":
+ distro_name = "redhat"
+- root_url = "salt/py3/"
+- if self.classic:
+- root_url = "py3/"
++ root_url = "https://packages.broadcom.com/artifactory"
+
+ if self.distro_name in ["redhat", "centos", "amazon", "fedora", "vmware"]:
+ for fp in pathlib.Path("/etc", "yum.repos.d").glob("epel*"):
+ fp.unlink()
+- gpg_key = "SALTSTACK-GPG-KEY.pub"
+- if self.distro_version == "9":
+- gpg_key = "SALTSTACK-GPG-KEY2.pub"
++
+ if platform.is_aarch64():
+ arch = "aarch64"
+ else:
+@@ -694,46 +689,86 @@ class SaltPkgInstall:
+ arch = "arm64"
+ else:
+ arch = "amd64"
++
+ pathlib.Path("/etc/apt/keyrings").mkdir(parents=True, exist_ok=True)
++ gpg_full_path = "/etc/apt/keyrings/salt-archive-keyring.gpg"
++
++ # download the gpg pub key
+ download_file(
+- f"https://repo.saltproject.io/{root_url}{distro_name}/{self.distro_version}/{arch}/{major_ver}/salt-archive-keyring.gpg",
+- "/etc/apt/keyrings/salt-archive-keyring.gpg",
++ f"{root_url}/api/security/keypair/SaltProjectKey/public",
++ f"{gpg_full_path}",
+ )
+- with open(
++ with salt.utils.files.fopen(
+ pathlib.Path("/etc", "apt", "sources.list.d", "salt.list"), "w"
+ ) as fp:
+ fp.write(
+- f"deb [signed-by=/etc/apt/keyrings/salt-archive-keyring.gpg arch={arch}] "
+- f"https://repo.saltproject.io/{root_url}{distro_name}/{self.distro_version}/{arch}/{major_ver} {self.distro_codename} main"
++ f"deb [signed-by={gpg_full_path} arch={arch}] "
++ f"{root_url}/saltproject-deb/ {self.distro_codename} main"
+ )
+- ret = self.proc.run(self.pkg_mngr, "update")
+- self._check_retcode(ret)
+- ret = self.proc.run(
+- self.pkg_mngr,
+- "install",
+- *self.salt_pkgs,
+- "-y",
+- )
+ self._check_retcode(ret)
++
++ cmd = [self.pkg_mngr, "install", *self.salt_pkgs, "-y"]
++
++ if downgrade:
++ pref_file = pathlib.Path("/etc", "apt", "preferences.d", "salt.pref")
++ pref_file.parent.mkdir(exist_ok=True)
++ # TODO: There's probably something I should put in here to say what version
++ # TODO: But maybe that's done elsewhere, hopefully in self.salt_pkgs
++ pref_file.write_text(
++ textwrap.dedent(
++ f"""\
++ Package: salt*
++ Pin: origin "{root_url}/saltproject-deb"
++ Pin-Priority: 1001
++ """
++ ),
++ encoding="utf-8",
++ )
++ cmd.append("--allow-downgrades")
++ env = os.environ.copy()
++ env["DEBIAN_FRONTEND"] = "noninteractive"
++ extra_args = [
++ "-o",
++ "DPkg::Options::=--force-confdef",
++ "-o",
++ "DPkg::Options::=--force-confold",
++ ]
++ self.proc.run(self.pkg_mngr, "update", *extra_args, env=env)
++
++ cmd.extend(extra_args)
++
++ ret = self.proc.run(*cmd, env=env)
++        # Pre-relenv packages don't get downgraded cleanly programmatically.
++        # They work manually, and the install tests after downgrades will catch problems with the install.
++ # Let's not check the returncode if this is the case
++ if not (
++ downgrade
++ and packaging.version.parse(self.prev_version)
++ < packaging.version.parse("3006.0")
++ ):
++ self._check_retcode(ret)
++ if downgrade:
++ pref_file.unlink()
+ self.stop_services()
+ elif platform.is_windows():
+ self.onedir = True
+ self.installer_pkg = True
+ self.bin_dir = self.install_dir / "bin"
+- self.run_root = self.bin_dir / f"salt.exe"
+- self.ssm_bin = self.bin_dir / "ssm.exe"
+- if self.file_ext == "msi":
+- self.ssm_bin = self.install_dir / "ssm.exe"
++ self.run_root = self.bin_dir / "salt.exe"
++ self.ssm_bin = self.install_dir / "ssm.exe"
+
+- if not self.classic:
+- win_pkg = f"salt-{full_version}-windows-amd64.{self.file_ext}"
+- win_pkg_url = f"https://repo.saltproject.io/salt/py3/windows/{full_version}/{win_pkg}"
++ if self.file_ext == "exe":
++ win_pkg = (
++ f"Salt-Minion-{self.prev_version}-Py3-AMD64-Setup.{self.file_ext}"
++ )
++ elif self.file_ext == "msi":
++ win_pkg = f"Salt-Minion-{self.prev_version}-Py3-AMD64.{self.file_ext}"
+ else:
+- if self.file_ext == "msi":
+- win_pkg = f"Salt-Minion-{min_ver}-1-Py3-AMD64.{self.file_ext}"
+- elif self.file_ext == "exe":
+- win_pkg = f"Salt-Minion-{min_ver}-1-Py3-AMD64-Setup.{self.file_ext}"
+- win_pkg_url = f"https://repo.saltproject.io/windows/{win_pkg}"
++ log.debug(f"Unknown windows file extension: {self.file_ext}")
++
++ win_pkg_url = (
++ f"{root_url}/saltproject-generic/windows/{major_ver}/{win_pkg}"
++ )
+ pkg_path = pathlib.Path(r"C:\TEMP", win_pkg)
+ pkg_path.parent.mkdir(exist_ok=True)
+ ret = requests.get(win_pkg_url)
+@@ -763,12 +798,16 @@ class SaltPkgInstall:
+ self._install_system_service()
+
+ elif platform.is_darwin():
+- if self.classic:
+- mac_pkg = f"salt-{min_ver}.{minor_ver}-1-py3-x86_64.pkg"
+- mac_pkg_url = f"https://repo.saltproject.io/osx/{mac_pkg}"
++ if relenv and platform.is_aarch64():
++ arch = "arm64"
++ elif platform.is_aarch64() and self.classic:
++ arch = "arm64"
+ else:
+- mac_pkg = f"salt-{min_ver}.{minor_ver}-1-macos-x86_64.pkg"
+- mac_pkg_url = f"https://repo.saltproject.io/salt/py3/macos/{major_ver}.{minor_ver}-1/{mac_pkg}"
++ arch = "x86_64"
++
++ mac_pkg = f"salt-{self.prev_version}-py3-{arch}.pkg"
++ mac_pkg_url = f"{root_url}/saltproject-generic/macos/{major_ver}/{mac_pkg}"
++
+ mac_pkg_path = f"/tmp/{mac_pkg}"
+ if not os.path.exists(mac_pkg_path):
+ download_file(
+diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py
+index a692c3f34d..320b9c34fa 100644
+--- a/salt/modules/saltutil.py
++++ b/salt/modules/saltutil.py
+@@ -128,8 +128,8 @@ def _sync(form, saltenv=None, extmod_whitelist=None, extmod_blacklist=None):
+ def update(version=None):
+ """
+ Update the salt minion from the URL defined in opts['update_url']
+- VMware, Inc provides the latest builds here:
+- update_url: https://repo.saltproject.io/windows/
++ Broadcom, Inc provides the latest builds here:
++ update_url: https://packages.broadcom.com/artifactory/saltproject-generic/windows/
+
+ Be aware that as of 2014-8-11 there's a bug in esky such that only the
+ latest version available in the update_url can be downloaded and installed.
+diff --git a/salt/runners/manage.py b/salt/runners/manage.py
+index 9dc67ed728..81197ca41f 100644
+--- a/salt/runners/manage.py
++++ b/salt/runners/manage.py
+@@ -772,7 +772,7 @@ def bootstrap_psexec(
+
+ installer_url
+ URL of minion installer executable. Defaults to the latest version from
+- https://repo.saltproject.io/windows/
++ https://packages.broadcom.com/artifactory/saltproject-generic/windows/
+
+ username
+ Optional user name for login on remote computer.
+@@ -790,6 +790,9 @@ def bootstrap_psexec(
+ salt-run manage.bootstrap_psexec hosts='host1,host2' installer_url='http://exampledomain/salt-installer.exe'
+ """
+
++    # TODO: Need to make sure this gets the latest version from the new repo location
++ # TODO: Similar to tests/support/win_installer.py
++ # TODO: Maybe need to move that ^^^^ to a salt util
+ if not installer_url:
+ base_url = "https://repo.saltproject.io/windows/"
+ source = urllib.request.urlopen(base_url).read()
+diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
+index f041644287..4ef5fd9c2f 100644
+--- a/salt/states/pkgrepo.py
++++ b/salt/states/pkgrepo.py
+@@ -99,17 +99,17 @@ Using ``aptkey: False`` with ``key_url`` example:
+
+ .. code-block:: yaml
+
+- deb [signed-by=/etc/apt/keyrings/salt-archive-keyring.gpg arch=amd64] https://repo.saltproject.io/py3/ubuntu/18.04/amd64/latest bionic main:
++ deb [signed-by=/etc/apt/keyrings/salt-archive-keyring.gpg arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ bionic main:
+ pkgrepo.managed:
+ - file: /etc/apt/sources.list.d/salt.list
+- - key_url: https://repo.saltproject.io/py3/ubuntu/18.04/amd64/latest/salt-archive-keyring.gpg
++ - key_url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public
+ - aptkey: False
+
+ Using ``aptkey: False`` with ``keyserver`` and ``keyid``:
+
+ .. code-block:: yaml
+
+- deb [signed-by=/etc/apt/keyrings/salt-archive-keyring.gpg arch=amd64] https://repo.saltproject.io/py3/ubuntu/18.04/amd64/latest bionic main:
++ deb [signed-by=/etc/apt/keyrings/salt-archive-keyring.gpg arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ bionic main:
+ pkgrepo.managed:
+ - file: /etc/apt/sources.list.d/salt.list
+ - keyserver: keyserver.ubuntu.com
+diff --git a/tests/integration/modules/test_cp.py b/tests/integration/modules/test_cp.py
+index cd3e4c2f5a..d417f90ddc 100644
+--- a/tests/integration/modules/test_cp.py
++++ b/tests/integration/modules/test_cp.py
+@@ -231,12 +231,15 @@ class CPModuleTest(ModuleCase):
+ """
+ cp.get_url with https:// source given
+ """
+- self.run_function("cp.get_url", ["https://repo.saltproject.io/index.html", tgt])
++ self.run_function(
++ "cp.get_url",
++ ["https://packages.broadcom.com/artifactory/saltproject-generic/", tgt],
++ )
+ with salt.utils.files.fopen(tgt, "r") as instructions:
+ data = salt.utils.stringutils.to_unicode(instructions.read())
+- self.assertIn("Salt Project", data)
+- self.assertIn("Package", data)
+- self.assertIn("Repo", data)
++ self.assertIn("Index of saltproject", data)
++ self.assertIn("onedir", data)
++ self.assertIn("Artifactory Online Server", data)
+ self.assertNotIn("AYBABTU", data)
+
+ @pytest.mark.slow_test
+@@ -245,14 +248,15 @@ class CPModuleTest(ModuleCase):
+ cp.get_url with https:// source given and destination omitted.
+ """
+ ret = self.run_function(
+- "cp.get_url", ["https://repo.saltproject.io/index.html"]
++ "cp.get_url",
++ ["https://packages.broadcom.com/artifactory/saltproject-generic/"],
+ )
+
+ with salt.utils.files.fopen(ret, "r") as instructions:
+ data = salt.utils.stringutils.to_unicode(instructions.read())
+- self.assertIn("Salt Project", data)
+- self.assertIn("Package", data)
+- self.assertIn("Repo", data)
++ self.assertIn("Index of saltproject", data)
++ self.assertIn("onedir", data)
++ self.assertIn("Artifactory Online Server", data)
+ self.assertNotIn("AYBABTU", data)
+
+ @pytest.mark.slow_test
+@@ -266,16 +270,19 @@ class CPModuleTest(ModuleCase):
+ tgt = None
+ while time.time() - start <= timeout:
+ ret = self.run_function(
+- "cp.get_url", ["https://repo.saltproject.io/index.html", tgt]
++ "cp.get_url",
++ ["https://packages.broadcom.com/artifactory/saltproject-generic/", tgt],
+ )
+ if ret.find("HTTP 599") == -1:
+ break
+ time.sleep(sleep)
+ if ret.find("HTTP 599") != -1:
+- raise Exception("https://repo.saltproject.io/index.html returned 599 error")
+- self.assertIn("Salt Project", ret)
+- self.assertIn("Package", ret)
+- self.assertIn("Repo", ret)
++ raise Exception(
++ "https://packages.broadcom.com/artifactory/saltproject-generic/ returned 599 error"
++ )
++ self.assertIn("Index of saltproject", ret)
++ self.assertIn("onedir", ret)
++ self.assertIn("Artifactory Online Server", ret)
+ self.assertNotIn("AYBABTU", ret)
+
+ @pytest.mark.slow_test
+@@ -344,11 +351,11 @@ class CPModuleTest(ModuleCase):
+ """
+ cp.get_file_str with https:// source given
+ """
+- src = "https://repo.saltproject.io/index.html"
++ src = "https://packages.broadcom.com/artifactory/saltproject-generic/"
+ ret = self.run_function("cp.get_file_str", [src])
+- self.assertIn("Salt Project", ret)
+- self.assertIn("Package", ret)
+- self.assertIn("Repo", ret)
++ self.assertIn("Index of saltproject", ret)
++ self.assertIn("onedir", ret)
++ self.assertIn("Artifactory Online Server", ret)
+ self.assertNotIn("AYBABTU", ret)
+
+ @pytest.mark.slow_test
+diff --git a/tests/pytests/functional/modules/test_pkg.py b/tests/pytests/functional/modules/test_pkg.py
+index 82d0801965..addb3da3d1 100644
+--- a/tests/pytests/functional/modules/test_pkg.py
++++ b/tests/pytests/functional/modules/test_pkg.py
+@@ -130,12 +130,8 @@ def test_mod_del_repo(grains, modules, refresh_db):
+ elif grains["os_family"] == "RedHat":
+ repo = "saltstack"
+ name = "SaltStack repo for RHEL/CentOS {}".format(grains["osmajorrelease"])
+- baseurl = "https://repo.saltproject.io/py3/redhat/{}/x86_64/latest/".format(
+- grains["osmajorrelease"]
+- )
+- gpgkey = "https://repo.saltproject.io/py3/redhat/{}/x86_64/latest/SALTSTACK-GPG-KEY.pub".format(
+- grains["osmajorrelease"]
+- )
++ baseurl = "https://packages.broadcom.com/artifactory/saltproject-rpm/"
++ gpgkey = "https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public"
+ gpgcheck = 1
+ enabled = 1
+ ret = modules.pkg.mod_repo(
+diff --git a/tests/pytests/functional/states/pkgrepo/test_debian.py b/tests/pytests/functional/states/pkgrepo/test_debian.py
+index 7bda100b63..307fcb5819 100644
+--- a/tests/pytests/functional/states/pkgrepo/test_debian.py
++++ b/tests/pytests/functional/states/pkgrepo/test_debian.py
+@@ -616,8 +616,8 @@ class Repo:
+ @alt_repo.default
+ def _default_alt_repo(self):
+ """
+- Use an alternative repo, packages do not
+- exist for the OS on repo.saltproject.io
++ Use an alternative repo, packages do not exist for the OS on
++ packages.broadcom.com
+ """
+ if (
+ self.grains["osfullname"] == "Ubuntu"
+@@ -777,7 +777,7 @@ def test_adding_repo_file_signedby_alt_file(pkgrepo, states, repo):
+ assert repo.repo_content in ret.comment
+
+ key_file = repo.key_file.parent / "salt-alt-key.gpg"
+- repo_content = "deb [arch=amd64 signed-by={}] https://repo.saltproject.io/py3/debian/10/amd64/latest buster main".format(
++ repo_content = "deb [arch=amd64 signed-by={}] https://packages.broadcom.com/artifactory/saltproject-deb/ buster main".format(
+ str(key_file)
+ )
+ ret = states.pkgrepo.managed(
+diff --git a/tests/pytests/integration/netapi/test_ssh_client.py b/tests/pytests/integration/netapi/test_ssh_client.py
+index 457c151c94..7dd540d9b9 100644
+--- a/tests/pytests/integration/netapi/test_ssh_client.py
++++ b/tests/pytests/integration/netapi/test_ssh_client.py
+@@ -149,7 +149,8 @@ def test_shell_inject_ssh_priv(
+ """
+ # ZDI-CAN-11143
+ path = tmp_path / "test-11143"
+- tgts = ["repo.saltproject.io", "www.zerodayinitiative.com"]
++ tgts = ["packages.broadcom.com", "www.zerodayinitiative.com"]
++ ret = None
+ for tgt in tgts:
+ low = {
+ "roster": "cache",
+diff --git a/tests/support/win_installer.py b/tests/support/win_installer.py
+index 6a2f387dc8..d67105e8a0 100644
+--- a/tests/support/win_installer.py
++++ b/tests/support/win_installer.py
+@@ -10,6 +10,7 @@
+ """
+
+ import hashlib
++from html.parser import HTMLParser
+
+ import requests
+
+--
+2.48.1
+
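The downgrade support added to pkg/tests/support/helpers.py works by pinning every salt* package to the Broadcom apt origin at priority 1001; a priority above 1000 is what allows apt to install a version older than the one currently installed. A standalone sketch of writing that pin (paths and URL are taken from the patch; writing under /etc requires root):

    import pathlib
    import textwrap

    root_url = "https://packages.broadcom.com/artifactory"
    pref_file = pathlib.Path("/etc", "apt", "preferences.d", "salt.pref")
    pref_file.parent.mkdir(exist_ok=True)
    # Priority 1001 beats the installed version, permitting a downgrade.
    pref_file.write_text(
        textwrap.dedent(
            f"""\
            Package: salt*
            Pin: origin "{root_url}/saltproject-deb"
            Pin-Priority: 1001
            """
        ),
        encoding="utf-8",
    )

With the pin in place, apt-get install --allow-downgrades resolves against the pinned origin.
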
diff --git a/fix-tests-failures-and-errors-when-detected-on-vm-ex.patch b/fix-tests-failures-and-errors-when-detected-on-vm-ex.patch
new file mode 100644
index 0000000..624a532
--- /dev/null
+++ b/fix-tests-failures-and-errors-when-detected-on-vm-ex.patch
@@ -0,0 +1,772 @@
+From 737b0bd931c07239d50e7395eb7425c06f485848 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Thu, 14 Mar 2024 13:03:00 +0000
+Subject: [PATCH] Fix tests failures and errors when detected on VM
+ execution from Salt Shaker (#636)
+
+* test_chmod: fix test expectation
+
+* test_pkg: Adjust package expectation for SUSE family
+
+* test_docker_network: Skip non-supported operation for SUSE family
+
+* Fix tests failing due to wrong docker-py version
+
+* test_version: skip test in packaged scenario when setup.py is missing
+
+* Fix issue related to docker version used during testing
+
+* Fix test errors when setup.py is not available
+
+* test_loader: do not run if setup.py is missing
+
+* test_install: Fix test errors when setup.py is not available
+
+* test_master: use a right service name expected on SUSE family
+
+* test_jinja_filters: prevent test failure when the "which" binary is not available
+
+* Prevent errors when x509 utils cannot be loaded
+
+* test_thin: skip test if virtualenv binary is missing
+---
+ tests/integration/pillar/test_git_pillar.py | 12 +++++++++++-
+ tests/pytests/functional/cache/test_consul.py | 5 +++++
+ tests/pytests/functional/cache/test_mysql.py | 5 +++++
+ tests/pytests/functional/loader/test_loader.py | 9 +++++++++
+ .../functional/modules/state/test_jinja_filters.py | 4 ++--
+ tests/pytests/functional/modules/test_cmdmod.py | 2 +-
+ tests/pytests/functional/modules/test_dockermod.py | 8 +++++++-
+ tests/pytests/functional/modules/test_pkg.py | 2 ++
+ tests/pytests/functional/modules/test_swarm.py | 6 +++++-
+ tests/pytests/functional/states/rabbitmq/conftest.py | 11 +++++++++++
+ .../functional/states/rabbitmq/test_cluster.py | 7 ++++++-
+ .../functional/states/rabbitmq/test_plugin.py | 8 +++++++-
+ .../functional/states/rabbitmq/test_policy.py | 7 ++++++-
+ .../functional/states/rabbitmq/test_upstream.py | 7 ++++++-
+ .../pytests/functional/states/rabbitmq/test_user.py | 7 ++++++-
+ .../pytests/functional/states/rabbitmq/test_vhost.py | 7 ++++++-
+ .../pytests/functional/states/test_docker_network.py | 7 ++++++-
+ tests/pytests/functional/states/test_pkg.py | 2 +-
+ tests/pytests/functional/test_version.py | 9 +++++++++
+ tests/pytests/integration/modules/test_virt.py | 5 +++++
+ tests/pytests/integration/modules/test_x509_v2.py | 2 +-
+ tests/pytests/integration/ssh/test_log.py | 7 ++++++-
+ tests/pytests/integration/ssh/test_master.py | 2 +-
+ tests/pytests/integration/ssh/test_py_versions.py | 7 ++++++-
+ tests/pytests/integration/ssh/test_ssh_setup.py | 7 ++++++-
+ tests/pytests/integration/states/test_x509_v2.py | 2 +-
+ tests/pytests/scenarios/setup/test_install.py | 8 ++++++++
+ tests/pytests/unit/modules/test_pip.py | 8 ++++++++
+ tests/pytests/unit/utils/test_x509.py | 3 ++-
+ tests/unit/states/test_pip_state.py | 6 ++++++
+ tests/unit/utils/test_thin.py | 3 +++
+ 31 files changed, 164 insertions(+), 21 deletions(-)
+
+diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
+index 5b4cbda95c9..d56785f97c2 100644
+--- a/tests/integration/pillar/test_git_pillar.py
++++ b/tests/integration/pillar/test_git_pillar.py
+@@ -79,6 +79,7 @@ from salt.utils.gitfs import (
+ PYGIT2_VERSION,
+ FileserverConfigError,
+ )
++from salt.utils.versions import Version
+ from tests.support.gitfs import ( # pylint: disable=unused-import
+ PASSWORD,
+ USERNAME,
+@@ -101,11 +102,20 @@ try:
+ except Exception: # pylint: disable=broad-except
+ HAS_PYGIT2 = False
+
++docker = pytest.importorskip("docker")
++
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
++
+ pytestmark = [
+ SKIP_INITIAL_PHOTONOS_FAILURES,
+ pytest.mark.skip_on_platforms(windows=True, darwin=True),
+- pytest.mark.skipif(INSIDE_CONTAINER, reason="Communication problems between containers."),
++ pytest.mark.skipif(
++ INSIDE_CONTAINER, reason="Communication problems between containers."
++ ),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/cache/test_consul.py b/tests/pytests/functional/cache/test_consul.py
+index c6e16d2588e..30dc6925f26 100644
+--- a/tests/pytests/functional/cache/test_consul.py
++++ b/tests/pytests/functional/cache/test_consul.py
+@@ -8,6 +8,7 @@ from saltfactories.utils import random_string
+
+ import salt.cache
+ import salt.loader
++from salt.utils.versions import Version
+ from tests.pytests.functional.cache.helpers import run_common_cache_tests
+
+ docker = pytest.importorskip("docker")
+@@ -20,6 +21,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/cache/test_mysql.py b/tests/pytests/functional/cache/test_mysql.py
+index e15fc732a4a..93c6c7c6f6f 100644
+--- a/tests/pytests/functional/cache/test_mysql.py
++++ b/tests/pytests/functional/cache/test_mysql.py
+@@ -5,6 +5,7 @@ import pytest
+
+ import salt.cache
+ import salt.loader
++from salt.utils.versions import Version
+ from tests.pytests.functional.cache.helpers import run_common_cache_tests
+ from tests.support.pytest.mysql import * # pylint: disable=wildcard-import,unused-wildcard-import
+
+@@ -18,6 +19,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/loader/test_loader.py b/tests/pytests/functional/loader/test_loader.py
+index 963d33f59c3..e81ef126ca3 100644
+--- a/tests/pytests/functional/loader/test_loader.py
++++ b/tests/pytests/functional/loader/test_loader.py
+@@ -1,14 +1,23 @@
+ import json
++import os
+
+ import pytest
+
+ from salt.utils.versions import Version
+ from tests.support.helpers import SaltVirtualEnv
+ from tests.support.pytest.helpers import FakeSaltExtension
++from tests.support.runtests import RUNTIME_VARS
++
++MISSING_SETUP_PY_FILE = not os.path.exists(
++ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
++)
+
+ pytestmark = [
+ # These are slow because they create a virtualenv and install salt in it
+ pytest.mark.slow_test,
++ pytest.mark.skipif(
++ MISSING_SETUP_PY_FILE, reason="This test only works if setup.py is available"
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/modules/state/test_jinja_filters.py b/tests/pytests/functional/modules/state/test_jinja_filters.py
+index 220310aaaf0..cc8ffcb731b 100644
+--- a/tests/pytests/functional/modules/state/test_jinja_filters.py
++++ b/tests/pytests/functional/modules/state/test_jinja_filters.py
+@@ -798,9 +798,9 @@ def _filter_id(value):
+ ),
+ Filter(
+ name="which",
+- expected={"ret": salt.utils.path.which("which")},
++ expected={"ret": salt.utils.path.which("ls")},
+ sls="""
+- {% set result = 'which' | which() %}
++ {% set result = 'ls' | which() %}
+ test:
+ module.run:
+ - name: test.echo
+diff --git a/tests/pytests/functional/modules/test_cmdmod.py b/tests/pytests/functional/modules/test_cmdmod.py
+index d30b474c6d2..adaf469c283 100644
+--- a/tests/pytests/functional/modules/test_cmdmod.py
++++ b/tests/pytests/functional/modules/test_cmdmod.py
+@@ -105,7 +105,7 @@ def test_run(cmdmod):
+ template="jinja",
+ python_shell=True,
+ )
+- == "func-tests-minion"
++ == "func-tests-minion-opts"
+ )
+ assert cmdmod.run("grep f", stdin="one\ntwo\nthree\nfour\nfive\n") == "four\nfive"
+ assert cmdmod.run('echo "a=b" | sed -e s/=/:/g', python_shell=True) == "a:b"
+diff --git a/tests/pytests/functional/modules/test_dockermod.py b/tests/pytests/functional/modules/test_dockermod.py
+index a5b40869352..eb0cc20f9ff 100644
+--- a/tests/pytests/functional/modules/test_dockermod.py
++++ b/tests/pytests/functional/modules/test_dockermod.py
+@@ -8,7 +8,9 @@ import pytest
+ from saltfactories.utils import random_string
+ from saltfactories.utils.functional import StateResult
+
+-pytest.importorskip("docker")
++from salt.utils.versions import Version
++
++docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
+@@ -18,6 +20,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run inside a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/modules/test_pkg.py b/tests/pytests/functional/modules/test_pkg.py
+index 707361c227b..7cedd32bf6c 100644
+--- a/tests/pytests/functional/modules/test_pkg.py
++++ b/tests/pytests/functional/modules/test_pkg.py
+@@ -67,6 +67,8 @@ def test_pkg(grains):
+ _pkg = "units"
+ elif grains["os_family"] == "Debian":
+ _pkg = "ifenslave"
++ elif grains["os_family"] == "Suse":
++ _pkg = "wget"
+ return _pkg
+
+
+diff --git a/tests/pytests/functional/modules/test_swarm.py b/tests/pytests/functional/modules/test_swarm.py
+index 9dc70f5b3dc..fc3c2b739cd 100644
+--- a/tests/pytests/functional/modules/test_swarm.py
++++ b/tests/pytests/functional/modules/test_swarm.py
+@@ -20,7 +20,11 @@ pytest.importorskip("docker")
+ def docker_version(shell, grains):
+ ret = shell.run("docker", "--version")
+ assert ret.returncode == 0
+- return salt.utils.versions.Version(ret.stdout.split(",")[0].split()[-1].strip())
++ # Example output:
++ # Docker version 24.0.7-ce, build 311b9ff0aa93
++ return salt.utils.versions.Version(
++ ret.stdout.split(",")[0].split()[-1].split("-")[0].strip()
++ )
+
+
+ @pytest.fixture
+diff --git a/tests/pytests/functional/states/rabbitmq/conftest.py b/tests/pytests/functional/states/rabbitmq/conftest.py
+index d8ccc1761b8..60f8206a088 100644
+--- a/tests/pytests/functional/states/rabbitmq/conftest.py
++++ b/tests/pytests/functional/states/rabbitmq/conftest.py
+@@ -5,8 +5,19 @@ import attr
+ import pytest
+ from saltfactories.utils import random_string
+
++from salt.utils.versions import Version
++
+ log = logging.getLogger(__name__)
+
++docker = pytest.importorskip("docker")
++
++pytestmark = [
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
++]
++
+
+ @attr.s(kw_only=True, slots=True)
+ class RabbitMQImage:
+diff --git a/tests/pytests/functional/states/rabbitmq/test_cluster.py b/tests/pytests/functional/states/rabbitmq/test_cluster.py
+index 210b22a2360..df85f04f78d 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_cluster.py
++++ b/tests/pytests/functional/states/rabbitmq/test_cluster.py
+@@ -9,8 +9,9 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_cluster as rabbitmq_cluster
++from salt.utils.versions import Version
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ log = logging.getLogger(__name__)
+
+@@ -22,6 +23,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_plugin.py b/tests/pytests/functional/states/rabbitmq/test_plugin.py
+index f1191490536..6ed4cdc9238 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_plugin.py
++++ b/tests/pytests/functional/states/rabbitmq/test_plugin.py
+@@ -9,11 +9,13 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_plugin as rabbitmq_plugin
++from salt.utils.versions import Version
+ from tests.support.mock import patch
+
+ log = logging.getLogger(__name__)
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
++
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -23,6 +25,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_policy.py b/tests/pytests/functional/states/rabbitmq/test_policy.py
+index 7ccf6a522e0..c648c9ff947 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_policy.py
++++ b/tests/pytests/functional/states/rabbitmq/test_policy.py
+@@ -9,11 +9,12 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_policy as rabbitmq_policy
++from salt.utils.versions import Version
+ from tests.support.mock import MagicMock, patch
+
+ log = logging.getLogger(__name__)
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -23,6 +24,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_upstream.py b/tests/pytests/functional/states/rabbitmq/test_upstream.py
+index c7bcf3b0d44..0a9686d6948 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_upstream.py
++++ b/tests/pytests/functional/states/rabbitmq/test_upstream.py
+@@ -9,10 +9,11 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_upstream as rabbitmq_upstream
++from salt.utils.versions import Version
+
+ log = logging.getLogger(__name__)
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -22,6 +23,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_user.py b/tests/pytests/functional/states/rabbitmq/test_user.py
+index 31723df7be8..a6b0766087f 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_user.py
++++ b/tests/pytests/functional/states/rabbitmq/test_user.py
+@@ -9,10 +9,11 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_user as rabbitmq_user
++from salt.utils.versions import Version
+
+ log = logging.getLogger(__name__)
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -22,6 +23,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/rabbitmq/test_vhost.py b/tests/pytests/functional/states/rabbitmq/test_vhost.py
+index d6ac6901a25..f3553c03e58 100644
+--- a/tests/pytests/functional/states/rabbitmq/test_vhost.py
++++ b/tests/pytests/functional/states/rabbitmq/test_vhost.py
+@@ -9,10 +9,11 @@ import pytest
+
+ import salt.modules.rabbitmq as rabbitmq
+ import salt.states.rabbitmq_vhost as rabbitmq_vhost
++from salt.utils.versions import Version
+
+ log = logging.getLogger(__name__)
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -22,6 +23,10 @@ pytestmark = [
+ "docker", "dockerd", reason="Docker not installed"
+ ),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/functional/states/test_docker_network.py b/tests/pytests/functional/states/test_docker_network.py
+index 0da01ed8bac..19868d03ad1 100644
+--- a/tests/pytests/functional/states/test_docker_network.py
++++ b/tests/pytests/functional/states/test_docker_network.py
+@@ -220,10 +220,15 @@ def test_present_with_containers(network, docker, docker_network, container):
+
+
+ @pytest.mark.parametrize("reconnect", [True, False])
+-def test_present_with_reconnect(network, docker, docker_network, container, reconnect):
++def test_present_with_reconnect(
++ network, docker, docker_network, container, reconnect, grains
++):
+ """
+ Test reconnecting with containers not passed to state
+ """
++ if grains["os_family"] == "Suse":
++ pytest.skip("This test is failing for SUSE family")
++
+ with network() as net:
+ ret = docker_network.present(name=net.name, driver="bridge")
+ assert ret.result is True
+diff --git a/tests/pytests/functional/states/test_pkg.py b/tests/pytests/functional/states/test_pkg.py
+index 12318c996d1..864c1d025f3 100644
+--- a/tests/pytests/functional/states/test_pkg.py
++++ b/tests/pytests/functional/states/test_pkg.py
+@@ -55,7 +55,7 @@ def PKG_TARGETS(grains):
+ else:
+ _PKG_TARGETS = ["units", "zsh-html"]
+ elif grains["os_family"] == "Suse":
+- _PKG_TARGETS = ["lynx", "htop"]
++ _PKG_TARGETS = ["iotop", "screen"]
+ return _PKG_TARGETS
+
+
+diff --git a/tests/pytests/functional/test_version.py b/tests/pytests/functional/test_version.py
+index dfa8850557e..3b85c05ccc6 100644
+--- a/tests/pytests/functional/test_version.py
++++ b/tests/pytests/functional/test_version.py
+@@ -1,14 +1,23 @@
+ import json
+ import logging
++import os
+
+ import pytest
+
+ from tests.support.helpers import SaltVirtualEnv
+ from tests.support.pytest.helpers import FakeSaltExtension
++from tests.support.runtests import RUNTIME_VARS
++
++MISSING_SETUP_PY_FILE = not os.path.exists(
++ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
++)
+
+ pytestmark = [
+ # These are slow because they create a virtualenv and install salt in it
+ pytest.mark.slow_test,
++ pytest.mark.skipif(
++ MISSING_SETUP_PY_FILE, reason="This test only works if setup.py is available"
++ ),
+ ]
+
+ log = logging.getLogger(__name__)
+diff --git a/tests/pytests/integration/modules/test_virt.py b/tests/pytests/integration/modules/test_virt.py
+index 1b7f30154a7..572923764bb 100644
+--- a/tests/pytests/integration/modules/test_virt.py
++++ b/tests/pytests/integration/modules/test_virt.py
+@@ -9,6 +9,7 @@ from xml.etree import ElementTree
+ import pytest
+
+ import salt.version
++from salt.utils.versions import Version
+ from tests.support.virt import SaltVirtMinionContainerFactory
+
+ docker = pytest.importorskip("docker")
+@@ -21,6 +22,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("docker"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/integration/modules/test_x509_v2.py b/tests/pytests/integration/modules/test_x509_v2.py
+index 2fd005778c5..cc8712e45cd 100644
+--- a/tests/pytests/integration/modules/test_x509_v2.py
++++ b/tests/pytests/integration/modules/test_x509_v2.py
+@@ -11,7 +11,7 @@ from pathlib import Path
+ import pytest
+ from saltfactories.utils import random_string
+
+-import salt.utils.x509 as x509util
++x509util = pytest.importorskip("salt.utils.x509")
+
+ try:
+ import cryptography
+diff --git a/tests/pytests/integration/ssh/test_log.py b/tests/pytests/integration/ssh/test_log.py
+index 683feb8bd91..a63dd72373d 100644
+--- a/tests/pytests/integration/ssh/test_log.py
++++ b/tests/pytests/integration/ssh/test_log.py
+@@ -8,9 +8,10 @@ import time
+ import pytest
+ from saltfactories.utils import random_string
+
++from salt.utils.versions import Version
+ from tests.support.helpers import Keys
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -20,6 +21,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/test_master.py b/tests/pytests/integration/ssh/test_master.py
+index 0c2f482cf9f..c658123726b 100644
+--- a/tests/pytests/integration/ssh/test_master.py
++++ b/tests/pytests/integration/ssh/test_master.py
+@@ -23,7 +23,7 @@ def test_service(salt_ssh_cli, grains):
+ os_release = grains["osrelease"]
+ if os_family == "RedHat":
+ service = "crond"
+- elif os_family == "Arch":
++ elif os_family in ["Suse", "Arch"]:
+ service = "sshd"
+ elif os_family == "MacOS":
+ service = "org.ntp.ntpd"
+diff --git a/tests/pytests/integration/ssh/test_py_versions.py b/tests/pytests/integration/ssh/test_py_versions.py
+index 71d4cfaa94e..991a3b71c44 100644
+--- a/tests/pytests/integration/ssh/test_py_versions.py
++++ b/tests/pytests/integration/ssh/test_py_versions.py
+@@ -9,9 +9,10 @@ import time
+ import pytest
+ from saltfactories.utils import random_string
+
++from salt.utils.versions import Version
+ from tests.support.helpers import Keys
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -21,6 +22,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/integration/ssh/test_ssh_setup.py b/tests/pytests/integration/ssh/test_ssh_setup.py
+index 79b55ad90a5..97494bed36b 100644
+--- a/tests/pytests/integration/ssh/test_ssh_setup.py
++++ b/tests/pytests/integration/ssh/test_ssh_setup.py
+@@ -13,9 +13,10 @@ import pytest
+ from pytestshellutils.utils.processes import ProcessResult, terminate_process
+ from saltfactories.utils import random_string
+
++from salt.utils.versions import Version
+ from tests.support.helpers import Keys
+
+-pytest.importorskip("docker")
++docker = pytest.importorskip("docker")
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
+@@ -25,6 +26,10 @@ pytestmark = [
+ pytest.mark.slow_test,
+ pytest.mark.skip_if_binaries_missing("dockerd"),
+ pytest.mark.skipif(INSIDE_CONTAINER, reason="Cannot run in a container"),
++ pytest.mark.skipif(
++ Version(docker.__version__) < Version("4.0.0"),
++ reason="Test does not work in this version of docker-py",
++ ),
+ ]
+
+
+diff --git a/tests/pytests/integration/states/test_x509_v2.py b/tests/pytests/integration/states/test_x509_v2.py
+index 9a1c09bb8bd..4f943412950 100644
+--- a/tests/pytests/integration/states/test_x509_v2.py
++++ b/tests/pytests/integration/states/test_x509_v2.py
+@@ -10,7 +10,7 @@ from pathlib import Path
+ import pytest
+ from saltfactories.utils import random_string
+
+-import salt.utils.x509 as x509util
++x509util = pytest.importorskip("salt.utils.x509")
+
+ try:
+ import cryptography
+diff --git a/tests/pytests/scenarios/setup/test_install.py b/tests/pytests/scenarios/setup/test_install.py
+index 7664fda804e..7a4abfc6e9e 100644
+--- a/tests/pytests/scenarios/setup/test_install.py
++++ b/tests/pytests/scenarios/setup/test_install.py
+@@ -14,11 +14,16 @@ import salt.utils.path
+ import salt.utils.platform
+ import salt.version
+ from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
++from tests.support.runtests import RUNTIME_VARS
+
+ log = logging.getLogger(__name__)
+
+ INSIDE_CONTAINER = os.getenv("HOSTNAME", "") == "salt-test-container"
+
++MISSING_SETUP_PY_FILE = not os.path.exists(
++ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
++)
++
+ pytestmark = [
+ pytest.mark.core_test,
+ pytest.mark.windows_whitelisted,
+@@ -27,6 +32,9 @@ pytestmark = [
+ pytest.mark.skipif(
+ INSIDE_CONTAINER, reason="No gcc and python3-devel in container."
+ ),
++ pytest.mark.skipif(
++ MISSING_SETUP_PY_FILE, reason="This test only works if setup.py is available"
++ ),
+ ]
+
+
+diff --git a/tests/pytests/unit/modules/test_pip.py b/tests/pytests/unit/modules/test_pip.py
+index c03e6ed292b..4b2da77786b 100644
+--- a/tests/pytests/unit/modules/test_pip.py
++++ b/tests/pytests/unit/modules/test_pip.py
+@@ -9,6 +9,11 @@ import salt.utils.files
+ import salt.utils.platform
+ from salt.exceptions import CommandExecutionError
+ from tests.support.mock import MagicMock, patch
++from tests.support.runtests import RUNTIME_VARS
++
++MISSING_SETUP_PY_FILE = not os.path.exists(
++ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
++)
+
+
+ class FakeFopen:
+@@ -1738,6 +1743,9 @@ def test_when_version_is_called_with_a_user_it_should_be_passed_to_undelying_run
+ )
+
+
++@pytest.mark.skipif(
++ MISSING_SETUP_PY_FILE, reason="This test only works if setup.py is available"
++)
+ @pytest.mark.parametrize(
+ "bin_env,target,target_env,expected_target",
+ [
+diff --git a/tests/pytests/unit/utils/test_x509.py b/tests/pytests/unit/utils/test_x509.py
+index 25971af40d8..dade9eda46b 100644
+--- a/tests/pytests/unit/utils/test_x509.py
++++ b/tests/pytests/unit/utils/test_x509.py
+@@ -4,9 +4,10 @@ import ipaddress
+ import pytest
+
+ import salt.exceptions
+-import salt.utils.x509 as x509
+ from tests.support.mock import ANY, Mock, patch
+
++x509 = pytest.importorskip("salt.utils.x509")
++
+ try:
+ import cryptography
+ import cryptography.x509 as cx509
+diff --git a/tests/unit/states/test_pip_state.py b/tests/unit/states/test_pip_state.py
+index 981ad46a135..d70b1150008 100644
+--- a/tests/unit/states/test_pip_state.py
++++ b/tests/unit/states/test_pip_state.py
+@@ -27,6 +27,9 @@ try:
+ except ImportError:
+ HAS_PIP = False
+
++MISSING_SETUP_PY_FILE = not os.path.exists(
++ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
++)
+
+ log = logging.getLogger(__name__)
+
+@@ -408,6 +411,9 @@ class PipStateUtilsTest(TestCase):
+
+ @pytest.mark.skip_if_binaries_missing(*KNOWN_BINARY_NAMES, check_all=False)
+ @pytest.mark.requires_network
++@pytest.mark.skipif(
++ MISSING_SETUP_PY_FILE, reason="This test only works if setup.py is available"
++)
+ class PipStateInstallationErrorTest(TestCase):
+ @pytest.mark.slow_test
+ def test_importable_installation_error(self):
+diff --git a/tests/unit/utils/test_thin.py b/tests/unit/utils/test_thin.py
+index 7fd1e7b5dc3..c4e9c3b3bef 100644
+--- a/tests/unit/utils/test_thin.py
++++ b/tests/unit/utils/test_thin.py
+@@ -1379,6 +1379,9 @@ class SSHThinTestCase(TestCase):
+ assert [x for x in calls if "{}".format(_file) in x[-2]]
+
+ @pytest.mark.slow_test
++ @pytest.mark.skip_if_binaries_missing(
++ "virtualenv", reason="Needs virtualenv binary"
++ )
+ @pytest.mark.skip_on_windows(reason="salt-ssh does not deploy to/from windows")
+ def test_thin_dir(self):
+ """
+--
+2.43.0
+
+
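Most hunks in the patch above apply one of two recurring guards: a module-level skip when docker-py is older than 4.0.0, and a MISSING_SETUP_PY_FILE skip for tests that need a full source checkout. A sketch of the docker-py gate as a reusable marker, assuming it lives in a shared conftest.py; the helper name requires_docker_py_4 is illustrative, not something the patch defines:

    import pytest

    from salt.utils.versions import Version

    # Skip the module entirely when docker-py is not importable at all.
    docker = pytest.importorskip("docker")

    # Skip on docker-py releases older than 4.0.0, which these
    # container-backed tests cannot drive reliably.
    requires_docker_py_4 = pytest.mark.skipif(
        Version(docker.__version__) < Version("4.0.0"),
        reason="Test does not work in this version of docker-py",
    )

    pytestmark = [requires_docker_py_4]

Defining the marker once and referencing it from each module would avoid repeating the skipif block, though the patch keeps the inline form for minimal churn.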
diff --git a/fix-tests-to-make-them-running-with-salt-testsuite.patch b/fix-tests-to-make-them-running-with-salt-testsuite.patch
new file mode 100644
index 0000000..42b9e6f
--- /dev/null
+++ b/fix-tests-to-make-them-running-with-salt-testsuite.patch
@@ -0,0 +1,841 @@
+From 290d092c06dc378647dd1e49f000f012a7c07904 Mon Sep 17 00:00:00 2001
+From: vzhestkov
+Date: Wed, 2 Aug 2023 16:13:49 +0200
+Subject: [PATCH] Fix tests to make them running with salt-testsuite
+
+---
+ tests/pytests/unit/cli/test_batch_async.py | 718 +++++++++++----------
+ tests/unit/cli/test_support.py | 6 +-
+ tests/unit/modules/test_saltsupport.py | 4 +-
+ 3 files changed, 364 insertions(+), 364 deletions(-)
+
+diff --git a/tests/pytests/unit/cli/test_batch_async.py b/tests/pytests/unit/cli/test_batch_async.py
+index c0b708de76..e0774ffff3 100644
+--- a/tests/pytests/unit/cli/test_batch_async.py
++++ b/tests/pytests/unit/cli/test_batch_async.py
+@@ -1,386 +1,392 @@
++import pytest
++
+ import salt.ext.tornado
+ from salt.cli.batch_async import BatchAsync
+-from salt.ext.tornado.testing import AsyncTestCase
+ from tests.support.mock import MagicMock, patch
+-from tests.support.unit import TestCase, skipIf
+-
+-
+-class AsyncBatchTestCase(AsyncTestCase, TestCase):
+- def setUp(self):
+- self.io_loop = self.get_new_ioloop()
+- opts = {
+- "batch": "1",
+- "conf_file": {},
+- "tgt": "*",
+- "timeout": 5,
+- "gather_job_timeout": 5,
+- "batch_presence_ping_timeout": 1,
+- "transport": None,
+- "sock_dir": "",
+- }
+-
+- with patch("salt.client.get_local_client", MagicMock(return_value=MagicMock())):
+- with patch(
+- "salt.cli.batch_async.batch_get_opts", MagicMock(return_value=opts)
+- ):
+- self.batch = BatchAsync(
+- opts,
+- MagicMock(side_effect=["1234", "1235", "1236"]),
+- {
+- "tgt": "",
+- "fun": "",
+- "kwargs": {"batch": "", "batch_presence_ping_timeout": 1},
+- },
+- )
+-
+- def test_ping_jid(self):
+- self.assertEqual(self.batch.ping_jid, "1234")
+-
+- def test_batch_jid(self):
+- self.assertEqual(self.batch.batch_jid, "1235")
+-
+- def test_find_job_jid(self):
+- self.assertEqual(self.batch.find_job_jid, "1236")
+-
+- def test_batch_size(self):
+- """
+- Tests passing batch value as a number
+- """
+- self.batch.opts = {"batch": "2", "timeout": 5}
+- self.batch.minions = {"foo", "bar"}
+- self.batch.start_batch()
+- self.assertEqual(self.batch.batch_size, 2)
+-
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_start_on_batch_presence_ping_timeout(self):
+- self.batch.event = MagicMock()
+- future = salt.ext.tornado.gen.Future()
+- future.set_result({"minions": ["foo", "bar"]})
+- self.batch.local.run_job_async.return_value = future
+- ret = self.batch.start()
+- # assert start_batch is called later with batch_presence_ping_timeout as param
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.start_batch,),
+- )
+- # assert test.ping called
+- self.assertEqual(
+- self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob")
+- )
+- # assert targeted_minions == all minions matched by tgt
+- self.assertEqual(self.batch.targeted_minions, {"foo", "bar"})
+-
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_start_on_gather_job_timeout(self):
+- self.batch.event = MagicMock()
+- future = salt.ext.tornado.gen.Future()
+- future.set_result({"minions": ["foo", "bar"]})
+- self.batch.local.run_job_async.return_value = future
+- self.batch.batch_presence_ping_timeout = None
+- ret = self.batch.start()
+- # assert start_batch is called later with gather_job_timeout as param
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.start_batch,),
+- )
+
+- def test_batch_fire_start_event(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.opts = {"batch": "2", "timeout": 5}
+- self.batch.event = MagicMock()
+- self.batch.metadata = {"mykey": "myvalue"}
+- self.batch.start_batch()
+- self.assertEqual(
+- self.batch.event.fire_event.call_args[0],
+- (
++
++@pytest.fixture
++def batch(temp_salt_master):
++ opts = {
++ "batch": "1",
++ "conf_file": {},
++ "tgt": "*",
++ "timeout": 5,
++ "gather_job_timeout": 5,
++ "batch_presence_ping_timeout": 1,
++ "transport": None,
++ "sock_dir": "",
++ }
++
++ with patch("salt.client.get_local_client", MagicMock(return_value=MagicMock())):
++ with patch("salt.cli.batch_async.batch_get_opts", MagicMock(return_value=opts)):
++ batch = BatchAsync(
++ opts,
++ MagicMock(side_effect=["1234", "1235", "1236"]),
+ {
+- "available_minions": {"foo", "bar"},
+- "down_minions": set(),
+- "metadata": self.batch.metadata,
++ "tgt": "",
++ "fun": "",
++ "kwargs": {"batch": "", "batch_presence_ping_timeout": 1},
+ },
+- "salt/batch/1235/start",
+- ),
+- )
++ )
++ yield batch
+
+- @salt.ext.tornado.testing.gen_test
+- def test_start_batch_calls_next(self):
+- self.batch.run_next = MagicMock(return_value=MagicMock())
+- self.batch.event = MagicMock()
+- self.batch.start_batch()
+- self.assertEqual(self.batch.initialized, True)
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,)
+- )
+
+- def test_batch_fire_done_event(self):
+- self.batch.targeted_minions = {"foo", "baz", "bar"}
+- self.batch.minions = {"foo", "bar"}
+- self.batch.done_minions = {"foo"}
+- self.batch.timedout_minions = {"bar"}
+- self.batch.event = MagicMock()
+- self.batch.metadata = {"mykey": "myvalue"}
+- old_event = self.batch.event
+- self.batch.end_batch()
+- self.assertEqual(
+- old_event.fire_event.call_args[0],
+- (
+- {
+- "available_minions": {"foo", "bar"},
+- "done_minions": self.batch.done_minions,
+- "down_minions": {"baz"},
+- "timedout_minions": self.batch.timedout_minions,
+- "metadata": self.batch.metadata,
+- },
+- "salt/batch/1235/done",
+- ),
+- )
++def test_ping_jid(batch):
++ assert batch.ping_jid == "1234"
+
+- def test_batch__del__(self):
+- batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+- event = MagicMock()
+- batch.event = event
+- batch.__del__()
+- self.assertEqual(batch.local, None)
+- self.assertEqual(batch.event, None)
+- self.assertEqual(batch.ioloop, None)
+-
+- def test_batch_close_safe(self):
+- batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
+- event = MagicMock()
+- batch.event = event
+- batch.patterns = {
+- ("salt/job/1234/ret/*", "find_job_return"),
+- ("salt/job/4321/ret/*", "find_job_return"),
+- }
+- batch.close_safe()
+- self.assertEqual(batch.local, None)
+- self.assertEqual(batch.event, None)
+- self.assertEqual(batch.ioloop, None)
+- self.assertEqual(len(event.unsubscribe.mock_calls), 2)
+- self.assertEqual(len(event.remove_event_handler.mock_calls), 1)
+-
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_next(self):
+- self.batch.event = MagicMock()
+- self.batch.opts["fun"] = "my.fun"
+- self.batch.opts["arg"] = []
+- self.batch._get_next = MagicMock(return_value={"foo", "bar"})
+- self.batch.batch_size = 2
+- future = salt.ext.tornado.gen.Future()
+- future.set_result({"minions": ["foo", "bar"]})
+- self.batch.local.run_job_async.return_value = future
+- self.batch.run_next()
+- self.assertEqual(
+- self.batch.local.run_job_async.call_args[0],
+- ({"foo", "bar"}, "my.fun", [], "list"),
+- )
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.find_job, {"foo", "bar"}),
+- )
+- self.assertEqual(self.batch.active, {"bar", "foo"})
+-
+- def test_next_batch(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), {"foo", "bar"})
+-
+- def test_next_batch_one_done(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.done_minions = {"bar"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), {"foo"})
+-
+- def test_next_batch_one_done_one_active(self):
+- self.batch.minions = {"foo", "bar", "baz"}
+- self.batch.done_minions = {"bar"}
+- self.batch.active = {"baz"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), {"foo"})
+-
+- def test_next_batch_one_done_one_active_one_timedout(self):
+- self.batch.minions = {"foo", "bar", "baz", "faz"}
+- self.batch.done_minions = {"bar"}
+- self.batch.active = {"baz"}
+- self.batch.timedout_minions = {"faz"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), {"foo"})
+-
+- def test_next_batch_bigger_size(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.batch_size = 3
+- self.assertEqual(self.batch._get_next(), {"foo", "bar"})
+-
+- def test_next_batch_all_done(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.done_minions = {"foo", "bar"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), set())
+-
+- def test_next_batch_all_active(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.active = {"foo", "bar"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), set())
+-
+- def test_next_batch_all_timedout(self):
+- self.batch.minions = {"foo", "bar"}
+- self.batch.timedout_minions = {"foo", "bar"}
+- self.batch.batch_size = 2
+- self.assertEqual(self.batch._get_next(), set())
+-
+- def test_batch__event_handler_ping_return(self):
+- self.batch.targeted_minions = {"foo"}
+- self.batch.event = MagicMock(
+- unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+- )
+- self.batch.start()
+- self.assertEqual(self.batch.minions, set())
+- self.batch._BatchAsync__event_handler(MagicMock())
+- self.assertEqual(self.batch.minions, {"foo"})
+- self.assertEqual(self.batch.done_minions, set())
+-
+- def test_batch__event_handler_call_start_batch_when_all_pings_return(self):
+- self.batch.targeted_minions = {"foo"}
+- self.batch.event = MagicMock(
+- unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+- )
+- self.batch.start()
+- self.batch._BatchAsync__event_handler(MagicMock())
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.start_batch,),
+- )
+
+- def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self):
+- self.batch.targeted_minions = {"foo", "bar"}
+- self.batch.event = MagicMock(
+- unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
+- )
+- self.batch.start()
+- self.batch._BatchAsync__event_handler(MagicMock())
+- self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
++def test_batch_jid(batch):
++ assert batch.batch_jid == "1235"
++
++
++def test_find_job_jid(batch):
++ assert batch.find_job_jid == "1236"
++
+
+- def test_batch__event_handler_batch_run_return(self):
+- self.batch.event = MagicMock(
+- unpack=MagicMock(return_value=("salt/job/1235/ret/foo", {"id": "foo"}))
++def test_batch_size(batch):
++ """
++ Tests passing batch value as a number
++ """
++ batch.opts = {"batch": "2", "timeout": 5}
++ batch.minions = {"foo", "bar"}
++ batch.start_batch()
++ assert batch.batch_size == 2
++
++
++def test_batch_start_on_batch_presence_ping_timeout(batch):
++ # batch_async = BatchAsyncMock();
++ batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ batch.local.run_job_async.return_value = future
++ with patch("salt.ext.tornado.gen.sleep", return_value=future):
++ # ret = batch_async.start(batch)
++ ret = batch.start()
++ # assert start_batch is called later with batch_presence_ping_timeout as param
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.start_batch,)
++ # assert test.ping called
++ assert batch.local.run_job_async.call_args[0] == ("*", "test.ping", [], "glob")
++ # assert targeted_minions == all minions matched by tgt
++ assert batch.targeted_minions == {"foo", "bar"}
++
++
++def test_batch_start_on_gather_job_timeout(batch):
++ # batch_async = BatchAsyncMock();
++ batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ batch.local.run_job_async.return_value = future
++ batch.batch_presence_ping_timeout = None
++ with patch("salt.ext.tornado.gen.sleep", return_value=future):
++ # ret = batch_async.start(batch)
++ ret = batch.start()
++ # assert start_batch is called later with gather_job_timeout as param
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.start_batch,)
++
++
++def test_batch_fire_start_event(batch):
++ batch.minions = {"foo", "bar"}
++ batch.opts = {"batch": "2", "timeout": 5}
++ batch.event = MagicMock()
++ batch.metadata = {"mykey": "myvalue"}
++ batch.start_batch()
++ assert batch.event.fire_event.call_args[0] == (
++ {
++ "available_minions": {"foo", "bar"},
++ "down_minions": set(),
++ "metadata": batch.metadata,
++ },
++ "salt/batch/1235/start",
++ )
++
++
++def test_start_batch_calls_next(batch):
++ batch.run_next = MagicMock(return_value=MagicMock())
++ batch.event = MagicMock()
++ batch.start_batch()
++ assert batch.initialized
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.run_next,)
++
++
++def test_batch_fire_done_event(batch):
++ batch.targeted_minions = {"foo", "baz", "bar"}
++ batch.minions = {"foo", "bar"}
++ batch.done_minions = {"foo"}
++ batch.timedout_minions = {"bar"}
++ batch.event = MagicMock()
++ batch.metadata = {"mykey": "myvalue"}
++ old_event = batch.event
++ batch.end_batch()
++ assert old_event.fire_event.call_args[0] == (
++ {
++ "available_minions": {"foo", "bar"},
++ "done_minions": batch.done_minions,
++ "down_minions": {"baz"},
++ "timedout_minions": batch.timedout_minions,
++ "metadata": batch.metadata,
++ },
++ "salt/batch/1235/done",
++ )
++
++
++def test_batch__del__(batch):
++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
++ event = MagicMock()
++ batch.event = event
++ batch.__del__()
++ assert batch.local is None
++ assert batch.event is None
++ assert batch.ioloop is None
++
++
++def test_batch_close_safe(batch):
++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock())
++ event = MagicMock()
++ batch.event = event
++ batch.patterns = {
++ ("salt/job/1234/ret/*", "find_job_return"),
++ ("salt/job/4321/ret/*", "find_job_return"),
++ }
++ batch.close_safe()
++ assert batch.local is None
++ assert batch.event is None
++ assert batch.ioloop is None
++ assert len(event.unsubscribe.mock_calls) == 2
++ assert len(event.remove_event_handler.mock_calls) == 1
++
++
++def test_batch_next(batch):
++ batch.event = MagicMock()
++ batch.opts["fun"] = "my.fun"
++ batch.opts["arg"] = []
++ batch._get_next = MagicMock(return_value={"foo", "bar"})
++ batch.batch_size = 2
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({"minions": ["foo", "bar"]})
++ batch.local.run_job_async.return_value = future
++ with patch("salt.ext.tornado.gen.sleep", return_value=future):
++ batch.run_next()
++ assert batch.local.run_job_async.call_args[0] == (
++ {"foo", "bar"},
++ "my.fun",
++ [],
++ "list",
+ )
+- self.batch.start()
+- self.batch.active = {"foo"}
+- self.batch._BatchAsync__event_handler(MagicMock())
+- self.assertEqual(self.batch.active, set())
+- self.assertEqual(self.batch.done_minions, {"foo"})
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.schedule_next,),
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (
++ batch.find_job,
++ {"foo", "bar"},
+ )
++ assert batch.active == {"bar", "foo"}
++
+
+- def test_batch__event_handler_find_job_return(self):
+- self.batch.event = MagicMock(
+- unpack=MagicMock(
+- return_value=(
+- "salt/job/1236/ret/foo",
+- {"id": "foo", "return": "deadbeaf"},
+- )
++def test_next_batch(batch):
++ batch.minions = {"foo", "bar"}
++ batch.batch_size = 2
++ assert batch._get_next() == {"foo", "bar"}
++
++
++def test_next_batch_one_done(batch):
++ batch.minions = {"foo", "bar"}
++ batch.done_minions = {"bar"}
++ batch.batch_size = 2
++ assert batch._get_next() == {"foo"}
++
++
++def test_next_batch_one_done_one_active(batch):
++ batch.minions = {"foo", "bar", "baz"}
++ batch.done_minions = {"bar"}
++ batch.active = {"baz"}
++ batch.batch_size = 2
++ assert batch._get_next() == {"foo"}
++
++
++def test_next_batch_one_done_one_active_one_timedout(batch):
++ batch.minions = {"foo", "bar", "baz", "faz"}
++ batch.done_minions = {"bar"}
++ batch.active = {"baz"}
++ batch.timedout_minions = {"faz"}
++ batch.batch_size = 2
++ assert batch._get_next() == {"foo"}
++
++
++def test_next_batch_bigger_size(batch):
++ batch.minions = {"foo", "bar"}
++ batch.batch_size = 3
++ assert batch._get_next() == {"foo", "bar"}
++
++
++def test_next_batch_all_done(batch):
++ batch.minions = {"foo", "bar"}
++ batch.done_minions = {"foo", "bar"}
++ batch.batch_size = 2
++ assert batch._get_next() == set()
++
++
++def test_next_batch_all_active(batch):
++ batch.minions = {"foo", "bar"}
++ batch.active = {"foo", "bar"}
++ batch.batch_size = 2
++ assert batch._get_next() == set()
++
++
++def test_next_batch_all_timedout(batch):
++ batch.minions = {"foo", "bar"}
++ batch.timedout_minions = {"foo", "bar"}
++ batch.batch_size = 2
++ assert batch._get_next() == set()
++
++
++def test_batch__event_handler_ping_return(batch):
++ batch.targeted_minions = {"foo"}
++ batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ batch.start()
++ assert batch.minions == set()
++ batch._BatchAsync__event_handler(MagicMock())
++ assert batch.minions == {"foo"}
++ assert batch.done_minions == set()
++
++
++def test_batch__event_handler_call_start_batch_when_all_pings_return(batch):
++ batch.targeted_minions = {"foo"}
++ batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ batch.start()
++ batch._BatchAsync__event_handler(MagicMock())
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.start_batch,)
++
++
++def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(batch):
++ batch.targeted_minions = {"foo", "bar"}
++ batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"}))
++ )
++ batch.start()
++ batch._BatchAsync__event_handler(MagicMock())
++ assert len(batch.event.io_loop.spawn_callback.mock_calls) == 0
++
++
++def test_batch__event_handler_batch_run_return(batch):
++ batch.event = MagicMock(
++ unpack=MagicMock(return_value=("salt/job/1235/ret/foo", {"id": "foo"}))
++ )
++ batch.start()
++ batch.active = {"foo"}
++ batch._BatchAsync__event_handler(MagicMock())
++ assert batch.active == set()
++ assert batch.done_minions == {"foo"}
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.schedule_next,)
++
++
++def test_batch__event_handler_find_job_return(batch):
++ batch.event = MagicMock(
++ unpack=MagicMock(
++ return_value=(
++ "salt/job/1236/ret/foo",
++ {"id": "foo", "return": "deadbeaf"},
+ )
+ )
+- self.batch.start()
+- self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return"))
+- self.batch._BatchAsync__event_handler(MagicMock())
+- self.assertEqual(self.batch.find_job_returned, {"foo"})
+-
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_run_next_end_batch_when_no_next(self):
+- self.batch.end_batch = MagicMock()
+- self.batch._get_next = MagicMock(return_value={})
+- self.batch.run_next()
+- self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
+-
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_find_job(self):
+- self.batch.event = MagicMock()
+- future = salt.ext.tornado.gen.Future()
+- future.set_result({})
+- self.batch.local.run_job_async.return_value = future
+- self.batch.minions = {"foo", "bar"}
+- self.batch.jid_gen = MagicMock(return_value="1234")
+- salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
+- self.batch.find_job({"foo", "bar"})
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.check_find_job, {"foo", "bar"}, "1234"),
++ )
++ batch.start()
++ batch.patterns.add(("salt/job/1236/ret/*", "find_job_return"))
++ batch._BatchAsync__event_handler(MagicMock())
++ assert batch.find_job_returned == {"foo"}
++
++
++def test_batch_run_next_end_batch_when_no_next(batch):
++ batch.end_batch = MagicMock()
++ batch._get_next = MagicMock(return_value={})
++ batch.run_next()
++ assert len(batch.end_batch.mock_calls) == 1
++
++
++def test_batch_find_job(batch):
++ batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({})
++ batch.local.run_job_async.return_value = future
++ batch.minions = {"foo", "bar"}
++ batch.jid_gen = MagicMock(return_value="1234")
++ with patch("salt.ext.tornado.gen.sleep", return_value=future):
++ batch.find_job({"foo", "bar"})
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (
++ batch.check_find_job,
++ {"foo", "bar"},
++ "1234",
+ )
+
+- @salt.ext.tornado.testing.gen_test
+- def test_batch_find_job_with_done_minions(self):
+- self.batch.done_minions = {"bar"}
+- self.batch.event = MagicMock()
+- future = salt.ext.tornado.gen.Future()
+- future.set_result({})
+- self.batch.local.run_job_async.return_value = future
+- self.batch.minions = {"foo", "bar"}
+- self.batch.jid_gen = MagicMock(return_value="1234")
+- salt.ext.tornado.gen.sleep = MagicMock(return_value=future)
+- self.batch.find_job({"foo", "bar"})
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.check_find_job, {"foo"}, "1234"),
+- )
+
+- def test_batch_check_find_job_did_not_return(self):
+- self.batch.event = MagicMock()
+- self.batch.active = {"foo"}
+- self.batch.find_job_returned = set()
+- self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+- self.batch.check_find_job({"foo"}, jid="1234")
+- self.assertEqual(self.batch.find_job_returned, set())
+- self.assertEqual(self.batch.active, set())
+- self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0)
+-
+- def test_batch_check_find_job_did_return(self):
+- self.batch.event = MagicMock()
+- self.batch.find_job_returned = {"foo"}
+- self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+- self.batch.check_find_job({"foo"}, jid="1234")
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.find_job, {"foo"}),
++def test_batch_find_job_with_done_minions(batch):
++ batch.done_minions = {"bar"}
++ batch.event = MagicMock()
++ future = salt.ext.tornado.gen.Future()
++ future.set_result({})
++ batch.local.run_job_async.return_value = future
++ batch.minions = {"foo", "bar"}
++ batch.jid_gen = MagicMock(return_value="1234")
++ with patch("salt.ext.tornado.gen.sleep", return_value=future):
++ batch.find_job({"foo", "bar"})
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (
++ batch.check_find_job,
++ {"foo"},
++ "1234",
+ )
+
+- def test_batch_check_find_job_multiple_states(self):
+- self.batch.event = MagicMock()
+- # currently running minions
+- self.batch.active = {"foo", "bar"}
+
+- # minion is running and find_job returns
+- self.batch.find_job_returned = {"foo"}
++def test_batch_check_find_job_did_not_return(batch):
++ batch.event = MagicMock()
++ batch.active = {"foo"}
++ batch.find_job_returned = set()
++ batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ batch.check_find_job({"foo"}, jid="1234")
++ assert batch.find_job_returned == set()
++ assert batch.active == set()
++ assert len(batch.event.io_loop.add_callback.mock_calls) == 0
+
+- # minion started running but find_job did not return
+- self.batch.timedout_minions = {"faz"}
+
+- # minion finished
+- self.batch.done_minions = {"baz"}
++def test_batch_check_find_job_did_return(batch):
++ batch.event = MagicMock()
++ batch.find_job_returned = {"foo"}
++ batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ batch.check_find_job({"foo"}, jid="1234")
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.find_job, {"foo"})
+
+- # both not yet done but only 'foo' responded to find_job
+- not_done = {"foo", "bar"}
+
+- self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
+- self.batch.check_find_job(not_done, jid="1234")
++def test_batch_check_find_job_multiple_states(batch):
++ batch.event = MagicMock()
++ # currently running minions
++ batch.active = {"foo", "bar"}
+
+- # assert 'bar' removed from active
+- self.assertEqual(self.batch.active, {"foo"})
++ # minion is running and find_job returns
++ batch.find_job_returned = {"foo"}
+
+- # assert 'bar' added to timedout_minions
+- self.assertEqual(self.batch.timedout_minions, {"bar", "faz"})
++ # minion started running but find_job did not return
++ batch.timedout_minions = {"faz"}
++
++ # minion finished
++ batch.done_minions = {"baz"}
++
++ # both not yet done but only 'foo' responded to find_job
++ not_done = {"foo", "bar"}
++
++ batch.patterns = {("salt/job/1234/ret/*", "find_job_return")}
++ batch.check_find_job(not_done, jid="1234")
++
++ # assert 'bar' removed from active
++ assert batch.active == {"foo"}
++
++ # assert 'bar' added to timedout_minions
++ assert batch.timedout_minions == {"bar", "faz"}
++
++ # assert 'find_job' scheduled again only for 'foo'
++ assert batch.event.io_loop.spawn_callback.call_args[0] == (batch.find_job, {"foo"})
+
+- # assert 'find_job' schedueled again only for 'foo'
+- self.assertEqual(
+- self.batch.event.io_loop.spawn_callback.call_args[0],
+- (self.batch.find_job, {"foo"}),
+- )
+
+- def test_only_on_run_next_is_scheduled(self):
+- self.batch.event = MagicMock()
+- self.batch.scheduled = True
+- self.batch.schedule_next()
+- self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0)
++def test_only_on_run_next_is_scheduled(batch):
++ batch.event = MagicMock()
++ batch.scheduled = True
++ batch.schedule_next()
++ assert len(batch.event.io_loop.spawn_callback.mock_calls) == 0
+diff --git a/tests/unit/cli/test_support.py b/tests/unit/cli/test_support.py
+index dc0e99bb3d..971a0f122b 100644
+--- a/tests/unit/cli/test_support.py
++++ b/tests/unit/cli/test_support.py
+@@ -14,7 +14,7 @@ from salt.cli.support.collector import SaltSupport, SupportDataCollector
+ from salt.cli.support.console import IndentOutput
+ from salt.utils.color import get_colors
+ from salt.utils.stringutils import to_bytes
+-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
+
+ try:
+@@ -24,7 +24,6 @@ except ImportError:
+
+
+ @skipIf(not bool(pytest), "Pytest needs to be installed")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class SaltSupportIndentOutputTestCase(TestCase):
+ """
+ Unit Tests for the salt-support indent output.
+@@ -100,7 +99,6 @@ class SaltSupportIndentOutputTestCase(TestCase):
+
+
+ @skipIf(not bool(pytest), "Pytest needs to be installed")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class SaltSupportCollectorTestCase(TestCase):
+ """
+ Collector tests.
+@@ -232,7 +230,6 @@ class SaltSupportCollectorTestCase(TestCase):
+
+
+ @skipIf(not bool(pytest), "Pytest needs to be installed")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class SaltSupportRunnerTestCase(TestCase):
+ """
+ Test runner class.
+@@ -468,7 +465,6 @@ class SaltSupportRunnerTestCase(TestCase):
+
+
+ @skipIf(not bool(pytest), "Pytest needs to be installed")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class ProfileIntegrityTestCase(TestCase):
+ """
+ Default profile integrity
+diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py
+index 1715c68f4c..2afdd69b3e 100644
+--- a/tests/unit/modules/test_saltsupport.py
++++ b/tests/unit/modules/test_saltsupport.py
+@@ -8,7 +8,7 @@ import datetime
+ import salt.exceptions
+ from salt.modules import saltsupport
+ from tests.support.mixins import LoaderModuleMockMixin
+-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
++from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase, skipIf
+
+ try:
+@@ -18,7 +18,6 @@ except ImportError:
+
+
+ @skipIf(not bool(pytest), "Pytest required")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ Test cases for salt.modules.support::SaltSupportModule
+@@ -361,7 +360,6 @@ professor: Farnsworth
+
+
+ @skipIf(not bool(pytest), "Pytest required")
+-@skipIf(NO_MOCK, NO_MOCK_REASON)
+ class LogCollectorTestCase(TestCase, LoaderModuleMockMixin):
+ """
+ Test cases for salt.modules.support::LogCollector
+--
+2.41.0
+
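The test_batch_async.py rewrite above follows one mechanical recipe: the AsyncTestCase/TestCase class is dissolved into module-level functions, setUp() becomes a batch fixture, self.assertEqual(a, b) becomes a bare assert a == b, and the old global reassignment of salt.ext.tornado.gen.sleep becomes a scoped patch(...) context manager. A condensed sketch of that pattern on a toy object, not the actual BatchAsync code:

    import pytest

    from tests.support.mock import MagicMock, patch


    @pytest.fixture
    def batch():
        # Replaces setUp(): build the object under test once per test.
        obj = MagicMock()
        obj.ping_jid = "1234"
        yield obj


    def test_ping_jid(batch):
        # self.assertEqual(self.batch.ping_jid, "1234") becomes a bare assert.
        assert batch.ping_jid == "1234"


    def test_scoped_sleep_patch(batch):
        # The old tests assigned salt.ext.tornado.gen.sleep = MagicMock(...),
        # leaking the mock into later tests; a context manager keeps it scoped.
        with patch("salt.ext.tornado.gen.sleep", MagicMock()):
            assert batch.ping_jid == "1234"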
diff --git a/fix-the-aptpkg.py-unit-test-failure.patch b/fix-the-aptpkg.py-unit-test-failure.patch
new file mode 100644
index 0000000..3d7c363
--- /dev/null
+++ b/fix-the-aptpkg.py-unit-test-failure.patch
@@ -0,0 +1,25 @@
+From 4bc3be7814daf5365d63b88f164f791ea53b418f Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 17 Jan 2024 15:04:53 +0100
+Subject: [PATCH] Fix the aptpkg.py unit test failure
+
+---
+ salt/modules/aptpkg.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index 9885e9fb60..ad5450c415 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -3128,7 +3128,7 @@ def expand_repo_def(**kwargs):
+ NOT USABLE IN THE CLI
+ """
+ warn_until_date(
+- "20240101",
++ "20250101",
+ "The pkg.expand_repo_def function is deprecated and set for removal "
+ "after {date}. This is only unsed internally by the apt pkg state "
+ "module. If that's not the case, please file an new issue requesting "
+--
+2.43.0
+
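The date bump above is what un-breaks the unit test: salt.utils.versions.warn_until_date emits a deprecation warning until the given date and then starts raising instead, so the hard-coded "20240101" made pkg.expand_repo_def blow up as soon as 2024 arrived. Roughly, the patched call behaves like this sketch (simplified; the real helper interpolates {date} into the message and, in current Salt, raises a RuntimeError once today is past the deadline):

    from salt.utils.versions import warn_until_date

    # Warns until 2025-01-01; after that date the call raises instead,
    # which is exactly how the previous "20240101" deadline broke the test.
    warn_until_date(
        "20250101",
        "The pkg.expand_repo_def function is deprecated and set for removal "
        "after {date}.",
    )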
diff --git a/fix-the-regression-for-yumnotify-plugin-456.patch b/fix-the-regression-for-yumnotify-plugin-456.patch
new file mode 100644
index 0000000..c12e2e3
--- /dev/null
+++ b/fix-the-regression-for-yumnotify-plugin-456.patch
@@ -0,0 +1,23 @@
+From b80c0d515e8715c160f94124dff8b5b90e773cd0 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
+Date: Tue, 9 Nov 2021 16:19:56 +0300
+Subject: [PATCH] Fix the regression for yumnotify plugin (#456)
+
+---
+ scripts/suse/yum/plugins/yumnotify.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/suse/yum/plugins/yumnotify.py b/scripts/suse/yum/plugins/yumnotify.py
+index 0d117e8946..cec5256d20 100644
+--- a/scripts/suse/yum/plugins/yumnotify.py
++++ b/scripts/suse/yum/plugins/yumnotify.py
+@@ -63,4 +63,4 @@ def posttrans_hook(conduit):
+ )
+ )
+ except OSError as e:
+- print("Unable to save the cookie file: %s" % (e), file=sys.stderr)
++ sys.stderr.write("Unable to save the cookie file: %s\n" % (e))
+--
+2.39.2
+
+
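Note: a likely reason for this one-liner (an assumption, not stated in the commit) is that yum plugins run under the system Python, which on older distributions is still Python 2, where print(..., file=sys.stderr) is a syntax error without a print_function future import. Writing to the stream directly behaves the same on both interpreter lines:

    import sys

    try:
        raise OSError("disk full")  # stand-in for the real failure
    except OSError as e:
        # Works unchanged on Python 2 and 3, unlike print(..., file=...).
        sys.stderr.write("Unable to save the cookie file: %s\n" % (e,))
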
diff --git a/fix-the-regression-of-user.present-state-when-group-.patch b/fix-the-regression-of-user.present-state-when-group-.patch
new file mode 100644
index 0000000..669caee
--- /dev/null
+++ b/fix-the-regression-of-user.present-state-when-group-.patch
@@ -0,0 +1,154 @@
+From 502354be32fcff9b0607f6e435ca8825a4c2cd56 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 3 Aug 2023 11:07:03 +0200
+Subject: [PATCH] Fix the regression of user.present state when group is
+ unset (#589)
+
+* Fix user.present state when group is unset
+
+* Fix user unit test
+
+---------
+
+Co-authored-by: Megan Wilhite
+---
+ changelog/64211.fixed.md | 1 +
+ salt/states/user.py | 2 +-
+ tests/pytests/functional/states/test_user.py | 74 +++++++++++++++++++-
+ tests/pytests/unit/states/test_user.py | 2 +
+ 4 files changed, 76 insertions(+), 3 deletions(-)
+ create mode 100644 changelog/64211.fixed.md
+
+diff --git a/changelog/64211.fixed.md b/changelog/64211.fixed.md
+new file mode 100644
+index 0000000000..26b39acf02
+--- /dev/null
++++ b/changelog/64211.fixed.md
+@@ -0,0 +1 @@
++Fix user.present state when groups is unset to ensure the groups are unchanged, as documented.
+diff --git a/salt/states/user.py b/salt/states/user.py
+index ed2d5a05f4..929afb2cd1 100644
+--- a/salt/states/user.py
++++ b/salt/states/user.py
+@@ -100,7 +100,7 @@ def _changes(
+
+ change = {}
+ wanted_groups = sorted(set((groups or []) + (optional_groups or [])))
+- if not remove_groups:
++ if not remove_groups or groups is None and not optional_groups:
+ wanted_groups = sorted(set(wanted_groups + lusr["groups"]))
+ if uid and lusr["uid"] != uid:
+ change["uid"] = uid
+diff --git a/tests/pytests/functional/states/test_user.py b/tests/pytests/functional/states/test_user.py
+index 09d34da168..96b1ec55c8 100644
+--- a/tests/pytests/functional/states/test_user.py
++++ b/tests/pytests/functional/states/test_user.py
+@@ -117,7 +117,6 @@ def test_user_present_when_home_dir_does_not_18843(states, existing_account):
+ ret = states.user.present(
+ name=existing_account.username,
+ home=existing_account.info.home,
+- remove_groups=False,
+ )
+ assert ret.result is True
+ assert pathlib.Path(existing_account.info.home).is_dir()
+@@ -228,7 +227,6 @@ def test_user_present_unicode(states, username, subtests):
+ roomnumber="①②③",
+ workphone="١٢٣٤",
+ homephone="६७८",
+- remove_groups=False,
+ )
+ assert ret.result is True
+
+@@ -429,3 +427,75 @@ def test_user_present_change_optional_groups(
+ user_info = modules.user.info(username)
+ assert user_info
+ assert user_info["groups"] == [group_1.name]
++
++
++@pytest.mark.skip_unless_on_linux(reason="underlying functionality only runs on Linux")
++def test_user_present_no_groups(modules, states, username):
++ """
++    test user.present when the groups arg is not
++    included but the group is created in another
++    state. Re-run the states to ensure there are
++    no changes and it is idempotent.
++ """
++ groups = ["testgroup1", "testgroup2"]
++ try:
++ ret = states.group.present(name=username, gid=61121)
++ assert ret.result is True
++
++ ret = states.user.present(
++ name=username,
++ uid=61121,
++ gid=61121,
++ )
++ assert ret.result is True
++ assert ret.changes["groups"] == [username]
++ assert ret.changes["name"] == username
++
++ ret = states.group.present(
++ name=groups[0],
++ members=[username],
++ )
++ assert ret.changes["members"] == [username]
++
++ ret = states.group.present(
++ name=groups[1],
++ members=[username],
++ )
++ assert ret.changes["members"] == [username]
++
++ user_info = modules.user.info(username)
++ assert user_info
++ assert user_info["groups"] == [username, groups[0], groups[1]]
++
++ # run again, expecting no changes
++ ret = states.group.present(name=username)
++ assert ret.result is True
++ assert ret.changes == {}
++
++ ret = states.user.present(
++ name=username,
++ )
++ assert ret.result is True
++ assert ret.changes == {}
++
++ ret = states.group.present(
++ name=groups[0],
++ members=[username],
++ )
++ assert ret.result is True
++ assert ret.changes == {}
++
++ ret = states.group.present(
++ name=groups[1],
++ members=[username],
++ )
++ assert ret.result is True
++ assert ret.changes == {}
++
++ user_info = modules.user.info(username)
++ assert user_info
++ assert user_info["groups"] == [username, groups[0], groups[1]]
++ finally:
++ for group in groups:
++ ret = states.group.absent(name=group)
++ assert ret.result is True
+diff --git a/tests/pytests/unit/states/test_user.py b/tests/pytests/unit/states/test_user.py
+index 94e69d70ed..d50d16e3be 100644
+--- a/tests/pytests/unit/states/test_user.py
++++ b/tests/pytests/unit/states/test_user.py
+@@ -189,6 +189,8 @@ def test_present_uid_gid_change():
+ "user.chgid": Mock(),
+ "file.group_to_gid": mock_group_to_gid,
+ "file.gid_to_group": mock_gid_to_group,
++ "group.info": MagicMock(return_value=after),
++ "user.chgroups": MagicMock(return_value=True),
+ }
+ with patch.dict(user.__grains__, {"kernel": "Linux"}), patch.dict(
+ user.__salt__, dunder_salt
+--
+2.41.0
+
+
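Note: the whole fix hinges on one extra condition in _changes: when the caller passed neither groups nor optional_groups, the user's current memberships are folded into the wanted set, so no group change is reported. A standalone sketch of that computation (simplified; current_groups stands for the local user's existing groups):

    def wanted_groups(groups, optional_groups, remove_groups, current_groups):
        wanted = sorted(set((groups or []) + (optional_groups or [])))
        # Keep current memberships when removal is disabled, or when no
        # groups were requested at all (the regression this patch fixes).
        if not remove_groups or (groups is None and not optional_groups):
            wanted = sorted(set(wanted + current_groups))
        return wanted


    assert wanted_groups(None, None, True, ["salt", "wheel"]) == ["salt", "wheel"]
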
diff --git a/fix-the-selinux-context-for-salt-minion-service-bsc-.patch b/fix-the-selinux-context-for-salt-minion-service-bsc-.patch
new file mode 100644
index 0000000..a4281ff
--- /dev/null
+++ b/fix-the-selinux-context-for-salt-minion-service-bsc-.patch
@@ -0,0 +1,83 @@
+From d933c8f0795fdada84a01a2cc754586fa720993d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 10 Sep 2024 13:46:09 +0100
+Subject: [PATCH] Fix the SELinux context for Salt Minion service
+ (bsc#1219041) (#670)
+
+Currently there are no SELinux policies for Salt.
+
+By default, the Salt Minion service runs as 'unconfined_service_t' when
+SELinux is enabled. This works fine in most cases but generates a problem
+then trying to transition to an 'unconfined_t', i.a. when running
+"cmd.run .... runas=nobody". Then we see this denied in audit logs:
+
+type=AVC msg=audit(1722870119.142:718): avc: denied { transition } for pid=3421 comm="su" path="/usr/bin/bash" dev="vda3" ino=28565 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=unconfined_u:unconfined_r:unconfined_t:s0 tclass=process permissive=0
+
+(This happens when cmd.run tries to invoke a shell as a different user
+in order to gather the environment variables of that particular user)
+
+Setting the SELinuxContext of the Salt Minion systemd service to the
+general 'unconfined_t' works around this situation.
+
+The SELinuxContext attribute was added in systemd version 209.
+---
+ pkg/common/salt-minion.service | 1 +
+ pkg/old/deb/salt-minion.service | 1 +
+ pkg/old/suse/salt-minion.service | 1 +
+ pkg/old/suse/salt-minion.service.rhel7 | 1 +
+ 4 files changed, 4 insertions(+)
+
+diff --git a/pkg/common/salt-minion.service b/pkg/common/salt-minion.service
+index 69aff18c583..696d0263c39 100644
+--- a/pkg/common/salt-minion.service
++++ b/pkg/common/salt-minion.service
+@@ -9,6 +9,7 @@ Type=notify
+ NotifyAccess=all
+ LimitNOFILE=8192
+ ExecStart=/usr/bin/salt-minion
++SELinuxContext=system_u:system_r:unconfined_t:s0
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/pkg/old/deb/salt-minion.service b/pkg/old/deb/salt-minion.service
+index 7e6cf146549..b0ad82c1334 100644
+--- a/pkg/old/deb/salt-minion.service
++++ b/pkg/old/deb/salt-minion.service
+@@ -8,6 +8,7 @@ KillMode=process
+ NotifyAccess=all
+ LimitNOFILE=8192
+ ExecStart=/usr/bin/salt-minion
++SELinuxContext=system_u:system_r:unconfined_t:s0
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/pkg/old/suse/salt-minion.service b/pkg/old/suse/salt-minion.service
+index 12f28314cb1..b99ef063522 100644
+--- a/pkg/old/suse/salt-minion.service
++++ b/pkg/old/suse/salt-minion.service
+@@ -10,6 +10,7 @@ ExecStart=/usr/bin/salt-minion
+ KillMode=process
+ Restart=on-failure
+ RestartSec=15
++SELinuxContext=system_u:system_r:unconfined_t:s0
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/pkg/old/suse/salt-minion.service.rhel7 b/pkg/old/suse/salt-minion.service.rhel7
+index 69172677140..92cc66d32f4 100644
+--- a/pkg/old/suse/salt-minion.service.rhel7
++++ b/pkg/old/suse/salt-minion.service.rhel7
+@@ -9,6 +9,7 @@ ExecStart=/usr/bin/salt-minion
+ KillMode=process
+ Restart=on-failure
+ RestartSec=15
++SELinuxContext=system_u:system_r:unconfined_t:s0
+
+ [Install]
+ WantedBy=multi-user.target
+--
+2.46.0
+
+
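Note: with SELinuxContext= set, systemd launches the minion in the plain 'unconfined_t' domain, from which the su transition during cmd.run ... runas=... is allowed. A quick verification sketch (assuming SELinux is enabled and the /proc attribute interface is available; this is standard kernel interface usage, not Salt code):

    def selinux_type(pid="self"):
        # /proc/<pid>/attr/current holds e.g. "system_u:system_r:unconfined_t:s0"
        with open(f"/proc/{pid}/attr/current") as f:
            return f.read().strip("\x00\n").split(":")[2]


    if __name__ == "__main__":
        # Expect "unconfined_t" once the unit change is in effect.
        print(selinux_type())
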
diff --git a/fix-traceback.print_exc-calls-for-test_pip_state-432.patch b/fix-traceback.print_exc-calls-for-test_pip_state-432.patch
new file mode 100644
index 0000000..a063edf
--- /dev/null
+++ b/fix-traceback.print_exc-calls-for-test_pip_state-432.patch
@@ -0,0 +1,26 @@
+From c37992e305978e95da1ac0a40a8142f578271320 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com>
+Date: Mon, 8 Nov 2021 17:43:02 +0300
+Subject: [PATCH] Fix traceback.print_exc calls for test_pip_state (#432)
+
+---
+ tests/unit/states/test_pip_state.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/unit/states/test_pip_state.py b/tests/unit/states/test_pip_state.py
+index 5e4b6e0af1..981ad46a13 100644
+--- a/tests/unit/states/test_pip_state.py
++++ b/tests/unit/states/test_pip_state.py
+@@ -442,7 +442,7 @@ class PipStateInstallationErrorTest(TestCase):
+ sys.stdout.flush()
+ sys.exit(2)
+ except Exception as exc:
+- traceback.print_exc(exc, file=sys.stdout)
++ traceback.print_exc(file=sys.stdout)
+ sys.stdout.flush()
+ sys.exit(3)
+ sys.exit(0)
+--
+2.39.2
+
+
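Note: in Python 3, traceback.print_exc(limit=None, file=None, chain=True) formats the exception currently being handled; it never accepted the exception object itself, so the old positional exc was bound to limit, which Python 3 rejects. The corrected usage:

    import sys
    import traceback

    try:
        1 / 0
    except Exception:
        # Formats the active exception; no exception argument is needed.
        traceback.print_exc(file=sys.stdout)
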
diff --git a/fix-user.list_groups-omits-remote-groups.patch b/fix-user.list_groups-omits-remote-groups.patch
new file mode 100644
index 0000000..c1b5c4f
--- /dev/null
+++ b/fix-user.list_groups-omits-remote-groups.patch
@@ -0,0 +1,265 @@
+From 70509ff67d4eb734c88032913134092257a0d35b Mon Sep 17 00:00:00 2001
+From: Flex Liu
+Date: Tue, 2 Jul 2024 15:25:30 +0800
+Subject: [PATCH] Fix user.list_groups omits remote groups
+
+* fixes saltstack/salt#64953 user.list_groups omits remote groups
+
+* fixes saltstack/salt#65029 support for pysss can be removed
+
+* add changelog entries
+
+* add tests for _getgrall and local vs remote group handling
+
+* add negative tests for _getgrall
+
+* root can still read the file and tests run as root
+
+* remove permission check as it's probably an unreachable edge case
+
+---------
+
+Co-authored-by: nicholasmhughes
+---
+ changelog/64888.fixed.md | 1 +
+ changelog/64953.fixed.md | 1 +
+ changelog/65029.removed.md | 1 +
+ salt/auth/pam.py | 9 ---
+ salt/utils/user.py | 73 ++++++++++++-------
+ .../functional/utils/user/test__getgrall.py | 44 +++++++++++
+ tests/pytests/unit/utils/test_user.py | 29 ++++++++
+ 7 files changed, 122 insertions(+), 36 deletions(-)
+ create mode 100644 changelog/64888.fixed.md
+ create mode 100644 changelog/64953.fixed.md
+ create mode 100644 changelog/65029.removed.md
+ create mode 100644 tests/pytests/functional/utils/user/test__getgrall.py
+ create mode 100644 tests/pytests/unit/utils/test_user.py
+
+diff --git a/changelog/64888.fixed.md b/changelog/64888.fixed.md
+new file mode 100644
+index 0000000000..08b2efd042
+--- /dev/null
++++ b/changelog/64888.fixed.md
+@@ -0,0 +1 @@
++Fixed grp.getgrall() in utils/user.py causing performance issues
+diff --git a/changelog/64953.fixed.md b/changelog/64953.fixed.md
+new file mode 100644
+index 0000000000..f0b1ed46f1
+--- /dev/null
++++ b/changelog/64953.fixed.md
+@@ -0,0 +1 @@
++Fix user.list_groups omitting remote groups via sssd, etc.
+diff --git a/changelog/65029.removed.md b/changelog/65029.removed.md
+new file mode 100644
+index 0000000000..d09f10b4ba
+--- /dev/null
++++ b/changelog/65029.removed.md
+@@ -0,0 +1 @@
++Tech Debt - support for pysss removed due to functionality addition in Python 3.3
+diff --git a/salt/auth/pam.py b/salt/auth/pam.py
+index f0397c1062..12af29bbdb 100644
+--- a/salt/auth/pam.py
++++ b/salt/auth/pam.py
+@@ -24,15 +24,6 @@ authenticated against. This defaults to `login`
+
+ The Python interface to PAM does not support authenticating as ``root``.
+
+-.. note:: Using PAM groups with SSSD groups on python2.
+-
+- To use sssd with the PAM eauth module and groups the `pysss` module is
+- needed. On RedHat/CentOS this is `python-sss`.
+-
+- This should not be needed with python >= 3.3, because the `os` modules has the
+- `getgrouplist` function.
+-
+-
+ .. note:: This module executes itself in a subprocess in order to use the system python
+ and pam libraries. We do this to avoid openssl version conflicts when
+ running under a salt onedir build.
+diff --git a/salt/utils/user.py b/salt/utils/user.py
+index 2f1ca65cf9..3588b3804a 100644
+--- a/salt/utils/user.py
++++ b/salt/utils/user.py
+@@ -31,13 +31,6 @@ try:
+ except ImportError:
+ HAS_GRP = False
+
+-try:
+- import pysss
+-
+- HAS_PYSSS = True
+-except ImportError:
+- HAS_PYSSS = False
+-
+ try:
+ import salt.utils.win_functions
+
+@@ -289,30 +282,35 @@ def get_group_list(user, include_default=True):
+ return []
+ group_names = None
+ ugroups = set()
+- if hasattr(os, "getgrouplist"):
+- # Try os.getgrouplist, available in python >= 3.3
+- log.trace("Trying os.getgrouplist for '%s'", user)
+- try:
+- user_group_list = os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
+- group_names = [
+- _group.gr_name
+- for _group in grp.getgrall()
+- if _group.gr_gid in user_group_list
+- ]
+- except Exception: # pylint: disable=broad-except
+- pass
+- elif HAS_PYSSS:
+- # Try pysss.getgrouplist
+- log.trace("Trying pysss.getgrouplist for '%s'", user)
+- try:
+- group_names = list(pysss.getgrouplist(user))
+- except Exception: # pylint: disable=broad-except
+- pass
++ # Try os.getgrouplist, available in python >= 3.3
++ log.trace("Trying os.getgrouplist for '%s'", user)
++ try:
++ user_group_list = sorted(os.getgrouplist(user, pwd.getpwnam(user).pw_gid))
++ local_grall = _getgrall()
++ local_gids = sorted(lgrp.gr_gid for lgrp in local_grall)
++ max_idx = -1
++ local_max = local_gids[max_idx]
++ while local_max >= 65000:
++ max_idx -= 1
++ local_max = local_gids[max_idx]
++ user_group_list_local = [lgrp for lgrp in user_group_list if lgrp <= local_max]
++ user_group_list_remote = [rgrp for rgrp in user_group_list if rgrp > local_max]
++ local_group_names = [
++ _group.gr_name
++ for _group in local_grall
++ if _group.gr_gid in user_group_list_local
++ ]
++ remote_group_names = [
++ grp.getgrgid(group_id).gr_name for group_id in user_group_list_remote
++ ]
++ group_names = local_group_names + remote_group_names
++ except Exception: # pylint: disable=broad-except
++ pass
+
+ if group_names is None:
+ # Fall back to generic code
+ # Include the user's default group to match behavior of
+- # os.getgrouplist() and pysss.getgrouplist()
++ # os.getgrouplist()
+ log.trace("Trying generic group list for '%s'", user)
+ group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
+ try:
+@@ -389,3 +387,24 @@ def get_gid(group=None):
+ return grp.getgrnam(group).gr_gid
+ except KeyError:
+ return None
++
++
++def _getgrall(root=None):
++ """
++    Alternative implementation of getgrall that uses only /etc/group
++ """
++ ret = []
++ root = "/" if not root else root
++ etc_group = os.path.join(root, "etc/group")
++ with salt.utils.files.fopen(etc_group) as fp_:
++ for line in fp_:
++ line = salt.utils.stringutils.to_unicode(line)
++ comps = line.strip().split(":")
++ # Generate a getgrall compatible output
++ comps[2] = int(comps[2])
++ if comps[3]:
++ comps[3] = [mem.strip() for mem in comps[3].split(",")]
++ else:
++ comps[3] = []
++ ret.append(grp.struct_group(comps))
++ return ret
+diff --git a/tests/pytests/functional/utils/user/test__getgrall.py b/tests/pytests/functional/utils/user/test__getgrall.py
+new file mode 100644
+index 0000000000..db994019e6
+--- /dev/null
++++ b/tests/pytests/functional/utils/user/test__getgrall.py
+@@ -0,0 +1,44 @@
++from textwrap import dedent
++
++import pytest
++
++pytest.importorskip("grp")
++
++import grp
++
++import salt.utils.user
++
++
++@pytest.fixture(scope="function")
++def etc_group(tmp_path):
++ etcgrp = tmp_path / "etc" / "group"
++ etcgrp.parent.mkdir()
++ etcgrp.write_text(
++ dedent(
++ """games:x:50:
++ docker:x:959:debian,salt
++ salt:x:1000:"""
++ )
++ )
++ return etcgrp
++
++
++def test__getgrall(etc_group):
++ group_lines = [
++ ["games", "x", 50, []],
++ ["docker", "x", 959, ["debian", "salt"]],
++ ["salt", "x", 1000, []],
++ ]
++ expected_grall = [grp.struct_group(comps) for comps in group_lines]
++
++ grall = salt.utils.user._getgrall(root=str(etc_group.parent.parent))
++
++ assert grall == expected_grall
++
++
++def test__getgrall_bad_format(etc_group):
++ with etc_group.open("a") as _fp:
++ _fp.write("\n# some comment here\n")
++
++ with pytest.raises(IndexError):
++ salt.utils.user._getgrall(root=str(etc_group.parent.parent))
+diff --git a/tests/pytests/unit/utils/test_user.py b/tests/pytests/unit/utils/test_user.py
+new file mode 100644
+index 0000000000..17c6b1551f
+--- /dev/null
++++ b/tests/pytests/unit/utils/test_user.py
+@@ -0,0 +1,29 @@
++from types import SimpleNamespace
++
++import pytest
++
++from tests.support.mock import MagicMock, patch
++
++pytest.importorskip("grp")
++
++import grp
++
++import salt.utils.user
++
++
++def test_get_group_list():
++ getpwname = SimpleNamespace(pw_gid=1000)
++ getgrgid = MagicMock(side_effect=[SimpleNamespace(gr_name="remote")])
++ group_lines = [
++ ["games", "x", 50, []],
++ ["salt", "x", 1000, []],
++ ]
++ getgrall = [grp.struct_group(comps) for comps in group_lines]
++ with patch("os.getgrouplist", MagicMock(return_value=[50, 1000, 12000])), patch(
++ "pwd.getpwnam", MagicMock(return_value=getpwname)
++ ), patch("salt.utils.user._getgrall", MagicMock(return_value=getgrall)), patch(
++ "grp.getgrgid", getgrgid
++ ):
++ group_list = salt.utils.user.get_group_list("salt")
++ assert group_list == ["games", "remote", "salt"]
++ getgrgid.assert_called_once()
+--
+2.35.3
+
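Note: the key idea above is that os.getgrouplist() returns every gid NSS knows about (local and remote via sssd), while reading /etc/group directly yields only the local ones, cheaply. A condensed sketch of the local-only lookup (assuming a well-formed group file; Salt's helper additionally routes through its own file utilities):

    import grp


    def getgrall_local(path="/etc/group"):
        entries = []
        with open(path) as fp:
            for line in fp:
                # name:passwd:gid:member,member for well-formed lines
                name, passwd, gid, members = line.strip().split(":")
                entries.append(
                    grp.struct_group(
                        (name, passwd, int(gid), members.split(",") if members else [])
                    )
                )
        return entries
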
diff --git a/fix-utf8-handling-in-pass-renderer-and-make-it-more-.patch b/fix-utf8-handling-in-pass-renderer-and-make-it-more-.patch
new file mode 100644
index 0000000..ae97a49
--- /dev/null
+++ b/fix-utf8-handling-in-pass-renderer-and-make-it-more-.patch
@@ -0,0 +1,181 @@
+From 027cbef223616f5ab6c73e60bcaa9f9e81a6ce67 Mon Sep 17 00:00:00 2001
+From: Daniel Mach
+Date: Wed, 28 Jun 2023 16:39:42 +0200
+Subject: [PATCH] Fix utf8 handling in 'pass' renderer and make it more
+ robust (#579)
+
+* Migrate string formatting in 'pass' renderer to a f-string
+
+* Fix utf8 handling in 'pass' renderer and make it more robust
+---
+ changelog/64300.fixed.md | 1 +
+ salt/renderers/pass.py | 12 +--
+ tests/pytests/unit/renderers/test_pass.py | 99 +++++++++++++++++++++++
+ 3 files changed, 103 insertions(+), 9 deletions(-)
+ create mode 100644 changelog/64300.fixed.md
+
+diff --git a/changelog/64300.fixed.md b/changelog/64300.fixed.md
+new file mode 100644
+index 0000000000..4418db1d04
+--- /dev/null
++++ b/changelog/64300.fixed.md
+@@ -0,0 +1 @@
++Fix utf8 handling in 'pass' renderer
+diff --git a/salt/renderers/pass.py b/salt/renderers/pass.py
+index ba0f152c23..ae75bba443 100644
+--- a/salt/renderers/pass.py
++++ b/salt/renderers/pass.py
+@@ -145,23 +145,17 @@ def _fetch_secret(pass_path):
+ env["GNUPGHOME"] = pass_gnupghome
+
+ try:
+- proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
++ proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env, encoding="utf-8")
+ pass_data, pass_error = proc.communicate()
+ pass_returncode = proc.returncode
+- except OSError as e:
++ except (OSError, UnicodeDecodeError) as e:
+ pass_data, pass_error = "", str(e)
+ pass_returncode = 1
+
+ # The version of pass used during development sent output to
+ # stdout instead of stderr even though its returncode was non zero.
+ if pass_returncode or not pass_data:
+- try:
+- pass_error = pass_error.decode("utf-8")
+- except (AttributeError, ValueError):
+- pass
+- msg = "Could not fetch secret '{}' from the password store: {}".format(
+- pass_path, pass_error
+- )
++ msg = f"Could not fetch secret '{pass_path}' from the password store: {pass_error}"
+ if pass_strict_fetch:
+ raise SaltRenderError(msg)
+ else:
+diff --git a/tests/pytests/unit/renderers/test_pass.py b/tests/pytests/unit/renderers/test_pass.py
+index 1e2ebb7ea8..f7c79e1fe1 100644
+--- a/tests/pytests/unit/renderers/test_pass.py
++++ b/tests/pytests/unit/renderers/test_pass.py
+@@ -1,8 +1,12 @@
+ import importlib
++import os
++import shutil
++import tempfile
+
+ import pytest
+
+ import salt.exceptions
++import salt.utils.files
+ from tests.support.mock import MagicMock, patch
+
+ # "pass" is a reserved keyword, we need to import it differently
+@@ -19,6 +23,47 @@ def configure_loader_modules(master_opts):
+ }
+
+
++@pytest.fixture()
++def pass_executable(request):
++ tmp_dir = tempfile.mkdtemp(prefix="salt_pass_")
++ pass_path = os.path.join(tmp_dir, "pass")
++ with salt.utils.files.fopen(pass_path, "w") as f:
++ f.write("#!/bin/sh\n")
++        # return the pass path wrapped in unicode characters
++ # pass args ($1, $2) are ("show", )
++ f.write('echo "α>>> $2 <<<β"\n')
++ os.chmod(pass_path, 0o755)
++ yield pass_path
++ shutil.rmtree(tmp_dir)
++
++
++@pytest.fixture()
++def pass_executable_error(request):
++ tmp_dir = tempfile.mkdtemp(prefix="salt_pass_")
++ pass_path = os.path.join(tmp_dir, "pass")
++ with salt.utils.files.fopen(pass_path, "w") as f:
++ f.write("#!/bin/sh\n")
++ # return error message with unicode characters
++ f.write('echo "ERROR: αβγ" >&2\n')
++ f.write("exit 1\n")
++ os.chmod(pass_path, 0o755)
++ yield pass_path
++ shutil.rmtree(tmp_dir)
++
++
++@pytest.fixture()
++def pass_executable_invalid_utf8(request):
++ tmp_dir = tempfile.mkdtemp(prefix="salt_pass_")
++ pass_path = os.path.join(tmp_dir, "pass")
++ with salt.utils.files.fopen(pass_path, "wb") as f:
++ f.write(b"#!/bin/sh\n")
++ # return invalid utf-8 sequence
++ f.write(b'echo "\x80\x81"\n')
++ os.chmod(pass_path, 0o755)
++ yield pass_path
++ shutil.rmtree(tmp_dir)
++
++
+ # The default behavior is that if fetching a secret from pass fails,
+ # the value is passed through. Even the trailing newlines are preserved.
+ def test_passthrough():
+@@ -161,3 +206,57 @@ def test_env():
+ call_args, call_kwargs = popen_mock.call_args_list[0]
+ assert call_kwargs["env"]["GNUPGHOME"] == config["pass_gnupghome"]
+ assert call_kwargs["env"]["PASSWORD_STORE_DIR"] == config["pass_dir"]
++
++
++@pytest.mark.skip_on_windows(reason="Not supported on Windows")
++def test_utf8(pass_executable):
++ config = {
++ "pass_variable_prefix": "pass:",
++ "pass_strict_fetch": True,
++ }
++ mocks = {
++ "_get_pass_exec": MagicMock(return_value=pass_executable),
++ }
++
++ pass_path = "pass:secret"
++ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
++ result = pass_.render(pass_path)
++ assert result == "α>>> secret <<<β"
++
++
++@pytest.mark.skip_on_windows(reason="Not supported on Windows")
++def test_utf8_error(pass_executable_error):
++ config = {
++ "pass_variable_prefix": "pass:",
++ "pass_strict_fetch": True,
++ }
++ mocks = {
++ "_get_pass_exec": MagicMock(return_value=pass_executable_error),
++ }
++
++ pass_path = "pass:secret"
++ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
++ with pytest.raises(
++ salt.exceptions.SaltRenderError,
++ match=r"Could not fetch secret 'secret' from the password store: ERROR: αβγ",
++ ):
++ result = pass_.render(pass_path)
++
++
++@pytest.mark.skip_on_windows(reason="Not supported on Windows")
++def test_invalid_utf8(pass_executable_invalid_utf8):
++ config = {
++ "pass_variable_prefix": "pass:",
++ "pass_strict_fetch": True,
++ }
++ mocks = {
++ "_get_pass_exec": MagicMock(return_value=pass_executable_invalid_utf8),
++ }
++
++ pass_path = "pass:secret"
++ with patch.dict(pass_.__opts__, config), patch.dict(pass_.__dict__, mocks):
++ with pytest.raises(
++ salt.exceptions.SaltRenderError,
++ match=r"Could not fetch secret 'secret' from the password store: 'utf-8' codec can't decode byte 0x80 in position 0: invalid start byte",
++ ):
++ result = pass_.render(pass_path)
+--
+2.41.0
+
+
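Note: passing encoding="utf-8" to Popen makes communicate() return str directly; a decoding failure then surfaces as UnicodeDecodeError from the call itself, which is why the except clause above grows a second exception type. The same shape in isolation (using echo as a stand-in for the pass binary):

    from subprocess import PIPE, Popen

    try:
        proc = Popen(["echo", "secret"], stdout=PIPE, stderr=PIPE, encoding="utf-8")
        out, err = proc.communicate()  # already str; no manual .decode() needed
        returncode = proc.returncode
    except (OSError, UnicodeDecodeError) as exc:
        out, err, returncode = "", str(exc), 1
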
diff --git a/fix-version-detection-and-avoid-building-and-testing.patch b/fix-version-detection-and-avoid-building-and-testing.patch
new file mode 100644
index 0000000..0b18663
--- /dev/null
+++ b/fix-version-detection-and-avoid-building-and-testing.patch
@@ -0,0 +1,58 @@
+From c0fae09e5a4f6997a60007d970c7c6a5614d9102 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 19 Apr 2023 10:41:28 +0100
+Subject: [PATCH] Fix version detection and avoid building and testing
+ failures
+
+---
+ salt/version.py | 20 ++------------------
+ 1 file changed, 2 insertions(+), 18 deletions(-)
+
+diff --git a/salt/version.py b/salt/version.py
+index 43cb5f86f7..67719bd020 100644
+--- a/salt/version.py
++++ b/salt/version.py
+@@ -1,7 +1,6 @@
+ """
+ Set up the version of Salt
+ """
+-import argparse
+ import operator
+ import os
+ import platform
+@@ -78,7 +77,7 @@ class SaltVersionsInfo(type):
+ ALUMINIUM = SaltVersion("Aluminium" , info=3003, released=True)
+ SILICON = SaltVersion("Silicon" , info=3004, released=True)
+ PHOSPHORUS = SaltVersion("Phosphorus" , info=3005, released=True)
+- SULFUR = SaltVersion("Sulfur" , info=(3006, 0), released=True)
++ SULFUR = SaltVersion("Sulfur" , info=(3006, 0))
+ CHLORINE = SaltVersion("Chlorine" , info=(3007, 0))
+ ARGON = SaltVersion("Argon" , info=(3008, 0))
+ POTASSIUM = SaltVersion("Potassium" , info=(3009, 0))
+@@ -922,20 +921,5 @@ def versions_report(include_salt_cloud=False, include_extensions=True):
+ yield from info
+
+
+-def _parser():
+- parser = argparse.ArgumentParser()
+- parser.add_argument(
+- "--next-release", help="Return the next release", action="store_true"
+- )
+- # When pip installing we pass in other args to this script.
+- # This allows us to catch those args but not use them
+- parser.add_argument("unknown", nargs=argparse.REMAINDER)
+- return parser.parse_args()
+-
+-
+ if __name__ == "__main__":
+- args = _parser()
+- if args.next_release:
+- print(__saltstack_version__.next_release())
+- else:
+- print(__version__)
++ print(__version__)
+--
+2.39.2
+
+
diff --git a/fix-virtual-grains-for-vms-running-on-nutanix-ahv-bs.patch b/fix-virtual-grains-for-vms-running-on-nutanix-ahv-bs.patch
new file mode 100644
index 0000000..62081b7
--- /dev/null
+++ b/fix-virtual-grains-for-vms-running-on-nutanix-ahv-bs.patch
@@ -0,0 +1,282 @@
+From 609a9fb9c0a66fa57b02f03267bbaa00bd624f20 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 22 Jan 2025 13:01:39 +0000
+Subject: [PATCH] Fix virtual grains for VMs running on Nutanix AHV
+ (bsc#1234022) (#697)
+
+* Fix virtual grains for Nutanix AHV instances
+
+* Add changelog
+---
+ changelog/67180.fixed.md | 1 +
+ salt/grains/core.py | 14 ++
+ tests/pytests/unit/grains/test_core.py | 184 +++++++++++++++++++++++++
+ 3 files changed, 199 insertions(+)
+ create mode 100644 changelog/67180.fixed.md
+
+diff --git a/changelog/67180.fixed.md b/changelog/67180.fixed.md
+new file mode 100644
+index 0000000000..bcdc0e9360
+--- /dev/null
++++ b/changelog/67180.fixed.md
+@@ -0,0 +1 @@
++Fix virtual grains for VMs running on Nutanix AHV
+diff --git a/salt/grains/core.py b/salt/grains/core.py
+index 98bbd3868e..84d5b179dd 100644
+--- a/salt/grains/core.py
++++ b/salt/grains/core.py
+@@ -768,6 +768,9 @@ def _windows_virtual(osdata):
+ # Manufacturer: Parallels Software International Inc.
+ elif "Parallels" in manufacturer:
+ grains["virtual"] = "Parallels"
++ elif "Nutanix" in manufacturer and "AHV" in product_name:
++ grains["virtual"] = "kvm"
++ grains["virtual_subtype"] = "Nutanix AHV"
+ # Apache CloudStack
+ elif "CloudStack KVM Hypervisor" in productname:
+ grains["virtual"] = "kvm"
+@@ -927,6 +930,10 @@ def _virtual(osdata):
+ elif "parallels" in line:
+ grains["virtual"] = "Parallels"
+ break
++ elif "nutanix" in line:
++ grains["virtual"] = "kvm"
++ grains["virtual_subtype"] = "Nutanix AHV"
++ break
+ elif "hyperv" in line:
+ grains["virtual"] = "HyperV"
+ break
+@@ -978,6 +985,9 @@ def _virtual(osdata):
+ grains["virtual"] = "Parallels"
+ elif "Manufacturer: Google" in output:
+ grains["virtual"] = "kvm"
++ elif "Manufacturer: Nutanix" in output and "Product Name: AHV" in output:
++ grains["virtual"] = "kvm"
++ grains["virtual_subtype"] = "Nutanix AHV"
+ # Proxmox KVM
+ elif "Vendor: SeaBIOS" in output:
+ grains["virtual"] = "kvm"
+@@ -1234,6 +1244,7 @@ def _virtual(osdata):
+ grains["virtual"] = "virtual"
+
+ # Try to detect if the instance is running on Amazon EC2
++ # or Nutanix AHV
+ if grains["virtual"] in ("qemu", "kvm", "xen", "amazon"):
+ dmidecode = salt.utils.path.which("dmidecode")
+ if dmidecode:
+@@ -1253,6 +1264,9 @@ def _virtual(osdata):
+ elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL):
+ grains["virtual_subtype"] = "Amazon EC2"
+
++ elif "Manufacturer: Nutanix" in output and "Product Name: AHV" in output:
++ grains["virtual_subtype"] = "Nutanix AHV"
++
+ for command in failed_commands:
+ log.info(
+ "Although '%s' was found in path, the current user "
+diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py
+index b64b8c4bf8..3d2beaa2c9 100644
+--- a/tests/pytests/unit/grains/test_core.py
++++ b/tests/pytests/unit/grains/test_core.py
+@@ -2770,6 +2770,10 @@ def test_virtual_has_virtual_grain():
+ {"kernel": "Windows", "manufacturer": "Parallels Software"},
+ {"virtual": "Parallels"},
+ ),
++ (
++ {"kernel": "Windows", "manufacturer": "Nutanix", "product_name": "AHV"},
++ {"virtual": "kvm", "virtual_subtype": "Nutanix AHV"},
++ ),
+ ],
+ )
+ def test__windows_virtual(osdata, expected):
+@@ -3453,6 +3457,186 @@ def test_virtual_linux_proc_files_with_non_utf8_chars():
+ assert virt_grains == {"virtual": "physical"}
+
+
++@pytest.mark.skip_unless_on_linux
++def test_virtual_nutanix_virt_what():
++ osdata = {}
++
++ (
++ osdata["kernel"],
++ osdata["nodename"],
++ osdata["kernelrelease"],
++ osdata["kernelversion"],
++ osdata["cpuarch"],
++ _,
++ ) = platform.uname()
++
++ which_mock = MagicMock(
++ side_effect=[
++ # Check with virt-what
++ "/usr/sbin/virt-what",
++ "/usr/sbin/virt-what",
++ None,
++ "/usr/sbin/dmidecode",
++ ]
++ )
++ cmd_run_all_mock = MagicMock(
++ side_effect=[
++ # Check with virt-what
++ {"retcode": 0, "stderr": "", "stdout": "nutanix_ahv"},
++ {
++ "retcode": 0,
++ "stderr": "",
++ "stdout": "\n".join(
++ [
++ "dmidecode 3.4",
++ "Getting SMBIOS data from sysfs.",
++ "SMBIOS 2.8 present.",
++ "",
++ "Handle 0x0001, DMI type 1, 27 bytes",
++ "System Information",
++ " Manufacturer: Nutanix",
++ " Product Name: AHV",
++ " Version: Not Specified",
++ " Serial Number: 01234567-dcba-1234-abcd-abcdef012345",
++ " UUID: 12345678-abcd-4321-dcba-0123456789ab",
++ " Wake-up Type: Power Switch",
++ " SKU Number: Not Specified",
++ " Family: Not Specified",
++ "",
++ "Handle 0x2000, DMI type 32, 11 bytes",
++ "System Boot Information",
++ " Status: No errors detected",
++ ]
++ ),
++ },
++ ]
++ )
++
++ with patch("salt.utils.path.which", which_mock), patch.dict(
++ core.__salt__,
++ {
++ "cmd.run": salt.modules.cmdmod.run,
++ "cmd.run_all": cmd_run_all_mock,
++ "cmd.retcode": salt.modules.cmdmod.retcode,
++ "smbios.get": salt.modules.smbios.get,
++ },
++ ):
++
++ virtual_grains = core._virtual(osdata.copy())
++
++ assert virtual_grains["virtual"] == "kvm"
++ assert virtual_grains["virtual_subtype"] == "Nutanix AHV"
++
++
++@pytest.mark.skip_unless_on_linux
++def test_virtual_nutanix_dmidecode():
++ osdata = {}
++
++ (
++ osdata["kernel"],
++ osdata["nodename"],
++ osdata["kernelrelease"],
++ osdata["kernelversion"],
++ osdata["cpuarch"],
++ _,
++ ) = platform.uname()
++
++ which_mock = MagicMock(
++ side_effect=[
++ # Check with virt-what
++ None,
++ None,
++ None,
++ "/usr/sbin/dmidecode",
++ None,
++ "/usr/sbin/dmidecode",
++ ]
++ )
++ cmd_run_all_mock = MagicMock(
++ side_effect=[
++ {
++ "retcode": 0,
++ "stderr": "",
++ "stdout": "\n".join(
++ [
++ "dmidecode 3.4",
++ "Getting SMBIOS data from sysfs.",
++ "SMBIOS 2.8 present.",
++ "",
++ "Handle 0x0001, DMI type 1, 27 bytes",
++ "System Information",
++ " Manufacturer: Nutanix",
++ " Product Name: AHV",
++ " Version: Not Specified",
++ " Serial Number: 01234567-dcba-1234-abcd-abcdef012345",
++ " UUID: 12345678-abcd-4321-dcba-0123456789ab",
++ " Wake-up Type: Power Switch",
++ " SKU Number: Not Specified",
++ " Family: Not Specified",
++ "",
++ "Handle 0x2000, DMI type 32, 11 bytes",
++ "System Boot Information",
++ " Status: No errors detected",
++ ]
++ ),
++ },
++ {
++ "retcode": 0,
++ "stderr": "",
++ "stdout": "\n".join(
++ [
++ "dmidecode 3.4",
++ "Getting SMBIOS data from sysfs.",
++ "SMBIOS 2.8 present.",
++ "",
++ "Handle 0x0001, DMI type 1, 27 bytes",
++ "System Information",
++ " Manufacturer: Nutanix",
++ " Product Name: AHV",
++ " Version: Not Specified",
++ " Serial Number: 01234567-dcba-1234-abcd-abcdef012345",
++ " UUID: 12345678-abcd-4321-dcba-0123456789ab",
++ " Wake-up Type: Power Switch",
++ " SKU Number: Not Specified",
++ " Family: Not Specified",
++ "",
++ "Handle 0x2000, DMI type 32, 11 bytes",
++ "System Boot Information",
++ " Status: No errors detected",
++ ]
++ ),
++ },
++ ]
++ )
++
++ def _mock_is_file(filename):
++ if filename in (
++ "/proc/1/cgroup",
++ "/proc/cpuinfo",
++ "/sys/devices/virtual/dmi/id/product_name",
++ "/proc/xen/xsd_kva",
++ "/proc/xen/capabilities",
++ ):
++ return False
++ return True
++
++ with patch("salt.utils.path.which", which_mock), patch.dict(
++ core.__salt__,
++ {
++ "cmd.run": salt.modules.cmdmod.run,
++ "cmd.run_all": cmd_run_all_mock,
++ "cmd.retcode": salt.modules.cmdmod.retcode,
++ "smbios.get": salt.modules.smbios.get,
++ },
++ ), patch("os.path.isfile", _mock_is_file), patch(
++ "os.path.isdir", return_value=False
++ ):
++ virtual_grains = core._virtual(osdata.copy())
++
++ assert virtual_grains["virtual"] == "kvm"
++ assert virtual_grains["virtual_subtype"] == "Nutanix AHV"
++
++
+ @pytest.mark.skip_unless_on_linux
+ def test_virtual_set_virtual_ec2():
+ osdata = {}
+--
+2.47.0
+
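Note: all three detection paths added above reduce to the same string checks on DMI data ("Manufacturer: Nutanix" plus a product name of "AHV"), or the nutanix_ahv token from virt-what. A distilled sketch of the DMI branch (an illustrative helper, not the grain code itself):

    def nutanix_ahv_grains(dmidecode_output):
        # The real _virtual() also consults virt-what and smbios.get.
        if (
            "Manufacturer: Nutanix" in dmidecode_output
            and "Product Name: AHV" in dmidecode_output
        ):
            return {"virtual": "kvm", "virtual_subtype": "Nutanix AHV"}
        return {}
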
diff --git a/fix-x509-private-key-tests-and-test_suse-on-sle12-68.patch b/fix-x509-private-key-tests-and-test_suse-on-sle12-68.patch
new file mode 100644
index 0000000..92f7f1e
--- /dev/null
+++ b/fix-x509-private-key-tests-and-test_suse-on-sle12-68.patch
@@ -0,0 +1,87 @@
+From 43e05d3beea1d6e772fe88c051abf006c2a9bf90 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Thu, 3 Oct 2024 13:08:16 +0200
+Subject: [PATCH] Fix x509 private key tests and test_suse on SLE12
+ (#684)
+
+---
+ .../functional/modules/test_x509_v2.py | 12 +++++++++--
+ .../functional/states/pkgrepo/test_suse.py | 20 +++++++++----------
+ 2 files changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/tests/pytests/functional/modules/test_x509_v2.py b/tests/pytests/functional/modules/test_x509_v2.py
+index 7de8f3b01f..3db78c1b63 100644
+--- a/tests/pytests/functional/modules/test_x509_v2.py
++++ b/tests/pytests/functional/modules/test_x509_v2.py
+@@ -1442,14 +1442,22 @@ def test_create_private_key_with_passphrase(x509, algo):
+
+ @pytest.mark.slow_test
+ def test_create_private_key_der(x509):
+- res = x509.create_private_key(algo="ec", encoding="der")
++ try:
++ res = x509.create_private_key(algo="ec", encoding="der")
++ except NotImplementedError:
++ pytest.skip("Algorithm 'ec' is not supported on this OpenSSL version")
+ assert base64.b64decode(res)
+
+
+ @pytest.mark.slow_test
+ @pytest.mark.parametrize("passphrase", [None, "hunter2"])
+ def test_create_private_key_pkcs12(x509, passphrase):
+- res = x509.create_private_key(algo="ec", encoding="pkcs12", passphrase=passphrase)
++ try:
++ res = x509.create_private_key(
++ algo="ec", encoding="pkcs12", passphrase=passphrase
++ )
++ except NotImplementedError:
++ pytest.skip("Algorithm 'ec' is not supported on this OpenSSL version")
+ assert base64.b64decode(res)
+
+
+diff --git a/tests/pytests/functional/states/pkgrepo/test_suse.py b/tests/pytests/functional/states/pkgrepo/test_suse.py
+index 3bafeedc94..d21a9aeb9d 100644
+--- a/tests/pytests/functional/states/pkgrepo/test_suse.py
++++ b/tests/pytests/functional/states/pkgrepo/test_suse.py
+@@ -28,14 +28,14 @@ def suse_state_tree(grains, pkgrepo, state_tree):
+ - comments:
+ - '# Salt Test'
+ - refresh: 1
+- {% if grains['osmajorrelease'] == 15 %}
+- - baseurl: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/
+- - humanname: openSUSE Backports for SLE 15 SP4
+- - gpgkey: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/repodata/repomd.xml.key
+- {% elif grains['osfullname'] == 'openSUSE Tumbleweed' %}
++ {% if grains['osfullname'] == 'openSUSE Tumbleweed' %}
+ - baseurl: http://download.opensuse.org/tumbleweed/repo/oss/
+ - humanname: openSUSE Tumbleweed OSS
+ - gpgkey: https://download.opensuse.org/tumbleweed/repo/oss/repodata/repomd.xml.key
++ {% else %}
++ - baseurl: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/
++ - humanname: openSUSE Backports for SLE 15 SP4
++ - gpgkey: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/repodata/repomd.xml.key
+ {% endif %}
+ """
+
+@@ -53,14 +53,14 @@ def suse_state_tree(grains, pkgrepo, state_tree):
+ - comments:
+ - '# Salt Test (modified)'
+ - refresh: 1
+- {% if grains['osmajorrelease'] == 15 %}
+- - baseurl: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/
+- - humanname: Salt modified Backports
+- - gpgkey: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/repodata/repomd.xml.key
+- {% elif grains['osfullname'] == 'openSUSE Tumbleweed' %}
++ {% if grains['osfullname'] == 'openSUSE Tumbleweed' %}
+ - baseurl: http://download.opensuse.org/tumbleweed/repo/oss/
+ - humanname: Salt modified OSS
+ - gpgkey: https://download.opensuse.org/tumbleweed/repo/oss/repodata/repomd.xml.key
++ {% else %}
++ - baseurl: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/
++ - humanname: Salt modified Backports
++ - gpgkey: https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15-SP4/standard/repodata/repomd.xml.key
+ {% endif %}
+ """
+
+--
+2.46.1
+
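Note: the recurring pattern here is to wrap the call that may hit an old OpenSSL and turn NotImplementedError into a skip rather than a failure. Reduced to its shape (a hypothetical create() standing in for the x509 call):

    import pytest


    def test_ec_private_key():
        def create():
            # Stand-in: old OpenSSL raises when the curve is unsupported.
            raise NotImplementedError("ec unsupported")

        try:
            res = create()
        except NotImplementedError:
            pytest.skip("Algorithm 'ec' is not supported on this OpenSSL version")
        assert res
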
diff --git a/fix-x509-test-fails-on-old-openssl-systems-682.patch b/fix-x509-test-fails-on-old-openssl-systems-682.patch
new file mode 100644
index 0000000..7e6ae6c
--- /dev/null
+++ b/fix-x509-test-fails-on-old-openssl-systems-682.patch
@@ -0,0 +1,261 @@
+From 7daf461528c90776b8f865cd58d20e23bd5b6f3f Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 2 Oct 2024 09:09:34 +0200
+Subject: [PATCH] Fix x509 test fails on old openssl systems (#682)
+
+---
+ .../functional/modules/test_x509_v2.py | 41 +++++++++++++----
+ .../pytests/functional/states/test_x509_v2.py | 44 +++++++++++++++----
+ .../scenarios/performance/test_performance.py | 8 +++-
+ 3 files changed, 75 insertions(+), 18 deletions(-)
+
+diff --git a/tests/pytests/functional/modules/test_x509_v2.py b/tests/pytests/functional/modules/test_x509_v2.py
+index 2e8152d04a..7de8f3b01f 100644
+--- a/tests/pytests/functional/modules/test_x509_v2.py
++++ b/tests/pytests/functional/modules/test_x509_v2.py
+@@ -681,8 +681,13 @@ def test_create_certificate_self_signed(x509, algo, request):
+ privkey = request.getfixturevalue(f"{algo}_privkey")
+ try:
+ res = x509.create_certificate(signing_private_key=privkey, CN="success")
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ except salt.exceptions.CommandExecutionError as e:
++ if "Could not load PEM-encoded" in e.error:
++ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ else:
++ raise e
+ assert res.startswith("-----BEGIN CERTIFICATE-----")
+ cert = _get_cert(res)
+ assert cert.subject.rfc4514_string() == "CN=success"
+@@ -754,8 +759,13 @@ def test_create_certificate_from_privkey(x509, ca_key, ca_cert, algo, request):
+ private_key=privkey,
+ CN="success",
+ )
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ except salt.exceptions.CommandExecutionError as e:
++ if "Could not load PEM-encoded" in e.error:
++ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ else:
++ raise e
+ assert res.startswith("-----BEGIN CERTIFICATE-----")
+ cert = _get_cert(res)
+ assert cert.subject.rfc4514_string() == "CN=success"
+@@ -802,8 +812,13 @@ def test_create_certificate_from_pubkey(x509, ca_key, ca_cert, algo, request):
+ public_key=pubkey,
+ CN="success",
+ )
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ except salt.exceptions.CommandExecutionError as e:
++ if "Could not load PEM-encoded" in e.error:
++ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ else:
++ raise e
+ assert res.startswith("-----BEGIN CERTIFICATE-----")
+ cert = _get_cert(res)
+ assert cert.subject.rfc4514_string() == "CN=success"
+@@ -1341,8 +1356,13 @@ def test_create_csr(x509, algo, request):
+ privkey = request.getfixturevalue(f"{algo}_privkey")
+ try:
+ res = x509.create_csr(private_key=privkey)
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ except salt.exceptions.CommandExecutionError as e:
++ if "Could not load PEM-encoded" in e.error:
++ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ else:
++ raise e
+ assert res.startswith("-----BEGIN CERTIFICATE REQUEST-----")
+
+
+@@ -1402,7 +1422,7 @@ def test_create_csr_raw(x509, rsa_privkey):
+ def test_create_private_key(x509, algo):
+ try:
+ res = x509.create_private_key(algo=algo)
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
+ assert res.startswith("-----BEGIN PRIVATE KEY-----")
+
+@@ -1413,7 +1433,7 @@ def test_create_private_key_with_passphrase(x509, algo):
+ passphrase = "hunter2"
+ try:
+ res = x509.create_private_key(algo=algo, passphrase=passphrase)
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
+ assert res.startswith("-----BEGIN ENCRYPTED PRIVATE KEY-----")
+ # ensure it can be loaded
+@@ -1465,8 +1485,13 @@ def test_get_private_key_size(x509, algo, expected, request):
+ privkey = request.getfixturevalue(f"{algo}_privkey")
+ try:
+ res = x509.get_private_key_size(privkey)
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ except salt.exceptions.CommandExecutionError as e:
++ if "Could not load PEM-encoded" in e.error:
++ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
++ else:
++ raise e
+ assert res == expected
+
+
+@@ -1612,7 +1637,7 @@ def test_verify_signature(x509, algo, request):
+ wrong_privkey = request.getfixturevalue(f"{algo}_privkey")
+ try:
+ privkey = x509.create_private_key(algo=algo)
+- except UnsupportedAlgorithm:
++ except (UnsupportedAlgorithm, NotImplementedError):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
+ cert = x509.create_certificate(signing_private_key=privkey)
+ assert x509.verify_signature(cert, privkey)
+diff --git a/tests/pytests/functional/states/test_x509_v2.py b/tests/pytests/functional/states/test_x509_v2.py
+index 47a1c555f8..139f7b1906 100644
+--- a/tests/pytests/functional/states/test_x509_v2.py
++++ b/tests/pytests/functional/states/test_x509_v2.py
+@@ -574,9 +574,9 @@ def existing_cert(x509, cert_args, ca_key, rsa_privkey, request):
+ ca_key,
+ encoding=cert_args.get("encoding", "pem"),
+ passphrase=cert_args.get("pkcs12_passphrase"),
+- subject=subject
+- if "signing_policy" not in cert_args
+- else "CN=from_signing_policy",
++ subject=(
++ subject if "signing_policy" not in cert_args else "CN=from_signing_policy"
++ ),
+ )
+ yield cert_args["name"]
+
+@@ -694,8 +694,12 @@ def existing_csr_exts(x509, csr_args, csr_args_exts, ca_key, rsa_privkey, reques
+ def existing_pk(x509, pk_args, request):
+ pk_args.update(request.param)
+ ret = x509.private_key_managed(**pk_args)
+- if ret.result == False and "UnsupportedAlgorithm" in ret.comment:
+- pytest.skip(f"Algorithm '{pk_args['algo']}' is not supported on this OpenSSL version")
++ if ret.result == False and (
++ "UnsupportedAlgorithm" in ret.comment or "NotImplementedError" in ret.comment
++ ):
++ pytest.skip(
++ f"Algorithm '{pk_args['algo']}' is not supported on this OpenSSL version"
++ )
+ _assert_pk_basic(
+ ret,
+ pk_args.get("algo", "rsa"),
+@@ -1054,6 +1058,8 @@ def test_certificate_managed_days_valid_does_not_override_days_remaining(
+ def test_certificate_managed_privkey_change(x509, cert_args, ec_privkey, ca_key):
+ cert_args["private_key"] = ec_privkey
+ ret = x509.certificate_managed(**cert_args)
++ if ret.result == False and "NotImplementedError" in ret.comment:
++ pytest.skip("Current OpenSSL does not support 'ec' algorithm")
+ _assert_cert_basic(ret, cert_args["name"], ec_privkey, ca_key)
+ assert ret.changes["private_key"]
+
+@@ -1237,6 +1243,8 @@ def test_certificate_managed_wrong_ca_key(
+ cert_args["private_key"] = ec_privkey
+ cert_args["signing_private_key"] = rsa_privkey
+ ret = x509.certificate_managed(**cert_args)
++ if ret.result == False and "NotImplementedError" in ret.comment:
++ pytest.skip("Current OpenSSL does not support 'ec' algorithm")
+ assert ret.result is False
+ assert not ret.changes
+ assert "Signing private key does not match the certificate" in ret.comment
+@@ -1917,6 +1925,8 @@ def test_csr_managed_existing_invalid_version(x509, csr_args, rsa_privkey):
+ def test_csr_managed_privkey_change(x509, csr_args, ec_privkey):
+ csr_args["private_key"] = ec_privkey
+ ret = x509.csr_managed(**csr_args)
++ if ret.result == False and "NotImplementedError" in ret.comment:
++ pytest.skip("Current OpenSSL does not support 'ec' algorithm")
+ _assert_csr_basic(ret, ec_privkey)
+ assert ret.changes["private_key"]
+
+@@ -2141,11 +2151,14 @@ def test_private_key_managed(x509, pk_args, algo, encoding, passphrase):
+ pytest.skip(
+ "PKCS12 serialization of Edwards-curve keys requires cryptography v37"
+ )
++
+ pk_args["algo"] = algo
+ pk_args["encoding"] = encoding
+ pk_args["passphrase"] = passphrase
+ ret = x509.private_key_managed(**pk_args)
+- if ret.result == False and "UnsupportedAlgorithm" in ret.comment:
++ if ret.result == False and (
++ "UnsupportedAlgorithm" in ret.comment or "NotImplementedError" in ret.comment
++ ):
+ pytest.skip(f"Algorithm '{algo}' is not supported on this OpenSSL version")
+ _assert_pk_basic(ret, algo, encoding, passphrase)
+
+@@ -2155,6 +2168,8 @@ def test_private_key_managed_keysize(x509, pk_args, algo, keysize):
+ pk_args["algo"] = algo
+ pk_args["keysize"] = keysize
+ ret = x509.private_key_managed(**pk_args)
++ if ret.result == False and "NotImplementedError" in ret.comment:
++ pytest.skip("Current OpenSSL does not support 'ec' algorithm")
+ pk = _assert_pk_basic(ret, algo)
+ assert pk.key_size == keysize
+
+@@ -2174,8 +2189,12 @@ def test_private_key_managed_keysize(x509, pk_args, algo, keysize):
+ )
+ def test_private_key_managed_existing(x509, pk_args):
+ ret = x509.private_key_managed(**pk_args)
+- if ret.result == False and "UnsupportedAlgorithm" in ret.comment:
+- pytest.skip(f"Algorithm '{pk_args['algo']}' is not supported on this OpenSSL version")
++ if ret.result == False and (
++ "UnsupportedAlgorithm" in ret.comment or "NotImplementedError" in ret.comment
++ ):
++ pytest.skip(
++ f"Algorithm '{pk_args['algo']}' is not supported on this OpenSSL version"
++ )
+ _assert_not_changed(ret)
+
+
+@@ -2382,6 +2401,8 @@ def test_private_key_managed_follow_symlinks_changes(
+ pk_args["encoding"] = encoding
+ pk_args["algo"] = "ec"
+ ret = x509.private_key_managed(**pk_args)
++ if ret.result == False and "NotImplementedError" in ret.comment:
++ pytest.skip("Current OpenSSL does not support 'ec' algorithm")
+ assert ret.changes
+ assert Path(ret.name).is_symlink() == follow
+
+@@ -2722,7 +2743,12 @@ def _get_cert(cert, encoding="pem", passphrase=None):
+ def _belongs_to(cert_or_pubkey, privkey):
+ if isinstance(cert_or_pubkey, cx509.Certificate):
+ cert_or_pubkey = cert_or_pubkey.public_key()
+- return x509util.is_pair(cert_or_pubkey, x509util.load_privkey(privkey))
++ try:
++ return x509util.is_pair(cert_or_pubkey, x509util.load_privkey(privkey))
++ except NotImplementedError:
++ pytest.skip(
++ "This OpenSSL version does not support current cryptographic algorithm"
++ )
+
+
+ def _signed_by(cert, privkey):
+diff --git a/tests/pytests/scenarios/performance/test_performance.py b/tests/pytests/scenarios/performance/test_performance.py
+index 85b92ed986..6319e26ce1 100644
+--- a/tests/pytests/scenarios/performance/test_performance.py
++++ b/tests/pytests/scenarios/performance/test_performance.py
+@@ -10,7 +10,13 @@ from saltfactories.utils import random_string
+
+ from salt.version import SaltVersionsInfo, __version__
+
+-pytestmark = [pytest.mark.skip_if_binaries_missing("docker")]
++pytestmark = [
++ pytest.mark.skip_if_binaries_missing("docker"),
++ pytest.mark.skipif(
++ os.environ.get("GITHUB_ACTIONS", "") == "true",
++ reason="Cannot spawn containers in GH actions run",
++ ),
++]
+
+
+ class ContainerMaster(SaltDaemon, master.SaltMaster):
+--
+2.46.1
+
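Note: the performance-scenario hunk uses the module-level pytestmark hook so one guard covers every test in the file. The environment-based part in isolation (plain pytest; the skip_if_binaries_missing marker in the patch comes from Salt's test plugins):

    import os

    import pytest

    # Skip the whole module when running under GitHub Actions, where
    # nested containers cannot be spawned.
    pytestmark = pytest.mark.skipif(
        os.environ.get("GITHUB_ACTIONS", "") == "true",
        reason="Cannot spawn containers in GH actions run",
    )


    def test_placeholder():
        assert True
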
diff --git a/fixed-gitfs-cachedir_basename-to-avoid-hash-collisio.patch b/fixed-gitfs-cachedir_basename-to-avoid-hash-collisio.patch
new file mode 100644
index 0000000..1d18946
--- /dev/null
+++ b/fixed-gitfs-cachedir_basename-to-avoid-hash-collisio.patch
@@ -0,0 +1,833 @@
+From 7051f86bb48dbd618a7422d469f3aae4c6f18008 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Thu, 31 Aug 2023 10:41:53 +0100
+Subject: [PATCH] Fixed gitfs cachedir_basename to avoid hash collisions
+ (#599)
+
+(bsc#1193948, bsc#1214797, CVE-2023-20898)
+
+Fix gitfs tests
+
+It's `gitfs` not `gtfs`, plus some code fixes and cleanup
+
+Signed-off-by: Pedro Algarvio
+
+fix doc
+
+wrap sha in base64
+
+clean up cache name
+
+stop branch collision
+
+run pre
+
+Co-authored-by: cmcmarrow
+---
+ changelog/cve-2023-20898.security.md | 1 +
+ salt/utils/gitfs.py | 83 ++++++-
+ tests/pytests/unit/utils/test_gitfs.py | 255 +++++++++++++++++++++
+ tests/unit/utils/test_gitfs.py | 305 ++++++-------------------
+ 4 files changed, 403 insertions(+), 241 deletions(-)
+ create mode 100644 changelog/cve-2023-20898.security.md
+ create mode 100644 tests/pytests/unit/utils/test_gitfs.py
+
+diff --git a/changelog/cve-2023-20898.security.md b/changelog/cve-2023-20898.security.md
+new file mode 100644
+index 0000000000..44f1729192
+--- /dev/null
++++ b/changelog/cve-2023-20898.security.md
+@@ -0,0 +1 @@
++Fixed gitfs cachedir_basename to avoid hash collisions. Added MP Lock to gitfs. These changes should stop race conditions.
+diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
+index 38e84f38aa..af61aa0dda 100644
+--- a/salt/utils/gitfs.py
++++ b/salt/utils/gitfs.py
+@@ -3,6 +3,7 @@ Classes which provide the shared base for GitFS, git_pillar, and winrepo
+ """
+
+
++import base64
+ import contextlib
+ import copy
+ import errno
+@@ -11,10 +12,12 @@ import glob
+ import hashlib
+ import io
+ import logging
++import multiprocessing
+ import os
+ import shlex
+ import shutil
+ import stat
++import string
+ import subprocess
+ import time
+ import weakref
+@@ -22,6 +25,7 @@ from datetime import datetime
+
+ import salt.ext.tornado.ioloop
+ import salt.fileserver
++import salt.syspaths
+ import salt.utils.configparser
+ import salt.utils.data
+ import salt.utils.files
+@@ -34,7 +38,6 @@ import salt.utils.stringutils
+ import salt.utils.url
+ import salt.utils.user
+ import salt.utils.versions
+-import salt.syspaths
+ from salt.config import DEFAULT_MASTER_OPTS as _DEFAULT_MASTER_OPTS
+ from salt.exceptions import FileserverConfigError, GitLockError, get_error_message
+ from salt.utils.event import tagify
+@@ -226,6 +229,10 @@ class GitProvider:
+ invoking the parent class' __init__.
+ """
+
++    # The master lock should only be held for very short periods of time (seconds).
++    # It should be taken whenever a git provider reads or writes one of its locks.
++ _master_lock = multiprocessing.Lock()
++
+ def __init__(
+ self,
+ opts,
+@@ -452,13 +459,44 @@ class GitProvider:
+ failhard(self.role)
+
+ hash_type = getattr(hashlib, self.opts.get("hash_type", "md5"))
++ # Generate full id.
++        # The full id helps decrease the chance of collisions in the gitfs cache.
++ try:
++ target = str(self.get_checkout_target())
++ except AttributeError:
++ target = ""
++ self._full_id = "-".join(
++ [
++ getattr(self, "name", ""),
++ self.id,
++ getattr(self, "env", ""),
++ getattr(self, "_root", ""),
++ self.role,
++ getattr(self, "base", ""),
++ getattr(self, "branch", ""),
++ target,
++ ]
++ )
+ # We loaded this data from yaml configuration files, so, its safe
+ # to use UTF-8
+- self.hash = hash_type(self.id.encode("utf-8")).hexdigest()
+- self.cachedir_basename = getattr(self, "name", self.hash)
++ base64_hash = str(
++ base64.b64encode(hash_type(self._full_id.encode("utf-8")).digest()),
++ encoding="ascii", # base64 only outputs ascii
++ ).replace(
++ "/", "_"
++ ) # replace "/" with "_" to not cause trouble with file system
++
++ # limit name length to 19, so we don't eat up all the path length for windows
++ # this is due to pygit2 limitations
++ # replace any unknown char with "_" to not cause trouble with file system
++ name_chars = string.ascii_letters + string.digits + "-"
++ cache_name = "".join(
++ c if c in name_chars else "_" for c in getattr(self, "name", "")[:19]
++ )
++
++ self.cachedir_basename = f"{cache_name}-{base64_hash}"
+ self.cachedir = salt.utils.path.join(cache_root, self.cachedir_basename)
+ self.linkdir = salt.utils.path.join(cache_root, "links", self.cachedir_basename)
+-
+ if not os.path.isdir(self.cachedir):
+ os.makedirs(self.cachedir)
+
+@@ -473,6 +511,12 @@ class GitProvider:
+ log.critical(msg, exc_info=True)
+ failhard(self.role)
+
++ def full_id(self):
++ return self._full_id
++
++ def get_cachedir_basename(self):
++ return self.cachedir_basename
++
+ def _get_envs_from_ref_paths(self, refs):
+ """
+ Return the names of remote refs (stripped of the remote name) and tags
+@@ -663,6 +707,19 @@ class GitProvider:
+ """
+ Clear update.lk
+ """
++ if self.__class__._master_lock.acquire(timeout=60) is False:
++ # if gitfs works right we should never see this timeout error.
++ log.error("gitfs master lock timeout!")
++ raise TimeoutError("gitfs master lock timeout!")
++ try:
++ return self._clear_lock(lock_type)
++ finally:
++ self.__class__._master_lock.release()
++
++ def _clear_lock(self, lock_type="update"):
++ """
++ Clear update.lk without MultiProcessing locks
++ """
+ lock_file = self._get_lock_file(lock_type=lock_type)
+
+ def _add_error(errlist, exc):
+@@ -838,6 +895,20 @@ class GitProvider:
+ """
+ Place a lock file if (and only if) it does not already exist.
+ """
++ if self.__class__._master_lock.acquire(timeout=60) is False:
++ # if gitfs works right we should never see this timeout error.
++ log.error("gitfs master lock timeout!")
++ raise TimeoutError("gitfs master lock timeout!")
++ try:
++ return self.__lock(lock_type, failhard)
++ finally:
++ self.__class__._master_lock.release()
++
++ def __lock(self, lock_type="update", failhard=False):
++ """
++ Place a lock file if (and only if) it does not already exist.
++ Without MultiProcessing locks.
++ """
+ try:
+ fh_ = os.open(
+ self._get_lock_file(lock_type), os.O_CREAT | os.O_EXCL | os.O_WRONLY
+@@ -904,9 +975,9 @@ class GitProvider:
+ lock_type,
+ lock_file,
+ )
+- success, fail = self.clear_lock()
++ success, fail = self._clear_lock()
+ if success:
+- return self._lock(lock_type="update", failhard=failhard)
++ return self.__lock(lock_type="update", failhard=failhard)
+ elif failhard:
+ raise
+ return
+diff --git a/tests/pytests/unit/utils/test_gitfs.py b/tests/pytests/unit/utils/test_gitfs.py
+new file mode 100644
+index 0000000000..e9915de412
+--- /dev/null
++++ b/tests/pytests/unit/utils/test_gitfs.py
+@@ -0,0 +1,255 @@
++import os
++import string
++import time
++
++import pytest
++
++import salt.fileserver.gitfs
++import salt.utils.gitfs
++from salt.exceptions import FileserverConfigError
++from tests.support.helpers import patched_environ
++from tests.support.mock import MagicMock, patch
++
++try:
++ HAS_PYGIT2 = (
++ salt.utils.gitfs.PYGIT2_VERSION
++ and salt.utils.gitfs.PYGIT2_VERSION >= salt.utils.gitfs.PYGIT2_MINVER
++ and salt.utils.gitfs.LIBGIT2_VERSION
++ and salt.utils.gitfs.LIBGIT2_VERSION >= salt.utils.gitfs.LIBGIT2_MINVER
++ )
++except AttributeError:
++ HAS_PYGIT2 = False
++
++
++if HAS_PYGIT2:
++ import pygit2
++
++
++@pytest.mark.parametrize(
++ "role_name,role_class",
++ (
++ ("gitfs", salt.utils.gitfs.GitFS),
++ ("git_pillar", salt.utils.gitfs.GitPillar),
++ ("winrepo", salt.utils.gitfs.WinRepo),
++ ),
++)
++def test_provider_case_insensitive_gitfs_provider(minion_opts, role_name, role_class):
++ """
++ Ensure that both lowercase and non-lowercase values are supported
++ """
++ provider = "GitPython"
++ key = "{}_provider".format(role_name)
++ with patch.object(role_class, "verify_gitpython", MagicMock(return_value=True)):
++ with patch.object(role_class, "verify_pygit2", MagicMock(return_value=False)):
++ args = [minion_opts, {}]
++ kwargs = {"init_remotes": False}
++ if role_name == "winrepo":
++ kwargs["cache_root"] = "/tmp/winrepo-dir"
++ with patch.dict(minion_opts, {key: provider}):
++ # Try to create an instance with uppercase letters in
++ # provider name. If it fails then a
++ # FileserverConfigError will be raised, so no assert is
++ # necessary.
++ role_class(*args, **kwargs)
++ # Now try to instantiate an instance with all lowercase
++ # letters. Again, no need for an assert here.
++ role_class(*args, **kwargs)
++
++
++@pytest.mark.parametrize(
++ "role_name,role_class",
++ (
++ ("gitfs", salt.utils.gitfs.GitFS),
++ ("git_pillar", salt.utils.gitfs.GitPillar),
++ ("winrepo", salt.utils.gitfs.WinRepo),
++ ),
++)
++def test_valid_provider_gitfs_provider(minion_opts, role_name, role_class):
++ """
++ Ensure that an invalid provider is not accepted, raising a
++ FileserverConfigError.
++ """
++
++ def _get_mock(verify, provider):
++ """
++ Return a MagicMock with the desired return value
++ """
++ return MagicMock(return_value=verify.endswith(provider))
++
++ key = "{}_provider".format(role_name)
++ for provider in salt.utils.gitfs.GIT_PROVIDERS:
++ verify = "verify_gitpython"
++ mock1 = _get_mock(verify, provider)
++ with patch.object(role_class, verify, mock1):
++ verify = "verify_pygit2"
++ mock2 = _get_mock(verify, provider)
++ with patch.object(role_class, verify, mock2):
++ args = [minion_opts, {}]
++ kwargs = {"init_remotes": False}
++ if role_name == "winrepo":
++ kwargs["cache_root"] = "/tmp/winrepo-dir"
++ with patch.dict(minion_opts, {key: provider}):
++ role_class(*args, **kwargs)
++ with patch.dict(minion_opts, {key: "foo"}):
++ # Set the provider name to a known invalid provider
++ # and make sure it raises an exception.
++ with pytest.raises(FileserverConfigError):
++ role_class(*args, **kwargs)
++
++
++@pytest.fixture
++def _prepare_remote_repository_pygit2(tmp_path):
++ remote = os.path.join(tmp_path, "pygit2-repo")
++ filecontent = "This is an empty README file"
++ filename = "README"
++ signature = pygit2.Signature(
++ "Dummy Commiter", "dummy@dummy.com", int(time.time()), 0
++ )
++ repository = pygit2.init_repository(remote, False)
++ builder = repository.TreeBuilder()
++ tree = builder.write()
++ commit = repository.create_commit(
++ "HEAD", signature, signature, "Create master branch", tree, []
++ )
++ repository.create_reference("refs/tags/simple_tag", commit)
++ with salt.utils.files.fopen(
++ os.path.join(repository.workdir, filename), "w"
++ ) as file:
++ file.write(filecontent)
++ blob = repository.create_blob_fromworkdir(filename)
++ builder = repository.TreeBuilder()
++ builder.insert(filename, blob, pygit2.GIT_FILEMODE_BLOB)
++ tree = builder.write()
++ repository.index.read()
++ repository.index.add(filename)
++ repository.index.write()
++ commit = repository.create_commit(
++ "HEAD",
++ signature,
++ signature,
++ "Added a README",
++ tree,
++ [repository.head.target],
++ )
++ repository.create_tag(
++ "annotated_tag", commit, pygit2.GIT_OBJ_COMMIT, signature, "some message"
++ )
++ return remote
++
++
++@pytest.fixture
++def _prepare_provider(tmp_path, minion_opts, _prepare_remote_repository_pygit2):
++ cache = tmp_path / "pygit2-repo-cache"
++ minion_opts.update(
++ {
++ "cachedir": str(cache),
++ "gitfs_disable_saltenv_mapping": False,
++ "gitfs_base": "master",
++ "gitfs_insecure_auth": False,
++ "gitfs_mountpoint": "",
++ "gitfs_passphrase": "",
++ "gitfs_password": "",
++ "gitfs_privkey": "",
++ "gitfs_provider": "pygit2",
++ "gitfs_pubkey": "",
++ "gitfs_ref_types": ["branch", "tag", "sha"],
++ "gitfs_refspecs": [
++ "+refs/heads/*:refs/remotes/origin/*",
++ "+refs/tags/*:refs/tags/*",
++ ],
++ "gitfs_root": "",
++ "gitfs_saltenv_blacklist": [],
++ "gitfs_saltenv_whitelist": [],
++ "gitfs_ssl_verify": True,
++ "gitfs_update_interval": 3,
++ "gitfs_user": "",
++ "verified_gitfs_provider": "pygit2",
++ }
++ )
++ per_remote_defaults = {
++ "base": "master",
++ "disable_saltenv_mapping": False,
++ "insecure_auth": False,
++ "ref_types": ["branch", "tag", "sha"],
++ "passphrase": "",
++ "mountpoint": "",
++ "password": "",
++ "privkey": "",
++ "pubkey": "",
++ "refspecs": [
++ "+refs/heads/*:refs/remotes/origin/*",
++ "+refs/tags/*:refs/tags/*",
++ ],
++ "root": "",
++ "saltenv_blacklist": [],
++ "saltenv_whitelist": [],
++ "ssl_verify": True,
++ "update_interval": 60,
++ "user": "",
++ }
++ per_remote_only = ("all_saltenvs", "name", "saltenv")
++ override_params = tuple(per_remote_defaults)
++ cache_root = cache / "gitfs"
++ role = "gitfs"
++ provider = salt.utils.gitfs.Pygit2(
++ minion_opts,
++ _prepare_remote_repository_pygit2,
++ per_remote_defaults,
++ per_remote_only,
++ override_params,
++ str(cache_root),
++ role,
++ )
++ return provider
++
++
++@pytest.mark.skipif(not HAS_PYGIT2, reason="This host lacks proper pygit2 support")
++@pytest.mark.skip_on_windows(
++ reason="Skip Pygit2 on windows, due to pygit2 access error on windows"
++)
++def test_checkout_pygit2(_prepare_provider):
++ provider = _prepare_provider
++ provider.remotecallbacks = None
++ provider.credentials = None
++ provider.init_remote()
++ provider.fetch()
++ provider.branch = "master"
++ assert provider.cachedir in provider.checkout()
++ provider.branch = "simple_tag"
++ assert provider.cachedir in provider.checkout()
++ provider.branch = "annotated_tag"
++ assert provider.cachedir in provider.checkout()
++ provider.branch = "does_not_exist"
++ assert provider.checkout() is None
++
++
++@pytest.mark.skipif(not HAS_PYGIT2, reason="This host lacks proper pygit2 support")
++@pytest.mark.skip_on_windows(
++ reason="Skip Pygit2 on windows, due to pygit2 access error on windows"
++)
++def test_checkout_pygit2_with_home_env_unset(_prepare_provider):
++ provider = _prepare_provider
++ provider.remotecallbacks = None
++ provider.credentials = None
++ with patched_environ(__cleanup__=["HOME"]):
++ assert "HOME" not in os.environ
++ provider.init_remote()
++ provider.fetch()
++ assert "HOME" in os.environ
++
++
++def test_full_id_pygit2(_prepare_provider):
++ assert _prepare_provider.full_id().startswith("-")
++ assert _prepare_provider.full_id().endswith("/pygit2-repo---gitfs-master--")
++
++
++@pytest.mark.skipif(not HAS_PYGIT2, reason="This host lacks proper pygit2 support")
++@pytest.mark.skip_on_windows(
++ reason="Skip Pygit2 on windows, due to pygit2 access error on windows"
++)
++def test_get_cachedir_basename_pygit2(_prepare_provider):
++ basename = _prepare_provider.get_cachedir_basename()
++ assert len(basename) == 45
++ assert basename[0] == "-"
++ # check that it is valid base64, with '/' replaced by '_'
++ assert all(c in string.ascii_letters + string.digits + "+_=" for c in basename[1:])
+diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py
+index 7c400b69af..6d8e97a239 100644
+--- a/tests/unit/utils/test_gitfs.py
++++ b/tests/unit/utils/test_gitfs.py
+@@ -2,37 +2,20 @@
+ These only test the provider selection and verification logic, they do not init
+ any remotes.
+ """
+-import os
+-import shutil
+-from time import time
++
++import tempfile
+
+ import pytest
+
++import salt.ext.tornado.ioloop
+ import salt.fileserver.gitfs
+ import salt.utils.files
+ import salt.utils.gitfs
++import salt.utils.path
+ import salt.utils.platform
+-import tests.support.paths
+-from salt.exceptions import FileserverConfigError
+-from tests.support.helpers import patched_environ
+ from tests.support.mixins import AdaptedConfigurationTestCaseMixin
+-from tests.support.mock import MagicMock, patch
+ from tests.support.unit import TestCase
+
+-try:
+- HAS_PYGIT2 = (
+- salt.utils.gitfs.PYGIT2_VERSION
+- and salt.utils.gitfs.PYGIT2_VERSION >= salt.utils.gitfs.PYGIT2_MINVER
+- and salt.utils.gitfs.LIBGIT2_VERSION
+- and salt.utils.gitfs.LIBGIT2_VERSION >= salt.utils.gitfs.LIBGIT2_MINVER
+- )
+-except AttributeError:
+- HAS_PYGIT2 = False
+-
+-
+-if HAS_PYGIT2:
+- import pygit2
+-
+
+ def _clear_instance_map():
+ try:
+@@ -45,6 +28,9 @@ def _clear_instance_map():
+
+ class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
+ def setUp(self):
++ self._tmp_dir = tempfile.TemporaryDirectory()
++ tmp_name = self._tmp_dir.name
++
+ class MockedProvider(
+ salt.utils.gitfs.GitProvider
+ ): # pylint: disable=abstract-method
+@@ -71,6 +57,7 @@ class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
+ )
+
+ def init_remote(self):
++ self.gitdir = salt.utils.path.join(tmp_name, ".git")
+ self.repo = True
+ new = False
+ return new
+@@ -107,6 +94,7 @@ class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
+ for remote in self.main_class.remotes:
+ remote.fetched = False
+ del self.main_class
++ self._tmp_dir.cleanup()
+
+ def test_update_all(self):
+ self.main_class.update()
+@@ -126,226 +114,73 @@ class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
+ self.assertTrue(self.main_class.remotes[0].fetched)
+ self.assertFalse(self.main_class.remotes[1].fetched)
+
+-
+-class TestGitFSProvider(TestCase):
+- def setUp(self):
+- self.opts = {"cachedir": "/tmp/gitfs-test-cache"}
+-
+- def tearDown(self):
+- self.opts = None
+-
+- def test_provider_case_insensitive(self):
+- """
+- Ensure that both lowercase and non-lowercase values are supported
+- """
+- provider = "GitPython"
+- for role_name, role_class in (
+- ("gitfs", salt.utils.gitfs.GitFS),
+- ("git_pillar", salt.utils.gitfs.GitPillar),
+- ("winrepo", salt.utils.gitfs.WinRepo),
+- ):
+-
+- key = "{}_provider".format(role_name)
+- with patch.object(
+- role_class, "verify_gitpython", MagicMock(return_value=True)
+- ):
+- with patch.object(
+- role_class, "verify_pygit2", MagicMock(return_value=False)
+- ):
+- args = [self.opts, {}]
+- kwargs = {"init_remotes": False}
+- if role_name == "winrepo":
+- kwargs["cache_root"] = "/tmp/winrepo-dir"
+- with patch.dict(self.opts, {key: provider}):
+- # Try to create an instance with uppercase letters in
+- # provider name. If it fails then a
+- # FileserverConfigError will be raised, so no assert is
+- # necessary.
+- role_class(*args, **kwargs)
+- # Now try to instantiate an instance with all lowercase
+- # letters. Again, no need for an assert here.
+- role_class(*args, **kwargs)
+-
+- def test_valid_provider(self):
+- """
+- Ensure that an invalid provider is not accepted, raising a
+- FileserverConfigError.
+- """
+-
+- def _get_mock(verify, provider):
+- """
+- Return a MagicMock with the desired return value
+- """
+- return MagicMock(return_value=verify.endswith(provider))
+-
+- for role_name, role_class in (
+- ("gitfs", salt.utils.gitfs.GitFS),
+- ("git_pillar", salt.utils.gitfs.GitPillar),
+- ("winrepo", salt.utils.gitfs.WinRepo),
+- ):
+- key = "{}_provider".format(role_name)
+- for provider in salt.utils.gitfs.GIT_PROVIDERS:
+- verify = "verify_gitpython"
+- mock1 = _get_mock(verify, provider)
+- with patch.object(role_class, verify, mock1):
+- verify = "verify_pygit2"
+- mock2 = _get_mock(verify, provider)
+- with patch.object(role_class, verify, mock2):
+- args = [self.opts, {}]
+- kwargs = {"init_remotes": False}
+- if role_name == "winrepo":
+- kwargs["cache_root"] = "/tmp/winrepo-dir"
+-
+- with patch.dict(self.opts, {key: provider}):
+- role_class(*args, **kwargs)
+-
+- with patch.dict(self.opts, {key: "foo"}):
+- # Set the provider name to a known invalid provider
+- # and make sure it raises an exception.
+- self.assertRaises(
+- FileserverConfigError, role_class, *args, **kwargs
+- )
+-
+-
+-@pytest.mark.skipif(not HAS_PYGIT2, reason="This host lacks proper pygit2 support")
+-@pytest.mark.skip_on_windows(
+- reason="Skip Pygit2 on windows, due to pygit2 access error on windows"
+-)
+-class TestPygit2(TestCase):
+- def _prepare_remote_repository(self, path):
+- shutil.rmtree(path, ignore_errors=True)
+-
+- filecontent = "This is an empty README file"
+- filename = "README"
+-
+- signature = pygit2.Signature(
+- "Dummy Commiter", "dummy@dummy.com", int(time()), 0
++ def test_full_id(self):
++ self.assertEqual(
++ self.main_class.remotes[0].full_id(), "-file://repo1.git---gitfs-master--"
+ )
+
+- repository = pygit2.init_repository(path, False)
+- builder = repository.TreeBuilder()
+- tree = builder.write()
+- commit = repository.create_commit(
+- "HEAD", signature, signature, "Create master branch", tree, []
++ def test_full_id_with_name(self):
++ self.assertEqual(
++ self.main_class.remotes[1].full_id(),
++ "repo2-file://repo2.git---gitfs-master--",
+ )
+- repository.create_reference("refs/tags/simple_tag", commit)
+
+- with salt.utils.files.fopen(
+- os.path.join(repository.workdir, filename), "w"
+- ) as file:
+- file.write(filecontent)
+-
+- blob = repository.create_blob_fromworkdir(filename)
+- builder = repository.TreeBuilder()
+- builder.insert(filename, blob, pygit2.GIT_FILEMODE_BLOB)
+- tree = builder.write()
+-
+- repository.index.read()
+- repository.index.add(filename)
+- repository.index.write()
+-
+- commit = repository.create_commit(
+- "HEAD",
+- signature,
+- signature,
+- "Added a README",
+- tree,
+- [repository.head.target],
+- )
+- repository.create_tag(
+- "annotated_tag", commit, pygit2.GIT_OBJ_COMMIT, signature, "some message"
++ def test_get_cachedir_basename(self):
++ self.assertEqual(
++ self.main_class.remotes[0].get_cachedir_basename(),
++ "-jXhnbGDemchtZwTwaD2s6VOaVvs98a7w+AtiYlmOVb0=",
+ )
+
+- def _prepare_cache_repository(self, remote, cache):
+- opts = {
+- "cachedir": cache,
+- "__role": "minion",
+- "gitfs_disable_saltenv_mapping": False,
+- "gitfs_base": "master",
+- "gitfs_insecure_auth": False,
+- "gitfs_mountpoint": "",
+- "gitfs_passphrase": "",
+- "gitfs_password": "",
+- "gitfs_privkey": "",
+- "gitfs_provider": "pygit2",
+- "gitfs_pubkey": "",
+- "gitfs_ref_types": ["branch", "tag", "sha"],
+- "gitfs_refspecs": [
+- "+refs/heads/*:refs/remotes/origin/*",
+- "+refs/tags/*:refs/tags/*",
+- ],
+- "gitfs_root": "",
+- "gitfs_saltenv_blacklist": [],
+- "gitfs_saltenv_whitelist": [],
+- "gitfs_ssl_verify": True,
+- "gitfs_update_interval": 3,
+- "gitfs_user": "",
+- "verified_gitfs_provider": "pygit2",
+- }
+- per_remote_defaults = {
+- "base": "master",
+- "disable_saltenv_mapping": False,
+- "insecure_auth": False,
+- "ref_types": ["branch", "tag", "sha"],
+- "passphrase": "",
+- "mountpoint": "",
+- "password": "",
+- "privkey": "",
+- "pubkey": "",
+- "refspecs": [
+- "+refs/heads/*:refs/remotes/origin/*",
+- "+refs/tags/*:refs/tags/*",
+- ],
+- "root": "",
+- "saltenv_blacklist": [],
+- "saltenv_whitelist": [],
+- "ssl_verify": True,
+- "update_interval": 60,
+- "user": "",
+- }
+- per_remote_only = ("all_saltenvs", "name", "saltenv")
+- override_params = tuple(per_remote_defaults.keys())
+- cache_root = os.path.join(cache, "gitfs")
+- role = "gitfs"
+- shutil.rmtree(cache_root, ignore_errors=True)
+- provider = salt.utils.gitfs.Pygit2(
+- opts,
+- remote,
+- per_remote_defaults,
+- per_remote_only,
+- override_params,
+- cache_root,
+- role,
++ def test_get_cachedir_base_with_name(self):
++ self.assertEqual(
++ self.main_class.remotes[1].get_cachedir_basename(),
++ "repo2-nuezpiDtjQRFC0ZJDByvi+F6Vb8ZhfoH41n_KFxTGsU=",
+ )
+- return provider
+
+- def test_checkout(self):
+- remote = os.path.join(tests.support.paths.TMP, "pygit2-repo")
+- cache = os.path.join(tests.support.paths.TMP, "pygit2-repo-cache")
+- self._prepare_remote_repository(remote)
+- provider = self._prepare_cache_repository(remote, cache)
+- provider.remotecallbacks = None
+- provider.credentials = None
+- provider.init_remote()
+- provider.fetch()
+- provider.branch = "master"
+- self.assertIn(provider.cachedir, provider.checkout())
+- provider.branch = "simple_tag"
+- self.assertIn(provider.cachedir, provider.checkout())
+- provider.branch = "annotated_tag"
+- self.assertIn(provider.cachedir, provider.checkout())
+- provider.branch = "does_not_exist"
+- self.assertIsNone(provider.checkout())
++ def test_git_provider_mp_lock(self):
++ """
++ Check that lock is released after provider.lock()
++ """
++ provider = self.main_class.remotes[0]
++ provider.lock()
++ # check that lock has been released
++ self.assertTrue(provider._master_lock.acquire(timeout=5))
++ provider._master_lock.release()
+
+- def test_checkout_with_home_env_unset(self):
+- remote = os.path.join(tests.support.paths.TMP, "pygit2-repo")
+- cache = os.path.join(tests.support.paths.TMP, "pygit2-repo-cache")
+- self._prepare_remote_repository(remote)
+- provider = self._prepare_cache_repository(remote, cache)
+- provider.remotecallbacks = None
+- provider.credentials = None
+- with patched_environ(__cleanup__=["HOME"]):
+- self.assertTrue("HOME" not in os.environ)
+- provider.init_remote()
+- provider.fetch()
+- self.assertTrue("HOME" in os.environ)
++ def test_git_provider_mp_clear_lock(self):
++ """
++ Check that lock is released after provider.clear_lock()
++ """
++ provider = self.main_class.remotes[0]
++ provider.clear_lock()
++ # check that lock has been released
++ self.assertTrue(provider._master_lock.acquire(timeout=5))
++ provider._master_lock.release()
++
++ @pytest.mark.slow_test
++ def test_git_provider_mp_lock_timeout(self):
++ """
++ Check that lock will time out if master lock is locked.
++ """
++ provider = self.main_class.remotes[0]
++ # Hijack the lock so the git provider is fooled into thinking another instance is doing something.
++ self.assertTrue(provider._master_lock.acquire(timeout=5))
++ try:
++ # git provider should raise timeout error to avoid lock race conditions
++ self.assertRaises(TimeoutError, provider.lock)
++ finally:
++ provider._master_lock.release()
++
++ @pytest.mark.slow_test
++ def test_git_provider_mp_clear_lock_timeout(self):
++ """
++ Check that clear lock will time out if master lock is locked.
++ """
++ provider = self.main_class.remotes[0]
++ # Hijack the lock so the git provider is fooled into thinking another instance is doing something.
++ self.assertTrue(provider._master_lock.acquire(timeout=5))
++ try:
++ # git provider should raise timeout error to avoid lock race conditions
++ self.assertRaises(TimeoutError, provider.clear_lock)
++ finally:
++ provider._master_lock.release()
+--
+2.41.0
+
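The cachedir changes above replace the old name-or-SHA1 basename with a deterministic, filesystem-safe name derived from the remote's full identity. A minimal standalone sketch of the same naming scheme, assuming the default hash_type of sha256 (cachedir_basename is a hypothetical helper, not part of Salt's API):

    import base64
    import hashlib
    import string

    def cachedir_basename(full_id, name=""):
        # Hash the full remote identity and base64-encode the digest;
        # "/" is swapped for "_" so the result is a safe path component.
        digest = hashlib.sha256(full_id.encode("utf-8")).digest()
        b64 = base64.b64encode(digest).decode("ascii").replace("/", "_")
        # Keep at most 19 chars of the human-readable name and replace
        # anything outside [A-Za-z0-9-] with "_".
        allowed = string.ascii_letters + string.digits + "-"
        prefix = "".join(c if c in allowed else "_" for c in name[:19])
        return f"{prefix}-{b64}"

    # Per the unit test above, the unnamed remote with full_id
    # "-file://repo1.git---gitfs-master--" should map to
    # "-jXhnbGDemchtZwTwaD2s6VOaVvs98a7w+AtiYlmOVb0=".
    print(cachedir_basename("-file://repo1.git---gitfs-master--"))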
+
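The lock()/clear_lock() changes wrap all lock-file manipulation in a class-level lock with a 60-second acquire timeout, so a stuck peer surfaces as a loud TimeoutError instead of a silent race. The pattern in isolation (a sketch, assuming _master_lock is a class-level multiprocessing lock as in the patch):

    import multiprocessing

    class Provider:
        # one lock per class, shared by all instances in this process
        _master_lock = multiprocessing.Lock()

        def lock(self):
            if self.__class__._master_lock.acquire(timeout=60) is False:
                # if things work right we should never hit this timeout
                raise TimeoutError("master lock timeout!")
            try:
                return self.__lock()  # the real, unguarded work
            finally:
                self.__class__._master_lock.release()

        def __lock(self):
            return "lock file placed"

    print(Provider().lock())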
diff --git a/fixed-keyerror-in-logs-when-running-a-state-that-fai.patch b/fixed-keyerror-in-logs-when-running-a-state-that-fai.patch
new file mode 100644
index 0000000..eefa743
--- /dev/null
+++ b/fixed-keyerror-in-logs-when-running-a-state-that-fai.patch
@@ -0,0 +1,121 @@
+From f41a8e2a142a8487e13af481990928e0afb5f15e Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 18 Jan 2024 17:02:03 +0100
+Subject: [PATCH] Fixed KeyError in logs when running a state that
+ fails. (#615)
+
+Co-authored-by: Megan Wilhite
+---
+ changelog/64231.fixed.md | 1 +
+ salt/master.py | 2 +-
+ salt/minion.py | 4 ++
+ salt/utils/event.py | 3 +-
+ .../integration/states/test_state_test.py | 38 +++++++++++++++++++
+ 5 files changed, 46 insertions(+), 2 deletions(-)
+ create mode 100644 changelog/64231.fixed.md
+ create mode 100644 tests/pytests/integration/states/test_state_test.py
+
+diff --git a/changelog/64231.fixed.md b/changelog/64231.fixed.md
+new file mode 100644
+index 0000000000..0991c5a8b9
+--- /dev/null
++++ b/changelog/64231.fixed.md
+@@ -0,0 +1 @@
++Fixed KeyError in logs when running a state that fails.
+diff --git a/salt/master.py b/salt/master.py
+index fc243ef674..3d2ba1e29d 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -1790,7 +1790,7 @@ class AESFuncs(TransportMethods):
+ def pub_ret(self, load):
+ """
+ Request the return data from a specific jid, only allowed
+- if the requesting minion also initialted the execution.
++ if the requesting minion also initiated the execution.
+
+ :param dict load: The minion payload
+
+diff --git a/salt/minion.py b/salt/minion.py
+index 4db0d31bd4..2ccd0cd5a9 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -2022,6 +2022,8 @@ class Minion(MinionBase):
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
++ if "user" in data:
++ ret["user"] = data["user"]
+ if "master_id" in data:
+ ret["master_id"] = data["master_id"]
+ if "metadata" in data:
+@@ -2141,6 +2143,8 @@ class Minion(MinionBase):
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
++ if "user" in data:
++ ret["user"] = data["user"]
+ if "metadata" in data:
+ ret["metadata"] = data["metadata"]
+ if minion_instance.connected:
+diff --git a/salt/utils/event.py b/salt/utils/event.py
+index 869e12a140..e6d7b00520 100644
+--- a/salt/utils/event.py
++++ b/salt/utils/event.py
+@@ -902,7 +902,8 @@ class SaltEvent:
+ data["success"] = False
+ data["return"] = "Error: {}.{}".format(tags[0], tags[-1])
+ data["fun"] = fun
+- data["user"] = load["user"]
++ if "user" in load:
++ data["user"] = load["user"]
+ self.fire_event(
+ data,
+ tagify([load["jid"], "sub", load["id"], "error", fun], "job"),
+diff --git a/tests/pytests/integration/states/test_state_test.py b/tests/pytests/integration/states/test_state_test.py
+new file mode 100644
+index 0000000000..b2328a4c2b
+--- /dev/null
++++ b/tests/pytests/integration/states/test_state_test.py
+@@ -0,0 +1,38 @@
++def test_failing_sls(salt_master, salt_minion, salt_cli, caplog):
++ """
++ Test when running state.sls and the state fails.
++ When the master stores the job and attempts to send
++ an event, a KeyError was previously being logged.
++ This test ensures we do not log an error when
++ attempting to send an event about a failing state.
++ """
++ statesls = """
++ test_state:
++ test.fail_without_changes:
++ - name: "bla"
++ """
++ with salt_master.state_tree.base.temp_file("test_failure.sls", statesls):
++ ret = salt_cli.run("state.sls", "test_failure", minion_tgt=salt_minion.id)
++ for message in caplog.messages:
++ assert "Event iteration failed with" not in message
++
++
++def test_failing_sls_compound(salt_master, salt_minion, salt_cli, caplog):
++ """
++ Test when running state.sls in a compound command and the state fails.
++ When the master stores the job and attempts to send
++ an event, a KeyError was previously being logged.
++ This test ensures we do not log an error when
++ attempting to send an event about a failing state.
++ """
++ statesls = """
++ test_state:
++ test.fail_without_changes:
++ - name: "bla"
++ """
++ with salt_master.state_tree.base.temp_file("test_failure.sls", statesls):
++ ret = salt_cli.run(
++ "state.sls,cmd.run", "test_failure,ls", minion_tgt=salt_minion.id
++ )
++ for message in caplog.messages:
++ assert "Event iteration failed with" not in message
+--
+2.43.0
+
+
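The core of the fix is a membership check before copying optional job fields, since not every load carries "user". The pattern, reduced to a standalone sketch (field names taken from the patch; build_return is a hypothetical helper):

    def build_return(load):
        ret = {"jid": load["jid"], "fun": load["fun"], "fun_args": load["arg"]}
        # Copying load["user"] unconditionally was the KeyError this fixes;
        # optional fields are only copied when they are actually present.
        for optional in ("user", "master_id", "metadata"):
            if optional in load:
                ret[optional] = load[optional]
        return ret

    print(build_return({"jid": "20240118", "fun": "state.sls", "arg": ["web"]}))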
diff --git a/fixes-for-python-3.10-502.patch b/fixes-for-python-3.10-502.patch
new file mode 100644
index 0000000..890d439
--- /dev/null
+++ b/fixes-for-python-3.10-502.patch
@@ -0,0 +1,44 @@
+From 4996f423f14369fad14a9e6d2d3b8bd750c77fc7 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Tue, 5 Apr 2022 12:04:46 +0300
+Subject: [PATCH] Fixes for Python 3.10 (#502)
+
+* Use collections.abc.Mapping instead of collections.Mapping in state
+---
+ salt/state.py | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/salt/state.py b/salt/state.py
+index ab84cb8b4d..489424a083 100644
+--- a/salt/state.py
++++ b/salt/state.py
+@@ -12,7 +12,6 @@ The data sent to the state calls is as follows:
+ """
+
+
+-import collections
+ import copy
+ import datetime
+ import fnmatch
+@@ -27,6 +26,8 @@ import sys
+ import time
+ import traceback
+
++from collections.abc import Mapping
++
+ import salt.channel.client
+ import salt.fileclient
+ import salt.loader
+@@ -3513,7 +3514,7 @@ class State:
+ """
+ for chunk in high:
+ state = high[chunk]
+- if not isinstance(state, collections.Mapping):
++ if not isinstance(state, Mapping):
+ continue
+ for state_ref in state:
+ needs_default = True
+--
+2.39.2
+
+
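For context: Python 3.10 removed the ABC aliases that had lived directly under collections since their deprecation in 3.3, so isinstance checks must import from collections.abc. A quick illustration:

    from collections.abc import Mapping

    # dicts are Mappings, lists and strings are not; on Python >= 3.10
    # the old spelling collections.Mapping raises AttributeError.
    for candidate in ({"a": 1}, [("a", 1)], "not a mapping"):
        print(type(candidate).__name__, isinstance(candidate, Mapping))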
diff --git a/handle-logger-flushing-already-closed-file-686.patch b/handle-logger-flushing-already-closed-file-686.patch
new file mode 100644
index 0000000..558bcdb
--- /dev/null
+++ b/handle-logger-flushing-already-closed-file-686.patch
@@ -0,0 +1,58 @@
+From e23dce108588a9c52d3f7542636892750d6efcbd Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Thu, 17 Oct 2024 15:52:00 +0200
+Subject: [PATCH] Handle logger flushing already closed file (#686)
+
+This is a partial cherry-pick of
+https://github.com/saltstack/salt/commit/9683260d61668da8559ecde6caf63a52fedd8790
+---
+ salt/_logging/handlers.py | 4 ++++
+ salt/_logging/impl.py | 10 +++++++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/salt/_logging/handlers.py b/salt/_logging/handlers.py
+index 5a1a1613137..d8bc68a49db 100644
+--- a/salt/_logging/handlers.py
++++ b/salt/_logging/handlers.py
+@@ -95,6 +95,9 @@ class DeferredStreamHandler(StreamHandler):
+ super().__init__(stream)
+ self.__messages = deque(maxlen=max_queue_size)
+ self.__emitting = False
++ import traceback
++
++ self.stack = "".join(traceback.format_stack())
+
+ def handle(self, record):
+ self.acquire()
+@@ -116,6 +119,7 @@ class DeferredStreamHandler(StreamHandler):
+ super().handle(record)
+ finally:
+ self.__emitting = False
++ # This will raise a ValueError if the file handle has been closed.
+ super().flush()
+
+ def sync_with_handlers(self, handlers=()):
+diff --git a/salt/_logging/impl.py b/salt/_logging/impl.py
+index 4d1ebd2495f..9d76c3174e2 100644
+--- a/salt/_logging/impl.py
++++ b/salt/_logging/impl.py
+@@ -488,7 +488,15 @@ def setup_temp_handler(log_level=None):
+ break
+ else:
+ handler = DeferredStreamHandler(sys.stderr)
+- atexit.register(handler.flush)
++
++ def tryflush():
++ try:
++ handler.flush()
++ except ValueError:
++ # File handle has already been closed.
++ pass
++
++ atexit.register(tryflush)
+ handler.setLevel(log_level)
+
+ # Set the default temporary console formatter config
+--
+2.47.0
+
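The essence of the fix is registering a wrapper instead of the bound flush method: at interpreter shutdown the underlying stream may already be closed, and StreamHandler.flush() then raises ValueError. The same guard outside Salt (a sketch using the stdlib logging handler):

    import atexit
    import logging
    import sys

    handler = logging.StreamHandler(sys.stderr)

    def tryflush():
        try:
            handler.flush()
        except ValueError:
            # File handle has already been closed.
            pass

    # register the tolerant wrapper, not handler.flush itself
    atexit.register(tryflush)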
diff --git a/html.tar.bz2 b/html.tar.bz2
new file mode 100644
index 0000000..88d8461
--- /dev/null
+++ b/html.tar.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6fba74e3093218b099e917e5ae52eb3ecdea2a7611b7b46b91dfca518e2a022
+size 10804131
diff --git a/implement-the-calling-for-batch-async-from-the-salt-.patch b/implement-the-calling-for-batch-async-from-the-salt-.patch
new file mode 100644
index 0000000..128dea6
--- /dev/null
+++ b/implement-the-calling-for-batch-async-from-the-salt-.patch
@@ -0,0 +1,145 @@
+From 7ab208fd2d23eaa582cdbba912d4538d8c87e5f4 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Mon, 2 Oct 2023 13:24:15 +0200
+Subject: [PATCH] Implement the calling for batch async from the salt
+ CLI
+
+* Implement calling batch async with salt CLI
+
+* Add the test for calling batch async with salt CLI
+---
+ salt/cli/salt.py | 53 ++++++++++++++++++++++++++++-
+ tests/pytests/unit/cli/test_salt.py | 50 +++++++++++++++++++++++++++
+ 2 files changed, 102 insertions(+), 1 deletion(-)
+ create mode 100644 tests/pytests/unit/cli/test_salt.py
+
+diff --git a/salt/cli/salt.py b/salt/cli/salt.py
+index f90057f668..e19cfa5ce6 100644
+--- a/salt/cli/salt.py
++++ b/salt/cli/salt.py
+@@ -47,7 +47,12 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
+ self.exit(2, "{}\n".format(exc))
+ return
+
+- if self.options.batch or self.options.static:
++ if self.options.batch and self.config["async"]:
++ # _run_batch_async() will just return the jid and exit
++ # Execution will not continue past this point
++ # in batch async mode. Batch async is handled by the master.
++ self._run_batch_async()
++ elif self.options.batch or self.options.static:
+ # _run_batch() will handle all output and
+ # exit with the appropriate error condition
+ # Execution will not continue past this point
+@@ -296,6 +301,52 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
+ retcode = job_retcode
+ sys.exit(retcode)
+
++ def _run_batch_async(self):
++ kwargs = {
++ "tgt": self.config["tgt"],
++ "fun": self.config["fun"],
++ "arg": self.config["arg"],
++ "timeout": self.options.timeout,
++ "show_timeout": self.options.show_timeout,
++ "show_jid": self.options.show_jid,
++ "batch": self.config["batch"],
++ }
++ tgt = kwargs.pop("tgt", "")
++ fun = kwargs.pop("fun", "")
++
++ if self.config.get("eauth", ""):
++ kwargs.update(
++ {
++ "eauth": self.config["eauth"],
++ }
++ )
++ for opt in ("username", "password"):
++ if opt in self.config:
++ kwargs[opt] = self.config[opt]
++
++ try:
++ ret = self.local_client.run_job(tgt, fun, **kwargs)
++ except (
++ AuthenticationError,
++ AuthorizationError,
++ SaltInvocationError,
++ EauthAuthenticationError,
++ SaltClientError,
++ ) as exc:
++ ret = str(exc)
++ self.exit(2, "ERROR: {}\n".format(exc))
++ if "jid" in ret and "error" not in ret:
++ salt.utils.stringutils.print_cli(
++ "Executed command with job ID: {}".format(ret["jid"])
++ )
++ else:
++ self._output_ret(ret, self.config.get("output", "nested"))
++
++ if "error" in ret:
++ sys.exit(1)
++
++ sys.exit(0)
++
+ def _print_errors_summary(self, errors):
+ if errors:
+ salt.utils.stringutils.print_cli("\n")
+diff --git a/tests/pytests/unit/cli/test_salt.py b/tests/pytests/unit/cli/test_salt.py
+new file mode 100644
+index 0000000000..d9f4b5b097
+--- /dev/null
++++ b/tests/pytests/unit/cli/test_salt.py
+@@ -0,0 +1,50 @@
++import pytest
++
++from tests.support.mock import MagicMock, patch
++
++
++def test_saltcmd_batch_async_call():
++ """
++ Test calling batch async with salt CLI
++ """
++ import salt.cli.salt
++
++ local_client = MagicMock()
++ local_client.run_job = MagicMock(return_value={"jid": 123456})
++ with pytest.raises(SystemExit) as exit_info, patch(
++ "sys.argv",
++ [
++ "salt",
++ "--batch=10",
++ "--async",
++ "*",
++ "test.arg",
++ "arg1",
++ "arg2",
++ "kwarg1=val1",
++ ],
++ ), patch("salt.cli.salt.SaltCMD.process_config_dir", MagicMock), patch(
++ "salt.output.display_output", MagicMock()
++ ), patch(
++ "salt.client.get_local_client", return_value=local_client
++ ), patch(
++ "salt.utils.stringutils.print_cli", MagicMock()
++ ) as print_cli:
++ salt_cmd = salt.cli.salt.SaltCMD()
++ salt_cmd.config = {
++ "async": True,
++ "batch": 10,
++ "tgt": "*",
++ "fun": "test.arg",
++ "arg": ["arg1", "arg2", {"__kwarg__": True, "kwarg1": "val1"}],
++ }
++ salt_cmd._mixin_after_parsed_funcs = []
++ salt_cmd.run()
++
++ local_client.run_job.assert_called_once()
++ assert local_client.run_job.mock_calls[0].args[0] == "*"
++ assert local_client.run_job.mock_calls[0].args[1] == "test.arg"
++ assert local_client.run_job.mock_calls[0].kwargs["arg"] == ["arg1", "arg2", {"__kwarg__": True, "kwarg1": "val1"}]
++ assert local_client.run_job.mock_calls[0].kwargs["batch"] == 10
++ print_cli.assert_called_once_with("Executed command with job ID: 123456")
++ assert exit_info.value.code == 0
+--
+2.42.0
+
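With this patch, combining --batch with --async publishes the job once and prints the jid, leaving batching to the master, e.g. `salt --batch=10 --async '*' test.version`. A sketch of the equivalent API call, assuming run_job forwards the extra batch keyword into the published load as the CLI code above does (requires a running master):

    import salt.client

    client = salt.client.LocalClient()
    # publish without waiting for returns; the master handles the batching
    ret = client.run_job("*", "test.version", arg=[], batch="10")
    if "jid" in ret and "error" not in ret:
        print("Executed command with job ID: {}".format(ret["jid"]))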
diff --git a/improve-broken-events-catching-and-reporting.patch b/improve-broken-events-catching-and-reporting.patch
new file mode 100644
index 0000000..a79544e
--- /dev/null
+++ b/improve-broken-events-catching-and-reporting.patch
@@ -0,0 +1,202 @@
+From 88bd54971d39b34d9728f3fe5fcb493cec3ff2fd Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 09:22:11 +0200
+Subject: [PATCH] Improve broken events catching and reporting
+
+* Improve broken events catching and reporting
+
+* Add test of catching SaltDeserializationError on reading event
+
+* Add test for fire_ret_load
+---
+ salt/utils/event.py | 23 +++-
+ tests/pytests/unit/utils/event/test_event.py | 107 +++++++++++++++++++
+ 2 files changed, 128 insertions(+), 2 deletions(-)
+
+diff --git a/salt/utils/event.py b/salt/utils/event.py
+index e6d7b00520..ef048335ae 100644
+--- a/salt/utils/event.py
++++ b/salt/utils/event.py
+@@ -75,6 +75,7 @@ import salt.utils.platform
+ import salt.utils.process
+ import salt.utils.stringutils
+ import salt.utils.zeromq
++from salt.exceptions import SaltDeserializationError
+
+ log = logging.getLogger(__name__)
+
+@@ -461,7 +462,13 @@ class SaltEvent:
+ salt.utils.stringutils.to_bytes(TAGEND)
+ ) # split tag from data
+ mtag = salt.utils.stringutils.to_str(mtag)
+- data = salt.payload.loads(mdata, encoding="utf-8")
++ try:
++ data = salt.payload.loads(mdata, encoding="utf-8")
++ except SaltDeserializationError:
++ log.warning(
++ "SaltDeserializationError on unpacking data, the payload could be incomplete"
++ )
++ raise
+ return mtag, data
+
+ def _get_match_func(self, match_type=None):
+@@ -583,6 +590,9 @@ class SaltEvent:
+ raise
+ else:
+ return None
++ except SaltDeserializationError:
++ log.error("Unable to deserialize received event")
++ return None
+ except RuntimeError:
+ return None
+
+@@ -889,6 +899,14 @@ class SaltEvent:
+ ret = load.get("return", {})
+ retcode = load["retcode"]
+
++ if not isinstance(ret, dict):
++ log.error(
++ "Event with bad payload received from '%s': %s",
++ load.get("id", "UNKNOWN"),
++ "".join(ret) if isinstance(ret, list) else ret,
++ )
++ return
++
+ try:
+ for tag, data in ret.items():
+ data["retcode"] = retcode
+@@ -910,7 +928,8 @@ class SaltEvent:
+ )
+ except Exception as exc: # pylint: disable=broad-except
+ log.error(
+- "Event iteration failed with exception: %s",
++ "Event from '%s' iteration failed with exception: %s",
++ load.get("id", "UNKNOWN"),
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
+diff --git a/tests/pytests/unit/utils/event/test_event.py b/tests/pytests/unit/utils/event/test_event.py
+index f4b6c15999..3eadfaf6ba 100644
+--- a/tests/pytests/unit/utils/event/test_event.py
++++ b/tests/pytests/unit/utils/event/test_event.py
+@@ -12,6 +12,7 @@ import salt.ext.tornado.ioloop
+ import salt.ext.tornado.iostream
+ import salt.utils.event
+ import salt.utils.stringutils
++from salt.exceptions import SaltDeserializationError
+ from salt.utils.event import SaltEvent
+ from tests.support.events import eventpublisher_process, eventsender_process
+ from tests.support.mock import patch
+@@ -340,3 +341,109 @@ def test_master_pub_permissions(sock_dir):
+ assert bool(os.lstat(p).st_mode & stat.S_IRUSR)
+ assert not bool(os.lstat(p).st_mode & stat.S_IRGRP)
+ assert not bool(os.lstat(p).st_mode & stat.S_IROTH)
++
++
++def test_event_unpack_with_SaltDeserializationError(sock_dir):
++ with eventpublisher_process(str(sock_dir)), salt.utils.event.MasterEvent(
++ str(sock_dir), listen=True
++ ) as me, patch.object(
++ salt.utils.event.log, "warning", autospec=True
++ ) as mock_log_warning, patch.object(
++ salt.utils.event.log, "error", autospec=True
++ ) as mock_log_error:
++ me.fire_event({"data": "foo1"}, "evt1")
++ me.fire_event({"data": "foo2"}, "evt2")
++ evt2 = me.get_event(tag="")
++ with patch("salt.payload.loads", side_effect=SaltDeserializationError):
++ evt1 = me.get_event(tag="")
++ _assert_got_event(evt2, {"data": "foo2"}, expected_failure=True)
++ assert evt1 is None
++ assert (
++ mock_log_warning.mock_calls[0].args[0]
++ == "SaltDeserializationError on unpacking data, the payload could be incomplete"
++ )
++ assert (
++ mock_log_error.mock_calls[0].args[0]
++ == "Unable to deserialize received event"
++ )
++
++
++def test_event_fire_ret_load():
++ event = SaltEvent(node=None)
++ test_load = {
++ "id": "minion_id.example.org",
++ "jid": "20240212095247760376",
++ "fun": "state.highstate",
++ "retcode": 254,
++ "return": {
++ "saltutil_|-sync_states_|-sync_states_|-sync_states": {
++ "result": True,
++ },
++ "saltutil_|-sync_modules_|-sync_modules_|-sync_modules": {
++ "result": False,
++ },
++ },
++ }
++ test_fire_event_data = {
++ "result": False,
++ "retcode": 254,
++ "jid": "20240212095247760376",
++ "id": "minion_id.example.org",
++ "success": False,
++ "return": "Error: saltutil.sync_modules",
++ "fun": "state.highstate",
++ }
++ test_unhandled_exc = "Unhandled exception running state.highstate"
++ test_traceback = [
++ "Traceback (most recent call last):\n",
++ " Just an example of possible return as a list\n",
++ ]
++ with patch.object(
++ event, "fire_event", side_effect=[None, None, Exception]
++ ) as mock_fire_event, patch.object(
++ salt.utils.event.log, "error", autospec=True
++ ) as mock_log_error:
++ event.fire_ret_load(test_load)
++ assert len(mock_fire_event.mock_calls) == 2
++ assert mock_fire_event.mock_calls[0].args[0] == test_fire_event_data
++ assert mock_fire_event.mock_calls[0].args[1] == "saltutil.sync_modules"
++ assert mock_fire_event.mock_calls[1].args[0] == test_fire_event_data
++ assert (
++ mock_fire_event.mock_calls[1].args[1]
++ == "salt/job/20240212095247760376/sub/minion_id.example.org/error/state.highstate"
++ )
++ assert not mock_log_error.mock_calls
++
++ mock_log_error.reset_mock()
++
++ event.fire_ret_load(test_load)
++ assert (
++ mock_log_error.mock_calls[0].args[0]
++ == "Event from '%s' iteration failed with exception: %s"
++ )
++ assert mock_log_error.mock_calls[0].args[1] == "minion_id.example.org"
++
++ mock_log_error.reset_mock()
++ test_load["return"] = test_unhandled_exc
++
++ event.fire_ret_load(test_load)
++ assert (
++ mock_log_error.mock_calls[0].args[0]
++ == "Event with bad payload received from '%s': %s"
++ )
++ assert mock_log_error.mock_calls[0].args[1] == "minion_id.example.org"
++ assert (
++ mock_log_error.mock_calls[0].args[2]
++ == "Unhandled exception running state.highstate"
++ )
++
++ mock_log_error.reset_mock()
++ test_load["return"] = test_traceback
++
++ event.fire_ret_load(test_load)
++ assert (
++ mock_log_error.mock_calls[0].args[0]
++ == "Event with bad payload received from '%s': %s"
++ )
++ assert mock_log_error.mock_calls[0].args[1] == "minion_id.example.org"
++ assert mock_log_error.mock_calls[0].args[2] == "".join(test_traceback)
+--
+2.45.0
+
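The added isinstance check matters because a minion that dies mid-job can return a traceback as a string or a list of strings instead of the expected dict of state results, which previously blew up while iterating inside fire_ret_load. The guard on its own (a standalone sketch):

    import logging

    log = logging.getLogger(__name__)

    def checked_ret(load):
        ret = load.get("return", {})
        if not isinstance(ret, dict):
            # e.g. a traceback rendered as a list of strings
            log.error(
                "Event with bad payload received from '%s': %s",
                load.get("id", "UNKNOWN"),
                "".join(ret) if isinstance(ret, list) else ret,
            )
            return None
        return ret

    checked_ret({"id": "minion1", "return": ["Traceback (most recent call last):\n"]})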
diff --git a/improve-error-handling-with-different-openssl-versio.patch b/improve-error-handling-with-different-openssl-versio.patch
new file mode 100644
index 0000000..4d16812
--- /dev/null
+++ b/improve-error-handling-with-different-openssl-versio.patch
@@ -0,0 +1,98 @@
+From 4e226426d0897f2d9dc64891ced78487b181d40e Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Fri, 30 Aug 2024 14:33:51 +0200
+Subject: [PATCH] Improve error handling with different OpenSSL
+ versions
+
+* Make error checking of x509 more flexible
+
+for most recent cryptography and openSSL versions
+
+* Add test for different exception value on loading private key
+
+* Add fix for test_privkey_new_with_prereq on old OpenSSL
+---
+ salt/utils/x509.py | 3 +-
+ .../pytests/functional/states/test_x509_v2.py | 29 +++++++++++++++++++
+ .../integration/states/test_x509_v2.py | 7 +++++
+ 3 files changed, 38 insertions(+), 1 deletion(-)
+
+diff --git a/salt/utils/x509.py b/salt/utils/x509.py
+index 5b2ae15882..f9fdca64d9 100644
+--- a/salt/utils/x509.py
++++ b/salt/utils/x509.py
+@@ -695,7 +695,8 @@ def load_privkey(pk, passphrase=None, get_encoding=False):
+ return pk, "pem", None
+ return pk
+ except ValueError as err:
+- if "Bad decrypt" in str(err):
++ str_err = str(err)
++ if "Bad decrypt" in str_err or "Could not deserialize key data" in str_err:
+ raise SaltInvocationError(
+ "Bad decrypt - is the password correct?"
+ ) from err
+diff --git a/tests/pytests/functional/states/test_x509_v2.py b/tests/pytests/functional/states/test_x509_v2.py
+index 929be014cd..47a1c555f8 100644
+--- a/tests/pytests/functional/states/test_x509_v2.py
++++ b/tests/pytests/functional/states/test_x509_v2.py
+@@ -3,6 +3,8 @@ from pathlib import Path
+
+ import pytest
+
++from tests.support.mock import patch
++
+ try:
+ import cryptography
+ import cryptography.x509 as cx509
+@@ -2826,3 +2828,30 @@ def _get_privkey(pk, encoding="pem", passphrase=None):
+ pk = base64.b64decode(pk)
+ return pkcs12.load_pkcs12(pk, passphrase).key
+ raise ValueError("Need correct encoding")
++
++
++@pytest.mark.usefixtures("existing_pk")
++@pytest.mark.parametrize("existing_pk", [{"passphrase": "password"}], indirect=True)
++def test_exceptions_on_calling_load_pem_private_key(x509, pk_args):
++ pk_args["passphrase"] = "hunter1"
++ pk_args["overwrite"] = True
++
++ with patch(
++ "cryptography.hazmat.primitives.serialization.load_pem_private_key",
++ side_effect=ValueError("Bad decrypt. Incorrect password?"),
++ ):
++ ret = x509.private_key_managed(**pk_args)
++ _assert_pk_basic(ret, "rsa", passphrase="hunter1")
++
++ with patch(
++ "cryptography.hazmat.primitives.serialization.load_pem_private_key",
++ side_effect=ValueError(
++ "Could not deserialize key data. The data may be in an incorrect format, "
++ "the provided password may be incorrect, "
++ "it may be encrypted with an unsupported algorithm, "
++ "or it may be an unsupported key type "
++ "(e.g. EC curves with explicit parameters)."
++ ),
++ ):
++ ret = x509.private_key_managed(**pk_args)
++ _assert_pk_basic(ret, "rsa", passphrase="hunter1")
+diff --git a/tests/pytests/integration/states/test_x509_v2.py b/tests/pytests/integration/states/test_x509_v2.py
+index 4f94341295..ad8d904c92 100644
+--- a/tests/pytests/integration/states/test_x509_v2.py
++++ b/tests/pytests/integration/states/test_x509_v2.py
+@@ -195,6 +195,13 @@ Certificate:
+ """
+ with x509_salt_master.state_tree.base.temp_file("manage_cert.sls", state):
+ ret = x509_salt_call_cli.run("state.apply", "manage_cert")
++ if (
++ ret.returncode == 1
++ and "NotImplementedError: ECDSA keys with unnamed curves" in ret.stdout
++ ):
++ pytest.skip(
++ "The version of OpenSSL doesn't support ECDSA keys with unnamed curves"
++ )
+ assert ret.returncode == 0
+ assert ret.data[next(iter(ret.data))]["changes"]
+ assert (tmp_path / "priv.key").exists()
+--
+2.46.0
+
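The underlying problem is that cryptography surfaces a wrong private-key passphrase as a ValueError whose wording depends on the OpenSSL generation, so the handler matches any known phrasing instead of one exact string. Reduced to a sketch:

    BAD_DECRYPT_MARKERS = ("Bad decrypt", "Could not deserialize key data")

    def is_bad_passphrase(err):
        str_err = str(err)
        return any(marker in str_err for marker in BAD_DECRYPT_MARKERS)

    # older OpenSSL wording
    print(is_bad_passphrase(ValueError("Bad decrypt. Incorrect password?")))
    # newer OpenSSL 3.x wording
    print(is_bad_passphrase(ValueError("Could not deserialize key data.")))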
diff --git a/improve-pip-target-override-condition-with-venv_pip_.patch b/improve-pip-target-override-condition-with-venv_pip_.patch
new file mode 100644
index 0000000..1728271
--- /dev/null
+++ b/improve-pip-target-override-condition-with-venv_pip_.patch
@@ -0,0 +1,113 @@
+From da938aa8a572138b5b9b1535c5c3d69326e5194e Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Thu, 18 Jan 2024 17:02:23 +0100
+Subject: [PATCH] Improve pip target override condition with
+ VENV_PIP_TARGET environment variable (bsc#1216850) (#613)
+
+* Improve pip target override condition
+
+* Improve pip test with different condition of overriding the target
+
+* Add changelog entry
+---
+ changelog/65562.fixed.md | 1 +
+ salt/modules/pip.py | 6 ++--
+ tests/pytests/unit/modules/test_pip.py | 50 +++++++++++++++++---------
+ 3 files changed, 38 insertions(+), 19 deletions(-)
+ create mode 100644 changelog/65562.fixed.md
+
+diff --git a/changelog/65562.fixed.md b/changelog/65562.fixed.md
+new file mode 100644
+index 0000000000..ba483b4b77
+--- /dev/null
++++ b/changelog/65562.fixed.md
+@@ -0,0 +1 @@
++Improve the condition of overriding target for pip with VENV_PIP_TARGET environment variable.
+diff --git a/salt/modules/pip.py b/salt/modules/pip.py
+index a60bdca0bb..68a2a442a1 100644
+--- a/salt/modules/pip.py
++++ b/salt/modules/pip.py
+@@ -857,9 +857,11 @@ def install(
+ cmd.extend(["--build", build])
+
+ # Use VENV_PIP_TARGET environment variable value as target
+- # if set and no target specified on the function call
++ # if set and no target specified on the function call.
++ # Do not set target if bin_env is specified; use the default
++ # for the specified binary environment or expect an explicit target.
+ target_env = os.environ.get("VENV_PIP_TARGET", None)
+- if target is None and target_env is not None:
++ if target is None and target_env is not None and bin_env is None:
+ target = target_env
+
+ if target:
+diff --git a/tests/pytests/unit/modules/test_pip.py b/tests/pytests/unit/modules/test_pip.py
+index b7ad1ea3fd..c03e6ed292 100644
+--- a/tests/pytests/unit/modules/test_pip.py
++++ b/tests/pytests/unit/modules/test_pip.py
+@@ -1738,28 +1738,44 @@ def test_when_version_is_called_with_a_user_it_should_be_passed_to_undelying_run
+ )
+
+
+-def test_install_target_from_VENV_PIP_TARGET_in_resulting_command(python_binary):
++@pytest.mark.parametrize(
++ "bin_env,target,target_env,expected_target",
++ [
++ (None, None, None, None),
++ (None, "/tmp/foo", None, "/tmp/foo"),
++ (None, None, "/tmp/bar", "/tmp/bar"),
++ (None, "/tmp/foo", "/tmp/bar", "/tmp/foo"),
++ ("/tmp/venv", "/tmp/foo", None, "/tmp/foo"),
++ ("/tmp/venv", None, "/tmp/bar", None),
++ ("/tmp/venv", "/tmp/foo", "/tmp/bar", "/tmp/foo"),
++ ],
++)
++def test_install_target_from_VENV_PIP_TARGET_in_resulting_command(
++ python_binary, bin_env, target, target_env, expected_target
++):
+ pkg = "pep8"
+- target = "/tmp/foo"
+- target_env = "/tmp/bar"
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ environment = os.environ.copy()
+- environment["VENV_PIP_TARGET"] = target_env
++ real_get_pip_bin = pip._get_pip_bin
++
++ def mock_get_pip_bin(bin_env):
++ if not bin_env:
++ return real_get_pip_bin(bin_env)
++ return [f"{bin_env}/bin/pip"]
++
++ if target_env is not None:
++ environment["VENV_PIP_TARGET"] = target_env
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}), patch.object(
+ os, "environ", environment
+- ):
+- pip.install(pkg)
+- expected = [*python_binary, "install", "--target", target_env, pkg]
+- mock.assert_called_with(
+- expected,
+- saltenv="base",
+- runas=None,
+- use_vt=False,
+- python_shell=False,
+- )
+- mock.reset_mock()
+- pip.install(pkg, target=target)
+- expected = [*python_binary, "install", "--target", target, pkg]
++ ), patch.object(pip, "_get_pip_bin", mock_get_pip_bin):
++ pip.install(pkg, bin_env=bin_env, target=target)
++ expected_binary = python_binary
++ if bin_env is not None:
++ expected_binary = [f"{bin_env}/bin/pip"]
++ if expected_target is not None:
++ expected = [*expected_binary, "install", "--target", expected_target, pkg]
++ else:
++ expected = [*expected_binary, "install", pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+--
+2.43.0
+
+
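The resulting precedence is: an explicit target argument always wins; VENV_PIP_TARGET only fills in when neither target nor bin_env is given, so a chosen binary environment keeps its own default. As a standalone sketch (resolve_pip_target is a hypothetical name):

    import os

    def resolve_pip_target(target=None, bin_env=None):
        target_env = os.environ.get("VENV_PIP_TARGET", None)
        if target is None and target_env is not None and bin_env is None:
            return target_env
        return target

    os.environ["VENV_PIP_TARGET"] = "/tmp/bar"
    print(resolve_pip_target())                     # /tmp/bar
    print(resolve_pip_target(target="/tmp/foo"))    # /tmp/foo (explicit wins)
    print(resolve_pip_target(bin_env="/tmp/venv"))  # None (bin_env opts out)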
diff --git a/improve-salt.utils.json.find_json-bsc-1213293.patch b/improve-salt.utils.json.find_json-bsc-1213293.patch
new file mode 100644
index 0000000..0b0bafa
--- /dev/null
+++ b/improve-salt.utils.json.find_json-bsc-1213293.patch
@@ -0,0 +1,204 @@
+From 4e6b445f2dbe8a79d220c697abff946e00b2e57b Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Mon, 2 Oct 2023 13:26:20 +0200
+Subject: [PATCH] Improve salt.utils.json.find_json (bsc#1213293)
+
+* Improve salt.utils.json.find_json
+
+* Move tests of find_json to pytest
+---
+ salt/utils/json.py | 39 +++++++-
+ tests/pytests/unit/utils/test_json.py | 122 ++++++++++++++++++++++++++
+ 2 files changed, 158 insertions(+), 3 deletions(-)
+ create mode 100644 tests/pytests/unit/utils/test_json.py
+
+diff --git a/salt/utils/json.py b/salt/utils/json.py
+index 33cdbf401d..0845b64694 100644
+--- a/salt/utils/json.py
++++ b/salt/utils/json.py
+@@ -32,18 +32,51 @@ def find_json(raw):
+ """
+ ret = {}
+ lines = __split(raw)
++ lengths = list(map(len, lines))
++ starts = []
++ ends = []
++
++ # Search for possible starts and ends of the json fragments
+ for ind, _ in enumerate(lines):
++ line = lines[ind].lstrip()
++ if line == "{" or line == "[":
++ starts.append((ind, line))
++ if line == "}" or line == "]":
++ ends.append((ind, line))
++
++ # List all the possible pairs of starts and ends,
++ # and record the length of each block so they can be sorted by size afterwards
++ starts_ends = []
++ for start, start_br in starts:
++ for end, end_br in reversed(ends):
++ if end > start and (
++ (start_br == "{" and end_br == "}")
++ or (start_br == "[" and end_br == "]")
++ ):
++ starts_ends.append((start, end, sum(lengths[start : end + 1])))
++
++ # Iterate through all the possible pairs starting from the largest
++ starts_ends.sort(key=lambda x: (x[2], x[1] - x[0], x[0]), reverse=True)
++ for start, end, _ in starts_ends:
++ working = "\n".join(lines[start : end + 1])
+ try:
+- working = "\n".join(lines[ind:])
+- except UnicodeDecodeError:
+- working = "\n".join(salt.utils.data.decode(lines[ind:]))
++ ret = json.loads(working)
++ except ValueError:
++ continue
++ if ret:
++ return ret
+
++ # Fall back to old implementation for backward compatibility
++ # expecting json after the text
++ for ind, _ in enumerate(lines):
++ working = "\n".join(lines[ind:])
+ try:
+ ret = json.loads(working)
+ except ValueError:
+ continue
+ if ret:
+ return ret
++
+ if not ret:
+ # Not json, raise an error
+ raise ValueError
+diff --git a/tests/pytests/unit/utils/test_json.py b/tests/pytests/unit/utils/test_json.py
+new file mode 100644
+index 0000000000..72b1023003
+--- /dev/null
++++ b/tests/pytests/unit/utils/test_json.py
+@@ -0,0 +1,122 @@
++"""
++Tests for salt.utils.json
++"""
++
++import textwrap
++
++import pytest
++
++import salt.utils.json
++
++
++def test_find_json():
++ some_junk_text = textwrap.dedent(
++ """
++ Just some junk text
++ with multiline
++ """
++ )
++ some_warning_message = textwrap.dedent(
++ """
++ [WARNING] Test warning message
++ """
++ )
++ test_small_json = textwrap.dedent(
++ """
++ {
++ "local": true
++ }
++ """
++ )
++ test_sample_json = """
++ {
++ "glossary": {
++ "title": "example glossary",
++ "GlossDiv": {
++ "title": "S",
++ "GlossList": {
++ "GlossEntry": {
++ "ID": "SGML",
++ "SortAs": "SGML",
++ "GlossTerm": "Standard Generalized Markup Language",
++ "Acronym": "SGML",
++ "Abbrev": "ISO 8879:1986",
++ "GlossDef": {
++ "para": "A meta-markup language, used to create markup languages such as DocBook.",
++ "GlossSeeAlso": ["GML", "XML"]
++ },
++ "GlossSee": "markup"
++ }
++ }
++ }
++ }
++ }
++ """
++ expected_ret = {
++ "glossary": {
++ "GlossDiv": {
++ "GlossList": {
++ "GlossEntry": {
++ "GlossDef": {
++ "GlossSeeAlso": ["GML", "XML"],
++ "para": (
++ "A meta-markup language, used to create markup"
++ " languages such as DocBook."
++ ),
++ },
++ "GlossSee": "markup",
++ "Acronym": "SGML",
++ "GlossTerm": "Standard Generalized Markup Language",
++ "SortAs": "SGML",
++ "Abbrev": "ISO 8879:1986",
++ "ID": "SGML",
++ }
++ },
++ "title": "S",
++ },
++ "title": "example glossary",
++ }
++ }
++
++ # First test the valid JSON
++ ret = salt.utils.json.find_json(test_sample_json)
++ assert ret == expected_ret
++
++ # Now pre-pend some garbage and re-test
++ garbage_prepend_json = f"{some_junk_text}{test_sample_json}"
++ ret = salt.utils.json.find_json(garbage_prepend_json)
++ assert ret == expected_ret
++
++ # Now post-pend some garbage and re-test
++ garbage_postpend_json = f"{test_sample_json}{some_junk_text}"
++ ret = salt.utils.json.find_json(garbage_postpend_json)
++ assert ret == expected_ret
++
++ # Now pre-pend some warning and re-test
++ warning_prepend_json = f"{some_warning_message}{test_sample_json}"
++ ret = salt.utils.json.find_json(warning_prepend_json)
++ assert ret == expected_ret
++
++ # Now post-pend some warning and re-test
++ warning_postpend_json = f"{test_sample_json}{some_warning_message}"
++ ret = salt.utils.json.find_json(warning_postpend_json)
++ assert ret == expected_ret
++
++ # Now put around some garbage and re-test
++ garbage_around_json = f"{some_junk_text}{test_sample_json}{some_junk_text}"
++ ret = salt.utils.json.find_json(garbage_around_json)
++ assert ret == expected_ret
++
++ # Now pre-pend small json and re-test
++ small_json_pre_json = f"{test_small_json}{test_sample_json}"
++ ret = salt.utils.json.find_json(small_json_pre_json)
++ assert ret == expected_ret
++
++ # Now post-pend small json and re-test
++ small_json_post_json = f"{test_sample_json}{test_small_json}"
++ ret = salt.utils.json.find_json(small_json_post_json)
++ assert ret == expected_ret
++
++ # Test to see if a ValueError is raised if no JSON is passed in
++ with pytest.raises(ValueError):
++ ret = salt.utils.json.find_json(some_junk_text)
+--
+2.42.0
+
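The strategy above: treat every line that is a bare "{" or "[" as a candidate start and every bare "}" or "]" as a candidate end, enumerate the matching pairs, and try json.loads on the largest candidate first so a big valid document beats any fragment embedded in it. The algorithm in isolation (a simplified sketch without the backward-compatibility fallback):

    import json

    def find_json_sketch(raw):
        lines = raw.splitlines()
        stripped = [line.strip() for line in lines]
        starts = [(i, s) for i, s in enumerate(stripped) if s in ("{", "[")]
        ends = [(i, s) for i, s in enumerate(stripped) if s in ("}", "]")]
        # every start/end pair with matching bracket types, plus its size
        pairs = [
            (b, e, sum(len(x) for x in lines[b : e + 1]))
            for b, bb in starts
            for e, eb in reversed(ends)
            if e > b and ((bb == "{" and eb == "}") or (bb == "[" and eb == "]"))
        ]
        # biggest block first, mirroring the patch's sort key
        pairs.sort(key=lambda x: (x[2], x[1] - x[0], x[0]), reverse=True)
        for b, e, _ in pairs:
            try:
                ret = json.loads("\n".join(lines[b : e + 1]))
            except ValueError:
                continue
            if ret:
                return ret
        raise ValueError("no JSON found")

    print(find_json_sketch('junk before\n{\n"local": true\n}\njunk after'))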
diff --git a/include-aliases-in-the-fqdns-grains.patch b/include-aliases-in-the-fqdns-grains.patch
new file mode 100644
index 0000000..ab65d0b
--- /dev/null
+++ b/include-aliases-in-the-fqdns-grains.patch
@@ -0,0 +1,138 @@
+From 4f459d670886a8f4a410fdbd1ec595477d45e4e2 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Tue, 18 Jan 2022 17:10:37 +0100
+Subject: [PATCH] Include aliases in the fqdns grains
+
+Add UT for "is_fqdn"
+
+Add "is_fqdn" check to the network utils
+
+Bugfix: include FQDNs aliases
+
+Deprecate UnitTest assertion in favour of built-in assert keyword
+
+Add UT for fqdns aliases
+
+Leverage cached interfaces, if any.
+
+Implement network.fqdns module function (bsc#1134860) (#172)
+
+* Duplicate fqdns logic in module.network
+* Move _get_interfaces to utils.network
+* Reuse network.fqdns in grains.core.fqdns
+* Return empty list when fqdns grains is disabled
+
+Co-authored-by: Eric Siebigteroth
+---
+ salt/modules/network.py | 5 +++-
+ salt/utils/network.py | 16 +++++++++++
+ tests/pytests/unit/modules/test_network.py | 4 +--
+ tests/unit/utils/test_network.py | 32 ++++++++++++++++++++++
+ 4 files changed, 54 insertions(+), 3 deletions(-)
+
+diff --git a/salt/modules/network.py b/salt/modules/network.py
+index 524b1b74fa..f959dbf97b 100644
+--- a/salt/modules/network.py
++++ b/salt/modules/network.py
+@@ -2096,7 +2096,10 @@ def fqdns():
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=19329
+ time.sleep(random.randint(5, 25) / 1000)
+ try:
+- return [socket.getfqdn(socket.gethostbyaddr(ip)[0])]
++ name, aliaslist, addresslist = socket.gethostbyaddr(ip)
++ return [socket.getfqdn(name)] + [
++ als for als in aliaslist if salt.utils.network.is_fqdn(als)
++ ]
+ except socket.herror as err:
+ if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
+ # No FQDN for this IP address, so we don't need to know this all the time.
+diff --git a/salt/utils/network.py b/salt/utils/network.py
+index 2bea2cf129..6ec993a678 100644
+--- a/salt/utils/network.py
++++ b/salt/utils/network.py
+@@ -2372,3 +2372,19 @@ def ip_bracket(addr, strip=False):
+ addr = addr.rstrip("]")
+ addr = ipaddress.ip_address(addr)
+ return ("[{}]" if addr.version == 6 and not strip else "{}").format(addr)
++
++
++def is_fqdn(hostname):
++ """
++ Verify if hostname conforms to be a FQDN.
++
++ :param hostname: text string with the name of the host
++ :return: bool, True if hostname is correct FQDN, False otherwise
++ """
++
++ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
+Date: Tue, 25 Jan 2022 17:12:47 +0100
+Subject: [PATCH] info_installed works without status attr now
+
+If 'status' was excluded via attr, info_installed was no longer able to
+detect if a package was installed or not. Now info_installed adds the
+'status' for the 'lowpkg.info' request again.
+---
+ salt/modules/aptpkg.py | 9 +++++++++
+ tests/pytests/unit/modules/test_aptpkg.py | 18 ++++++++++++++++++
+ 2 files changed, 27 insertions(+)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index 938e37cc9e..3289f6604d 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -3461,6 +3461,15 @@ def info_installed(*names, **kwargs):
+ failhard = kwargs.pop("failhard", True)
+ kwargs.pop("errors", None) # Only for compatibility with RPM
+ attr = kwargs.pop("attr", None) # Package attributes to return
++
++ # status is needed to see if a package is installed. So we have to add it,
++ # even if it's excluded via attr parameter. Otherwise all packages are
++ # returned.
++ if attr:
++ attr_list = set(attr.split(","))
++ attr_list.add("status")
++ attr = ",".join(attr_list)
++
+ all_versions = kwargs.pop(
+ "all_versions", False
+ ) # This is for backward compatible structure only
+diff --git a/tests/pytests/unit/modules/test_aptpkg.py b/tests/pytests/unit/modules/test_aptpkg.py
+index 4226957eeb..eb72447c3a 100644
+--- a/tests/pytests/unit/modules/test_aptpkg.py
++++ b/tests/pytests/unit/modules/test_aptpkg.py
+@@ -385,6 +385,24 @@ def test_info_installed_attr(lowpkg_info_var):
+ assert ret["wget"] == expected_pkg
+
+
++def test_info_installed_attr_without_status(lowpkg_info_var):
++ """
++ Test info_installed 'attr' for inclusion of 'status' attribute.
++
++ Since info_installed should only return installed packages, we need to
++ call __salt__['lowpkg.info'] with the 'status' attribute even if the user
++ is not asking for it in 'attr'. Otherwise info_installed would not be able
++ to check if the package is installed and would return everything.
++
++ :return:
++ """
++ mock = MagicMock(return_value=lowpkg_info_var)
++ with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}):
++ aptpkg.info_installed("wget", attr="version")
++ assert "status" in mock.call_args.kwargs["attr"]
++ assert "version" in mock.call_args.kwargs["attr"]
++
++
+ def test_info_installed_all_versions(lowpkg_info_var):
+ """
+ Test info_installed 'all_versions'.
+--
+2.39.2
+
+
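The fix above boils down to forcing "status" into the comma-separated attr string before delegating to lowpkg.info. A minimal standalone sketch of that normalization, with a hypothetical helper name that is not part of the patch:

    def ensure_status_attr(attr):
        """Return attr with 'status' guaranteed to be requested."""
        if not attr:
            # No filter requested: lowpkg.info already returns all attributes.
            return attr
        attr_list = set(attr.split(","))
        attr_list.add("status")
        return ",".join(attr_list)

    # "status" is guaranteed to be present; ordering is undefined (set-based).
    assert "status" in ensure_status_attr("version,arch").split(",")
    assert ensure_status_attr(None) is None
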
diff --git a/join-masters-if-it-is-a-list-671.patch b/join-masters-if-it-is-a-list-671.patch
new file mode 100644
index 0000000..2f384ab
--- /dev/null
+++ b/join-masters-if-it-is-a-list-671.patch
@@ -0,0 +1,105 @@
+From 94973ee85d766d7e98d02d89f4c81e59b36cb716 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Thu, 29 Aug 2024 10:01:12 +0200
+Subject: [PATCH] Join masters if it is a list (#671)
+
+Co-authored-by: Twangboy
+---
+ changelog/64170.fixed.md | 2 +
+ salt/utils/cloud.py | 10 +++++
+ tests/pytests/unit/utils/test_cloud.py | 52 ++++++++++++++++++++++++++
+ 3 files changed, 64 insertions(+)
+ create mode 100644 changelog/64170.fixed.md
+
+diff --git a/changelog/64170.fixed.md b/changelog/64170.fixed.md
+new file mode 100644
+index 0000000000..1d20355bf1
+--- /dev/null
++++ b/changelog/64170.fixed.md
+@@ -0,0 +1,2 @@
++Fixed issue in salt-cloud so that multiple masters specified in the cloud
++are written to the minion config properly
+diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py
+index b7208dc4a6..a084313059 100644
+--- a/salt/utils/cloud.py
++++ b/salt/utils/cloud.py
+@@ -1202,6 +1202,16 @@ def wait_for_passwd(
+ time.sleep(trysleep)
+
+
++def _format_master_param(master):
++ """
++    If the master is a list, we need to convert it to a comma-delimited string.
++    Otherwise, we just return master unchanged.
++ """
++ if isinstance(master, list):
++ return ",".join(master)
++ return master
++
++
+ def deploy_windows(
+ host,
+ port=445,
+diff --git a/tests/pytests/unit/utils/test_cloud.py b/tests/pytests/unit/utils/test_cloud.py
+index 550b63c974..db9d258d39 100644
+--- a/tests/pytests/unit/utils/test_cloud.py
++++ b/tests/pytests/unit/utils/test_cloud.py
+@@ -605,3 +605,55 @@ def test_deploy_script_ssh_timeout():
+ ssh_kwargs = root_cmd.call_args.kwargs
+ assert "ssh_timeout" in ssh_kwargs
+ assert ssh_kwargs["ssh_timeout"] == 34
++
++
++@pytest.mark.parametrize(
++ "master,expected",
++ [
++ (None, None),
++ ("single_master", "single_master"),
++ (["master1", "master2", "master3"], "master1,master2,master3"),
++ ],
++)
++def test__format_master_param(master, expected):
++ result = cloud._format_master_param(master)
++ assert result == expected
++
++
++@pytest.mark.skip_unless_on_windows(reason="Only applicable for Windows.")
++@pytest.mark.parametrize(
++ "master,expected",
++ [
++ (None, None),
++ ("single_master", "single_master"),
++ (["master1", "master2", "master3"], "master1,master2,master3"),
++ ],
++)
++def test_deploy_windows_master(master, expected):
++ """
++ Test deploy_windows with master parameter
++ """
++ mock_true = MagicMock(return_value=True)
++ mock_tuple = MagicMock(return_value=(0, 0, 0))
++ with patch("salt.utils.smb.get_conn", MagicMock()), patch(
++ "salt.utils.smb.mkdirs", MagicMock()
++ ), patch("salt.utils.smb.put_file", MagicMock()), patch(
++ "salt.utils.smb.delete_file", MagicMock()
++ ), patch(
++ "salt.utils.smb.delete_directory", MagicMock()
++ ), patch(
++ "time.sleep", MagicMock()
++ ), patch.object(
++ cloud, "wait_for_port", mock_true
++ ), patch.object(
++ cloud, "fire_event", MagicMock()
++ ), patch.object(
++ cloud, "wait_for_psexecsvc", mock_true
++ ), patch.object(
++ cloud, "run_psexec_command", mock_tuple
++ ) as mock:
++ cloud.deploy_windows(host="test", win_installer="install.exe", master=master)
++ expected_cmd = "c:\\salttemp\\install.exe"
++ expected_args = "/S /master={} /minion-name=None".format(expected)
++ assert mock.call_args_list[0].args[0] == expected_cmd
++ assert mock.call_args_list[0].args[1] == expected_args
+--
+2.44.0
+
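For reference, a standalone sketch of the helper with the same inputs the parametrized test above covers:

    def _format_master_param(master):
        # A list of masters collapses to one comma-delimited string for the
        # Windows installer's /master= switch; scalars pass through untouched.
        if isinstance(master, list):
            return ",".join(master)
        return master

    assert _format_master_param(None) is None
    assert _format_master_param("single_master") == "single_master"
    assert _format_master_param(["master1", "master2"]) == "master1,master2"
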
diff --git a/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
new file mode 100644
index 0000000..5a5cdc2
--- /dev/null
+++ b/let-salt-ssh-use-platform-python-binary-in-rhel8-191.patch
@@ -0,0 +1,32 @@
+From 1de8313e55317a62c36a1a6262e7b9463544d69c Mon Sep 17 00:00:00 2001
+From: Can Bulut Bayburt <1103552+cbbayburt@users.noreply.github.com>
+Date: Wed, 4 Dec 2019 15:59:46 +0100
+Subject: [PATCH] Let salt-ssh use 'platform-python' binary in RHEL8
+ (#191)
+
+RHEL/CentOS 8 has an internal Python interpreter called 'platform-python'
+included in the base setup.
+
+Add this binary to the list of Python executables to look for when
+creating the sh shim.
+---
+ salt/client/ssh/__init__.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
+index 88365a6099..049baff51a 100644
+--- a/salt/client/ssh/__init__.py
++++ b/salt/client/ssh/__init__.py
+@@ -146,7 +146,7 @@ if [ "$SUDO" ] && [ "$SUDO_USER" ]
+ then SUDO="$SUDO -u $SUDO_USER"
+ fi
+ EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
+-PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python /usr/libexec/platform-python"
++PYTHON_CMDS="python3 /usr/libexec/platform-python python27 python2.7 python26 python2.6 python2 python"
+ for py_cmd in $PYTHON_CMDS
+ do
+ if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
+--
+2.39.2
+
+
diff --git a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch
new file mode 100644
index 0000000..0fafcc8
--- /dev/null
+++ b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch
@@ -0,0 +1,28 @@
+From f9731227e7af0b1bf0a54993e0cac890225517f6 Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Fri, 16 Nov 2018 10:54:12 +0100
+Subject: [PATCH] Make aptpkg.list_repos compatible on enabled/disabled
+ output
+
+---
+ salt/modules/aptpkg.py | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
+index f68b1907e8..8e89744b5e 100644
+--- a/salt/modules/aptpkg.py
++++ b/salt/modules/aptpkg.py
+@@ -1919,6 +1919,9 @@ def list_repos(**kwargs):
+ repo["file"] = source.file
+ repo["comps"] = getattr(source, "comps", [])
+ repo["disabled"] = source.disabled
++ repo["enabled"] = not repo[
++ "disabled"
++ ] # This is for compatibility with the other modules
+ repo["dist"] = source.dist
+ repo["type"] = source.type
+ repo["uri"] = source.uri
+--
+2.39.2
+
+
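A minimal illustration of the compatibility key, assuming a repo dict shaped like the one list_repos builds:

    # The derived key keeps aptpkg.list_repos output aligned with
    # yum/zypper-based modules that report "enabled".
    repo = {"disabled": False}
    repo["enabled"] = not repo["disabled"]
    assert repo == {"disabled": False, "enabled": True}
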
diff --git a/make-importing-seco.range-thread-safe-bsc-1211649.patch b/make-importing-seco.range-thread-safe-bsc-1211649.patch
new file mode 100644
index 0000000..f60e60c
--- /dev/null
+++ b/make-importing-seco.range-thread-safe-bsc-1211649.patch
@@ -0,0 +1,63 @@
+From 0913a58a36ef69d957dd9cc5c95fafe6d56448d5 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Mon, 4 Mar 2024 11:27:35 +0100
+Subject: [PATCH] Make importing seco.range thread safe (bsc#1211649)
+
+---
+ salt/roster/range.py | 5 +++++
+ salt/utils/roster_matcher.py | 5 +++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/salt/roster/range.py b/salt/roster/range.py
+index 3f039dcef42..1525f70c32b 100644
+--- a/salt/roster/range.py
++++ b/salt/roster/range.py
+@@ -15,16 +15,21 @@ import copy
+ import fnmatch
+ import logging
+
++import salt.loader
++
+ log = logging.getLogger(__name__)
+
+ # Try to import range from https://github.com/ytoolshed/range
+ HAS_RANGE = False
+ try:
++ salt.loader.LOAD_LOCK.acquire()
+ import seco.range
+
+ HAS_RANGE = True
+ except ImportError:
+ log.error("Unable to load range library")
++finally:
++ salt.loader.LOAD_LOCK.release()
+ # pylint: enable=import-error
+
+
+diff --git a/salt/utils/roster_matcher.py b/salt/utils/roster_matcher.py
+index db5dfda3e03..5165dc122b7 100644
+--- a/salt/utils/roster_matcher.py
++++ b/salt/utils/roster_matcher.py
+@@ -8,14 +8,19 @@ import functools
+ import logging
+ import re
+
++import salt.loader
++
+ # Try to import range from https://github.com/ytoolshed/range
+ HAS_RANGE = False
+ try:
++ salt.loader.LOAD_LOCK.acquire()
+ import seco.range
+
+ HAS_RANGE = True
+ except ImportError:
+ pass
++finally:
++ salt.loader.LOAD_LOCK.release()
+ # pylint: enable=import-error
+
+
+--
+2.44.0
+
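The guard serializes the import behind a process-wide lock because importing seco.range from several threads at once can crash the interpreter (bsc#1211649). A minimal sketch of the pattern, with a plain threading.Lock standing in for salt.loader.LOAD_LOCK:

    import threading

    LOAD_LOCK = threading.Lock()

    HAS_RANGE = False
    try:
        # Hold the lock across the import so only one thread runs it.
        LOAD_LOCK.acquire()
        import seco.range  # third-party; frequently absent

        HAS_RANGE = True
    except ImportError:
        pass
    finally:
        LOAD_LOCK.release()
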
diff --git a/make-logging-calls-lighter.patch b/make-logging-calls-lighter.patch
new file mode 100644
index 0000000..194da2b
--- /dev/null
+++ b/make-logging-calls-lighter.patch
@@ -0,0 +1,233 @@
+From 48b6f57ece7eb9f58b8e6da40ec241b6df3f6d01 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 09:20:18 +0200
+Subject: [PATCH] Make logging calls lighter
+
+* Call set_lowest_log_level_by_opts with set_logging_options_dict
+
+* Fix the _logging test with setting minimum logging level
+
+* Fix test_deferred_stream_handler test
+
+* Fix vt.Terminal failing test: test_log_sanitize
+
+Fixes failing test added in a09b4f445052be66f0ac53fd01fa02bfa5b82ea6
+
+We can't assume tests are run at debug level, so this ensures the test
+passes regardless of what logging level is currently set by capturing
+the output in caplog at DEBUG which stream_stdout/stream_stderr uses by
+default.
+
+Signed-off-by: Joe Groocock
+
+---------
+
+Signed-off-by: Joe Groocock
+Co-authored-by: Joe Groocock
+---
+ salt/_logging/impl.py | 1 +
+ .../integration/_logging/test_logging.py | 106 ++++++++++++++++++
+ .../handlers/test_deferred_stream_handler.py | 9 +-
+ tests/pytests/unit/utils/test_vt.py | 6 +-
+ 4 files changed, 117 insertions(+), 5 deletions(-)
+ create mode 100644 tests/pytests/integration/_logging/test_logging.py
+
+diff --git a/salt/_logging/impl.py b/salt/_logging/impl.py
+index 2d1a276cb8..1d71cb8be8 100644
+--- a/salt/_logging/impl.py
++++ b/salt/_logging/impl.py
+@@ -426,6 +426,7 @@ def set_logging_options_dict(opts):
+ except AttributeError:
+ pass
+ set_logging_options_dict.__options_dict__ = opts
++ set_lowest_log_level_by_opts(opts)
+
+
+ def freeze_logging_options_dict():
+diff --git a/tests/pytests/integration/_logging/test_logging.py b/tests/pytests/integration/_logging/test_logging.py
+new file mode 100644
+index 0000000000..8e38f55b38
+--- /dev/null
++++ b/tests/pytests/integration/_logging/test_logging.py
+@@ -0,0 +1,106 @@
++import logging
++import os
++
++import pytest
++
++import salt._logging.impl as log_impl
++from tests.support.mock import MagicMock, patch
++
++pytestmark = [
++ pytest.mark.skip_on_windows(reason="Temporarily skipped on the newer golden images")
++]
++
++
++log = logging.getLogger(__name__)
++
++
++@pytest.fixture
++def configure_loader_modules():
++ return {log_impl: {}}
++
++
++def log_nameToLevel(name):
++ """
++ Return the numeric representation of textual logging level
++ """
++ # log level values
++ CRITICAL = 50
++ FATAL = CRITICAL
++ ERROR = 40
++ WARNING = 30
++ WARN = WARNING
++ INFO = 20
++ DEBUG = 10
++ NOTSET = 0
++
++ _nameToLevel = {
++ "CRITICAL": CRITICAL,
++ "FATAL": FATAL,
++ "ERROR": ERROR,
++ "WARN": WARNING,
++ "WARNING": WARNING,
++ "INFO": INFO,
++ "DEBUG": DEBUG,
++ "NOTSET": NOTSET,
++ }
++ return _nameToLevel.get(name, None)
++
++
++def test_lowest_log_level():
++ ret = log_impl.get_lowest_log_level()
++ assert ret is not None
++
++ log_impl.set_lowest_log_level(log_nameToLevel("DEBUG"))
++ ret = log_impl.get_lowest_log_level()
++ assert ret is log_nameToLevel("DEBUG")
++
++ log_impl.set_lowest_log_level(log_nameToLevel("WARNING"))
++ ret = log_impl.get_lowest_log_level()
++ assert ret is log_nameToLevel("WARNING")
++
++ opts = {"log_level": "ERROR", "log_level_logfile": "INFO"}
++ log_impl.set_lowest_log_level_by_opts(opts)
++ ret = log_impl.get_lowest_log_level()
++ assert ret is log_nameToLevel("INFO")
++
++
++def test_get_logging_level_from_string(caplog):
++ ret = log_impl.get_logging_level_from_string(None)
++ assert ret is log_nameToLevel("WARNING")
++
++ ret = log_impl.get_logging_level_from_string(log_nameToLevel("DEBUG"))
++ assert ret is log_nameToLevel("DEBUG")
++
++ ret = log_impl.get_logging_level_from_string("CRITICAL")
++ assert ret is log_nameToLevel("CRITICAL")
++
++ caplog.clear()
++ with caplog.at_level(logging.WARNING):
++ msg = "Could not translate the logging level string 'BADLEVEL' into an actual logging level integer. Returning 'logging.ERROR'."
++ ret = log_impl.get_logging_level_from_string("BADLEVEL")
++ assert ret is log_nameToLevel("ERROR")
++ assert msg in caplog.text
++
++
++def test_logfile_handler(caplog):
++ caplog.clear()
++ with caplog.at_level(logging.WARNING):
++ ret = log_impl.is_logfile_handler_configured()
++ assert ret is False
++
++ msg = "log_path setting is set to `None`. Nothing else to do"
++ log_path = None
++ assert log_impl.setup_logfile_handler(log_path) is None
++ assert msg in caplog.text
++
++
++def test_in_mainprocess():
++ ret = log_impl.in_mainprocess()
++ assert ret is True
++
++ curr_pid = os.getpid()
++ with patch(
++ "os.getpid", MagicMock(side_effect=[AttributeError, curr_pid, curr_pid])
++ ):
++ ret = log_impl.in_mainprocess()
++ assert ret is True
+diff --git a/tests/pytests/unit/_logging/handlers/test_deferred_stream_handler.py b/tests/pytests/unit/_logging/handlers/test_deferred_stream_handler.py
+index 76b0e88eca..62c0dff4be 100644
+--- a/tests/pytests/unit/_logging/handlers/test_deferred_stream_handler.py
++++ b/tests/pytests/unit/_logging/handlers/test_deferred_stream_handler.py
+@@ -9,6 +9,7 @@ import pytest
+ from pytestshellutils.utils.processes import terminate_process
+
+ from salt._logging.handlers import DeferredStreamHandler
++from salt._logging.impl import set_lowest_log_level
+ from salt.utils.nb_popen import NonBlockingPopen
+ from tests.support.helpers import CaptureOutput, dedent
+ from tests.support.runtests import RUNTIME_VARS
+@@ -20,7 +21,7 @@ def _sync_with_handlers_proc_target():
+
+ with CaptureOutput() as stds:
+ handler = DeferredStreamHandler(sys.stderr)
+- handler.setLevel(logging.DEBUG)
++ set_lowest_log_level(logging.DEBUG)
+ formatter = logging.Formatter("%(message)s")
+ handler.setFormatter(formatter)
+ logging.root.addHandler(handler)
+@@ -45,7 +46,7 @@ def _deferred_write_on_flush_proc_target():
+
+ with CaptureOutput() as stds:
+ handler = DeferredStreamHandler(sys.stderr)
+- handler.setLevel(logging.DEBUG)
++ set_lowest_log_level(logging.DEBUG)
+ formatter = logging.Formatter("%(message)s")
+ handler.setFormatter(formatter)
+ logging.root.addHandler(handler)
+@@ -126,7 +127,7 @@ def test_deferred_write_on_atexit(tmp_path):
+ # Just loop consuming output
+ while True:
+ if time.time() > max_time:
+- pytest.fail("Script didn't exit after {} second".format(execution_time))
++ pytest.fail(f"Script didn't exit after {execution_time} second")
+
+ time.sleep(0.125)
+ _out = proc.recv()
+@@ -146,7 +147,7 @@ def test_deferred_write_on_atexit(tmp_path):
+ finally:
+ terminate_process(proc.pid, kill_children=True)
+ if b"Foo" not in err:
+- pytest.fail("'Foo' should be in stderr and it's not: {}".format(err))
++ pytest.fail(f"'Foo' should be in stderr and it's not: {err}")
+
+
+ @pytest.mark.skip_on_windows(reason="Windows does not support SIGINT")
+diff --git a/tests/pytests/unit/utils/test_vt.py b/tests/pytests/unit/utils/test_vt.py
+index 438a6eb09c..c31b25e623 100644
+--- a/tests/pytests/unit/utils/test_vt.py
++++ b/tests/pytests/unit/utils/test_vt.py
+@@ -1,3 +1,4 @@
++import logging
+ import os
+ import signal
+
+@@ -43,10 +44,13 @@ def test_log_sanitize(test_cmd, caplog):
+ cmd,
+ log_stdout=True,
+ log_stderr=True,
++ log_stdout_level="debug",
++ log_stderr_level="debug",
+ log_sanitize=password,
+ stream_stdout=False,
+ stream_stderr=False,
+ )
+- ret = term.recv()
++ with caplog.at_level(logging.DEBUG):
++ ret = term.recv()
+ assert password not in caplog.text
+ assert "******" in caplog.text
+--
+2.45.0
+
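set_lowest_log_level_by_opts keeps the most verbose of the configured levels so records below it can be discarded cheaply, without touching any handler. A sketch of that computation, assuming the option names exercised by the test above:

    import logging

    def lowest_level_from_opts(opts):
        levels = []
        for key in ("log_level", "log_level_logfile"):
            level = logging.getLevelName(str(opts.get(key, "warning")).upper())
            if isinstance(level, int):  # unknown names come back as strings
                levels.append(level)
        # The numerically lowest level is the most verbose one.
        return min(levels)

    assert lowest_level_from_opts(
        {"log_level": "ERROR", "log_level_logfile": "INFO"}
    ) == logging.INFO
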
diff --git a/make-master_tops-compatible-with-salt-3000-and-older.patch b/make-master_tops-compatible-with-salt-3000-and-older.patch
new file mode 100644
index 0000000..bfe217d
--- /dev/null
+++ b/make-master_tops-compatible-with-salt-3000-and-older.patch
@@ -0,0 +1,37 @@
+From 53a5a62191b81c6838c3041cf95ffeb12fbab5b5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Mon, 19 Jun 2023 15:35:41 +0100
+Subject: [PATCH] Make master_tops compatible with Salt 3000 and older
+ minions (bsc#1212516) (bsc#1212517) (#587)
+
+---
+ salt/master.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/salt/master.py b/salt/master.py
+index da1eb8cef5..fc243ef674 100644
+--- a/salt/master.py
++++ b/salt/master.py
+@@ -1213,6 +1213,7 @@ class AESFuncs(TransportMethods):
+ "_dir_list",
+ "_symlink_list",
+ "_file_envs",
++ "_ext_nodes", # To keep compatibility with old Salt minion versions
+ )
+
+ def __init__(self, opts, context=None):
+@@ -1412,6 +1413,9 @@ class AESFuncs(TransportMethods):
+ return {}
+ return self.masterapi._master_tops(load, skip_verify=True)
+
++ # Needed so older minions can request master_tops
++ _ext_nodes = _master_tops
++
+ def _master_opts(self, load):
+ """
+ Return the master options to the minion
+--
+2.41.0
+
+
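The compatibility shim is plain attribute aliasing: the old RPC name points at the new method object, so pre-3000 minions keep working. A minimal sketch:

    class AESFuncsSketch:
        def _master_tops(self, load):
            return {"minion1": ["base"]}

        # Old Salt minions request "_ext_nodes"; alias it to the new method.
        _ext_nodes = _master_tops

    assert AESFuncsSketch()._ext_nodes({}) == {"minion1": ["base"]}
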
diff --git a/make-minion-reconnecting-on-changing-master-ip-bsc-1.patch b/make-minion-reconnecting-on-changing-master-ip-bsc-1.patch
new file mode 100644
index 0000000..ac9dba7
--- /dev/null
+++ b/make-minion-reconnecting-on-changing-master-ip-bsc-1.patch
@@ -0,0 +1,770 @@
+From eb6c67e6f535cdfbf685a54c6352018673e37a12 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Tue, 26 Nov 2024 11:59:08 +0300
+Subject: [PATCH] Make minion reconnecting on changing master IP
+ (bsc#1228182)
+
+* Minions check DNS when re-connecting to a master
+
+Check for a changing DNS record anytime a minion gets disconnected from
+its master. See GitHub issues #63654 and #61482.
+
+* Regression tests for dns defined masters
+
+Adding tests to validate that we check for a changing DNS record anytime
+the minion is disconnected from the currently connected master.
+
+* Update docs for master DNS changes
+
+Update docs to use master_alive_interval to detect master IP changes via
+DNS.
+
+* Remove comment which is not true anymore
+
+* Make minion reconnecting on changing master IP
+
+with zeromq transport
+
+* Don't create schedule for alive if no master_alive_interval
+
+* Skip the tests if running with non-root user
+
+* Skip if unable to set additional IP address
+
+* Set master_tries to -1 for minions
+
+* Fix the tests
+
+---------
+
+Co-authored-by: Daniel A. Wozniak
+---
+ conf/minion | 5 +-
+ doc/ref/configuration/minion.rst | 4 +-
+ salt/channel/client.py | 2 -
+ salt/config/__init__.py | 4 +-
+ salt/minion.py | 190 ++++++++----------
+ salt/transport/zeromq.py | 17 +-
+ tests/pytests/scenarios/dns/__init__.py | 0
+ tests/pytests/scenarios/dns/conftest.py | 99 +++++++++
+ .../scenarios/dns/multimaster/conftest.py | 124 ++++++++++++
+ .../scenarios/dns/multimaster/test_dns.py | 54 +++++
+ tests/pytests/scenarios/dns/test_dns.py | 37 ++++
+ .../multimaster/test_failover_master.py | 4 -
+ tests/pytests/unit/test_minion.py | 2 +
+ 13 files changed, 422 insertions(+), 120 deletions(-)
+ create mode 100644 tests/pytests/scenarios/dns/__init__.py
+ create mode 100644 tests/pytests/scenarios/dns/conftest.py
+ create mode 100644 tests/pytests/scenarios/dns/multimaster/conftest.py
+ create mode 100644 tests/pytests/scenarios/dns/multimaster/test_dns.py
+ create mode 100644 tests/pytests/scenarios/dns/test_dns.py
+
+diff --git a/conf/minion b/conf/minion
+index eeef626fa8..f89e18451f 100644
+--- a/conf/minion
++++ b/conf/minion
+@@ -271,9 +271,8 @@
+ #ping_interval: 0
+
+ # To auto recover minions if master changes IP address (DDNS)
+-# auth_tries: 10
+-# auth_safemode: True
+-# ping_interval: 2
++# master_alive_interval: 10
++# master_tries: -1
+ #
+ # Minions won't know master is missing until a ping fails. After the ping fail,
+ # the minion will attempt authentication and likely fails out and cause a restart.
+diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst
+index 57af5ce4a3..a1b0f2e86e 100644
+--- a/doc/ref/configuration/minion.rst
++++ b/doc/ref/configuration/minion.rst
+@@ -291,7 +291,9 @@ Default: ``0``
+
+ Configures how often, in seconds, the minion will verify that the current
+ master is alive and responding. The minion will try to establish a connection
+-to the next master in the list if it finds the existing one is dead.
++to the next master in the list if it finds the existing one is dead. This
++setting can also be used to detect master DNS record changes when a minion has
++been disconnected.
+
+ .. code-block:: yaml
+
+diff --git a/salt/channel/client.py b/salt/channel/client.py
+index 76d7a8e5b9..34aafb2c9e 100644
+--- a/salt/channel/client.py
++++ b/salt/channel/client.py
+@@ -385,8 +385,6 @@ class AsyncPubChannel:
+ # else take the relayed publish_port master reports
+ else:
+ publish_port = self.auth.creds["publish_port"]
+- # TODO: The zeromq transport does not use connect_callback and
+- # disconnect_callback.
+ yield self.transport.connect(
+ publish_port, self.connect_callback, self.disconnect_callback
+ )
+diff --git a/salt/config/__init__.py b/salt/config/__init__.py
+index b3cd5d85ae..d4865807e6 100644
+--- a/salt/config/__init__.py
++++ b/salt/config/__init__.py
+@@ -75,7 +75,7 @@ elif salt.utils.platform.is_darwin():
+ else:
+ _DFLT_IPC_MODE = "ipc"
+ _DFLT_FQDNS_GRAINS = False
+- _MASTER_TRIES = 1
++ _MASTER_TRIES = -1
+ _MASTER_USER = salt.utils.user.get_user()
+
+
+@@ -1272,7 +1272,7 @@ DEFAULT_MINION_OPTS = immutabletypes.freeze(
+ "username": None,
+ "password": None,
+ "zmq_filtering": False,
+- "zmq_monitor": False,
++ "zmq_monitor": True,
+ "cache_sreqs": True,
+ "cmd_safe": True,
+ "sudo_user": "",
+diff --git a/salt/minion.py b/salt/minion.py
+index e21a017cfd..834f0848c6 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -2737,10 +2737,64 @@ class Minion(MinionBase):
+ # we are not connected anymore
+ self.connected = False
+ log.info("Connection to master %s lost", self.opts["master"])
++ if self.opts["transport"] != "tcp":
++ self.schedule.delete_job(name=master_event(type="alive"))
++
++ log.info("Trying to tune in to next master from master-list")
++
++ if hasattr(self, "pub_channel"):
++ self.pub_channel.on_recv(None)
++ if hasattr(self.pub_channel, "auth"):
++ self.pub_channel.auth.invalidate()
++ if hasattr(self.pub_channel, "close"):
++ self.pub_channel.close()
++ if hasattr(self, "req_channel") and self.req_channel:
++ self.req_channel.close()
++ self.req_channel = None
++
++ # if eval_master finds a new master for us, self.connected
++ # will be True again on successful master authentication
++ try:
++ master, self.pub_channel = yield self.eval_master(
++ opts=self.opts,
++ failed=True,
++ failback=tag.startswith(master_event(type="failback")),
++ )
++ except SaltClientError:
++ pass
++
++ if self.connected:
++ self.opts["master"] = master
++
++ # re-init the subsystems to work with the new master
++ log.info(
++ "Re-initialising subsystems for new master %s",
++ self.opts["master"],
++ )
++
++ self.req_channel = salt.channel.client.AsyncReqChannel.factory(
++ self.opts, io_loop=self.io_loop
++ )
+
+- if self.opts["master_type"] != "failover":
+- # modify the scheduled job to fire on reconnect
+- if self.opts["transport"] != "tcp":
++ # put the current schedule into the new loaders
++ self.opts["schedule"] = self.schedule.option("schedule")
++ (
++ self.functions,
++ self.returners,
++ self.function_errors,
++ self.executors,
++ ) = self._load_modules()
++            # make the schedule use the new 'functions' loader
++ self.schedule.functions = self.functions
++ self.pub_channel.on_recv(self._handle_payload)
++ self._fire_master_minion_start()
++ log.info("Minion is ready to receive requests!")
++
++ # update scheduled job to run with the new master addr
++ if (
++ self.opts["transport"] != "tcp"
++ and self.opts["master_alive_interval"] > 0
++ ):
+ schedule = {
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+@@ -2749,116 +2803,35 @@ class Minion(MinionBase):
+ "return_job": False,
+ "kwargs": {
+ "master": self.opts["master"],
+- "connected": False,
++ "connected": True,
+ },
+ }
+ self.schedule.modify_job(
+ name=master_event(type="alive", master=self.opts["master"]),
+ schedule=schedule,
+ )
+- else:
+- # delete the scheduled job to don't interfere with the failover process
+- if self.opts["transport"] != "tcp":
+- self.schedule.delete_job(name=master_event(type="alive"))
+-
+- log.info("Trying to tune in to next master from master-list")
+-
+- if hasattr(self, "pub_channel"):
+- self.pub_channel.on_recv(None)
+- if hasattr(self.pub_channel, "auth"):
+- self.pub_channel.auth.invalidate()
+- if hasattr(self.pub_channel, "close"):
+- self.pub_channel.close()
+- del self.pub_channel
+-
+- # if eval_master finds a new master for us, self.connected
+- # will be True again on successful master authentication
+- try:
+- master, self.pub_channel = yield self.eval_master(
+- opts=self.opts,
+- failed=True,
+- failback=tag.startswith(master_event(type="failback")),
+- )
+- except SaltClientError:
+- pass
+-
+- if self.connected:
+- self.opts["master"] = master
+-
+- # re-init the subsystems to work with the new master
+- log.info(
+- "Re-initialising subsystems for new master %s",
+- self.opts["master"],
+- )
+-
+- self.req_channel = (
+- salt.transport.client.AsyncReqChannel.factory(
+- self.opts, io_loop=self.io_loop
+- )
+- )
+-
+- # put the current schedule into the new loaders
+- self.opts["schedule"] = self.schedule.option("schedule")
+- (
+- self.functions,
+- self.returners,
+- self.function_errors,
+- self.executors,
+- ) = self._load_modules()
+- # make the schedule to use the new 'functions' loader
+- self.schedule.functions = self.functions
+- self.pub_channel.on_recv(self._handle_payload)
+- self._fire_master_minion_start()
+- log.info("Minion is ready to receive requests!")
+-
+- # update scheduled job to run with the new master addr
+- if self.opts["transport"] != "tcp":
+- schedule = {
+- "function": "status.master",
+- "seconds": self.opts["master_alive_interval"],
+- "jid_include": True,
+- "maxrunning": 1,
+- "return_job": False,
+- "kwargs": {
+- "master": self.opts["master"],
+- "connected": True,
+- },
+- }
+- self.schedule.modify_job(
+- name=master_event(
+- type="alive", master=self.opts["master"]
+- ),
+- schedule=schedule,
+- )
+
+- if (
+- self.opts["master_failback"]
+- and "master_list" in self.opts
+- ):
+- if self.opts["master"] != self.opts["master_list"][0]:
+- schedule = {
+- "function": "status.ping_master",
+- "seconds": self.opts[
+- "master_failback_interval"
+- ],
+- "jid_include": True,
+- "maxrunning": 1,
+- "return_job": False,
+- "kwargs": {
+- "master": self.opts["master_list"][0]
+- },
+- }
+- self.schedule.modify_job(
+- name=master_event(type="failback"),
+- schedule=schedule,
+- )
+- else:
+- self.schedule.delete_job(
+- name=master_event(type="failback"), persist=True
+- )
+- else:
+- self.restart = True
+- self.io_loop.stop()
++ if self.opts["master_failback"] and "master_list" in self.opts:
++ if self.opts["master"] != self.opts["master_list"][0]:
++ schedule = {
++ "function": "status.ping_master",
++ "seconds": self.opts["master_failback_interval"],
++ "jid_include": True,
++ "maxrunning": 1,
++ "return_job": False,
++ "kwargs": {"master": self.opts["master_list"][0]},
++ }
++ self.schedule.modify_job(
++ name=master_event(type="failback"),
++ schedule=schedule,
++ )
++ else:
++ self.schedule.delete_job(
++ name=master_event(type="failback"), persist=True
++ )
++ else:
++ self.restart = True
++ self.io_loop.stop()
+
+ elif tag.startswith(master_event(type="connected")):
+ # handle this event only once. otherwise it will pollute the log
+@@ -2870,7 +2843,10 @@ class Minion(MinionBase):
+ self.connected = True
+ # modify the __master_alive job to only fire,
+ # if the connection is lost again
+- if self.opts["transport"] != "tcp":
++ if (
++ self.opts["transport"] != "tcp"
++ and self.opts["master_alive_interval"] > 0
++ ):
+ schedule = {
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py
+index 7cc6b9987f..89f705190e 100644
+--- a/salt/transport/zeromq.py
++++ b/salt/transport/zeromq.py
+@@ -1,6 +1,7 @@
+ """
+ Zeromq transport classes
+ """
++
+ import errno
+ import hashlib
+ import logging
+@@ -211,6 +212,12 @@ class PublishClient(salt.transport.base.PublishClient):
+ self.master_pub,
+ )
+ log.debug("%r connecting to %s", self, self.master_pub)
++ if (
++ hasattr(self, "_monitor")
++ and self._monitor is not None
++ and disconnect_callback is not None
++ ):
++ self._monitor.disconnect_callback = disconnect_callback
+ self._socket.connect(self.master_pub)
+ connect_callback(True)
+
+@@ -680,13 +687,21 @@ class ZeroMQSocketMonitor:
+ log.debug("ZeroMQ event: %s", evt)
+ if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
+ self.stop()
++ elif evt["event"] == zmq.EVENT_DISCONNECTED:
++ if (
++ hasattr(self, "disconnect_callback")
++ and self.disconnect_callback is not None
++ ):
++ self.disconnect_callback()
+
+ def stop(self):
+ if self._socket is None:
+ return
+ self._socket.disable_monitor()
+ self._socket = None
+- self._monitor_socket = None
++ if self._monitor_socket is not None:
++ self._monitor_socket.close()
++ self._monitor_socket = None
+ if self._monitor_stream is not None:
+ self._monitor_stream.close()
+ self._monitor_stream = None
+diff --git a/tests/pytests/scenarios/dns/__init__.py b/tests/pytests/scenarios/dns/__init__.py
+new file mode 100644
+index 0000000000..e69de29bb2
+diff --git a/tests/pytests/scenarios/dns/conftest.py b/tests/pytests/scenarios/dns/conftest.py
+new file mode 100644
+index 0000000000..5a8850719f
+--- /dev/null
++++ b/tests/pytests/scenarios/dns/conftest.py
+@@ -0,0 +1,99 @@
++import logging
++import pathlib
++import subprocess
++
++import pytest
++
++log = logging.getLogger(__name__)
++
++
++@pytest.fixture(scope="package")
++def master_alive_interval():
++ return 5
++
++
++class HostsFile:
++ """
++ Simple helper class for tests that need to modify /etc/hosts.
++ """
++
++ def __init__(self, path, orig_text):
++ self._path = path
++ self._orig_text = orig_text
++
++ @property
++ def orig_text(self):
++ return self._orig_text
++
++ def __getattr__(self, key):
++ if key in ["_path", "_orig_text", "orig_text"]:
++ return self.__getattribute__(key)
++ return getattr(self._path, key)
++
++
++@pytest.fixture
++def etc_hosts():
++ hosts = pathlib.Path("/etc/hosts")
++ orig_text = hosts.read_text(encoding="utf-8")
++ hosts = HostsFile(hosts, orig_text)
++ try:
++ yield hosts
++ finally:
++ hosts.write_text(orig_text)
++
++
++@pytest.fixture(scope="package")
++def master(request, salt_factories):
++
++ try:
++ subprocess.check_output(["ip", "addr", "add", "172.16.0.1/32", "dev", "lo"])
++ ip_addr_set = True
++ except subprocess.CalledProcessError:
++ ip_addr_set = False
++
++ config_defaults = {
++ "open_mode": True,
++ "transport": request.config.getoption("--transport"),
++ }
++ config_overrides = {
++ "interface": "0.0.0.0",
++ }
++ factory = salt_factories.salt_master_daemon(
++ "master",
++ defaults=config_defaults,
++ overrides=config_overrides,
++ extra_cli_arguments_after_first_start_failure=["--log-level=info"],
++ )
++ factory.ip_addr_set = ip_addr_set
++ with factory.started(start_timeout=180):
++ yield factory
++
++ try:
++ subprocess.check_output(["ip", "addr", "del", "172.16.0.1/32", "dev", "lo"])
++ except subprocess.CalledProcessError:
++ pass
++
++
++@pytest.fixture(scope="package")
++def salt_cli(master):
++ return master.salt_cli(timeout=180)
++
++
++@pytest.fixture(scope="package")
++def minion(master, master_alive_interval):
++ config_defaults = {
++ "transport": master.config["transport"],
++ }
++ port = master.config["ret_port"]
++ config_overrides = {
++ "master": f"master.local:{port}",
++ "publish_port": master.config["publish_port"],
++ "master_alive_interval": master_alive_interval,
++ }
++ factory = master.salt_minion_daemon(
++ "minion",
++ defaults=config_defaults,
++ overrides=config_overrides,
++ extra_cli_arguments_after_first_start_failure=["--log-level=info"],
++ )
++ return factory
+diff --git a/tests/pytests/scenarios/dns/multimaster/conftest.py b/tests/pytests/scenarios/dns/multimaster/conftest.py
+new file mode 100644
+index 0000000000..3333f812ce
+--- /dev/null
++++ b/tests/pytests/scenarios/dns/multimaster/conftest.py
+@@ -0,0 +1,124 @@
++import logging
++import os
++import shutil
++import subprocess
++
++import pytest
++
++log = logging.getLogger(__name__)
++
++
++@pytest.fixture(scope="package")
++def salt_mm_master_1(request, salt_factories):
++
++ try:
++ subprocess.check_output(["ip", "addr", "add", "172.16.0.1/32", "dev", "lo"])
++ ip_addr_set = True
++ except subprocess.CalledProcessError:
++ ip_addr_set = False
++
++ config_defaults = {
++ "open_mode": True,
++ "transport": request.config.getoption("--transport"),
++ }
++ config_overrides = {
++ "interface": "0.0.0.0",
++ "master_sign_pubkey": True,
++ }
++ factory = salt_factories.salt_master_daemon(
++ "mm-master-1",
++ defaults=config_defaults,
++ overrides=config_overrides,
++ extra_cli_arguments_after_first_start_failure=["--log-level=info"],
++ )
++ factory.ip_addr_set = ip_addr_set
++ try:
++ with factory.started(start_timeout=180):
++ yield factory
++ finally:
++
++ try:
++ subprocess.check_output(["ip", "addr", "del", "172.16.0.1/32", "dev", "lo"])
++ except subprocess.CalledProcessError:
++ pass
++
++
++@pytest.fixture(scope="package")
++def mm_master_1_salt_cli(salt_mm_master_1):
++ return salt_mm_master_1.salt_cli(timeout=180)
++
++
++@pytest.fixture(scope="package")
++def salt_mm_master_2(salt_factories, salt_mm_master_1):
++ # if salt.utils.platform.is_darwin() or salt.utils.platform.is_freebsd():
++ # subprocess.check_output(["ifconfig", "lo0", "alias", "127.0.0.2", "up"])
++
++ config_defaults = {
++ "open_mode": True,
++ "transport": salt_mm_master_1.config["transport"],
++ }
++ config_overrides = {
++ "interface": "0.0.0.0",
++ "master_sign_pubkey": True,
++ }
++
++    # The second master needs its own ports; both masters bind to the same interface
++ for key in (
++ "ret_port",
++ "publish_port",
++ ):
++ config_overrides[key] = salt_mm_master_1.config[key] + 1
++ factory = salt_factories.salt_master_daemon(
++ "mm-master-2",
++ defaults=config_defaults,
++ overrides=config_overrides,
++ extra_cli_arguments_after_first_start_failure=["--log-level=info"],
++ )
++
++ # Both masters will share the same signing key pair
++ for keyfile in ("master_sign.pem", "master_sign.pub"):
++ shutil.copyfile(
++ os.path.join(salt_mm_master_1.config["pki_dir"], keyfile),
++ os.path.join(factory.config["pki_dir"], keyfile),
++ )
++ with factory.started(start_timeout=180):
++ yield factory
++
++
++@pytest.fixture(scope="package")
++def mm_master_2_salt_cli(salt_mm_master_2):
++ return salt_mm_master_2.salt_cli(timeout=180)
++
++
++@pytest.fixture(scope="package")
++def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2, master_alive_interval):
++ config_defaults = {
++ "transport": salt_mm_master_1.config["transport"],
++ }
++
++ mm_master_1_port = salt_mm_master_1.config["ret_port"]
++ mm_master_2_port = salt_mm_master_2.config["ret_port"]
++ config_overrides = {
++ "master": [
++ f"master1.local:{mm_master_1_port}",
++ f"master2.local:{mm_master_2_port}",
++ ],
++ "publish_port": salt_mm_master_1.config["publish_port"],
++ "master_alive_interval": master_alive_interval,
++ "master_tries": -1,
++ "verify_master_pubkey_sign": True,
++ "retry_dns": True,
++ }
++ factory = salt_mm_master_1.salt_minion_daemon(
++ "mm-minion-1",
++ defaults=config_defaults,
++ overrides=config_overrides,
++ extra_cli_arguments_after_first_start_failure=["--log-level=info"],
++ )
++ # Need to grab the public signing key from the master, either will do
++ shutil.copyfile(
++ os.path.join(salt_mm_master_1.config["pki_dir"], "master_sign.pub"),
++ os.path.join(factory.config["pki_dir"], "master_sign.pub"),
++ )
++ # with factory.started(start_timeout=180):
++ yield factory
+diff --git a/tests/pytests/scenarios/dns/multimaster/test_dns.py b/tests/pytests/scenarios/dns/multimaster/test_dns.py
+new file mode 100644
+index 0000000000..fafb30c12e
+--- /dev/null
++++ b/tests/pytests/scenarios/dns/multimaster/test_dns.py
+@@ -0,0 +1,54 @@
++import logging
++import subprocess
++import time
++
++import pytest
++
++log = logging.getLogger(__name__)
++
++
++@pytest.mark.skip_unless_on_linux
++@pytest.mark.skip_if_not_root
++def test_multimaster_dns(
++ salt_mm_master_1,
++ salt_mm_minion_1,
++ mm_master_1_salt_cli,
++ etc_hosts,
++ caplog,
++ master_alive_interval,
++):
++ """
++ Verify a minion configured with multimaster hot/hot will pick up a master's
++ dns change if it's been disconnected.
++ """
++
++ if not salt_mm_master_1.ip_addr_set:
++ pytest.skip("Unable to set additional IP address for master1")
++
++ etc_hosts.write_text(
++ f"{etc_hosts.orig_text}\n172.16.0.1 master1.local master2.local"
++ )
++
++ log.info("Added hosts record for master1.local and master2.local")
++
++ with salt_mm_minion_1.started(start_timeout=180):
++ with caplog.at_level(logging.INFO):
++ ret = mm_master_1_salt_cli.run("test.ping", minion_tgt="mm-minion-1")
++ assert ret.returncode == 0
++ etc_hosts.write_text(
++ f"{etc_hosts.orig_text}\n127.0.0.1 master1.local master2.local"
++ )
++ log.info("Changed hosts record for master1.local and master2.local")
++ subprocess.check_output(["ip", "addr", "del", "172.16.0.1/32", "dev", "lo"])
++ log.info("Removed secondary master IP address.")
++ # Wait for the minion's master_alive_interval, adding a second for
++            # reliability.
++ time.sleep(master_alive_interval + 1)
++ assert (
++ "Master ip address changed from 172.16.0.1 to 127.0.0.1" in caplog.text
++ )
++ ret = mm_master_1_salt_cli.run("test.ping", minion_tgt="mm-minion-1")
++ assert ret.returncode == 0
++ assert (
++ "Master ip address changed from 172.16.0.1 to 127.0.0.1" in caplog.text
++ )
+diff --git a/tests/pytests/scenarios/dns/test_dns.py b/tests/pytests/scenarios/dns/test_dns.py
+new file mode 100644
+index 0000000000..cd33f0e7f0
+--- /dev/null
++++ b/tests/pytests/scenarios/dns/test_dns.py
+@@ -0,0 +1,37 @@
++import logging
++import subprocess
++import time
++
++import pytest
++
++log = logging.getLogger(__name__)
++
++
++@pytest.mark.skip_unless_on_linux
++@pytest.mark.skip_if_not_root
++def test_dns_change(master, minion, salt_cli, etc_hosts, caplog, master_alive_interval):
++ """
++ Verify a minion will pick up a master's dns change if it's been disconnected.
++ """
++
++ if not master.ip_addr_set:
++ pytest.skip("Unable to set additional IP address for master")
++
++ etc_hosts.write_text(f"{etc_hosts.orig_text}\n172.16.0.1 master.local")
++
++ with minion.started(start_timeout=180):
++ with caplog.at_level(logging.INFO):
++ ret = salt_cli.run("test.ping", minion_tgt="minion")
++ assert ret.returncode == 0
++ etc_hosts.write_text(f"{etc_hosts.orig_text}\n127.0.0.1 master.local")
++            log.info("Changed hosts record for master.local")
++ subprocess.check_output(["ip", "addr", "del", "172.16.0.1/32", "dev", "lo"])
++ log.info("Removed secondary master IP address.")
++ # Wait for the minion's master_alive_interval, adding a second for
++            # reliability.
++ time.sleep(master_alive_interval + 1)
++ assert (
++ "Master ip address changed from 172.16.0.1 to 127.0.0.1" in caplog.text
++ )
++ ret = salt_cli.run("test.ping", minion_tgt="minion")
++ assert ret.returncode == 0
+diff --git a/tests/pytests/scenarios/failover/multimaster/test_failover_master.py b/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
+index 9f6251a4d6..ebb2899ff0 100644
+--- a/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
++++ b/tests/pytests/scenarios/failover/multimaster/test_failover_master.py
+@@ -162,10 +162,6 @@ def test_minions_alive_with_no_master(
+ """
+ Make sure the minions stay alive after all masters have stopped.
+ """
+- if grains["os_family"] == "Debian" and grains["osmajorrelease"] == 9:
+- pytest.skip(
+- "Skipping on Debian 9 until flaky issues resolved. See issue #61749"
+- )
+ start_time = time.time()
+ with salt_mm_failover_master_1.stopped():
+ with salt_mm_failover_master_2.stopped():
+diff --git a/tests/pytests/unit/test_minion.py b/tests/pytests/unit/test_minion.py
+index a9e91742a2..017c28d163 100644
+--- a/tests/pytests/unit/test_minion.py
++++ b/tests/pytests/unit/test_minion.py
+@@ -884,6 +884,8 @@ async def test_master_type_failover(minion_opts):
+ assert opts["master"] == "master2"
+ return MockPubChannel()
+
++ minion_opts["master_tries"] = 1
++
+ with patch("salt.minion.resolve_dns", mock_resolve_dns), patch(
+ "salt.channel.client.AsyncPubChannel.factory", mock_channel_factory
+ ), patch("salt.loader.grains", MagicMock(return_value=[])):
+--
+2.47.0
+
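The transport change relies on PyZMQ socket monitors, which publish lifecycle events on a side socket. A rough standalone sketch of wiring a disconnect callback to those events (not Salt code; requires pyzmq):

    import threading

    import zmq
    from zmq.utils.monitor import recv_monitor_message

    def watch_disconnects(sock, on_disconnect):
        # Ask only for the events we care about.
        monitor = sock.get_monitor_socket(
            zmq.EVENT_DISCONNECTED | zmq.EVENT_MONITOR_STOPPED
        )

        def _loop():
            while True:
                evt = recv_monitor_message(monitor)
                if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
                    break
                if evt["event"] == zmq.EVENT_DISCONNECTED:
                    on_disconnect()
            monitor.close()

        threading.Thread(target=_loop, daemon=True).start()
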
diff --git a/make-reactor-engine-less-blocking-the-eventpublisher.patch b/make-reactor-engine-less-blocking-the-eventpublisher.patch
new file mode 100644
index 0000000..ba1e393
--- /dev/null
+++ b/make-reactor-engine-less-blocking-the-eventpublisher.patch
@@ -0,0 +1,104 @@
+From 0d35f09288700f5c961567442c3fcc25838b8de4 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 09:44:21 +0200
+Subject: [PATCH] Make reactor engine less blocking the EventPublisher
+
+---
+ salt/utils/reactor.py | 45 +++++++++++++++++++++++++++----------------
+ 1 file changed, 28 insertions(+), 17 deletions(-)
+
+diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py
+index 19420a51cf..78adad34da 100644
+--- a/salt/utils/reactor.py
++++ b/salt/utils/reactor.py
+@@ -1,10 +1,12 @@
+ """
+ Functions which implement running reactor jobs
+ """
++
+ import fnmatch
+ import glob
+ import logging
+ import os
++from threading import Lock
+
+ import salt.client
+ import salt.defaults.exitcodes
+@@ -194,13 +196,6 @@ class Reactor(salt.utils.process.SignalHandlingProcess, salt.state.Compiler):
+ self.resolve_aliases(chunks)
+ return chunks
+
+- def call_reactions(self, chunks):
+- """
+- Execute the reaction state
+- """
+- for chunk in chunks:
+- self.wrap.run(chunk)
+-
+ def run(self):
+ """
+ Enter into the server loop
+@@ -218,7 +213,7 @@ class Reactor(salt.utils.process.SignalHandlingProcess, salt.state.Compiler):
+ ) as event:
+ self.wrap = ReactWrap(self.opts)
+
+- for data in event.iter_events(full=True):
++ for data in event.iter_events(full=True, auto_reconnect=True):
+ # skip all events fired by ourselves
+ if data["data"].get("user") == self.wrap.event_user:
+ continue
+@@ -268,15 +263,9 @@ class Reactor(salt.utils.process.SignalHandlingProcess, salt.state.Compiler):
+ if not self.is_leader:
+ continue
+ else:
+- reactors = self.list_reactors(data["tag"])
+- if not reactors:
+- continue
+- chunks = self.reactions(data["tag"], data["data"], reactors)
+- if chunks:
+- try:
+- self.call_reactions(chunks)
+- except SystemExit:
+- log.warning("Exit ignored by reactor")
++ self.wrap.call_reactions(
++ data, self.list_reactors, self.reactions
++ )
+
+
+ class ReactWrap:
+@@ -297,6 +286,7 @@ class ReactWrap:
+
+ def __init__(self, opts):
+ self.opts = opts
++ self._run_lock = Lock()
+ if ReactWrap.client_cache is None:
+ ReactWrap.client_cache = salt.utils.cache.CacheDict(
+ opts["reactor_refresh_interval"]
+@@ -480,3 +470,24 @@ class ReactWrap:
+ Wrap LocalCaller to execute remote exec functions locally on the Minion
+ """
+ self.client_cache["caller"].cmd(fun, *kwargs["arg"], **kwargs["kwarg"])
++
++ def _call_reactions(self, data, list_reactors, get_reactions):
++ reactors = list_reactors(data["tag"])
++ if not reactors:
++ return
++ chunks = get_reactions(data["tag"], data["data"], reactors)
++ if not chunks:
++ return
++ with self._run_lock:
++ try:
++ for chunk in chunks:
++ self.run(chunk)
++ except Exception as exc: # pylint: disable=broad-except
++ log.error(
++ "Exception while calling the reactions: %s", exc, exc_info=True
++ )
++
++ def call_reactions(self, data, list_reactors, get_reactions):
++ return self.pool.fire_async(
++ self._call_reactions, args=(data, list_reactors, get_reactions)
++ )
+--
+2.45.0
+
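The core of the change is moving reaction execution off the event loop into a worker pool, serialized by a lock so chunks from different events do not interleave. A simplified model using only the standard library (the real code uses Salt's own thread pool and fire_async):

    import threading
    from concurrent.futures import ThreadPoolExecutor

    class ReactionDispatcher:
        def __init__(self):
            self._pool = ThreadPoolExecutor(max_workers=2)
            self._run_lock = threading.Lock()

        def _run_all(self, chunks, run_one):
            # Serialize reaction runs; exceptions must not kill the consumer.
            with self._run_lock:
                for chunk in chunks:
                    try:
                        run_one(chunk)
                    except Exception as exc:  # pylint: disable=broad-except
                        print(f"Exception while calling the reactions: {exc}")

        def dispatch(self, chunks, run_one):
            # Fire-and-forget, mirroring self.pool.fire_async in the patch.
            return self._pool.submit(self._run_all, chunks, run_one)
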
diff --git a/make-salt-master-self-recoverable-on-killing-eventpu.patch b/make-salt-master-self-recoverable-on-killing-eventpu.patch
new file mode 100644
index 0000000..c353e49
--- /dev/null
+++ b/make-salt-master-self-recoverable-on-killing-eventpu.patch
@@ -0,0 +1,243 @@
+From 794b5d1aa7b8e880e9a21940183d241c6cbde9c9 Mon Sep 17 00:00:00 2001
+From: Victor Zhestkov
+Date: Wed, 15 May 2024 09:42:23 +0200
+Subject: [PATCH] Make salt-master self recoverable on killing
+ EventPublisher
+
+* Implement timeout and tries to transport.ipc.IPCClient.send
+
+* Make timeout and tries configurable for fire_event
+
+* Add test of timeout and tries
+
+* Prevent exceptions from tornado Future on closing the IPC connection
+---
+ salt/transport/ipc.py | 73 +++++++++++++++++---
+ salt/utils/event.py | 21 +++++-
+ tests/pytests/unit/utils/event/test_event.py | 43 ++++++++++++
+ 3 files changed, 125 insertions(+), 12 deletions(-)
+
+diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
+index cee100b086..6631781c5c 100644
+--- a/salt/transport/ipc.py
++++ b/salt/transport/ipc.py
+@@ -2,7 +2,6 @@
+ IPC transport classes
+ """
+
+-
+ import errno
+ import logging
+ import socket
+@@ -340,7 +339,8 @@ class IPCClient:
+ try:
+ log.trace("IPCClient: Connecting to socket: %s", self.socket_path)
+ yield self.stream.connect(sock_addr)
+- self._connecting_future.set_result(True)
++ if self._connecting_future is not None:
++ self._connecting_future.set_result(True)
+ break
+ except Exception as e: # pylint: disable=broad-except
+ if self.stream.closed():
+@@ -350,7 +350,8 @@ class IPCClient:
+ if self.stream is not None:
+ self.stream.close()
+ self.stream = None
+- self._connecting_future.set_exception(e)
++ if self._connecting_future is not None:
++ self._connecting_future.set_exception(e)
+ break
+
+ yield salt.ext.tornado.gen.sleep(1)
+@@ -365,7 +366,13 @@ class IPCClient:
+ return
+
+ self._closing = True
+- self._connecting_future = None
++ if self._connecting_future is not None:
++ try:
++ self._connecting_future.set_result(True)
++ self._connecting_future.exception() # pylint: disable=E0203
++ except Exception as e: # pylint: disable=broad-except
++ log.warning("Unhandled connecting exception: %s", e, exc_info=True)
++ self._connecting_future = None
+
+ log.debug("Closing %s instance", self.__class__.__name__)
+
+@@ -435,8 +442,6 @@ class IPCMessageClient(IPCClient):
+ "close",
+ ]
+
+- # FIXME timeout unimplemented
+- # FIXME tries unimplemented
+ @salt.ext.tornado.gen.coroutine
+ def send(self, msg, timeout=None, tries=None):
+ """
+@@ -445,12 +450,60 @@ class IPCMessageClient(IPCClient):
+ If the socket is not currently connected, a connection will be established.
+
+ :param dict msg: The message to be sent
+- :param int timeout: Timeout when sending message (Currently unimplemented)
++ :param int timeout: Timeout when sending message
++        :param int tries: Maximum number of tries to send the message
+ """
+- if not self.connected():
+- yield self.connect()
++ if tries is None or tries < 1:
++ tries = 1
++ due_time = None
++ if timeout is not None:
++ due_time = time.time() + timeout
++ _try = 1
++ exc_count = 0
+ pack = salt.transport.frame.frame_msg_ipc(msg, raw_body=True)
+- yield self.stream.write(pack)
++ while _try <= tries:
++ if not self.connected():
++ self.close()
++ self.stream = None
++ self._closing = False
++ try:
++ yield self.connect(
++ timeout=(
++ None if due_time is None else max(due_time - time.time(), 1)
++ )
++ )
++ except StreamClosedError:
++ log.warning(
++ "IPCMessageClient: Unable to reconnect IPC stream on sending message with ID: 0x%016x%s",
++ id(msg),
++ f", retry {_try} of {tries}" if tries > 1 else "",
++ )
++ exc_count += 1
++ if self.connected():
++ try:
++ yield self.stream.write(pack)
++ return
++ except StreamClosedError:
++ if self._closing:
++ break
++ log.warning(
++ "IPCMessageClient: Stream was closed on sending message with ID: 0x%016x",
++ id(msg),
++ )
++ exc_count += 1
++ if exc_count == 1:
++ # Give one more chance in case if stream was detected as closed
++ # on the first write attempt
++ continue
++ cur_time = time.time()
++ _try += 1
++ if _try > tries or (due_time is not None and cur_time > due_time):
++ return
++ yield salt.ext.tornado.gen.sleep(
++ 1
++ if due_time is None
++ else (due_time - cur_time) / max(tries - _try + 1, 1)
++ )
+
+
+ class IPCMessageServer(IPCServer):
+diff --git a/salt/utils/event.py b/salt/utils/event.py
+index ef048335ae..36b530d1af 100644
+--- a/salt/utils/event.py
++++ b/salt/utils/event.py
+@@ -270,6 +270,10 @@ class SaltEvent:
+ # and don't read out events from the buffer on an on-going basis,
+ # the buffer will grow resulting in big memory usage.
+ self.connect_pub()
++ self.pusher_send_timeout = self.opts.get(
++ "pusher_send_timeout", self.opts.get("timeout")
++ )
++ self.pusher_send_tries = self.opts.get("pusher_send_tries", 3)
+
+ @classmethod
+ def __load_cache_regex(cls):
+@@ -839,10 +843,18 @@ class SaltEvent:
+ ]
+ )
+ msg = salt.utils.stringutils.to_bytes(event, "utf-8")
++ if timeout is None:
++ timeout_s = self.pusher_send_timeout
++ else:
++ timeout_s = float(timeout) / 1000
+ if self._run_io_loop_sync:
+ with salt.utils.asynchronous.current_ioloop(self.io_loop):
+ try:
+- self.pusher.send(msg)
++ self.pusher.send(
++ msg,
++ timeout=timeout_s,
++ tries=self.pusher_send_tries,
++ )
+ except Exception as exc: # pylint: disable=broad-except
+ log.debug(
+ "Publisher send failed with exception: %s",
+@@ -851,7 +863,12 @@ class SaltEvent:
+ )
+ raise
+ else:
+- self.io_loop.spawn_callback(self.pusher.send, msg)
++ self.io_loop.spawn_callback(
++ self.pusher.send,
++ msg,
++ timeout=timeout_s,
++ tries=self.pusher_send_tries,
++ )
+ return True
+
+ def fire_master(self, data, tag, timeout=1000):
+diff --git a/tests/pytests/unit/utils/event/test_event.py b/tests/pytests/unit/utils/event/test_event.py
+index 3eadfaf6ba..fa9e420a93 100644
+--- a/tests/pytests/unit/utils/event/test_event.py
++++ b/tests/pytests/unit/utils/event/test_event.py
+@@ -447,3 +447,46 @@ def test_event_fire_ret_load():
+ )
+ assert mock_log_error.mock_calls[0].args[1] == "minion_id.example.org"
+ assert mock_log_error.mock_calls[0].args[2] == "".join(test_traceback)
++
++
++@pytest.mark.slow_test
++def test_event_single_timeout_tries(sock_dir):
++    """Test an event is sent with timeout and tries"""
++
++ write_calls_count = 0
++ real_stream_write = None
++
++ @salt.ext.tornado.gen.coroutine
++ def write_mock(pack):
++ nonlocal write_calls_count
++ nonlocal real_stream_write
++ write_calls_count += 1
++ if write_calls_count > 3:
++ yield real_stream_write(pack)
++ else:
++ raise salt.ext.tornado.iostream.StreamClosedError()
++
++ with eventpublisher_process(str(sock_dir)), salt.utils.event.MasterEvent(
++ str(sock_dir), listen=True
++ ) as me:
++ me.fire_event({"data": "foo1"}, "evt1")
++ evt1 = me.get_event(tag="evt1")
++ _assert_got_event(evt1, {"data": "foo1"})
++ real_stream_write = me.pusher.stream.write
++ with patch.object(
++ me.pusher,
++ "connected",
++ side_effect=[True, True, False, False, True, True],
++ ), patch.object(
++ me.pusher,
++ "connect",
++ side_effect=salt.ext.tornado.iostream.StreamClosedError,
++ ), patch.object(
++ me.pusher.stream,
++ "write",
++ write_mock,
++ ):
++ me.fire_event({"data": "bar2"}, "evt2", timeout=5000)
++ evt2 = me.get_event(tag="evt2")
++ _assert_got_event(evt2, {"data": "bar2"})
++ assert write_calls_count == 4
+--
+2.45.0
+
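The rewritten send() is essentially a bounded retry loop with an absolute deadline and a reconnect between failed attempts. A distilled sketch of that control flow, with callables standing in for the IPC client's methods:

    import time

    def send_with_retries(connected, connect, write, msg, timeout=None, tries=3):
        tries = max(tries or 1, 1)
        deadline = None if timeout is None else time.time() + timeout
        for attempt in range(1, tries + 1):
            try:
                if not connected():
                    connect()
                write(msg)
                return True
            except OSError:
                # Broken stream: fall through and retry until out of budget.
                pass
            if deadline is not None and time.time() > deadline:
                break
            if attempt < tries:
                time.sleep(1)
        return False
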
diff --git a/make-setup.py-script-to-not-require-setuptools-9.1.patch b/make-setup.py-script-to-not-require-setuptools-9.1.patch
new file mode 100644
index 0000000..ea91881
--- /dev/null
+++ b/make-setup.py-script-to-not-require-setuptools-9.1.patch
@@ -0,0 +1,33 @@
+From d2b4c8170d7ff30bf33623fcbbb6ebb6d7af934e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Wed, 25 Mar 2020 13:09:52 +0000
+Subject: [PATCH] Make setup.py script to not require setuptools > 9.1
+
+---
+ setup.py | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index e60f1b7085..8ca8a66d45 100755
+--- a/setup.py
++++ b/setup.py
+@@ -632,14 +632,6 @@ class Install(install):
+ install.finalize_options(self)
+
+ def run(self):
+- if LooseVersion(setuptools.__version__) < LooseVersion("9.1"):
+- sys.stderr.write(
+- "\n\nInstalling Salt requires setuptools >= 9.1\n"
+- "Available setuptools version is {}\n\n".format(setuptools.__version__)
+- )
+- sys.stderr.flush()
+- sys.exit(1)
+-
+ # Let's set the running_salt_install attribute so we can add
+ # _version.txt in the build command
+ self.distribution.running_salt_install = True
+--
+2.39.2
+
+
diff --git a/make-sure-configured-user-is-properly-set-by-salt-bs.patch b/make-sure-configured-user-is-properly-set-by-salt-bs.patch
new file mode 100644
index 0000000..4702068
--- /dev/null
+++ b/make-sure-configured-user-is-properly-set-by-salt-bs.patch
@@ -0,0 +1,204 @@
+From 5ea4add5c8e2bed50b9825edfff7565e5f6124f3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 22 Aug 2023 12:57:44 +0100
+Subject: [PATCH] Make sure configured user is properly set by Salt
+ (bsc#1210994) (#596)
+
+* Make sure Salt user and env are validated before daemon init
+
+* Ensure HOME is always present in env and set according to pwuser
+
+* Set User to salt in salt-master.service files
+
+* Return proper exitcode if user is not valid
+
+* Fix environment also for salt-ssh command
+
+* Increase start_timeout to avoid test to be flaky
+---
+ pkg/common/salt-master.service | 1 +
+ pkg/old/deb/salt-master.service | 1 +
+ pkg/old/suse/salt-master.service | 1 +
+ salt/cli/daemons.py | 27 +++++++++++++++++++
+ salt/cli/ssh.py | 8 ++++++
+ salt/utils/verify.py | 4 +--
+ .../integration/cli/test_salt_minion.py | 4 +--
+ 7 files changed, 42 insertions(+), 4 deletions(-)
+
+diff --git a/pkg/common/salt-master.service b/pkg/common/salt-master.service
+index 377c87afeb..257ecc283f 100644
+--- a/pkg/common/salt-master.service
++++ b/pkg/common/salt-master.service
+@@ -8,6 +8,7 @@ LimitNOFILE=100000
+ Type=notify
+ NotifyAccess=all
+ ExecStart=/usr/bin/salt-master
++User=salt
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/pkg/old/deb/salt-master.service b/pkg/old/deb/salt-master.service
+index b5d0cdd22c..f9dca296b4 100644
+--- a/pkg/old/deb/salt-master.service
++++ b/pkg/old/deb/salt-master.service
+@@ -7,6 +7,7 @@ LimitNOFILE=16384
+ Type=notify
+ NotifyAccess=all
+ ExecStart=/usr/bin/salt-master
++User=salt
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/pkg/old/suse/salt-master.service b/pkg/old/suse/salt-master.service
+index 9e002d16ca..caabca511c 100644
+--- a/pkg/old/suse/salt-master.service
++++ b/pkg/old/suse/salt-master.service
+@@ -8,6 +8,7 @@ LimitNOFILE=100000
+ Type=simple
+ ExecStart=/usr/bin/salt-master
+ TasksMax=infinity
++User=salt
+
+ [Install]
+ WantedBy=multi-user.target
+diff --git a/salt/cli/daemons.py b/salt/cli/daemons.py
+index ecc05c919e..c9ee9ced91 100644
+--- a/salt/cli/daemons.py
++++ b/salt/cli/daemons.py
+@@ -7,6 +7,7 @@ import logging
+ import os
+ import warnings
+
++import salt.defaults.exitcodes
+ import salt.utils.kinds as kinds
+ from salt.exceptions import SaltClientError, SaltSystemExit, get_error_message
+ from salt.utils import migrations
+@@ -73,6 +74,16 @@ class DaemonsMixin: # pylint: disable=no-init
+ self.__class__.__name__,
+ )
+
++ def verify_user(self):
++ """
++ Verify Salt configured user for Salt and shutdown daemon if not valid.
++
++ :return:
++ """
++ if not check_user(self.config["user"]):
++ self.action_log_info("Cannot switch to configured user for Salt. Exiting")
++ self.shutdown(salt.defaults.exitcodes.EX_NOUSER)
++
+ def action_log_info(self, action):
+ """
+ Say daemon starting.
+@@ -178,6 +189,10 @@ class Master(
+ self.config["interface"] = ip_bracket(self.config["interface"])
+ migrations.migrate_paths(self.config)
+
++ # Ensure configured user is valid and environment is properly set
++        # before initializing the rest of the stack.
++ self.verify_user()
++
+ # Late import so logging works correctly
+ import salt.master
+
+@@ -290,6 +305,10 @@ class Minion(
+
+ transport = self.config.get("transport").lower()
+
++ # Ensure configured user is valid and environment is properly set
++        # before initializing the rest of the stack.
++ self.verify_user()
++
+ try:
+ # Late import so logging works correctly
+ import salt.minion
+@@ -478,6 +497,10 @@ class ProxyMinion(
+ self.action_log_info("An instance is already running. Exiting")
+ self.shutdown(1)
+
++ # Ensure the configured user is valid and the environment is properly set
++ # before initializing the rest of the stack.
++ self.verify_user()
++
+ # TODO: AIO core is separate from transport
+ # Late import so logging works correctly
+ import salt.minion
+@@ -576,6 +599,10 @@ class Syndic(
+
+ self.action_log_info('Setting up "{}"'.format(self.config["id"]))
+
++ # Ensure the configured user is valid and the environment is properly set
++ # before initializing the rest of the stack.
++ self.verify_user()
++
+ # Late import so logging works correctly
+ import salt.minion
+
+diff --git a/salt/cli/ssh.py b/salt/cli/ssh.py
+index 6048cb5f58..672f32b8c0 100644
+--- a/salt/cli/ssh.py
++++ b/salt/cli/ssh.py
+@@ -1,7 +1,9 @@
+ import sys
+
+ import salt.client.ssh
++import salt.defaults.exitcodes
+ import salt.utils.parsers
++from salt.utils.verify import check_user
+
+
+ class SaltSSH(salt.utils.parsers.SaltSSHOptionParser):
+@@ -15,5 +17,11 @@ class SaltSSH(salt.utils.parsers.SaltSSHOptionParser):
+ # that won't be used anyways with -H or --hosts
+ self.parse_args()
+
++ if not check_user(self.config["user"]):
++ self.exit(
++ salt.defaults.exitcodes.EX_NOUSER,
++ "Cannot switch to configured user for Salt. Exiting",
++ )
++
+ ssh = salt.client.ssh.SSH(self.config)
+ ssh.run()
+diff --git a/salt/utils/verify.py b/salt/utils/verify.py
+index 879128f231..7899fbe538 100644
+--- a/salt/utils/verify.py
++++ b/salt/utils/verify.py
+@@ -335,8 +335,8 @@ def check_user(user):
+
+ # We could just reset the whole environment but let's just override
+ # the variables we can get from pwuser
+- if "HOME" in os.environ:
+- os.environ["HOME"] = pwuser.pw_dir
++ # We ensure HOME is always present and set according to pwuser
++ os.environ["HOME"] = pwuser.pw_dir
+
+ if "SHELL" in os.environ:
+ os.environ["SHELL"] = pwuser.pw_shell
+diff --git a/tests/pytests/integration/cli/test_salt_minion.py b/tests/pytests/integration/cli/test_salt_minion.py
+index c0d6013474..bde2dd51d7 100644
+--- a/tests/pytests/integration/cli/test_salt_minion.py
++++ b/tests/pytests/integration/cli/test_salt_minion.py
+@@ -41,7 +41,7 @@ def test_exit_status_unknown_user(salt_master, minion_id):
+ factory = salt_master.salt_minion_daemon(
+ minion_id, overrides={"user": "unknown-user"}
+ )
+- factory.start(start_timeout=10, max_start_attempts=1)
++ factory.start(start_timeout=30, max_start_attempts=1)
+
+ assert exc.value.process_result.returncode == salt.defaults.exitcodes.EX_NOUSER
+ assert "The user is not available." in exc.value.process_result.stderr
+@@ -53,7 +53,7 @@ def test_exit_status_unknown_argument(salt_master, minion_id):
+ """
+ with pytest.raises(FactoryNotStarted) as exc:
+ factory = salt_master.salt_minion_daemon(minion_id)
+- factory.start("--unknown-argument", start_timeout=10, max_start_attempts=1)
++ factory.start("--unknown-argument", start_timeout=30, max_start_attempts=1)
+
+ assert exc.value.process_result.returncode == salt.defaults.exitcodes.EX_USAGE
+ assert "Usage" in exc.value.process_result.stderr
+--
+2.41.0
+
+
diff --git a/make-sure-the-file-client-is-destroyed-upon-used.patch b/make-sure-the-file-client-is-destroyed-upon-used.patch
new file mode 100644
index 0000000..db8d41f
--- /dev/null
+++ b/make-sure-the-file-client-is-destroyed-upon-used.patch
@@ -0,0 +1,850 @@
+From a1fc5287d501a1ecdbd259e5bbdd4f7d5d06dd13 Mon Sep 17 00:00:00 2001
+From: Alexander Graul
+Date: Fri, 28 Apr 2023 09:41:28 +0200
+Subject: [PATCH] Make sure the file client is destroyed after use
+
+Backport of https://github.com/saltstack/salt/pull/64113
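+
+The recurring pattern in this change is to scope file clients with a
+context manager so they are always destroyed, instead of being cached
+in __context__ for reuse. A minimal sketch (illustrative only; do_work
+is a hypothetical caller):
+
+    import salt.fileclient
+
+    def do_work(opts):
+        # The client is destroyed when the with-block exits.
+        with salt.fileclient.get_file_client(opts) as fileclient:
+            return fileclient.cache_file("salt://top.sls", "base")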
+---
+ salt/client/ssh/wrapper/saltcheck.py | 108 +++----
+ salt/fileclient.py | 11 -
+ salt/modules/dockermod.py | 17 +-
+ salt/pillar/__init__.py | 6 +-
+ salt/states/ansiblegate.py | 11 +-
+ salt/utils/asynchronous.py | 2 +-
+ salt/utils/jinja.py | 53 ++-
+ salt/utils/mako.py | 7 +
+ salt/utils/templates.py | 303 +++++++++---------
+ .../integration/states/test_include.py | 40 +++
+ .../utils/jinja/test_salt_cache_loader.py | 47 ++-
+ 11 files changed, 330 insertions(+), 275 deletions(-)
+ create mode 100644 tests/pytests/integration/states/test_include.py
+
+diff --git a/salt/client/ssh/wrapper/saltcheck.py b/salt/client/ssh/wrapper/saltcheck.py
+index d47b5cf6883..b0b94593809 100644
+--- a/salt/client/ssh/wrapper/saltcheck.py
++++ b/salt/client/ssh/wrapper/saltcheck.py
+@@ -9,6 +9,7 @@ import tarfile
+ import tempfile
+ from contextlib import closing
+
++import salt.fileclient
+ import salt.utils.files
+ import salt.utils.json
+ import salt.utils.url
+@@ -28,65 +29,62 @@ def update_master_cache(states, saltenv="base"):
+ # Setup for copying states to gendir
+ gendir = tempfile.mkdtemp()
+ trans_tar = salt.utils.files.mkstemp()
+- if "cp.fileclient_{}".format(id(__opts__)) not in __context__:
+- __context__[
+- "cp.fileclient_{}".format(id(__opts__))
+- ] = salt.fileclient.get_file_client(__opts__)
+-
+- # generate cp.list_states output and save to gendir
+- cp_output = salt.utils.json.dumps(__salt__["cp.list_states"]())
+- cp_output_file = os.path.join(gendir, "cp_output.txt")
+- with salt.utils.files.fopen(cp_output_file, "w") as fp:
+- fp.write(cp_output)
+-
+- # cp state directories to gendir
+- already_processed = []
+- sls_list = salt.utils.args.split_input(states)
+- for state_name in sls_list:
+- # generate low data for each state and save to gendir
+- state_low_file = os.path.join(gendir, state_name + ".low")
+- state_low_output = salt.utils.json.dumps(
+- __salt__["state.show_low_sls"](state_name)
+- )
+- with salt.utils.files.fopen(state_low_file, "w") as fp:
+- fp.write(state_low_output)
+-
+- state_name = state_name.replace(".", os.sep)
+- if state_name in already_processed:
+- log.debug("Already cached state for %s", state_name)
+- else:
+- file_copy_file = os.path.join(gendir, state_name + ".copy")
+- log.debug("copying %s to %s", state_name, gendir)
+- qualified_name = salt.utils.url.create(state_name, saltenv)
+- # Duplicate cp.get_dir to gendir
+- copy_result = __context__["cp.fileclient_{}".format(id(__opts__))].get_dir(
+- qualified_name, gendir, saltenv
++ with salt.fileclient.get_file_client(__opts__) as cp_fileclient:
++
++ # generate cp.list_states output and save to gendir
++ cp_output = salt.utils.json.dumps(__salt__["cp.list_states"]())
++ cp_output_file = os.path.join(gendir, "cp_output.txt")
++ with salt.utils.files.fopen(cp_output_file, "w") as fp:
++ fp.write(cp_output)
++
++ # cp state directories to gendir
++ already_processed = []
++ sls_list = salt.utils.args.split_input(states)
++ for state_name in sls_list:
++ # generate low data for each state and save to gendir
++ state_low_file = os.path.join(gendir, state_name + ".low")
++ state_low_output = salt.utils.json.dumps(
++ __salt__["state.show_low_sls"](state_name)
+ )
+- if copy_result:
+- copy_result = [dir.replace(gendir, state_cache) for dir in copy_result]
+- copy_result_output = salt.utils.json.dumps(copy_result)
+- with salt.utils.files.fopen(file_copy_file, "w") as fp:
+- fp.write(copy_result_output)
+- already_processed.append(state_name)
++ with salt.utils.files.fopen(state_low_file, "w") as fp:
++ fp.write(state_low_output)
++
++ state_name = state_name.replace(".", os.sep)
++ if state_name in already_processed:
++ log.debug("Already cached state for %s", state_name)
+ else:
+- # If files were not copied, assume state.file.sls was given and just copy state
+- state_name = os.path.dirname(state_name)
+ file_copy_file = os.path.join(gendir, state_name + ".copy")
+- if state_name in already_processed:
+- log.debug("Already cached state for %s", state_name)
++ log.debug("copying %s to %s", state_name, gendir)
++ qualified_name = salt.utils.url.create(state_name, saltenv)
++ # Duplicate cp.get_dir to gendir
++ copy_result = cp_fileclient.get_dir(qualified_name, gendir, saltenv)
++ if copy_result:
++ copy_result = [
++ dir.replace(gendir, state_cache) for dir in copy_result
++ ]
++ copy_result_output = salt.utils.json.dumps(copy_result)
++ with salt.utils.files.fopen(file_copy_file, "w") as fp:
++ fp.write(copy_result_output)
++ already_processed.append(state_name)
+ else:
+- qualified_name = salt.utils.url.create(state_name, saltenv)
+- copy_result = __context__[
+- "cp.fileclient_{}".format(id(__opts__))
+- ].get_dir(qualified_name, gendir, saltenv)
+- if copy_result:
+- copy_result = [
+- dir.replace(gendir, state_cache) for dir in copy_result
+- ]
+- copy_result_output = salt.utils.json.dumps(copy_result)
+- with salt.utils.files.fopen(file_copy_file, "w") as fp:
+- fp.write(copy_result_output)
+- already_processed.append(state_name)
++ # If files were not copied, assume state.file.sls was given and just copy state
++ state_name = os.path.dirname(state_name)
++ file_copy_file = os.path.join(gendir, state_name + ".copy")
++ if state_name in already_processed:
++ log.debug("Already cached state for %s", state_name)
++ else:
++ qualified_name = salt.utils.url.create(state_name, saltenv)
++ copy_result = cp_fileclient.get_dir(
++ qualified_name, gendir, saltenv
++ )
++ if copy_result:
++ copy_result = [
++ dir.replace(gendir, state_cache) for dir in copy_result
++ ]
++ copy_result_output = salt.utils.json.dumps(copy_result)
++ with salt.utils.files.fopen(file_copy_file, "w") as fp:
++ fp.write(copy_result_output)
++ already_processed.append(state_name)
+
+ # turn gendir into tarball and remove gendir
+ try:
+diff --git a/salt/fileclient.py b/salt/fileclient.py
+index fef5154a0be..f01a86dd0d4 100644
+--- a/salt/fileclient.py
++++ b/salt/fileclient.py
+@@ -849,7 +849,6 @@ class Client:
+ kwargs.pop("env")
+
+ kwargs["saltenv"] = saltenv
+- url_data = urllib.parse.urlparse(url)
+ sfn = self.cache_file(url, saltenv, cachedir=cachedir)
+ if not sfn or not os.path.exists(sfn):
+ return ""
+@@ -1165,13 +1164,8 @@ class RemoteClient(Client):
+
+ if not salt.utils.platform.is_windows():
+ hash_server, stat_server = self.hash_and_stat_file(path, saltenv)
+- try:
+- mode_server = stat_server[0]
+- except (IndexError, TypeError):
+- mode_server = None
+ else:
+ hash_server = self.hash_file(path, saltenv)
+- mode_server = None
+
+ # Check if file exists on server, before creating files and
+ # directories
+@@ -1214,13 +1208,8 @@ class RemoteClient(Client):
+ if dest2check and os.path.isfile(dest2check):
+ if not salt.utils.platform.is_windows():
+ hash_local, stat_local = self.hash_and_stat_file(dest2check, saltenv)
+- try:
+- mode_local = stat_local[0]
+- except (IndexError, TypeError):
+- mode_local = None
+ else:
+ hash_local = self.hash_file(dest2check, saltenv)
+- mode_local = None
+
+ if hash_local == hash_server:
+ return dest2check
+diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
+index f7344b66ac6..69b722f0c95 100644
+--- a/salt/modules/dockermod.py
++++ b/salt/modules/dockermod.py
+@@ -6667,14 +6667,6 @@ def script_retcode(
+ )["retcode"]
+
+
+-def _mk_fileclient():
+- """
+- Create a file client and add it to the context.
+- """
+- if "cp.fileclient" not in __context__:
+- __context__["cp.fileclient"] = salt.fileclient.get_file_client(__opts__)
+-
+-
+ def _generate_tmp_path():
+ return os.path.join("/tmp", "salt.docker.{}".format(uuid.uuid4().hex[:6]))
+
+@@ -6688,11 +6680,10 @@ def _prepare_trans_tar(name, sls_opts, mods=None, pillar=None, extra_filerefs=""
+ # reuse it from salt.ssh, however this function should
+ # be somewhere else
+ refs = salt.client.ssh.state.lowstate_file_refs(chunks, extra_filerefs)
+- _mk_fileclient()
+- trans_tar = salt.client.ssh.state.prep_trans_tar(
+- __context__["cp.fileclient"], chunks, refs, pillar, name
+- )
+- return trans_tar
++ with salt.fileclient.get_file_client(__opts__) as fileclient:
++ return salt.client.ssh.state.prep_trans_tar(
++ fileclient, chunks, refs, pillar, name
++ )
+
+
+ def _compile_state(sls_opts, mods=None):
+diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py
+index 0dfab4cc579..26312b3bd53 100644
+--- a/salt/pillar/__init__.py
++++ b/salt/pillar/__init__.py
+@@ -9,7 +9,6 @@ import logging
+ import os
+ import sys
+ import traceback
+-import uuid
+
+ import salt.channel.client
+ import salt.ext.tornado.gen
+@@ -1351,6 +1350,11 @@ class Pillar:
+ if hasattr(self, "_closing") and self._closing:
+ return
+ self._closing = True
++ if self.client:
++ try:
++ self.client.destroy()
++ except AttributeError:
++ pass
+
+ # pylint: disable=W1701
+ def __del__(self):
+diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py
+index 7fd4deb6c2a..9abd418c42c 100644
+--- a/salt/states/ansiblegate.py
++++ b/salt/states/ansiblegate.py
+@@ -32,12 +32,10 @@ state:
+ - state: installed
+
+ """
+-
+ import logging
+ import os
+ import sys
+
+-# Import salt modules
+ import salt.fileclient
+ import salt.utils.decorators.path
+ from salt.utils.decorators import depends
+@@ -108,13 +106,6 @@ def __virtual__():
+ return __virtualname__
+
+
+-def _client():
+- """
+- Get a fileclient
+- """
+- return salt.fileclient.get_file_client(__opts__)
+-
+-
+ def _changes(plays):
+ """
+ Find changes in ansible return data
+@@ -171,7 +162,7 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
+ }
+ if git_repo:
+ if not isinstance(rundir, str) or not os.path.isdir(rundir):
+- with _client() as client:
++ with salt.fileclient.get_file_client(__opts__) as client:
+ rundir = client._extrn_path(git_repo, "base")
+ log.trace("rundir set to %s", rundir)
+ if not isinstance(git_kwargs, dict):
+diff --git a/salt/utils/asynchronous.py b/salt/utils/asynchronous.py
+index 2a858feee98..0c645bbc3bb 100644
+--- a/salt/utils/asynchronous.py
++++ b/salt/utils/asynchronous.py
+@@ -131,7 +131,7 @@ class SyncWrapper:
+ result = io_loop.run_sync(lambda: getattr(self.obj, key)(*args, **kwargs))
+ results.append(True)
+ results.append(result)
+- except Exception as exc: # pylint: disable=broad-except
++ except Exception: # pylint: disable=broad-except
+ results.append(False)
+ results.append(sys.exc_info())
+
+diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py
+index fcc5aec497e..a6a8a279605 100644
+--- a/salt/utils/jinja.py
++++ b/salt/utils/jinja.py
+@@ -58,19 +58,6 @@ class SaltCacheLoader(BaseLoader):
+ and only loaded once per loader instance.
+ """
+
+- _cached_pillar_client = None
+- _cached_client = None
+-
+- @classmethod
+- def shutdown(cls):
+- for attr in ("_cached_client", "_cached_pillar_client"):
+- client = getattr(cls, attr, None)
+- if client is not None:
+- # PillarClient and LocalClient objects do not have a destroy method
+- if hasattr(client, "destroy"):
+- client.destroy()
+- setattr(cls, attr, None)
+-
+ def __init__(
+ self,
+ opts,
+@@ -93,8 +80,7 @@ class SaltCacheLoader(BaseLoader):
+ log.debug("Jinja search path: %s", self.searchpath)
+ self.cached = []
+ self._file_client = _file_client
+- # Instantiate the fileclient
+- self.file_client()
++ self._close_file_client = _file_client is None
+
+ def file_client(self):
+ """
+@@ -108,18 +94,10 @@ class SaltCacheLoader(BaseLoader):
+ or not hasattr(self._file_client, "opts")
+ or self._file_client.opts["file_roots"] != self.opts["file_roots"]
+ ):
+- attr = "_cached_pillar_client" if self.pillar_rend else "_cached_client"
+- cached_client = getattr(self, attr, None)
+- if (
+- cached_client is None
+- or not hasattr(cached_client, "opts")
+- or cached_client.opts["file_roots"] != self.opts["file_roots"]
+- ):
+- cached_client = salt.fileclient.get_file_client(
+- self.opts, self.pillar_rend
+- )
+- setattr(SaltCacheLoader, attr, cached_client)
+- self._file_client = cached_client
++ self._file_client = salt.fileclient.get_file_client(
++ self.opts, self.pillar_rend
++ )
++ self._close_file_client = True
+ return self._file_client
+
+ def cache_file(self, template):
+@@ -221,6 +199,27 @@ class SaltCacheLoader(BaseLoader):
+ # there is no template file within searchpaths
+ raise TemplateNotFound(template)
+
++ def destroy(self):
++ if self._close_file_client is False:
++ return
++ if self._file_client is None:
++ return
++ file_client = self._file_client
++ self._file_client = None
++
++ try:
++ file_client.destroy()
++ except AttributeError:
++ # PillarClient and LocalClient objects do not have a destroy method
++ pass
++
++ def __enter__(self):
++ self.file_client()
++ return self
++
++ def __exit__(self, *args):
++ self.destroy()
++
+
+ class PrintableDict(OrderedDict):
+ """
+diff --git a/salt/utils/mako.py b/salt/utils/mako.py
+index 69618de9837..037d5d86deb 100644
+--- a/salt/utils/mako.py
++++ b/salt/utils/mako.py
+@@ -97,3 +97,10 @@ if HAS_MAKO:
+ self.cache[fpath] = self.file_client().get_file(
+ fpath, "", True, self.saltenv
+ )
++
++ def destroy(self):
++ if self.client:
++ try:
++ self.client.destroy()
++ except AttributeError:
++ pass
+diff --git a/salt/utils/templates.py b/salt/utils/templates.py
+index 4947b820a36..4a8adf2a14f 100644
+--- a/salt/utils/templates.py
++++ b/salt/utils/templates.py
+@@ -362,163 +362,169 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
+ elif tmplstr.endswith("\n"):
+ newline = "\n"
+
+- if not saltenv:
+- if tmplpath:
+- loader = jinja2.FileSystemLoader(os.path.dirname(tmplpath))
+- else:
+- loader = salt.utils.jinja.SaltCacheLoader(
+- opts,
+- saltenv,
+- pillar_rend=context.get("_pillar_rend", False),
+- _file_client=file_client,
+- )
++ try:
++ if not saltenv:
++ if tmplpath:
++ loader = jinja2.FileSystemLoader(os.path.dirname(tmplpath))
++ else:
++ loader = salt.utils.jinja.SaltCacheLoader(
++ opts,
++ saltenv,
++ pillar_rend=context.get("_pillar_rend", False),
++ _file_client=file_client,
++ )
+
+- env_args = {"extensions": [], "loader": loader}
+-
+- if hasattr(jinja2.ext, "with_"):
+- env_args["extensions"].append("jinja2.ext.with_")
+- if hasattr(jinja2.ext, "do"):
+- env_args["extensions"].append("jinja2.ext.do")
+- if hasattr(jinja2.ext, "loopcontrols"):
+- env_args["extensions"].append("jinja2.ext.loopcontrols")
+- env_args["extensions"].append(salt.utils.jinja.SerializerExtension)
+-
+- opt_jinja_env = opts.get("jinja_env", {})
+- opt_jinja_sls_env = opts.get("jinja_sls_env", {})
+-
+- opt_jinja_env = opt_jinja_env if isinstance(opt_jinja_env, dict) else {}
+- opt_jinja_sls_env = opt_jinja_sls_env if isinstance(opt_jinja_sls_env, dict) else {}
+-
+- # Pass through trim_blocks and lstrip_blocks Jinja parameters
+- # trim_blocks removes newlines around Jinja blocks
+- # lstrip_blocks strips tabs and spaces from the beginning of
+- # line to the start of a block.
+- if opts.get("jinja_trim_blocks", False):
+- log.debug("Jinja2 trim_blocks is enabled")
+- log.warning(
+- "jinja_trim_blocks is deprecated and will be removed in a future release,"
+- " please use jinja_env and/or jinja_sls_env instead"
+- )
+- opt_jinja_env["trim_blocks"] = True
+- opt_jinja_sls_env["trim_blocks"] = True
+- if opts.get("jinja_lstrip_blocks", False):
+- log.debug("Jinja2 lstrip_blocks is enabled")
+- log.warning(
+- "jinja_lstrip_blocks is deprecated and will be removed in a future release,"
+- " please use jinja_env and/or jinja_sls_env instead"
+- )
+- opt_jinja_env["lstrip_blocks"] = True
+- opt_jinja_sls_env["lstrip_blocks"] = True
+-
+- def opt_jinja_env_helper(opts, optname):
+- for k, v in opts.items():
+- k = k.lower()
+- if hasattr(jinja2.defaults, k.upper()):
+- log.debug("Jinja2 environment %s was set to %s by %s", k, v, optname)
+- env_args[k] = v
+- else:
+- log.warning("Jinja2 environment %s is not recognized", k)
++ env_args = {"extensions": [], "loader": loader}
+
+- if "sls" in context and context["sls"] != "":
+- opt_jinja_env_helper(opt_jinja_sls_env, "jinja_sls_env")
+- else:
+- opt_jinja_env_helper(opt_jinja_env, "jinja_env")
++ if hasattr(jinja2.ext, "with_"):
++ env_args["extensions"].append("jinja2.ext.with_")
++ if hasattr(jinja2.ext, "do"):
++ env_args["extensions"].append("jinja2.ext.do")
++ if hasattr(jinja2.ext, "loopcontrols"):
++ env_args["extensions"].append("jinja2.ext.loopcontrols")
++ env_args["extensions"].append(salt.utils.jinja.SerializerExtension)
+
+- if opts.get("allow_undefined", False):
+- jinja_env = jinja2.sandbox.SandboxedEnvironment(**env_args)
+- else:
+- jinja_env = jinja2.sandbox.SandboxedEnvironment(
+- undefined=jinja2.StrictUndefined, **env_args
+- )
++ opt_jinja_env = opts.get("jinja_env", {})
++ opt_jinja_sls_env = opts.get("jinja_sls_env", {})
+
+- indent_filter = jinja_env.filters.get("indent")
+- jinja_env.tests.update(JinjaTest.salt_jinja_tests)
+- jinja_env.filters.update(JinjaFilter.salt_jinja_filters)
+- if salt.utils.jinja.JINJA_VERSION >= Version("2.11"):
+- # Use the existing indent filter on Jinja versions where it's not broken
+- jinja_env.filters["indent"] = indent_filter
+- jinja_env.globals.update(JinjaGlobal.salt_jinja_globals)
+-
+- # globals
+- jinja_env.globals["odict"] = OrderedDict
+- jinja_env.globals["show_full_context"] = salt.utils.jinja.show_full_context
+-
+- jinja_env.tests["list"] = salt.utils.data.is_list
+-
+- decoded_context = {}
+- for key, value in context.items():
+- if not isinstance(value, str):
+- if isinstance(value, NamedLoaderContext):
+- decoded_context[key] = value.value()
+- else:
+- decoded_context[key] = value
+- continue
++ opt_jinja_env = opt_jinja_env if isinstance(opt_jinja_env, dict) else {}
++ opt_jinja_sls_env = (
++ opt_jinja_sls_env if isinstance(opt_jinja_sls_env, dict) else {}
++ )
+
+- try:
+- decoded_context[key] = salt.utils.stringutils.to_unicode(
+- value, encoding=SLS_ENCODING
++ # Pass through trim_blocks and lstrip_blocks Jinja parameters
++ # trim_blocks removes newlines around Jinja blocks
++ # lstrip_blocks strips tabs and spaces from the beginning of
++ # line to the start of a block.
++ if opts.get("jinja_trim_blocks", False):
++ log.debug("Jinja2 trim_blocks is enabled")
++ log.warning(
++ "jinja_trim_blocks is deprecated and will be removed in a future release,"
++ " please use jinja_env and/or jinja_sls_env instead"
+ )
+- except UnicodeDecodeError as ex:
+- log.debug(
+- "Failed to decode using default encoding (%s), trying system encoding",
+- SLS_ENCODING,
++ opt_jinja_env["trim_blocks"] = True
++ opt_jinja_sls_env["trim_blocks"] = True
++ if opts.get("jinja_lstrip_blocks", False):
++ log.debug("Jinja2 lstrip_blocks is enabled")
++ log.warning(
++ "jinja_lstrip_blocks is deprecated and will be removed in a future release,"
++ " please use jinja_env and/or jinja_sls_env instead"
+ )
+- decoded_context[key] = salt.utils.data.decode(value)
++ opt_jinja_env["lstrip_blocks"] = True
++ opt_jinja_sls_env["lstrip_blocks"] = True
++
++ def opt_jinja_env_helper(opts, optname):
++ for k, v in opts.items():
++ k = k.lower()
++ if hasattr(jinja2.defaults, k.upper()):
++ log.debug(
++ "Jinja2 environment %s was set to %s by %s", k, v, optname
++ )
++ env_args[k] = v
++ else:
++ log.warning("Jinja2 environment %s is not recognized", k)
+
+- jinja_env.globals.update(decoded_context)
+- try:
+- template = jinja_env.from_string(tmplstr)
+- output = template.render(**decoded_context)
+- except jinja2.exceptions.UndefinedError as exc:
+- trace = traceback.extract_tb(sys.exc_info()[2])
+- line, out = _get_jinja_error(trace, context=decoded_context)
+- if not line:
+- tmplstr = ""
+- raise SaltRenderError("Jinja variable {}{}".format(exc, out), line, tmplstr)
+- except (
+- jinja2.exceptions.TemplateRuntimeError,
+- jinja2.exceptions.TemplateSyntaxError,
+- jinja2.exceptions.SecurityError,
+- ) as exc:
+- trace = traceback.extract_tb(sys.exc_info()[2])
+- line, out = _get_jinja_error(trace, context=decoded_context)
+- if not line:
+- tmplstr = ""
+- raise SaltRenderError(
+- "Jinja syntax error: {}{}".format(exc, out), line, tmplstr
+- )
+- except (SaltInvocationError, CommandExecutionError) as exc:
+- trace = traceback.extract_tb(sys.exc_info()[2])
+- line, out = _get_jinja_error(trace, context=decoded_context)
+- if not line:
+- tmplstr = ""
+- raise SaltRenderError(
+- "Problem running salt function in Jinja template: {}{}".format(exc, out),
+- line,
+- tmplstr,
+- )
+- except Exception as exc: # pylint: disable=broad-except
+- tracestr = traceback.format_exc()
+- trace = traceback.extract_tb(sys.exc_info()[2])
+- line, out = _get_jinja_error(trace, context=decoded_context)
+- if not line:
+- tmplstr = ""
++ if "sls" in context and context["sls"] != "":
++ opt_jinja_env_helper(opt_jinja_sls_env, "jinja_sls_env")
+ else:
+- tmplstr += "\n{}".format(tracestr)
+- log.debug("Jinja Error")
+- log.debug("Exception:", exc_info=True)
+- log.debug("Out: %s", out)
+- log.debug("Line: %s", line)
+- log.debug("TmplStr: %s", tmplstr)
+- log.debug("TraceStr: %s", tracestr)
++ opt_jinja_env_helper(opt_jinja_env, "jinja_env")
+
+- raise SaltRenderError(
+- "Jinja error: {}{}".format(exc, out), line, tmplstr, trace=tracestr
+- )
++ if opts.get("allow_undefined", False):
++ jinja_env = jinja2.sandbox.SandboxedEnvironment(**env_args)
++ else:
++ jinja_env = jinja2.sandbox.SandboxedEnvironment(
++ undefined=jinja2.StrictUndefined, **env_args
++ )
++
++ indent_filter = jinja_env.filters.get("indent")
++ jinja_env.tests.update(JinjaTest.salt_jinja_tests)
++ jinja_env.filters.update(JinjaFilter.salt_jinja_filters)
++ if salt.utils.jinja.JINJA_VERSION >= Version("2.11"):
++ # Use the existing indent filter on Jinja versions where it's not broken
++ jinja_env.filters["indent"] = indent_filter
++ jinja_env.globals.update(JinjaGlobal.salt_jinja_globals)
++
++ # globals
++ jinja_env.globals["odict"] = OrderedDict
++ jinja_env.globals["show_full_context"] = salt.utils.jinja.show_full_context
++
++ jinja_env.tests["list"] = salt.utils.data.is_list
++
++ decoded_context = {}
++ for key, value in context.items():
++ if not isinstance(value, str):
++ if isinstance(value, NamedLoaderContext):
++ decoded_context[key] = value.value()
++ else:
++ decoded_context[key] = value
++ continue
++
++ try:
++ decoded_context[key] = salt.utils.stringutils.to_unicode(
++ value, encoding=SLS_ENCODING
++ )
++ except UnicodeDecodeError:
++ log.debug(
++ "Failed to decode using default encoding (%s), trying system encoding",
++ SLS_ENCODING,
++ )
++ decoded_context[key] = salt.utils.data.decode(value)
++
++ jinja_env.globals.update(decoded_context)
++ try:
++ template = jinja_env.from_string(tmplstr)
++ output = template.render(**decoded_context)
++ except jinja2.exceptions.UndefinedError as exc:
++ trace = traceback.extract_tb(sys.exc_info()[2])
++ line, out = _get_jinja_error(trace, context=decoded_context)
++ if not line:
++ tmplstr = ""
++ raise SaltRenderError("Jinja variable {}{}".format(exc, out), line, tmplstr)
++ except (
++ jinja2.exceptions.TemplateRuntimeError,
++ jinja2.exceptions.TemplateSyntaxError,
++ jinja2.exceptions.SecurityError,
++ ) as exc:
++ trace = traceback.extract_tb(sys.exc_info()[2])
++ line, out = _get_jinja_error(trace, context=decoded_context)
++ if not line:
++ tmplstr = ""
++ raise SaltRenderError(
++ "Jinja syntax error: {}{}".format(exc, out), line, tmplstr
++ )
++ except (SaltInvocationError, CommandExecutionError) as exc:
++ trace = traceback.extract_tb(sys.exc_info()[2])
++ line, out = _get_jinja_error(trace, context=decoded_context)
++ if not line:
++ tmplstr = ""
++ raise SaltRenderError(
++ "Problem running salt function in Jinja template: {}{}".format(
++ exc, out
++ ),
++ line,
++ tmplstr,
++ )
++ except Exception as exc: # pylint: disable=broad-except
++ tracestr = traceback.format_exc()
++ trace = traceback.extract_tb(sys.exc_info()[2])
++ line, out = _get_jinja_error(trace, context=decoded_context)
++ if not line:
++ tmplstr = ""
++ else:
++ tmplstr += "\n{}".format(tracestr)
++ log.debug("Jinja Error")
++ log.debug("Exception:", exc_info=True)
++ log.debug("Out: %s", out)
++ log.debug("Line: %s", line)
++ log.debug("TmplStr: %s", tmplstr)
++ log.debug("TraceStr: %s", tracestr)
++
++ raise SaltRenderError(
++ "Jinja error: {}{}".format(exc, out), line, tmplstr, trace=tracestr
++ )
+ finally:
+- if loader and hasattr(loader, "_file_client"):
+- if hasattr(loader._file_client, "destroy"):
+- loader._file_client.destroy()
++ if loader and isinstance(loader, salt.utils.jinja.SaltCacheLoader):
++ loader.destroy()
+
+ # Workaround a bug in Jinja that removes the final newline
+ # (https://github.com/mitsuhiko/jinja2/issues/75)
+@@ -569,9 +575,8 @@ def render_mako_tmpl(tmplstr, context, tmplpath=None):
+ except Exception: # pylint: disable=broad-except
+ raise SaltRenderError(mako.exceptions.text_error_template().render())
+ finally:
+- if lookup and hasattr(lookup, "_file_client"):
+- if hasattr(lookup._file_client, "destroy"):
+- lookup._file_client.destroy()
++ if lookup and isinstance(lookup, SaltMakoTemplateLookup):
++ lookup.destroy()
+
+
+ def render_wempy_tmpl(tmplstr, context, tmplpath=None):
+diff --git a/tests/pytests/integration/states/test_include.py b/tests/pytests/integration/states/test_include.py
+new file mode 100644
+index 00000000000..f814328c5e4
+--- /dev/null
++++ b/tests/pytests/integration/states/test_include.py
+@@ -0,0 +1,40 @@
++"""
++Integration tests for the jinja includes in states
++"""
++import logging
++
++import pytest
++
++log = logging.getLogger(__name__)
++
++
++@pytest.mark.slow_test
++def test_issue_64111(salt_master, salt_minion, salt_call_cli):
++ # This needs to be an integration test. A functional test does not
++ # trigger the issue being fixed.
++
++ macros_jinja = """
++ {% macro a_jinja_macro(arg) -%}
++ {{ arg }}
++ {%- endmacro %}
++ """
++
++ init_sls = """
++ include:
++ - common.file1
++ """
++
++ file1_sls = """
++ {% from 'common/macros.jinja' import a_jinja_macro with context %}
++
++ a state id:
++ cmd.run:
++ - name: echo {{ a_jinja_macro("hello world") }}
++ """
++ tf = salt_master.state_tree.base.temp_file
++
++ with tf("common/macros.jinja", macros_jinja):
++ with tf("common/init.sls", init_sls):
++ with tf("common/file1.sls", file1_sls):
++ ret = salt_call_cli.run("state.apply", "common")
++ assert ret.returncode == 0
+diff --git a/tests/pytests/unit/utils/jinja/test_salt_cache_loader.py b/tests/pytests/unit/utils/jinja/test_salt_cache_loader.py
+index 38c5ce5b724..e0f5fa158ff 100644
+--- a/tests/pytests/unit/utils/jinja/test_salt_cache_loader.py
++++ b/tests/pytests/unit/utils/jinja/test_salt_cache_loader.py
+@@ -15,7 +15,7 @@ import salt.utils.json # pylint: disable=unused-import
+ import salt.utils.stringutils # pylint: disable=unused-import
+ import salt.utils.yaml # pylint: disable=unused-import
+ from salt.utils.jinja import SaltCacheLoader
+-from tests.support.mock import Mock, patch
++from tests.support.mock import Mock, call, patch
+
+
+ @pytest.fixture
+@@ -224,14 +224,45 @@ def test_file_client_kwarg(minion_opts, mock_file_client):
+ assert loader._file_client is mock_file_client
+
+
+-def test_cache_loader_shutdown(minion_opts, mock_file_client):
++def test_cache_loader_passed_file_client(minion_opts, mock_file_client):
+ """
+ The shutdown method can be called without raising an exception when the
+ file_client does not have a destroy method
+ """
+- assert not hasattr(mock_file_client, "destroy")
+- mock_file_client.opts = minion_opts
+- loader = SaltCacheLoader(minion_opts, _file_client=mock_file_client)
+- assert loader._file_client is mock_file_client
+- # Shutdown method should not raise any exceptions
+- loader.shutdown()
++ # Test SaltCacheLoader creating and destroying the file client it created
++ file_client = Mock()
++ with patch("salt.fileclient.get_file_client", return_value=file_client):
++ loader = SaltCacheLoader(minion_opts)
++ assert loader._file_client is None
++ with loader:
++ assert loader._file_client is file_client
++ assert loader._file_client is None
++ assert file_client.mock_calls == [call.destroy()]
++
++ # Test SaltCacheLoader reusing the file client passed
++ file_client = Mock()
++ file_client.opts = {"file_roots": minion_opts["file_roots"]}
++ with patch("salt.fileclient.get_file_client", return_value=Mock()):
++ loader = SaltCacheLoader(minion_opts, _file_client=file_client)
++ assert loader._file_client is file_client
++ with loader:
++ assert loader._file_client is file_client
++ assert loader._file_client is file_client
++ assert file_client.mock_calls == []
++
++ # Test that SaltCacheLoader creates a new client even though a file
++ # client was passed in, because the "file_roots" option differs. In
++ # that case destroy is called on the new file client, but not on
++ # the file client passed in.
++ file_client = Mock()
++ file_client.opts = {"file_roots": ""}
++ new_file_client = Mock()
++ with patch("salt.fileclient.get_file_client", return_value=new_file_client):
++ loader = SaltCacheLoader(minion_opts, _file_client=file_client)
++ assert loader._file_client is file_client
++ with loader:
++ assert loader._file_client is not file_client
++ assert loader._file_client is new_file_client
++ assert loader._file_client is None
++ assert file_client.mock_calls == []
++ assert new_file_client.mock_calls == [call.destroy()]
+--
+2.40.0
+
diff --git a/make-tests-compatible-with-venv-bundle.patch b/make-tests-compatible-with-venv-bundle.patch
new file mode 100644
index 0000000..10e4022
--- /dev/null
+++ b/make-tests-compatible-with-venv-bundle.patch
@@ -0,0 +1,883 @@
+From 25c3df7713bd2a19a0980358fa72c1c48a08a1f4 Mon Sep 17 00:00:00 2001
+From: Marek Czernek
+Date: Wed, 7 Aug 2024 10:28:07 +0200
+Subject: [PATCH] Make tests compatible with venv bundle
+
+Co-authored-by: cmcmarrow
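+
+Most of the pip test changes follow one pattern: when the venv bundle
+exports VENV_PIP_TARGET, `pip install` gains a `--target` argument, so
+the expected command lines must include it. A minimal sketch of that
+pattern (the `expected` line is illustrative, not a real test):
+
+    import os
+    import sys
+
+    TARGET = []
+    if os.environ.get('VENV_PIP_TARGET'):
+        TARGET = ["--target", os.environ.get('VENV_PIP_TARGET')]
+
+    # Expected pip command lines then splice in *TARGET, e.g.:
+    expected = [sys.executable, "-m", "pip", "install", *TARGET, "pep8"]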
+---
+ tests/pytests/functional/modules/test_sdb.py | 1 +
+ tests/pytests/functional/modules/test_yaml.py | 2 +-
+ .../rthooks/test_salt_utils_vt_terminal.py | 22 +++++--
+ .../pyinstaller/rthooks/test_subprocess.py | 22 +++++--
+ .../utils/yamllint/test_yamllint.py | 2 +-
+ tests/pytests/unit/modules/test_pip.py | 63 +++++++++++++------
+ .../unit/modules/test_transactional_update.py | 13 ++--
+ tests/pytests/unit/states/test_pkgrepo.py | 3 +-
+ tests/pytests/unit/test_fileserver.py | 8 +--
+ tests/pytests/unit/utils/test_gitfs.py | 18 ++++++
+ tests/pytests/unit/utils/test_msgpack.py | 2 +-
+ tests/pytests/unit/utils/test_pycrypto.py | 25 ++++----
+ tests/unit/test_config.py | 20 +++++-
+ tests/unit/utils/test_sdb.py | 2 +-
+ tests/unit/utils/test_templates.py | 34 ++++++++++
+ 15 files changed, 177 insertions(+), 60 deletions(-)
+
+diff --git a/tests/pytests/functional/modules/test_sdb.py b/tests/pytests/functional/modules/test_sdb.py
+index 5519bf8ab57..837e7515d30 100644
+--- a/tests/pytests/functional/modules/test_sdb.py
++++ b/tests/pytests/functional/modules/test_sdb.py
+@@ -16,6 +16,7 @@ def minion_config_overrides():
+ }
+
+
++@pytest.mark.skip("Great module migration")
+ @pytest.mark.parametrize(
+ "expected_value",
+ (
+diff --git a/tests/pytests/functional/modules/test_yaml.py b/tests/pytests/functional/modules/test_yaml.py
+index 2a8fbc113ff..9aad0dfdc8c 100644
+--- a/tests/pytests/functional/modules/test_yaml.py
++++ b/tests/pytests/functional/modules/test_yaml.py
+@@ -13,7 +13,7 @@ try:
+ import salt.modules.yaml
+ import salt.utils.yamllint
+
+- YAMLLINT_AVAILABLE = True
++ YAMLLINT_AVAILABLE = salt.utils.yamllint.has_yamllint()
+ except ImportError:
+ YAMLLINT_AVAILABLE = False
+
+diff --git a/tests/pytests/functional/utils/pyinstaller/rthooks/test_salt_utils_vt_terminal.py b/tests/pytests/functional/utils/pyinstaller/rthooks/test_salt_utils_vt_terminal.py
+index c45b5730a8e..ea687c0776d 100644
+--- a/tests/pytests/functional/utils/pyinstaller/rthooks/test_salt_utils_vt_terminal.py
++++ b/tests/pytests/functional/utils/pyinstaller/rthooks/test_salt_utils_vt_terminal.py
+@@ -8,6 +8,9 @@ import salt.utils.pyinstaller.rthooks._overrides as overrides
+ from tests.support import mock
+ from tests.support.helpers import PatchedEnviron
+
++LD_LIBRARY_PATH = ""
++if os.environ.get('VIRTUAL_ENV'):
++ LD_LIBRARY_PATH = f"{os.environ.get('VIRTUAL_ENV')}/lib"
+
+ @pytest.fixture(params=("LD_LIBRARY_PATH", "LIBPATH"))
+ def envvar(request):
+@@ -17,9 +20,14 @@ def envvar(request):
+ @pytest.fixture
+ def meipass(envvar):
+ with mock.patch("salt.utils.pyinstaller.rthooks._overrides.sys") as patched_sys:
+- patched_sys._MEIPASS = "{}_VALUE".format(envvar)
+- assert overrides.sys._MEIPASS == "{}_VALUE".format(envvar)
+- yield "{}_VALUE".format(envvar)
++ ld_path_mock_val = f"{envvar}_VALUE"
++ if envvar == "LD_LIBRARY_PATH" and LD_LIBRARY_PATH:
++ # the venv-minion python wrapper hardcodes LD_LIBRARY_PATH, which
++ # we cannot overwrite from the test suite
++ ld_path_mock_val = LD_LIBRARY_PATH
++ patched_sys._MEIPASS = ld_path_mock_val
++ assert overrides.sys._MEIPASS == ld_path_mock_val
++ yield ld_path_mock_val
+ assert not hasattr(sys, "_MEIPASS")
+ assert not hasattr(overrides.sys, "_MEIPASS")
+
+@@ -111,7 +119,8 @@ def test_vt_terminal_environ_cleanup(envvar, meipass):
+ returned_env = json.loads(buffer_o)
+ assert returned_env != original_env
+ assert envvar in returned_env
+- assert returned_env[envvar] == ""
++ envvar_value = LD_LIBRARY_PATH if envvar == "LD_LIBRARY_PATH" else ""
++ assert returned_env[envvar] == envvar_value
+
+
+ def test_vt_terminal_environ_cleanup_passed_directly_not_removed(envvar, meipass):
+@@ -139,4 +148,7 @@ def test_vt_terminal_environ_cleanup_passed_directly_not_removed(envvar, meipass
+ returned_env = json.loads(buffer_o)
+ assert returned_env != original_env
+ assert envvar in returned_env
+- assert returned_env[envvar] == envvar
++ envvar_val = envvar
++ if LD_LIBRARY_PATH and envvar == "LD_LIBRARY_PATH":
++ envvar_val = LD_LIBRARY_PATH
++ assert returned_env[envvar] == envvar_val
+diff --git a/tests/pytests/functional/utils/pyinstaller/rthooks/test_subprocess.py b/tests/pytests/functional/utils/pyinstaller/rthooks/test_subprocess.py
+index 836e392d016..e4b5420d5e3 100644
+--- a/tests/pytests/functional/utils/pyinstaller/rthooks/test_subprocess.py
++++ b/tests/pytests/functional/utils/pyinstaller/rthooks/test_subprocess.py
+@@ -9,6 +9,9 @@ import salt.utils.pyinstaller.rthooks._overrides as overrides
+ from tests.support import mock
+ from tests.support.helpers import PatchedEnviron
+
++LD_LIBRARY_PATH = ""
++if os.environ.get('VIRTUAL_ENV'):
++ LD_LIBRARY_PATH = f"{os.environ.get('VIRTUAL_ENV')}/lib"
+
+ @pytest.fixture(params=("LD_LIBRARY_PATH", "LIBPATH"))
+ def envvar(request):
+@@ -18,9 +21,14 @@ def envvar(request):
+ @pytest.fixture
+ def meipass(envvar):
+ with mock.patch("salt.utils.pyinstaller.rthooks._overrides.sys") as patched_sys:
+- patched_sys._MEIPASS = "{}_VALUE".format(envvar)
+- assert overrides.sys._MEIPASS == "{}_VALUE".format(envvar)
+- yield "{}_VALUE".format(envvar)
++ ld_path_mock_val = f"{envvar}_VALUE"
++ if envvar == "LD_LIBRARY_PATH" and LD_LIBRARY_PATH:
++ # the venv-minion python wrapper hardcodes LD_LIBRARY_PATH, which
++ # we cannot overwrite from the test suite
++ ld_path_mock_val = LD_LIBRARY_PATH
++ patched_sys._MEIPASS = ld_path_mock_val
++ assert overrides.sys._MEIPASS == ld_path_mock_val
++ yield ld_path_mock_val
+ assert not hasattr(sys, "_MEIPASS")
+ assert not hasattr(overrides.sys, "_MEIPASS")
+
+@@ -88,7 +96,8 @@ def test_subprocess_popen_environ_cleanup(envvar, meipass):
+ returned_env = json.loads(stdout)
+ assert returned_env != original_env
+ assert envvar in returned_env
+- assert returned_env[envvar] == ""
++ envvar_value = LD_LIBRARY_PATH if envvar == "LD_LIBRARY_PATH" else ""
++ assert returned_env[envvar] == envvar_value
+
+
+ def test_subprocess_popen_environ_cleanup_passed_directly_not_removed(envvar, meipass):
+@@ -108,4 +117,7 @@ def test_subprocess_popen_environ_cleanup_passed_directly_not_removed(envvar, me
+ returned_env = json.loads(stdout)
+ assert returned_env != original_env
+ assert envvar in returned_env
+- assert returned_env[envvar] == envvar
++ envvar_val = envvar
++ if LD_LIBRARY_PATH and envvar == "LD_LIBRARY_PATH":
++ envvar_val = LD_LIBRARY_PATH
++ assert returned_env[envvar] == envvar_val
+diff --git a/tests/pytests/functional/utils/yamllint/test_yamllint.py b/tests/pytests/functional/utils/yamllint/test_yamllint.py
+index 403c6fc610e..3c730523c4d 100644
+--- a/tests/pytests/functional/utils/yamllint/test_yamllint.py
++++ b/tests/pytests/functional/utils/yamllint/test_yamllint.py
+@@ -7,7 +7,7 @@ import salt.utils.versions as versions
+ try:
+ import salt.utils.yamllint as yamllint
+
+- YAMLLINT_AVAILABLE = True
++ YAMLLINT_AVAILABLE = yamllint.has_yamllint()
+ except ImportError:
+ YAMLLINT_AVAILABLE = False
+
+diff --git a/tests/pytests/unit/modules/test_pip.py b/tests/pytests/unit/modules/test_pip.py
+index 4b2da77786b..fbe0dc5f1cf 100644
+--- a/tests/pytests/unit/modules/test_pip.py
++++ b/tests/pytests/unit/modules/test_pip.py
+@@ -15,6 +15,10 @@ MISSING_SETUP_PY_FILE = not os.path.exists(
+ os.path.join(RUNTIME_VARS.CODE_DIR, "setup.py")
+ )
+
++TARGET = []
++if os.environ.get('VENV_PIP_TARGET'):
++ TARGET = ["--target", os.environ.get('VENV_PIP_TARGET')]
++
+
+ class FakeFopen:
+ def __init__(self, filename):
+@@ -102,6 +106,7 @@ def test_install_frozen_app(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -123,6 +128,7 @@ def test_install_source_app(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -143,6 +149,7 @@ def test_fix4361(python_binary):
+ "install",
+ "--requirement",
+ "requirements.txt",
++ *TARGET,
+ ]
+ mock.assert_called_with(
+ expected_cmd,
+@@ -169,7 +176,7 @@ def test_install_multiple_editable(python_binary):
+ "git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting",
+ ]
+
+- expected = [*python_binary, "install"]
++ expected = [*python_binary, "install", *TARGET]
+ for item in editables:
+ expected.extend(["--editable", item])
+
+@@ -205,7 +212,7 @@ def test_install_multiple_pkgs_and_editables(python_binary):
+ "git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting",
+ ]
+
+- expected = [*python_binary, "install"]
++ expected = [*python_binary, "install", *TARGET]
+ expected.extend(pkgs)
+ for item in editables:
+ expected.extend(["--editable", item])
+@@ -241,6 +248,7 @@ def test_install_multiple_pkgs_and_editables(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ pkgs[0],
+ "--editable",
+ editables[0],
+@@ -268,7 +276,7 @@ def test_issue5940_install_multiple_pip_mirrors(python_binary):
+ expected = [*python_binary, "install", "--use-mirrors"]
+ for item in mirrors:
+ expected.extend(["--mirrors", item])
+- expected.append("pep8")
++ expected = [*expected, *TARGET, "pep8"]
+
+ # Passing mirrors as a list
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+@@ -300,6 +308,7 @@ def test_issue5940_install_multiple_pip_mirrors(python_binary):
+ "--use-mirrors",
+ "--mirrors",
+ mirrors[0],
++ *TARGET,
+ "pep8",
+ ]
+
+@@ -327,7 +336,7 @@ def test_install_with_multiple_find_links(python_binary):
+ expected = [*python_binary, "install"]
+ for item in find_links:
+ expected.extend(["--find-links", item])
+- expected.append(pkg)
++ expected = [*expected, *TARGET, pkg]
+
+ # Passing mirrors as a list
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+@@ -370,6 +379,7 @@ def test_install_with_multiple_find_links(python_binary):
+ "install",
+ "--find-links",
+ find_links[0],
++ *TARGET,
+ pkg,
+ ]
+
+@@ -435,6 +445,7 @@ def test_install_cached_requirements_used(python_binary):
+ "install",
+ "--requirement",
+ "my_cached_reqs",
++ *TARGET,
+ ]
+ mock.assert_called_with(
+ expected,
+@@ -491,6 +502,7 @@ def test_install_log_argument_in_resulting_command(python_binary):
+ "install",
+ "--log",
+ log_path,
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -521,7 +533,7 @@ def test_install_timeout_argument_in_resulting_command(python_binary):
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, timeout=10)
+ mock.assert_called_with(
+- expected + [10, pkg],
++ expected + [10, *TARGET, pkg],
+ saltenv="base",
+ runas=None,
+ use_vt=False,
+@@ -533,7 +545,7 @@ def test_install_timeout_argument_in_resulting_command(python_binary):
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, timeout="10")
+ mock.assert_called_with(
+- expected + ["10", pkg],
++ expected + ["10", *TARGET, pkg],
+ saltenv="base",
+ runas=None,
+ use_vt=False,
+@@ -557,6 +569,7 @@ def test_install_index_url_argument_in_resulting_command(python_binary):
+ "install",
+ "--index-url",
+ index_url,
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -579,6 +592,7 @@ def test_install_extra_index_url_argument_in_resulting_command(python_binary):
+ "install",
+ "--extra-index-url",
+ extra_index_url,
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -595,7 +609,7 @@ def test_install_no_index_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, no_index=True)
+- expected = [*python_binary, "install", "--no-index", pkg]
++ expected = [*python_binary, "install", "--no-index", *TARGET, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -611,7 +625,7 @@ def test_install_build_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, build=build)
+- expected = [*python_binary, "install", "--build", build, pkg]
++ expected = [*python_binary, "install", "--build", build, *TARGET, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -646,6 +660,7 @@ def test_install_download_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--download",
+ download,
+ pkg,
+@@ -664,7 +679,7 @@ def test_install_no_download_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, no_download=True)
+- expected = [*python_binary, "install", "--no-download", pkg]
++ expected = [*python_binary, "install", *TARGET, "--no-download", pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -691,6 +706,7 @@ def test_install_download_cache_dir_arguments_in_resulting_command(python_binary
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ cmd_arg,
+ download_cache,
+ pkg,
+@@ -720,7 +736,7 @@ def test_install_source_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, source=source)
+- expected = [*python_binary, "install", "--source", source, pkg]
++ expected = [*python_binary, "install", *TARGET, "--source", source, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -739,6 +755,7 @@ def test_install_exists_action_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--exists-action",
+ action,
+ pkg,
+@@ -761,7 +778,7 @@ def test_install_install_options_argument_in_resulting_command(python_binary):
+ install_options = ["--exec-prefix=/foo/bar", "--install-scripts=/foo/bar/bin"]
+ pkg = "pep8"
+
+- expected = [*python_binary, "install"]
++ expected = [*python_binary, "install", *TARGET]
+ for item in install_options:
+ expected.extend(["--install-option", item])
+ expected.append(pkg)
+@@ -797,6 +814,7 @@ def test_install_install_options_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--install-option",
+ install_options[0],
+ pkg,
+@@ -814,7 +832,7 @@ def test_install_global_options_argument_in_resulting_command(python_binary):
+ global_options = ["--quiet", "--no-user-cfg"]
+ pkg = "pep8"
+
+- expected = [*python_binary, "install"]
++ expected = [*python_binary, "install", *TARGET]
+ for item in global_options:
+ expected.extend(["--global-option", item])
+ expected.append(pkg)
+@@ -850,6 +868,7 @@ def test_install_global_options_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--global-option",
+ global_options[0],
+ pkg,
+@@ -868,7 +887,7 @@ def test_install_upgrade_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, upgrade=True)
+- expected = [*python_binary, "install", "--upgrade", pkg]
++ expected = [*python_binary, "install", *TARGET, "--upgrade", pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -886,6 +905,7 @@ def test_install_force_reinstall_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--force-reinstall",
+ pkg,
+ ]
+@@ -906,6 +926,7 @@ def test_install_ignore_installed_argument_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ "--ignore-installed",
+ pkg,
+ ]
+@@ -923,7 +944,7 @@ def test_install_no_deps_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, no_deps=True)
+- expected = [*python_binary, "install", "--no-deps", pkg]
++ expected = [*python_binary, "install", *TARGET, "--no-deps", pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -938,7 +959,7 @@ def test_install_no_install_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, no_install=True)
+- expected = [*python_binary, "install", "--no-install", pkg]
++ expected = [*python_binary, "install", *TARGET, "--no-install", pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -954,7 +975,7 @@ def test_install_proxy_argument_in_resulting_command(python_binary):
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ pip.install(pkg, proxy=proxy)
+- expected = [*python_binary, "install", "--proxy", proxy, pkg]
++ expected = [*python_binary, "install", "--proxy", proxy, *TARGET, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -981,7 +1002,7 @@ def test_install_proxy_false_argument_in_resulting_command(python_binary):
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ with patch.dict(pip.__opts__, config_mock):
+ pip.install(pkg, proxy=proxy)
+- expected = [*python_binary, "install", pkg]
++ expected = [*python_binary, "install", *TARGET, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -1012,6 +1033,7 @@ def test_install_global_proxy_in_resulting_command(python_binary):
+ "install",
+ "--proxy",
+ proxy,
++ *TARGET,
+ pkg,
+ ]
+ mock.assert_called_with(
+@@ -1032,6 +1054,7 @@ def test_install_multiple_requirements_arguments_in_resulting_command(python_bin
+ expected = [*python_binary, "install"]
+ for item in cached_reqs:
+ expected.extend(["--requirement", item])
++ expected.extend(TARGET)
+
+ # Passing option as a list
+ mock = MagicMock(return_value={"retcode": 0, "stdout": ""})
+@@ -1068,6 +1091,7 @@ def test_install_multiple_requirements_arguments_in_resulting_command(python_bin
+ "install",
+ "--requirement",
+ cached_reqs[0],
++ *TARGET,
+ ]
+ mock.assert_called_with(
+ expected,
+@@ -1088,6 +1112,7 @@ def test_install_extra_args_arguments_in_resulting_command(python_binary):
+ expected = [
+ *python_binary,
+ "install",
++ *TARGET,
+ pkg,
+ "--latest-pip-kwarg",
+ "param",
+@@ -1604,7 +1629,7 @@ def test_install_pre_argument_in_resulting_command(python_binary):
+ with patch.dict(pip.__salt__, {"cmd.run_all": mock}):
+ with patch("salt.modules.pip.version", MagicMock(return_value="1.3")):
+ pip.install(pkg, pre_releases=True)
+- expected = [*python_binary, "install", pkg]
++ expected = [*python_binary, "install", *TARGET, pkg]
+ mock.assert_called_with(
+ expected,
+ saltenv="base",
+@@ -1620,7 +1645,7 @@ def test_install_pre_argument_in_resulting_command(python_binary):
+ ):
+ with patch("salt.modules.pip._get_pip_bin", MagicMock(return_value=["pip"])):
+ pip.install(pkg, pre_releases=True)
+- expected = ["pip", "install", "--pre", pkg]
++ expected = ["pip", "install", *TARGET, "--pre", pkg]
+ mock_run_all.assert_called_with(
+ expected,
+ saltenv="base",
+diff --git a/tests/pytests/unit/modules/test_transactional_update.py b/tests/pytests/unit/modules/test_transactional_update.py
+index dbd72fd74bf..e0ef2abd0f3 100644
+--- a/tests/pytests/unit/modules/test_transactional_update.py
++++ b/tests/pytests/unit/modules/test_transactional_update.py
+@@ -1,3 +1,4 @@
++import os
+ import pytest
+
+ import salt.loader.context
+@@ -10,6 +11,10 @@ pytestmark = [
+ pytest.mark.skip_on_windows(reason="Not supported on Windows"),
+ ]
+
++SALT_CALL_BINARY = "salt-call"
++if os.environ.get('VIRTUAL_ENV'):
++ SALT_CALL_BINARY = f"{os.environ.get('VIRTUAL_ENV')}/bin/salt-call"
++
+
+ @pytest.fixture
+ def configure_loader_modules():
+@@ -379,7 +384,7 @@ def test_call_fails_function():
+ "--continue",
+ "--quiet",
+ "run",
+- "salt-call",
++ SALT_CALL_BINARY,
+ "--out",
+ "json",
+ "-l",
+@@ -411,7 +416,7 @@ def test_call_success_no_reboot():
+ "--continue",
+ "--quiet",
+ "run",
+- "salt-call",
++ SALT_CALL_BINARY,
+ "--out",
+ "json",
+ "-l",
+@@ -454,7 +459,7 @@ def test_call_success_reboot():
+ "--continue",
+ "--quiet",
+ "run",
+- "salt-call",
++ SALT_CALL_BINARY,
+ "--out",
+ "json",
+ "-l",
+@@ -488,7 +493,7 @@ def test_call_success_parameters():
+ "--continue",
+ "--quiet",
+ "run",
+- "salt-call",
++ SALT_CALL_BINARY,
+ "--out",
+ "json",
+ "-l",
+diff --git a/tests/pytests/unit/states/test_pkgrepo.py b/tests/pytests/unit/states/test_pkgrepo.py
+index 5f540bd2454..14d17ad3f9f 100644
+--- a/tests/pytests/unit/states/test_pkgrepo.py
++++ b/tests/pytests/unit/states/test_pkgrepo.py
+@@ -1,7 +1,6 @@
+ """
+ :codeauthor: Tyler Johnson
+ """
+-
+ import pytest
+
+ import salt.states.pkgrepo as pkgrepo
+@@ -390,7 +389,7 @@ def test_migrated_wrong_method():
+ with patch.dict(pkgrepo.__grains__, grains), patch.dict(
+ pkgrepo.__salt__, salt_mock
+ ):
+- assert pkgrepo.migrated("/mnt", method_="magic") == {
++ assert pkgrepo.migrated("/mnt", method="magic") == {
+ "name": "/mnt",
+ "result": False,
+ "changes": {},
+diff --git a/tests/pytests/unit/test_fileserver.py b/tests/pytests/unit/test_fileserver.py
+index 8dd3ea0a27d..49be3967dc4 100644
+--- a/tests/pytests/unit/test_fileserver.py
++++ b/tests/pytests/unit/test_fileserver.py
+@@ -75,9 +75,7 @@ def test_file_server_url_escape(tmp_path):
+ opts = {
+ "fileserver_backend": ["roots"],
+ "extension_modules": "",
+- "optimization_order": [
+- 0,
+- ],
++ "optimization_order": [0, 1],
+ "file_roots": {
+ "base": [fileroot],
+ },
+@@ -102,9 +100,7 @@ def test_file_server_serve_url_escape(tmp_path):
+ opts = {
+ "fileserver_backend": ["roots"],
+ "extension_modules": "",
+- "optimization_order": [
+- 0,
+- ],
++ "optimization_order": [0, 1],
+ "file_roots": {
+ "base": [fileroot],
+ },
+diff --git a/tests/pytests/unit/utils/test_gitfs.py b/tests/pytests/unit/utils/test_gitfs.py
+index 2bf627049f9..bd7d74cb2b2 100644
+--- a/tests/pytests/unit/utils/test_gitfs.py
++++ b/tests/pytests/unit/utils/test_gitfs.py
+@@ -3,6 +3,7 @@ import time
+
+ import pytest
+
++import salt.config
+ import salt.fileserver.gitfs
+ import salt.utils.gitfs
+ from salt.exceptions import FileserverConfigError
+@@ -24,6 +25,23 @@ if HAS_PYGIT2:
+ import pygit2
+
+
++@pytest.fixture
++def minion_opts(tmp_path):
++ """
++ Default minion configuration with relative temporary paths to not require root permissions.
++ """
++ root_dir = tmp_path / "minion"
++ opts = salt.config.DEFAULT_MINION_OPTS.copy()
++ opts["__role"] = "minion"
++ opts["root_dir"] = str(root_dir)
++ for name in ("cachedir", "pki_dir", "sock_dir", "conf_dir"):
++ dirpath = root_dir / name
++ dirpath.mkdir(parents=True)
++ opts[name] = str(dirpath)
++ opts["log_file"] = "logs/minion.log"
++ return opts
++
++
+ @pytest.mark.parametrize(
+ "role_name,role_class",
+ (
+diff --git a/tests/pytests/unit/utils/test_msgpack.py b/tests/pytests/unit/utils/test_msgpack.py
+index a09b6e5b8b1..3d0b9d7fc8c 100644
+--- a/tests/pytests/unit/utils/test_msgpack.py
++++ b/tests/pytests/unit/utils/test_msgpack.py
+@@ -3,7 +3,7 @@ import pytest
+ import salt.utils.msgpack
+ from tests.support.mock import MagicMock, patch
+
+-
++@pytest.mark.skipif(salt.utils.msgpack.version < (1, 0, 0), reason="Test requires msgpack version >= 1.0.0")
+ def test_load_encoding(tmp_path):
+ """
+ test when using msgpack version >= 1.0.0 we
+diff --git a/tests/pytests/unit/utils/test_pycrypto.py b/tests/pytests/unit/utils/test_pycrypto.py
+index 693ad10e240..9e0b58d1b35 100644
+--- a/tests/pytests/unit/utils/test_pycrypto.py
++++ b/tests/pytests/unit/utils/test_pycrypto.py
+@@ -57,21 +57,20 @@ def test_gen_hash_crypt(algorithm, expected):
+ """
+ Test gen_hash with crypt library
+ """
+- with patch("salt.utils.pycrypto.methods", {}):
+- ret = salt.utils.pycrypto.gen_hash(
+- crypt_salt=expected["salt"], password=passwd, algorithm=algorithm
+- )
+- assert ret == expected["hashed"]
++ ret = salt.utils.pycrypto.gen_hash(
++ crypt_salt=expected["salt"], password=passwd, algorithm=algorithm
++ )
++ assert ret == expected["hashed"]
+
+- ret = salt.utils.pycrypto.gen_hash(
+- crypt_salt=expected["badsalt"], password=passwd, algorithm=algorithm
+- )
+- assert ret != expected["hashed"]
++ ret = salt.utils.pycrypto.gen_hash(
++ crypt_salt=expected["badsalt"], password=passwd, algorithm=algorithm
++ )
++ assert ret != expected["hashed"]
+
+- ret = salt.utils.pycrypto.gen_hash(
+- crypt_salt=None, password=passwd, algorithm=algorithm
+- )
+- assert ret != expected["hashed"]
++ ret = salt.utils.pycrypto.gen_hash(
++ crypt_salt=None, password=passwd, algorithm=algorithm
++ )
++ assert ret != expected["hashed"]
+
+
+ @pytest.mark.skipif(not salt.utils.pycrypto.HAS_CRYPT, reason="crypt not available")
+diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
+index 5cc58c273d0..6995b01c892 100644
+--- a/tests/unit/test_config.py
++++ b/tests/unit/test_config.py
+@@ -83,9 +83,12 @@ class SampleConfTest(DefaultConfigsBase, TestCase):
+ """
+ master_config = SAMPLE_CONF_DIR + "master"
+ ret = salt.config._read_conf_file(master_config)
++ # openSUSE modified the default config in
++ # https://github.com/opensuse/salt/commit/6ffbf7fcc178f32c670b177b25ed64658c59f1bf
++ expected_config = {"user": "salt", "syndic_user": "salt"}
+ self.assertEqual(
+ ret,
+- {},
++ expected_config,
+ "Sample config file '{}' must be commented out.".format(master_config),
+ )
+
+@@ -347,7 +350,10 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
+
+ with patched_environ(SALT_MINION_CONFIG=env_fpath):
+ # Should load from env variable, not the default configuration file
+- config = salt.config.minion_config("{}/minion".format(CONFIG_DIR))
++ # Override defaults from venv-minion conf
++ defaults = salt.config.DEFAULT_MINION_OPTS.copy()
++ defaults["default_include"] = ""
++ config = salt.config.minion_config("{}/minion".format(CONFIG_DIR), defaults=defaults)
+ self.assertEqual(config["log_file"], env_fpath)
+
+ root_dir = os.path.join(tempdir, "foo", "bar")
+@@ -1946,6 +1952,11 @@ class APIConfigTestCase(DefaultConfigsBase, TestCase):
+ if salt.utils.platform.is_windows():
+ expected = "{}\\var\\log\\salt\\api".format(RUNTIME_VARS.TMP_ROOT_DIR)
+
++ if os.environ.get("VIRTUAL_ENV"):
++ # venv bundle configures --salt-logs-dir=%{_localstatedir}/log
++ # in the RPM spec file
++ expected = expected.replace("/salt/api", "/api")
++
+ ret = salt.config.api_config("/some/fake/path")
+ self.assertEqual(ret["log_file"], expected)
+
+@@ -2017,6 +2028,11 @@ class APIConfigTestCase(DefaultConfigsBase, TestCase):
+ mock_pid = "c:\\mock\\root\\var\\run\\salt-api.pid"
+ mock_master_config["root_dir"] = "c:\\mock\\root"
+
++ if os.environ.get("VIRTUAL_ENV"):
++ # venv bundle configures --salt-logs-dir=%{_localstatedir}/log
++ # in the RPM spec file
++ mock_log = mock_log.replace("/salt", "")
++
+ with patch(
+ "salt.config.client_config", MagicMock(return_value=mock_master_config)
+ ):
+diff --git a/tests/unit/utils/test_sdb.py b/tests/unit/utils/test_sdb.py
+index 87886cbc521..69cbda07beb 100644
+--- a/tests/unit/utils/test_sdb.py
++++ b/tests/unit/utils/test_sdb.py
+@@ -49,7 +49,7 @@ class SdbTestCase(TestCase, LoaderModuleMockMixin):
+ # test with SQLite database write and read
+
+ def test_sqlite_get_found(self):
+- expected = {b"name": b"testone", b"number": 46}
++ expected = {"name": "testone", "number": 46}
+ sdb.sdb_set("sdb://test_sdb_data/test1", expected, self.sdb_opts)
+ resp = sdb.sdb_get("sdb://test_sdb_data/test1", self.sdb_opts)
+ self.assertEqual(resp, expected)
+diff --git a/tests/unit/utils/test_templates.py b/tests/unit/utils/test_templates.py
+index 264b4ae801d..604395f5e08 100644
+--- a/tests/unit/utils/test_templates.py
++++ b/tests/unit/utils/test_templates.py
+@@ -1,6 +1,7 @@
+ """
+ Unit tests for salt.utils.templates.py
+ """
++
+ import logging
+ import os
+ import sys
+@@ -22,6 +23,20 @@ try:
+ except ImportError:
+ HAS_CHEETAH = False
+
++try:
++ import genshi as _
++
++ HAS_GENSHI = True
++except ImportError:
++ HAS_GENSHI = False
++
++try:
++ import mako as _
++
++ HAS_MAKO = True
++except ImportError:
++ HAS_MAKO = False
++
+ log = logging.getLogger(__name__)
+
+
+@@ -83,16 +98,19 @@ class RenderTestCase(TestCase):
+ assert res == expected
+
+ ### Tests for mako template
++ @pytest.mark.skipif(not HAS_MAKO, reason="Mako module not available for testing")
+ def test_render_mako_sanity(self):
+ tmpl = """OK"""
+ res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(not HAS_MAKO, reason="Mako module not available for testing")
+ def test_render_mako_evaluate(self):
+ tmpl = """${ "OK" }"""
+ res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(not HAS_MAKO, reason="Mako module not available for testing")
+ def test_render_mako_evaluate_multi(self):
+ tmpl = """
+ % if 1:
+@@ -103,6 +121,7 @@ class RenderTestCase(TestCase):
+ stripped = res.strip()
+ self.assertEqual(stripped, "OK")
+
++ @pytest.mark.skipif(not HAS_MAKO, reason="Mako module not available for testing")
+ def test_render_mako_variable(self):
+ tmpl = """${ var }"""
+
+@@ -152,21 +171,33 @@ class RenderTestCase(TestCase):
+ self.assertEqual(res, "OK")
+
+ ### Tests for genshi template (xml-based)
++ @pytest.mark.skipif(
++ not HAS_GENSHI, reason="Genshi module not available for testing"
++ )
+ def test_render_genshi_sanity(self):
+ tmpl = """OK"""
+ res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(
++ not HAS_GENSHI, reason="Genshi module not available for testing"
++ )
+ def test_render_genshi_evaluate(self):
+ tmpl = """${ "OK" }"""
+ res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(
++ not HAS_GENSHI, reason="Genshi module not available for testing"
++ )
+ def test_render_genshi_evaluate_condition(self):
+ tmpl = """OK"""
+ res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(
++ not HAS_GENSHI, reason="Genshi module not available for testing"
++ )
+ def test_render_genshi_variable(self):
+ tmpl = """$var"""
+
+@@ -175,6 +206,9 @@ class RenderTestCase(TestCase):
+ res = salt.utils.templates.render_genshi_tmpl(tmpl, ctx)
+ self.assertEqual(res, "OK")
+
++ @pytest.mark.skipif(
++ not HAS_GENSHI, reason="Genshi module not available for testing"
++ )
+ def test_render_genshi_variable_replace(self):
+ tmpl = """not ok"""
+
+--
+2.46.0
+
diff --git a/mark-salt-3006-as-released-586.patch b/mark-salt-3006-as-released-586.patch
new file mode 100644
index 0000000..c37ca75
--- /dev/null
+++ b/mark-salt-3006-as-released-586.patch
@@ -0,0 +1,495 @@
+From c1408333364ac25ff5d316afa9674f7687217b0c Mon Sep 17 00:00:00 2001
+From: Dominik Gedon
+Date: Thu, 3 Aug 2023 11:08:21 +0200
+Subject: [PATCH] Mark Salt 3006 as released (#586)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Mark Salt 3006 as released
+
+Without this, commands like
+
+```
+salt '*' salt_version.equal 'Sulfur'
+```
+
+will not work properly and will return False even though Salt 3006 is in use.
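+
+A rough sketch of the lookup involved (illustrative only: LNAMES and
+__saltstack_version__ are assumed from salt/version.py; this snippet is
+not part of the change itself):
+
+```
+from salt.version import SaltStackVersion, __saltstack_version__
+
+# Codename -> version-info lookup table; "sulfur" must resolve to 3006
+# and be marked as released for the equality check to succeed.
+requested = SaltStackVersion.LNAMES["sulfur"]
+
+# An approximation of the comparison salt_version.equal performs.
+is_equal = __saltstack_version__.info[: len(requested)] == requested
+```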
+
+Signed-off-by: Dominik Gedon
+
+* Fix detection of Salt codename by salt_version module
+
+* Fix the version detection mess caused by a bad version definition
+
+* Add new unit tests and fix existing ones
+
+* Fix SaltStackVersion string for new versions format
+
+* Do not crash when passing numbers to 'salt_version.get_release_number'
+
+* Fix salt_version execution module documentation
+
+---------
+
+Signed-off-by: Dominik Gedon
+Co-authored-by: Pablo Suárez Hernández
+---
+ salt/modules/salt_version.py | 8 +-
+ salt/version.py | 218 +++++++++---------
+ .../pytests/unit/modules/test_salt_version.py | 55 ++++-
+ tests/pytests/unit/test_version.py | 10 +-
+ 4 files changed, 176 insertions(+), 115 deletions(-)
+
+diff --git a/salt/modules/salt_version.py b/salt/modules/salt_version.py
+index 1b5421fee4..99dae5f61a 100644
+--- a/salt/modules/salt_version.py
++++ b/salt/modules/salt_version.py
+@@ -20,7 +20,7 @@ A simple example might be something like the following:
+ .. code-block:: jinja
+
+ {# a boolean check #}
+- {% set option_deprecated = salt['salt_version.less_than']("3001") %}
++ {% set option_deprecated = salt['salt_version.less_than']("Sodium") %}
+
+ {% if option_deprecated %}
+