diff --git a/3002-set-distro-requirement-to-oldest-supported-vers.patch b/3002-set-distro-requirement-to-oldest-supported-vers.patch
deleted file mode 100644
index 6177596..0000000
--- a/3002-set-distro-requirement-to-oldest-supported-vers.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From ffe924ef060a9b9540a4dcd117e045eaefa62513 Mon Sep 17 00:00:00 2001
-From: Alexander Graul
-Date: Tue, 9 Mar 2021 13:46:03 +0100
-Subject: [PATCH] 3002: Set distro requirement to oldest supported
- version (#327)
-
-In the released Salt packages, python3-distro is taken from the
-official repositories on supported operating systems. The oldest
-supported python3-distro version is 1.0.1 in Ubuntu 18.04 universe and
-Debian 9. FreeBSD is an exception and requires 1.3.0.
-The mismatch between the version specified in requirements/base.txt and
-what is actually used by the released packages can be confusing.
-
-(cherry picked from commit 5c9c0ab9cdf2bf67bfdd259b53aa15297d1656ce)
-(cherry picked from commit 0ff35358f79e9df8b06fb345fd79c1d22ed91179)
-
-Co-authored-by: Pedro Algarvio
----
- requirements/base.txt              | 2 +-
- requirements/static/pkg/freebsd.in | 1 +
- 2 files changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/requirements/base.txt b/requirements/base.txt
-index ffe4bc98f1..6af972bd1b 100644
---- a/requirements/base.txt
-+++ b/requirements/base.txt
-@@ -3,7 +3,7 @@ msgpack>=0.5,!=0.5.5
- PyYAML
- MarkupSafe
- requests>=1.0.0
--distro>=1.5
-+distro>=1.0.1
- # Requirements for Tornado 4.5.3 (vendored as salt.ext.tornado)
- singledispatch==3.4.0.3; python_version < '3.4'
- # Required by Tornado to handle threads stuff.
-diff --git a/requirements/static/pkg/freebsd.in b/requirements/static/pkg/freebsd.in
-index 879a378822..7cfa3dcce8 100644
---- a/requirements/static/pkg/freebsd.in
-+++ b/requirements/static/pkg/freebsd.in
-@@ -8,3 +8,4 @@ python-dateutil>=2.8.0
- python-gnupg>=0.4.4
- setproctitle>=1.1.10
- timelib>=0.2.5
-+distro>=1.3.0
---
-2.30.1
-
-
diff --git a/3003.3-postgresql-json-support-in-pillar-423.patch b/3003.3-postgresql-json-support-in-pillar-423.patch
new file mode 100644
index 0000000..5d2fbbf
--- /dev/null
+++ b/3003.3-postgresql-json-support-in-pillar-423.patch
@@ -0,0 +1,1008 @@
+From 6ffe3270d58527576688011e2b3bd826ec3941ee Mon Sep 17 00:00:00 2001
+From: Cedric Bosdonnat
+Date: Fri, 24 Sep 2021 17:01:34 +0200
+Subject: [PATCH] 3003.3 - postgresql JSON support in pillar (#423)
+
+* Allow single field returns from SQL pillar
+
+Several SQL databases support native JSON storage. When storing
+pillars in this way, the SQL query result already returns a dict,
+without the need for a key column.
+
+* Add and adapt tests for as_json sql mode
+
+* Add missing entries to rest of sql pillar tests
+
+* Add changelog entry
+
+* Fix the sql_base pillar merge for as_json
+
+Use salt.utils.update() to recursively merge the JSON dicts of the
+returned SQL queries.
+
+Co-authored-by: Ondrej Holecek
+---
+ changelog/60905.added                      |  1 +
+ salt/pillar/sql_base.py                    | 38 +++++++++++++++
+ tests/pytests/unit/pillar/test_sql_base.py | 43 ++++++++++++++++
+ tests/unit/pillar/test_mysql.py            | 57 ++++++++++++++++++++++
+ tests/unit/pillar/test_sqlcipher.py        | 32 ++++++++++++
+ tests/unit/pillar/test_sqlite3.py          | 32 ++++++++++++
+ 6 files changed, 203 insertions(+)
+ create mode 100644 changelog/60905.added
+ create mode 100644 tests/pytests/unit/pillar/test_sql_base.py
+
+diff --git a/changelog/60905.added b/changelog/60905.added
+new file mode 100644
+index 0000000000..3fe39286a8
+--- /dev/null
++++ b/changelog/60905.added
+@@ -0,0 +1 @@
++Support querying for JSON data in SQL external pillar
+diff --git a/salt/pillar/sql_base.py b/salt/pillar/sql_base.py
+index 976ca8c0d8..9d9f0c9c9f 100644
+--- a/salt/pillar/sql_base.py
++++ b/salt/pillar/sql_base.py
+@@ -137,6 +137,33 @@ These columns define list grouping
+ The range for with_lists is 1 to number_of_fields, inclusive.
+ Numbers outside this range are ignored.
+
++If you specify `as_json: True` in the mapping expression and query only for
++a single value, the returned data is considered to be in JSON format and
++will be merged directly.
++
++.. code-block:: yaml
++
++    ext_pillar:
++      - sql_base:
++          - query: "SELECT json_pillar FROM pillars WHERE minion_id = %s"
++            as_json: True
++
++The processed JSON entries are recursively merged in a single dictionary.
++Additionally, if `as_list` is set to `True`, the lists will be merged in case of collision.
++
++For instance, the following rows:
++
++    {"a": {"b": [1, 2]}, "c": 3}
++    {"a": {"b": [1, 3]}, "d": 4}
++
++will result in the following pillar with `as_list=False`
++
++    {"a": {"b": [1, 3]}, "c": 3, "d": 4}
++
++and with `as_list=True`
++
++    {"a": {"b": [1, 2, 3]}, "c": 3, "d": 4}
++
+ Finally, if you pass the queries in via a mapping, the key will be the
+ first level name where as passing them in as a list will place them in the
+ root. This isolates the query results into their own subtrees.
+@@ -179,6 +206,7 @@ from salt.ext import six
+ from salt.ext.six.moves import range
+
+ # Import Salt libs
++from salt.utils.dictupdate import update
+ from salt.utils.odict import OrderedDict
+
+ # Please don't strip redundant parentheses from this file.
+@@ -208,6 +236,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)):
+     num_fields = 0
+     depth = 0
+     as_list = False
++    as_json = False
+     with_lists = None
+     ignore_null = False
+
+@@ -267,6 +296,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)):
+             "query": "",
+             "depth": 0,
+             "as_list": False,
++            "as_json": False,
+             "with_lists": None,
+             "ignore_null": False,
+         }
+@@ -324,6 +354,13 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)):
+         for ret in rows:
+             # crd is the Current Return Data level, to make this non-recursive.
+ crd = self.focus ++ ++ # We have just one field without any key, assume returned row is already a dict ++ # aka JSON storage ++ if self.as_json and self.num_fields == 1: ++ crd = update(crd, ret[0], merge_lists=self.as_list) ++ continue ++ + # Walk and create dicts above the final layer + for i in range(0, self.depth - 1): + # At the end we'll use listify to find values to make a list of +@@ -443,6 +480,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)): + ) + self.enter_root(root) + self.as_list = details["as_list"] ++ self.as_json = details["as_json"] + if details["with_lists"]: + self.with_lists = details["with_lists"] + else: +diff --git a/tests/pytests/unit/pillar/test_sql_base.py b/tests/pytests/unit/pillar/test_sql_base.py +new file mode 100644 +index 0000000000..0d44c2d608 +--- /dev/null ++++ b/tests/pytests/unit/pillar/test_sql_base.py +@@ -0,0 +1,43 @@ ++import pytest ++import salt.pillar.sql_base as sql_base ++from tests.support.mock import MagicMock ++ ++ ++class FakeExtPillar(sql_base.SqlBaseExtPillar): ++ """ ++ Mock SqlBaseExtPillar implementation for testing purpose ++ """ ++ ++ @classmethod ++ def _db_name(cls): ++ return "fake" ++ ++ def _get_cursor(self): ++ return MagicMock() ++ ++ ++@pytest.mark.parametrize("as_list", [True, False]) ++def test_process_results_as_json(as_list): ++ """ ++ Validates merging of dict values returned from JSON datatype. ++ """ ++ return_data = FakeExtPillar() ++ return_data.as_list = as_list ++ return_data.as_json = True ++ return_data.with_lists = None ++ return_data.enter_root(None) ++ return_data.process_fields(["json_data"], 0) ++ test_dicts = [ ++ ({"a": [1]},), ++ ({"b": [2, 3]},), ++ ({"a": [4]},), ++ ({"c": {"d": [4, 5], "e": 6}},), ++ ({"f": [{"g": 7, "h": "test"}], "c": {"g": 8}},), ++ ] ++ return_data.process_results(test_dicts) ++ assert return_data.result == { ++ "a": [1, 4] if as_list else [4], ++ "b": [2, 3], ++ "c": {"d": [4, 5], "e": 6, "g": 8}, ++ "f": [{"g": 7, "h": "test"}], ++ } +diff --git a/tests/unit/pillar/test_mysql.py b/tests/unit/pillar/test_mysql.py +index bc81eb4174..9db724329d 100644 +--- a/tests/unit/pillar/test_mysql.py ++++ b/tests/unit/pillar/test_mysql.py +@@ -26,6 +26,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -47,6 +48,7 @@ class MysqlPillarTestCase(TestCase): + {"query": "SELECT blah7", "as_list": True}, + {"query": "SELECT blah8", "with_lists": "1"}, + {"query": "SELECT blah9", "with_lists": "1,2"}, ++ {"query": "SELECT json1", "as_json": True}, + ], + {}, + ) +@@ -59,6 +61,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -69,6 +72,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -79,6 +83,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -89,6 +94,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -99,6 +105,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + 
"with_lists": None, + "ignore_null": False, + }, +@@ -109,6 +116,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -119,6 +127,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -129,6 +138,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1], + "ignore_null": False, + }, +@@ -139,10 +149,22 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah9", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1, 2], + "ignore_null": False, + }, + ], ++ [ ++ None, ++ { ++ "query": "SELECT json1", ++ "depth": 0, ++ "as_list": False, ++ "as_json": True, ++ "with_lists": None, ++ "ignore_null": False, ++ }, ++ ], + ], + qbuffer, + ) +@@ -159,6 +181,7 @@ class MysqlPillarTestCase(TestCase): + "5": {"query": "SELECT blah5"}, + "6": {"query": "SELECT blah6", "depth": 2}, + "7": {"query": "SELECT blah7", "as_list": True}, ++ "8": {"query": "SELECT json1", "as_json": True}, + }, + ) + qbuffer = return_data.extract_queries(args, kwargs) +@@ -170,6 +193,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -180,6 +204,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -190,6 +215,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -200,6 +226,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -210,6 +237,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -220,6 +248,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -230,6 +259,18 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, ++ "with_lists": None, ++ "ignore_null": False, ++ }, ++ ], ++ [ ++ "8", ++ { ++ "query": "SELECT json1", ++ "depth": 0, ++ "as_list": False, ++ "as_json": True, + "with_lists": None, + "ignore_null": False, + }, +@@ -261,6 +302,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -271,6 +313,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -281,6 +324,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -291,6 +335,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + 
"ignore_null": False, + }, +@@ -301,6 +346,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -311,6 +357,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -349,6 +396,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -359,6 +407,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -369,6 +418,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -379,6 +429,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -389,6 +440,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -399,6 +451,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -409,6 +462,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -419,6 +473,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -440,6 +495,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -450,6 +506,7 @@ class MysqlPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +diff --git a/tests/unit/pillar/test_sqlcipher.py b/tests/unit/pillar/test_sqlcipher.py +index d7e9eed6f6..6f7b21fb3f 100644 +--- a/tests/unit/pillar/test_sqlcipher.py ++++ b/tests/unit/pillar/test_sqlcipher.py +@@ -38,6 +38,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -48,6 +49,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -58,6 +60,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -68,6 +71,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -78,6 +82,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -88,6 +93,7 @@ class SQLCipherPillarTestCase(TestCase): + 
"query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -98,6 +104,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -108,6 +115,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1], + "ignore_null": False, + }, +@@ -118,6 +126,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah9", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1, 2], + "ignore_null": False, + }, +@@ -149,6 +158,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -159,6 +169,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -169,6 +180,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -179,6 +191,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -189,6 +202,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -199,6 +213,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -209,6 +224,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -240,6 +256,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -250,6 +267,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -260,6 +278,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -270,6 +289,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -280,6 +300,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -290,6 +311,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -328,6 +350,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -338,6 +361,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + 
"as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -348,6 +372,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -358,6 +383,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -368,6 +394,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -378,6 +405,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -388,6 +416,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -398,6 +427,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -419,6 +449,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -429,6 +460,7 @@ class SQLCipherPillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +diff --git a/tests/unit/pillar/test_sqlite3.py b/tests/unit/pillar/test_sqlite3.py +index da780682e7..69efd0a3e8 100644 +--- a/tests/unit/pillar/test_sqlite3.py ++++ b/tests/unit/pillar/test_sqlite3.py +@@ -38,6 +38,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -48,6 +49,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -58,6 +60,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -68,6 +71,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -78,6 +82,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -88,6 +93,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -98,6 +104,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -108,6 +115,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1], + "ignore_null": False, + }, +@@ -118,6 +126,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah9", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": [1, 2], + 
"ignore_null": False, + }, +@@ -149,6 +158,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -159,6 +169,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -169,6 +180,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -179,6 +191,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -189,6 +202,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -199,6 +213,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -209,6 +224,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -240,6 +256,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -250,6 +267,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -260,6 +278,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -270,6 +289,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah1", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -280,6 +300,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -290,6 +311,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -328,6 +350,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -338,6 +361,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -348,6 +372,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah3", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -358,6 +383,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah4", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -368,6 +394,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah5", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -378,6 +405,7 @@ class 
SQLite3PillarTestCase(TestCase): + "query": "SELECT blah6", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -388,6 +416,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah7", + "depth": 2, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -398,6 +427,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah8", + "depth": 0, + "as_list": True, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -419,6 +449,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +@@ -429,6 +460,7 @@ class SQLite3PillarTestCase(TestCase): + "query": "SELECT blah2", + "depth": 0, + "as_list": False, ++ "as_json": False, + "with_lists": None, + "ignore_null": False, + }, +-- +2.33.0 + + diff --git a/_lastrevision b/_lastrevision index 3ea8210..bf40173 100644 --- a/_lastrevision +++ b/_lastrevision @@ -1 +1 @@ -71392e10750f7481475066788a23a39ad92d0c64 \ No newline at end of file +deacfe2304a0b9f34a9130b61bd69dea851962a7 \ No newline at end of file diff --git a/_service b/_service index b060a52..ec6074d 100644 --- a/_service +++ b/_service @@ -3,7 +3,7 @@ https://github.com/openSUSE/salt-packaging.git salt package - 3002.2 + 3003.3 git @@ -12,8 +12,8 @@ codeload.github.com - openSUSE/salt/tar.gz/v3002.2-suse - v3002.2.tar.gz + openSUSE/salt/tar.gz/v3003.3-suse + v3003.3.tar.gz diff --git a/accumulated-changes-from-yomi-167.patch b/accumulated-changes-from-yomi-167.patch deleted file mode 100644 index 46b7f0c..0000000 --- a/accumulated-changes-from-yomi-167.patch +++ /dev/null @@ -1,223 +0,0 @@ -From 828650500159fd7040d2fa76b2fc4d2b627f7065 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Tue, 22 Oct 2019 11:02:33 +0200 -Subject: [PATCH] Accumulated changes from Yomi (#167) - -* core.py: ignore wrong product_name files - -Some firmwares (like some NUC machines), do not provide valid -/sys/class/dmi/id/product_name strings. In those cases an -UnicodeDecodeError exception happens. - -This patch ignore this kind of issue during the grains creation. 
- -(cherry picked from commit 2d57d2a6063488ad9329a083219e3826e945aa2d) - -* zypperpkg: understand product type - -(cherry picked from commit b865491b74679140f7a71c5ba50d482db47b600f) ---- - salt/grains/core.py | 6 +-- - salt/modules/zypperpkg.py | 22 ---------- - tests/unit/grains/test_core.py | 64 +++++++++++++++++++++++++++- - tests/unit/modules/test_zypperpkg.py | 38 +++++++++++++++++ - 4 files changed, 103 insertions(+), 27 deletions(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index 0dc1d97f97..a2983e388b 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -1046,7 +1046,7 @@ def _virtual(osdata): - if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"): - try: - with salt.utils.files.fopen( -- "/sys/devices/virtual/dmi/id/product_name", "rb" -+ "/sys/devices/virtual/dmi/id/product_name", "r" - ) as fhr: - output = salt.utils.stringutils.to_unicode( - fhr.read(), errors="replace" -@@ -1066,9 +1066,7 @@ def _virtual(osdata): - except UnicodeDecodeError: - # Some firmwares provide non-valid 'product_name' - # files, ignore them -- log.debug( -- "The content in /sys/devices/virtual/dmi/id/product_name is not valid" -- ) -+ pass - except OSError: - pass - elif osdata["kernel"] == "FreeBSD": -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 2daec0f380..b5621174a4 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -958,28 +958,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs): - } - ] - -- for include in includes: -- if include in ("pattern", "patch"): -- if include == "pattern": -- pkgs = list_installed_patterns(root=root) -- elif include == "patch": -- pkgs = list_installed_patches(root=root) -- else: -- pkgs = [] -- for pkg in pkgs: -- pkg_extended_name = "{}:{}".format(include, pkg) -- info = info_available(pkg_extended_name, refresh=False, root=root) -- _ret[pkg_extended_name] = [ -- { -- "epoch": None, -- "version": info[pkg]["version"], -- "release": None, -- "arch": info[pkg]["arch"], -- "install_date": None, -- "install_date_time_t": None, -- } -- ] -- - __context__[contextkey] = _ret - - return __salt__["pkg_resource.format_pkg_list"]( -diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index a5ceeb8317..0dc3423646 100644 ---- a/tests/unit/grains/test_core.py -+++ b/tests/unit/grains/test_core.py -@@ -2047,13 +2047,74 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - result = core.path() - assert result == {"path": path, "systempath": comps}, result - -+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") -+ @patch("os.path.exists") -+ @patch("salt.utils.platform.is_proxy") -+ def test_kernelparams_return(self): -+ expectations = [ -+ ( -+ "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64", -+ { -+ "kernelparams": [ -+ ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64") -+ ] -+ }, -+ ), -+ ( -+ "root=/dev/mapper/centos_daemon-root", -+ {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]}, -+ ), -+ ( -+ "rhgb quiet ro", -+ {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]}, -+ ), -+ ('param="value1"', {"kernelparams": [("param", "value1")]}), -+ ( -+ 'param="value1 value2 value3"', -+ {"kernelparams": [("param", "value1 value2 value3")]}, -+ ), -+ ( -+ 'param="value1 value2 value3" LANG="pl" ro', -+ { -+ "kernelparams": [ -+ ("param", "value1 value2 value3"), -+ ("LANG", "pl"), -+ ("ro", None), -+ ] -+ }, -+ ), -+ ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}), -+ ( -+ 
'param="value1:value2:value3"', -+ {"kernelparams": [("param", "value1:value2:value3")]}, -+ ), -+ ( -+ 'param="value1,value2,value3"', -+ {"kernelparams": [("param", "value1,value2,value3")]}, -+ ), -+ ( -+ 'param="value1" param="value2" param="value3"', -+ { -+ "kernelparams": [ -+ ("param", "value1"), -+ ("param", "value2"), -+ ("param", "value3"), -+ ] -+ }, -+ ), -+ ] -+ -+ for cmdline, expectation in expectations: -+ with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)): -+ self.assertEqual(core.kernelparams(), expectation) -+ - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") - @patch("os.path.exists") - @patch("salt.utils.platform.is_proxy") - def test__hw_data_linux_empty(self, is_proxy, exists): - is_proxy.return_value = False - exists.return_value = True -- with patch("salt.utils.files.fopen", mock_open(read_data=b"")): -+ with patch("salt.utils.files.fopen", mock_open(read_data="")): - self.assertEqual( - core._hw_data({"kernel": "Linux"}), - { -@@ -2067,6 +2128,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ) - - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") -+ @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3") - @patch("os.path.exists") - @patch("salt.utils.platform.is_proxy") - def test__hw_data_linux_unicode_error(self, is_proxy, exists): -diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 5d4e7766b6..1b62122e0e 100644 ---- a/tests/unit/modules/test_zypperpkg.py -+++ b/tests/unit/modules/test_zypperpkg.py -@@ -1424,6 +1424,44 @@ Repository 'DUMMY' not found by its alias, number, or URI. - ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}} - ) - -+ @patch("salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)) -+ @patch( -+ "salt.modules.zypperpkg.list_products", -+ MagicMock(return_value={"openSUSE": {"installed": False, "summary": "test"}}), -+ ) -+ @patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock( -+ side_effect=[{"product:openSUSE": "15.2"}, {"product:openSUSE": "15.3"}] -+ ), -+ ) -+ def test_install_product_ok(self): -+ """ -+ Test successfully product installation. -+ """ -+ with patch.dict( -+ zypper.__salt__, -+ { -+ "pkg_resource.parse_targets": MagicMock( -+ return_value=(["product:openSUSE"], None) -+ ) -+ }, -+ ): -+ with patch( -+ "salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock() -+ ) as zypper_mock: -+ ret = zypper.install("product:openSUSE", includes=["product"]) -+ zypper_mock.assert_called_once_with( -+ "--no-refresh", -+ "install", -+ "--auto-agree-with-licenses", -+ "--name", -+ "product:openSUSE", -+ ) -+ self.assertDictEqual( -+ ret, {"product:openSUSE": {"old": "15.2", "new": "15.3"}} -+ ) -+ - def test_remove_purge(self): - """ - Test package removal --- -2.29.2 - - diff --git a/accumulated-changes-required-for-yomi-165.patch b/accumulated-changes-required-for-yomi-165.patch deleted file mode 100644 index 29409fa..0000000 --- a/accumulated-changes-required-for-yomi-165.patch +++ /dev/null @@ -1,117 +0,0 @@ -From 7d35fdba84b6e1b62a3abc71e518366a35efb662 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Tue, 30 Jul 2019 11:23:12 +0200 -Subject: [PATCH] Accumulated changes required for Yomi (#165) - -* cmdmod: fix runas and group in run_chroot - -The parameters runas and group for cmdmod.run() will change the efective -user and group before executing the command. 
But in a chroot environment it is
-expected that the change happens inside the chroot, not outside, as the
-user and groups are referring to objects that can only exist inside the
-environment.
-
-This patch adds the userspec parameter to the chroot command, to change
-the user in the correct place.
-
-(cherry picked from commit f0434aaeeee3ace4e3fc65c04e69984f08b2541e)
-
-* chroot: add missing sys directory
-
-(cherry picked from commit cdf74426bcad4e8bf329bf604c77ea83bfca8b2c)
-
-* chroot: change variable name to root
-
-(cherry picked from commit 7f68b65b1b0f9eec2a6b07b02714ead0121f0e4b)
-
-* chroot: fix bug in safe_kwargs iteration
-
-(cherry picked from commit 39da1c69ea2781bed6e9d8e6879b70d65fa5a5b0)
-
-* test_cmdmod: fix test_run_cwd_in_combination_with_runas
-
-(cherry picked from commit 42640ecf161caf64c61e9b02927882f92c850092)
-
-* test_cmdmod: add test_run_chroot_runas test
-
-(cherry picked from commit d900035089a22f6741d2095fd1f6694597041a88)
-
-* freezer: do not fail if cache dir is present
-
-(cherry picked from commit 25137c51e6d6e53e3099b6cddbf51d4cb2c53d8d)
-
-* freezer: clean freeze YAML profile on restore
-
-(cherry picked from commit 56b97c997257f12038399549dc987b7723ab225f)
-
-* zypperpkg: fix pkg.list_pkgs cache
-
-The cache from pkg.list_pkgs for the zypper installer is too aggressive.
-Some parameters will deliver different package lists, like root and
-includes. The current cache does not take those parameters into
-consideration, so the next time that this function is called, the last
-list of packages will be returned, without checking if the current
-parameters match the old ones.
-
-This patch creates a different cache key for each parameter combination,
-so the cached data will be separated too.
-
-(cherry picked from commit 9c54bb3e8c93ba21fc583bdefbcadbe53cbcd7b5)
----
- salt/modules/zypperpkg.py            |  1 -
- tests/unit/modules/test_zypperpkg.py | 22 +++++++++++++++++++++-
- 2 files changed, 21 insertions(+), 2 deletions(-)
-
-diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index c996935bff..b099f3e5d7 100644
---- a/salt/modules/zypperpkg.py
-+++ b/salt/modules/zypperpkg.py
-@@ -879,7 +879,6 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
-     # inclusion types are passed
-     contextkey = "pkg.list_pkgs_{}_{}".format(root, includes)
-
--    # TODO(aplanas): this cached value depends on the parameters
-     if contextkey not in __context__:
-         ret = {}
-         cmd = ["rpm"]
-diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index 032785395e..5d4e7766b6 100644
---- a/tests/unit/modules/test_zypperpkg.py
-+++ b/tests/unit/modules/test_zypperpkg.py
-@@ -912,7 +912,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
- ), patch.dict( - zypper.__salt__, {"pkg_resource.stringify": MagicMock()} - ), patch.dict( -- pkg_resource.__salt__, {"pkg.parse_arch": zypper.parse_arch} -+ pkg_resource.__salt__, -+ {"pkg.parse_arch_from_name": zypper.parse_arch_from_name}, - ): - pkgs = zypper.list_pkgs( - attr=["epoch", "release", "arch", "install_date_time_t"] -@@ -1950,3 +1951,22 @@ pattern() = package-c""" - "package-a": {"installed": True, "summary": "description a",}, - "package-b": {"installed": False, "summary": "description b",}, - } -+ -+ def test__clean_cache_empty(self): -+ """Test that an empty cached can be cleaned""" -+ context = {} -+ with patch.dict(zypper.__context__, context): -+ zypper._clean_cache() -+ assert context == {} -+ -+ def test__clean_cache_filled(self): -+ """Test that a filled cached can be cleaned""" -+ context = { -+ "pkg.list_pkgs_/mnt_[]": None, -+ "pkg.list_pkgs_/mnt_[patterns]": None, -+ "pkg.list_provides": None, -+ "pkg.other_data": None, -+ } -+ with patch.dict(zypper.__context__, context): -+ zypper._clean_cache() -+ self.assertEqual(zypper.__context__, {"pkg.other_data": None}) --- -2.29.2 - - diff --git a/add-alibaba-cloud-linux-2-by-backporting-upstream-s-.patch b/add-alibaba-cloud-linux-2-by-backporting-upstream-s-.patch index c9cc62b..3e076ca 100644 --- a/add-alibaba-cloud-linux-2-by-backporting-upstream-s-.patch +++ b/add-alibaba-cloud-linux-2-by-backporting-upstream-s-.patch @@ -1,45 +1,48 @@ -From ec0d11ed66e8541a9ccaebc85aab4724013fb71f Mon Sep 17 00:00:00 2001 +From 2e810cc876f7b7110326231de51d78ff5d12eae6 Mon Sep 17 00:00:00 2001 From: Pau Garcia Quiles Date: Tue, 13 Apr 2021 10:31:09 +0200 Subject: [PATCH] Add Alibaba Cloud Linux 2 by backporting upstream's grain and discarding my own (#352) --- - salt/grains/core.py | 4 ++-- - tests/unit/grains/test_core.py | 26 +++++++++++++++++--------- - 2 files changed, 19 insertions(+), 11 deletions(-) + salt/grains/core.py | 2 ++ + tests/unit/grains/test_core.py | 28 ++++++++++++++++++++++++++++ + 2 files changed, 30 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py -index 09f9d29788..2b965a2a8a 100644 +index 19937f008e..bce8c95179 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1547,7 +1547,7 @@ _OS_NAME_MAP = { - "slesexpand": "RES", +@@ -1560,6 +1560,7 @@ _OS_NAME_MAP = { "linuxmint": "Mint", "neon": "KDE neon", -- "alibaba": "Alibaba Cloud (Aliyun)", + "pop": "Pop", + "alibabaclo": "Alinux", } # Map the 'os' grain to the 'os_family' grain -@@ -1622,7 +1622,7 @@ _OS_FAMILY_MAP = { - "AIX": "AIX", +@@ -1637,6 +1638,7 @@ _OS_FAMILY_MAP = { "TurnKey": "Debian", + "Pop": "Debian", "AstraLinuxCE": "Debian", -- "Alibaba Cloud (Aliyun)": "RedHat", + "Alinux": "RedHat", } # Matches any possible format: diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 6aa05abe40..8280d6de47 100644 +index ac2d515bcd..fa06bb27ab 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -782,17 +782,25 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS - ''' - _os_release_map = { -- '_linux_distribution': ('Alibaba Cloud Linux (Aliyun Linux)', '2.1903', 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)'), +@@ -846,6 +846,34 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + } + self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation) + ++ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') 
++ def test_aliyunlinux2_os_grains(self): ++ ''' ++ Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS ++ ''' ++ _os_release_map = { + "os_release_file": { + "NAME": "Alibaba Cloud Linux (Aliyun Linux)", + "VERSION": "2.1903 LTS (Hunting Beagle)", @@ -49,16 +52,8 @@ index 6aa05abe40..8280d6de47 100644 + "ANSI_COLOR": "0;31", + }, + "_linux_distribution": ("alinux", "2.1903", "LTS"), - } - expectation = { -- 'os': 'Alibaba Cloud (Aliyun)', -- 'os_family': 'RedHat', -- 'oscodename': 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)', -- 'osfullname': 'Alibaba Cloud Linux (Aliyun Linux)', -- 'osrelease': '2.1903', -- 'osrelease_info': (2, 1903), -- 'osmajorrelease': 2, -- 'osfinger': 'Alibaba Cloud Linux (Aliyun Linux)-2', ++ } ++ expectation = { + "os": "Alinux", + "os_family": "RedHat", + "oscodename": "Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)", @@ -67,10 +62,13 @@ index 6aa05abe40..8280d6de47 100644 + "osrelease_info": (2, 1903), + "osmajorrelease": 2, + "osfinger": "Alibaba Cloud Linux (Aliyun Linux)-2", - } - self._run_os_grains_tests(None, _os_release_map, expectation) - ++ } ++ self._run_os_grains_tests(None, _os_release_map, expectation) ++ + @skipIf(not salt.utils.platform.is_windows(), "System is not Windows") + def test_windows_platform_data(self): + """ -- -2.30.2 +2.33.0 diff --git a/add-all_versions-parameter-to-include-all-installed-.patch b/add-all_versions-parameter-to-include-all-installed-.patch deleted file mode 100644 index 6212a62..0000000 --- a/add-all_versions-parameter-to-include-all-installed-.patch +++ /dev/null @@ -1,959 +0,0 @@ -From 2e300c770c227cf394929b7d5d025d5c52f1ae2c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Mon, 14 May 2018 11:33:13 +0100 -Subject: [PATCH] Add "all_versions" parameter to include all installed - version on rpm.info - -Enable "all_versions" parameter for zypper.info_installed - -Enable "all_versions" parameter for yumpkg.info_installed - -Prevent adding failed packages when pkg name contains the arch (on SUSE) - -Add 'all_versions' documentation for info_installed on yum/zypper modules - -Add unit tests for info_installed with all_versions - -Refactor: use dict.setdefault instead if-else statement - -Allow removing only specific package versions with zypper and yum ---- - salt/states/pkg.py | 285 +++++++++++++++++++++++---------------------- - 1 file changed, 146 insertions(+), 139 deletions(-) - -diff --git a/salt/states/pkg.py b/salt/states/pkg.py -index 51b5a06e8f..a1b2a122bb 100644 ---- a/salt/states/pkg.py -+++ b/salt/states/pkg.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Installation of packages using OS package managers such as yum or apt-get - ========================================================================= -@@ -71,21 +70,16 @@ state module - used. This will be addressed in a future release of Salt. 
- """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import fnmatch - import logging - import os - import re - --# Import Salt libs - import salt.utils.pkg - import salt.utils.platform - import salt.utils.versions - from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError -- --# Import 3rd-party libs - from salt.ext import six - from salt.modules.pkg_resource import _repack_pkgs - from salt.output import nested -@@ -323,7 +317,7 @@ def _find_download_targets( - "name": name, - "changes": {}, - "result": True, -- "comment": "Version {0} of package '{1}' is already " -+ "comment": "Version {} of package '{}' is already " - "downloaded".format(version, name), - } - -@@ -334,7 +328,7 @@ def _find_download_targets( - "name": name, - "changes": {}, - "result": True, -- "comment": "Package {0} is already " "downloaded".format(name), -+ "comment": "Package {} is already " "downloaded".format(name), - } - - version_spec = False -@@ -349,13 +343,13 @@ def _find_download_targets( - comments.append( - "The following package(s) were not found, and no " - "possible matches were found in the package db: " -- "{0}".format(", ".join(sorted(problems["no_suggest"]))) -+ "{}".format(", ".join(sorted(problems["no_suggest"]))) - ) - if problems.get("suggest"): -- for pkgname, suggestions in six.iteritems(problems["suggest"]): -+ for pkgname, suggestions in problems["suggest"].items(): - comments.append( -- "Package '{0}' not found (possible matches: " -- "{1})".format(pkgname, ", ".join(suggestions)) -+ "Package '{}' not found (possible matches: " -+ "{})".format(pkgname, ", ".join(suggestions)) - ) - if comments: - if len(comments) > 1: -@@ -371,7 +365,7 @@ def _find_download_targets( - # Check current downloaded versions against specified versions - targets = {} - problems = [] -- for pkgname, pkgver in six.iteritems(to_download): -+ for pkgname, pkgver in to_download.items(): - cver = cur_pkgs.get(pkgname, {}) - # Package not yet downloaded, so add to targets - if not cver: -@@ -401,7 +395,7 @@ def _find_download_targets( - - if not targets: - # All specified packages are already downloaded -- msg = "All specified packages{0} are already downloaded".format( -+ msg = "All specified packages{} are already downloaded".format( - " (matching specified versions)" if version_spec else "" - ) - return {"name": name, "changes": {}, "result": True, "comment": msg} -@@ -425,7 +419,7 @@ def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): - "name": name, - "changes": {}, - "result": True, -- "comment": "Advisory patch {0} is already " "installed".format(name), -+ "comment": "Advisory patch {} is already " "installed".format(name), - } - - # Find out which advisory patches will be targeted in the call to pkg.install -@@ -477,12 +471,22 @@ def _find_remove_targets( - # Check current versions against specified versions - targets = [] - problems = [] -- for pkgname, pkgver in six.iteritems(to_remove): -+ for pkgname, pkgver in to_remove.items(): - # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names - origin = bool(re.search("/", pkgname)) - - if __grains__["os"] == "FreeBSD" and origin: -- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == pkgname] -+ cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname] -+ elif __grains__["os_family"] == "Suse": -+ # On SUSE systems. 
Zypper returns packages without "arch" in name -+ try: -+ namepart, archpart = pkgname.rsplit(".", 1) -+ except ValueError: -+ cver = cur_pkgs.get(pkgname, []) -+ else: -+ if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",): -+ pkgname = namepart -+ cver = cur_pkgs.get(pkgname, []) - else: - cver = cur_pkgs.get(pkgname, []) - -@@ -518,7 +522,7 @@ def _find_remove_targets( - - if not targets: - # All specified packages are already absent -- msg = "All specified packages{0} are already absent".format( -+ msg = "All specified packages{} are already absent".format( - " (matching specified versions)" if version_spec else "" - ) - return {"name": name, "changes": {}, "result": True, "comment": msg} -@@ -619,7 +623,7 @@ def _find_install_targets( - "name": name, - "changes": {}, - "result": False, -- "comment": "Invalidly formatted '{0}' parameter. See " -+ "comment": "Invalidly formatted '{}' parameter. See " - "minion log.".format("pkgs" if pkgs else "sources"), - } - -@@ -634,7 +638,7 @@ def _find_install_targets( - "name": name, - "changes": {}, - "result": False, -- "comment": "Package {0} not found in the " -+ "comment": "Package {} not found in the " - "repository.".format(name), - } - if version is None: -@@ -656,7 +660,7 @@ def _find_install_targets( - origin = bool(re.search("/", name)) - - if __grains__["os"] == "FreeBSD" and origin: -- cver = [k for k, v in six.iteritems(cur_pkgs) if v["origin"] == name] -+ cver = [k for k, v in cur_pkgs.items() if v["origin"] == name] - else: - cver = cur_pkgs.get(name, []) - -@@ -667,7 +671,7 @@ def _find_install_targets( - "name": name, - "changes": {}, - "result": True, -- "comment": "Version {0} of package '{1}' is already " -+ "comment": "Version {} of package '{}' is already " - "installed".format(version, name), - } - -@@ -678,7 +682,7 @@ def _find_install_targets( - "name": name, - "changes": {}, - "result": True, -- "comment": "Package {0} is already " "installed".format(name), -+ "comment": "Package {} is already " "installed".format(name), - } - - version_spec = False -@@ -687,21 +691,19 @@ def _find_install_targets( - # enforced. Takes extra time. 
Disable for improved performance - if not skip_suggestions: - # Perform platform-specific pre-flight checks -- not_installed = dict( -- [ -- (name, version) -- for name, version in desired.items() -- if not ( -- name in cur_pkgs -- and ( -- version is None -- or _fulfills_version_string( -- cur_pkgs[name], version, ignore_epoch=ignore_epoch -- ) -+ not_installed = { -+ name: version -+ for name, version in desired.items() -+ if not ( -+ name in cur_pkgs -+ and ( -+ version is None -+ or _fulfills_version_string( -+ cur_pkgs[name], version, ignore_epoch=ignore_epoch - ) - ) -- ] -- ) -+ ) -+ } - if not_installed: - try: - problems = _preflight_check(not_installed, **kwargs) -@@ -713,13 +715,13 @@ def _find_install_targets( - comments.append( - "The following package(s) were not found, and no " - "possible matches were found in the package db: " -- "{0}".format(", ".join(sorted(problems["no_suggest"]))) -+ "{}".format(", ".join(sorted(problems["no_suggest"]))) - ) - if problems.get("suggest"): -- for pkgname, suggestions in six.iteritems(problems["suggest"]): -+ for pkgname, suggestions in problems["suggest"].items(): - comments.append( -- "Package '{0}' not found (possible matches: " -- "{1})".format(pkgname, ", ".join(suggestions)) -+ "Package '{}' not found (possible matches: " -+ "{})".format(pkgname, ", ".join(suggestions)) - ) - if comments: - if len(comments) > 1: -@@ -733,9 +735,7 @@ def _find_install_targets( - - # Resolve the latest package version for any packages with "latest" in the - # package version -- wants_latest = ( -- [] if sources else [x for x, y in six.iteritems(desired) if y == "latest"] -- ) -+ wants_latest = [] if sources else [x for x, y in desired.items() if y == "latest"] - if wants_latest: - resolved_latest = __salt__["pkg.latest_version"]( - *wants_latest, refresh=refresh, **kwargs -@@ -766,7 +766,7 @@ def _find_install_targets( - problems = [] - warnings = [] - failed_verify = False -- for package_name, version_string in six.iteritems(desired): -+ for package_name, version_string in desired.items(): - cver = cur_pkgs.get(package_name, []) - if resolve_capabilities and not cver and package_name in cur_prov: - cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) -@@ -795,12 +795,12 @@ def _find_install_targets( - problems.append(err.format(version_string, "file not found")) - continue - elif not os.path.exists(cached_path): -- problems.append("{0} does not exist on minion".format(version_string)) -+ problems.append("{} does not exist on minion".format(version_string)) - continue - source_info = __salt__["lowpkg.bin_pkg_info"](cached_path) - if source_info is None: - warnings.append( -- "Failed to parse metadata for {0}".format(version_string) -+ "Failed to parse metadata for {}".format(version_string) - ) - continue - else: -@@ -923,13 +923,24 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None): - has_origin = "/" in pkgname - - if __grains__["os"] == "FreeBSD" and has_origin: -- cver = [k for k, v in six.iteritems(new_pkgs) if v["origin"] == pkgname] -+ cver = [k for k, v in new_pkgs.items() if v["origin"] == pkgname] - elif __grains__["os"] == "MacOS" and has_origin: - cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split("/")[-1])) - elif __grains__["os"] == "OpenBSD": - cver = new_pkgs.get(pkgname.split("%")[0]) - elif __grains__["os_family"] == "Debian": - cver = new_pkgs.get(pkgname.split("=")[0]) -+ elif __grains__["os_family"] == "Suse": -+ # On SUSE systems. 
Zypper returns packages without "arch" in name -+ try: -+ namepart, archpart = pkgname.rsplit(".", 1) -+ except ValueError: -+ cver = new_pkgs.get(pkgname) -+ else: -+ if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",): -+ cver = new_pkgs.get(namepart) -+ else: -+ cver = new_pkgs.get(pkgname) - else: - cver = new_pkgs.get(pkgname) - if not cver and pkgname in new_caps: -@@ -964,7 +975,7 @@ def _get_desired_pkg(name, desired): - oper = "" - else: - oper = "=" -- return "{0}{1}{2}".format(name, oper, "" if not desired[name] else desired[name]) -+ return "{}{}{}".format(name, oper, "" if not desired[name] else desired[name]) - - - def _preflight_check(desired, fromrepo, **kwargs): -@@ -1709,8 +1720,8 @@ def installed( - "comment": "pkg.verify not implemented", - } - -- if not isinstance(version, six.string_types) and version is not None: -- version = six.text_type(version) -+ if not isinstance(version, str) and version is not None: -+ version = str(version) - - kwargs["allow_updates"] = allow_updates - -@@ -1754,7 +1765,7 @@ def installed( - "name": name, - "changes": {}, - "result": False, -- "comment": six.text_type(exc), -+ "comment": str(exc), - } - - if "result" in hold_ret and not hold_ret["result"]: -@@ -1763,7 +1774,7 @@ def installed( - "changes": {}, - "result": False, - "comment": "An error was encountered while " -- "holding/unholding package(s): {0}".format(hold_ret["comment"]), -+ "holding/unholding package(s): {}".format(hold_ret["comment"]), - } - else: - modified_hold = [ -@@ -1779,16 +1790,16 @@ def installed( - ] - - for i in modified_hold: -- result["comment"] += ".\n{0}".format(i["comment"]) -+ result["comment"] += ".\n{}".format(i["comment"]) - result["result"] = i["result"] - result["changes"][i["name"]] = i["changes"] - - for i in not_modified_hold: -- result["comment"] += ".\n{0}".format(i["comment"]) -+ result["comment"] += ".\n{}".format(i["comment"]) - result["result"] = i["result"] - - for i in failed_hold: -- result["comment"] += ".\n{0}".format(i["comment"]) -+ result["comment"] += ".\n{}".format(i["comment"]) - result["result"] = i["result"] - return result - -@@ -1805,8 +1816,8 @@ def installed( - - # Remove any targets not returned by _find_install_targets - if pkgs: -- pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] -- pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) -+ pkgs = [dict([(x, y)]) for x, y in targets.items()] -+ pkgs.extend([dict([(x, y)]) for x, y in to_reinstall.items()]) - elif sources: - oldsources = sources - sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] -@@ -1823,12 +1834,12 @@ def installed( - summary = ", ".join([_get_desired_pkg(x, targets) for x in targets]) - comment.append( - "The following packages would be " -- "installed/updated: {0}".format(summary) -+ "installed/updated: {}".format(summary) - ) - if to_unpurge: - comment.append( - "The following packages would have their selection status " -- "changed from 'purge' to 'install': {0}".format(", ".join(to_unpurge)) -+ "changed from 'purge' to 'install': {}".format(", ".join(to_unpurge)) - ) - if to_reinstall: - # Add a comment for each package in to_reinstall with its -@@ -1852,7 +1863,7 @@ def installed( - else: - pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) - comment.append( -- "Package '{0}' would be reinstalled because the " -+ "Package '{}' would be reinstalled because the " - "following files have been altered:".format(pkgstr) - ) - comment.append(_nested_output(altered_files[reinstall_pkg])) -@@ 
-1896,7 +1907,7 @@ def installed( - ret["changes"] = {} - ret["comment"] = ( - "An error was encountered while installing " -- "package(s): {0}".format(exc) -+ "package(s): {}".format(exc) - ) - if warnings: - ret.setdefault("warnings", []).extend(warnings) -@@ -1907,7 +1918,7 @@ def installed( - - if isinstance(pkg_ret, dict): - changes["installed"].update(pkg_ret) -- elif isinstance(pkg_ret, six.string_types): -+ elif isinstance(pkg_ret, str): - comment.append(pkg_ret) - # Code below will be looking for a dictionary. If this is a string - # it means that there was an exception raised and that no packages -@@ -1921,7 +1932,7 @@ def installed( - action = "pkg.hold" if kwargs["hold"] else "pkg.unhold" - hold_ret = __salt__[action](name=name, pkgs=desired) - except (CommandExecutionError, SaltInvocationError) as exc: -- comment.append(six.text_type(exc)) -+ comment.append(str(exc)) - ret = { - "name": name, - "changes": changes, -@@ -1938,7 +1949,7 @@ def installed( - "changes": {}, - "result": False, - "comment": "An error was encountered while " -- "holding/unholding package(s): {0}".format(hold_ret["comment"]), -+ "holding/unholding package(s): {}".format(hold_ret["comment"]), - } - if warnings: - ret.setdefault("warnings", []).extend(warnings) -@@ -1996,11 +2007,11 @@ def installed( - summary = ", ".join([_get_desired_pkg(x, desired) for x in modified]) - if len(summary) < 20: - comment.append( -- "The following packages were installed/updated: " "{0}".format(summary) -+ "The following packages were installed/updated: " "{}".format(summary) - ) - else: - comment.append( -- "{0} targeted package{1} {2} installed/updated.".format( -+ "{} targeted package{} {} installed/updated.".format( - len(modified), - "s" if len(modified) > 1 else "", - "were" if len(modified) > 1 else "was", -@@ -2014,14 +2025,14 @@ def installed( - comment.append(i["comment"]) - if len(changes[change_name]["new"]) > 0: - changes[change_name]["new"] += "\n" -- changes[change_name]["new"] += "{0}".format(i["changes"]["new"]) -+ changes[change_name]["new"] += "{}".format(i["changes"]["new"]) - if len(changes[change_name]["old"]) > 0: - changes[change_name]["old"] += "\n" -- changes[change_name]["old"] += "{0}".format(i["changes"]["old"]) -+ changes[change_name]["old"] += "{}".format(i["changes"]["old"]) - else: - comment.append(i["comment"]) - changes[change_name] = {} -- changes[change_name]["new"] = "{0}".format(i["changes"]["new"]) -+ changes[change_name]["new"] = "{}".format(i["changes"]["new"]) - - # Any requested packages that were not targeted for install or reinstall - if not_modified: -@@ -2031,11 +2042,11 @@ def installed( - summary = ", ".join([_get_desired_pkg(x, desired) for x in not_modified]) - if len(not_modified) <= 20: - comment.append( -- "The following packages were already installed: " "{0}".format(summary) -+ "The following packages were already installed: " "{}".format(summary) - ) - else: - comment.append( -- "{0} targeted package{1} {2} already installed".format( -+ "{} targeted package{} {} already installed".format( - len(not_modified), - "s" if len(not_modified) > 1 else "", - "were" if len(not_modified) > 1 else "was", -@@ -2054,7 +2065,7 @@ def installed( - else: - summary = ", ".join([_get_desired_pkg(x, desired) for x in failed]) - comment.insert( -- 0, "The following packages failed to " "install/update: {0}".format(summary) -+ 0, "The following packages failed to " "install/update: {}".format(summary) - ) - result = False - -@@ -2118,7 +2129,7 @@ def installed( - pkgstr = modified_pkg - 
else: - pkgstr = _get_desired_pkg(modified_pkg, desired) -- msg = "Package {0} was reinstalled.".format(pkgstr) -+ msg = "Package {} was reinstalled.".format(pkgstr) - if modified_pkg in altered_files: - msg += " The following files were remediated:" - comment.append(msg) -@@ -2133,7 +2144,7 @@ def installed( - pkgstr = failed_pkg - else: - pkgstr = _get_desired_pkg(failed_pkg, desired) -- msg = "Reinstall was not successful for package {0}.".format(pkgstr) -+ msg = "Reinstall was not successful for package {}.".format(pkgstr) - if failed_pkg in altered_files: - msg += " The following files could not be remediated:" - comment.append(msg) -@@ -2274,12 +2285,12 @@ def downloaded( - ret["result"] = False - ret[ - "comment" -- ] = "An error was encountered while checking targets: " "{0}".format(targets) -+ ] = "An error was encountered while checking targets: " "{}".format(targets) - return ret - - if __opts__["test"]: - summary = ", ".join(targets) -- ret["comment"] = "The following packages would be " "downloaded: {0}".format( -+ ret["comment"] = "The following packages would be " "downloaded: {}".format( - summary - ) - return ret -@@ -2306,7 +2317,7 @@ def downloaded( - ret["changes"] = {} - ret["comment"] = ( - "An error was encountered while downloading " -- "package(s): {0}".format(exc) -+ "package(s): {}".format(exc) - ) - return ret - -@@ -2316,13 +2327,13 @@ def downloaded( - if failed: - summary = ", ".join([_get_desired_pkg(x, targets) for x in failed]) - ret["result"] = False -- ret["comment"] = "The following packages failed to " "download: {0}".format( -+ ret["comment"] = "The following packages failed to " "download: {}".format( - summary - ) - - if not ret["changes"] and not ret["comment"]: - ret["result"] = True -- ret["comment"] = "Packages downloaded: " "{0}".format(", ".join(targets)) -+ ret["comment"] = "Packages downloaded: " "{}".format(", ".join(targets)) - - return ret - -@@ -2382,14 +2393,14 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): - ret["result"] = False - ret[ - "comment" -- ] = "An error was encountered while checking targets: " "{0}".format(targets) -+ ] = "An error was encountered while checking targets: " "{}".format(targets) - return ret - - if __opts__["test"]: - summary = ", ".join(targets) - ret[ - "comment" -- ] = "The following advisory patches would be " "downloaded: {0}".format(summary) -+ ] = "The following advisory patches would be " "downloaded: {}".format(summary) - return ret - - try: -@@ -2408,7 +2419,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): - ret["changes"] = {} - ret["comment"] = ( - "An error was encountered while downloading " -- "package(s): {0}".format(exc) -+ "package(s): {}".format(exc) - ) - return ret - -@@ -2417,7 +2428,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): - ret["result"] = True - ret["comment"] = ( - "Advisory patch is not needed or related packages " -- "are already {0}".format(status) -+ "are already {}".format(status) - ) - - return ret -@@ -2674,7 +2685,7 @@ def latest( - "changes": {}, - "result": False, - "comment": "An error was encountered while checking the " -- "newest available version of package(s): {0}".format(exc), -+ "newest available version of package(s): {}".format(exc), - } - - try: -@@ -2683,9 +2694,9 @@ def latest( - return {"name": name, "changes": {}, "result": False, "comment": exc.strerror} - - # Repack the cur/avail data if only a single package is being checked -- if isinstance(cur, 
six.string_types): -+ if isinstance(cur, str): - cur = {desired_pkgs[0]: cur} -- if isinstance(avail, six.string_types): -+ if isinstance(avail, str): - avail = {desired_pkgs[0]: avail} - - targets = {} -@@ -2695,7 +2706,7 @@ def latest( - # Package either a) is up-to-date, or b) does not exist - if not cur.get(pkg): - # Package does not exist -- msg = "No information found for '{0}'.".format(pkg) -+ msg = "No information found for '{}'.".format(pkg) - log.error(msg) - problems.append(msg) - elif ( -@@ -2741,12 +2752,12 @@ def latest( - comments.append( - "The following packages are already up-to-date: " - + ", ".join( -- ["{0} ({1})".format(x, cur[x]) for x in sorted(up_to_date)] -+ ["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)] - ) - ) - else: - comments.append( -- "{0} packages are already up-to-date".format(up_to_date_count) -+ "{} packages are already up-to-date".format(up_to_date_count) - ) - - return { -@@ -2784,7 +2795,7 @@ def latest( - "changes": {}, - "result": False, - "comment": "An error was encountered while installing " -- "package(s): {0}".format(exc), -+ "package(s): {}".format(exc), - } - - if changes: -@@ -2800,7 +2811,7 @@ def latest( - - comments = [] - if failed: -- msg = "The following packages failed to update: " "{0}".format( -+ msg = "The following packages failed to update: " "{}".format( - ", ".join(sorted(failed)) - ) - comments.append(msg) -@@ -2808,19 +2819,17 @@ def latest( - msg = ( - "The following packages were successfully " - "installed/upgraded: " -- "{0}".format(", ".join(sorted(successful))) -+ "{}".format(", ".join(sorted(successful))) - ) - comments.append(msg) - if up_to_date: - if len(up_to_date) <= 10: - msg = ( - "The following packages were already up-to-date: " -- "{0}".format(", ".join(sorted(up_to_date))) -+ "{}".format(", ".join(sorted(up_to_date))) - ) - else: -- msg = "{0} packages were already up-to-date ".format( -- len(up_to_date) -- ) -+ msg = "{} packages were already up-to-date ".format(len(up_to_date)) - comments.append(msg) - - return { -@@ -2832,18 +2841,18 @@ def latest( - else: - if len(targets) > 10: - comment = ( -- "{0} targeted packages failed to update. " -+ "{} targeted packages failed to update. " - "See debug log for details.".format(len(targets)) - ) - elif len(targets) > 1: - comment = ( - "The following targeted packages failed to update. 
" -- "See debug log for details: ({0}).".format( -+ "See debug log for details: ({}).".format( - ", ".join(sorted(targets)) - ) - ) - else: -- comment = "Package {0} failed to " "update.".format( -+ comment = "Package {} failed to " "update.".format( - next(iter(list(targets.keys()))) - ) - if up_to_date: -@@ -2851,10 +2860,10 @@ def latest( - comment += ( - " The following packages were already " - "up-to-date: " -- "{0}".format(", ".join(sorted(up_to_date))) -+ "{}".format(", ".join(sorted(up_to_date))) - ) - else: -- comment += "{0} packages were already " "up-to-date".format( -+ comment += "{} packages were already " "up-to-date".format( - len(up_to_date) - ) - -@@ -2866,13 +2875,13 @@ def latest( - } - else: - if len(desired_pkgs) > 10: -- comment = "All {0} packages are up-to-date.".format(len(desired_pkgs)) -+ comment = "All {} packages are up-to-date.".format(len(desired_pkgs)) - elif len(desired_pkgs) > 1: -- comment = "All packages are up-to-date " "({0}).".format( -+ comment = "All packages are up-to-date " "({}).".format( - ", ".join(sorted(desired_pkgs)) - ) - else: -- comment = "Package {0} is already " "up-to-date".format(desired_pkgs[0]) -+ comment = "Package {} is already " "up-to-date".format(desired_pkgs[0]) - - return {"name": name, "changes": {}, "result": True, "comment": comment} - -@@ -2894,8 +2903,7 @@ def _uninstall( - "name": name, - "changes": {}, - "result": False, -- "comment": "Invalid action '{0}'. " -- "This is probably a bug.".format(action), -+ "comment": "Invalid action '{}'. " "This is probably a bug.".format(action), - } - - try: -@@ -2908,7 +2916,7 @@ def _uninstall( - "changes": {}, - "result": False, - "comment": "An error was encountered while parsing targets: " -- "{0}".format(exc), -+ "{}".format(exc), - } - targets = _find_remove_targets( - name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs -@@ -2921,7 +2929,7 @@ def _uninstall( - "changes": {}, - "result": False, - "comment": "An error was encountered while checking targets: " -- "{0}".format(targets), -+ "{}".format(targets), - } - if action == "purge": - old_removed = __salt__["pkg.list_pkgs"]( -@@ -2936,7 +2944,7 @@ def _uninstall( - "changes": {}, - "result": True, - "comment": "None of the targeted packages are installed" -- "{0}".format(" or partially installed" if action == "purge" else ""), -+ "{}".format(" or partially installed" if action == "purge" else ""), - } - - if __opts__["test"]: -@@ -2944,11 +2952,11 @@ def _uninstall( - "name": name, - "changes": {}, - "result": None, -- "comment": "The following packages will be {0}d: " -- "{1}.".format(action, ", ".join(targets)), -+ "comment": "The following packages will be {}d: " -+ "{}.".format(action, ", ".join(targets)), - } - -- changes = __salt__["pkg.{0}".format(action)]( -+ changes = __salt__["pkg.{}".format(action)]( - name, pkgs=pkgs, version=version, **kwargs - ) - new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs) -@@ -2975,8 +2983,8 @@ def _uninstall( - "name": name, - "changes": changes, - "result": False, -- "comment": "The following packages failed to {0}: " -- "{1}.".format(action, ", ".join(failed)), -+ "comment": "The following packages failed to {}: " -+ "{}.".format(action, ", ".join(failed)), - } - - comments = [] -@@ -2984,14 +2992,13 @@ def _uninstall( - if not_installed: - comments.append( - "The following packages were not installed: " -- "{0}".format(", ".join(not_installed)) -+ "{}".format(", ".join(not_installed)) - ) - comments.append( -- "The following packages were {0}d: " -- 
"{1}.".format(action, ", ".join(targets)) -+ "The following packages were {}d: " "{}.".format(action, ", ".join(targets)) - ) - else: -- comments.append("All targeted packages were {0}d.".format(action)) -+ comments.append("All targeted packages were {}d.".format(action)) - - return { - "name": name, -@@ -3089,7 +3096,7 @@ def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, ** - ret["changes"] = {} - ret[ - "comment" -- ] = "An error was encountered while removing " "package(s): {0}".format(exc) -+ ] = "An error was encountered while removing " "package(s): {}".format(exc) - return ret - - -@@ -3181,7 +3188,7 @@ def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **k - ret["changes"] = {} - ret[ - "comment" -- ] = "An error was encountered while purging " "package(s): {0}".format(exc) -+ ] = "An error was encountered while purging " "package(s): {}".format(exc) - return ret - - -@@ -3247,17 +3254,17 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs): - "new": pkgver, - "old": __salt__["pkg.version"](pkgname, **kwargs), - } -- for pkgname, pkgver in six.iteritems(packages) -+ for pkgname, pkgver in packages.items() - } - if isinstance(pkgs, list): - packages = [pkg for pkg in packages if pkg in pkgs] - expected = { - pkgname: pkgver -- for pkgname, pkgver in six.iteritems(expected) -+ for pkgname, pkgver in expected.items() - if pkgname in pkgs - } - except Exception as exc: # pylint: disable=broad-except -- ret["comment"] = six.text_type(exc) -+ ret["comment"] = str(exc) - return ret - else: - ret["comment"] = "refresh must be either True or False" -@@ -3284,16 +3291,16 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs): - ret["changes"] = {} - ret[ - "comment" -- ] = "An error was encountered while updating " "packages: {0}".format(exc) -+ ] = "An error was encountered while updating " "packages: {}".format(exc) - return ret - - # If a package list was provided, ensure those packages were updated - missing = [] - if isinstance(pkgs, list): -- missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret["changes"]] -+ missing = [pkg for pkg in expected.keys() if pkg not in ret["changes"]] - - if missing: -- ret["comment"] = "The following package(s) failed to update: {0}".format( -+ ret["comment"] = "The following package(s) failed to update: {}".format( - ", ".join(missing) - ) - ret["result"] = False -@@ -3362,8 +3369,8 @@ def group_installed(name, skip=None, include=None, **kwargs): - ret["comment"] = "skip must be formatted as a list" - return ret - for idx, item in enumerate(skip): -- if not isinstance(item, six.string_types): -- skip[idx] = six.text_type(item) -+ if not isinstance(item, str): -+ skip[idx] = str(item) - - if include is None: - include = [] -@@ -3372,15 +3379,15 @@ def group_installed(name, skip=None, include=None, **kwargs): - ret["comment"] = "include must be formatted as a list" - return ret - for idx, item in enumerate(include): -- if not isinstance(item, six.string_types): -- include[idx] = six.text_type(item) -+ if not isinstance(item, str): -+ include[idx] = str(item) - - try: - diff = __salt__["pkg.group_diff"](name) - except CommandExecutionError as err: - ret["comment"] = ( - "An error was encountered while installing/updating " -- "group '{0}': {1}.".format(name, err) -+ "group '{}': {}.".format(name, err) - ) - return ret - -@@ -3390,7 +3397,7 @@ def group_installed(name, skip=None, include=None, **kwargs): - if invalid_skip: - ret[ - "comment" -- ] = "The following mandatory packages 
cannot be skipped: {0}".format( -+ ] = "The following mandatory packages cannot be skipped: {}".format( - ", ".join(invalid_skip) - ) - return ret -@@ -3401,7 +3408,7 @@ def group_installed(name, skip=None, include=None, **kwargs): - - if not targets: - ret["result"] = True -- ret["comment"] = "Group '{0}' is already installed".format(name) -+ ret["comment"] = "Group '{}' is already installed".format(name) - return ret - - partially_installed = ( -@@ -3415,9 +3422,9 @@ def group_installed(name, skip=None, include=None, **kwargs): - if partially_installed: - ret[ - "comment" -- ] = "Group '{0}' is partially installed and will be updated".format(name) -+ ] = "Group '{}' is partially installed and will be updated".format(name) - else: -- ret["comment"] = "Group '{0}' will be installed".format(name) -+ ret["comment"] = "Group '{}' will be installed".format(name) - return ret - - try: -@@ -3432,19 +3439,19 @@ def group_installed(name, skip=None, include=None, **kwargs): - ret["changes"] = {} - ret["comment"] = ( - "An error was encountered while " -- "installing/updating group '{0}': {1}".format(name, exc) -+ "installing/updating group '{}': {}".format(name, exc) - ) - return ret - - failed = [x for x in targets if x not in __salt__["pkg.list_pkgs"](**kwargs)] - if failed: -- ret["comment"] = "Failed to install the following packages: {0}".format( -+ ret["comment"] = "Failed to install the following packages: {}".format( - ", ".join(failed) - ) - return ret - - ret["result"] = True -- ret["comment"] = "Group '{0}' was {1}".format( -+ ret["comment"] = "Group '{}' was {}".format( - name, "updated" if partially_installed else "installed" - ) - return ret -@@ -3561,6 +3568,6 @@ def mod_watch(name, **kwargs): - return { - "name": name, - "changes": {}, -- "comment": "pkg.{0} does not work with the watch requisite".format(sfun), -+ "comment": "pkg.{} does not work with the watch requisite".format(sfun), - "result": False, - } --- -2.29.2 - - diff --git a/add-almalinux-and-alibaba-cloud-linux-to-the-os-fami.patch b/add-almalinux-and-alibaba-cloud-linux-to-the-os-fami.patch deleted file mode 100644 index 23e27bf..0000000 --- a/add-almalinux-and-alibaba-cloud-linux-to-the-os-fami.patch +++ /dev/null @@ -1,131 +0,0 @@ -From beec6f3945bda722bfe9c0aa606065f04c89bc62 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?= - -Date: Wed, 24 Mar 2021 14:12:34 +0100 -Subject: [PATCH] Add AlmaLinux and Alibaba Cloud Linux to the OS - Family list (#341) - -* Add AlmaLinux and Alibaba Cloud Linux to the OS Family list - -* Fix some grains tests ---- - salt/grains/core.py | 4 +++ - tests/unit/grains/test_core.py | 51 +++++++++++++++++++++++++++++++++- - 2 files changed, 54 insertions(+), 1 deletion(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index 5634327623..09f9d29788 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -1532,6 +1532,7 @@ _OS_NAME_MAP = { - "oracleserv": "OEL", - "cloudserve": "CloudLinux", - "cloudlinux": "CloudLinux", -+ "almalinux": "AlmaLinux", - "pidora": "Fedora", - "scientific": "ScientificLinux", - "synology": "Synology", -@@ -1546,6 +1547,7 @@ _OS_NAME_MAP = { - "slesexpand": "RES", - "linuxmint": "Mint", - "neon": "KDE neon", -+ "alibaba": "Alibaba Cloud (Aliyun)", - } - - # Map the 'os' grain to the 'os_family' grain -@@ -1563,6 +1565,7 @@ _OS_FAMILY_MAP = { - "Scientific": "RedHat", - "Amazon": "RedHat", - "CloudLinux": "RedHat", -+ "AlmaLinux": "RedHat", - "OVS": "RedHat", - "OEL": "RedHat", - "XCP": "RedHat", -@@ -1619,6 +1622,7 @@ 
_OS_FAMILY_MAP = { - "AIX": "AIX", - "TurnKey": "Debian", - "AstraLinuxCE": "Debian", -+ "Alibaba Cloud (Aliyun)": "RedHat", - } - - # Matches any possible format: -diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 15de4e363e..6aa05abe40 100644 ---- a/tests/unit/grains/test_core.py -+++ b/tests/unit/grains/test_core.py -@@ -678,6 +678,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - } - self._run_os_grains_tests(None, _os_release_map, expectation) - -+ @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") -+ def test_almalinux_8_os_grains(self): -+ """ -+ Test if OS grains are parsed correctly in AlmaLinux 8 -+ """ -+ _os_release_map = { -+ "os_release_file": { -+ "NAME": "AlmaLinux", -+ "VERSION_ID": "8.3", -+ "PRETTY_NAME": "AlmaLinux 8", -+ "ID": "almalinux", -+ "ANSI_COLOR": "0;31", -+ "CPE_NAME": "cpe:/o:almalinux:almalinux:8.3", -+ }, -+ "_linux_distribution": ("almaLinux", "8.3", ""), -+ } -+ -+ expectation = { -+ "os": "AlmaLinux", -+ "os_family": "RedHat", -+ "oscodename": "AlmaLinux 8", -+ "osfullname": "AlmaLinux", -+ "osrelease": "8.3", -+ "osrelease_info": (8, 3,), -+ "osmajorrelease": 8, -+ "osfinger": "AlmaLinux-8", -+ } -+ self._run_os_grains_tests(None, _os_release_map, expectation) -+ - def test_unicode_error(self): - raise_unicode_mock = MagicMock( - name="raise_unicode_error", side_effect=UnicodeError -@@ -733,7 +762,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel" - """ - _os_release_map = { -- "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"), -+ "_linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"), - } - expectation = { - "os": "AstraLinuxCE", -@@ -747,6 +776,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - } - self._run_os_grains_tests("astralinuxce-2.12.22", _os_release_map, expectation) - -+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') -+ def test_aliyunlinux2_os_grains(self): -+ ''' -+ Test if OS grains are parsed correctly in Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS -+ ''' -+ _os_release_map = { -+ '_linux_distribution': ('Alibaba Cloud Linux (Aliyun Linux)', '2.1903', 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)'), -+ } -+ expectation = { -+ 'os': 'Alibaba Cloud (Aliyun)', -+ 'os_family': 'RedHat', -+ 'oscodename': 'Alibaba Cloud Linux (Aliyun Linux) 2.1903 LTS (Hunting Beagle)', -+ 'osfullname': 'Alibaba Cloud Linux (Aliyun Linux)', -+ 'osrelease': '2.1903', -+ 'osrelease_info': (2, 1903), -+ 'osmajorrelease': 2, -+ 'osfinger': 'Alibaba Cloud Linux (Aliyun Linux)-2', -+ } -+ self._run_os_grains_tests(None, _os_release_map, expectation) -+ - @skipIf(not salt.utils.platform.is_windows(), "System is not Windows") - def test_windows_platform_data(self): - """ --- -2.30.2 - - diff --git a/add-astra-linux-common-edition-to-the-os-family-list.patch b/add-astra-linux-common-edition-to-the-os-family-list.patch index 1a98229..cf2be72 100644 --- a/add-astra-linux-common-edition-to-the-os-family-list.patch +++ b/add-astra-linux-common-edition-to-the-os-family-list.patch @@ -1,4 +1,4 @@ -From d5569023c64a3fcec57a7aa6823ee94e8be91b3d Mon Sep 17 00:00:00 2001 +From 30366101c20eefd2411482138edfa0ca0c8a3b06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julio=20Gonz=C3=A1lez=20Gil?= Date: Wed, 12 Feb 2020 10:05:45 +0100 @@ -11,24 +11,24 @@ Subject: [PATCH] Add Astra Linux Common Edition to the OS Family list 2 files changed, 21 insertions(+) diff --git 
a/salt/grains/core.py b/salt/grains/core.py -index 5dff6ecfd4..5634327623 100644 +index e007f40c92..19937f008e 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1618,6 +1618,7 @@ _OS_FAMILY_MAP = { - "Funtoo": "Gentoo", +@@ -1636,6 +1636,7 @@ _OS_FAMILY_MAP = { "AIX": "AIX", "TurnKey": "Debian", + "Pop": "Debian", + "AstraLinuxCE": "Debian", } # Matches any possible format: diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 85d434dd9d..196dbcf83d 100644 +index 7173f04979..e8845e2dfa 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -728,6 +728,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): +@@ -826,6 +826,26 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): } - self._run_os_grains_tests("ubuntu-17.10", _os_release_map, expectation) + self._run_os_grains_tests("pop-20.10", _os_release_map, expectation) + @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") + def test_astralinuxce_2_os_grains(self): @@ -36,7 +36,7 @@ index 85d434dd9d..196dbcf83d 100644 + Test if OS grains are parsed correctly in Astra Linux CE 2.12.22 "orel" + """ + _os_release_map = { -+ "linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"), ++ "_linux_distribution": ("AstraLinuxCE", "2.12.22", "orel"), + } + expectation = { + "os": "AstraLinuxCE", @@ -54,6 +54,6 @@ index 85d434dd9d..196dbcf83d 100644 def test_windows_platform_data(self): """ -- -2.29.2 +2.33.0 diff --git a/add-batch_presence_ping_timeout-and-batch_presence_p.patch b/add-batch_presence_ping_timeout-and-batch_presence_p.patch deleted file mode 100644 index 296a963..0000000 --- a/add-batch_presence_ping_timeout-and-batch_presence_p.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 66f6c2540a151487b26c89a2bb66199d6c65c18d Mon Sep 17 00:00:00 2001 -From: Marcelo Chiaradia -Date: Thu, 4 Apr 2019 13:57:38 +0200 -Subject: [PATCH] Add 'batch_presence_ping_timeout' and - 'batch_presence_ping_gather_job_timeout' parameters for synchronous batching - ---- - salt/cli/batch.py | 16 ++++++++++++++-- - 1 file changed, 14 insertions(+), 2 deletions(-) - -diff --git a/salt/cli/batch.py b/salt/cli/batch.py -index 527cffdeb7..2bc5444aef 100644 ---- a/salt/cli/batch.py -+++ b/salt/cli/batch.py -@@ -77,6 +77,13 @@ def batch_get_opts( - if key not in opts: - opts[key] = val - -+ opts["batch_presence_ping_timeout"] = kwargs.get( -+ "batch_presence_ping_timeout", opts["timeout"] -+ ) -+ opts["batch_presence_ping_gather_job_timeout"] = kwargs.get( -+ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"] -+ ) -+ - return opts - - -@@ -115,7 +122,7 @@ class Batch: - self.opts["tgt"], - "test.ping", - [], -- self.opts["timeout"], -+ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]), - ] - - selected_target_option = self.opts.get("selected_target_option", None) -@@ -126,7 +133,12 @@ class Batch: - - self.pub_kwargs["yield_pub_data"] = True - ping_gen = self.local.cmd_iter( -- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs -+ *args, -+ gather_job_timeout=self.opts.get( -+ "batch_presence_ping_gather_job_timeout", -+ self.opts["gather_job_timeout"], -+ ), -+ **self.pub_kwargs - ) - - # Broadcast to targets --- -2.29.2 - - diff --git a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch b/add-cpe_name-for-osversion-grain-parsing-u-49946.patch deleted file mode 100644 index 64228b2..0000000 --- a/add-cpe_name-for-osversion-grain-parsing-u-49946.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 
c845d56fdf1762586b1f210b1eb49193893d4312 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Tue, 9 Oct 2018 14:08:50 +0200 -Subject: [PATCH] Add CPE_NAME for osversion* grain parsing (U#49946) - -Remove unnecessary linebreak - -Override VERSION_ID from os-release, if CPE_NAME is given - -Add unit test for WFN format of CPE_NAME - -Add unit test for v2.3 of CPE format - -Add unit test for broken CPE_NAME - -Prevent possible crash if CPE_NAME is wrongly written in the distro - -Add part parsing - -Keep CPE_NAME only for opensuse series - -Remove linebreak - -Expand unit test to verify part name - -Fix proper part name in the string-bound CPE ---- - salt/grains/core.py | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index 5535584d1b..bc3cf129cd 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -1732,6 +1732,34 @@ def _parse_cpe_name(cpe): - return ret - - -+def _parse_cpe_name(cpe): -+ ''' -+ Parse CPE_NAME data from the os-release -+ -+ Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe -+ -+ :param cpe: -+ :return: -+ ''' -+ part = { -+ 'o': 'operating system', -+ 'h': 'hardware', -+ 'a': 'application', -+ } -+ ret = {} -+ cpe = (cpe or '').split(':') -+ if len(cpe) > 4 and cpe[0] == 'cpe': -+ if cpe[1].startswith('/'): # WFN to URI -+ ret['vendor'], ret['product'], ret['version'] = cpe[2:5] -+ ret['phase'] = cpe[5] if len(cpe) > 5 else None -+ ret['part'] = part.get(cpe[1][1:]) -+ elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string -+ ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] -+ ret['part'] = part.get(cpe[2]) -+ -+ return ret -+ -+ - def os_data(): - """ - Return grains pertaining to the operating system --- -2.29.2 - - diff --git a/add-custom-suse-capabilities-as-grains.patch b/add-custom-suse-capabilities-as-grains.patch index 395a9b5..8e3a2f0 100644 --- a/add-custom-suse-capabilities-as-grains.patch +++ b/add-custom-suse-capabilities-as-grains.patch @@ -1,18 +1,19 @@ -From 713ccfdc5c6733495d3ce7f26a8cfeddb8e9e9c4 Mon Sep 17 00:00:00 2001 +From bdb48ed82c755407bc413fa445e057a6da5f1e87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 21 Jun 2018 11:57:57 +0100 Subject: [PATCH] Add custom SUSE capabilities as Grains +Add new custom SUSE capability for saltutil state module --- - salt/grains/extra.py | 7 +++++++ - 1 file changed, 7 insertions(+) + salt/grains/extra.py | 8 ++++++++ + 1 file changed, 8 insertions(+) diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index 2fdbe6526a..ddc22293ea 100644 +index 2fdbe6526a..0eec27e628 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py -@@ -66,3 +66,10 @@ def config(): +@@ -66,3 +66,11 @@ def config(): log.warning("Bad syntax in grains file! 
Skipping.") return {} return {} @@ -21,9 +22,10 @@ index 2fdbe6526a..ddc22293ea 100644 +def suse_backported_capabilities(): + return { + '__suse_reserved_pkg_all_versions_support': True, -+ '__suse_reserved_pkg_patches_support': True ++ '__suse_reserved_pkg_patches_support': True, ++ '__suse_reserved_saltutil_states_support': True + } -- -2.29.2 +2.33.0 diff --git a/add-docker-logout-237.patch b/add-docker-logout-237.patch deleted file mode 100644 index 7f878dc..0000000 --- a/add-docker-logout-237.patch +++ /dev/null @@ -1,179 +0,0 @@ -From 355e1e29e8f3286eeb13bc2d05089c096c9e01e3 Mon Sep 17 00:00:00 2001 -From: Alexander Graul -Date: Mon, 18 May 2020 16:39:27 +0200 -Subject: [PATCH] Add docker logout (#237) - -Docker logout works analog to login. It takes none, one or more registries as -arguments. If there are no arguments, all known (specified in pillar) -docker registries are logged out of. If arguments are present, they are -interpreted as a list of docker registries to log out of. ---- - salt/modules/dockermod.py | 80 ++++++++++++++++++++++++++++ - tests/unit/modules/test_dockermod.py | 59 ++++++++++++++++++++ - 2 files changed, 139 insertions(+) - -diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py -index 934038c927..176b4db926 100644 ---- a/salt/modules/dockermod.py -+++ b/salt/modules/dockermod.py -@@ -1586,6 +1586,86 @@ def logout(*registries): - return ret - - -+def logout(*registries): -+ """ -+ .. versionadded:: 3001 -+ -+ Performs a ``docker logout`` to remove the saved authentication details for -+ one or more configured repositories. -+ -+ Multiple registry URLs (matching those configured in Pillar) can be passed, -+ and Salt will attempt to logout of *just* those registries. If no registry -+ URLs are provided, Salt will attempt to logout of *all* configured -+ registries. -+ -+ **RETURN DATA** -+ -+ A dictionary containing the following keys: -+ -+ - ``Results`` - A dictionary mapping registry URLs to the authentication -+ result. ``True`` means a successful logout, ``False`` means a failed -+ logout. -+ - ``Errors`` - A list of errors encountered during the course of this -+ function. -+ -+ CLI Example: -+ -+ .. code-block:: bash -+ -+ salt myminion docker.logout -+ salt myminion docker.logout hub -+ salt myminion docker.logout hub https://mydomain.tld/registry/ -+ """ -+ # NOTE: This function uses the "docker logout" CLI command to remove -+ # authentication information from config.json. 
docker-py does not support -+ # this usecase (see https://github.com/docker/docker-py/issues/1091) -+ -+ # To logout of all known (to Salt) docker registries, they have to be collected first -+ registry_auth = __salt__["config.get"]("docker-registries", {}) -+ ret = {"retcode": 0} -+ errors = ret.setdefault("Errors", []) -+ if not isinstance(registry_auth, dict): -+ errors.append("'docker-registries' Pillar value must be a dictionary") -+ registry_auth = {} -+ for reg_name, reg_conf in six.iteritems( -+ __salt__["config.option"]("*-docker-registries", wildcard=True) -+ ): -+ try: -+ registry_auth.update(reg_conf) -+ except TypeError: -+ errors.append( -+ "Docker registry '{0}' was not specified as a " -+ "dictionary".format(reg_name) -+ ) -+ -+ # If no registries passed, we will logout of all known registries -+ if not registries: -+ registries = list(registry_auth) -+ -+ results = ret.setdefault("Results", {}) -+ for registry in registries: -+ if registry not in registry_auth: -+ errors.append("No match found for registry '{0}'".format(registry)) -+ continue -+ else: -+ cmd = ["docker", "logout"] -+ if registry.lower() != "hub": -+ cmd.append(registry) -+ log.debug("Attempting to logout of docker registry '%s'", registry) -+ logout_cmd = __salt__["cmd.run_all"]( -+ cmd, python_shell=False, output_loglevel="quiet", -+ ) -+ results[registry] = logout_cmd["retcode"] == 0 -+ if not results[registry]: -+ if logout_cmd["stderr"]: -+ errors.append(logout_cmd["stderr"]) -+ elif logout_cmd["stdout"]: -+ errors.append(logout_cmd["stdout"]) -+ if errors: -+ ret["retcode"] = 1 -+ return ret -+ -+ - # Functions for information gathering - def depends(name): - """ -diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py -index 34e2e9c610..48526acb71 100644 ---- a/tests/unit/modules/test_dockermod.py -+++ b/tests/unit/modules/test_dockermod.py -@@ -199,6 +199,65 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): - output_loglevel="quiet", - ) - -+ def test_logout_calls_docker_cli_logout_all(self): -+ client = Mock() -+ get_client_mock = MagicMock(return_value=client) -+ ref_out = {"stdout": "", "stderr": "", "retcode": 0} -+ registry_auth_data = { -+ "portus.example.com:5000": { -+ "username": "admin", -+ "password": "linux12345", -+ "email": "tux@example.com", -+ }, -+ "portus2.example.com:5000": { -+ "username": "admin", -+ "password": "linux12345", -+ "email": "tux@example.com", -+ }, -+ } -+ -+ docker_mock = MagicMock(return_value=ref_out) -+ with patch.object(docker_mod, "_get_client", get_client_mock): -+ dunder_salt = { -+ "config.get": MagicMock(return_value=registry_auth_data), -+ "cmd.run_all": docker_mock, -+ "config.option": MagicMock(return_value={}), -+ } -+ with patch.dict(docker_mod.__salt__, dunder_salt): -+ ret = docker_mod.logout() -+ assert "retcode" in ret -+ assert ret["retcode"] == 0 -+ assert docker_mock.call_count == 2 -+ -+ def test_logout_calls_docker_cli_logout_single(self): -+ client = Mock() -+ get_client_mock = MagicMock(return_value=client) -+ ref_out = {"stdout": "", "stderr": "", "retcode": 0} -+ registry_auth_data = { -+ "portus.example.com:5000": { -+ "username": "admin", -+ "password": "linux12345", -+ "email": "tux@example.com", -+ } -+ } -+ docker_mock = MagicMock(return_value=ref_out) -+ with patch.object(docker_mod, "_get_client", get_client_mock): -+ dunder_salt = { -+ "config.get": MagicMock(return_value=registry_auth_data), -+ "cmd.run_all": docker_mock, -+ "config.option": MagicMock(return_value={}), -+ } -+ with 
patch.dict(docker_mod.__salt__, dunder_salt): -+ ret = docker_mod.logout("portus.example.com:5000") -+ assert "retcode" in ret -+ assert ret["retcode"] == 0 -+ docker_mock.assert_called_with( -+ ["docker", "logout", "portus.example.com:5000"], -+ python_shell=False, -+ output_loglevel="quiet", -+ ) -+ -+ - def test_logout_calls_docker_cli_logout_all(self): - client = Mock() - get_client_mock = MagicMock(return_value=client) --- -2.29.2 - - diff --git a/add-hold-unhold-functions.patch b/add-hold-unhold-functions.patch deleted file mode 100644 index da32063..0000000 --- a/add-hold-unhold-functions.patch +++ /dev/null @@ -1,469 +0,0 @@ -From 6176ef8aa39626dcb450a1665231a796e9544342 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Thu, 6 Dec 2018 16:26:23 +0100 -Subject: [PATCH] Add hold/unhold functions - -Add unhold function - -Add warnings ---- - salt/modules/zypperpkg.py | 186 +++++++++++++++++++++++++++----------- - 1 file changed, 131 insertions(+), 55 deletions(-) - -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 44bcbbf2f2..6fa6e3e0a1 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Package support for openSUSE via the zypper package manager - -@@ -12,8 +11,6 @@ Package support for openSUSE via the zypper package manager - - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import datetime - import fnmatch -@@ -24,7 +21,6 @@ import time - from xml.dom import minidom as dom - from xml.parsers.expat import ExpatError - --# Import salt libs - import salt.utils.data - import salt.utils.environment - import salt.utils.event -@@ -35,9 +31,9 @@ import salt.utils.pkg - import salt.utils.pkg.rpm - import salt.utils.stringutils - import salt.utils.systemd -+import salt.utils.versions - from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError - --# Import 3rd-party libs - # pylint: disable=import-error,redefined-builtin,no-name-in-module - from salt.ext import six - from salt.ext.six.moves import configparser -@@ -51,8 +47,8 @@ log = logging.getLogger(__name__) - - HAS_ZYPP = False - ZYPP_HOME = "/etc/zypp" --LOCKS = "{0}/locks".format(ZYPP_HOME) --REPOS = "{0}/repos.d".format(ZYPP_HOME) -+LOCKS = "{}/locks".format(ZYPP_HOME) -+REPOS = "{}/repos.d".format(ZYPP_HOME) - DEFAULT_PRIORITY = 99 - PKG_ARCH_SEPARATOR = "." - -@@ -75,7 +71,7 @@ def __virtual__(): - return __virtualname__ - - --class _Zypper(object): -+class _Zypper: - """ - Zypper parallel caller. - Validates the result and either raises an exception or reports an error. 
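The hunks below replace the bare `add_lock`/`remove_lock` helpers with `hold`/`unhold` functions that accept either a single `name` or a `pkgs` list and return a per-package dict with `result` and `comment` keys, which is the shape the `pkg.installed` state consumes when a `hold` is requested. A minimal CLI sketch of the new calling convention, assuming the patched zypperpkg module is loaded on the minion (the lines mirror the docstring examples added in this patch):

.. code-block:: bash

    # Lock a single package, or several in one call; each call returns a
    # dict keyed by package name with "result" and "comment" entries.
    salt '*' pkg.hold foo
    salt '*' pkg.hold pkgs='["foo", "bar"]'

    # Drop the locks again; packages that were never held are reported
    # back in the per-package comment rather than raising an error.
    salt '*' pkg.unhold foo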
-@@ -339,7 +335,7 @@ class _Zypper(object): - attrs=["pid", "name", "cmdline", "create_time"], - ) - data["cmdline"] = " ".join(data["cmdline"]) -- data["info"] = "Blocking process created at {0}.".format( -+ data["info"] = "Blocking process created at {}.".format( - datetime.datetime.utcfromtimestamp( - data["create_time"] - ).isoformat() -@@ -347,7 +343,7 @@ class _Zypper(object): - data["success"] = True - except Exception as err: # pylint: disable=broad-except - data = { -- "info": "Unable to retrieve information about blocking process: {0}".format( -+ "info": "Unable to retrieve information about blocking process: {}".format( - err.message - ), - "success": False, -@@ -382,7 +378,7 @@ class _Zypper(object): - ) - if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure: - raise CommandExecutionError( -- "Zypper command failure: {0}".format(self.error_msg) -+ "Zypper command failure: {}".format(self.error_msg) - ) - - return ( -@@ -397,7 +393,7 @@ class _Zypper(object): - __zypper__ = _Zypper() - - --class Wildcard(object): -+class Wildcard: - """ - .. versionadded:: 2017.7.0 - -@@ -439,7 +435,7 @@ class Wildcard(object): - for vrs in self._get_scope_versions(self._get_available_versions()) - ] - ) -- return versions and "{0}{1}".format(self._op or "", versions[-1]) or None -+ return versions and "{}{}".format(self._op or "", versions[-1]) or None - - def _get_available_versions(self): - """ -@@ -451,17 +447,15 @@ class Wildcard(object): - ).getElementsByTagName("solvable") - if not solvables: - raise CommandExecutionError( -- "No packages found matching '{0}'".format(self.name) -+ "No packages found matching '{}'".format(self.name) - ) - - return sorted( -- set( -- [ -- slv.getAttribute(self._attr_solvable_version) -- for slv in solvables -- if slv.getAttribute(self._attr_solvable_version) -- ] -- ) -+ { -+ slv.getAttribute(self._attr_solvable_version) -+ for slv in solvables -+ if slv.getAttribute(self._attr_solvable_version) -+ } - ) - - def _get_scope_versions(self, pkg_versions): -@@ -489,7 +483,7 @@ class Wildcard(object): - self._op = version.replace(exact_version, "") or None - if self._op and self._op not in self.Z_OP: - raise CommandExecutionError( -- 'Zypper do not supports operator "{0}".'.format(self._op) -+ 'Zypper do not supports operator "{}".'.format(self._op) - ) - self.version = exact_version - -@@ -539,14 +533,11 @@ def list_upgrades(refresh=True, root=None, **kwargs): - cmd = ["list-updates"] - if "fromrepo" in kwargs: - repos = kwargs["fromrepo"] -- if isinstance(repos, six.string_types): -+ if isinstance(repos, str): - repos = [repos] - for repo in repos: - cmd.extend( -- [ -- "--repo", -- repo if isinstance(repo, six.string_types) else six.text_type(repo), -- ] -+ ["--repo", repo if isinstance(repo, str) else str(repo),] - ) - log.debug("Targeting repos: %s", repos) - for update_node in ( -@@ -610,7 +601,7 @@ def info_installed(*names, **kwargs): - for _nfo in pkg_nfo: - t_nfo = dict() - # Translate dpkg-specific keys to a common structure -- for key, value in six.iteritems(_nfo): -+ for key, value in _nfo.items(): - if key == "source_rpm": - t_nfo["source"] = value - else: -@@ -1033,9 +1024,7 @@ def list_repo_pkgs(*args, **kwargs): - fromrepo = kwargs.pop("fromrepo", "") or "" - ret = {} - -- targets = [ -- arg if isinstance(arg, six.string_types) else six.text_type(arg) for arg in args -- ] -+ targets = [arg if isinstance(arg, str) else str(arg) for arg in args] - - def _is_match(pkgname): - """ -@@ -1124,7 +1113,7 @@ def 
_get_repo_info(alias, repos_cfg=None, root=None): - try: - meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias)) - meta["alias"] = alias -- for key, val in six.iteritems(meta): -+ for key, val in meta.items(): - if val in ["0", "1"]: - meta[key] = int(meta[key]) == 1 - elif val == "NONE": -@@ -1197,7 +1186,7 @@ def del_repo(repo, root=None): - "message": msg[0].childNodes[0].nodeValue, - } - -- raise CommandExecutionError("Repository '{0}' not found.".format(repo)) -+ raise CommandExecutionError("Repository '{}' not found.".format(repo)) - - - def mod_repo(repo, **kwargs): -@@ -1252,13 +1241,13 @@ def mod_repo(repo, **kwargs): - url = kwargs.get("url", kwargs.get("mirrorlist", kwargs.get("baseurl"))) - if not url: - raise CommandExecutionError( -- "Repository '{0}' not found, and neither 'baseurl' nor " -+ "Repository '{}' not found, and neither 'baseurl' nor " - "'mirrorlist' was specified".format(repo) - ) - - if not _urlparse(url).scheme: - raise CommandExecutionError( -- "Repository '{0}' not found and URL for baseurl/mirrorlist " -+ "Repository '{}' not found and URL for baseurl/mirrorlist " - "is malformed".format(repo) - ) - -@@ -1281,7 +1270,7 @@ def mod_repo(repo, **kwargs): - - if new_url == base_url: - raise CommandExecutionError( -- "Repository '{0}' already exists as '{1}'.".format(repo, alias) -+ "Repository '{}' already exists as '{}'.".format(repo, alias) - ) - - # Add new repo -@@ -1291,7 +1280,7 @@ def mod_repo(repo, **kwargs): - repos_cfg = _get_configured_repos(root=root) - if repo not in repos_cfg.sections(): - raise CommandExecutionError( -- "Failed add new repository '{0}' for unspecified reason. " -+ "Failed add new repository '{}' for unspecified reason. " - "Please check zypper logs.".format(repo) - ) - added = True -@@ -1327,12 +1316,10 @@ def mod_repo(repo, **kwargs): - cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck") - - if "priority" in kwargs: -- cmd_opt.append( -- "--priority={0}".format(kwargs.get("priority", DEFAULT_PRIORITY)) -- ) -+ cmd_opt.append("--priority={}".format(kwargs.get("priority", DEFAULT_PRIORITY))) - - if "humanname" in kwargs: -- cmd_opt.append("--name='{0}'".format(kwargs.get("humanname"))) -+ cmd_opt.append("--name='{}'".format(kwargs.get("humanname"))) - - if kwargs.get("gpgautoimport") is True: - global_cmd_opt.append("--gpg-auto-import-keys") -@@ -1589,7 +1576,7 @@ def install( - - if pkg_type == "repository": - targets = [] -- for param, version_num in six.iteritems(pkg_params): -+ for param, version_num in pkg_params.items(): - if version_num is None: - log.debug("targeting package: %s", param) - targets.append(param) -@@ -1597,7 +1584,7 @@ def install( - prefix, verstr = salt.utils.pkg.split_comparison(version_num) - if not prefix: - prefix = "=" -- target = "{0}{1}{2}".format(param, prefix, verstr) -+ target = "{}{}{}".format(param, prefix, verstr) - log.debug("targeting package: %s", target) - targets.append(target) - elif pkg_type == "advisory": -@@ -1606,7 +1593,7 @@ def install( - for advisory_id in pkg_params: - if advisory_id not in cur_patches: - raise CommandExecutionError( -- 'Advisory id "{0}" not found'.format(advisory_id) -+ 'Advisory id "{}" not found'.format(advisory_id) - ) - else: - # If we add here the `patch:` prefix, the -@@ -1703,7 +1690,7 @@ def install( - - if errors: - raise CommandExecutionError( -- "Problem encountered {0} package(s)".format( -+ "Problem encountered {} package(s)".format( - "downloading" if downloadonly else "installing" - ), - info={"errors": 
errors, "changes": ret}, -@@ -1797,7 +1784,7 @@ def upgrade( - cmd_update.append("--dry-run") - - if fromrepo: -- if isinstance(fromrepo, six.string_types): -+ if isinstance(fromrepo, str): - fromrepo = [fromrepo] - for repo in fromrepo: - cmd_update.extend(["--from" if dist_upgrade else "--repo", repo]) -@@ -2052,7 +2039,7 @@ def list_locks(root=None): - ) - if lock.get("solvable_name"): - locks[lock.pop("solvable_name")] = lock -- except IOError: -+ except OSError: - pass - except Exception: # pylint: disable=broad-except - log.warning("Detected a problem when accessing {}".format(_locks)) -@@ -2089,7 +2076,7 @@ def clean_locks(root=None): - return out - - --def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument -+def unhold(name=None, pkgs=None, **kwargs): - """ - Remove specified package lock. - -@@ -2104,8 +2091,50 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume - salt '*' pkg.remove_lock ,, - salt '*' pkg.remove_lock pkgs='["foo", "bar"]' - """ -+ ret = {} -+ if (not name and not pkgs) or (name and pkgs): -+ raise CommandExecutionError("Name or packages must be specified.") -+ elif name: -+ pkgs = [name] -+ -+ locks = list_locks() -+ try: -+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) -+ except MinionError as exc: -+ raise CommandExecutionError(exc) -+ -+ removed = [] -+ missing = [] -+ for pkg in pkgs: -+ if locks.get(pkg): -+ removed.append(pkg) -+ ret[pkg]["comment"] = "Package {} is no longer held.".format(pkg) -+ else: -+ missing.append(pkg) -+ ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg) -+ -+ if removed: -+ __zypper__.call("rl", *removed) -+ -+ return ret -+ -+ -+def remove_lock(packages, **kwargs): # pylint: disable=unused-argument -+ """ -+ Remove specified package lock. -+ -+ CLI Example: -+ -+ .. code-block:: bash - -- locks = list_locks(root) -+ salt '*' pkg.remove_lock -+ salt '*' pkg.remove_lock ,, -+ salt '*' pkg.remove_lock pkgs='["foo", "bar"]' -+ """ -+ salt.utils.versions.warn_until( -+ "Sodium", "This function is deprecated. Please use unhold() instead." -+ ) -+ locks = list_locks() - try: - packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys()) - except MinionError as exc: -@@ -2125,7 +2154,51 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume - return {"removed": len(removed), "not_found": missing} - - --def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument -+def hold(name=None, pkgs=None, **kwargs): -+ """ -+ Add a package lock. Specify packages to lock by exact name. -+ -+ CLI Example: -+ -+ .. 
code-block:: bash -+ -+ salt '*' pkg.add_lock <package name> -+ salt '*' pkg.add_lock <package1>,<package2>,<package3> -+ salt '*' pkg.add_lock pkgs='["foo", "bar"]' -+ -+ :param name: -+ :param pkgs: -+ :param kwargs: -+ :return: -+ """ -+ ret = {} -+ if (not name and not pkgs) or (name and pkgs): -+ raise CommandExecutionError("Name or packages must be specified.") -+ elif name: -+ pkgs = [name] -+ -+ locks = list_locks() -+ added = [] -+ try: -+ pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) -+ except MinionError as exc: -+ raise CommandExecutionError(exc) -+ -+ for pkg in pkgs: -+ ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""} -+ if not locks.get(pkg): -+ added.append(pkg) -+ ret[pkg]["comment"] = "Package {} is now being held.".format(pkg) -+ else: -+ ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg) -+ -+ if added: -+ __zypper__.call("al", *added) -+ -+ return ret -+ -+ -+def add_lock(packages, **kwargs): # pylint: disable=unused-argument - """ - Add a package lock. Specify packages to lock by exact name. - -@@ -2140,7 +2213,10 @@ def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument - salt '*' pkg.add_lock <package1>,<package2>,<package3> - salt '*' pkg.add_lock pkgs='["foo", "bar"]' - """ -- locks = list_locks(root) -+ salt.utils.versions.warn_until( -+ "Sodium", "This function is deprecated. Please use hold() instead." -+ ) -+ locks = list_locks() - added = [] - try: - packages = list(__salt__["pkg_resource.parse_targets"](packages)[0].keys()) -@@ -2495,7 +2571,7 @@ def search(criteria, refresh=False, **kwargs): - .getElementsByTagName("solvable") - ) - if not solvables: -- raise CommandExecutionError("No packages found matching '{0}'".format(criteria)) -+ raise CommandExecutionError("No packages found matching '{}'".format(criteria)) - - out = {} - for solvable in solvables: -@@ -2649,13 +2725,13 @@ def download(*packages, **kwargs): - if failed: - pkg_ret[ - "_error" -- ] = "The following package(s) failed to download: {0}".format( -+ ] = "The following package(s) failed to download: {}".format( - ", ".join(failed) - ) - return pkg_ret - - raise CommandExecutionError( -- "Unable to download packages: {0}".format(", ".join(packages)) -+ "Unable to download packages: {}".format(", ".join(packages)) - ) - - -@@ -2726,7 +2802,7 @@ def diff(*paths, **kwargs): - - if pkg_to_paths: - local_pkgs = __salt__["pkg.download"](*pkg_to_paths.keys(), **kwargs) -- for pkg, files in six.iteritems(pkg_to_paths): -+ for pkg, files in pkg_to_paths.items(): - for path in files: - ret[path] = ( - __salt__["lowpkg.diff"](local_pkgs[pkg]["path"], path) --- -2.29.2 - - diff --git a/add-migrated-state-and-gpg-key-management-functions-.patch b/add-migrated-state-and-gpg-key-management-functions-.patch index 5a81fa5..11e6c63 100644 --- a/add-migrated-state-and-gpg-key-management-functions-.patch +++ b/add-migrated-state-and-gpg-key-management-functions-.patch @@ -1,4 +1,4 @@ -From 57cab2d4e282f8b1d17610e6b4a0e772494bfcb1 Mon Sep 17 00:00:00 2001 +From acd8fbfd7b2c1fdf84b0250e245418d8c6e387ec Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 20 Oct 2020 11:43:09 +0200 Subject: [PATCH] Add "migrated" state and GPG key management functions @@ -17,19 +17,24 @@ for key management. This patch adds a similar API for zypperpkg and yumpkg, also part of the same virtual package, based on the counterpart from rpm_lowpkg API. 
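The new `pkgrepo.migrated` state is easiest to read with a concrete invocation in mind. A sketch under stated assumptions: the `keys` and `drop` parameter names are inferred from the `_repos_keys_migrate_drop(root, keys, drop)` helper exercised by the unit tests further down, so treat them as illustrative rather than the authoritative interface:

.. code-block:: bash

    # Hypothetical one-shot run: migrate the repository definitions (and the
    # GPG keys, when keys=True) from the host into the /mnt root; drop=True
    # would also remove repos/keys that exist only under /mnt.
    salt '*' state.single pkgrepo.migrated name=/mnt keys=True drop=True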
+ +Convert test to pytests --- - changelog/58782.added | 1 + - salt/modules/aptpkg.py | 7 +- - salt/modules/rpm_lowpkg.py | 151 +++++++++ - salt/modules/yumpkg.py | 88 +++++ - salt/modules/zypperpkg.py | 88 +++++ - salt/states/pkgrepo.py | 207 ++++++++++++ - tests/unit/modules/test_rpm_lowpkg.py | 236 ++++++++++++- - tests/unit/modules/test_yumpkg.py | 41 ++- - tests/unit/modules/test_zypperpkg.py | 40 ++- - tests/unit/states/test_pkgrepo.py | 468 +++++++++++++++++++++++++- - 10 files changed, 1301 insertions(+), 26 deletions(-) + changelog/58782.added | 1 + + salt/modules/aptpkg.py | 7 +- + salt/modules/rpm_lowpkg.py | 151 ++ + salt/modules/yumpkg.py | 89 +- + salt/modules/zypperpkg.py | 88 + + salt/states/pkgrepo.py | 207 +++ + tests/pytests/unit/modules/test_yumpkg.py | 87 +- + tests/pytests/unit/modules/test_zypperpkg.py | 66 +- + tests/pytests/unit/states/test_pkgrepo.py | 448 +++++ + tests/unit/modules/test_rpm_lowpkg.py | 232 ++- + tests/unit/modules/test_yumpkg.py | 1754 ------------------ + tests/unit/modules/test_zypperpkg.py | 44 +- + 12 files changed, 1399 insertions(+), 1775 deletions(-) create mode 100644 changelog/58782.added + delete mode 100644 tests/unit/modules/test_yumpkg.py diff --git a/changelog/58782.added b/changelog/58782.added new file mode 100644 @@ -40,10 +45,10 @@ index 0000000000..f9e69f64f2 +Add GPG key functions in "lowpkg" and a "migrated" function in the "pkgrepo" state for repository and GPG key migration. \ No newline at end of file diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index e4a9872aad..e001d2f11c 100644 +index 692d99f97e..1d9557b497 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -1908,7 +1908,7 @@ def _convert_if_int(value): +@@ -1918,7 +1918,7 @@ def _convert_if_int(value): return value @@ -52,7 +57,7 @@ index e4a9872aad..e001d2f11c 100644 """ .. versionadded:: 2017.7.0 -@@ -1990,7 +1990,9 @@ def get_repo_keys(): +@@ -2000,7 +2000,9 @@ def get_repo_keys(): return ret @@ -63,7 +68,7 @@ index e4a9872aad..e001d2f11c 100644 """ .. versionadded:: 2017.7.0 -@@ -2016,7 +2018,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base +@@ -2026,7 +2028,6 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA' """ cmd = ["apt-key"] @@ -72,10 +77,10 @@ index e4a9872aad..e001d2f11c 100644 current_repo_keys = get_repo_keys() diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py -index 393b0f453a..57f336bacf 100644 +index f610ca412c..370bd5b728 100644 --- a/salt/modules/rpm_lowpkg.py +++ b/salt/modules/rpm_lowpkg.py -@@ -835,3 +835,154 @@ def checksum(*paths, **kwargs): +@@ -828,3 +828,154 @@ def checksum(*paths, **kwargs): ) return ret @@ -231,13 +236,21 @@ index 393b0f453a..57f336bacf 100644 + cmd.extend(["-e", key]) + return __salt__["cmd.retcode"](cmd) == 0 diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index c58b3e4c70..dd843f985b 100644 +index 77cd14aaf2..fd79109e40 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py -@@ -3346,3 +3346,91 @@ def list_installed_patches(**kwargs): - salt '*' pkg.list_installed_patches - """ - return _get_patches(installed_only=True) +@@ -3391,7 +3391,6 @@ def services_need_restart(**kwargs): + + Requires systemd. + +- + CLI Examples: + + .. 
code-block:: bash +@@ -3417,3 +3416,91 @@ def services_need_restart(**kwargs): + services.add(service) + + return list(services) + + +def get_repo_keys(info=False, root=None, **kwargs): @@ -327,13 +340,13 @@ index c58b3e4c70..dd843f985b 100644 + """ + return __salt__["lowpkg.remove_gpg_key"](keyid, root) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index d06c265202..5e13c68708 100644 +index 7216e25b86..863be3c894 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -3004,3 +3004,91 @@ def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs): - else: - ret.append(name) - return ret +@@ -3125,3 +3125,91 @@ def services_need_restart(root=None, **kwargs): + services = zypper_output.split() + + return services + + +def get_repo_keys(info=False, root=None, **kwargs): @@ -423,10 +436,10 @@ index d06c265202..5e13c68708 100644 + """ + return __salt__["lowpkg.remove_gpg_key"](keyid, root) diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py -index 70cb7a1c7e..d734bb9de9 100644 +index c8c75e3244..e3d7f7084e 100644 --- a/salt/states/pkgrepo.py +++ b/salt/states/pkgrepo.py -@@ -93,6 +93,7 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS: +@@ -85,6 +85,7 @@ package managers are APT, DNF, YUM and Zypper. Here is some example SLS: """ @@ -434,7 +447,7 @@ index 70cb7a1c7e..d734bb9de9 100644 import sys import salt.utils.data -@@ -679,3 +680,209 @@ def absent(name, **kwargs): +@@ -672,3 +673,209 @@ def absent(name, **kwargs): ret["comment"] = "Failed to remove repo {}".format(name) return ret @@ -644,30 +657,661 @@ index 70cb7a1c7e..d734bb9de9 100644 + } + + return ret +diff --git a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py +index 66c86972a0..ef7100fe9d 100644 +--- a/tests/pytests/unit/modules/test_yumpkg.py ++++ b/tests/pytests/unit/modules/test_yumpkg.py +@@ -7,7 +7,7 @@ import salt.modules.rpm_lowpkg as rpm + import salt.modules.yumpkg as yumpkg + import salt.utils.platform + from salt.exceptions import CommandExecutionError, SaltInvocationError +-from tests.support.mock import MagicMock, Mock, patch ++from tests.support.mock import MagicMock, Mock, call, mock_open, patch + + try: + import pytest +@@ -1683,6 +1683,91 @@ def test_get_repo_with_non_existent_repo(list_repos_var): + assert ret == expected, ret + + ++def test_get_repo_keys(): ++ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)} ++ with patch.dict(yumpkg.__salt__, salt_mock): ++ assert yumpkg.get_repo_keys(info=True, root="/mnt") ++ salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt") ++ ++ ++def test_add_repo_key_fail(): ++ with pytest.raises(SaltInvocationError): ++ yumpkg.add_repo_key() ++ ++ with pytest.raises(SaltInvocationError): ++ yumpkg.add_repo_key(path="path", text="text") ++ ++ ++def test_add_repo_key_path(): ++ salt_mock = { ++ "cp.cache_file": MagicMock(return_value="path"), ++ "lowpkg.import_gpg_key": MagicMock(return_value=True), ++ } ++ with patch("salt.utils.files.fopen", mock_open(read_data="text")), patch.dict( ++ yumpkg.__salt__, salt_mock ++ ): ++ assert yumpkg.add_repo_key(path="path", root="/mnt") ++ salt_mock["cp.cache_file"].assert_called_once_with("path", "base") ++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") ++ ++ ++def test_add_repo_key_text(): ++ salt_mock = {"lowpkg.import_gpg_key": MagicMock(return_value=True)} ++ with patch.dict(yumpkg.__salt__, salt_mock): ++ assert yumpkg.add_repo_key(text="text", root="/mnt") ++ 
salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") ++ ++ ++def test_del_repo_key(): ++ salt_mock = {"lowpkg.remove_gpg_key": MagicMock(return_value=True)} ++ with patch.dict(yumpkg.__salt__, salt_mock): ++ assert yumpkg.del_repo_key(keyid="keyid", root="/mnt") ++ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") ++ ++ ++def test_pkg_update_dnf(): ++ """ ++ Tests that the proper CLI options are added when obsoletes=False ++ """ ++ name = "foo" ++ old = "1.2.2-1.fc31" ++ new = "1.2.3-1.fc31" ++ cmd_mock = MagicMock(return_value={"retcode": 0}) ++ list_pkgs_mock = MagicMock(side_effect=[{name: old}, {name: new}]) ++ parse_targets_mock = MagicMock(return_value=({"foo": None}, "repository")) ++ with patch.dict( ++ yumpkg.__salt__, ++ {"cmd.run_all": cmd_mock, "pkg_resource.parse_targets": parse_targets_mock}, ++ ), patch.object(yumpkg, "refresh_db", MagicMock()), patch.object( ++ yumpkg, "list_pkgs", list_pkgs_mock ++ ), patch.object( ++ yumpkg, "_yum", MagicMock(return_value="dnf") ++ ), patch( ++ "salt.utils.systemd.has_scope", MagicMock(return_value=False) ++ ): ++ ret = yumpkg.update(name, setopt="obsoletes=0,plugins=0") ++ expected = {name: {"old": old, "new": new}} ++ assert ret == expected, ret ++ ++ cmd_mock.assert_called_once_with( ++ [ ++ "dnf", ++ "--quiet", ++ "-y", ++ "--setopt", ++ "obsoletes=0", ++ "--setopt", ++ "plugins=0", ++ "--obsoletes=False", ++ "upgrade", ++ "foo", ++ ], ++ env={}, ++ output_loglevel="trace", ++ python_shell=False, ++ ) ++ ++ + def test_call_yum_default(): + """ + Call default Yum/Dnf. +diff --git a/tests/pytests/unit/modules/test_zypperpkg.py b/tests/pytests/unit/modules/test_zypperpkg.py +index aece43ea29..37bbef87b7 100644 +--- a/tests/pytests/unit/modules/test_zypperpkg.py ++++ b/tests/pytests/unit/modules/test_zypperpkg.py +@@ -8,7 +8,8 @@ import os + import pytest + import salt.modules.pkg_resource as pkg_resource + import salt.modules.zypperpkg as zypper +-from tests.support.mock import MagicMock, patch ++from salt.exceptions import SaltInvocationError ++from tests.support.mock import MagicMock, mock_open, patch + + + @pytest.fixture +@@ -55,3 +56,66 @@ def test_list_pkgs_no_context(): + pkgs = zypper.list_pkgs(versions_as_list=True, use_context=False) + list_pkgs_context_mock.assert_not_called() + list_pkgs_context_mock.reset_mock() ++ ++ ++def test_normalize_name(): ++ """ ++ Test that package is normalized only when it should be ++ """ ++ with patch.dict(zypper.__grains__, {"osarch": "x86_64"}): ++ result = zypper.normalize_name("foo") ++ assert result == "foo", result ++ result = zypper.normalize_name("foo.x86_64") ++ assert result == "foo", result ++ result = zypper.normalize_name("foo.noarch") ++ assert result == "foo", result ++ ++ with patch.dict(zypper.__grains__, {"osarch": "aarch64"}): ++ result = zypper.normalize_name("foo") ++ assert result == "foo", result ++ result = zypper.normalize_name("foo.aarch64") ++ assert result == "foo", result ++ result = zypper.normalize_name("foo.noarch") ++ assert result == "foo", result ++ ++ ++def test_get_repo_keys(): ++ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)} ++ with patch.dict(zypper.__salt__, salt_mock): ++ assert zypper.get_repo_keys(info=True, root="/mnt") ++ salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt") ++ ++ ++def test_add_repo_key_fail(): ++ with pytest.raises(SaltInvocationError): ++ zypper.add_repo_key() ++ ++ with pytest.raises(SaltInvocationError): ++ zypper.add_repo_key(path="path", 
text="text") ++ ++ ++def test_add_repo_key_path(): ++ salt_mock = { ++ "cp.cache_file": MagicMock(return_value="path"), ++ "lowpkg.import_gpg_key": MagicMock(return_value=True), ++ } ++ with patch("salt.utils.files.fopen", mock_open(read_data="text")), patch.dict( ++ zypper.__salt__, salt_mock ++ ): ++ assert zypper.add_repo_key(path="path", root="/mnt") ++ salt_mock["cp.cache_file"].assert_called_once_with("path", "base") ++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") ++ ++ ++def test_add_repo_key_text(): ++ salt_mock = {"lowpkg.import_gpg_key": MagicMock(return_value=True)} ++ with patch.dict(zypper.__salt__, salt_mock): ++ assert zypper.add_repo_key(text="text", root="/mnt") ++ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") ++ ++ ++def test_del_repo_key(): ++ salt_mock = {"lowpkg.remove_gpg_key": MagicMock(return_value=True)} ++ with patch.dict(zypper.__salt__, salt_mock): ++ assert zypper.del_repo_key(keyid="keyid", root="/mnt") ++ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") +diff --git a/tests/pytests/unit/states/test_pkgrepo.py b/tests/pytests/unit/states/test_pkgrepo.py +index daa913bcc2..cbb12cfb9b 100644 +--- a/tests/pytests/unit/states/test_pkgrepo.py ++++ b/tests/pytests/unit/states/test_pkgrepo.py +@@ -51,3 +51,451 @@ def test_update_key_url(): + assert ret["changes"] == { + "key_url": {"old": kwargs["key_url"], "new": changed_kwargs["key_url"]} + } ++ ++ ++def test__normalize_repo_suse(): ++ repo = { ++ "name": "repo name", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ } ++ grains = {"os_family": "Suse"} ++ with patch.dict(pkgrepo.__grains__, grains): ++ assert pkgrepo._normalize_repo(repo) == { ++ "humanname": "repo name", ++ "refresh": True, ++ "priority": 0, ++ } ++ ++ ++def test__normalize_key_rpm(): ++ key = {"Description": "key", "Date": "Date", "Other": "Other"} ++ for os_family in ("Suse", "RedHat"): ++ grains = {"os_family": os_family} ++ with patch.dict(pkgrepo.__grains__, grains): ++ assert pkgrepo._normalize_key(key) == {"key": "key"} ++ ++ ++def test__repos_keys_migrate_drop_migrate_to_empty(): ++ src_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-2": { ++ "name": "repo name 2", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ tgt_repos = {} ++ ++ src_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key2": {"Description": "key2", "Other": "Other2"}, ++ } ++ tgt_keys = {} ++ ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), ++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo._repos_keys_migrate_drop("/mnt", False, False) == ( ++ { ++ ( ++ "repo-1", ++ ( ++ ("humanname", "repo name 1"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ ( ++ "repo-2", ++ ( ++ ("humanname", "repo name 2"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ set(), ++ set(), ++ set(), ++ ) ++ ++ ++def test__repos_keys_migrate_drop_migrate_to_empty_keys(): ++ src_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-2": { ++ "name": "repo name 2", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ tgt_repos 
= {} ++ ++ src_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key2": {"Description": "key2", "Other": "Other2"}, ++ } ++ tgt_keys = {} ++ ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), ++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, False) == ( ++ { ++ ( ++ "repo-1", ++ ( ++ ("humanname", "repo name 1"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ ( ++ "repo-2", ++ ( ++ ("humanname", "repo name 2"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ set(), ++ {("key1", (("key", "key1"),)), ("key2", (("key", "key2"),))}, ++ set(), ++ ) ++ ++ ++def test__repos_keys_migrate_drop_migrate_to_populated_no_drop(): ++ src_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-2": { ++ "name": "repo name 2", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ tgt_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-3": { ++ "name": "repo name 3", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ ++ src_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key2": {"Description": "key2", "Other": "Other2"}, ++ } ++ tgt_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key3": {"Description": "key3", "Other": "Other2"}, ++ } ++ ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), ++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, False) == ( ++ { ++ ( ++ "repo-2", ++ ( ++ ("humanname", "repo name 2"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ set(), ++ {("key2", (("key", "key2"),))}, ++ set(), ++ ) ++ ++ ++def test__repos_keys_migrate_drop_migrate_to_populated_drop(): ++ src_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-2": { ++ "name": "repo name 2", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ tgt_repos = { ++ "repo-1": { ++ "name": "repo name 1", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": True, ++ }, ++ "repo-3": { ++ "name": "repo name 3", ++ "autorefresh": True, ++ "priority": 0, ++ "pkg_gpgcheck": False, ++ }, ++ } ++ ++ src_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key2": {"Description": "key2", "Other": "Other2"}, ++ } ++ tgt_keys = { ++ "key1": {"Description": "key1", "Other": "Other1"}, ++ "key3": {"Description": "key3", "Other": "Other2"}, ++ } ++ ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), ++ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo._repos_keys_migrate_drop("/mnt", True, True) == ( ++ { ++ ( ++ "repo-2", ++ ( ++ ("humanname", "repo name 2"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ 
{ ++ ( ++ "repo-3", ++ ( ++ ("humanname", "repo name 3"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ {("key2", (("key", "key2"),))}, ++ {("key3", (("key", "key3"),))}, ++ ) ++ ++ ++@pytest.mark.skip_on_windows(reason="Not a Windows test") ++def test__copy_repository_to_suse(): ++ grains = {"os_family": "Suse"} ++ salt_mock = {"file.copy": MagicMock()} ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ pkgrepo._copy_repository_to("/mnt") ++ salt_mock["file.copy"].assert_called_with( ++ src="/etc/zypp/repos.d", dst="/mnt/etc/zypp/repos.d", recurse=True ++ ) ++ ++ ++def test_migrated_non_supported_platform(): ++ grains = {"os_family": "Debian"} ++ with patch.dict(pkgrepo.__grains__, grains): ++ assert pkgrepo.migrated("/mnt") == { ++ "name": "/mnt", ++ "result": False, ++ "changes": {}, ++ "comment": "Migration not supported for this platform", ++ } ++ ++ ++def test_migrated_missing_keys_api(): ++ grains = {"os_family": "Suse"} ++ with patch.dict(pkgrepo.__grains__, grains): ++ assert pkgrepo.migrated("/mnt") == { ++ "name": "/mnt", ++ "result": False, ++ "changes": {}, ++ "comment": "Keys cannot be migrated for this platform", ++ } ++ ++ ++def test_migrated_wrong_method(): ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "lowpkg.import_gpg_key": True, ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo.migrated("/mnt", method_="magic") == { ++ "name": "/mnt", ++ "result": False, ++ "changes": {}, ++ "comment": "Migration method not supported", ++ } ++ ++ ++@patch( ++ "salt.states.pkgrepo._repos_keys_migrate_drop", ++ MagicMock(return_value=(set(), set(), set(), set())), ++) ++def test_migrated_empty(): ++ grains = {"os_family": "Suse"} ++ salt_mock = { ++ "lowpkg.import_gpg_key": True, ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__salt__, salt_mock ++ ): ++ assert pkgrepo.migrated("/mnt") == { ++ "name": "/mnt", ++ "result": True, ++ "changes": {}, ++ "comment": "Repositories are already migrated", ++ } ++ ++ ++def test_migrated(): ++ _repos_keys_migrate_drop = MagicMock() ++ _repos_keys_migrate_drop.side_effect = [ ++ ( ++ { ++ ( ++ "repo-1", ++ ( ++ ("humanname", "repo name 1"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ { ++ ( ++ "repo-2", ++ ( ++ ("humanname", "repo name 2"), ++ ("priority", 0), ++ ("refresh", True), ++ ), ++ ), ++ }, ++ {("key1", (("key", "key1"),))}, ++ {("key2", (("key", "key2"),))}, ++ ), ++ (set(), set(), set(), set()), ++ ] ++ ++ grains = {"os_family": "Suse"} ++ opts = {"test": False} ++ salt_mock = { ++ "pkg.mod_repo": MagicMock(), ++ "pkg.del_repo": MagicMock(), ++ "lowpkg.import_gpg_key": MagicMock(), ++ "lowpkg.remove_gpg_key": MagicMock(), ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__opts__, opts ++ ), patch.dict(pkgrepo.__salt__, salt_mock), patch( ++ "salt.states.pkgrepo._repos_keys_migrate_drop", _repos_keys_migrate_drop ++ ): ++ assert pkgrepo.migrated("/mnt", True, True) == { ++ "name": "/mnt", ++ "result": True, ++ "changes": { ++ "repos migrated": ["repo-1"], ++ "repos dropped": ["repo-2"], ++ "keys migrated": ["key1"], ++ "keys dropped": ["key2"], ++ }, ++ "comment": "Repositories synchronized", ++ } ++ salt_mock["pkg.mod_repo"].assert_called_with( ++ "repo-1", humanname="repo name 1", priority=0, refresh=True, root="/mnt" ++ ) ++ salt_mock["pkg.del_repo"].assert_called_with("repo-2", root="/mnt") ++ 
salt_mock["lowpkg.import_gpg_key"].assert_called_with("key1", root="/mnt") ++ salt_mock["lowpkg.remove_gpg_key"].assert_called_with("key2", root="/mnt") ++ ++ ++def test_migrated_test(): ++ _repos_keys_migrate_drop = MagicMock() ++ _repos_keys_migrate_drop.return_value = ( ++ { ++ ( ++ "repo-1", ++ (("humanname", "repo name 1"), ("priority", 0), ("refresh", True)), ++ ), ++ }, ++ { ++ ( ++ "repo-2", ++ (("humanname", "repo name 2"), ("priority", 0), ("refresh", True)), ++ ), ++ }, ++ {("key1", (("key", "key1"),))}, ++ {("key2", (("key", "key2"),))}, ++ ) ++ ++ grains = {"os_family": "Suse"} ++ opts = {"test": True} ++ salt_mock = { ++ "lowpkg.import_gpg_key": True, ++ } ++ with patch.dict(pkgrepo.__grains__, grains), patch.dict( ++ pkgrepo.__opts__, opts ++ ), patch.dict(pkgrepo.__salt__, salt_mock), patch( ++ "salt.states.pkgrepo._repos_keys_migrate_drop", _repos_keys_migrate_drop ++ ): ++ assert pkgrepo.migrated("/mnt", True, True) == { ++ "name": "/mnt", ++ "result": None, ++ "changes": { ++ "repos to migrate": ["repo-1"], ++ "repos to drop": ["repo-2"], ++ "keys to migrate": ["key1"], ++ "keys to drop": ["key2"], ++ }, ++ "comment": "There are keys or repositories to migrate or drop", ++ } diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py -index ec9ecd40cb..84020263ea 100644 +index e7e8230510..280a19b911 100644 --- a/tests/unit/modules/test_rpm_lowpkg.py +++ b/tests/unit/modules/test_rpm_lowpkg.py -@@ -2,6 +2,7 @@ - :codeauthor: Jayesh Kariya - """ +@@ -5,6 +5,7 @@ + # Import Python Libs + from __future__ import absolute_import +import datetime + # Import Salt Libs import salt.modules.rpm_lowpkg as rpm - from tests.support.mixins import LoaderModuleMockMixin -@@ -15,8 +16,8 @@ def _called_with_root(mock): - - - def _called_with_root(mock): -- cmd = ' '.join(mock.call_args[0][0]) -- return cmd.startswith('rpm --root /') -+ cmd = " ".join(mock.call_args[0][0]) -+ return cmd.startswith("rpm --root /") - - - class RpmTestCase(TestCase, LoaderModuleMockMixin): -@@ -263,14 +264,223 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin): +@@ -255,14 +256,223 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin): :return: """ @@ -903,79 +1547,1784 @@ index ec9ecd40cb..84020263ea 100644 + self.assertTrue(rpm.remove_gpg_key("gpg-pubkey-1")) + self.assertFalse(_called_with_root(mock)) diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py -index 4784160d25..e65a1f8b8b 100644 +deleted file mode 100644 +index fd57faad32..0000000000 --- a/tests/unit/modules/test_yumpkg.py -+++ b/tests/unit/modules/test_yumpkg.py -@@ -5,9 +5,9 @@ import salt.modules.pkg_resource as pkg_resource - import salt.modules.rpm_lowpkg as rpm - import salt.modules.yumpkg as yumpkg - import salt.utils.platform --from salt.exceptions import CommandExecutionError -+from salt.exceptions import CommandExecutionError, SaltInvocationError - from tests.support.mixins import LoaderModuleMockMixin --from tests.support.mock import MagicMock, Mock, patch -+from tests.support.mock import MagicMock, Mock, mock_open, patch - from tests.support.unit import TestCase, skipIf - - try: -@@ -1630,6 +1630,43 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): - ret = yumpkg.get_repo(repo, **kwargs) - assert ret == expected, ret - -+ def test_get_repo_keys(self): -+ salt_mock = {"lowpkg.list_gpg_keys": MagicMock(return_value=True)} -+ with patch.dict(yumpkg.__salt__, salt_mock): -+ self.assertTrue(yumpkg.get_repo_keys(info=True, root="/mnt")) -+ 
salt_mock["lowpkg.list_gpg_keys"].assert_called_once_with(True, "/mnt") -+ -+ def test_add_repo_key_fail(self): -+ with self.assertRaises(SaltInvocationError): -+ yumpkg.add_repo_key() -+ -+ with self.assertRaises(SaltInvocationError): -+ yumpkg.add_repo_key(path="path", text="text") -+ -+ def test_add_repo_key_path(self): -+ salt_mock = { -+ "cp.cache_file": MagicMock(return_value="path"), -+ "lowpkg.import_gpg_key": MagicMock(return_value=True), -+ } -+ with patch("salt.utils.files.fopen", mock_open(read_data="text")), patch.dict( -+ yumpkg.__salt__, salt_mock -+ ): -+ self.assertTrue(yumpkg.add_repo_key(path="path", root="/mnt")) -+ salt_mock["cp.cache_file"].assert_called_once_with("path", "base") -+ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") -+ -+ def test_add_repo_key_text(self): -+ salt_mock = {"lowpkg.import_gpg_key": MagicMock(return_value=True)} -+ with patch.dict(yumpkg.__salt__, salt_mock): -+ self.assertTrue(yumpkg.add_repo_key(text="text", root="/mnt")) -+ salt_mock["lowpkg.import_gpg_key"].assert_called_once_with("text", "/mnt") -+ -+ def test_del_repo_key(self): -+ salt_mock = {"lowpkg.remove_gpg_key": MagicMock(return_value=True)} -+ with patch.dict(yumpkg.__salt__, salt_mock): -+ self.assertTrue(yumpkg.del_repo_key(keyid="keyid", root="/mnt")) -+ salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") -+ - - @skipIf(pytest is None, "PyTest is missing") - class YumUtilsTestCase(TestCase, LoaderModuleMockMixin): ++++ /dev/null +@@ -1,1754 +0,0 @@ +-import os +- +-import salt.modules.cmdmod as cmdmod +-import salt.modules.pkg_resource as pkg_resource +-import salt.modules.rpm_lowpkg as rpm +-import salt.modules.yumpkg as yumpkg +-import salt.utils.platform +-from salt.exceptions import CommandExecutionError, SaltInvocationError +-from tests.support.mixins import LoaderModuleMockMixin +-from tests.support.mock import MagicMock, Mock, call, patch +-from tests.support.unit import TestCase, skipIf +- +-try: +- import pytest +-except ImportError: +- pytest = None +- +-LIST_REPOS = { +- "base": { +- "file": "/etc/yum.repos.d/CentOS-Base.repo", +- "gpgcheck": "1", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra", +- "name": "CentOS-$releasever - Base", +- }, +- "base-source": { +- "baseurl": "http://vault.centos.org/centos/$releasever/os/Source/", +- "enabled": "0", +- "file": "/etc/yum.repos.d/CentOS-Sources.repo", +- "gpgcheck": "1", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Base Sources", +- }, +- "updates": { +- "file": "/etc/yum.repos.d/CentOS-Base.repo", +- "gpgcheck": "1", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra", +- "name": "CentOS-$releasever - Updates", +- }, +- "updates-source": { +- "baseurl": "http://vault.centos.org/centos/$releasever/updates/Source/", +- "enabled": "0", +- "file": "/etc/yum.repos.d/CentOS-Sources.repo", +- "gpgcheck": "1", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Updates Sources", +- }, +-} +- +- +-class YumTestCase(TestCase, LoaderModuleMockMixin): +- """ +- Test cases for salt.modules.yumpkg +- """ +- +- def setup_loader_modules(self): +- return { +- yumpkg: { +- "__context__": {"yum_bin": "yum"}, +- "__grains__": { +- "osarch": "x86_64", +- "os": 
"CentOS", +- "os_family": "RedHat", +- "osmajorrelease": 7, +- }, +- }, +- pkg_resource: {}, +- } +- +- def test_list_pkgs(self): +- """ +- Test packages listing. +- +- :return: +- """ +- +- def _add_data(data, key, value): +- data.setdefault(key, []).append(value) +- +- rpm_out = [ +- "python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471", +- "alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475", +- "gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477", +- "rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477", +- "pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478", +- "yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479", +- "lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479", +- "qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480", +- "ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480", +- "shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481", +- "util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484", +- "openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485", +- "virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486", +- ] +- with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict( +- yumpkg.__salt__, +- {"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))}, +- ), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict( +- yumpkg.__salt__, +- {"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list}, +- ), patch.dict( +- yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()} +- ), patch.dict( +- pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch} +- ): +- pkgs = yumpkg.list_pkgs(versions_as_list=True) +- for pkg_name, pkg_version in { +- "python-urlgrabber": "3.10-8.el7", +- "alsa-lib": "1.1.1-1.el7", +- "gnupg2": "2.0.22-4.el7", +- "rpm-python": "4.11.3-21.el7", +- "pygpgme": "0.3-9.el7", +- "yum": "3.4.3-150.el7.centos", +- "lzo": "2.06-8.el7", +- "qrencode-libs": "3.4.1-3.el7", +- "ustr": "1.0.4-16.el7", +- "shadow-utils": "2:4.1.5.1-24.el7", +- "util-linux": "2.23.2-33.el7", +- "openssh": "6.6.1p1-33.el7_3", +- "virt-what": "1.13-8.el7", +- }.items(): +- self.assertTrue(pkgs.get(pkg_name)) +- self.assertEqual(pkgs[pkg_name], [pkg_version]) +- +- def test_list_pkgs_with_attr(self): +- """ +- Test packages listing with the attr parameter +- +- :return: +- """ +- +- def _add_data(data, key, value): +- data.setdefault(key, []).append(value) +- +- rpm_out = [ +- "python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471", +- "alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475", +- "gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477", +- "rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477", +- "pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478", +- "yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479", +- "lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479", +- "qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480", +- "ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480", +- "shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481", +- "util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484", +- "openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485", +- "virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486", +- ] +- with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), 
patch.dict( +- yumpkg.__salt__, +- {"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))}, +- ), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict( +- yumpkg.__salt__, +- {"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list}, +- ), patch.dict( +- yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()} +- ), patch.dict( +- pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch} +- ): +- pkgs = yumpkg.list_pkgs( +- attr=["epoch", "release", "arch", "install_date_time_t"] +- ) +- for pkg_name, pkg_attr in { +- "python-urlgrabber": { +- "version": "3.10", +- "release": "8.el7", +- "arch": "noarch", +- "install_date_time_t": 1487838471, +- "epoch": None, +- }, +- "alsa-lib": { +- "version": "1.1.1", +- "release": "1.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838475, +- "epoch": None, +- }, +- "gnupg2": { +- "version": "2.0.22", +- "release": "4.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838477, +- "epoch": None, +- }, +- "rpm-python": { +- "version": "4.11.3", +- "release": "21.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838477, +- "epoch": None, +- }, +- "pygpgme": { +- "version": "0.3", +- "release": "9.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838478, +- "epoch": None, +- }, +- "yum": { +- "version": "3.4.3", +- "release": "150.el7.centos", +- "arch": "noarch", +- "install_date_time_t": 1487838479, +- "epoch": None, +- }, +- "lzo": { +- "version": "2.06", +- "release": "8.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838479, +- "epoch": None, +- }, +- "qrencode-libs": { +- "version": "3.4.1", +- "release": "3.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838480, +- "epoch": None, +- }, +- "ustr": { +- "version": "1.0.4", +- "release": "16.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838480, +- "epoch": None, +- }, +- "shadow-utils": { +- "epoch": "2", +- "version": "4.1.5.1", +- "release": "24.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838481, +- }, +- "util-linux": { +- "version": "2.23.2", +- "release": "33.el7", +- "arch": "x86_64", +- "install_date_time_t": 1487838484, +- "epoch": None, +- }, +- "openssh": { +- "version": "6.6.1p1", +- "release": "33.el7_3", +- "arch": "x86_64", +- "install_date_time_t": 1487838485, +- "epoch": None, +- }, +- "virt-what": { +- "version": "1.13", +- "release": "8.el7", +- "install_date_time_t": 1487838486, +- "arch": "x86_64", +- "epoch": None, +- }, +- }.items(): +- +- self.assertTrue(pkgs.get(pkg_name)) +- self.assertEqual(pkgs[pkg_name], [pkg_attr]) +- +- def test_list_pkgs_with_attr_multiple_versions(self): +- """ +- Test packages listing with the attr parameter reporting multiple version installed +- +- :return: +- """ +- +- def _add_data(data, key, value): +- data.setdefault(key, []).append(value) +- +- rpm_out = [ +- "glibc_|-(none)_|-2.12_|-1.212.el6_|-i686_|-(none)_|-1542394210" +- "glibc_|-(none)_|-2.12_|-1.212.el6_|-x86_64_|-(none)_|-1542394204", +- "virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486", +- "virt-what_|-(none)_|-1.10_|-2.el7_|-x86_64_|-(none)_|-1387838486", +- ] +- with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict( +- yumpkg.__salt__, +- {"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))}, +- ), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict( +- yumpkg.__salt__, +- {"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list}, +- ), patch.dict( +- yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()} +- 
), patch.dict( +- pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch} +- ): +- pkgs = yumpkg.list_pkgs( +- attr=["epoch", "release", "arch", "install_date_time_t"] +- ) +- expected_pkg_list = { +- "glibc": [ +- { +- "version": "2.12", +- "release": "1.212.el6", +- "install_date_time_t": 1542394210, +- "arch": "i686", +- "epoch": None, +- }, +- { +- "version": "2.12", +- "release": "1.212.el6", +- "install_date_time_t": 1542394204, +- "arch": "x86_64", +- "epoch": None, +- }, +- ], +- "virt-what": [ +- { +- "version": "1.10", +- "release": "2.el7", +- "install_date_time_t": 1387838486, +- "arch": "x86_64", +- "epoch": None, +- }, +- { +- "version": "1.13", +- "release": "8.el7", +- "install_date_time_t": 1487838486, +- "arch": "x86_64", +- "epoch": None, +- }, +- ], +- } +- for pkgname, pkginfo in pkgs.items(): +- self.assertCountEqual(pkginfo, expected_pkg_list[pkgname]) +- +- def test_list_patches(self): +- """ +- Test patches listing. +- +- :return: +- """ +- yum_out = [ +- "i my-fake-patch-not-installed-1234 recommended spacewalk-usix-2.7.5.2-2.2.noarch", +- " my-fake-patch-not-installed-1234 recommended spacewalksd-5.0.26.2-21.2.x86_64", +- "i my-fake-patch-not-installed-1234 recommended suseRegisterInfo-3.1.1-18.2.x86_64", +- "i my-fake-patch-installed-1234 recommended my-package-one-1.1-0.1.x86_64", +- "i my-fake-patch-installed-1234 recommended my-package-two-1.1-0.1.x86_64", +- ] +- +- expected_patches = { +- "my-fake-patch-not-installed-1234": { +- "installed": False, +- "summary": [ +- "spacewalk-usix-2.7.5.2-2.2.noarch", +- "spacewalksd-5.0.26.2-21.2.x86_64", +- "suseRegisterInfo-3.1.1-18.2.x86_64", +- ], +- }, +- "my-fake-patch-installed-1234": { +- "installed": True, +- "summary": [ +- "my-package-one-1.1-0.1.x86_64", +- "my-package-two-1.1-0.1.x86_64", +- ], +- }, +- } +- +- with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict( +- yumpkg.__salt__, +- {"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))}, +- ): +- patches = yumpkg.list_patches() +- self.assertFalse(patches["my-fake-patch-not-installed-1234"]["installed"]) +- self.assertTrue( +- len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3 +- ) +- for _patch in expected_patches["my-fake-patch-not-installed-1234"][ +- "summary" +- ]: +- self.assertTrue( +- _patch in patches["my-fake-patch-not-installed-1234"]["summary"] +- ) +- +- self.assertTrue(patches["my-fake-patch-installed-1234"]["installed"]) +- self.assertTrue( +- len(patches["my-fake-patch-installed-1234"]["summary"]) == 2 +- ) +- for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]: +- self.assertTrue( +- _patch in patches["my-fake-patch-installed-1234"]["summary"] +- ) +- +- def test_latest_version_with_options(self): +- with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})): +- +- # with fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.latest_version( +- "foo", refresh=False, fromrepo="good", branch="foo" +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "available", +- "foo", +- ], +- env={}, +- ignore_retcode=True, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # without fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": 
MagicMock(return_value=False)}, +- ): +- yumpkg.latest_version( +- "foo", +- refresh=False, +- enablerepo="good", +- disablerepo="bad", +- branch="foo", +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "available", +- "foo", +- ], +- env={}, +- ignore_retcode=True, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # without fromrepo, but within the scope +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch("salt.utils.systemd.has_scope", MagicMock(return_value=True)): +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=True)}, +- ): +- yumpkg.latest_version( +- "foo", +- refresh=False, +- enablerepo="good", +- disablerepo="bad", +- branch="foo", +- ) +- cmd.assert_called_once_with( +- [ +- "systemd-run", +- "--scope", +- "yum", +- "--quiet", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "available", +- "foo", +- ], +- env={}, +- ignore_retcode=True, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- def test_list_repo_pkgs_with_options(self): +- """ +- Test list_repo_pkgs with and without fromrepo +- +- NOTE: mock_calls is a stack. The most recent call is indexed +- with 0, while the first call would have the highest index. +- """ +- really_old_yum = MagicMock(return_value="3.2.0") +- older_yum = MagicMock(return_value="3.4.0") +- newer_yum = MagicMock(return_value="3.4.5") +- list_repos_mock = MagicMock(return_value=LIST_REPOS) +- kwargs = { +- "output_loglevel": "trace", +- "ignore_retcode": True, +- "python_shell": False, +- "env": {}, +- } +- +- with patch.object(yumpkg, "list_repos", list_repos_mock): +- +- # Test with really old yum. The fromrepo argument has no effect on +- # the yum commands we'd run. +- with patch.dict(yumpkg.__salt__, {"cmd.run": really_old_yum}): +- +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_repo_pkgs("foo") +- # We should have called cmd.run_all twice +- assert len(cmd.mock_calls) == 2 +- +- # Check args from first call +- assert cmd.mock_calls[1][1] == ( +- ["yum", "--quiet", "list", "available"], +- ) +- +- # Check kwargs from first call +- assert cmd.mock_calls[1][2] == kwargs +- +- # Check args from second call +- assert cmd.mock_calls[0][1] == ( +- ["yum", "--quiet", "list", "installed"], +- ) +- +- # Check kwargs from second call +- assert cmd.mock_calls[0][2] == kwargs +- +- # Test with really old yum. The fromrepo argument has no effect on +- # the yum commands we'd run. +- with patch.dict(yumpkg.__salt__, {"cmd.run": older_yum}): +- +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_repo_pkgs("foo") +- # We should have called cmd.run_all twice +- assert len(cmd.mock_calls) == 2 +- +- # Check args from first call +- assert cmd.mock_calls[1][1] == ( +- ["yum", "--quiet", "--showduplicates", "list", "available"], +- ) +- +- # Check kwargs from first call +- assert cmd.mock_calls[1][2] == kwargs +- +- # Check args from second call +- assert cmd.mock_calls[0][1] == ( +- ["yum", "--quiet", "--showduplicates", "list", "installed"], +- ) +- +- # Check kwargs from second call +- assert cmd.mock_calls[0][2] == kwargs +- +- # Test with newer yum. 
We should run one yum command per repo, so +- # fromrepo would limit how many calls we make. +- with patch.dict(yumpkg.__salt__, {"cmd.run": newer_yum}): +- +- # When fromrepo is used, we would only run one yum command, for +- # that specific repo. +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_repo_pkgs("foo", fromrepo="base") +- # We should have called cmd.run_all once +- assert len(cmd.mock_calls) == 1 +- +- # Check args +- assert cmd.mock_calls[0][1] == ( +- [ +- "yum", +- "--quiet", +- "--showduplicates", +- "repository-packages", +- "base", +- "list", +- "foo", +- ], +- ) +- # Check kwargs +- assert cmd.mock_calls[0][2] == kwargs +- +- # Test enabling base-source and disabling updates. We should +- # get two calls, one for each enabled repo. Because dict +- # iteration order will vary, different Python versions will be +- # do them in different orders, which is OK, but it will just +- # mean that we will have to check both the first and second +- # mock call both times. +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_repo_pkgs( +- "foo", enablerepo="base-source", disablerepo="updates" +- ) +- # We should have called cmd.run_all twice +- assert len(cmd.mock_calls) == 2 +- +- for repo in ("base", "base-source"): +- for index in (0, 1): +- try: +- # Check args +- assert cmd.mock_calls[index][1] == ( +- [ +- "yum", +- "--quiet", +- "--showduplicates", +- "repository-packages", +- repo, +- "list", +- "foo", +- ], +- ) +- # Check kwargs +- assert cmd.mock_calls[index][2] == kwargs +- break +- except AssertionError: +- continue +- else: +- self.fail("repo '{}' not checked".format(repo)) +- +- def test_list_upgrades_dnf(self): +- """ +- The subcommand should be "upgrades" with dnf +- """ +- with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}): +- # with fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo") +- cmd.assert_called_once_with( +- [ +- "dnf", +- "--quiet", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "upgrades", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- # without fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_upgrades( +- refresh=False, enablerepo="good", disablerepo="bad", branch="foo" +- ) +- cmd.assert_called_once_with( +- [ +- "dnf", +- "--quiet", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "upgrades", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- def test_list_upgrades_yum(self): +- """ +- The subcommand should be "updates" with yum +- """ +- # with fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo") +- cmd.assert_called_once_with( +- [ +- "yum", +- 
"--quiet", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "updates", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- # without fromrepo +- cmd = MagicMock(return_value={"retcode": 0, "stdout": ""}) +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.list_upgrades( +- refresh=False, enablerepo="good", disablerepo="bad", branch="foo" +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "list", +- "updates", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- def test_refresh_db_with_options(self): +- +- with patch("salt.utils.pkg.clear_rtag", Mock()): +- +- # With check_update=True we will do a cmd.run to run the clean_cmd, and +- # then a separate cmd.retcode to check for updates. +- +- # with fromrepo +- yum_call = MagicMock() +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.refresh_db(check_update=True, fromrepo="good", branch="foo") +- +- assert yum_call.call_count == 2 +- yum_call.assert_any_call( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "clean", +- "expire-cache", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- ], +- env={}, +- ignore_retcode=True, +- output_loglevel="trace", +- python_shell=False, +- ) +- yum_call.assert_any_call( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "check-update", +- "--setopt=autocheck_running_kernel=false", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- ], +- output_loglevel="trace", +- env={}, +- ignore_retcode=True, +- python_shell=False, +- ) +- +- # without fromrepo +- yum_call = MagicMock() +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.refresh_db( +- check_update=True, +- enablerepo="good", +- disablerepo="bad", +- branch="foo", +- ) +- assert yum_call.call_count == 2 +- yum_call.assert_any_call( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "clean", +- "expire-cache", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- ], +- env={}, +- ignore_retcode=True, +- output_loglevel="trace", +- python_shell=False, +- ) +- yum_call.assert_any_call( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "check-update", +- "--setopt=autocheck_running_kernel=false", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- ], +- output_loglevel="trace", +- env={}, +- ignore_retcode=True, +- python_shell=False, +- ) +- +- # With check_update=False we will just do a cmd.run for the clean_cmd +- +- # with fromrepo +- yum_call = MagicMock() +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.refresh_db(check_update=False, fromrepo="good", branch="foo") +- assert yum_call.call_count == 1 +- yum_call.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "clean", +- "expire-cache", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- # without fromrepo +- yum_call = MagicMock() +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg.refresh_db( +- 
check_update=False, +- enablerepo="good", +- disablerepo="bad", +- branch="foo", +- ) +- assert yum_call.call_count == 1 +- yum_call.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "--assumeyes", +- "clean", +- "expire-cache", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- ], +- env={}, +- output_loglevel="trace", +- ignore_retcode=True, +- python_shell=False, +- ) +- +- def test_install_with_options(self): +- parse_targets = MagicMock(return_value=({"foo": None}, "repository")) +- with patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value={}) +- ), patch.object(yumpkg, "list_holds", MagicMock(return_value=[])), patch.dict( +- yumpkg.__salt__, {"pkg_resource.parse_targets": parse_targets} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- +- # with fromrepo +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}): +- yumpkg.install( +- refresh=False, +- fromrepo="good", +- branch="foo", +- setopt="obsoletes=0,plugins=0", +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "-y", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- "--setopt", +- "obsoletes=0", +- "--setopt", +- "plugins=0", +- "install", +- "foo", +- ], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ignore_retcode=False, +- redirect_stderr=True, +- ) +- +- # without fromrepo +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}): +- yumpkg.install( +- refresh=False, +- enablerepo="good", +- disablerepo="bad", +- branch="foo", +- setopt="obsoletes=0,plugins=0", +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "-y", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "--setopt", +- "obsoletes=0", +- "--setopt", +- "plugins=0", +- "install", +- "foo", +- ], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ignore_retcode=False, +- redirect_stderr=True, +- ) +- +- def test_install_with_epoch(self): +- """ +- Tests that we properly identify a version containing an epoch as an +- upgrade instead of a downgrade. 
+- """ +- name = "foo" +- old = "8:3.8.12-6.n.el7" +- new = "9:3.8.12-4.n.el7" +- list_pkgs_mock = MagicMock( +- side_effect=lambda **kwargs: { +- name: [old] if kwargs.get("versions_as_list", False) else old +- } +- ) +- cmd_mock = MagicMock( +- return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""} +- ) +- salt_mock = { +- "cmd.run_all": cmd_mock, +- "lowpkg.version_cmp": rpm.version_cmp, +- "pkg_resource.parse_targets": MagicMock( +- return_value=({name: new}, "repository") +- ), +- } +- full_pkg_string = "-".join((name, new[2:])) +- with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ), patch.dict(yumpkg.__salt__, salt_mock): +- +- # Test yum +- expected = ["yum", "-y", "install", full_pkg_string] +- with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict( +- yumpkg.__grains__, {"os": "CentOS", "osrelease": 7} +- ): +- yumpkg.install("foo", version=new) +- call = cmd_mock.mock_calls[0][1][0] +- assert call == expected, call +- +- # Test dnf +- expected = [ +- "dnf", +- "-y", +- "--best", +- "--allowerasing", +- "install", +- full_pkg_string, +- ] +- yumpkg.__context__.pop("yum_bin") +- cmd_mock.reset_mock() +- with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( +- yumpkg.__grains__, {"os": "Fedora", "osrelease": 27} +- ): +- yumpkg.install("foo", version=new) +- call = cmd_mock.mock_calls[0][1][0] +- assert call == expected, call +- +- @skipIf(not salt.utils.platform.is_linux(), "Only run on Linux") +- def test_install_error_reporting(self): +- """ +- Tests that we properly report yum/dnf errors. +- """ +- name = "foo" +- old = "8:3.8.12-6.n.el7" +- new = "9:3.8.12-4.n.el7" +- list_pkgs_mock = MagicMock( +- side_effect=lambda **kwargs: { +- name: [old] if kwargs.get("versions_as_list", False) else old +- } +- ) +- salt_mock = { +- "cmd.run_all": cmdmod.run_all, +- "lowpkg.version_cmp": rpm.version_cmp, +- "pkg_resource.parse_targets": MagicMock( +- return_value=({name: new}, "repository") +- ), +- } +- full_pkg_string = "-".join((name, new[2:])) +- with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ), patch.dict(yumpkg.__salt__, salt_mock), patch.object( +- yumpkg, "_yum", MagicMock(return_value="cat") +- ): +- +- expected = { +- "changes": {}, +- "errors": [ +- "cat: invalid option -- 'y'\n" +- "Try 'cat --help' for more information." 
+- ], +- } +- with pytest.raises(CommandExecutionError) as exc_info: +- yumpkg.install("foo", version=new) +- assert exc_info.value.info == expected, exc_info.value.info +- +- def test_upgrade_with_options(self): +- with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- +- # with fromrepo +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}): +- yumpkg.upgrade( +- refresh=False, +- fromrepo="good", +- exclude="kernel*", +- branch="foo", +- setopt="obsoletes=0,plugins=0", +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "-y", +- "--disablerepo=*", +- "--enablerepo=good", +- "--branch=foo", +- "--setopt", +- "obsoletes=0", +- "--setopt", +- "plugins=0", +- "--exclude=kernel*", +- "upgrade", +- ], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # without fromrepo +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}): +- yumpkg.upgrade( +- refresh=False, +- enablerepo="good", +- disablerepo="bad", +- exclude="kernel*", +- branch="foo", +- setopt="obsoletes=0,plugins=0", +- ) +- cmd.assert_called_once_with( +- [ +- "yum", +- "--quiet", +- "-y", +- "--disablerepo=bad", +- "--enablerepo=good", +- "--branch=foo", +- "--setopt", +- "obsoletes=0", +- "--setopt", +- "plugins=0", +- "--exclude=kernel*", +- "upgrade", +- ], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- def test_info_installed_with_all_versions(self): +- """ +- Test the return information of all versions for the named package(s), installed on the system. +- +- :return: +- """ +- run_out = { +- "virgo-dummy": [ +- { +- "build_date": "2015-07-09T10:55:19Z", +- "vendor": "openSUSE Build Service", +- "description": "This is the Virgo dummy package used for testing SUSE Manager", +- "license": "GPL-2.0", +- "build_host": "sheep05", +- "url": "http://www.suse.com", +- "build_date_time_t": 1436432119, +- "relocations": "(not relocatable)", +- "source_rpm": "virgo-dummy-1.0-1.1.src.rpm", +- "install_date": "2016-02-23T16:31:57Z", +- "install_date_time_t": 1456241517, +- "summary": "Virgo dummy package", +- "version": "1.0", +- "signature": "DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9", +- "release": "1.1", +- "group": "Applications/System", +- "arch": "i686", +- "size": "17992", +- }, +- { +- "build_date": "2015-07-09T10:15:19Z", +- "vendor": "openSUSE Build Service", +- "description": "This is the Virgo dummy package used for testing SUSE Manager", +- "license": "GPL-2.0", +- "build_host": "sheep05", +- "url": "http://www.suse.com", +- "build_date_time_t": 1436432119, +- "relocations": "(not relocatable)", +- "source_rpm": "virgo-dummy-1.0-1.1.src.rpm", +- "install_date": "2016-02-23T16:31:57Z", +- "install_date_time_t": 14562415127, +- "summary": "Virgo dummy package", +- "version": "1.0", +- "signature": "DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9", +- "release": "1.1", +- "group": "Applications/System", +- "arch": "x86_64", +- "size": "13124", +- }, +- ], +- "libopenssl1_0_0": [ +- { +- "build_date": "2015-11-04T23:20:34Z", +- "vendor": "SUSE LLC ", +- "description": "The OpenSSL Project is a collaborative effort.", +- "license": "OpenSSL", +- "build_host": "sheep11", +- "url": "https://www.openssl.org/", +- "build_date_time_t": 1446675634, +- "relocations": "(not relocatable)", +- "source_rpm": "openssl-1.0.1i-34.1.src.rpm", +- "install_date": 
"2016-02-23T16:31:35Z", +- "install_date_time_t": 1456241495, +- "summary": "Secure Sockets and Transport Layer Security", +- "version": "1.0.1i", +- "signature": "RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82", +- "release": "34.1", +- "group": "Productivity/Networking/Security", +- "packager": "https://www.suse.com/", +- "arch": "x86_64", +- "size": "2576912", +- } +- ], +- } +- with patch.dict( +- yumpkg.__salt__, {"lowpkg.info": MagicMock(return_value=run_out)} +- ): +- installed = yumpkg.info_installed(all_versions=True) +- # Test overall products length +- self.assertEqual(len(installed), 2) +- +- # Test multiple versions for the same package +- for pkg_name, pkg_info_list in installed.items(): +- self.assertEqual( +- len(pkg_info_list), 2 if pkg_name == "virgo-dummy" else 1 +- ) +- for info in pkg_info_list: +- self.assertTrue(info["arch"] in ("x86_64", "i686")) +- +- def test_pkg_hold_yum(self): +- """ +- Tests that we properly identify versionlock plugin when using yum +- for RHEL/CentOS 7 and Fedora < 22 +- """ +- +- # Test RHEL/CentOS 7 +- list_pkgs_mock = { +- "yum-plugin-versionlock": "0:1.0.0-0.n.el7", +- "yum-versionlock": "0:1.0.0-0.n.el7", +- } +- +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock) +- ), patch.object(yumpkg, "list_holds", MagicMock(return_value=[])), patch.dict( +- yumpkg.__salt__, {"cmd.run_all": cmd} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- yumpkg.hold("foo") +- cmd.assert_called_once_with( +- ["yum", "versionlock", "foo"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # Test Fedora 20 +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict( +- yumpkg.__grains__, {"os": "Fedora", "osrelease": 20} +- ), patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock) +- ), patch.object( +- yumpkg, "list_holds", MagicMock(return_value=[]) +- ), patch.dict( +- yumpkg.__salt__, {"cmd.run_all": cmd} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- yumpkg.hold("foo") +- cmd.assert_called_once_with( +- ["yum", "versionlock", "foo"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- def test_pkg_hold_tdnf(self): +- """ +- Tests that we raise a SaltInvocationError if we try to use +- hold-related functions on Photon OS. 
+- """ +- with patch.dict(yumpkg.__context__, {"yum_bin": "tdnf"}): +- self.assertRaises(SaltInvocationError, yumpkg.hold, "foo") +- +- def test_pkg_hold_dnf(self): +- """ +- Tests that we properly identify versionlock plugin when using dnf +- for RHEL/CentOS 8 and Fedora >= 22 +- """ +- +- # Test RHEL/CentOS 8 +- list_pkgs_mock = { +- "python2-dnf-plugin-versionlock": "0:1.0.0-0.n.el8", +- "python3-dnf-plugin-versionlock": "0:1.0.0-0.n.el8", +- } +- +- yumpkg.__context__.pop("yum_bin") +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( +- yumpkg.__grains__, {"osmajorrelease": 8} +- ), patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock) +- ), patch.object( +- yumpkg, "list_holds", MagicMock(return_value=[]) +- ), patch.dict( +- yumpkg.__salt__, {"cmd.run_all": cmd} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- yumpkg.hold("foo") +- cmd.assert_called_once_with( +- ["dnf", "versionlock", "foo"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # Test Fedora 26+ +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( +- yumpkg.__grains__, {"os": "Fedora", "osrelease": 26} +- ), patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock) +- ), patch.object( +- yumpkg, "list_holds", MagicMock(return_value=[]) +- ), patch.dict( +- yumpkg.__salt__, {"cmd.run_all": cmd} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- yumpkg.hold("foo") +- cmd.assert_called_once_with( +- ["dnf", "versionlock", "foo"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- # Test Fedora 22-25 +- list_pkgs_mock = { +- "python-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8", +- "python3-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8", +- } +- +- cmd = MagicMock(return_value={"retcode": 0}) +- with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( +- yumpkg.__grains__, {"os": "Fedora", "osrelease": 25} +- ), patch.object( +- yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock) +- ), patch.object( +- yumpkg, "list_holds", MagicMock(return_value=[]) +- ), patch.dict( +- yumpkg.__salt__, {"cmd.run_all": cmd} +- ), patch( +- "salt.utils.systemd.has_scope", MagicMock(return_value=False) +- ): +- yumpkg.hold("foo") +- cmd.assert_called_once_with( +- ["dnf", "versionlock", "foo"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- @skipIf(not yumpkg.HAS_YUM, "Could not import yum") +- def test_yum_base_error(self): +- with patch("yum.YumBase") as mock_yum_yumbase: +- mock_yum_yumbase.side_effect = CommandExecutionError +- with pytest.raises(CommandExecutionError): +- yumpkg._get_yum_config() +- +- def test_group_info(self): +- """ +- Test yumpkg.group_info parsing +- """ +- expected = { +- "conditional": [], +- "default": ["qgnomeplatform", "xdg-desktop-portal-gtk"], +- "description": "GNOME is a highly intuitive and user friendly desktop environment.", +- "group": "GNOME", +- "id": "gnome-desktop", +- "mandatory": [ +- "NetworkManager-libreswan-gnome", +- "PackageKit-command-not-found", +- "PackageKit-gtk3-module", +- "abrt-desktop", +- "at-spi2-atk", +- "at-spi2-core", +- "avahi", +- "baobab", +- "caribou", +- "caribou-gtk2-module", +- "caribou-gtk3-module", +- "cheese", +- "chrome-gnome-shell", +- "compat-cheese314", +- "control-center", +- "dconf", +- "empathy", +- "eog", +- 
"evince", +- "evince-nautilus", +- "file-roller", +- "file-roller-nautilus", +- "firewall-config", +- "firstboot", +- "fprintd-pam", +- "gdm", +- "gedit", +- "glib-networking", +- "gnome-bluetooth", +- "gnome-boxes", +- "gnome-calculator", +- "gnome-classic-session", +- "gnome-clocks", +- "gnome-color-manager", +- "gnome-contacts", +- "gnome-dictionary", +- "gnome-disk-utility", +- "gnome-font-viewer", +- "gnome-getting-started-docs", +- "gnome-icon-theme", +- "gnome-icon-theme-extras", +- "gnome-icon-theme-symbolic", +- "gnome-initial-setup", +- "gnome-packagekit", +- "gnome-packagekit-updater", +- "gnome-screenshot", +- "gnome-session", +- "gnome-session-xsession", +- "gnome-settings-daemon", +- "gnome-shell", +- "gnome-software", +- "gnome-system-log", +- "gnome-system-monitor", +- "gnome-terminal", +- "gnome-terminal-nautilus", +- "gnome-themes-standard", +- "gnome-tweak-tool", +- "gnome-user-docs", +- "gnome-weather", +- "gucharmap", +- "gvfs-afc", +- "gvfs-afp", +- "gvfs-archive", +- "gvfs-fuse", +- "gvfs-goa", +- "gvfs-gphoto2", +- "gvfs-mtp", +- "gvfs-smb", +- "initial-setup-gui", +- "libcanberra-gtk2", +- "libcanberra-gtk3", +- "libproxy-mozjs", +- "librsvg2", +- "libsane-hpaio", +- "metacity", +- "mousetweaks", +- "nautilus", +- "nautilus-sendto", +- "nm-connection-editor", +- "orca", +- "redhat-access-gui", +- "sane-backends-drivers-scanners", +- "seahorse", +- "setroubleshoot", +- "sushi", +- "totem", +- "totem-nautilus", +- "vinagre", +- "vino", +- "xdg-user-dirs-gtk", +- "yelp", +- ], +- "optional": [ +- "", +- "alacarte", +- "dconf-editor", +- "dvgrab", +- "fonts-tweak-tool", +- "gconf-editor", +- "gedit-plugins", +- "gnote", +- "libappindicator-gtk3", +- "seahorse-nautilus", +- "seahorse-sharing", +- "vim-X11", +- "xguest", +- ], +- "type": "package group", +- } +- cmd_out = """Group: GNOME +- Group-Id: gnome-desktop +- Description: GNOME is a highly intuitive and user friendly desktop environment. 
+- Mandatory Packages: +- =NetworkManager-libreswan-gnome +- =PackageKit-command-not-found +- =PackageKit-gtk3-module +- abrt-desktop +- =at-spi2-atk +- =at-spi2-core +- =avahi +- =baobab +- -caribou +- -caribou-gtk2-module +- -caribou-gtk3-module +- =cheese +- =chrome-gnome-shell +- =compat-cheese314 +- =control-center +- =dconf +- =empathy +- =eog +- =evince +- =evince-nautilus +- =file-roller +- =file-roller-nautilus +- =firewall-config +- =firstboot +- fprintd-pam +- =gdm +- =gedit +- =glib-networking +- =gnome-bluetooth +- =gnome-boxes +- =gnome-calculator +- =gnome-classic-session +- =gnome-clocks +- =gnome-color-manager +- =gnome-contacts +- =gnome-dictionary +- =gnome-disk-utility +- =gnome-font-viewer +- =gnome-getting-started-docs +- =gnome-icon-theme +- =gnome-icon-theme-extras +- =gnome-icon-theme-symbolic +- =gnome-initial-setup +- =gnome-packagekit +- =gnome-packagekit-updater +- =gnome-screenshot +- =gnome-session +- =gnome-session-xsession +- =gnome-settings-daemon +- =gnome-shell +- =gnome-software +- =gnome-system-log +- =gnome-system-monitor +- =gnome-terminal +- =gnome-terminal-nautilus +- =gnome-themes-standard +- =gnome-tweak-tool +- =gnome-user-docs +- =gnome-weather +- =gucharmap +- =gvfs-afc +- =gvfs-afp +- =gvfs-archive +- =gvfs-fuse +- =gvfs-goa +- =gvfs-gphoto2 +- =gvfs-mtp +- =gvfs-smb +- initial-setup-gui +- =libcanberra-gtk2 +- =libcanberra-gtk3 +- =libproxy-mozjs +- =librsvg2 +- =libsane-hpaio +- =metacity +- =mousetweaks +- =nautilus +- =nautilus-sendto +- =nm-connection-editor +- =orca +- -redhat-access-gui +- =sane-backends-drivers-scanners +- =seahorse +- =setroubleshoot +- =sushi +- =totem +- =totem-nautilus +- =vinagre +- =vino +- =xdg-user-dirs-gtk +- =yelp +- Default Packages: +- =qgnomeplatform +- =xdg-desktop-portal-gtk +- Optional Packages: +- alacarte +- dconf-editor +- dvgrab +- fonts-tweak-tool +- gconf-editor +- gedit-plugins +- gnote +- libappindicator-gtk3 +- seahorse-nautilus +- seahorse-sharing +- vim-X11 +- xguest +- """ +- with patch.dict( +- yumpkg.__salt__, {"cmd.run_stdout": MagicMock(return_value=cmd_out)} +- ): +- info = yumpkg.group_info("@gnome-desktop") +- self.assertDictEqual(info, expected) +- +- def test_get_repo_with_existent_repo(self): +- """ +- Test get_repo with an existent repository +- Expected return is a populated dictionary +- """ +- repo = "base-source" +- kwargs = { +- "baseurl": "http://vault.centos.org/centos/$releasever/os/Source/", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Base Sources", +- "enabled": True, +- } +- parse_repo_file_return = ( +- "", +- { +- "base-source": { +- "baseurl": "http://vault.centos.org/centos/$releasever/os/Source/", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Base Sources", +- "enabled": "1", +- } +- }, +- ) +- expected = { +- "baseurl": "http://vault.centos.org/centos/$releasever/os/Source/", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Base Sources", +- "enabled": "1", +- } +- patch_list_repos = patch.object( +- yumpkg, "list_repos", autospec=True, return_value=LIST_REPOS +- ) +- patch_parse_repo_file = patch.object( +- yumpkg, +- "_parse_repo_file", +- autospec=True, +- return_value=parse_repo_file_return, +- ) +- +- with patch_list_repos, patch_parse_repo_file: +- ret = yumpkg.get_repo(repo, **kwargs) +- assert ret == expected, ret +- +- def test_get_repo_with_non_existent_repo(self): +- """ +- Test get_repo with an non existent 
repository +- Expected return is an empty dictionary +- """ +- repo = "non-existent-repository" +- kwargs = { +- "baseurl": "http://fake.centos.org/centos/$releasever/os/Non-Existent/", +- "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7", +- "name": "CentOS-$releasever - Non-Existent Repository", +- "enabled": True, +- } +- expected = {} +- patch_list_repos = patch.object( +- yumpkg, "list_repos", autospec=True, return_value=LIST_REPOS +- ) +- +- with patch_list_repos: +- ret = yumpkg.get_repo(repo, **kwargs) +- assert ret == expected, ret +- +- +-@skipIf(pytest is None, "PyTest is missing") +-class YumUtilsTestCase(TestCase, LoaderModuleMockMixin): +- """ +- Yum/Dnf utils tests. +- """ +- +- def setup_loader_modules(self): +- return { +- yumpkg: { +- "__context__": {"yum_bin": "fake-yum"}, +- "__grains__": { +- "osarch": "x86_64", +- "os_family": "RedHat", +- "osmajorrelease": 7, +- }, +- } +- } +- +- def test_call_yum_default(self): +- """ +- Call default Yum/Dnf. +- :return: +- """ +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106 +- yumpkg.__salt__["cmd.run_all"].assert_called_once_with( +- ["fake-yum", "-y", "--do-something"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- @patch("salt.utils.systemd.has_scope", MagicMock(return_value=True)) +- def test_call_yum_in_scope(self): +- """ +- Call Yum/Dnf within the scope. +- :return: +- """ +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=True)}, +- ): +- yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106 +- yumpkg.__salt__["cmd.run_all"].assert_called_once_with( +- ["systemd-run", "--scope", "fake-yum", "-y", "--do-something"], +- env={}, +- output_loglevel="trace", +- python_shell=False, +- ) +- +- def test_call_yum_with_kwargs(self): +- """ +- Call Yum/Dnf with the optinal keyword arguments. +- :return: +- """ +- with patch.dict( +- yumpkg.__salt__, +- {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)}, +- ): +- yumpkg._call_yum( +- ["-y", "--do-something"], +- python_shell=True, +- output_loglevel="quiet", +- ignore_retcode=False, +- username="Darth Vader", +- ) # pylint: disable=W0106 +- yumpkg.__salt__["cmd.run_all"].assert_called_once_with( +- ["fake-yum", "-y", "--do-something"], +- env={}, +- ignore_retcode=False, +- output_loglevel="quiet", +- python_shell=True, +- username="Darth Vader", +- ) +- +- @skipIf(not salt.utils.systemd.booted(), "Requires systemd") +- @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")) +- def test_services_need_restart(self): +- """ +- Test that dnf needs-restarting output is parsed and +- salt.utils.systemd.pid_to_service is called as expected. 
+- """ +- expected = ["firewalld", "salt-minion"] +- +- dnf_mock = Mock( +- return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n" +- ) +- systemd_mock = Mock(side_effect=["firewalld", "salt-minion"]) +- with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch( +- "salt.utils.systemd.pid_to_service", systemd_mock +- ): +- assert sorted(yumpkg.services_need_restart()) == expected +- systemd_mock.assert_has_calls([call("123"), call("456")]) +- +- @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")) +- def test_services_need_restart_requires_systemd(self): +- """Test that yumpkg.services_need_restart raises an error if systemd is unavailable.""" +- with patch("salt.utils.systemd.booted", Mock(return_value=False)): +- pytest.raises(CommandExecutionError, yumpkg.services_need_restart) +- +- @patch("salt.modules.yumpkg._yum", Mock(return_value="yum")) +- def test_services_need_restart_requires_dnf(self): +- """Test that yumpkg.services_need_restart raises an error if DNF is unavailable.""" +- pytest.raises(CommandExecutionError, yumpkg.services_need_restart) diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index eaa4d9a76a..018c1ffbca 100644 +index 78fe226914..2440954d89 100644 --- a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -10,7 +10,7 @@ import salt.modules.pkg_resource as pkg_resource +@@ -12,11 +12,9 @@ import salt.modules.pkg_resource as pkg_resource import salt.modules.zypperpkg as zypper import salt.utils.files import salt.utils.pkg -from salt.exceptions import CommandExecutionError +-from salt.ext import six +-from salt.ext.six.moves import configparser +from salt.exceptions import CommandExecutionError, SaltInvocationError - from salt.ext import six - from salt.ext.six.moves import configparser from tests.support.mixins import LoaderModuleMockMixin -@@ -2175,3 +2175,41 @@ pattern() = package-c""" +-from tests.support.mock import MagicMock, Mock, call, mock_open, patch ++from tests.support.mock import MagicMock, Mock, call, patch + from tests.support.unit import TestCase + + +@@ -2441,3 +2439,41 @@ pattern() = package-c""" python_shell=False, env={"ZYPP_READONLY_HACK": "1"}, ) @@ -1017,494 +3366,7 @@ index eaa4d9a76a..018c1ffbca 100644 + with patch.dict(zypper.__salt__, salt_mock): + self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt")) + salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") -diff --git a/tests/unit/states/test_pkgrepo.py b/tests/unit/states/test_pkgrepo.py -index b2be5b4da1..135e545220 100644 ---- a/tests/unit/states/test_pkgrepo.py -+++ b/tests/unit/states/test_pkgrepo.py -@@ -1,17 +1,12 @@ --# -*- coding: utf-8 -*- - """ - :codeauthor: Tyler Johnson - """ --# Import Python libs --from __future__ import absolute_import - --# Import Salt Libs - import salt.states.pkgrepo as pkgrepo -- --# Import Salt Testing Libs -+import salt.utils.platform - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.mock import MagicMock, patch --from tests.support.unit import TestCase -+from tests.support.unit import TestCase, skipIf - - - class PkgrepoTestCase(TestCase, LoaderModuleMockMixin): -@@ -72,3 +67,462 @@ class PkgrepoTestCase(TestCase, LoaderModuleMockMixin): - }, - ret["changes"], - ) -+ -+ def test__normalize_repo_suse(self): -+ repo = { -+ "name": "repo name", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ } -+ grains = {"os_family": "Suse"} -+ with patch.dict(pkgrepo.__grains__, 
grains): -+ self.assertEqual( -+ pkgrepo._normalize_repo(repo), -+ {"humanname": "repo name", "refresh": True, "priority": 0}, -+ ) -+ -+ def test__normalize_key_rpm(self): -+ key = {"Description": "key", "Date": "Date", "Other": "Other"} -+ for os_family in ("Suse", "RedHat"): -+ grains = {"os_family": os_family} -+ with patch.dict(pkgrepo.__grains__, grains): -+ self.assertEqual(pkgrepo._normalize_key(key), {"key": "key"}) -+ -+ def test__repos_keys_migrate_drop_migrate_to_empty(self): -+ src_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-2": { -+ "name": "repo name 2", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ tgt_repos = {} -+ -+ src_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ "key2": {"Description": "key2", "Other": "Other2"}, -+ } -+ tgt_keys = {} -+ -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), -+ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo._repos_keys_migrate_drop("/mnt", False, False), -+ ( -+ { -+ ( -+ "repo-1", -+ ( -+ ("humanname", "repo name 1"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ ( -+ "repo-2", -+ ( -+ ("humanname", "repo name 2"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ set(), -+ set(), -+ set(), -+ ), -+ ) -+ -+ def test__repos_keys_migrate_drop_migrate_to_empty_keys(self): -+ src_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-2": { -+ "name": "repo name 2", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ tgt_repos = {} -+ -+ src_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ "key2": {"Description": "key2", "Other": "Other2"}, -+ } -+ tgt_keys = {} -+ -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), -+ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo._repos_keys_migrate_drop("/mnt", True, False), -+ ( -+ { -+ ( -+ "repo-1", -+ ( -+ ("humanname", "repo name 1"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ ( -+ "repo-2", -+ ( -+ ("humanname", "repo name 2"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ set(), -+ {("key1", (("key", "key1"),)), ("key2", (("key", "key2"),))}, -+ set(), -+ ), -+ ) -+ -+ def test__repos_keys_migrate_drop_migrate_to_populated_no_drop(self): -+ src_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-2": { -+ "name": "repo name 2", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ tgt_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-3": { -+ "name": "repo name 3", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ -+ src_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ "key2": {"Description": "key2", "Other": "Other2"}, -+ } -+ tgt_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ 
"key3": {"Description": "key3", "Other": "Other2"}, -+ } -+ -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), -+ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo._repos_keys_migrate_drop("/mnt", True, False), -+ ( -+ { -+ ( -+ "repo-2", -+ ( -+ ("humanname", "repo name 2"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ set(), -+ {("key2", (("key", "key2"),))}, -+ set(), -+ ), -+ ) -+ -+ def test__repos_keys_migrate_drop_migrate_to_populated_drop(self): -+ src_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-2": { -+ "name": "repo name 2", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ tgt_repos = { -+ "repo-1": { -+ "name": "repo name 1", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": True, -+ }, -+ "repo-3": { -+ "name": "repo name 3", -+ "autorefresh": True, -+ "priority": 0, -+ "pkg_gpgcheck": False, -+ }, -+ } -+ -+ src_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ "key2": {"Description": "key2", "Other": "Other2"}, -+ } -+ tgt_keys = { -+ "key1": {"Description": "key1", "Other": "Other1"}, -+ "key3": {"Description": "key3", "Other": "Other2"}, -+ } -+ -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "pkg.list_repos": MagicMock(side_effect=[src_repos, tgt_repos]), -+ "lowpkg.list_gpg_keys": MagicMock(side_effect=[src_keys, tgt_keys]), -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo._repos_keys_migrate_drop("/mnt", True, True), -+ ( -+ { -+ ( -+ "repo-2", -+ ( -+ ("humanname", "repo name 2"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ { -+ ( -+ "repo-3", -+ ( -+ ("humanname", "repo name 3"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ {("key2", (("key", "key2"),))}, -+ {("key3", (("key", "key3"),))}, -+ ), -+ ) -+ -+ @skipIf(salt.utils.platform.is_windows(), "Do not run on Windows") -+ def test__copy_repository_to_suse(self): -+ grains = {"os_family": "Suse"} -+ salt_mock = {"file.copy": MagicMock()} -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ pkgrepo._copy_repository_to("/mnt") -+ salt_mock["file.copy"].assert_called_with( -+ src="/etc/zypp/repos.d", dst="/mnt/etc/zypp/repos.d", recurse=True -+ ) -+ -+ def test_migrated_non_supported_platform(self): -+ grains = {"os_family": "Debian"} -+ with patch.dict(pkgrepo.__grains__, grains): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt"), -+ { -+ "name": "/mnt", -+ "result": False, -+ "changes": {}, -+ "comment": "Migration not supported for this platform", -+ }, -+ ) -+ -+ def test_migrated_missing_keys_api(self): -+ grains = {"os_family": "Suse"} -+ with patch.dict(pkgrepo.__grains__, grains): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt"), -+ { -+ "name": "/mnt", -+ "result": False, -+ "changes": {}, -+ "comment": "Keys cannot be migrated for this platform", -+ }, -+ ) -+ -+ def test_migrated_wrong_method(self): -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "lowpkg.import_gpg_key": True, -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt", method="magic"), -+ { -+ 
"name": "/mnt", -+ "result": False, -+ "changes": {}, -+ "comment": "Migration method not supported", -+ }, -+ ) -+ -+ @patch("salt.states.pkgrepo._repos_keys_migrate_drop") -+ def test_migrated_empty(self, _repos_keys_migrate_drop): -+ _repos_keys_migrate_drop.return_value = (set(), set(), set(), set()) -+ -+ grains = {"os_family": "Suse"} -+ salt_mock = { -+ "lowpkg.import_gpg_key": True, -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__salt__, salt_mock -+ ): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt"), -+ { -+ "name": "/mnt", -+ "result": True, -+ "changes": {}, -+ "comment": "Repositories are already migrated", -+ }, -+ ) -+ -+ @patch("salt.states.pkgrepo._repos_keys_migrate_drop") -+ def test_migrated(self, _repos_keys_migrate_drop): -+ _repos_keys_migrate_drop.side_effect = [ -+ ( -+ { -+ ( -+ "repo-1", -+ ( -+ ("humanname", "repo name 1"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ { -+ ( -+ "repo-2", -+ ( -+ ("humanname", "repo name 2"), -+ ("priority", 0), -+ ("refresh", True), -+ ), -+ ), -+ }, -+ {("key1", (("key", "key1"),))}, -+ {("key2", (("key", "key2"),))}, -+ ), -+ (set(), set(), set(), set()), -+ ] -+ -+ grains = {"os_family": "Suse"} -+ opts = {"test": False} -+ salt_mock = { -+ "pkg.mod_repo": MagicMock(), -+ "pkg.del_repo": MagicMock(), -+ "lowpkg.import_gpg_key": MagicMock(), -+ "lowpkg.remove_gpg_key": MagicMock(), -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__opts__, opts -+ ), patch.dict(pkgrepo.__salt__, salt_mock): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt", True, True), -+ { -+ "name": "/mnt", -+ "result": True, -+ "changes": { -+ "repos migrated": ["repo-1"], -+ "repos dropped": ["repo-2"], -+ "keys migrated": ["key1"], -+ "keys dropped": ["key2"], -+ }, -+ "comment": "Repositories synchronized", -+ }, -+ ) -+ salt_mock["pkg.mod_repo"].assert_called_with( -+ "repo-1", humanname="repo name 1", priority=0, refresh=True, root="/mnt" -+ ) -+ salt_mock["pkg.del_repo"].assert_called_with("repo-2", root="/mnt") -+ salt_mock["lowpkg.import_gpg_key"].assert_called_with("key1", root="/mnt") -+ salt_mock["lowpkg.remove_gpg_key"].assert_called_with("key2", root="/mnt") -+ -+ @patch("salt.states.pkgrepo._repos_keys_migrate_drop") -+ def test_migrated_test(self, _repos_keys_migrate_drop): -+ _repos_keys_migrate_drop.return_value = ( -+ { -+ ( -+ "repo-1", -+ (("humanname", "repo name 1"), ("priority", 0), ("refresh", True)), -+ ), -+ }, -+ { -+ ( -+ "repo-2", -+ (("humanname", "repo name 2"), ("priority", 0), ("refresh", True)), -+ ), -+ }, -+ {("key1", (("key", "key1"),))}, -+ {("key2", (("key", "key2"),))}, -+ ) -+ -+ grains = {"os_family": "Suse"} -+ opts = {"test": True} -+ salt_mock = { -+ "lowpkg.import_gpg_key": True, -+ } -+ with patch.dict(pkgrepo.__grains__, grains), patch.dict( -+ pkgrepo.__opts__, opts -+ ), patch.dict(pkgrepo.__salt__, salt_mock): -+ self.assertEqual( -+ pkgrepo.migrated("/mnt", True, True), -+ { -+ "name": "/mnt", -+ "result": None, -+ "changes": { -+ "repos to migrate": ["repo-1"], -+ "repos to drop": ["repo-2"], -+ "keys to migrate": ["key1"], -+ "keys to drop": ["key2"], -+ }, -+ "comment": "There are keys or repositories to migrate or drop", -+ }, -+ ) -- -2.29.2 +2.33.0 diff --git a/add-missing-aarch64-to-rpm-package-architectures-405.patch b/add-missing-aarch64-to-rpm-package-architectures-405.patch index 62afcbc..593198a 100644 --- a/add-missing-aarch64-to-rpm-package-architectures-405.patch +++ 
b/add-missing-aarch64-to-rpm-package-architectures-405.patch @@ -1,21 +1,20 @@ -From e7723f081cc79088156a986cf940349fec7f00a3 Mon Sep 17 00:00:00 2001 +From 03b40485102e88e217814ea4e08fb857ad16cbff Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:05:42 +0300 -Subject: [PATCH] Add missing aarch64 to rpm package architectures - (#405) +Subject: [PATCH] Add missing aarch64 to rpm package architectures (#405) Required to prevent false negative results on using pkg.installed with architecture specification in package name (ex. `bash.aarch64`) --- salt/utils/pkg/rpm.py | 2 +- - tests/unit/modules/test_zypperpkg.py | 20 ++++++++++++++++++++ - 2 files changed, 21 insertions(+), 1 deletion(-) + tests/unit/modules/test_zypperpkg.py | 34 ++++++++++++++++++++++++++++ + 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py -index d1b149ea0b..8b8ea2e4b1 100644 +index 3e990cc05d..8203d2f989 100644 --- a/salt/utils/pkg/rpm.py +++ b/salt/utils/pkg/rpm.py -@@ -33,7 +33,7 @@ ARCHES_ALPHA = ( +@@ -30,7 +30,7 @@ ARCHES_ALPHA = ( "alphaev68", "alphaev7", ) @@ -25,13 +24,27 @@ index d1b149ea0b..8b8ea2e4b1 100644 ARCHES = ( diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 5c01bbbfbd..d6a6a6d852 100644 +index 2d7e5f0858..20bf5eaaad 100644 --- a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -2477,3 +2477,23 @@ pattern() = package-c""" - with patch("salt.modules.zypperpkg.__zypper__", zypper_mock): - assert zypper.services_need_restart() == expected - zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss") +@@ -2475,3 +2475,37 @@ pattern() = package-c""" + with patch.dict(zypper.__salt__, salt_mock): + self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt")) + salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") ++ ++ def test_services_need_restart(self): ++ """ ++ Test that zypper ps is used correctly to list services that need to ++ be restarted. 
++ """ ++ expected = ["salt-minion", "firewalld"] ++ zypper_output = "salt-minion\nfirewalld" ++ zypper_mock = Mock() ++ zypper_mock(root=None).nolock.call = Mock(return_value=zypper_output) ++ ++ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock): ++ assert zypper.services_need_restart() == expected ++ zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss") + + def test_normalize_name(self): + """ @@ -53,6 +66,6 @@ index 5c01bbbfbd..d6a6a6d852 100644 + result = zypper.normalize_name("foo.noarch") + assert result == "foo", result -- -2.32.0 +2.33.0 diff --git a/add-multi-file-support-and-globbing-to-the-filetree-.patch b/add-multi-file-support-and-globbing-to-the-filetree-.patch deleted file mode 100644 index dae5602..0000000 --- a/add-multi-file-support-and-globbing-to-the-filetree-.patch +++ /dev/null @@ -1,116 +0,0 @@ -From c5e5dc304e897f8c1664cce29fe9ee63d84f3ae6 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Fri, 12 Oct 2018 16:20:40 +0200 -Subject: [PATCH] Add multi-file support and globbing to the filetree - (U#50018) - -Add more possible logs - -Support multiple files grabbing - -Collect system logs and boot logs - -Support globbing in filetree ---- - salt/cli/support/intfunc.py | 49 ++++++++++++++++----------- - salt/cli/support/profiles/default.yml | 7 ++++ - 2 files changed, 37 insertions(+), 19 deletions(-) - -diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py -index d3d8f83cb8..a9f76a6003 100644 ---- a/salt/cli/support/intfunc.py -+++ b/salt/cli/support/intfunc.py -@@ -3,6 +3,7 @@ Internal functions. - """ - # Maybe this needs to be a modules in a future? - -+import glob - import os - - import salt.utils.files -@@ -11,7 +12,7 @@ from salt.cli.support.console import MessagesOutput - out = MessagesOutput() - - --def filetree(collector, path): -+def filetree(collector, *paths): - """ - Add all files in the tree. If the "path" is a file, - only that file will be added. -@@ -19,22 +20,32 @@ def filetree(collector, path): - :param path: File or directory - :return: - """ -- if not path: -- out.error("Path not defined", ident=2) -- else: -- # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. -- # pylint: disable=W8470 -- if os.path.isfile(path): -- filename = os.path.basename(path) -- try: -- file_ref = salt.utils.files.fopen(path) # pylint: disable=W -- out.put("Add {}".format(filename), indent=2) -- collector.add(filename) -- collector.link(title=path, path=file_ref) -- except Exception as err: -- out.error(err, ident=4) -- # pylint: enable=W8470 -+ _paths = [] -+ # Unglob -+ for path in paths: -+ _paths += glob.glob(path) -+ for path in set(_paths): -+ if not path: -+ out.error("Path not defined", ident=2) -+ elif not os.path.exists(path): -+ out.warning("Path {} does not exists".format(path)) - else: -- for fname in os.listdir(path): -- fname = os.path.join(path, fname) -- filetree(collector, fname) -+ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. 
-+ # pylint: disable=W8470 -+ if os.path.isfile(path): -+ filename = os.path.basename(path) -+ try: -+ file_ref = salt.utils.files.fopen(path) # pylint: disable=W -+ out.put("Add {}".format(filename), indent=2) -+ collector.add(filename) -+ collector.link(title=path, path=file_ref) -+ except Exception as err: -+ out.error(err, ident=4) -+ # pylint: enable=W8470 -+ else: -+ try: -+ for fname in os.listdir(path): -+ fname = os.path.join(path, fname) -+ filetree(collector, [fname]) -+ except Exception as err: -+ out.error(err, ident=4) -diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml -index 01d9a26193..3defb5eef3 100644 ---- a/salt/cli/support/profiles/default.yml -+++ b/salt/cli/support/profiles/default.yml -@@ -62,10 +62,17 @@ general-health: - - ps.top: - info: Top CPU consuming processes - -+boot_log: -+ - filetree: -+ info: Collect boot logs -+ args: -+ - /var/log/boot.* -+ - system.log: - # This works on any file system object. - - filetree: - info: Add system log - args: - - /var/log/syslog -+ - /var/log/messages - --- -2.29.2 - - diff --git a/add-new-custom-suse-capability-for-saltutil-state-mo.patch b/add-new-custom-suse-capability-for-saltutil-state-mo.patch deleted file mode 100644 index 537fcf9..0000000 --- a/add-new-custom-suse-capability-for-saltutil-state-mo.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 70d13dcc62286d5195bbf28b53aae61616cc0f8f Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 26 Mar 2020 13:08:16 +0000 -Subject: [PATCH] Add new custom SUSE capability for saltutil state - module - ---- - salt/grains/extra.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index ddc22293ea..0eec27e628 100644 ---- a/salt/grains/extra.py -+++ b/salt/grains/extra.py -@@ -71,5 +71,6 @@ def config(): - def suse_backported_capabilities(): - return { - '__suse_reserved_pkg_all_versions_support': True, -- '__suse_reserved_pkg_patches_support': True -+ '__suse_reserved_pkg_patches_support': True, -+ '__suse_reserved_saltutil_states_support': True - } --- -2.29.2 - - diff --git a/add-patch-support-for-allow-vendor-change-option-wit.patch b/add-patch-support-for-allow-vendor-change-option-wit.patch deleted file mode 100644 index ae6333b..0000000 --- a/add-patch-support-for-allow-vendor-change-option-wit.patch +++ /dev/null @@ -1,107 +0,0 @@ -From cee4cc182b4740c912861c712dea7bc44eb70ffb Mon Sep 17 00:00:00 2001 -From: Martin Seidl -Date: Mon, 7 Dec 2020 01:10:51 +0100 -Subject: [PATCH] add patch support for allow vendor change option with - zypper - ---- - salt/modules/zypperpkg.py | 46 +++++++++++++++++++++++++++------------ - 1 file changed, 32 insertions(+), 14 deletions(-) - -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 6f22994bf0..4a5cb85e7c 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -35,7 +35,6 @@ import salt.utils.versions - from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError - - # pylint: disable=import-error,redefined-builtin,no-name-in-module --from salt.ext import six - from salt.ext.six.moves import configparser - from salt.ext.six.moves.urllib.parse import urlparse as _urlparse - from salt.utils.versions import LooseVersion -@@ -1431,6 +1430,7 @@ def install( - no_recommends=False, - root=None, - inclusion_detection=False, -+ novendorchange=True, - **kwargs - ): - """ -@@ -1478,6 +1478,10 @@ def install( - skip_verify - Skip the GPG 
verification check (e.g., ``--no-gpg-checks``) - -+ -+ novendorchange -+ Disallow vendor change -+ - version - Can be either a version number, or the combination of a comparison - operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4'). -@@ -1638,6 +1642,22 @@ def install( - cmd_install.append( - kwargs.get("resolve_capabilities") and "--capability" or "--name" - ) -+ if novendorchange: -+ if __grains__["osrelease_info"][0] > 11: -+ cmd_install.append("--no-allow-vendor-change") -+ log.info("Disabling vendor changes") -+ else: -+ log.warning( -+ "Enabling/Disabling vendor changes is not supported on this Zypper version" -+ ) -+ else: -+ if __grains__["osrelease_info"][0] > 11: -+ cmd_install.append("--allow-vendor-change") -+ log.info("Enabling vendor changes") -+ else: -+ log.warning( -+ "Enabling/Disabling vendor changes is not supported on this Zypper version" -+ ) - - if not refresh: - cmd_install.insert(0, "--no-refresh") -@@ -1649,7 +1669,6 @@ def install( - cmd_install.extend(fromrepoopt) - if no_recommends: - cmd_install.append("--no-recommends") -- - errors = [] - - # Split the targets into batches of 500 packages each, so that -@@ -1793,19 +1812,18 @@ def upgrade( - cmd_update.extend(["--from" if dist_upgrade else "--repo", repo]) - log.info("Targeting repos: %s", fromrepo) - -- if dist_upgrade: -- # TODO: Grains validation should be moved to Zypper class -- if __grains__["osrelease_info"][0] > 11: -- if novendorchange: -- cmd_update.append("--no-allow-vendor-change") -- log.info("Disabling vendor changes") -- else: -- cmd_update.append("--allow-vendor-change") -- log.info("Enabling vendor changes") -+ # TODO: Grains validation should be moved to Zypper class -+ if __grains__["osrelease_info"][0] > 11: -+ if novendorchange: -+ cmd_update.append("--no-allow-vendor-change") -+ log.info("Disabling vendor changes") - else: -- log.warning( -- "Enabling/Disabling vendor changes is not supported on this Zypper version" -- ) -+ cmd_update.append("--allow-vendor-change") -+ log.info("Enabling vendor changes") -+ else: -+ log.warning( -+ "Enabling/Disabling vendor changes is not supported on this Zypper version" -+ ) - - if no_recommends: - cmd_update.append("--no-recommends") --- -2.29.2 - - diff --git a/add-pkg.services_need_restart-302.patch b/add-pkg.services_need_restart-302.patch deleted file mode 100644 index 08ae31b..0000000 --- a/add-pkg.services_need_restart-302.patch +++ /dev/null @@ -1,404 +0,0 @@ -From c79f4a8619ff1275b2ec4400c1fb27d24c22a7eb Mon Sep 17 00:00:00 2001 -From: Alexander Graul -Date: Tue, 8 Dec 2020 15:35:49 +0100 -Subject: [PATCH] Add pkg.services_need_restart (#302) - -* Add utils.systemd.pid_to_service function - -This function translates a given PID to the systemd service name in case -the process belongs to a running service. It uses DBUS for the -translation if DBUS is available, falling back to parsing -``systemctl status -o json'' output. - -* Add zypperpkg.services_need_restart - -pkg.services_need_restart returns a list of system services that were -affected by package manager operations such as updates, downgrades or -reinstallations without having been restarted. This might cause issues, -e.g. in the case a shared object was loaded by a process and then -replaced by the package manager. 
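-
-As a rough, illustrative sketch of that idea (not the code this patch
-adds, which goes through Salt's loader and wrappers), the zypper-based
-check boils down to:
-
-    import subprocess
-
-    def services_need_restart(root=None):
-        # ``zypper ps -sss`` prints only the names of services that still
-        # run code replaced by a package operation, one name per line.
-        cmd = ["zypper"]
-        if root is not None:
-            cmd.extend(["--root", root])
-        cmd.extend(["ps", "-sss"])
-        out = subprocess.run(cmd, check=True, text=True, capture_output=True)
-        return out.stdout.split()
-
-The module version below differs mainly in calling the binary through
-Salt's own ``__zypper__`` wrapper instead of invoking it directly.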
- -(cherry picked from commit b950fcdbd6cc8cb08e1413a0ed05e0ae21717cea) - -* Add aptpkg.services_need_restart - -pkg.services_need_restart returns a list of system services that were -affected by package manager operations such as updates, downgrades or -reinstallations without having been restarted. This might cause issues, -e.g. in the case a shared object was loaded by a process and then -replaced by the package manager. - -Requires checkrestart, which is part of the debian-goodies package and -available from official Ubuntu and Debian repositories. - -(cherry picked from commit b981f6ecb1a551b98c5cebab4975fc09c6a55a22) - -* Add yumpkg.services_need_restart - -pkg.services_need_restart returns a list of system services that were -affected by package manager operations such as updates, downgrades or -reinstallations without having been restarted. This might cause issues, -e.g. in the case a shared object was loaded by a process and then -replaced by the package manager. - -Requires dnf with the needs-restarting plugin, which is part of -dnf-plugins-core and installed by default on RHEL/CentOS/Fedora. -Also requires systemd for the mapping between PIDs and systemd services. - -(cherry picked from commit 5e2be1095729c9f73394e852b82749950957e6fb) - -* Add changelog entry for issue #58261 - -(cherry picked from commit 148877ed8ff7a47132c1186274739e648f7acf1c) - -* Simplify dnf needs-restarting output parsing - -Co-authored-by: Wayne Werner -(cherry picked from commit beb5d60f3cc64b880ec25ca188f8a73f6ec493dd) ---- - changelog/58261.added | 1 + - salt/modules/aptpkg.py | 42 ++++++++++++++++- - salt/modules/yumpkg.py | 36 +++++++++++++++ - salt/modules/zypperpkg.py | 25 ++++++++++ - salt/utils/systemd.py | 69 ++++++++++++++++++++++++++++ - tests/unit/modules/test_aptpkg.py | 22 ++++++++- - tests/unit/modules/test_yumpkg.py | 32 ++++++++++++- - tests/unit/modules/test_zypperpkg.py | 14 ++++++ - 8 files changed, 238 insertions(+), 3 deletions(-) - create mode 100644 changelog/58261.added - -diff --git a/changelog/58261.added b/changelog/58261.added -new file mode 100644 -index 0000000000..537a43e80d ---- /dev/null -+++ b/changelog/58261.added -@@ -0,0 +1 @@ -+Added ``pkg.services_need_restart`` which lists system services that should be restarted after package management operations. -diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 03e99af733..a0e0cc30c1 100644 ---- a/salt/modules/aptpkg.py -+++ b/salt/modules/aptpkg.py -@@ -38,7 +38,12 @@ import salt.utils.stringutils - import salt.utils.systemd - import salt.utils.versions - import salt.utils.yaml --from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError -+from salt.exceptions import ( -+ CommandExecutionError, -+ CommandNotFoundError, -+ MinionError, -+ SaltInvocationError, -+) - from salt.modules.cmdmod import _parse_env - - log = logging.getLogger(__name__) -@@ -3029,3 +3034,38 @@ def list_downloaded(root=None, **kwargs): - ).isoformat(), - } - return ret -+ -+ -+def services_need_restart(**kwargs): -+ """ -+ .. versionadded:: NEXT -+ -+ List services that use files which have been changed by the -+ package manager. It might be needed to restart them. -+ -+ Requires checkrestart from the debian-goodies package. -+ -+ CLI Examples: -+ -+ .. code-block:: bash -+ -+ salt '*' pkg.services_need_restart -+ """ -+ if not salt.utils.path.which_bin(["checkrestart"]): -+ raise CommandNotFoundError( -+ "'checkrestart' is needed. 
It is part of the 'debian-goodies' " -+ "package which can be installed from official repositories." -+ ) -+ -+ cmd = ["checkrestart", "--machine"] -+ services = set() -+ -+ cr_output = __salt__["cmd.run_stdout"](cmd, python_shell=False) -+ for line in cr_output.split("\n"): -+ if not line.startswith("SERVICE:"): -+ continue -+ end_of_name = line.find(",") -+ service = line[8:end_of_name] # skip "SERVICE:" -+ services.add(service) -+ -+ return list(services) -diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index dd843f985b..df174e737d 100644 ---- a/salt/modules/yumpkg.py -+++ b/salt/modules/yumpkg.py -@@ -3434,3 +3434,39 @@ def del_repo_key(keyid, root=None, **kwargs): - - """ - return __salt__["lowpkg.remove_gpg_key"](keyid, root) -+ -+ -+def services_need_restart(**kwargs): -+ """ -+ .. versionadded:: NEXT -+ -+ List services that use files which have been changed by the -+ package manager. It might be needed to restart them. -+ -+ Requires systemd. -+ -+ CLI Examples: -+ -+ .. code-block:: bash -+ -+ salt '*' pkg.services_need_restart -+ """ -+ if _yum() != "dnf": -+ raise CommandExecutionError("dnf is required to list outdated services.") -+ if not salt.utils.systemd.booted(__context__): -+ raise CommandExecutionError("systemd is required to list outdated services.") -+ -+ cmd = ["dnf", "--quiet", "needs-restarting"] -+ dnf_output = __salt__["cmd.run_stdout"](cmd, python_shell=False) -+ if not dnf_output: -+ return [] -+ -+ services = set() -+ for line in dnf_output.split("\n"): -+ pid, has_delim, _ = line.partition(":") -+ if has_delim: -+ service = salt.utils.systemd.pid_to_service(pid.strip()) -+ if service: -+ services.add(service) -+ -+ return list(services) -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 5e13c68708..6f22994bf0 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -3092,3 +3092,28 @@ def del_repo_key(keyid, root=None, **kwargs): - - """ - return __salt__["lowpkg.remove_gpg_key"](keyid, root) -+ -+ -+def services_need_restart(root=None, **kwargs): -+ """ -+ .. versionadded:: NEXT -+ -+ List services that use files which have been changed by the -+ package manager. It might be needed to restart them. -+ -+ root -+ operate on a different root directory. -+ -+ CLI Examples: -+ -+ .. code-block:: bash -+ -+ salt '*' pkg.services_need_restart -+ -+ """ -+ cmd = ["ps", "-sss"] -+ -+ zypper_output = __zypper__(root=root).nolock.call(*cmd) -+ services = zypper_output.split() -+ -+ return services -diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py -index 4d902bc920..f42d0421f8 100644 ---- a/salt/utils/systemd.py -+++ b/salt/utils/systemd.py -@@ -11,6 +11,12 @@ import salt.utils.path - import salt.utils.stringutils - from salt.exceptions import SaltInvocationError - -+try: -+ import dbus -+except ImportError: -+ dbus = None -+ -+ - log = logging.getLogger(__name__) - - -@@ -114,3 +120,66 @@ def has_scope(context=None): - if _sd_version is None: - return False - return _sd_version >= 205 -+ -+ -+def pid_to_service(pid): -+ """ -+ Check if a PID belongs to a systemd service and return its name. -+ Return None if the PID does not belong to a service. -+ -+ Uses DBUS if available. 
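-+
-+    Falls back to parsing ``systemctl status --output json`` output when
-+    the dbus bindings are not importable.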
-+ """ -+ if dbus: -+ return _pid_to_service_dbus(pid) -+ else: -+ return _pid_to_service_systemctl(pid) -+ -+ -+def _pid_to_service_systemctl(pid): -+ systemd_cmd = ["systemctl", "--output", "json", "status", str(pid)] -+ try: -+ systemd_output = subprocess.run( -+ systemd_cmd, check=True, text=True, capture_output=True -+ ) -+ status_json = salt.utils.json.find_json(systemd_output.stdout) -+ except (ValueError, subprocess.CalledProcessError): -+ return None -+ -+ name = status_json.get("_SYSTEMD_UNIT") -+ if name and name.endswith(".service"): -+ return _strip_suffix(name) -+ else: -+ return None -+ -+ -+def _pid_to_service_dbus(pid): -+ """ -+ Use DBUS to check if a PID belongs to a running systemd service and return the service name if it does. -+ """ -+ bus = dbus.SystemBus() -+ systemd_object = bus.get_object( -+ "org.freedesktop.systemd1", "/org/freedesktop/systemd1" -+ ) -+ systemd = dbus.Interface(systemd_object, "org.freedesktop.systemd1.Manager") -+ try: -+ service_path = systemd.GetUnitByPID(pid) -+ service_object = bus.get_object("org.freedesktop.systemd1", service_path) -+ service_props = dbus.Interface( -+ service_object, "org.freedesktop.DBus.Properties" -+ ) -+ service_name = service_props.Get("org.freedesktop.systemd1.Unit", "Id") -+ name = str(service_name) -+ -+ if name and name.endswith(".service"): -+ return _strip_suffix(name) -+ else: -+ return None -+ except dbus.DBusException: -+ return None -+ -+ -+def _strip_suffix(service_name): -+ """ -+ Strip ".service" suffix from a given service name. -+ """ -+ return service_name[:-8] -diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index eb3f9e2da7..1d4d2f7fdc 100644 ---- a/tests/unit/modules/test_aptpkg.py -+++ b/tests/unit/modules/test_aptpkg.py -@@ -13,7 +13,6 @@ import textwrap - import pytest - import salt.modules.aptpkg as aptpkg - from salt.exceptions import CommandExecutionError, SaltInvocationError --from salt.ext import six - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.mock import MagicMock, Mock, call, patch - from tests.support.unit import TestCase, skipIf -@@ -1001,3 +1000,24 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin): - # We should attempt to call the cmd 5 times - self.assertEqual(cmd_mock.call_count, 5) - cmd_mock.has_calls(expected_calls) -+ -+ @patch("salt.utils.path.which_bin", Mock(return_value="/usr/sbin/checkrestart")) -+ def test_services_need_restart(self): -+ """ -+ Test that checkrestart output is parsed correctly -+ """ -+ cr_output = """ -+PROCESSES: 24 -+PROGRAMS: 17 -+PACKAGES: 8 -+SERVICE:rsyslog,385,/usr/sbin/rsyslogd -+SERVICE:cups-daemon,390,/usr/sbin/cupsd -+ """ -+ -+ with patch.dict( -+ aptpkg.__salt__, {"cmd.run_stdout": Mock(return_value=cr_output)} -+ ): -+ assert sorted(aptpkg.services_need_restart()) == [ -+ "cups-daemon", -+ "rsyslog", -+ ] -diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py -index e65a1f8b8b..b97e82d307 100644 ---- a/tests/unit/modules/test_yumpkg.py -+++ b/tests/unit/modules/test_yumpkg.py -@@ -7,7 +7,7 @@ import salt.modules.yumpkg as yumpkg - import salt.utils.platform - from salt.exceptions import CommandExecutionError, SaltInvocationError - from tests.support.mixins import LoaderModuleMockMixin --from tests.support.mock import MagicMock, Mock, mock_open, patch -+from tests.support.mock import MagicMock, Mock, call, mock_open, patch - from tests.support.unit import TestCase, skipIf - - try: -@@ -1745,3 +1745,33 @@ class 
YumUtilsTestCase(TestCase, LoaderModuleMockMixin): - python_shell=True, - username="Darth Vader", - ) -+ -+ @skipIf(not salt.utils.systemd.booted(), "Requires systemd") -+ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")) -+ def test_services_need_restart(self): -+ """ -+ Test that dnf needs-restarting output is parsed and -+ salt.utils.systemd.pid_to_service is called as expected. -+ """ -+ expected = ["firewalld", "salt-minion"] -+ -+ dnf_mock = Mock( -+ return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n" -+ ) -+ systemd_mock = Mock(side_effect=["firewalld", "salt-minion"]) -+ with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch( -+ "salt.utils.systemd.pid_to_service", systemd_mock -+ ): -+ assert sorted(yumpkg.services_need_restart()) == expected -+ systemd_mock.assert_has_calls([call("123"), call("456")]) -+ -+ @patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")) -+ def test_services_need_restart_requires_systemd(self): -+ """Test that yumpkg.services_need_restart raises an error if systemd is unavailable.""" -+ with patch("salt.utils.systemd.booted", Mock(return_value=False)): -+ pytest.raises(CommandExecutionError, yumpkg.services_need_restart) -+ -+ @patch("salt.modules.yumpkg._yum", Mock(return_value="yum")) -+ def test_services_need_restart_requires_dnf(self): -+ """Test that yumpkg.services_need_restart raises an error if DNF is unavailable.""" -+ pytest.raises(CommandExecutionError, yumpkg.services_need_restart) -diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 018c1ffbca..9c4a224c55 100644 ---- a/tests/unit/modules/test_zypperpkg.py -+++ b/tests/unit/modules/test_zypperpkg.py -@@ -2213,3 +2213,17 @@ pattern() = package-c""" - with patch.dict(zypper.__salt__, salt_mock): - self.assertTrue(zypper.del_repo_key(keyid="keyid", root="/mnt")) - salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") -+ -+ def test_services_need_restart(self): -+ """ -+ Test that zypper ps is used correctly to list services that need to -+ be restarted. -+ """ -+ expected = ["salt-minion", "firewalld"] -+ zypper_output = "salt-minion\nfirewalld" -+ zypper_mock = Mock() -+ zypper_mock(root=None).nolock.call = Mock(return_value=zypper_output) -+ -+ with patch("salt.modules.zypperpkg.__zypper__", zypper_mock): -+ assert zypper.services_need_restart() == expected -+ zypper_mock(root=None).nolock.call.assert_called_with("ps", "-sss") --- -2.29.2 - - diff --git a/add-saltssh-multi-version-support-across-python-inte.patch b/add-saltssh-multi-version-support-across-python-inte.patch deleted file mode 100644 index 19c92ce..0000000 --- a/add-saltssh-multi-version-support-across-python-inte.patch +++ /dev/null @@ -1,463 +0,0 @@ -From 99aa26e7ab4840cf38f54e7692d7d1eede3adeb4 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Mon, 12 Mar 2018 12:01:39 +0100 -Subject: [PATCH] Add SaltSSH multi-version support across Python - interpeters. 
- -Bugfix: crashes when OPTIONS.saltdir is a file - -salt-ssh: allow server and client to run different python major version - -Handle non-directory on the /tmp - -Bugfix: prevent partial fileset removal in /tmp - -salt-ssh: compare checksums to detect newly generated thin on the server - -Reset time at thin unpack - -Bugfix: get a proper option for CLI and opts of wiping the tmp - -Add docstring to get_tops - -Remove unnecessary noise in imports - -Refactor get_tops collector - -Add logging to the get_tops - -Update call script - -Remove pre-caution - -Update log debug message for tops collector - -Reset default compression, if unknown is passed - -Refactor archive creation flow - -Add external shell-callable function to collect tops - -Simplify tops gathering, bugfix alternative to Py2 - -find working executable - -Add basic shareable module classifier - -Add proper error handler, unmuting exceptions during top collection - -Use common shared directory for compatible libraries - -fix searching for python versions - -Flatten error message string - -Bail-out immediately if <2.6 version detected - -Simplify shell cmd to get the version on Python 2.x - -Remove stub that was previously moved upfront - -Lintfix: PEP8 ident - -Add logging on the error, when Python-2 version cannot be detected properly - -Generate salt-call source, based on conditions - -Add logging on remove failure on thin.tgz archive - -Add config-based external tops gatherer - -Change signature to pass the extended configuration to the thin generator - -Update docstring to the salt-call generator - -Implement get namespaces inclusion to the salt-call script on the client machine - -Use new signature of the get call - -Implement namespace selector, based on the current Python interpreter version - -Add deps as a list, instead of a map - -Add debug logging - -Implement packaging an alternative version - -Update salt-call script so it swaps the namespace according to the configuration - -Compress thin.zip if zlib is available - -Fix a system exit error message - -Move compression fall-back operation - -Add debug logging prior to the thin archive removal - -Flatten the archive extension choice - -Lintfix: PEP8 an empty line required - -Bugfix: ZFS modules (zfs, zpool) crashes on non-ZFS systems - -Add unit test case for the Salt SSH parts - -Add unit test for missing dependencies on get_ext_tops - -Postpone inheritance implementation - -Refactor unit test for get_ext_tops - -Add unit test for get_ext_tops checks interpreter configuration - -Check python interpreter lock version - -Add unit test for get_ext_tops checks the python locked interepreter value - -Bugfix: report into warning log module name, not its config - -Add unit test for dependencies check python version lock (inherently) - -Mock os.path.isfile function - -Update warning logging information - -Add unit test for get_ext_tops module configuration validation - -Do not use list of dicts for namespaces, just dict for namespaces. 
- -Add unit test for get_ext_tops config verification - -Fix unit tests for the new config structure - -Add unit test for thin.gte call - -Add unit test for dependency path adding function - -Add unit test for thin_path function - -Add unit test for salt-call source generator - -Add unit test for get_ext_namespaces on empty configuration - -Add get_ext_namespaces for namespace extractions into a tuple for python version - -Remove unused variable - -Add unit test for getting namespace failure when python maj/min versions are not defined - -Add unit test to add tops based on the current interpreter - -Add unit test for get_tops with extra modules - -Add unit test for shared object modules top addition - -Add unit test for thin_sum hashing - -Add unit test for min_sum hashing - -Add unit test for gen_thin verify for 2.6 Python version is a minimum requirement - -Fix gen_thin exception on Python 3 - -Use object attribute instead of indeces. Remove an empty line. - -Add unit test for gen_thin compression type fallback - -Move helper functions up by the class code - -Update unit test doc - -Add check for correct archiving mode is opened - -Add unit test for gen_thin if control files are written correctly - -Update docstring for fake version info constructor method - -Add fake tarfile mock handler - -Mock-out missing methods inside gen_thin - -Move tarfile.open check to the end of the test - -Add unit test for tree addition to the archive - -Add shareable module to the gen_thin unit test - -Fix docstring - -Add unit test for an alternative version pack - -Lintfix - -Add documentation about updated Salt SSH features - -Fix typo - -Lintfix: PEP8 extra-line needed - -Make the command more readable - -Write all supported minimal python versions into a config file on the target machine - -Get supported Python executable based on the config py-map - -Add unit test for get_supported_py_config function typecheck - -Add unit test for get_supported_py_config function base tops - -Add unit test for get_supported_py_config function ext tops - -Fix unit test for catching "supported-versions" was written down - -Rephrase Salt SSH doc description - -Re-phrase docstring for alternative Salt installation - -require same major version while minor is allowed to be higher - -Bugfix: remove minor version from the namespaced, version-specific directory - -Fix unit tests for minor version removal of namespaced version-specific directory - -Initialise the options directly to be structure-ready object. 
- -Disable wiping if state is executed - -Properly mock a tempfile object - -Support Python 2.6 versions - -Add digest collector for file trees etc - -Bufix: recurse calls damages the configuration (reference problem) - -Collect digest of the code - -Get code checksum into the shim options - -Get all the code content, not just Python sources - -Bugfix: Python3 compat - string required instead of bytes - -Lintfix: too many empty lines - -Lintfix: blocked function used - -Bugfix: key error master_tops_first - -Fix unit tests for the checksum generator - -Use code checksum to update thin archive on client's cache - -Lintfix - -Set master_top_first to False by default ---- - doc/topics/releases/fluorine.rst | 178 +++++++++++++++++++++++++++++++ - salt/client/ssh/ssh_py_shim.py | 3 + - 2 files changed, 181 insertions(+) - create mode 100644 doc/topics/releases/fluorine.rst - -diff --git a/doc/topics/releases/fluorine.rst b/doc/topics/releases/fluorine.rst -new file mode 100644 -index 0000000000..40c69e25cc ---- /dev/null -+++ b/doc/topics/releases/fluorine.rst -@@ -0,0 +1,178 @@ -+:orphan: -+ -+====================================== -+Salt Release Notes - Codename Fluorine -+====================================== -+ -+ -+Minion Startup Events -+--------------------- -+ -+When a minion starts up it sends a notification on the event bus with a tag -+that looks like this: `salt/minion//start`. For historical reasons -+the minion also sends a similar event with an event tag like this: -+`minion_start`. This duplication can cause a lot of clutter on the event bus -+when there are many minions. Set `enable_legacy_startup_events: False` in the -+minion config to ensure only the `salt/minion//start` events are -+sent. -+ -+The new :conf_minion:`enable_legacy_startup_events` minion config option -+defaults to ``True``, but will be set to default to ``False`` beginning with -+the Neon release of Salt. -+ -+The Salt Syndic currently sends an old style `syndic_start` event as well. The -+syndic respects :conf_minion:`enable_legacy_startup_events` as well. -+ -+ -+Deprecations -+------------ -+ -+Module Deprecations -+=================== -+ -+The ``napalm_network`` module had the following changes: -+ -+- Support for the ``template_path`` has been removed in the ``load_template`` -+ function. This is because support for NAPALM native templates has been -+ dropped. -+ -+The ``trafficserver`` module had the following changes: -+ -+- Support for the ``match_var`` function was removed. Please use the -+ ``match_metric`` function instead. -+- Support for the ``read_var`` function was removed. Please use the -+ ``read_config`` function instead. -+- Support for the ``set_var`` function was removed. Please use the -+ ``set_config`` function instead. -+ -+The ``win_update`` module has been removed. It has been replaced by ``win_wua`` -+module. -+ -+The ``win_wua`` module had the following changes: -+ -+- Support for the ``download_update`` function has been removed. Please use the -+ ``download`` function instead. -+- Support for the ``download_updates`` function has been removed. Please use the -+ ``download`` function instead. -+- Support for the ``install_update`` function has been removed. Please use the -+ ``install`` function instead. -+- Support for the ``install_updates`` function has been removed. Please use the -+ ``install`` function instead. -+- Support for the ``list_update`` function has been removed. Please use the -+ ``get`` function instead. 
-+- Support for the ``list_updates`` function has been removed. Please use the -+ ``list`` function instead. -+ -+Pillar Deprecations -+=================== -+ -+The ``vault`` pillar had the following changes: -+ -+- Support for the ``profile`` argument was removed. Any options passed up until -+ and following the first ``path=`` are discarded. -+ -+Roster Deprecations -+=================== -+ -+The ``cache`` roster had the following changes: -+ -+- Support for ``roster_order`` as a list or tuple has been removed. As of the -+ ``Fluorine`` release, ``roster_order`` must be a dictionary. -+- The ``roster_order`` option now includes IPv6 in addition to IPv4 for the -+ ``private``, ``public``, ``global`` or ``local`` settings. The syntax for these -+ settings has changed to ``ipv4-*`` or ``ipv6-*``, respectively. -+ -+State Deprecations -+================== -+ -+The ``docker`` state has been removed. The following functions should be used -+instead. -+ -+- The ``docker.running`` function was removed. Please update applicable SLS files -+ to use the ``docker_container.running`` function instead. -+- The ``docker.stopped`` function was removed. Please update applicable SLS files -+ to use the ``docker_container.stopped`` function instead. -+- The ``docker.absent`` function was removed. Please update applicable SLS files -+ to use the ``docker_container.absent`` function instead. -+- The ``docker.absent`` function was removed. Please update applicable SLS files -+ to use the ``docker_container.absent`` function instead. -+- The ``docker.network_present`` function was removed. Please update applicable -+ SLS files to use the ``docker_network.present`` function instead. -+- The ``docker.network_absent`` function was removed. Please update applicable -+ SLS files to use the ``docker_network.absent`` function instead. -+- The ``docker.image_present`` function was removed. Please update applicable SLS -+ files to use the ``docker_image.present`` function instead. -+- The ``docker.image_absent`` function was removed. Please update applicable SLS -+ files to use the ``docker_image.absent`` function instead. -+- The ``docker.volume_present`` function was removed. Please update applicable SLS -+ files to use the ``docker_volume.present`` function instead. -+- The ``docker.volume_absent`` function was removed. Please update applicable SLS -+ files to use the ``docker_volume.absent`` function instead. -+ -+The ``docker_network`` state had the following changes: -+ -+- Support for the ``driver`` option has been removed from the ``absent`` function. -+ This option had no functionality in ``docker_network.absent``. -+ -+The ``git`` state had the following changes: -+ -+- Support for the ``ref`` option in the ``detached`` state has been removed. -+ Please use the ``rev`` option instead. -+ -+The ``k8s`` state has been removed. The following functions should be used -+instead: -+ -+- The ``k8s.label_absent`` function was removed. Please update applicable SLS -+ files to use the ``kubernetes.node_label_absent`` function instead. -+- The ``k8s.label_present`` function was removed. Please updated applicable SLS -+ files to use the ``kubernetes.node_label_present`` function instead. -+- The ``k8s.label_folder_absent`` function was removed. Please update applicable -+ SLS files to use the ``kubernetes.node_label_folder_absent`` function instead. -+ -+The ``netconfig`` state had the following changes: -+ -+- Support for the ``template_path`` option in the ``managed`` state has been -+ removed. 
This is because support for NAPALM native templates has been dropped. -+ -+The ``trafficserver`` state had the following changes: -+ -+- Support for the ``set_var`` function was removed. Please use the ``config`` -+ function instead. -+ -+The ``win_update`` state has been removed. Please use the ``win_wua`` state instead. -+ -+SaltSSH major updates -+===================== -+ -+SaltSSH now works across different major Python versions: Python 2.7 through Python 3.x -+are supported transparently. The requirement, however, is that the Salt master has Salt -+installed, including all related dependencies, for both Python 2 and Python 3, and that -+everything is importable from the respective Python environment. -+ -+SaltSSH can bundle up an arbitrary version of Salt. If there is, for example, an old box -+running an outdated and unsupported Python 2.6, it can still be accessed from a Salt -+master running Python 3.5 or newer. This feature requires additional configuration in -+/etc/salt/master as follows: -+ -+ -+.. code-block:: yaml -+ -+ ssh_ext_alternatives: -+ 2016.3: # Namespace, can actually be anything. -+ py-version: [2, 6] # Constraint to specific interpreter version -+ path: /opt/2016.3/salt # Main Salt installation -+ dependencies: # List of dependencies and their installation paths -+ jinja2: /opt/jinja2 -+ yaml: /opt/yaml -+ tornado: /opt/tornado -+ msgpack: /opt/msgpack -+ certifi: /opt/certifi -+ singledispatch: /opt/singledispatch.py -+ singledispatch_helpers: /opt/singledispatch_helpers.py -+ markupsafe: /opt/markupsafe -+ backports_abc: /opt/backports_abc.py -+ -+It is also possible to use several alternative versions of Salt; you can, for instance, -+generate a minimal tarball using runners and include that (see the illustrative sketch -+below). This is only possible when that specific Salt version is also available on the -+master machine, although it does not need to be installed together with the older -+Python interpreter. 
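-+ -+Several namespaces can simply be listed side by side, one per bundled Salt version. The -+following sketch is purely illustrative: the second namespace, its version numbers and -+its paths are invented for this example, not shipped defaults. -+ -+.. code-block:: yaml -+ -+ ssh_ext_alternatives: -+ 2016.3: # Old box with Python 2.6 -+ py-version: [2, 6] -+ path: /opt/2016.3/salt # Illustrative path only -+ 2018.3: # Hypothetical second namespace -+ py-version: [2, 7] -+ path: /opt/2018.3/salt # Illustrative path only 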
-diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py -index c0ce0fd7de..5ddd282ed0 100644 ---- a/salt/client/ssh/ssh_py_shim.py -+++ b/salt/client/ssh/ssh_py_shim.py -@@ -171,6 +171,9 @@ def unpack_thin(thin_path): - old_umask = os.umask(0o077) # pylint: disable=blacklisted-function - tfile.extractall(path=OPTIONS.saltdir) - tfile.close() -+ checksum_path = os.path.normpath(os.path.join(OPTIONS.saltdir, "thin_checksum")) -+ with open(checksum_path, "w") as chk: -+ chk.write(OPTIONS.checksum + "\n") - os.umask(old_umask) # pylint: disable=blacklisted-function - try: - os.unlink(thin_path) --- -2.29.2 - - diff --git a/add-supportconfig-module-for-remote-calls-and-saltss.patch b/add-supportconfig-module-for-remote-calls-and-saltss.patch deleted file mode 100644 index d03d6fe..0000000 --- a/add-supportconfig-module-for-remote-calls-and-saltss.patch +++ /dev/null @@ -1,1794 +0,0 @@ -From 9fba801c1e1e6136808dca80ccd7524ed483250e Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Fri, 19 Oct 2018 15:44:47 +0200 -Subject: [PATCH] Add supportconfig module for remote calls and SaltSSH - -Add log collector for remote purposes - -Implement default archive name - -Fix imports - -Implement runner function - -Remove targets data collector function as it is now called by a module instead - -Add external method decorator marker - -Add utility class for detecting exportable methods - -Mark run method as an external function - -Implement function setter - -Fix imports - -Setup config from __opts__ - -Use utility class - -Remove utils class - -Allow specifying the profile directly from the API parameter - -Rename module by virtual name - -Bypass parent subclass - -Implement profiles listing (local only for now) - -Specify profile from the state/call - -Set default or personalised archive name - -Add archives lister - -Add personalised name element to the archive name - -Use proper args/kwargs to the exported function - -Add archives deletion function - -Change log level when debugging rendered profiles - -Add ability to directly pass profile source when taking local data - -Add pillar profile support - -Remove extra-line - -Fix header - -Change output format for deleting archives - -Refactor logger output format - -Add time/milliseconds to each log notification - -Fix imports - -Switch output destination by context - -Add last archive function - -Lintfix - -Return consistent type - -Change output format for deleted archives report - -Implement report archive syncing to the reporting node - -Send multiple files at once via rsync, instead of sending one after another - -Add sync stats formatter - -Change signature: cleanup -> move. Update docstring. - -Flush empty data from the output format - -Report archfiles activity - -Refactor imports - -Do not remove retcode if it is EX_OK - -Do not raise rsync error for undefined archives. 
- -Update header - -Add salt-support state module - -Move all functions into a callable class object - -Support the __call__ function in state and command modules as the default entrance that does not need to be specified in SLS state syntax - -Allow outside access only to allowed class methods - -Pre-create the destination of the archive, preventing a single archive from being copied as the group name - -Handle function exceptions - -Add unit test scaffold - -Add LogCollector UT for testing regular message - -Add LogCollector UT for testing INFO message - -Add LogCollector UT for testing WARNING message - -Replace hardcoded variables with defined constants - -Add LogCollector UT for testing ERROR message - -Test title attribute in msg method of LogCollector - -Add UT for LogCollector on highlighter method - -Add UT for LogCollector on put method - -Fix docstrings - -Add UT for archive name generator - -Add UT for custom archive name - -Fix docstring for the UT - -Add UT for checking profiles list format - -Add Unit Test for existing archives listing - -Add UT for the last archive function - -Create instance of the support class - -Add UT for successfully deleting all archives - -Add UT for deleting archives with failures - -Add UT for formatting sync stats and order preservation - -Add UT for testing sync failure when no archives have been specified - -Add UT for last picked archive was not found - -Add UT for last specified archive was not found - -Bugfix: do not create an array with a None element in it - -Fix UT for found bugfix - -Add UT for syncing no archives failure - -Add UT for sync function - -Add UT for run support function - -Fix docstring for function "run" - -lintfix: use 'salt.support.mock' and 'patch()' - -Rewrite subdirectory creation and do not rely on Python3-only code - -Lintfix: remove unused imports - -Lintfix: regexp strings - -Break down one-liner if/else clause - -Use ordered dictionary to preserve order of the state. - -This has a transparent effect on the current process: OrderedDict is the -same as a plain Python dict, except it preserves the order of the state -chunks. - -Refactor state processing class. 
- -Add __call__ function to process single-id syntax - -Add backward-compatibility with default SLS syntax (id-per-call) - -Lintfix: E1120 no value in argument 'name' for class constructor - -Remove unused import - -Check last function by full name ---- - doc/ref/modules/all/index.rst | 1 + - doc/ref/states/all/index.rst | 1 + - salt/cli/support/__init__.py | 2 +- - salt/cli/support/collector.py | 14 +- - salt/loader.py | 6 +- - salt/modules/saltsupport.py | 405 ++++++++++++++++++++ - salt/state.py | 38 +- - salt/states/saltsupport.py | 225 +++++++++++ - salt/utils/args.py | 23 +- - salt/utils/decorators/__init__.py | 68 ++-- - tests/unit/modules/test_saltsupport.py | 496 +++++++++++++++++++++++++ - 11 files changed, 1220 insertions(+), 59 deletions(-) - create mode 100644 salt/modules/saltsupport.py - create mode 100644 salt/states/saltsupport.py - create mode 100644 tests/unit/modules/test_saltsupport.py - -diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst -index 4c93972276..9fea7af07f 100644 ---- a/doc/ref/modules/all/index.rst -+++ b/doc/ref/modules/all/index.rst -@@ -415,6 +415,7 @@ execution modules - salt_version - saltcheck - saltcloudmod -+ saltsupport - saltutil - schedule - scp_mod -diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst -index 2664b4ce45..052efe4582 100644 ---- a/doc/ref/states/all/index.rst -+++ b/doc/ref/states/all/index.rst -@@ -281,6 +281,7 @@ state modules - rvm - salt_proxy - saltmod -+ saltsupport - saltutil - schedule - selinux -diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py -index 4fdf44186f..59c2609e07 100644 ---- a/salt/cli/support/__init__.py -+++ b/salt/cli/support/__init__.py -@@ -47,7 +47,7 @@ def get_profile(profile, caller, runner): - if os.path.exists(profile_path): - try: - rendered_template = _render_profile(profile_path, caller, runner) -- log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) -+ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) - data.update(yaml.load(rendered_template)) - except Exception as ex: - log.debug(ex, exc_info=True) -diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py -index a08a0b8c6e..1879cc5220 100644 ---- a/salt/cli/support/collector.py -+++ b/salt/cli/support/collector.py -@@ -362,7 +362,7 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - - return data - -- def collect_local_data(self): -+ def collect_local_data(self, profile=None, profile_source=None): - """ - Collects master system data. - :return: -@@ -388,8 +388,8 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - self._local_run({"fun": func, "arg": args, "kwarg": kwargs}) - ) - -- scenario = salt.cli.support.get_profile( -- self.config["support_profile"], call, run -+ scenario = profile_source or salt.cli.support.get_profile( -+ profile or self.config["support_profile"], call, run - ) - for category_name in scenario: - self.out.put(category_name) -@@ -441,13 +441,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - - return action_name.split(":")[0] or None - -- def collect_targets_data(self): -- """ -- Collects minion targets data -- :return: -- """ -- # TODO: remote collector? 
-- - def _cleanup(self): - """ - Cleanup if crash/exception -@@ -551,7 +544,6 @@ class SaltSupport(salt.utils.parsers.SaltSupportOptionParser): - self.collector.open() - self.collect_local_data() - self.collect_internal_data() -- self.collect_targets_data() - self.collector.close() - - archive_path = self.collector.archive_path -diff --git a/salt/loader.py b/salt/loader.py -index 8232ed632e..1ee40712e5 100644 ---- a/salt/loader.py -+++ b/salt/loader.py -@@ -1843,8 +1843,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): - } - - for attr in getattr(mod, "__load__", dir(mod)): -- if attr.startswith("_"): -- # private functions are skipped -+ if attr.startswith("_") and attr != "__call__": -+ # private functions are skipped, -+ # except __call__ which is default entrance -+ # for multi-function batch-like state syntax - continue - func = getattr(mod, attr) - if not inspect.isfunction(func) and not isinstance(func, functools.partial): -diff --git a/salt/modules/saltsupport.py b/salt/modules/saltsupport.py -new file mode 100644 -index 0000000000..e800e3bf1f ---- /dev/null -+++ b/salt/modules/saltsupport.py -@@ -0,0 +1,405 @@ -+# -+# Author: Bo Maryniuk -+# -+# Copyright 2018 SUSE LLC -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+""" -+:codeauthor: :email:`Bo Maryniuk ` -+ -+Module to run salt-support within Salt. -+""" -+# pylint: disable=W0231,W0221 -+ -+ -+import datetime -+import logging -+import os -+import re -+import sys -+import tempfile -+import time -+ -+import salt.cli.support -+import salt.cli.support.intfunc -+import salt.defaults.exitcodes -+import salt.exceptions -+import salt.utils.decorators -+import salt.utils.dictupdate -+import salt.utils.odict -+import salt.utils.path -+import salt.utils.stringutils -+from salt.cli.support.collector import SaltSupport, SupportDataCollector -+ -+__virtualname__ = "support" -+log = logging.getLogger(__name__) -+ -+ -+class LogCollector: -+ """ -+ Output collector. 
-+ """ -+ -+ INFO = "info" -+ WARNING = "warning" -+ ERROR = "error" -+ -+ class MessagesList(list): -+ def append(self, obj): -+ list.append( -+ self, -+ "{} - {}".format( -+ datetime.datetime.utcnow().strftime("%T.%f")[:-3], obj -+ ), -+ ) -+ -+ __call__ = append -+ -+ def __init__(self): -+ self.messages = { -+ self.INFO: self.MessagesList(), -+ self.WARNING: self.MessagesList(), -+ self.ERROR: self.MessagesList(), -+ } -+ -+ def msg(self, message, *args, **kwargs): -+ title = kwargs.get("title") -+ if title: -+ message = "{}: {}".format(title, message) -+ self.messages[self.INFO](message) -+ -+ def info(self, message, *args, **kwargs): -+ self.msg(message) -+ -+ def warning(self, message, *args, **kwargs): -+ self.messages[self.WARNING](message) -+ -+ def error(self, message, *args, **kwargs): -+ self.messages[self.ERROR](message) -+ -+ def put(self, message, *args, **kwargs): -+ self.messages[self.INFO](message) -+ -+ def highlight(self, message, *values, **kwargs): -+ self.msg(message.format(*values)) -+ -+ -+class SaltSupportModule(SaltSupport): -+ """ -+ Salt Support module class. -+ """ -+ -+ def __init__(self): -+ """ -+ Constructor -+ """ -+ self.config = self.setup_config() -+ -+ def setup_config(self): -+ """ -+ Return current configuration -+ :return: -+ """ -+ return __opts__ -+ -+ def _get_archive_name(self, archname=None): -+ """ -+ Create default archive name. -+ -+ :return: -+ """ -+ archname = re.sub("[^a-z0-9]", "", (archname or "").lower()) or "support" -+ for grain in ["fqdn", "host", "localhost", "nodename"]: -+ host = __grains__.get(grain) -+ if host: -+ break -+ if not host: -+ host = "localhost" -+ -+ return os.path.join( -+ tempfile.gettempdir(), -+ "{hostname}-{archname}-{date}-{time}.bz2".format( -+ archname=archname, -+ hostname=host, -+ date=time.strftime("%Y%m%d"), -+ time=time.strftime("%H%M%S"), -+ ), -+ ) -+ -+ @salt.utils.decorators.external -+ def profiles(self): -+ """ -+ Get list of profiles. -+ -+ :return: -+ """ -+ return { -+ "standard": salt.cli.support.get_profiles(self.config), -+ "custom": [], -+ } -+ -+ @salt.utils.decorators.external -+ def archives(self): -+ """ -+ Get list of existing archives. -+ :return: -+ """ -+ arc_files = [] -+ tmpdir = tempfile.gettempdir() -+ for filename in os.listdir(tmpdir): -+ mtc = re.match(r"\w+-\w+-\d+-\d+\.bz2", filename) -+ if mtc and len(filename) == mtc.span()[-1]: -+ arc_files.append(os.path.join(tmpdir, filename)) -+ -+ return arc_files -+ -+ @salt.utils.decorators.external -+ def last_archive(self): -+ """ -+ Get the last available archive -+ :return: -+ """ -+ archives = {} -+ for archive in self.archives(): -+ archives[int(archive.split(".")[0].split("-")[-1])] = archive -+ -+ return archives and archives[max(archives)] or None -+ -+ @salt.utils.decorators.external -+ def delete_archives(self, *archives): -+ """ -+ Delete archives -+ :return: -+ """ -+ # Remove paths -+ _archives = [] -+ for archive in archives: -+ _archives.append(os.path.basename(archive)) -+ archives = _archives[:] -+ -+ ret = {"files": {}, "errors": {}} -+ for archive in self.archives(): -+ arc_dir = os.path.dirname(archive) -+ archive = os.path.basename(archive) -+ if archives and archive in archives or not archives: -+ archive = os.path.join(arc_dir, archive) -+ try: -+ os.unlink(archive) -+ ret["files"][archive] = "removed" -+ except Exception as err: -+ ret["errors"][archive] = str(err) -+ ret["files"][archive] = "left" -+ -+ return ret -+ -+ def format_sync_stats(self, cnt): -+ """ -+ Format stats of the sync output. 
-+ -+ :param cnt: -+ :return: -+ """ -+ stats = salt.utils.odict.OrderedDict() -+ if cnt.get("retcode") == salt.defaults.exitcodes.EX_OK: -+ for line in cnt.get("stdout", "").split(os.linesep): -+ line = line.split(": ") -+ if len(line) == 2: -+ stats[line[0].lower().replace(" ", "_")] = line[1] -+ cnt["transfer"] = stats -+ del cnt["stdout"] -+ -+ # Remove empty -+ empty_sections = [] -+ for section in cnt: -+ if not cnt[section] and section != "retcode": -+ empty_sections.append(section) -+ for section in empty_sections: -+ del cnt[section] -+ -+ return cnt -+ -+ @salt.utils.decorators.depends("rsync") -+ @salt.utils.decorators.external -+ def sync(self, group, name=None, host=None, location=None, move=False, all=False): -+ """ -+ Sync the latest archive to the host on given location. -+ -+ CLI Example: -+ -+ .. code-block:: bash -+ -+ salt '*' support.sync group=test -+ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 -+ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan -+ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan location=/opt/ -+ -+ :param group: name of the local directory to which sync is going to put the result files -+ :param name: name of the archive. Latest, if not specified. -+ :param host: name of the destination host for rsync. Default is master, if not specified. -+ :param location: local destination directory, default temporary if not specified -+ :param move: move archive file[s]. Default is False. -+ :param all: work with all available archives. Default is False (i.e. latest available) -+ -+ :return: -+ """ -+ tfh, tfn = tempfile.mkstemp() -+ processed_archives = [] -+ src_uri = uri = None -+ -+ last_arc = self.last_archive() -+ if name: -+ archives = [name] -+ elif all: -+ archives = self.archives() -+ elif last_arc: -+ archives = [last_arc] -+ else: -+ archives = [] -+ -+ for name in archives: -+ err = None -+ if not name: -+ err = "No support archive has been defined." -+ elif not os.path.exists(name): -+ err = 'Support archive "{}" was not found'.format(name) -+ if err is not None: -+ log.error(err) -+ raise salt.exceptions.SaltInvocationError(err) -+ -+ if not uri: -+ src_uri = os.path.dirname(name) -+ uri = "{host}:{loc}".format( -+ host=host or __opts__["master"], -+ loc=os.path.join(location or tempfile.gettempdir(), group), -+ ) -+ -+ os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name))) -+ os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep)) -+ processed_archives.append(name) -+ log.debug("Syncing {filename} to {uri}".format(filename=name, uri=uri)) -+ os.close(tfh) -+ -+ if not processed_archives: -+ raise salt.exceptions.SaltInvocationError("No archives found to transfer.") -+ -+ ret = __salt__["rsync.rsync"]( -+ src=src_uri, -+ dst=uri, -+ additional_opts=["--stats", "--files-from={}".format(tfn)], -+ ) -+ ret["files"] = {} -+ for name in processed_archives: -+ if move: -+ salt.utils.dictupdate.update(ret, self.delete_archives(name)) -+ log.debug("Deleting {filename}".format(filename=name)) -+ ret["files"][name] = "moved" -+ else: -+ ret["files"][name] = "copied" -+ -+ try: -+ os.unlink(tfn) -+ except OSError as err: -+ log.error( -+ "Cannot remove temporary rsync file {fn}: {err}".format(fn=tfn, err=err) -+ ) -+ -+ return self.format_sync_stats(ret) -+ -+ @salt.utils.decorators.external -+ def run(self, profile="default", pillar=None, archive=None, output="nested"): -+ """ -+ Run Salt Support on the minion. 
-+ -+ profile -+ Set available profile name. Default is "default". -+ -+ pillar -+ Set available profile from the pillars. -+ -+ archive -+ Override archive name. Default is "support". This results in "hostname-support-YYYYMMDD-hhmmss.bz2". -+ -+ output -+ Change the default outputter. Default is "nested". -+ -+ CLI Example: -+ -+ .. code-block:: bash -+ -+ salt '*' support.run -+ salt '*' support.run profile=network -+ salt '*' support.run pillar=something_special -+ """ -+ -+ class outputswitch: -+ """ -+ Output switcher on context -+ """ -+ -+ def __init__(self, output_device): -+ self._tmp_out = output_device -+ self._orig_out = None -+ -+ def __enter__(self): -+ self._orig_out = salt.cli.support.intfunc.out -+ salt.cli.support.intfunc.out = self._tmp_out -+ -+ def __exit__(self, *args): -+ salt.cli.support.intfunc.out = self._orig_out -+ -+ self.out = LogCollector() -+ with outputswitch(self.out): -+ self.collector = SupportDataCollector( -+ archive or self._get_archive_name(archname=archive), output -+ ) -+ self.collector.out = self.out -+ self.collector.open() -+ self.collect_local_data( -+ profile=profile, profile_source=__pillar__.get(pillar) -+ ) -+ self.collect_internal_data() -+ self.collector.close() -+ -+ return {"archive": self.collector.archive_path, "messages": self.out.messages} -+ -+ -+def __virtual__(): -+ """ -+ Set method references as module function aliases -+ :return: -+ """ -+ support = SaltSupportModule() -+ -+ def _set_function(obj): -+ """ -+ Create a Salt function for the SaltSupport class. -+ """ -+ -+ def _cmd(*args, **kwargs): -+ """ -+ Call a support method as a function from Salt. -+ """ -+ _kwargs = {} -+ for kw in kwargs: -+ if not kw.startswith("__"): -+ _kwargs[kw] = kwargs[kw] -+ return obj(*args, **_kwargs) -+ -+ _cmd.__doc__ = obj.__doc__ -+ return _cmd -+ -+ for m_name in dir(support): -+ obj = getattr(support, m_name) -+ if getattr(obj, "external", False): -+ setattr(sys.modules[__name__], m_name, _set_function(obj)) -+ -+ return __virtualname__ -diff --git a/salt/state.py b/salt/state.py -index beab2cb16c..b1bce4e0cd 100644 ---- a/salt/state.py -+++ b/salt/state.py -@@ -1547,7 +1547,9 @@ class State: - names = [] - if state.startswith("__"): - continue -- chunk = {"state": state, "name": name} -+ chunk = OrderedDict() -+ chunk["state"] = state -+ chunk["name"] = name - if orchestration_jid is not None: - chunk["__orchestration_jid__"] = orchestration_jid - if "__sls__" in body: -@@ -2150,9 +2152,16 @@ class State: - ret = self.call_parallel(cdata, low) - else: - self.format_slots(cdata) -- ret = self.states[cdata["full"]]( -- *cdata["args"], **cdata["kwargs"] -- ) -+ if cdata["full"].split(".")[-1] == "__call__": -+ # __call__ requires OrderedDict to preserve state order -+ # kwargs are also invalid overall -+ ret = self.states[cdata["full"]]( -+ cdata["args"], module=None, state=cdata["kwargs"] -+ ) -+ else: -+ ret = self.states[cdata["full"]]( -+ *cdata["args"], **cdata["kwargs"] -+ ) - self.states.inject_globals = {} - if ( - "check_cmd" in low -@@ -3188,10 +3197,31 @@ class State: - running.update(errors) - return running - -+ def inject_default_call(self, high): -+ """ -+ Sets the __call__ function on a state, if not already there. 
-+ -+ :param high: -+ :return: -+ """ -+ for chunk in high: -+ state = high[chunk] -+ for state_ref in state: -+ needs_default = True -+ for argset in state[state_ref]: -+ if isinstance(argset, str): -+ needs_default = False -+ break -+ if needs_default: -+ order = state[state_ref].pop(-1) -+ state[state_ref].append("__call__") -+ state[state_ref].append(order) -+ - def call_high(self, high, orchestration_jid=None): - """ - Process a high data call and ensure the defined states. - """ -+ self.inject_default_call(high) - errors = [] - # If there is extension data reconcile it - high, ext_errors = self.reconcile_extend(high) -diff --git a/salt/states/saltsupport.py b/salt/states/saltsupport.py -new file mode 100644 -index 0000000000..fb0c9e0372 ---- /dev/null -+++ b/salt/states/saltsupport.py -@@ -0,0 +1,225 @@ -+# -+# Author: Bo Maryniuk -+# -+# Copyright 2018 SUSE LLC -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+r""" -+:codeauthor: :email:`Bo Maryniuk ` -+ -+Execution of Salt Support from within states -+============================================ -+ -+State to collect support data from the systems: -+ -+.. code-block:: yaml -+ -+ examine_my_systems: -+ support.taken: -+ - profile: default -+ -+ support.collected: -+ - group: somewhere -+ - move: true -+ -+""" -+import logging -+import os -+import tempfile -+ -+import salt.exceptions -+ -+# Import salt modules -+import salt.fileclient -+import salt.utils.decorators.path -+import salt.utils.odict -+ -+log = logging.getLogger(__name__) -+__virtualname__ = "support" -+ -+ -+class SaltSupportState: -+ """ -+ Salt-support. -+ """ -+ -+ EXPORTED = ["collected", "taken"] -+ -+ def get_kwargs(self, data): -+ kwargs = {} -+ for keyset in data: -+ kwargs.update(keyset) -+ -+ return kwargs -+ -+ def __call__(self, state): -+ """ -+ Call support. -+ -+ :param args: -+ :param kwargs: -+ :return: -+ """ -+ ret = { -+ "name": state.pop("name"), -+ "changes": {}, -+ "result": True, -+ "comment": "", -+ } -+ -+ out = {} -+ functions = ["Functions:"] -+ try: -+ for ref_func, ref_kwargs in state.items(): -+ if ref_func not in self.EXPORTED: -+ raise salt.exceptions.SaltInvocationError( -+ "Function {} is not found".format(ref_func) -+ ) -+ out[ref_func] = getattr(self, ref_func)(**self.get_kwargs(ref_kwargs)) -+ functions.append(" - {}".format(ref_func)) -+ ret["comment"] = "\n".join(functions) -+ except Exception as ex: -+ ret["comment"] = str(ex) -+ ret["result"] = False -+ ret["changes"] = out -+ -+ return ret -+ -+ def check_destination(self, location, group): -+ """ -+ Check destination for the archives. 
-+ :return: -+ """ -+ # Pre-create destination, since rsync will -+ # put one file named as group -+ try: -+ destination = os.path.join(location, group) -+ if os.path.exists(destination) and not os.path.isdir(destination): -+ raise salt.exceptions.SaltException( -+ 'Destination "{}" should be directory!'.format(destination) -+ ) -+ if not os.path.exists(destination): -+ os.makedirs(destination) -+ log.debug("Created destination directory for archives: %s", destination) -+ else: -+ log.debug( -+ "Archives destination directory %s already exists", destination -+ ) -+ except OSError as err: -+ log.error(err) -+ -+ def collected( -+ self, group, filename=None, host=None, location=None, move=True, all=True -+ ): -+ """ -+ Sync archives to a central place. -+ -+ :param name: -+ :param group: -+ :param filename: -+ :param host: -+ :param location: -+ :param move: -+ :param all: -+ :return: -+ """ -+ ret = { -+ "name": "support.collected", -+ "changes": {}, -+ "result": True, -+ "comment": "", -+ } -+ location = location or tempfile.gettempdir() -+ self.check_destination(location, group) -+ ret["changes"] = __salt__["support.sync"]( -+ group, name=filename, host=host, location=location, move=move, all=all -+ ) -+ -+ return ret -+ -+ def taken(self, profile="default", pillar=None, archive=None, output="nested"): -+ """ -+ Takes minion support config data. -+ -+ :param profile: -+ :param pillar: -+ :param archive: -+ :param output: -+ :return: -+ """ -+ ret = { -+ "name": "support.taken", -+ "changes": {}, -+ "result": True, -+ } -+ -+ result = __salt__["support.run"]( -+ profile=profile, pillar=pillar, archive=archive, output=output -+ ) -+ if result.get("archive"): -+ ret[ -+ "comment" -+ ] = "Information about this system has been saved to {} file.".format( -+ result["archive"] -+ ) -+ ret["changes"]["archive"] = result["archive"] -+ ret["changes"]["messages"] = {} -+ for key in ["info", "error", "warning"]: -+ if result.get("messages", {}).get(key): -+ ret["changes"]["messages"][key] = result["messages"][key] -+ else: -+ ret["comment"] = "" -+ -+ return ret -+ -+ -+_support_state = SaltSupportState() -+ -+ -+def __call__(*args, **kwargs): -+ """ -+ SLS single-ID syntax processing. -+ -+ module: -+ This module reference, equals to sys.modules[__name__] -+ -+ state: -+ Compiled state in preserved order. The function supposed to look -+ at first level array of functions. 
-+ -+ :param cdata: -+ :param kwargs: -+ :return: -+ """ -+ return _support_state(kwargs.get("state", {})) -+ -+ -+def taken(name, profile="default", pillar=None, archive=None, output="nested"): -+ return _support_state.taken( -+ profile=profile, pillar=pillar, archive=archive, output=output -+ ) -+ -+ -+def collected( -+ name, group, filename=None, host=None, location=None, move=True, all=True -+): -+ return _support_state.collected( -+ group=group, filename=filename, host=host, location=location, move=move, all=all -+ ) -+ -+ -+def __virtual__(): -+ """ -+ Salt Support state -+ """ -+ return __virtualname__ -diff --git a/salt/utils/args.py b/salt/utils/args.py -index 87afdd3597..102402500c 100644 ---- a/salt/utils/args.py -+++ b/salt/utils/args.py -@@ -1,8 +1,6 @@ --# -*- coding: utf-8 -*- - """ - Functions used for CLI argument handling - """ --from __future__ import absolute_import, print_function, unicode_literals - - import copy - import fnmatch -@@ -17,6 +15,7 @@ import salt.utils.jid - import salt.utils.versions - import salt.utils.yaml - from salt.exceptions import SaltInvocationError -+from salt.utils.odict import OrderedDict - - log = logging.getLogger(__name__) - -@@ -70,9 +69,9 @@ def invalid_kwargs(invalid_kwargs, raise_exc=True): - """ - if invalid_kwargs: - if isinstance(invalid_kwargs, dict): -- new_invalid = ["{0}={1}".format(x, y) for x, y in invalid_kwargs.items()] -+ new_invalid = ["{}={}".format(x, y) for x, y in invalid_kwargs.items()] - invalid_kwargs = new_invalid -- msg = "The following keyword arguments are not valid: {0}".format( -+ msg = "The following keyword arguments are not valid: {}".format( - ", ".join(invalid_kwargs) - ) - if raise_exc: -@@ -259,7 +258,7 @@ def get_function_argspec(func, is_class_method=None): - and this is not always wanted. 
- """ - if not callable(func): -- raise TypeError("{0} is not a callable".format(func)) -+ raise TypeError("{} is not a callable".format(func)) - - if hasattr(func, "__wrapped__"): - func = func.__wrapped__ -@@ -279,7 +278,7 @@ def get_function_argspec(func, is_class_method=None): - try: - sig = inspect.signature(func) - except TypeError: -- raise TypeError("Cannot inspect argument list for '{0}'".format(func)) -+ raise TypeError("Cannot inspect argument list for '{}'".format(func)) - else: - # argspec-related functions are deprecated in Python 3 in favor of - # the new inspect.Signature class, and will be removed at some -@@ -439,7 +438,7 @@ def format_call( - ret = initial_ret is not None and initial_ret or {} - - ret["args"] = [] -- ret["kwargs"] = {} -+ ret["kwargs"] = OrderedDict() - - aspec = get_function_argspec(fun, is_class_method=is_class_method) - -@@ -470,7 +469,7 @@ def format_call( - used_args_count = len(ret["args"]) + len(args) - args_count = used_args_count + len(missing_args) - raise SaltInvocationError( -- "{0} takes at least {1} argument{2} ({3} given)".format( -+ "{} takes at least {} argument{} ({} given)".format( - fun.__name__, args_count, args_count > 1 and "s" or "", used_args_count - ) - ) -@@ -506,18 +505,18 @@ def format_call( - # In case this is being called for a state module - "full", - # Not a state module, build the name -- "{0}.{1}".format(fun.__module__, fun.__name__), -+ "{}.{}".format(fun.__module__, fun.__name__), - ), - ) - else: -- msg = "{0} and '{1}' are invalid keyword arguments for '{2}'".format( -- ", ".join(["'{0}'".format(e) for e in extra][:-1]), -+ msg = "{} and '{}' are invalid keyword arguments for '{}'".format( -+ ", ".join(["'{}'".format(e) for e in extra][:-1]), - list(extra.keys())[-1], - ret.get( - # In case this is being called for a state module - "full", - # Not a state module, build the name -- "{0}.{1}".format(fun.__module__, fun.__name__), -+ "{}.{}".format(fun.__module__, fun.__name__), - ), - ) - -diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py -index 940d0a90f2..b06cf0abc8 100644 ---- a/salt/utils/decorators/__init__.py -+++ b/salt/utils/decorators/__init__.py -@@ -1,10 +1,7 @@ --# -*- coding: utf-8 -*- - """ - Helpful decorators for module writing - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import errno - import inspect -@@ -15,13 +12,10 @@ import time - from collections import defaultdict - from functools import wraps - --# Import salt libs - import salt.utils.args - import salt.utils.data - import salt.utils.versions - from salt.exceptions import CommandExecutionError, SaltConfigurationError -- --# Import 3rd-party libs - from salt.ext import six - from salt.log import LOG_LEVELS - -@@ -32,7 +26,7 @@ if getattr(sys, "getwindowsversion", False): - log = logging.getLogger(__name__) - - --class Depends(object): -+class Depends: - """ - This decorator will check the module when it is loaded and check that the - dependencies passed in are in the globals of the module. 
If not, it will -@@ -121,7 +115,7 @@ class Depends(object): - - @staticmethod - def run_command(dependency, mod_name, func_name): -- full_name = "{0}.{1}".format(mod_name, func_name) -+ full_name = "{}.{}".format(mod_name, func_name) - log.trace("Running '%s' for '%s'", dependency, full_name) - if IS_WINDOWS: - args = salt.utils.args.shlex_split(dependency, posix=False) -@@ -145,8 +139,8 @@ class Depends(object): - It will modify the "functions" dict and remove/replace modules that - are missing dependencies. - """ -- for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]): -- for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict): -+ for dependency, dependent_dict in cls.dependency_dict[kind].items(): -+ for (mod_name, func_name), (frame, params) in dependent_dict.items(): - if mod_name != tgt_mod: - continue - # Imports from local context take precedence over those from the global context. -@@ -232,7 +226,7 @@ class Depends(object): - except (AttributeError, KeyError): - pass - -- mod_key = "{0}.{1}".format(mod_name, func_name) -+ mod_key = "{}.{}".format(mod_name, func_name) - - # if we don't have this module loaded, skip it! - if mod_key not in functions: -@@ -267,9 +261,7 @@ def timing(function): - mod_name = function.__module__[16:] - else: - mod_name = function.__module__ -- fstr = "Function %s.%s took %.{0}f seconds to execute".format( -- sys.float_info.dig -- ) -+ fstr = "Function %s.%s took %.{}f seconds to execute".format(sys.float_info.dig) - log.profile(fstr, mod_name, function.__name__, end_time - start_time) - return ret - -@@ -291,13 +283,13 @@ def memoize(func): - def _memoize(*args, **kwargs): - str_args = [] - for arg in args: -- if not isinstance(arg, six.string_types): -- str_args.append(six.text_type(arg)) -+ if not isinstance(arg, str): -+ str_args.append(str(arg)) - else: - str_args.append(arg) - - args_ = ",".join( -- list(str_args) + ["{0}={1}".format(k, kwargs[k]) for k in sorted(kwargs)] -+ list(str_args) + ["{}={}".format(k, kwargs[k]) for k in sorted(kwargs)] - ) - if args_ not in cache: - cache[args_] = func(*args, **kwargs) -@@ -306,7 +298,7 @@ def memoize(func): - return _memoize - - --class _DeprecationDecorator(object): -+class _DeprecationDecorator: - """ - Base mix-in class for the deprecation decorator. - Takes care of common functionality, used in its derivatives. -@@ -359,7 +351,7 @@ class _DeprecationDecorator(object): - try: - return self._function(*args, **kwargs) - except TypeError as error: -- error = six.text_type(error).replace( -+ error = str(error).replace( - self._function, self._orig_f_name - ) # Hide hidden functions - log.error( -@@ -374,7 +366,7 @@ class _DeprecationDecorator(object): - self._function.__name__, - error, - ) -- six.reraise(*sys.exc_info()) -+ raise - else: - raise CommandExecutionError( - "Function is deprecated, but the successor function was not found." -@@ -626,11 +618,11 @@ class _WithDeprecated(_DeprecationDecorator): - - if use_deprecated and use_superseded: - raise SaltConfigurationError( -- "Function '{0}' is mentioned both in deprecated " -+ "Function '{}' is mentioned both in deprecated " - "and superseded sections. 
Please remove any of that.".format(full_name) - ) - old_function = self._globals.get( -- self._with_name or "_{0}".format(function.__name__) -+ self._with_name or "_{}".format(function.__name__) - ) - if self._policy == self.OPT_IN: - self._function = function if use_superseded else old_function -@@ -782,12 +774,30 @@ def ensure_unicode_args(function): - - @wraps(function) - def wrapped(*args, **kwargs): -- if six.PY2: -- return function( -- *salt.utils.data.decode_list(args), -- **salt.utils.data.decode_dict(kwargs) -- ) -- else: -- return function(*args, **kwargs) -+ return function(*args, **kwargs) - - return wrapped -+ -+ -+def external(func): -+ """ -+ Mark function as external. -+ -+ :param func: -+ :return: -+ """ -+ -+ def f(*args, **kwargs): -+ """ -+ Stub. -+ -+ :param args: -+ :param kwargs: -+ :return: -+ """ -+ return func(*args, **kwargs) -+ -+ f.external = True -+ f.__doc__ = func.__doc__ -+ -+ return f -diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py -new file mode 100644 -index 0000000000..f9ce7be29a ---- /dev/null -+++ b/tests/unit/modules/test_saltsupport.py -@@ -0,0 +1,496 @@ -+""" -+ :codeauthor: Bo Maryniuk -+""" -+ -+ -+import datetime -+ -+import salt.exceptions -+from salt.modules import saltsupport -+from tests.support.mixins import LoaderModuleMockMixin -+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch -+from tests.support.unit import TestCase, skipIf -+ -+try: -+ import pytest -+except ImportError: -+ pytest = None -+ -+ -+@skipIf(not bool(pytest), "Pytest required") -+@skipIf(NO_MOCK, NO_MOCK_REASON) -+class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin): -+ """ -+ Test cases for salt.modules.support::SaltSupportModule -+ """ -+ -+ def setup_loader_modules(self): -+ return {saltsupport: {}} -+ -+ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) -+ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) -+ @patch("time.strftime", MagicMock(return_value="000")) -+ def test_get_archive_name(self): -+ """ -+ Test archive name construction. -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ assert support._get_archive_name() == "/mnt/storage/c-3po-support-000-000.bz2" -+ -+ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) -+ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) -+ @patch("time.strftime", MagicMock(return_value="000")) -+ def test_get_custom_archive_name(self): -+ """ -+ Test get custom archive name. -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ temp_name = support._get_archive_name(archname="Darth Wader") -+ assert temp_name == "/mnt/storage/c-3po-darthwader-000-000.bz2" -+ temp_name = support._get_archive_name(archname="Яйця з сіллю") -+ assert temp_name == "/mnt/storage/c-3po-support-000-000.bz2" -+ temp_name = support._get_archive_name(archname="!@#$%^&*()Fillip J. Fry") -+ assert temp_name == "/mnt/storage/c-3po-fillipjfry-000-000.bz2" -+ -+ @patch( -+ "salt.cli.support.get_profiles", -+ MagicMock(return_value={"message": "Feature was not beta tested"}), -+ ) -+ def test_profiles_format(self): -+ """ -+ Test profiles format. 
-+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ profiles = support.profiles() -+ assert "custom" in profiles -+ assert "standard" in profiles -+ assert "message" in profiles["standard"] -+ assert profiles["custom"] == [] -+ assert profiles["standard"]["message"] == "Feature was not beta tested" -+ -+ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) -+ @patch( -+ "os.listdir", -+ MagicMock( -+ return_value=[ -+ "one-support-000-000.bz2", -+ "two-support-111-111.bz2", -+ "trash.bz2", -+ "hostname-000-000.bz2", -+ "three-support-wrong222-222.bz2", -+ "000-support-000-000.bz2", -+ ] -+ ), -+ ) -+ def test_get_existing_archives(self): -+ """ -+ Get list of existing archives. -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ out = support.archives() -+ assert len(out) == 3 -+ for name in [ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/000-support-000-000.bz2", -+ ]: -+ assert name in out -+ -+ def test_last_archive(self): -+ """ -+ Get last archive name -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ assert support.last_archive() == "/mnt/storage/three-support-222-222.bz2" -+ -+ @patch("os.unlink", MagicMock(return_value=True)) -+ def test_delete_all_archives_success(self): -+ """ -+ Test delete archives -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ ret = support.delete_archives() -+ assert "files" in ret -+ assert "errors" in ret -+ assert not bool(ret["errors"]) -+ assert bool(ret["files"]) -+ assert isinstance(ret["errors"], dict) -+ assert isinstance(ret["files"], dict) -+ -+ for arc in support.archives(): -+ assert ret["files"][arc] == "removed" -+ -+ @patch( -+ "os.unlink", -+ MagicMock( -+ return_value=False, -+ side_effect=[ -+ OSError("Decreasing electron flux"), -+ OSError("Solar flares interference"), -+ None, -+ ], -+ ), -+ ) -+ def test_delete_all_archives_failure(self): -+ """ -+ Test delete archives failure -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ ret = support.delete_archives() -+ assert "files" in ret -+ assert "errors" in ret -+ assert bool(ret["errors"]) -+ assert bool(ret["files"]) -+ assert isinstance(ret["errors"], dict) -+ assert isinstance(ret["files"], dict) -+ -+ assert ret["files"]["/mnt/storage/three-support-222-222.bz2"] == "removed" -+ assert ret["files"]["/mnt/storage/one-support-000-000.bz2"] == "left" -+ assert ret["files"]["/mnt/storage/two-support-111-111.bz2"] == "left" -+ -+ assert len(ret["errors"]) == 2 -+ assert ( -+ ret["errors"]["/mnt/storage/one-support-000-000.bz2"] -+ == "Decreasing electron flux" -+ ) -+ assert ( -+ ret["errors"]["/mnt/storage/two-support-111-111.bz2"] -+ == "Solar flares interference" -+ ) -+ -+ def test_format_sync_stats(self): -+ """ -+ Test format rsync stats for preserving ordering of the keys -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ stats = """ 
-+robot: Bender -+cute: Leela -+weird: Zoidberg -+professor: Farnsworth -+ """ -+ f_stats = support.format_sync_stats({"retcode": 0, "stdout": stats}) -+ assert list(f_stats["transfer"].keys()) == [ -+ "robot", -+ "cute", -+ "weird", -+ "professor", -+ ] -+ assert list(f_stats["transfer"].values()) == [ -+ "Bender", -+ "Leela", -+ "Zoidberg", -+ "Farnsworth", -+ ] -+ -+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) -+ @patch("os.close", MagicMock()) -+ def test_sync_no_archives_failure(self): -+ """ -+ Test sync failed when no archives specified. -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=[]) -+ -+ with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync("group-name") -+ assert "No archives found to transfer" in str(err) -+ -+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) -+ @patch("os.path.exists", MagicMock(return_value=False)) -+ def test_sync_last_picked_archive_not_found_failure(self): -+ """ -+ Test sync failed when archive was not found (last picked) -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ -+ with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync("group-name") -+ assert ( -+ ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found' -+ in str(err) -+ ) -+ -+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) -+ @patch("os.path.exists", MagicMock(return_value=False)) -+ def test_sync_specified_archive_not_found_failure(self): -+ """ -+ Test sync failed when archive was not found (specified) -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ -+ with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync("group-name", name="lost.bz2") -+ assert ' Support archive "lost.bz2" was not found' in str(err) -+ -+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) -+ @patch("os.path.exists", MagicMock(return_value=False)) -+ @patch("os.close", MagicMock()) -+ def test_sync_no_archive_to_transfer_failure(self): -+ """ -+ Test sync failed when no archive was found to transfer -+ -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock(return_value=[]) -+ with pytest.raises(salt.exceptions.SaltInvocationError) as err: -+ support.sync("group-name", all=True) -+ assert "No archives found to transfer" in str(err) -+ -+ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) -+ @patch("os.path.exists", MagicMock(return_value=True)) -+ @patch("os.close", MagicMock()) -+ @patch("os.write", MagicMock()) -+ @patch("os.unlink", MagicMock()) -+ @patch( -+ "salt.modules.saltsupport.__salt__", {"rsync.rsync": MagicMock(return_value={})} -+ ) -+ def test_sync_archives(self): -+ """ -+ Test sync archives -+ :return: -+ """ -+ support = saltsupport.SaltSupportModule() -+ support.archives = MagicMock( -+ return_value=[ -+ "/mnt/storage/one-support-000-000.bz2", -+ "/mnt/storage/two-support-111-111.bz2", -+ "/mnt/storage/three-support-222-222.bz2", -+ ] -+ ) -+ out = support.sync("group-name", host="buzz", all=True, 
move=False) -+ assert "files" in out -+ for arc_name in out["files"]: -+ assert out["files"][arc_name] == "copied" -+ assert saltsupport.os.unlink.call_count == 1 -+ assert saltsupport.os.unlink.call_args_list[0][0][0] == "dummy" -+ calls = [] -+ for call in saltsupport.os.write.call_args_list: -+ assert len(call) == 2 -+ calls.append(call[0]) -+ assert calls == [ -+ (0, b"one-support-000-000.bz2"), -+ (0, b"\n"), -+ (0, b"two-support-111-111.bz2"), -+ (0, b"\n"), -+ (0, b"three-support-222-222.bz2"), -+ (0, b"\n"), -+ ] -+ -+ @patch("salt.modules.saltsupport.__pillar__", {}) -+ @patch("salt.modules.saltsupport.SupportDataCollector", MagicMock()) -+ def test_run_support(self): -+ """ -+ Test run support -+ :return: -+ """ -+ saltsupport.SupportDataCollector(None, None).archive_path = "dummy" -+ support = saltsupport.SaltSupportModule() -+ support.collect_internal_data = MagicMock() -+ support.collect_local_data = MagicMock() -+ out = support.run() -+ -+ for section in ["messages", "archive"]: -+ assert section in out -+ assert out["archive"] == "dummy" -+ for section in ["warning", "error", "info"]: -+ assert section in out["messages"] -+ ld_call = support.collect_local_data.call_args_list[0][1] -+ assert "profile" in ld_call -+ assert ld_call["profile"] == "default" -+ assert "profile_source" in ld_call -+ assert ld_call["profile_source"] is None -+ assert support.collector.open.call_count == 1 -+ assert support.collector.close.call_count == 1 -+ assert support.collect_internal_data.call_count == 1 -+ -+ -+@skipIf(not bool(pytest), "Pytest required") -+@skipIf(NO_MOCK, NO_MOCK_REASON) -+class LogCollectorTestCase(TestCase, LoaderModuleMockMixin): -+ """ -+ Test cases for salt.modules.support::LogCollector -+ """ -+ -+ def setup_loader_modules(self): -+ return {saltsupport: {}} -+ -+ def test_msg(self): -+ """ -+ Test message to the log collector. -+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ msg = "Upgrading /dev/null device" -+ out = saltsupport.LogCollector() -+ out.msg(msg, title="Here") -+ assert saltsupport.LogCollector.INFO in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.INFO]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.INFO] == [ -+ "00:00:00.000 - {}: {}".format("Here", msg) -+ ] -+ -+ def test_info_message(self): -+ """ -+ Test info message to the log collector. -+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ msg = "SIMM crosstalk during tectonic stress" -+ out = saltsupport.LogCollector() -+ out.info(msg) -+ assert saltsupport.LogCollector.INFO in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.INFO]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.INFO] == [ -+ "00:00:00.000 - {}".format(msg) -+ ] -+ -+ def test_put_message(self): -+ """ -+ Test put message to the log collector. 
-+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ msg = "Webmaster kidnapped by evil cult" -+ out = saltsupport.LogCollector() -+ out.put(msg) -+ assert saltsupport.LogCollector.INFO in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.INFO]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.INFO] == [ -+ "00:00:00.000 - {}".format(msg) -+ ] -+ -+ def test_warning_message(self): -+ """ -+ Test warning message to the log collector. -+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ msg = "Your e-mail is now being delivered by USPS" -+ out = saltsupport.LogCollector() -+ out.warning(msg) -+ assert saltsupport.LogCollector.WARNING in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.WARNING]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.WARNING] == [ -+ "00:00:00.000 - {}".format(msg) -+ ] -+ -+ def test_error_message(self): -+ """ -+ Test error message to the log collector. -+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ msg = "Learning curve appears to be fractal" -+ out = saltsupport.LogCollector() -+ out.error(msg) -+ assert saltsupport.LogCollector.ERROR in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.ERROR]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.ERROR] == [ -+ "00:00:00.000 - {}".format(msg) -+ ] -+ -+ def test_hl_message(self): -+ """ -+ Test highlighter message to the log collector. -+ -+ :return: -+ """ -+ utcmock = MagicMock() -+ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) -+ with patch("datetime.datetime", utcmock): -+ out = saltsupport.LogCollector() -+ out.highlight("The {} TTYs became {} TTYs and vice versa", "real", "pseudo") -+ assert saltsupport.LogCollector.INFO in out.messages -+ assert ( -+ type(out.messages[saltsupport.LogCollector.INFO]) -+ == saltsupport.LogCollector.MessagesList -+ ) -+ assert out.messages[saltsupport.LogCollector.INFO] == [ -+ "00:00:00.000 - The real TTYs became " "pseudo TTYs and vice versa" -+ ] --- -2.29.2 - - diff --git a/add-virt.all_capabilities.patch b/add-virt.all_capabilities.patch deleted file mode 100644 index 794c5fd..0000000 --- a/add-virt.all_capabilities.patch +++ /dev/null @@ -1,145 +0,0 @@ -From ca2ad86438293af6715a9890b168f159ff4d9b9b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= -Date: Thu, 18 Oct 2018 13:32:59 +0200 -Subject: [PATCH] Add virt.all_capabilities - -In order to get all possible capabilities from a host, the user has to -call virt.capabilities, and then loop over the guests and domains -before calling virt.domain_capabilities for each of them. - -This commit embeds all this logic to get them all in a single -virt.all_capabilities call. 
--- - salt/modules/virt.py | 73 +++++++++++++++++++++++++++++++-- - tests/unit/modules/test_virt.py | 2 +- - 2 files changed, 71 insertions(+), 4 deletions(-) - -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 313181c49e..362c2a68b5 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -5568,11 +5568,76 @@ def _parse_domain_caps(caps): - return result - - -+def _parse_domain_caps(caps): -+ """ -+ Parse the XML document of domain capabilities into a structure. -+ """ -+ result = { -+ "emulator": caps.find("path").text if caps.find("path") is not None else None, -+ "domain": caps.find("domain").text if caps.find("domain") is not None else None, -+ "machine": caps.find("machine").text -+ if caps.find("machine") is not None -+ else None, -+ "arch": caps.find("arch").text if caps.find("arch") is not None else None, -+ } -+ return result -+ -+ -+def all_capabilities(**kwargs): -+ """ -+ Return the host and domain capabilities in a single call. -+ -+ .. versionadded:: 3001 -+ -+ :param connection: libvirt connection URI, overriding defaults -+ :param username: username to connect with, overriding defaults -+ :param password: password to connect with, overriding defaults -+ -+ CLI Example: -+ -+ .. code-block:: bash -+ -+ salt '*' virt.all_capabilities -+ -+ """ -+ conn = __get_conn(**kwargs) -+ try: -+ host_caps = ElementTree.fromstring(conn.getCapabilities()) -+ domains = [ -+ [ -+ (guest.get("arch", {}).get("name", None), key) -+ for key in guest.get("arch", {}).get("domains", {}).keys() -+ ] -+ for guest in [ -+ _parse_caps_guest(guest) for guest in host_caps.findall("guest") -+ ] -+ ] -+ flattened = [pair for item in (x for x in domains) for pair in item] -+ result = { -+ "host": { -+ "host": _parse_caps_host(host_caps.find("host")), -+ "guests": [ -+ _parse_caps_guest(guest) for guest in host_caps.findall("guest") -+ ], -+ }, -+ "domains": [ -+ _parse_domain_caps( -+ ElementTree.fromstring( -+ conn.getDomainCapabilities(None, arch, None, domain) -+ ) -+ ) -+ for (arch, domain) in flattened -+ ], -+ } -+ return result -+ finally: -+ conn.close() -+ -+ - def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): - """ - Return the domain capabilities given an emulator, architecture, machine or virtualization type. - -- .. versionadded:: 2019.2.0 -+ .. versionadded:: Fluorine - - :param emulator: return the capabilities for the given emulator binary - :param arch: return the capabilities for the given CPU architecture -@@ -5611,7 +5676,7 @@ def all_capabilities(**kwargs): - """ - Return the host and domain capabilities in a single call. - -- .. versionadded:: 3001 -+ .. 
versionadded:: Neon - - :param connection: libvirt connection URI, overriding defaults - :param username: username to connect with, overriding defaults -@@ -5625,6 +5690,7 @@ def all_capabilities(**kwargs): - - """ - conn = __get_conn(**kwargs) -+ result = {} - try: - host_caps = ElementTree.fromstring(conn.getCapabilities()) - domains = [ -@@ -5653,10 +5719,11 @@ def all_capabilities(**kwargs): - for (arch, domain) in flattened - ], - } -- return result - finally: - conn.close() - -+ return result -+ - - def cpu_baseline(full=False, migratable=False, out="libvirt", **kwargs): - """ -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index cce107c9e4..e9e73d7b5d 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -4063,7 +4063,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"] - ) - self.assertEqual( -- {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]}, -+ {"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]} - ) - - def test_network_tag(self): --- -2.29.2 - - diff --git a/adding-preliminary-support-for-rocky.-59682-391.patch b/adding-preliminary-support-for-rocky.-59682-391.patch index 5b2eee1..a8db3c4 100644 --- a/adding-preliminary-support-for-rocky.-59682-391.patch +++ b/adding-preliminary-support-for-rocky.-59682-391.patch @@ -1,4 +1,4 @@ -From 34a913b0b54b55edf042dc899250e56ef0eaec77 Mon Sep 17 00:00:00 2001 +From e3e55336b0d457cb55cd83236e9ac8e0dc671d2e Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:57:26 +0300 Subject: [PATCH] Adding preliminary support for Rocky. (#59682) (#391) @@ -32,30 +32,30 @@ index 0000000000..93b4a3d1fc @@ -0,0 +1 @@ +Rocky Linux has been added to the RedHat os_family. 
diff --git a/salt/grains/core.py b/salt/grains/core.py -index 2b965a2a8a..ace0e4bff9 100644 +index bce8c95179..f79110124f 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py -@@ -1547,6 +1547,7 @@ _OS_NAME_MAP = { - "slesexpand": "RES", +@@ -1560,6 +1560,7 @@ _OS_NAME_MAP = { "linuxmint": "Mint", "neon": "KDE neon", + "pop": "Pop", + "rocky": "Rocky", "alibabaclo": "Alinux", } -@@ -1621,6 +1622,7 @@ _OS_FAMILY_MAP = { - "Funtoo": "Gentoo", +@@ -1637,6 +1638,7 @@ _OS_FAMILY_MAP = { "AIX": "AIX", "TurnKey": "Debian", + "Pop": "Debian", + "Rocky": "RedHat", "AstraLinuxCE": "Debian", "Alinux": "RedHat", } diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 8280d6de47..61a6956e32 100644 +index fa06bb27ab..fcc7586775 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -678,6 +678,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): +@@ -708,6 +708,35 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): } self._run_os_grains_tests(None, _os_release_map, expectation) @@ -92,6 +92,6 @@ index 8280d6de47..61a6956e32 100644 def test_almalinux_8_os_grains(self): """ -- -2.32.0 +2.33.0 diff --git a/adds-explicit-type-cast-for-port.patch b/adds-explicit-type-cast-for-port.patch index 176d530..2681ce4 100644 --- a/adds-explicit-type-cast-for-port.patch +++ b/adds-explicit-type-cast-for-port.patch @@ -1,4 +1,4 @@ -From 12d67e0cfa54399f3a0b6ae0d4faa09793fa2b0f Mon Sep 17 00:00:00 2001 +From 3beb3379dafe1adf9c1a43694f7b71938be3f583 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 1 Apr 2020 16:13:23 +0200 Subject: [PATCH] Adds explicit type cast for port @@ -8,26 +8,25 @@ and a wrong set of remotes was returned. The type casting to int solves this issue. --- - salt/utils/network.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) + salt/utils/network.py | 4 ++++ + 1 file changed, 4 insertions(+) diff --git a/salt/utils/network.py b/salt/utils/network.py -index 25b2d06758..1705a5809d 100644 +index 5fc9a34ca4..0dd20c5599 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -1626,9 +1626,9 @@ def _netlink_tool_remote_on(port, which_end): - local_host, local_port = chunks[3].rsplit(":", 1) +@@ -1703,6 +1703,10 @@ def _netlink_tool_remote_on(port, which_end): + chunks = line.split() remote_host, remote_port = chunks[4].rsplit(":", 1) -- if which_end == "remote_port" and int(remote_port) != port: + if which_end == "remote_port" and int(remote_port) != int(port): - continue -- if which_end == "local_port" and int(local_port) != port: ++ continue + if which_end == "local_port" and int(local_port) != int(port): - continue ++ continue remotes.add(remote_host.strip("[]")) + if valid is False: -- -2.29.2 +2.33.0 diff --git a/allow-extra_filerefs-as-sanitized-kwargs-for-ssh-cli.patch b/allow-extra_filerefs-as-sanitized-kwargs-for-ssh-cli.patch deleted file mode 100644 index 6a18cf9..0000000 --- a/allow-extra_filerefs-as-sanitized-kwargs-for-ssh-cli.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 5e8a9c9eaa18c53b259a3bb1da8df51f5382ed6b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Mon, 22 Feb 2021 11:39:19 +0000 -Subject: [PATCH] Allow extra_filerefs as sanitized kwargs for SSH - client - -(cherry picked from commit 89f843398849633af52cceab2155e9cedf8ad3dd) ---- - salt/client/ssh/client.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py -index 2cf42f53e7..3631c3bb67 100644 ---- 
a/salt/client/ssh/client.py -+++ b/salt/client/ssh/client.py -@@ -60,6 +60,7 @@ class SSHClient: - ("rosters", list), - ("ignore_host_keys", bool), - ("raw_shell", bool), -+ ("extra_filerefs", str), - ] - sane_kwargs = {} - for name, kind in roster_vals: --- -2.30.1 - - diff --git a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch b/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch deleted file mode 100644 index 22fdf66..0000000 --- a/allow-passing-kwargs-to-pkg.list_downloaded-bsc-1140.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 125f973014b8d5ffa13ae7dd231043e39af75ea0 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 3 Jul 2019 09:34:50 +0100 -Subject: [PATCH] Allow passing kwargs to pkg.list_downloaded - (bsc#1140193) - -Add unit test for pkg.list_downloaded with kwargs ---- - salt/modules/zypperpkg.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 75cb5ce4a8..c996935bff 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -2754,7 +2754,7 @@ def download(*packages, **kwargs): - ) - - --def list_downloaded(root=None): -+def list_downloaded(root=None, **kwargs): - """ - .. versionadded:: 2017.7.0 - --- -2.29.2 - - diff --git a/allow-vendor-change-option-with-zypper-313.patch b/allow-vendor-change-option-with-zypper-313.patch deleted file mode 100644 index ae57faa..0000000 --- a/allow-vendor-change-option-with-zypper-313.patch +++ /dev/null @@ -1,292 +0,0 @@ -From 33ad6876a04e800afc08748133dc568a5e362903 Mon Sep 17 00:00:00 2001 -From: Martin Seidl -Date: Wed, 17 Mar 2021 14:05:42 +0100 -Subject: [PATCH] Allow vendor change option with zypper (#313) -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -* add patch support for allow vendor change option with zypper - -* adjust unit tests vendor change refactor, dropping cli arg - -* Fix pr issues - -Co-authored-by: Pablo Suárez Hernández - -* Fix unit test for allow vendor change on upgrade - -* Add unit test with unsupported zypper version - -Co-authored-by: Pablo Suárez Hernández ---- - salt/modules/zypperpkg.py | 58 +++++++++++++--- - tests/unit/modules/test_zypperpkg.py | 99 ++++++++++++++++++---------- - 2 files changed, 112 insertions(+), 45 deletions(-) - -diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index 6f22994bf0..b35792237c 100644 ---- a/salt/modules/zypperpkg.py -+++ b/salt/modules/zypperpkg.py -@@ -105,6 +105,10 @@ class _Zypper: - ZYPPER_LOCK = "/var/run/zypp.pid" - TAG_RELEASED = "zypper/released" - TAG_BLOCKED = "zypper/blocked" -+ # Dist upgrade vendor change support (SLE12+) -+ dup_avc = False -+ # Install/Patch/Upgrade vendor change support (SLE15+) -+ inst_avc = False - - def __init__(self): - """ -@@ -218,6 +222,21 @@ class _Zypper: - def pid(self): - return self.__call_result.get("pid", "") - -+ def refresh_zypper_flags(self): -+ try: -+ zypp_version = version('zypper') -+ # zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade -+ if version_cmp(zypp_version, '1.11.34') >= 0: -+ # zypper version supports vendor change for dist upgrade -+ self.dup_avc = True -+ # zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading -+ if version_cmp(zypp_version, '1.14.8') >= 0: -+ self.inst_avc = True -+ else: -+ log.error("Failed to compare Zypper version") -+ except Exception as ex: -+ log.error("Unable to get Zypper version: 
{}".format(ex)) -+ - def _is_error(self): - """ - Is this is an error code? -@@ -1431,6 +1450,7 @@ def install( - no_recommends=False, - root=None, - inclusion_detection=False, -+ novendorchange=True, - **kwargs - ): - """ -@@ -1478,6 +1498,9 @@ def install( - skip_verify - Skip the GPG verification check (e.g., ``--no-gpg-checks``) - -+ novendorchange -+ Disallow vendor change -+ - version - Can be either a version number, or the combination of a comparison - operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4'). -@@ -1638,6 +1661,15 @@ def install( - cmd_install.append( - kwargs.get("resolve_capabilities") and "--capability" or "--name" - ) -+ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+ -+ if not novendorchange: -+ __zypper__(root=root).refresh_zypper_flags() -+ if __zypper__(root=root).inst_avc: -+ cmd_install.append("--allow-vendor-change") -+ log.info("Enabling vendor changes") -+ else: -+ log.warning("Enabling/Disabling vendor changes is not supported on this Zypper version") -+ - - if not refresh: - cmd_install.insert(0, "--no-refresh") -@@ -1793,19 +1825,25 @@ def upgrade( - cmd_update.extend(["--from" if dist_upgrade else "--repo", repo]) - log.info("Targeting repos: %s", fromrepo) - -- if dist_upgrade: -- # TODO: Grains validation should be moved to Zypper class -- if __grains__["osrelease_info"][0] > 11: -- if novendorchange: -- cmd_update.append("--no-allow-vendor-change") -- log.info("Disabling vendor changes") -- else: -+ if not novendorchange: -+ __zypper__(root=root).refresh_zypper_flags() -+ if dist_upgrade: -+ if __zypper__(root=root).dup_avc: - cmd_update.append("--allow-vendor-change") - log.info("Enabling vendor changes") -+ else: -+ log.warning( -+ "Enabling/Disabling vendor changes is not supported on this Zypper version" -+ ) - else: -- log.warning( -- "Enabling/Disabling vendor changes is not supported on this Zypper version" -- ) -+ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+ -+ if __zypper__(root=root).inst_avc: -+ cmd_update.append("--allow-vendor-change") -+ log.info("Enabling vendor changes") -+ else: -+ log.warning( -+ "Enabling/Disabling vendor changes is not supported on this Zypper version" -+ ) - - if no_recommends: - cmd_update.append("--no-recommends") -diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index 9c4a224c55..f32c382d7f 100644 ---- a/tests/unit/modules/test_zypperpkg.py -+++ b/tests/unit/modules/test_zypperpkg.py -@@ -644,7 +644,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - zypper_mock.assert_any_call( - "dist-upgrade", - "--auto-agree-with-licenses", -- "--no-allow-vendor-change", - ) - - with patch( -@@ -691,46 +690,80 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "dist-upgrade", - "--auto-agree-with-licenses", - "--dry-run", -- "--no-allow-vendor-change", - ) - zypper_mock.assert_any_call( - "dist-upgrade", - "--auto-agree-with-licenses", - "--dry-run", -- "--no-allow-vendor-change", - ) - - with patch( - "salt.modules.zypperpkg.list_pkgs", -- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]) - ): -- ret = zypper.upgrade( -- dist_upgrade=True, -- dryrun=True, -- fromrepo=["Dummy", "Dummy2"], -- novendorchange=False, -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- 
"--allow-vendor-change", -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- "--allow-vendor-change", -- "--debug-solver", -- ) -+ with patch.dict(zypper.__salt__, -+ {'pkg_resource.version': MagicMock(return_value='1.15'), -+ 'lowpkg.version_cmp': MagicMock(return_value=1)}): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ dryrun=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=False, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ "--debug-solver", -+ ) -+ -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]) -+ ): -+ with patch.dict(zypper.__salt__, -+ {'pkg_resource.version': MagicMock(return_value='1.11'), -+ 'lowpkg.version_cmp': MagicMock(return_value=1)}): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ dryrun=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=False, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--debug-solver", -+ ) - - with patch( - "salt.modules.zypperpkg.list_pkgs", -@@ -750,7 +783,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "Dummy", - "--from", - "Dummy2", -- "--no-allow-vendor-change", - ) - zypper_mock.assert_any_call( - "dist-upgrade", -@@ -760,7 +792,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "Dummy", - "--from", - "Dummy2", -- "--no-allow-vendor-change", - "--debug-solver", - ) - -@@ -797,7 +828,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "Dummy", - "--from", - "Dummy2", -- "--no-allow-vendor-change", - ) - - with patch( -@@ -911,7 +941,6 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
- "--auto-agree-with-licenses", - "--from", - "DUMMY", -- "--no-allow-vendor-change", - ) - - def test_upgrade_available(self): --- -2.30.1 - - diff --git a/move-vendor-change-logic-to-zypper-class-355.patch b/allow-vendor-change-option-with-zypper.patch similarity index 74% rename from move-vendor-change-logic-to-zypper-class-355.patch rename to allow-vendor-change-option-with-zypper.patch index c5b1e07..c701677 100644 --- a/move-vendor-change-logic-to-zypper-class-355.patch +++ b/allow-vendor-change-option-with-zypper.patch @@ -1,11 +1,41 @@ -From a6f8803f6374f646802a898e43bc772d05960d89 Mon Sep 17 00:00:00 2001 +From 07d1b742f16799d3df9d7eeb04bbce5d814e519d Mon Sep 17 00:00:00 2001 From: Martin Seidl -Date: Thu, 24 Jun 2021 10:08:06 +0200 -Subject: [PATCH] Move vendor change logic to zypper class (#355) +Date: Tue, 27 Oct 2020 16:12:29 +0100 +Subject: [PATCH] Allow vendor change option with zypper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit +Fix novendorchange option (#284) + +* Fixed novendorchange handling in zypperpkg + +* refactor handling of novendorchange and fix tests + +add patch support for allow vendor change option with zypper + +Revert "add patch support for allow vendor change option with zypper" + +This reverts commit cee4cc182b4740c912861c712dea7bc44eb70ffb. + +Allow vendor change option with zypper (#313) + +* add patch support for allow vendor change option with zypper + +* adjust unit tests vendor change refactor, dropping cli arg + +* Fix pr issues + +Co-authored-by: Pablo Suárez Hernández + +* Fix unit test for allow vendor change on upgrade + +* Add unit test with unsupported zypper version + +Co-authored-by: Pablo Suárez Hernández + +Move vendor change logic to zypper class (#355) + * move vendor change logic to zypper class * fix thing in zypperkg @@ -29,26 +59,24 @@ Co-authored-by: Pablo Suárez Hernández Co-authored-by: Jochen Breuer Co-authored-by: Pablo Suárez Hernández --- - salt/modules/zypperpkg.py | 110 +++--- - tests/unit/modules/test_zypperpkg.py | 513 +++++++++++++++++++-------- - 2 files changed, 428 insertions(+), 195 deletions(-) + salt/modules/zypperpkg.py | 105 +++++-- + tests/unit/modules/test_zypperpkg.py | 418 ++++++++++++++++++++++++--- + 2 files changed, 462 insertions(+), 61 deletions(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index b35792237c..e064e2cb4e 100644 +index 1777bec031..7216e25b86 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -105,10 +105,6 @@ class _Zypper: - ZYPPER_LOCK = "/var/run/zypp.pid" - TAG_RELEASED = "zypper/released" - TAG_BLOCKED = "zypper/blocked" -- # Dist upgrade vendor change support (SLE12+) -- dup_avc = False -- # Install/Patch/Upgrade vendor change support (SLE15+) -- inst_avc = False +@@ -35,6 +35,8 @@ import salt.utils.stringutils + import salt.utils.systemd + import salt.utils.versions + from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError ++ ++# pylint: disable=import-error,redefined-builtin,no-name-in-module + from salt.utils.versions import LooseVersion - def __init__(self): - """ -@@ -138,6 +134,13 @@ class _Zypper: + log = logging.getLogger(__name__) +@@ -128,6 +130,13 @@ class _Zypper: self.__systemd_scope = False self.__root = None @@ -62,7 +90,7 @@ index b35792237c..e064e2cb4e 100644 # Call status self.__called = False -@@ -182,6 +185,8 @@ class _Zypper: +@@ -172,6 +181,8 @@ class _Zypper: self.__no_raise = True elif item == "refreshable": self.__refresh = True @@ -71,7 
+99,7 @@ index b35792237c..e064e2cb4e 100644 elif item == "call": return self.__call else: -@@ -222,15 +227,27 @@ class _Zypper: +@@ -212,6 +223,33 @@ class _Zypper: def pid(self): return self.__call_result.get("pid", "") @@ -87,22 +115,25 @@ index b35792237c..e064e2cb4e 100644 + ) + return self + - def refresh_zypper_flags(self): - try: -- zypp_version = version('zypper') ++ def refresh_zypper_flags(self): ++ try: + zypp_version = version("zypper") - # zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade -- if version_cmp(zypp_version, '1.11.34') >= 0: ++ # zypper version 1.11.34 in SLE12 update supports vendor change for only dist upgrade + if version_cmp(zypp_version, "1.11.34") >= 0: - # zypper version supports vendor change for dist upgrade - self.dup_avc = True - # zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading -- if version_cmp(zypp_version, '1.14.8') >= 0: ++ # zypper version supports vendor change for dist upgrade ++ self.dup_avc = True ++ # zypper version 1.14.8 in SLE15 update supports vendor change in install/patch/upgrading + if version_cmp(zypp_version, "1.14.8") >= 0: - self.inst_avc = True - else: - log.error("Failed to compare Zypper version") -@@ -351,6 +368,15 @@ class _Zypper: ++ self.inst_avc = True ++ else: ++ log.error("Failed to compare Zypper version") ++ except Exception as ex: ++ log.error("Unable to get Zypper version: {}".format(ex)) ++ + def _is_error(self): + """ + Is this is an error code? +@@ -326,6 +364,15 @@ class _Zypper: if self.__systemd_scope: cmd.extend(["systemd-run", "--scope"]) cmd.extend(self.__cmd) @@ -118,43 +149,38 @@ index b35792237c..e064e2cb4e 100644 log.debug("Calling Zypper: %s", " ".join(cmd)) self.__call_result = __salt__["cmd.run_all"](cmd, **kwargs) if self._check_result(): -@@ -1451,6 +1477,7 @@ def install( +@@ -1435,6 +1482,8 @@ def install( + no_recommends=False, root=None, inclusion_detection=False, - novendorchange=True, ++ novendorchange=True, + allowvendorchange=False, **kwargs ): """ -@@ -1499,7 +1526,11 @@ def install( +@@ -1482,6 +1531,13 @@ def install( + skip_verify Skip the GPG verification check (e.g., ``--no-gpg-checks``) - novendorchange -- Disallow vendor change ++ novendorchange + DEPRECATED(use allowvendorchange): If set to True, do not allow vendor changes. Default: True + + allowvendorchange + If set to True, vendor change is allowed. Default: False + If both allowvendorchange and novendorchange are passed, only allowvendorchange is used. - ++ version Can be either a version number, or the combination of a comparison -@@ -1662,14 +1693,6 @@ def install( + operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4'). 
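The precedence rule documented just above (an explicit `allowvendorchange` wins over the deprecated `novendorchange`) is easy to misread, so here is a hedged sketch of the effective decision, with zypper's version-dependent capability flags (`dup_avc`, `inst_avc`) stubbed as booleans instead of probed from the installed zypper:

.. code-block:: python

    # Sketch of when '--allow-vendor-change' should be appended; assumes the
    # simplified semantics described above, not the full _Zypper class.
    def effective_vendor_change(allowvendorchange, novendorchange,
                                dup_avc=True, inst_avc=True):
        # allowvendorchange, when set, overrides the deprecated novendorchange;
        # otherwise novendorchange=False is the legacy way to opt in.
        wanted = allowvendorchange or not novendorchange
        # Even when requested, the installed zypper must support the option.
        return wanted and (dup_avc or inst_avc)

    assert effective_vendor_change(True, True) is True    # explicit allow wins
    assert effective_vendor_change(False, True) is False  # default: no change
    assert effective_vendor_change(False, False) is True  # legacy opt-in
    assert effective_vendor_change(True, True, dup_avc=False,
                                   inst_avc=False) is False  # old zypper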
+@@ -1647,6 +1703,7 @@ def install( + cmd_install.append( kwargs.get("resolve_capabilities") and "--capability" or "--name" ) - # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+ -- if not novendorchange: -- __zypper__(root=root).refresh_zypper_flags() -- if __zypper__(root=root).inst_avc: -- cmd_install.append("--allow-vendor-change") -- log.info("Enabling vendor changes") -- else: -- log.warning("Enabling/Disabling vendor changes is not supported on this Zypper version") -- ++ # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+ if not refresh: cmd_install.insert(0, "--no-refresh") -@@ -1696,6 +1719,7 @@ def install( +@@ -1683,6 +1740,7 @@ def install( systemd_scope=systemd_scope, root=root, ) @@ -162,7 +188,7 @@ index b35792237c..e064e2cb4e 100644 .call(*cmd) .splitlines() ): -@@ -1708,7 +1732,9 @@ def install( +@@ -1695,7 +1753,9 @@ def install( while downgrades: cmd = cmd_install + ["--force"] + downgrades[:500] downgrades = downgrades[500:] @@ -173,15 +199,17 @@ index b35792237c..e064e2cb4e 100644 _clean_cache() new = ( -@@ -1740,6 +1766,7 @@ def upgrade( +@@ -1726,7 +1786,8 @@ def upgrade( + dryrun=False, dist_upgrade=False, fromrepo=None, - novendorchange=True, +- novendorchange=False, ++ novendorchange=True, + allowvendorchange=False, skip_verify=False, no_recommends=False, root=None, -@@ -1778,7 +1805,11 @@ def upgrade( +@@ -1765,7 +1826,11 @@ def upgrade( Specify a list of package repositories to upgrade from. Default: None novendorchange @@ -194,29 +222,20 @@ index b35792237c..e064e2cb4e 100644 skip_verify Skip the GPG verification check (e.g., ``--no-gpg-checks``) -@@ -1825,40 +1856,21 @@ def upgrade( +@@ -1812,31 +1877,21 @@ def upgrade( cmd_update.extend(["--from" if dist_upgrade else "--repo", repo]) log.info("Targeting repos: %s", fromrepo) -- if not novendorchange: -- __zypper__(root=root).refresh_zypper_flags() -- if dist_upgrade: -- if __zypper__(root=root).dup_avc: -- cmd_update.append("--allow-vendor-change") -- log.info("Enabling vendor changes") +- if dist_upgrade: +- if novendorchange: +- # TODO: Grains validation should be moved to Zypper class +- if __grains__["osrelease_info"][0] > 11: +- cmd_update.append("--no-allow-vendor-change") +- log.info("Disabling vendor changes") - else: - log.warning( -- "Enabling/Disabling vendor changes is not supported on this Zypper version" +- "Disabling vendor changes is not supported on this Zypper version" - ) -- else: -- # Install / patching / upgrade with vendor change support is only in SLE 15+ opensuse Leap 15+ -- if __zypper__(root=root).inst_avc: -- cmd_update.append("--allow-vendor-change") -- log.info("Enabling vendor changes") -- else: -- log.warning( -- "Enabling/Disabling vendor changes is not supported on this Zypper version" -- ) - - if no_recommends: - cmd_update.append("--no-recommends") @@ -248,19 +267,10 @@ index b35792237c..e064e2cb4e 100644 new = list_pkgs(root=root) ret = salt.utils.data.compare_dicts(old, new) diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index f32c382d7f..5c01bbbfbd 100644 +index 0ba5595d65..78fe226914 100644 --- a/tests/unit/modules/test_zypperpkg.py +++ b/tests/unit/modules/test_zypperpkg.py -@@ -14,7 +14,7 @@ from salt.exceptions import CommandExecutionError, SaltInvocationError - from salt.ext import six - from salt.ext.six.moves import configparser - from tests.support.mixins import LoaderModuleMockMixin --from tests.support.mock import MagicMock, Mock, 
call, patch -+from tests.support.mock import MagicMock, Mock, call, mock_open, patch - from tests.support.unit import TestCase - - -@@ -135,6 +135,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): +@@ -137,6 +137,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): stdout_xml_snippet = '' sniffer = RunSniffer(stdout=stdout_xml_snippet) @@ -268,7 +278,7 @@ index f32c382d7f..5c01bbbfbd 100644 with patch.dict("salt.modules.zypperpkg.__salt__", {"cmd.run_all": sniffer}): self.assertEqual(zypper.__zypper__.call("foo"), stdout_xml_snippet) self.assertEqual(len(sniffer.calls), 1) -@@ -590,13 +591,373 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): +@@ -592,13 +593,373 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): {"vim": "7.4.326-2.62", "fakepkg": ""}, ) @@ -643,7 +653,7 @@ index f32c382d7f..5c01bbbfbd 100644 "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True) ), patch( "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False) -@@ -635,17 +996,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): +@@ -637,16 +998,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.1,1.2"}}) zypper_mock.assert_any_call("update", "--auto-agree-with-licenses") @@ -654,109 +664,52 @@ index f32c382d7f..5c01bbbfbd 100644 - ret = zypper.upgrade(dist_upgrade=True) - self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) - zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", +- "dist-upgrade", "--auto-agree-with-licenses" - ) - with patch( "salt.modules.zypperpkg.list_pkgs", MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -@@ -677,94 +1027,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "Dummy2", +@@ -662,6 +1013,22 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + "--debug-solver", ) -- with patch( -- "salt.modules.zypperpkg.list_pkgs", -- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]), -- ): -- ret = zypper.upgrade( -- dist_upgrade=True, -- fromrepo=["Dummy", "Dummy2"], -- novendorchange=True, -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- ) -- -- with patch( -- "salt.modules.zypperpkg.list_pkgs", -- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]) -- ): -- with patch.dict(zypper.__salt__, -- {'pkg_resource.version': MagicMock(return_value='1.15'), -- 'lowpkg.version_cmp': MagicMock(return_value=1)}): -- ret = zypper.upgrade( -- dist_upgrade=True, -- dryrun=True, -- fromrepo=["Dummy", "Dummy2"], -- novendorchange=False, -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- "--allow-vendor-change", -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- "--allow-vendor-change", -- "--debug-solver", -- ) -- -- with patch( -- "salt.modules.zypperpkg.list_pkgs", -- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]) -- ): -- with patch.dict(zypper.__salt__, -- {'pkg_resource.version': MagicMock(return_value='1.11'), -- 'lowpkg.version_cmp': MagicMock(return_value=1)}): -- ret = zypper.upgrade( -- dist_upgrade=True, -- dryrun=True, -- fromrepo=["Dummy", "Dummy2"], -- novendorchange=False, -- ) -- zypper_mock.assert_any_call( -- 
"dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- ) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--dry-run", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- "--debug-solver", -- ) -- ++ with patch( ++ "salt.modules.zypperpkg.list_pkgs", ++ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), ++ ): ++ ret = zypper.upgrade( ++ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False ++ ) ++ zypper_mock.assert_any_call( ++ "update", ++ "--auto-agree-with-licenses", ++ "--repo", ++ "Dummy", ++ "--repo", ++ "Dummy2", ++ ) ++ with patch( "salt.modules.zypperpkg.list_pkgs", MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -@@ -811,52 +1073,13 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): +@@ -680,7 +1047,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + "Dummy", + "--from", + "Dummy2", +- "--no-allow-vendor-change", + ) + zypper_mock.assert_any_call( + "dist-upgrade", +@@ -690,7 +1056,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): + "Dummy", + "--from", + "Dummy2", +- "--no-allow-vendor-change", + "--debug-solver", + ) + +@@ -710,33 +1075,13 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): "Dummy2", ) @@ -777,26 +730,7 @@ index f32c382d7f..5c01bbbfbd 100644 - "Dummy", - "--from", - "Dummy2", -- ) -- -- with patch( -- "salt.modules.zypperpkg.list_pkgs", -- MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]), -- ): -- ret = zypper.upgrade( -- dist_upgrade=True, -- fromrepo=["Dummy", "Dummy2"], -- novendorchange=False, -- ) -- self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) -- zypper_mock.assert_any_call( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--from", -- "Dummy", -- "--from", -- "Dummy2", -- "--allow-vendor-change", +- "--no-allow-vendor-change", - ) - def test_upgrade_kernel(self): @@ -810,7 +744,7 @@ index f32c382d7f..5c01bbbfbd 100644 "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True) ), patch( "salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False) -@@ -915,12 +1138,13 @@ Repository 'DUMMY' not found by its alias, number, or URI. +@@ -795,12 +1140,13 @@ Repository 'DUMMY' not found by its alias, number, or URI. self.pid = 1234 self.exit_code = 555 self.noraise = MagicMock() @@ -825,19 +759,16 @@ index f32c382d7f..5c01bbbfbd 100644 "salt.modules.zypperpkg.__zypper__", FailingZypperDummy() ) as zypper_mock, patch( "salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True) -@@ -937,10 +1161,7 @@ Repository 'DUMMY' not found by its alias, number, or URI. +@@ -817,7 +1163,7 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
self.assertEqual(cmd_exc.exception.info["changes"], {}) self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out) zypper_mock.noraise.call.assert_called_with( -- "dist-upgrade", -- "--auto-agree-with-licenses", -- "--from", -- "DUMMY", +- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY" + "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY", ) def test_upgrade_available(self): -- -2.31.1 +2.33.0 diff --git a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch b/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch deleted file mode 100644 index e11f91a..0000000 --- a/ansiblegate-take-care-of-failed-skipped-and-unreacha.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 6111853f13c9c1e8eaaa1acd521cd3abfbfff766 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 13 Aug 2020 13:49:16 +0100 -Subject: [PATCH] ansiblegate: take care of failed, skipped and - unreachable tasks (bsc#1173911) - -Add 'retcode' from ansible-playbook execution to the returned data (bsc#1173909) - -Always add retcode to ansible.playbooks output - -Adjust ansible.playbooks output comment properly - -Add new unit test for ansible.playbooks - -Add unit tests for ansible.playbooks state ---- - tests/unit/modules/test_ansiblegate.py | 12 ++++++++++++ - tests/unit/states/test_ansiblegate.py | 7 ++++--- - 2 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py -index 6724d37c40..3d406a9d42 100644 ---- a/tests/unit/modules/test_ansiblegate.py -+++ b/tests/unit/modules/test_ansiblegate.py -@@ -209,3 +209,15 @@ description: - timeout=1200, - ) - assert ret == {"completed": True, "timeout": 1200} -+ -+ @patch("salt.utils.path.which", MagicMock(return_value=True)) -+ def test_ansible_playbooks_return_retcode(self): -+ """ -+ Test ansible.playbooks execution module function include retcode in the return. -+ :return: -+ """ -+ ref_out = {"retcode": 0, "stdout": '{"foo": "bar"}'} -+ cmd_run_all = MagicMock(return_value=ref_out) -+ with patch.dict(ansible.__salt__, {"cmd.run_all": cmd_run_all}): -+ ret = ansible.playbooks("fake-playbook.yml") -+ assert "retcode" in ret -diff --git a/tests/unit/states/test_ansiblegate.py b/tests/unit/states/test_ansiblegate.py -index ac677fc5db..c21a4f642f 100644 ---- a/tests/unit/states/test_ansiblegate.py -+++ b/tests/unit/states/test_ansiblegate.py -@@ -12,7 +12,6 @@ - # See the License for the specific language governing permissions and - # limitations under the License. - --# Import Salt Testing Libs - - import json - import os -@@ -43,6 +42,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - return {ansible: {}} - -+ @patch("salt.utils.path.which", MagicMock(return_value=True)) - def test_ansible_playbooks_states_success(self): - """ - Test ansible.playbooks states executions success. 
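The contract under test in this ansiblegate patch is small: whatever `cmd.run_all` returns, the parsed playbook output must carry the process `retcode`, so states can distinguish failed, skipped and unreachable runs even when the JSON payload itself parses cleanly. A hedged sketch of just that contract (the real execution module does far more argument handling):

.. code-block:: python

    # Sketch of the retcode-propagation behaviour tested here; assumes a
    # cmd_run_all callable returning Salt's usual {"retcode", "stdout"} dict.
    import json

    def playbooks(cmd_run_all, playbook):
        proc = cmd_run_all("ansible-playbook {}".format(playbook))
        ret = json.loads(proc["stdout"])
        # Always attach the exit code so callers can detect failures.
        ret["retcode"] = proc["retcode"]
        return ret

    fake = lambda cmd: {"retcode": 0, "stdout": '{"foo": "bar"}'}
    assert playbooks(fake, "fake-playbook.yml") == {"foo": "bar", "retcode": 0}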
-@@ -57,7 +57,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict( - ansible.__salt__, - {"ansible.playbooks": MagicMock(return_value=success_output)}, -- ), patch("salt.utils.path.which", MagicMock(return_value=True)): -+ ): - with patch.dict(ansible.__opts__, {"test": False}): - ret = ansible.playbooks("foobar") - self.assertTrue(ret["result"]) -@@ -73,6 +73,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): - }, - ) - -+ @patch("salt.utils.path.which", MagicMock(return_value=True)) - def test_ansible_playbooks_states_failed(self): - """ - Test ansible.playbooks failed states executions. -@@ -87,7 +88,7 @@ class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict( - ansible.__salt__, - {"ansible.playbooks": MagicMock(return_value=failed_output)}, -- ), patch("salt.utils.path.which", MagicMock(return_value=True)): -+ ): - with patch.dict(ansible.__opts__, {"test": False}): - ret = ansible.playbooks("foobar") - self.assertFalse(ret["result"]) --- -2.29.2 - - diff --git a/apply-patch-from-upstream-to-support-python-3.8.patch b/apply-patch-from-upstream-to-support-python-3.8.patch deleted file mode 100644 index ea29717..0000000 --- a/apply-patch-from-upstream-to-support-python-3.8.patch +++ /dev/null @@ -1,235 +0,0 @@ -From 85464ec6c34fceee3379d268745c3294d27e7fb4 Mon Sep 17 00:00:00 2001 -From: Steve Kowalik -Date: Mon, 17 Feb 2020 15:34:00 +1100 -Subject: [PATCH] Apply patch from upstream to support Python 3.8 - -Apply saltstack/salt#56031 to support Python 3.8, which removed a -deprecated module and changed some behaviour. Add a {Build,}Requires on -python-distro, since it is now required. ---- - pkg/suse/salt.spec | 2 ++ - salt/renderers/stateconf.py | 49 ++++++++++++++++--------------------- - 2 files changed, 23 insertions(+), 28 deletions(-) - -diff --git a/pkg/suse/salt.spec b/pkg/suse/salt.spec -index a17d2381ce..0df9d6c283 100644 ---- a/pkg/suse/salt.spec -+++ b/pkg/suse/salt.spec -@@ -62,6 +62,7 @@ BuildRequires: python-psutil - BuildRequires: python-requests >= 1.0.0 - BuildRequires: python-tornado >= 4.2.1 - BuildRequires: python-yaml -+BuildRequires: python-distro - # requirements/opt.txt (not all) - # BuildRequires: python-MySQL-python - # BuildRequires: python-timelib -@@ -112,6 +113,7 @@ Requires: python-psutil - Requires: python-requests >= 1.0.0 - Requires: python-tornado >= 4.2.1 - Requires: python-yaml -+Requires: python-distro - %if 0%{?suse_version} - # requirements/opt.txt (not all) - Recommends: python-MySQL-python -diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py -index 298ae28338..f0527d51d7 100644 ---- a/salt/renderers/stateconf.py -+++ b/salt/renderers/stateconf.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - A flexible renderer that takes a templating engine and a data format - -@@ -26,8 +25,6 @@ A flexible renderer that takes a templating engine and a data format - # - apache: >= 0.1.0 - # - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import copy - import getopt -@@ -36,12 +33,9 @@ import os - import re - from itertools import chain - --# Import salt libs - import salt.utils.files - import salt.utils.stringutils - from salt.exceptions import SaltRenderError -- --# Import 3rd-party libs - from salt.ext import six - from salt.ext.six.moves import StringIO # pylint: disable=import-error - -@@ -135,7 +129,7 @@ def render(input, saltenv="base", sls="", argline="", **kws): - sid = has_names_decls(data) - if sid: - 
raise SaltRenderError( -- "'names' declaration(found in state id: {0}) is " -+ "'names' declaration(found in state id: {}) is " - "not supported with implicitly ordered states! You " - "should generate the states in a template for-loop " - "instead.".format(sid) -@@ -203,11 +197,11 @@ def render(input, saltenv="base", sls="", argline="", **kws): - name, rt_argline = (args[1] + " ").split(" ", 1) - render_template = renderers[name] # e.g., the mako renderer - except KeyError as err: -- raise SaltRenderError("Renderer: {0} is not available!".format(err)) -+ raise SaltRenderError("Renderer: {} is not available!".format(err)) - except IndexError: - raise INVALID_USAGE_ERROR - -- if isinstance(input, six.string_types): -+ if isinstance(input, str): - with salt.utils.files.fopen(input, "r") as ifile: - sls_templ = salt.utils.stringutils.to_unicode(ifile.read()) - else: # assume file-like -@@ -227,7 +221,7 @@ def render(input, saltenv="base", sls="", argline="", **kws): - prefix = sls + "::" - tmplctx = { - k[len(prefix) :] if k.startswith(prefix) else k: v -- for k, v in six.iteritems(tmplctx) -+ for k, v in tmplctx.items() - } - else: - tmplctx = {} -@@ -262,8 +256,8 @@ def rewrite_single_shorthand_state_decl(data): # pylint: disable=C0103 - state_id_decl: - state.func: [] - """ -- for sid, states in six.iteritems(data): -- if isinstance(states, six.string_types): -+ for sid, states in data.items(): -+ if isinstance(states, str): - data[sid] = {states: []} - - -@@ -328,7 +322,7 @@ def nvlist(thelist, names=None): - for nvitem in thelist: - if isinstance(nvitem, dict): - # then nvitem is a name-value item(a dict) of the list. -- name, value = next(six.iteritems(nvitem)) -+ name, value = next(iter(nvitem.items())) - if names is None or name in names: - yield nvitem, name, value - -@@ -349,17 +343,16 @@ def nvlist2(thelist, names=None): - - """ - for _, _, value in nvlist(thelist, names): -- for each in nvlist(value): -- yield each -+ yield from nvlist(value) - - - def statelist(states_dict, sid_excludes=frozenset(["include", "exclude"])): -- for sid, states in six.iteritems(states_dict): -+ for sid, states in states_dict.items(): - if sid.startswith("__"): - continue - if sid in sid_excludes: - continue -- for sname, args in six.iteritems(states): -+ for sname, args in states.items(): - if sname.startswith("__"): - continue - yield sid, states, sname, args -@@ -401,11 +394,11 @@ def rename_state_ids(data, sls, is_extend=False): - newsid = _local_to_abs_sid(sid, sls) - if newsid in data: - raise SaltRenderError( -- "Can't rename state id({0}) into {1} because the later " -+ "Can't rename state id({}) into {} because the later " - "already exists!".format(sid, newsid) - ) - # add a '- name: sid' to those states without '- name'. 
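The changes in this stateconf hunk are mechanical py2-to-py3 cleanups repeated throughout the file. A side-by-side sketch of the three substitutions, runnable on a toy dict (the removed forms are the old `six` compatibility idioms):

.. code-block:: python

    # The three mechanical substitutions applied throughout stateconf.py.
    data = {"apache": {"pkg.installed": []}}

    # 1. six.iteritems(d)                ->  d.items()
    sids = [sid for sid, states in data.items()]
    assert sids == ["apache"]

    # 2. isinstance(x, six.string_types) ->  isinstance(x, str)
    assert isinstance("apache", str)

    # 3. "{0}".format(x)                 ->  "{}".format(x)  (auto-numbering)
    assert "state id: {}".format("apache") == "state id: apache"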
-- for sname, args in six.iteritems(data[sid]): -+ for sname, args in data[sid].items(): - if state_name(sname) == STATE_NAME: - continue - for arg in args: -@@ -430,7 +423,7 @@ EXTENDED_REQUIRE_IN = {} - # explicit require_in/watch_in/listen_in/onchanges_in/onfail_in can only contain states after it - def add_implicit_requires(data): - def T(sid, state): # pylint: disable=C0103 -- return "{0}:{1}".format(sid, state_name(state)) -+ return "{}:{}".format(sid, state_name(state)) - - states_before = set() - states_after = set() -@@ -462,7 +455,7 @@ def add_implicit_requires(data): - for _, rstate, rsid in reqs: - if T(rsid, rstate) in states_after: - raise SaltRenderError( -- "State({0}) can't require/watch/listen/onchanges/onfail a state({1}) defined " -+ "State({}) can't require/watch/listen/onchanges/onfail a state({}) defined " - "after it!".format(tag, T(rsid, rstate)) - ) - -@@ -472,7 +465,7 @@ def add_implicit_requires(data): - for _, rstate, rsid in reqs: - if T(rsid, rstate) in states_before: - raise SaltRenderError( -- "State({0}) can't require_in/watch_in/listen_in/onchanges_in/onfail_in a state({1}) " -+ "State({}) can't require_in/watch_in/listen_in/onchanges_in/onfail_in a state({}) " - "defined before it!".format(tag, T(rsid, rstate)) - ) - -@@ -492,7 +485,7 @@ def add_start_state(data, sls): - start_sid = __opts__["stateconf_start_state"] - if start_sid in data: - raise SaltRenderError( -- "Can't generate start state({0})! The same state id already " -+ "Can't generate start state({})! The same state id already " - "exists!".format(start_sid) - ) - if not data: -@@ -502,14 +495,14 @@ def add_start_state(data, sls): - # no __sls__, or it's the first state whose id declaration has a - # __sls__ == sls. - non_sids = ("include", "exclude", "extend") -- for sid, states in six.iteritems(data): -+ for sid, states in data.items(): - if sid in non_sids or sid.startswith("__"): - continue - if "__sls__" not in states or states["__sls__"] == sls: - break - else: - raise SaltRenderError("Can't determine the first state in the sls file!") -- reqin = {state_name(next(six.iterkeys(data[sid]))): sid} -+ reqin = {state_name(next(iter(data[sid].keys()))): sid} - data[start_sid] = {STATE_FUNC: [{"require_in": [reqin]}]} - - -@@ -517,7 +510,7 @@ def add_goal_state(data): - goal_sid = __opts__["stateconf_goal_state"] - if goal_sid in data: - raise SaltRenderError( -- "Can't generate goal state({0})! The same state id already " -+ "Can't generate goal state({})! The same state id already " - "exists!".format(goal_sid) - ) - else: -@@ -561,7 +554,7 @@ STATE_CONF_EXT = {} # stateconf.set under extend: ... 
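For readers unfamiliar with this renderer, the `add_start_state` code visible above injects a synthetic state whose `require_in` points at the first real state of the SLS, giving the implicit ordering a well-defined entry point. A toy illustration of the data shape it builds (the `.start` id and all values here are illustrative only):

.. code-block:: python

    # Toy illustration of the structure add_start_state produces.
    data = {"first_state": {"cmd.run": [{"name": "echo hi"}]}}

    first_sid = next(iter(data))              # py3 form of six.iterkeys(data)
    first_func = next(iter(data[first_sid]))  # "cmd.run"
    # state_name() in the real code keeps the module part before the dot.
    reqin = {first_func.split(".")[0]: first_sid}
    data[".start"] = {"stateconf.set": [{"require_in": [reqin]}]}

    assert data[".start"] == {
        "stateconf.set": [{"require_in": [{"cmd": "first_state"}]}]
    }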
- - - def extract_state_confs(data, is_extend=False): -- for state_id, state_dict in six.iteritems(data): -+ for state_id, state_dict in data.items(): - if state_id == "extend" and not is_extend: - extract_state_confs(state_dict, True) - continue -@@ -578,7 +571,7 @@ def extract_state_confs(data, is_extend=False): - for sdk in state_dict[key]: - if not isinstance(sdk, dict): - continue -- key, val = next(six.iteritems(sdk)) -+ key, val = next(iter(sdk.items())) - conf[key] = val - - if not is_extend and state_id in STATE_CONF_EXT: --- -2.29.2 - - diff --git a/async-batch-implementation-fix-320.patch b/async-batch-implementation-fix-320.patch deleted file mode 100644 index 911e5bd..0000000 --- a/async-batch-implementation-fix-320.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 843c76e5889659ec80fea3f39b750b9f907a902d Mon Sep 17 00:00:00 2001 -From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> -Date: Wed, 17 Feb 2021 16:47:11 +0300 -Subject: [PATCH] Async batch implementation fix (#320) - ---- - salt/client/__init__.py | 38 -------------------------------------- - 1 file changed, 38 deletions(-) - -diff --git a/salt/client/__init__.py b/salt/client/__init__.py -index cc8fd4048d..ddb437604b 100644 ---- a/salt/client/__init__.py -+++ b/salt/client/__init__.py -@@ -534,12 +534,6 @@ class LocalClient: - {'dave': {...}} - {'stewart': {...}} - """ -- # We need to re-import salt.utils.args here -- # even though it has already been imported. -- # when cmd_batch is called via the NetAPI -- # the module is unavailable. -- import salt.utils.args -- - # Late import - not used anywhere else in this file - import salt.cli.batch - -@@ -557,38 +551,6 @@ class LocalClient: - - eauth = salt.cli.batch.batch_get_eauth(kwargs) - -- arg = salt.utils.args.condition_input(arg, kwarg) -- opts = { -- "tgt": tgt, -- "fun": fun, -- "arg": arg, -- "tgt_type": tgt_type, -- "ret": ret, -- "batch": batch, -- "failhard": kwargs.get("failhard", self.opts.get("failhard", False)), -- "raw": kwargs.get("raw", False), -- } -- -- if "timeout" in kwargs: -- opts["timeout"] = kwargs["timeout"] -- if "gather_job_timeout" in kwargs: -- opts["gather_job_timeout"] = kwargs["gather_job_timeout"] -- if "batch_wait" in kwargs: -- opts["batch_wait"] = int(kwargs["batch_wait"]) -- -- eauth = {} -- if "eauth" in kwargs: -- eauth["eauth"] = kwargs.pop("eauth") -- if "username" in kwargs: -- eauth["username"] = kwargs.pop("username") -- if "password" in kwargs: -- eauth["password"] = kwargs.pop("password") -- if "token" in kwargs: -- eauth["token"] = kwargs.pop("token") -- -- for key, val in self.opts.items(): -- if key not in opts: -- opts[key] = val - batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) - for ret in batch.run(): - yield ret --- -2.30.0 - - diff --git a/async-batch-implementation.patch b/async-batch-implementation.patch index 63b2533..9cebed7 100644 --- a/async-batch-implementation.patch +++ b/async-batch-implementation.patch @@ -1,7 +1,10 @@ -From 78faccbd063b8635550935057b8630262958f669 Mon Sep 17 00:00:00 2001 +From c25ee8158000770cb667b914de62f802467c204e Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Fri, 16 Nov 2018 17:05:29 +0100 Subject: [PATCH] Async batch implementation +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit Add find_job checks @@ -24,25 +27,67 @@ Add the metadata to the start/done events Pass only metadata not all **kwargs Add separate batch presence_ping timeout + +Fix async batch race conditions + +Close batching when there is no next 
batch + +Add 'batch_presence_ping_timeout' and 'batch_presence_ping_gather_job_timeout' parameters for synchronous batching + +Fix async-batch multiple done events + +Fix memory leak produced by batch async find_jobs mechanism (bsc#1140912) + +Multiple fixes: + +- use different JIDs per find_job +- fix bug in detection of find_job returns +- fix timeout passed from request payload +- better cleanup at the end of batching + +Co-authored-by: Pablo Suárez Hernández + +Improve batch_async to release consumed memory (bsc#1140912) + +Use current IOLoop for the LocalClient instance of BatchAsync (bsc#1137642) + +Fix failing unit tests for batch async + +Remove unnecessary yield causing BadYieldError (bsc#1154620) + +Fixing StreamClosed issue + +Fix batch_async obsolete test + +batch_async: avoid using fnmatch to match event (#217) + +Batch Async: Catch exceptions and safety unregister and close instances + +Fix unit tests for batch async after refactor + +Changed imports to vendored Tornado + +Async batch implementation fix (#320) + +Remove deprecated usage of NO_MOCK and NO_MOCK_REASON --- salt/auth/__init__.py | 2 + - salt/cli/batch.py | 115 +++++++--- - salt/cli/batch_async.py | 240 +++++++++++++++++++ - salt/client/__init__.py | 14 ++ - salt/master.py | 26 ++- - salt/netapi/__init__.py | 3 +- - salt/transport/ipc.py | 43 ++-- + salt/cli/batch.py | 109 ++++++-- + salt/cli/batch_async.py | 315 +++++++++++++++++++++++ + salt/client/__init__.py | 51 ++-- + salt/master.py | 20 ++ + salt/transport/ipc.py | 9 +- salt/utils/event.py | 8 +- - tests/unit/cli/test_batch_async.py | 357 +++++++++++++++++++++++++++++ - 9 files changed, 741 insertions(+), 67 deletions(-) + tests/unit/cli/test_batch_async.py | 386 +++++++++++++++++++++++++++++ + 8 files changed, 839 insertions(+), 61 deletions(-) create mode 100644 salt/cli/batch_async.py create mode 100644 tests/unit/cli/test_batch_async.py diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py -index ee1eac7ce4..22c54e8048 100644 +index 3b73c2ec08..6f300fe7c4 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py -@@ -52,6 +52,8 @@ AUTH_INTERNAL_KEYWORDS = frozenset( +@@ -49,6 +49,8 @@ AUTH_INTERNAL_KEYWORDS = frozenset( "print_event", "raw", "yield_pub_data", @@ -52,37 +97,18 @@ index ee1eac7ce4..22c54e8048 100644 ) diff --git a/salt/cli/batch.py b/salt/cli/batch.py -index 155dc734b7..527cffdeb7 100644 +index 2a692e13f8..828a1ded5b 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py -@@ -1,10 +1,7 @@ --# -*- coding: utf-8 -*- - """ - Execute batch runs - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import copy - -@@ -17,11 +14,8 @@ from datetime import datetime, timedelta - import salt.client - import salt.exceptions +@@ -13,9 +13,88 @@ import salt.exceptions import salt.output -- --# Import salt libs import salt.utils.stringutils --# Import 3rd-party libs - # pylint: disable=import-error,no-name-in-module,redefined-builtin - from salt.ext import six - from salt.ext.six.moves import range -@@ -29,7 +23,77 @@ from salt.ext.six.moves import range ++# pylint: disable=import-error,no-name-in-module,redefined-builtin ++ log = logging.getLogger(__name__) --class Batch(object): +def get_bnum(opts, minions, quiet): + """ + Return the active number of minions to maintain @@ -137,6 +163,13 @@ index 155dc734b7..527cffdeb7 100644 + if key not in opts: + opts[key] = val + ++ opts["batch_presence_ping_timeout"] = kwargs.get( ++ "batch_presence_ping_timeout", opts["timeout"] ++ ) ++ 
opts["batch_presence_ping_gather_job_timeout"] = kwargs.get( ++ "batch_presence_ping_gather_job_timeout", opts["gather_job_timeout"] ++ ) ++ + return opts + + @@ -153,20 +186,41 @@ index 155dc734b7..527cffdeb7 100644 + return eauth + + -+class Batch: + class Batch: """ Manage the execution of batch runs - """ -@@ -75,7 +139,7 @@ class Batch(object): - continue - else: - try: -- m = next(six.iterkeys(ret)) -+ m = next(iter(ret.keys())) - except StopIteration: - if not self.quiet: - salt.utils.stringutils.print_cli( -@@ -87,28 +151,7 @@ class Batch(object): +@@ -39,6 +118,7 @@ class Batch: + self.pub_kwargs = eauth if eauth else {} + self.quiet = quiet + self.options = _parser ++ self.minions = set() + # Passing listen True to local client will prevent it from purging + # cahced events while iterating over the batches. + self.local = salt.client.get_local_client(opts["conf_file"], listen=True) +@@ -51,7 +131,7 @@ class Batch: + self.opts["tgt"], + "test.ping", + [], +- self.opts["timeout"], ++ self.opts.get("batch_presence_ping_timeout", self.opts["timeout"]), + ] + + selected_target_option = self.opts.get("selected_target_option", None) +@@ -62,7 +142,12 @@ class Batch: + + self.pub_kwargs["yield_pub_data"] = True + ping_gen = self.local.cmd_iter( +- *args, gather_job_timeout=self.opts["gather_job_timeout"], **self.pub_kwargs ++ *args, ++ gather_job_timeout=self.opts.get( ++ "batch_presence_ping_gather_job_timeout", ++ self.opts["gather_job_timeout"], ++ ), ++ **self.pub_kwargs + ) + + # Broadcast to targets +@@ -87,25 +172,7 @@ class Batch: return (list(fret), ping_gen, nret.difference(fret)) def get_bnum(self): @@ -175,10 +229,7 @@ index 155dc734b7..527cffdeb7 100644 - """ - partition = lambda x: float(x) / 100.0 * len(self.minions) - try: -- if ( -- isinstance(self.opts["batch"], six.string_types) -- and "%" in self.opts["batch"] -- ): +- if isinstance(self.opts["batch"], str) and "%" in self.opts["batch"]: - res = partition(float(self.opts["batch"].strip("%"))) - if res < 1: - return int(math.ceil(res)) @@ -189,83 +240,30 @@ index 155dc734b7..527cffdeb7 100644 - except ValueError: - if not self.quiet: - salt.utils.stringutils.print_cli( -- "Invalid batch data sent: {0}\nData must be in the " +- "Invalid batch data sent: {}\nData must be in the " - "form of %10, 10% or 3".format(self.opts["batch"]) - ) + return get_bnum(self.opts, self.minions, self.quiet) def __update_wait(self, wait): now = datetime.now() -@@ -161,7 +204,7 @@ class Batch(object): - # the user we won't be attempting to run a job on them - for down_minion in self.down_minions: - salt.utils.stringutils.print_cli( -- "Minion {0} did not respond. No job will be sent.".format( -+ "Minion {} did not respond. 
No job will be sent.".format(
-                        down_minion
-                    )
-                )
-@@ -190,7 +233,7 @@ class Batch(object):
-            if next_:
-                if not self.quiet:
-                    salt.utils.stringutils.print_cli(
--                        "\nExecuting run on {0}\n".format(sorted(next_))
-+                        "\nExecuting run on {}\n".format(sorted(next_))
-                    )
-                # create a new iterator for this batch of minions
-                return_value = self.opts.get("return", self.opts.get("ret", ""))
-@@ -218,7 +261,7 @@ class Batch(object):
-                    for ping_ret in self.ping_gen:
-                        if ping_ret is None:
-                            break
--                        m = next(six.iterkeys(ping_ret))
-+                        m = next(iter(ping_ret.keys()))
-                        if m not in self.minions:
-                            self.minions.append(m)
-                            to_run.append(m)
-@@ -243,7 +286,7 @@ class Batch(object):
-                            )
-                        else:
-                            salt.utils.stringutils.print_cli(
--                                "minion {0} was already deleted from tracker, probably a duplicate key".format(
-+                                "minion {} was already deleted from tracker, probably a duplicate key".format(
-                                    part["id"]
-                                )
-                            )
-@@ -254,7 +297,7 @@ class Batch(object):
-                            minion_tracker[queue]["minions"].remove(id)
-                        else:
-                            salt.utils.stringutils.print_cli(
--                                "minion {0} was already deleted from tracker, probably a duplicate key".format(
-+                                "minion {} was already deleted from tracker, probably a duplicate key".format(
-                                    id
-                                )
-                            )
-@@ -274,7 +317,7 @@ class Batch(object):
-                parts[minion] = {}
-                parts[minion]["ret"] = {}
-
--        for minion, data in six.iteritems(parts):
-+        for minion, data in parts.items():
-            if minion in active:
-                active.remove(minion)
-            if bwait:
diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
new file mode 100644
-index 0000000000..1557e5105b
+index 0000000000..09aa85258b
--- /dev/null
+++ b/salt/cli/batch_async.py
-@@ -0,0 +1,240 @@
+@@ -0,0 +1,315 @@
 +"""
 +Execute a job on the targeted minions by using a moving window of fixed size `batch`.
 +"""
 +
-+import fnmatch
++import gc
 +
 +# pylint: enable=import-error,no-name-in-module,redefined-builtin
 +import logging
 +
 +import salt.client
++import salt.ext.tornado
 +import tornado
 +from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum
 +
@@ -290,22 +288,24 @@
 +         - tag: salt/batch//start
 +         - data: {
 +             "available_minions": self.minions,
-+             "down_minions": self.down_minions
++             "down_minions": targeted_minions - presence_ping_minions
 +           }
 +
 +    When the batch ends, an `done` event is fired:
 +        - tag: salt/batch//done
 +        - data: {
 +             "available_minions": self.minions,
-+             "down_minions": self.down_minions,
++             "down_minions": targeted_minions - presence_ping_minions
 +             "done_minions": self.done_minions,
 +             "timedout_minions": self.timedout_minions
 +         }
 +    """
 +
 +    def __init__(self, parent_opts, jid_gen, clear_load):
-+        ioloop = tornado.ioloop.IOLoop.current()
-+        self.local = salt.client.get_local_client(parent_opts["conf_file"])
++        ioloop = salt.ext.tornado.ioloop.IOLoop.current()
++        self.local = salt.client.get_local_client(
++            parent_opts["conf_file"], io_loop=ioloop
++        )
 +        if "gather_job_timeout" in clear_load["kwargs"]:
 +            clear_load["gather_job_timeout"] = clear_load["kwargs"].pop(
 +                "gather_job_timeout"
@@ -326,15 +326,17 @@
 +        self.eauth = batch_get_eauth(clear_load["kwargs"])
 +        self.metadata = clear_load["kwargs"].get("metadata", {})
 +        self.minions = set()
-+        self.down_minions = set()
++        self.targeted_minions = set()
 +        self.timedout_minions = set()
 +        self.done_minions = set()
 +        self.active = set()
 +        self.initialized = False
++        self.jid_gen = jid_gen
 +        self.ping_jid = jid_gen()
 +        self.batch_jid = jid_gen()
 +        self.find_job_jid = jid_gen()
 +        self.find_job_returned = set()
++        self.ended = False
 +        self.event = salt.utils.event.get_event(
 +            "master",
 +            self.opts["sock_dir"],
@@ -344,48 +346,42 @@
 +            io_loop=ioloop,
 +            keep_loop=True,
 +        )
++        self.scheduled = False
++        self.patterns = set()
 +
 +    def __set_event_handler(self):
 +        ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
 +        batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid)
-+        find_job_return_pattern = "salt/job/{}/ret/*".format(self.find_job_jid)
 +        self.event.subscribe(ping_return_pattern, match_type="glob")
 +        self.event.subscribe(batch_return_pattern, match_type="glob")
-+        self.event.subscribe(find_job_return_pattern, match_type="glob")
-+        self.event.patterns = {
++        self.patterns = {
 +            (ping_return_pattern, "ping_return"),
 +            (batch_return_pattern, "batch_run"),
-+            (find_job_return_pattern, "find_job_return"),
 +        }
 +        self.event.set_event_handler(self.__event_handler)
 +
 +    def __event_handler(self, raw):
 +        if not self.event:
 +            return
-+        mtag, data = self.event.unpack(raw, self.event.serial)
-+        for (pattern, op) in self.event.patterns:
-+            if fnmatch.fnmatch(mtag, pattern):
-+                minion = data["id"]
-+                if op == "ping_return":
-+                    self.minions.add(minion)
-+                    self.down_minions.remove(minion)
-+                    if not self.down_minions:
-+                        self.event.io_loop.spawn_callback(self.start_batch)
-+                elif op == "find_job_return":
-+                    self.find_job_returned.add(minion)
-+                elif op == "batch_run":
-+                    if minion in self.active:
-+                        self.active.remove(minion)
-+                        self.done_minions.add(minion)
-+                        # call later so that we maybe gather more returns
-+                        self.event.io_loop.call_later(
-+                            self.batch_delay, self.schedule_next
-+                        )
-+
-+        if self.initialized and self.done_minions == self.minions.difference(
-+            self.timedout_minions
-+        ):
-+            self.end_batch()
++        try:
++            mtag, data = self.event.unpack(raw, self.event.serial)
++            for (pattern, op) in self.patterns:
++                if mtag.startswith(pattern[:-1]):
++                    minion = data["id"]
++                    if op == "ping_return":
++                        self.minions.add(minion)
++                        if self.targeted_minions == self.minions:
++                            self.event.io_loop.spawn_callback(self.start_batch)
++                    elif op == "find_job_return":
++                        if data.get("return", None):
++                            self.find_job_returned.add(minion)
++                    elif op == "batch_run":
++                        if minion in self.active:
++                            self.active.remove(minion)
++                            self.done_minions.add(minion)
++                            self.event.io_loop.spawn_callback(self.schedule_next)
++        except Exception as ex:
++            log.error("Exception occurred while processing event: {}".format(ex))
 +
 +    def _get_next(self):
 +        to_run = (
@@ -399,111 +395,207 @@
 +        )
 +        return set(list(to_run)[:next_batch_size])
 +
-+    @tornado.gen.coroutine
-+    def check_find_job(self, minions):
-+        did_not_return = minions.difference(self.find_job_returned)
-+        if did_not_return:
-+            for minion in did_not_return:
-+                if minion in self.find_job_returned:
-+                    self.find_job_returned.remove(minion)
-+                if minion in self.active:
-+                    self.active.remove(minion)
-+                self.timedout_minions.add(minion)
-+        running = (
-+            minions.difference(did_not_return)
-+            .difference(self.done_minions)
-+            .difference(self.timedout_minions)
-+        )
-+        if running:
-+            self.event.io_loop.add_callback(self.find_job, running)
++    def check_find_job(self, batch_minions, jid):
++        if self.event:
++            find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++            self.event.unsubscribe(find_job_return_pattern, match_type="glob")
++            self.patterns.remove((find_job_return_pattern, "find_job_return"))
 +
-+    @tornado.gen.coroutine
++            timedout_minions = batch_minions.difference(
++                self.find_job_returned
++            ).difference(self.done_minions)
++            self.timedout_minions = self.timedout_minions.union(timedout_minions)
++            self.active = self.active.difference(self.timedout_minions)
++            running = batch_minions.difference(self.done_minions).difference(
++                self.timedout_minions
++            )
++
++            if timedout_minions:
++                self.schedule_next()
++
++            if self.event and running:
++                self.find_job_returned = self.find_job_returned.difference(running)
++                self.event.io_loop.spawn_callback(self.find_job, running)
++
++    @salt.ext.tornado.gen.coroutine
 +    def find_job(self, minions):
-+        not_done = minions.difference(self.done_minions)
-+        ping_return = yield self.local.run_job_async(
-+            not_done,
-+            "saltutil.find_job",
-+            [self.batch_jid],
-+            "list",
-+            gather_job_timeout=self.opts["gather_job_timeout"],
-+            jid=self.find_job_jid,
-+            **self.eauth
-+        )
-+        self.event.io_loop.call_later(
-+            self.opts["gather_job_timeout"], self.check_find_job, not_done
-+        )
++        if self.event:
++            not_done = minions.difference(self.done_minions).difference(
++                self.timedout_minions
++            )
++            try:
++                if not_done:
++                    jid = self.jid_gen()
++                    find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
++                    self.patterns.add((find_job_return_pattern, "find_job_return"))
++                    self.event.subscribe(find_job_return_pattern, match_type="glob")
++                    ret = yield self.local.run_job_async(
++                        not_done,
++                        "saltutil.find_job",
++                        [self.batch_jid],
++                        "list",
++                        gather_job_timeout=self.opts["gather_job_timeout"],
++                        jid=jid,
++                        **self.eauth
++                    )
++                    yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"])
++                    if self.event:
++                        self.event.io_loop.spawn_callback(
++                            self.check_find_job, not_done, jid
++                        )
++            except Exception as ex:
++                log.error(
++                    "Exception occurred handling batch async: {}. Aborting execution.".format(
++                        ex
++                    )
++                )
++                self.close_safe()
 +
-+    @tornado.gen.coroutine
++    @salt.ext.tornado.gen.coroutine
 +    def start(self):
-+        self.__set_event_handler()
-+        # start batching even if not all minions respond to ping
-+        self.event.io_loop.call_later(
-+            self.batch_presence_ping_timeout or self.opts["gather_job_timeout"],
-+            self.start_batch,
-+        )
-+        ping_return = yield self.local.run_job_async(
-+            self.opts["tgt"],
-+            "test.ping",
-+            [],
-+            self.opts.get("selected_target_option", self.opts.get("tgt_type", "glob")),
-+            gather_job_timeout=self.opts["gather_job_timeout"],
-+            jid=self.ping_jid,
-+            metadata=self.metadata,
-+            **self.eauth
-+        )
-+        self.down_minions = set(ping_return["minions"])
++        if self.event:
++            self.__set_event_handler()
++            ping_return = yield self.local.run_job_async(
++                self.opts["tgt"],
++                "test.ping",
++                [],
++                self.opts.get(
++                    "selected_target_option", self.opts.get("tgt_type", "glob")
++                ),
++                gather_job_timeout=self.opts["gather_job_timeout"],
++                jid=self.ping_jid,
++                metadata=self.metadata,
++                **self.eauth
++            )
++            self.targeted_minions = set(ping_return["minions"])
++            # start batching even if not all minions respond to ping
++            yield salt.ext.tornado.gen.sleep(
++                self.batch_presence_ping_timeout or self.opts["gather_job_timeout"]
++            )
++            if self.event:
++                self.event.io_loop.spawn_callback(self.start_batch)
 +
-+    @tornado.gen.coroutine
++    @salt.ext.tornado.gen.coroutine
 +    def start_batch(self):
 +        if not self.initialized:
 +            self.batch_size = get_bnum(self.opts, self.minions, True)
 +            self.initialized = True
 +            data = {
 +                "available_minions": self.minions,
-+                "down_minions": self.down_minions,
++                "down_minions": self.targeted_minions.difference(self.minions),
 +                "metadata": self.metadata,
 +            }
-+            self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid))
-+            yield self.schedule_next()
++            ret = self.event.fire_event(
++                data, "salt/batch/{}/start".format(self.batch_jid)
++            )
++            if self.event:
++                self.event.io_loop.spawn_callback(self.run_next)
 +
++    @salt.ext.tornado.gen.coroutine
 +    def end_batch(self):
-+        data = {
-+            "available_minions": self.minions,
-+            "down_minions": self.down_minions,
-+            "done_minions": self.done_minions,
-+            "timedout_minions": self.timedout_minions,
-+            "metadata": self.metadata,
-+        }
-+        self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
-+        self.event.remove_event_handler(self.__event_handler)
++        left = self.minions.symmetric_difference(
++            self.done_minions.union(self.timedout_minions)
++        )
++        if not left and not self.ended:
++            self.ended = True
++            data = {
++                "available_minions": self.minions,
++                "down_minions": self.targeted_minions.difference(self.minions),
++                "done_minions": self.done_minions,
++                "timedout_minions": self.timedout_minions,
++                "metadata": self.metadata,
++            }
++            self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
 +
-+    @tornado.gen.coroutine
++            # release to the IOLoop to allow the event to be published
++            # before closing batch async execution
++            yield salt.ext.tornado.gen.sleep(1)
++            self.close_safe()
++
++    def close_safe(self):
++        for (pattern, label) in self.patterns:
++            self.event.unsubscribe(pattern, match_type="glob")
++        self.event.remove_event_handler(self.__event_handler)
++        self.event = None
++        self.local = None
++        self.ioloop = None
++        del self
++        gc.collect()
++
++    @salt.ext.tornado.gen.coroutine
 +    def schedule_next(self):
++        if not self.scheduled:
++            self.scheduled = True
++            # call later so that we maybe gather more returns
++            yield salt.ext.tornado.gen.sleep(self.batch_delay)
++            if self.event:
++                self.event.io_loop.spawn_callback(self.run_next)
++
++    @salt.ext.tornado.gen.coroutine
++    def run_next(self):
++        self.scheduled = False
 +        next_batch = self._get_next()
 +        if next_batch:
-+            yield self.local.run_job_async(
-+                next_batch,
-+                self.opts["fun"],
-+                self.opts["arg"],
-+                "list",
-+                raw=self.opts.get("raw", False),
-+                ret=self.opts.get("return", ""),
-+                gather_job_timeout=self.opts["gather_job_timeout"],
-+                jid=self.batch_jid,
-+                metadata=self.metadata,
-+            )
-+            self.event.io_loop.call_later(
-+                self.opts["timeout"], self.find_job, set(next_batch)
-+            )
 +            self.active = self.active.union(next_batch)
++            try:
++                ret = yield self.local.run_job_async(
++                    next_batch,
++                    self.opts["fun"],
++                    self.opts["arg"],
++                    "list",
++                    raw=self.opts.get("raw", False),
++                    ret=self.opts.get("return", ""),
++                    gather_job_timeout=self.opts["gather_job_timeout"],
++                    jid=self.batch_jid,
++                    metadata=self.metadata,
++                )
++
++                yield salt.ext.tornado.gen.sleep(self.opts["timeout"])
++
++                # The batch can be done already at this point, which means no self.event
++                if self.event:
++                    self.event.io_loop.spawn_callback(self.find_job, set(next_batch))
++            except Exception as ex:
++                log.error("Error in scheduling next batch: %s. Aborting execution", ex)
++                self.active = self.active.difference(next_batch)
++                self.close_safe()
++        else:
++            yield self.end_batch()
++        gc.collect()
++
++    def __del__(self):
++        self.local = None
++        self.event = None
++        self.ioloop = None
++        gc.collect()
diff --git a/salt/client/__init__.py b/salt/client/__init__.py
-index 6fab45fcbf..1e9f11df4c 100644
+index 8ea8818d01..482d3ac7bd 100644
--- a/salt/client/__init__.py
+++ b/salt/client/__init__.py
-@@ -543,6 +543,20 @@ class LocalClient:
+@@ -584,47 +584,23 @@ class LocalClient:
          {'dave': {...}}
          {'stewart': {...}}
          """
+-        # We need to re-import salt.utils.args here
+-        # even though it has already been imported.
+-        # when cmd_batch is called via the NetAPI
+-        # the module is unavailable.
+-        import salt.utils.args
+-
          # Late import - not used anywhere else in this file
          import salt.cli.batch
 
+-        arg = salt.utils.args.condition_input(arg, kwarg)
+-        opts = {
+-            "tgt": tgt,
+-            "fun": fun,
+-            "arg": arg,
+-            "tgt_type": tgt_type,
+-            "ret": ret,
+-            "batch": batch,
+-            "failhard": kwargs.get("failhard", self.opts.get("failhard", False)),
+-            "raw": kwargs.get("raw", False),
+-        }
 +        opts = salt.cli.batch.batch_get_opts(
 +            tgt,
 +            fun,
 +            arg,
 +            kwarg,
 +            tgt_type,
 +            ret,
 +            batch,
 +            kwargs=kwargs,
 +            **self.opts
 +        )
 +
 +        eauth = salt.cli.batch.batch_get_eauth(kwargs)
 +
+-        if "timeout" in kwargs:
+-            opts["timeout"] = kwargs["timeout"]
+-        if "gather_job_timeout" in kwargs:
+-            opts["gather_job_timeout"] = kwargs["gather_job_timeout"]
+-        if "batch_wait" in kwargs:
+-            opts["batch_wait"] = int(kwargs["batch_wait"])
+-
+-        eauth = {}
+-        if "eauth" in kwargs:
+-            eauth["eauth"] = kwargs.pop("eauth")
+-        if "username" in kwargs:
+-            eauth["username"] = kwargs.pop("username")
+-        if "password" in kwargs:
+-            eauth["password"] = kwargs.pop("password")
+-        if "token" in kwargs:
+-            eauth["token"] = kwargs.pop("token")
+-
+-        for key, val in self.opts.items():
+-            if key not in opts:
+-                opts[key] = val
          batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True)
          for ret in batch.run():
              yield ret
+@@ -1812,6 +1788,7 @@ class LocalClient:
+             "key": self.key,
+             "tgt_type": tgt_type,
+             "ret": ret,
++            "timeout": timeout,
+             "jid": jid,
+         }
+
diff --git a/salt/master.py b/salt/master.py
-index 1c91c28209..b9bc1a7a67 100644
+index 37fe52159f..795aeef647 100644
--- a/salt/master.py
+++ b/salt/master.py
-@@ -3,7 +3,6 @@ This module contains all of the routines needed to set up a master server, this
- involves preparing the three listeners and the workers needed by the master.
- """ - --# Import python libs - - import collections - import copy -@@ -21,10 +20,9 @@ import time +@@ -19,6 +19,7 @@ import time import salt.acl import salt.auth +import salt.cli.batch_async import salt.client import salt.client.ssh.client -- --# Import salt libs import salt.crypt - import salt.daemons.masterapi - import salt.defaults.exitcodes -@@ -89,7 +87,6 @@ except ImportError: - # resource is not available on windows - HAS_RESOURCE = False - --# Import halite libs - try: - import halite # pylint: disable=import-error - -@@ -2232,6 +2229,24 @@ class ClearFuncs(TransportMethods): +@@ -2167,6 +2168,22 @@ class ClearFuncs(TransportMethods): return False return self.loadauth.get_tok(clear_load["token"]) + def publish_batch(self, clear_load, minions, missing): + batch_load = {} + batch_load.update(clear_load) -+ import salt.cli.batch_async -+ + batch = salt.cli.batch_async.BatchAsync( + self.local.opts, + functools.partial(self._prep_jid, clear_load, {}), + batch_load, + ) -+ ioloop = tornado.ioloop.IOLoop.current() ++ ioloop = salt.ext.tornado.ioloop.IOLoop.current() + ioloop.add_callback(batch.start) + + return { @@ -578,7 +676,7 @@ index 1c91c28209..b9bc1a7a67 100644 def publish(self, clear_load): """ This method sends out publications to the minions, it can only be used -@@ -2349,6 +2364,9 @@ class ClearFuncs(TransportMethods): +@@ -2284,6 +2301,9 @@ class ClearFuncs(TransportMethods): ), }, } @@ -588,139 +686,11 @@ index 1c91c28209..b9bc1a7a67 100644 jid = self._prep_jid(clear_load, extra) if jid is None: return {"enc": "clear", "load": {"error": "Master failed to assign jid"}} -diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py -index 96f57f6c79..dec19b37ef 100644 ---- a/salt/netapi/__init__.py -+++ b/salt/netapi/__init__.py -@@ -151,7 +151,8 @@ class NetapiClient: - :return: job ID - """ - local = salt.client.get_local_client(mopts=self.opts) -- return local.run_job(*args, **kwargs) -+ ret = local.run_job(*args, **kwargs) -+ return ret - - def local(self, *args, **kwargs): - """ diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py -index 041718d058..f411907da2 100644 +index 29210d7522..3f430ba796 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py -@@ -1,10 +1,7 @@ --# -*- coding: utf-8 -*- - """ - IPC transport classes - """ - --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - - import errno - import logging -@@ -12,15 +9,12 @@ import socket - import sys - import time - --# Import Tornado libs - import salt.ext.tornado - import salt.ext.tornado.concurrent - import salt.ext.tornado.gen - import salt.ext.tornado.netutil - import salt.transport.client - import salt.transport.frame -- --# Import Salt libs - import salt.utils.msgpack - from salt.ext import six - from salt.ext.tornado.ioloop import IOLoop -@@ -42,7 +36,7 @@ def future_with_timeout_callback(future): - - class FutureWithTimeout(salt.ext.tornado.concurrent.Future): - def __init__(self, io_loop, future, timeout): -- super(FutureWithTimeout, self).__init__() -+ super().__init__() - self.io_loop = io_loop - self._future = future - if timeout is not None: -@@ -85,7 +79,7 @@ class FutureWithTimeout(salt.ext.tornado.concurrent.Future): - self.set_exception(exc) - - --class IPCServer(object): -+class IPCServer: - """ - A Tornado IPC server very similar to Tornado's TCPServer class - but using either UNIX domain sockets or TCP sockets -@@ -181,10 +175,7 @@ class IPCServer(object): - # Under Py2 we still want raw to be set to True - msgpack_kwargs = 
{"raw": six.PY2} - else: -- if six.PY2: -- msgpack_kwargs = {"encoding": None} -- else: -- msgpack_kwargs = {"encoding": "utf-8"} -+ msgpack_kwargs = {"encoding": "utf-8"} - unpacker = salt.utils.msgpack.Unpacker(**msgpack_kwargs) - while not stream.closed(): - try: -@@ -200,7 +191,7 @@ class IPCServer(object): - except StreamClosedError: - log.trace("Client disconnected from IPC %s", self.socket_path) - break -- except socket.error as exc: -+ except OSError as exc: - # On occasion an exception will occur with - # an error code of 0, it's a spurious exception. - if exc.errno == 0: -@@ -247,7 +238,7 @@ class IPCServer(object): - # pylint: enable=W1701 - - --class IPCClient(object): -+class IPCClient: - """ - A Tornado IPC client very similar to Tornado's TCPClient class - but using either UNIX domain sockets or TCP sockets -@@ -282,10 +273,7 @@ class IPCClient(object): - # Under Py2 we still want raw to be set to True - msgpack_kwargs = {"raw": six.PY2} - else: -- if six.PY2: -- msgpack_kwargs = {"encoding": None} -- else: -- msgpack_kwargs = {"encoding": "utf-8"} -+ msgpack_kwargs = {"encoding": "utf-8"} - self.unpacker = salt.utils.msgpack.Unpacker(**msgpack_kwargs) - - def connected(self): -@@ -385,10 +373,10 @@ class IPCClient(object): - if self.stream is not None and not self.stream.closed(): - try: - self.stream.close() -- except socket.error as exc: -+ except OSError as exc: - if exc.errno != errno.EBADF: - # If its not a bad file descriptor error, raise -- six.reraise(*sys.exc_info()) -+ raise - - - class IPCMessageClient(IPCClient): -@@ -483,7 +471,7 @@ class IPCMessageServer(IPCServer): - """ - - --class IPCMessagePublisher(object): -+class IPCMessagePublisher: - """ - A Tornado IPC Publisher similar to Tornado's TCPServer class - but using either UNIX domain sockets or TCP sockets -@@ -645,10 +633,11 @@ class IPCMessageSubscriber(IPCClient): - """ - - def __init__(self, socket_path, io_loop=None): -- super(IPCMessageSubscriber, self).__init__(socket_path, io_loop=io_loop) -+ super().__init__(socket_path, io_loop=io_loop) +@@ -650,6 +650,7 @@ class IPCMessageSubscriber(IPCClient): self._read_stream_future = None self._saved_data = [] self._read_in_progress = Lock() @@ -728,7 +698,7 @@ index 041718d058..f411907da2 100644 @salt.ext.tornado.gen.coroutine def _read(self, timeout, callback=None): -@@ -725,8 +714,12 @@ class IPCMessageSubscriber(IPCClient): +@@ -749,8 +750,12 @@ class IPCMessageSubscriber(IPCClient): return self._saved_data.pop(0) return self.io_loop.run_sync(lambda: self._read(timeout)) @@ -742,7 +712,7 @@ index 041718d058..f411907da2 100644 """ Asynchronously read messages and invoke a callback when they are ready. 
-@@ -744,7 +737,7 @@ class IPCMessageSubscriber(IPCClient): +@@ -768,7 +773,7 @@ class IPCMessageSubscriber(IPCClient): except Exception as exc: # pylint: disable=broad-except log.error("Exception occurred while Subscriber connecting: %s", exc) yield salt.ext.tornado.gen.sleep(1) @@ -751,20 +721,11 @@ index 041718d058..f411907da2 100644 def close(self): """ -@@ -754,7 +747,7 @@ class IPCMessageSubscriber(IPCClient): - """ - if self._closing: - return -- super(IPCMessageSubscriber, self).close() -+ super().close() - # This will prevent this message from showing up: - # '[ERROR ] Future exception was never retrieved: - # StreamClosedError' diff --git a/salt/utils/event.py b/salt/utils/event.py -index 6f7edef4e5..ae200f9dfa 100644 +index 3c91daa2b4..fd23197747 100644 --- a/salt/utils/event.py +++ b/salt/utils/event.py -@@ -867,6 +867,10 @@ class SaltEvent: +@@ -920,6 +920,10 @@ class SaltEvent: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) @@ -775,7 +736,7 @@ index 6f7edef4e5..ae200f9dfa 100644 def set_event_handler(self, event_handler): """ Invoke the event_handler callback each time an event arrives. -@@ -875,8 +879,10 @@ class SaltEvent: +@@ -928,8 +932,10 @@ class SaltEvent: if not self.cpub: self.connect_pub() @@ -789,18 +750,17 @@ index 6f7edef4e5..ae200f9dfa 100644 def __del__(self): diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py new file mode 100644 -index 0000000000..3f8626a2dd +index 0000000000..c0b708de76 --- /dev/null +++ b/tests/unit/cli/test_batch_async.py -@@ -0,0 +1,357 @@ -+import tornado +@@ -0,0 +1,386 @@ ++import salt.ext.tornado +from salt.cli.batch_async import BatchAsync -+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch ++from salt.ext.tornado.testing import AsyncTestCase ++from tests.support.mock import MagicMock, patch +from tests.support.unit import TestCase, skipIf -+from tornado.testing import AsyncTestCase + + -+@skipIf(NO_MOCK, NO_MOCK_REASON) +class AsyncBatchTestCase(AsyncTestCase, TestCase): + def setUp(self): + self.io_loop = self.get_new_ioloop() @@ -847,37 +807,37 @@ index 0000000000..3f8626a2dd + self.batch.start_batch() + self.assertEqual(self.batch.batch_size, 2) + -+ @tornado.testing.gen_test ++ @salt.ext.tornado.testing.gen_test + def test_batch_start_on_batch_presence_ping_timeout(self): + self.batch.event = MagicMock() -+ future = tornado.gen.Future() ++ future = salt.ext.tornado.gen.Future() + future.set_result({"minions": ["foo", "bar"]}) + self.batch.local.run_job_async.return_value = future + ret = self.batch.start() + # assert start_batch is called later with batch_presence_ping_timeout as param + self.assertEqual( -+ self.batch.event.io_loop.call_later.call_args[0], -+ (self.batch.batch_presence_ping_timeout, self.batch.start_batch), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.start_batch,), + ) + # assert test.ping called + self.assertEqual( + self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob") + ) -+ # assert down_minions == all minions matched by tgt -+ self.assertEqual(self.batch.down_minions, {"foo", "bar"}) ++ # assert targeted_minions == all minions matched by tgt ++ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"}) + -+ @tornado.testing.gen_test ++ @salt.ext.tornado.testing.gen_test + def test_batch_start_on_gather_job_timeout(self): + self.batch.event = MagicMock() -+ future = tornado.gen.Future() ++ future = salt.ext.tornado.gen.Future() + future.set_result({"minions": 
["foo", "bar"]}) + self.batch.local.run_job_async.return_value = future + self.batch.batch_presence_ping_timeout = None + ret = self.batch.start() + # assert start_batch is called later with gather_job_timeout as param + self.assertEqual( -+ self.batch.event.io_loop.call_later.call_args[0], -+ (self.batch.opts["gather_job_timeout"], self.batch.start_batch), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.start_batch,), + ) + + def test_batch_fire_start_event(self): @@ -898,55 +858,81 @@ index 0000000000..3f8626a2dd + ), + ) + -+ @tornado.testing.gen_test ++ @salt.ext.tornado.testing.gen_test + def test_start_batch_calls_next(self): -+ self.batch.schedule_next = MagicMock(return_value=MagicMock()) ++ self.batch.run_next = MagicMock(return_value=MagicMock()) + self.batch.event = MagicMock() -+ future = tornado.gen.Future() -+ future.set_result(None) -+ self.batch.schedule_next = MagicMock(return_value=future) + self.batch.start_batch() + self.assertEqual(self.batch.initialized, True) -+ self.assertEqual(len(self.batch.schedule_next.mock_calls), 1) ++ self.assertEqual( ++ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,) ++ ) + + def test_batch_fire_done_event(self): ++ self.batch.targeted_minions = {"foo", "baz", "bar"} + self.batch.minions = {"foo", "bar"} ++ self.batch.done_minions = {"foo"} ++ self.batch.timedout_minions = {"bar"} + self.batch.event = MagicMock() + self.batch.metadata = {"mykey": "myvalue"} ++ old_event = self.batch.event + self.batch.end_batch() + self.assertEqual( -+ self.batch.event.fire_event.call_args[0], ++ old_event.fire_event.call_args[0], + ( + { + "available_minions": {"foo", "bar"}, -+ "done_minions": set(), -+ "down_minions": set(), -+ "timedout_minions": set(), ++ "done_minions": self.batch.done_minions, ++ "down_minions": {"baz"}, ++ "timedout_minions": self.batch.timedout_minions, + "metadata": self.batch.metadata, + }, + "salt/batch/1235/done", + ), + ) -+ self.assertEqual(len(self.batch.event.remove_event_handler.mock_calls), 1) + -+ @tornado.testing.gen_test ++ def test_batch__del__(self): ++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) ++ event = MagicMock() ++ batch.event = event ++ batch.__del__() ++ self.assertEqual(batch.local, None) ++ self.assertEqual(batch.event, None) ++ self.assertEqual(batch.ioloop, None) ++ ++ def test_batch_close_safe(self): ++ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) ++ event = MagicMock() ++ batch.event = event ++ batch.patterns = { ++ ("salt/job/1234/ret/*", "find_job_return"), ++ ("salt/job/4321/ret/*", "find_job_return"), ++ } ++ batch.close_safe() ++ self.assertEqual(batch.local, None) ++ self.assertEqual(batch.event, None) ++ self.assertEqual(batch.ioloop, None) ++ self.assertEqual(len(event.unsubscribe.mock_calls), 2) ++ self.assertEqual(len(event.remove_event_handler.mock_calls), 1) ++ ++ @salt.ext.tornado.testing.gen_test + def test_batch_next(self): + self.batch.event = MagicMock() + self.batch.opts["fun"] = "my.fun" + self.batch.opts["arg"] = [] + self.batch._get_next = MagicMock(return_value={"foo", "bar"}) + self.batch.batch_size = 2 -+ future = tornado.gen.Future() ++ future = salt.ext.tornado.gen.Future() + future.set_result({"minions": ["foo", "bar"]}) + self.batch.local.run_job_async.return_value = future -+ ret = self.batch.schedule_next().result() ++ self.batch.run_next() + self.assertEqual( + self.batch.local.run_job_async.call_args[0], + ({"foo", "bar"}, "my.fun", [], "list"), + ) + self.assertEqual( -+ 
self.batch.event.io_loop.call_later.call_args[0], -+ (self.batch.opts["timeout"], self.batch.find_job, {"foo", "bar"}), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.find_job, {"foo", "bar"}), + ) + self.assertEqual(self.batch.active, {"bar", "foo"}) + @@ -1000,7 +986,7 @@ index 0000000000..3f8626a2dd + self.assertEqual(self.batch._get_next(), set()) + + def test_batch__event_handler_ping_return(self): -+ self.batch.down_minions = {"foo"} ++ self.batch.targeted_minions = {"foo"} + self.batch.event = MagicMock( + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) @@ -1011,7 +997,7 @@ index 0000000000..3f8626a2dd + self.assertEqual(self.batch.done_minions, set()) + + def test_batch__event_handler_call_start_batch_when_all_pings_return(self): -+ self.batch.down_minions = {"foo"} ++ self.batch.targeted_minions = {"foo"} + self.batch.event = MagicMock( + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) @@ -1023,7 +1009,7 @@ index 0000000000..3f8626a2dd + ) + + def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self): -+ self.batch.down_minions = {"foo", "bar"} ++ self.batch.targeted_minions = {"foo", "bar"} + self.batch.event = MagicMock( + unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) + ) @@ -1041,72 +1027,68 @@ index 0000000000..3f8626a2dd + self.assertEqual(self.batch.active, set()) + self.assertEqual(self.batch.done_minions, {"foo"}) + self.assertEqual( -+ self.batch.event.io_loop.call_later.call_args[0], -+ (self.batch.batch_delay, self.batch.schedule_next), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.schedule_next,), + ) + + def test_batch__event_handler_find_job_return(self): + self.batch.event = MagicMock( -+ unpack=MagicMock(return_value=("salt/job/1236/ret/foo", {"id": "foo"})) ++ unpack=MagicMock( ++ return_value=( ++ "salt/job/1236/ret/foo", ++ {"id": "foo", "return": "deadbeaf"}, ++ ) ++ ) + ) + self.batch.start() ++ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return")) + self.batch._BatchAsync__event_handler(MagicMock()) + self.assertEqual(self.batch.find_job_returned, {"foo"}) + -+ @tornado.testing.gen_test -+ def test_batch__event_handler_end_batch(self): -+ self.batch.event = MagicMock( -+ unpack=MagicMock( -+ return_value=("salt/job/not-my-jid/ret/foo", {"id": "foo"}) -+ ) -+ ) -+ future = tornado.gen.Future() -+ future.set_result({"minions": ["foo", "bar", "baz"]}) -+ self.batch.local.run_job_async.return_value = future -+ self.batch.start() -+ self.batch.initialized = True -+ self.assertEqual(self.batch.down_minions, {"foo", "bar", "baz"}) ++ @salt.ext.tornado.testing.gen_test ++ def test_batch_run_next_end_batch_when_no_next(self): + self.batch.end_batch = MagicMock() -+ self.batch.minions = {"foo", "bar", "baz"} -+ self.batch.done_minions = {"foo", "bar"} -+ self.batch.timedout_minions = {"baz"} -+ self.batch._BatchAsync__event_handler(MagicMock()) ++ self.batch._get_next = MagicMock(return_value={}) ++ self.batch.run_next() + self.assertEqual(len(self.batch.end_batch.mock_calls), 1) + -+ @tornado.testing.gen_test ++ @salt.ext.tornado.testing.gen_test + def test_batch_find_job(self): + self.batch.event = MagicMock() -+ future = tornado.gen.Future() ++ future = salt.ext.tornado.gen.Future() + future.set_result({}) + self.batch.local.run_job_async.return_value = future ++ self.batch.minions = {"foo", "bar"} ++ self.batch.jid_gen = MagicMock(return_value="1234") ++ salt.ext.tornado.gen.sleep = 
MagicMock(return_value=future) + self.batch.find_job({"foo", "bar"}) + self.assertEqual( -+ self.batch.event.io_loop.call_later.call_args[0], -+ ( -+ self.batch.opts["gather_job_timeout"], -+ self.batch.check_find_job, -+ {"foo", "bar"}, -+ ), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.check_find_job, {"foo", "bar"}, "1234"), + ) + -+ @tornado.testing.gen_test ++ @salt.ext.tornado.testing.gen_test + def test_batch_find_job_with_done_minions(self): + self.batch.done_minions = {"bar"} + self.batch.event = MagicMock() -+ future = tornado.gen.Future() ++ future = salt.ext.tornado.gen.Future() + future.set_result({}) + self.batch.local.run_job_async.return_value = future ++ self.batch.minions = {"foo", "bar"} ++ self.batch.jid_gen = MagicMock(return_value="1234") ++ salt.ext.tornado.gen.sleep = MagicMock(return_value=future) + self.batch.find_job({"foo", "bar"}) + self.assertEqual( -+ self.batch.event.io_loop.call_later.call_args[0], -+ (self.batch.opts["gather_job_timeout"], self.batch.check_find_job, {"foo"}), ++ self.batch.event.io_loop.spawn_callback.call_args[0], ++ (self.batch.check_find_job, {"foo"}, "1234"), + ) + + def test_batch_check_find_job_did_not_return(self): + self.batch.event = MagicMock() + self.batch.active = {"foo"} + self.batch.find_job_returned = set() -+ self.batch.check_find_job({"foo"}) ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} ++ self.batch.check_find_job({"foo"}, jid="1234") + self.assertEqual(self.batch.find_job_returned, set()) + self.assertEqual(self.batch.active, set()) + self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0) @@ -1114,9 +1096,10 @@ index 0000000000..3f8626a2dd + def test_batch_check_find_job_did_return(self): + self.batch.event = MagicMock() + self.batch.find_job_returned = {"foo"} -+ self.batch.check_find_job({"foo"}) ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} ++ self.batch.check_find_job({"foo"}, jid="1234") + self.assertEqual( -+ self.batch.event.io_loop.add_callback.call_args[0], ++ self.batch.event.io_loop.spawn_callback.call_args[0], + (self.batch.find_job, {"foo"}), + ) + @@ -1137,7 +1120,8 @@ index 0000000000..3f8626a2dd + # both not yet done but only 'foo' responded to find_job + not_done = {"foo", "bar"} + -+ self.batch.check_find_job(not_done) ++ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} ++ self.batch.check_find_job(not_done, jid="1234") + + # assert 'bar' removed from active + self.assertEqual(self.batch.active, {"foo"}) @@ -1147,10 +1131,16 @@ index 0000000000..3f8626a2dd + + # assert 'find_job' schedueled again only for 'foo' + self.assertEqual( -+ self.batch.event.io_loop.add_callback.call_args[0], ++ self.batch.event.io_loop.spawn_callback.call_args[0], + (self.batch.find_job, {"foo"}), + ) ++ ++ def test_only_on_run_next_is_scheduled(self): ++ self.batch.event = MagicMock() ++ self.batch.scheduled = True ++ self.batch.schedule_next() ++ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0) -- -2.29.2 +2.33.0 diff --git a/avoid-traceback-when-http.query-request-cannot-be-pe.patch b/avoid-traceback-when-http.query-request-cannot-be-pe.patch deleted file mode 100644 index 1e882c5..0000000 --- a/avoid-traceback-when-http.query-request-cannot-be-pe.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 2fecfe18cf17389714ab5bed0ff59bec2d1e1c36 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Mon, 29 Jul 2019 11:17:53 +0100 -Subject: [PATCH] Avoid traceback 
- performed (bsc#1128554)
-
-Improve error logging when http.query cannot be performed
----
- salt/utils/http.py | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-diff --git a/salt/utils/http.py b/salt/utils/http.py
-index 5ab4503f61..9522bd6ee4 100644
---- a/salt/utils/http.py
-+++ b/salt/utils/http.py
-@@ -628,12 +628,17 @@ def query(
-     except salt.ext.tornado.httpclient.HTTPError as exc:
-         ret["status"] = exc.code
-         ret["error"] = str(exc)
-+        log.error(
-+            "Cannot perform 'http.query': {} - {}".format(url_full, ret["error"])
-+        )
-         return ret
-     except (socket.herror, OSError, socket.timeout, socket.gaierror) as exc:
-         if status is True:
-             ret["status"] = 0
-         ret["error"] = str(exc)
--        log.debug("Cannot perform 'http.query': %s - %s", url_full, ret["error"])
-+        log.error(
-+            "Cannot perform 'http.query': {} - {}".format(url_full, ret["error"])
-+        )
-         return ret
-
-     if stream is True or handle is True:
---
-2.29.2
-
-
diff --git a/backport-a-few-virt-prs-272.patch b/backport-a-few-virt-prs-272.patch
deleted file mode 100644
index 0da018d..0000000
--- a/backport-a-few-virt-prs-272.patch
+++ /dev/null
@@ -1,1188 +0,0 @@
-From acee2074e9fe4da2731e61a554639e773c04e43a Mon Sep 17 00:00:00 2001
-From: Cedric Bosdonnat
-Date: Mon, 5 Oct 2020 16:49:59 +0200
-Subject: [PATCH] Backport a few virt PRs (#272)
-
-* Fix virt update when cpu and memory are changed
-
-If CPU is changed, the memory change would be short circuited. This is a
-regression introduced by PR #58332
-
-* virt: add VM memory tunning support
-
-* avoid comparing string with integer
-
-* fix pre-commit failure
-
-* Properly fix memory setting regression in virt.update
-
-The 'mem' property in the virt.update value should indicate the result
-of a live memory setting. The value should be an int in KiB. Fixing the
-code and tests for this.
-
-* virt: add stop_on_reboot parameter in guest states and definition
-
-It can be needed to force a VM to stop instead of rebooting. A typical
-example of this is when creating a VM using a install CDROM ISO or when
-using an autoinstallation profile. Forcing a shutdown allows libvirt to
-pick up another XML definition for the new start to remove the
-firstboot-only options.
-
-* virt: expose live parameter in virt.defined state
-
-Allow updating the definition of a VM without touching the live
-instance. This can be helpful since live update may change the device
-names in the guest.
-
-Co-authored-by: firefly
-Co-authored-by: gqlo
----
- changelog/57639.added | 1 +
- changelog/58589.added | 1 +
- salt/modules/virt.py | 284 ++++++++++++++++++--
- salt/states/virt.py | 71 ++++-
- salt/templates/virt/libvirt_domain.jinja | 30 ++-
- salt/utils/xmlutil.py | 2 +-
- tests/unit/modules/test_virt.py | 318 ++++++++++++++++++++++-
- tests/unit/states/test_virt.py | 14 +-
- 8 files changed, 687 insertions(+), 34 deletions(-)
- create mode 100644 changelog/57639.added
- create mode 100644 changelog/58589.added
-
-diff --git a/changelog/57639.added b/changelog/57639.added
-new file mode 100644
-index 0000000000..c0281e9319
---- /dev/null
-+++ b/changelog/57639.added
-@@ -0,0 +1 @@
-+Memory Tuning Support which allows much greater control of memory allocation
-diff --git a/changelog/58589.added b/changelog/58589.added
-new file mode 100644
-index 0000000000..5960555ec6
---- /dev/null
-+++ b/changelog/58589.added
-@@ -0,0 +1 @@
-+Allow handling special first boot definition on virtual machine
-diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index e306bc0679..8e2180608a 100644
---- a/salt/modules/virt.py
-+++ b/salt/modules/virt.py
-@@ -71,6 +71,50 @@ The calls not using the libvirt connection setup are:
- - `libvirt URI format `_
- - `libvirt authentication configuration `_
-
-+Units
-+==========
-+.. _virt-units:
-+.. rubric:: Units specification
-+.. versionadded:: Magnesium
-+
-+The string should contain a number optionally followed
-+by a unit. The number may have a decimal fraction. If
-+the unit is not given then MiB are set by default.
-+Units can optionally be given in IEC style (such as MiB),
-+although the standard single letter style (such as M) is
-+more convenient.
-+
-+Valid units include:
-+
-+========== =====  ==========  ==========  ======
-+Standard   IEC    Standard    IEC
-+  Unit     Unit     Name        Name      Factor
-+========== =====  ==========  ==========  ======
-+    B             Bytes                   1
-+    K      KiB    Kilobytes   Kibibytes   2**10
-+    M      MiB    Megabytes   Mebibytes   2**20
-+    G      GiB    Gigabytes   Gibibytes   2**30
-+    T      TiB    Terabytes   Tebibytes   2**40
-+    P      PiB    Petabytes   Pebibytes   2**50
-+    E      EiB    Exabytes    Exbibytes   2**60
-+    Z      ZiB    Zettabytes  Zebibytes   2**70
-+    Y      YiB    Yottabytes  Yobibytes   2**80
-+========== =====  ==========  ==========  ======
-+
-+Additional decimal based units:
-+
-+======  =======
-+Unit    Factor
-+======  =======
-+KB      10**3
-+MB      10**6
-+GB      10**9
-+TB      10**12
-+PB      10**15
-+EB      10**18
-+ZB      10**21
-+YB      10**24
-+======  =======
- """
- # Special Thanks to Michael Dehann, many of the concepts, and a few structures
- # of his in the virt func module have been used
-@@ -719,6 +763,39 @@ def _disk_from_pool(conn, pool, pool_xml, volume_name):
-     return disk_context
-
-
-+def _handle_unit(s, def_unit="m"):
-+    """
-+    Handle the unit conversion, return the value in bytes
-+    """
-+    m = re.match(r"(?P[0-9.]*)\s*(?P.*)$", str(s).strip())
-+    value = m.group("value")
-+    # default unit
-+    unit = m.group("unit").lower() or def_unit
-+    try:
-+        value = int(value)
-+    except ValueError:
-+        try:
-+            value = float(value)
-+        except ValueError:
-+            raise SaltInvocationError("invalid number")
-+    # flag for base ten
-+    dec = False
-+    if re.match(r"[kmgtpezy]b$", unit):
-+        dec = True
-+    elif not re.match(r"(b|[kmgtpezy](ib)?)$", unit):
-+        raise SaltInvocationError("invalid units")
-+    p = "bkmgtpezy".index(unit[0])
-+    value *= 10 ** (p * 3) if dec else 2 ** (p * 10)
-+    return int(value)
-+
-+
-+def nesthash():
-+    """
-+    create default dict that allows arbitrary level of nesting
-+    """
-+    return collections.defaultdict(nesthash)
-+
-+
- def _gen_xml(
-     conn,
-     name,
-@@ -732,18 +809,32 @@ def _gen_xml(
-     graphics=None,
-     boot=None,
-     boot_dev=None,
-+    stop_on_reboot=False,
-     **kwargs
- ):
-     """
-     Generate the XML string to define a libvirt VM
-     """
--    mem = int(mem) * 1024  # MB
-     context = {
-         "hypervisor": hypervisor,
-         "name": name,
-         "cpu": str(cpu),
--        "mem": str(mem),
-+        "on_reboot": "destroy" if stop_on_reboot else "restart",
-     }
-+
-+    context["mem"] = nesthash()
-+    if isinstance(mem, int):
-+        mem = int(mem) * 1024  # MB
-+        context["mem"]["boot"] = str(mem)
-+        context["mem"]["current"] = str(mem)
-+    elif isinstance(mem, dict):
-+        for tag, val in mem.items():
-+            if val:
-+                if tag == "slots":
-+                    context["mem"]["slots"] = "{}='{}'".format(tag, val)
-+                else:
-+                    context["mem"][tag] = str(int(_handle_unit(val) / 1024))
-+
-     if hypervisor in ["qemu", "kvm"]:
-         context["controller_model"] = False
-     elif hypervisor == "vmware":
-@@ -863,7 +954,6 @@ def _gen_xml(
-     except jinja2.exceptions.TemplateNotFound:
-         log.error("Could not load template %s", fn_)
-         return ""
--
-     return template.render(**context)
-
-
-@@ -1662,6 +1752,7 @@ def init(
-     arch=None,
-     boot=None,
-     boot_dev=None,
-+    stop_on_reboot=False,
-     **kwargs
- ):
-     """
-@@ -1669,7 +1760,28 @@ def init(
-
-     :param name: name of the virtual machine to create
-     :param cpu: Number of virtual CPUs to assign to the virtual machine
--    :param mem: Amount of memory to allocate to the virtual machine in MiB.
-+    :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
-+        contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
-+        ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
-+        structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
-+        Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
-+        an integer.
-+
-+        .. code-block:: python
-+
-+            {
-+                'boot': 1g,
-+                'current': 1g,
-+                'max': 1g,
-+                'slots': 10,
-+                'hard_limit': '1024'
-+                'soft_limit': '512m'
-+                'swap_hard_limit': '1g'
-+                'min_guarantee': '512mib'
-+            }
-+
-+        .. versionchanged:: Magnesium
-+
-     :param nic: NIC profile to use (Default: ``'default'``).
-         The profile interfaces can be customized / extended with the interfaces parameter.
-         If set to ``None``, no profile will be used.
-@@ -1726,6 +1838,15 @@ def init(
-     :param password: password to connect with, overriding defaults
-
-        .. versionadded:: 2019.2.0
-+
-+    :param stop_on_reboot:
-+        If set to ``True`` the guest will stop instead of rebooting.
-+        This is specially useful when creating a virtual machine with an installation cdrom or
-+        an autoinstallation needing a special first boot configuration.
-+        Defaults to ``False``
-+
-+        .. versionadded:: Aluminium
-+
-     :param boot:
-         Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine.
-         This is an optional parameter, all of the keys are optional within the dictionary. The structure of
-@@ -1782,6 +1903,36 @@ def init(
-
-         .. versionadded:: sodium
-
-+    .. _init-mem-def:
-+
-+    .. rubric:: Memory parameter definition
-+
-+    Memory parameter can contain the following properties:
-+
-+    boot
-+        The maximum allocation of memory for the guest at boot time
-+
-+    current
-+        The actual allocation of memory for the guest
-+
-+    max
-+        The run time maximum memory allocation of the guest
-+
-+    slots
-+        specifies the number of slots available for adding memory to the guest
-+
-+    hard_limit
-+        the maximum memory the guest can use
-+
-+    soft_limit
-+        memory limit to enforce during memory contention
-+
-+    swap_hard_limit
-+        the maximum memory plus swap the guest can use
-+
-+    min_guarantee
-+        the guaranteed minimum memory allocation for the guest
-+
-     .. _init-nic-def:
-
-     .. rubric:: Network Interfaces Definitions
-@@ -2076,6 +2227,7 @@ def init(
-             graphics,
-             boot,
-             boot_dev,
-+            stop_on_reboot,
-             **kwargs
-         )
-         log.debug("New virtual machine definition: %s", vm_xml)
-@@ -2305,6 +2457,7 @@ def update(
-     boot=None,
-     test=False,
-     boot_dev=None,
-+    stop_on_reboot=False,
-     **kwargs
- ):
-     """
-@@ -2312,7 +2465,7 @@ def update(
-
-     :param name: Name of the domain to update
-     :param cpu: Number of virtual CPUs to assign to the virtual machine
--    :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to
-+    :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
-         contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
-         ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
-         structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
-@@ -2328,7 +2481,7 @@ def update(
-             hard_limit: null
-             soft_limit: null
-
--        .. versionchanged:: 3002
-+        .. versionchanged:: Magnesium
-
-     :param disk_profile: disk profile to use
-     :param disks:
-@@ -2386,6 +2539,14 @@ def update(
-
-         .. versionadded:: Magnesium
-
-+    :param stop_on_reboot:
-+        If set to ``True`` the guest will stop instead of rebooting.
-+        This is specially useful when creating a virtual machine with an installation cdrom or
-+        an autoinstallation needing a special first boot configuration.
-+        Defaults to ``False``
-+
-+        .. versionadded:: Aluminium
-+
-     :param test: run in dry-run mode if set to True
-
-         .. versionadded:: sodium
-@@ -2449,6 +2610,8 @@ def update(
-             desc.find(".//os/type").get("arch"),
-             graphics,
-             boot,
-+            boot_dev,
-+            stop_on_reboot,
-             **kwargs
-         )
-     )
-@@ -2469,12 +2632,26 @@ def update(
-     def _set_nvram(node, value):
-         node.set("template", value)
-
--    def _set_with_mib_unit(node, value):
-+    def _set_with_byte_unit(node, value):
-         node.text = str(value)
--        node.set("unit", "MiB")
-+        node.set("unit", "bytes")
-+
-+    def _get_with_unit(node):
-+        unit = node.get("unit", "KiB")
-+        # _handle_unit treats bytes as invalid unit for the purpose of consistency
-+        unit = unit if unit != "bytes" else "b"
-+        value = node.get("memory") or node.text
-+        return _handle_unit("{}{}".format(value, unit)) if value else None
-+
-+    old_mem = int(_get_with_unit(desc.find("memory")) / 1024)
-
-     # Update the kernel boot parameters
-     params_mapping = [
-+        {
-+            "path": "stop_on_reboot",
-+            "xpath": "on_reboot",
-+            "convert": lambda v: "destroy" if v else "restart",
-+        },
-         {"path": "boot:kernel", "xpath": "os/kernel"},
-         {"path": "boot:initrd", "xpath": "os/initrd"},
-         {"path": "boot:cmdline", "xpath": "os/cmdline"},
-@@ -2484,14 +2661,72 @@ def update(
-         {
-             "path": "mem",
-             "xpath": "memory",
--            "get": lambda n: int(n.text) / 1024,
--            "set": _set_with_mib_unit,
-+            "convert": _handle_unit,
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-         },
-         {
-             "path": "mem",
-             "xpath": "currentMemory",
--            "get": lambda n: int(n.text) / 1024,
--            "set": _set_with_mib_unit,
-+            "convert": _handle_unit,
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:max",
-+            "convert": _handle_unit,
-+            "xpath": "maxMemory",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:boot",
-+            "convert": _handle_unit,
-+            "xpath": "memory",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:current",
-+            "convert": _handle_unit,
-+            "xpath": "currentMemory",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:slots",
-+            "xpath": "maxMemory",
-+            "get": lambda n: n.get("slots"),
-+            "set": lambda n, v: n.set("slots", str(v)),
-+            "del": salt.utils.xmlutil.del_attribute("slots", ["unit"]),
-+        },
-+        {
-+            "path": "mem:hard_limit",
-+            "convert": _handle_unit,
-+            "xpath": "memtune/hard_limit",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:soft_limit",
-+            "convert": _handle_unit,
-+            "xpath": "memtune/soft_limit",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:swap_hard_limit",
-+            "convert": _handle_unit,
-+            "xpath": "memtune/swap_hard_limit",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-+        },
-+        {
-+            "path": "mem:min_guarantee",
-+            "convert": _handle_unit,
-+            "xpath": "memtune/min_guarantee",
-+            "get": _get_with_unit,
-+            "set": _set_with_byte_unit,
-         },
-         {
-             "path": "boot_dev:{dev}",
-@@ -2577,13 +2812,24 @@ def update(
-             }
-         )
-     if mem:
--        commands.append(
--            {
--                "device": "mem",
--                "cmd": "setMemoryFlags",
--                "args": [mem * 1024, libvirt.VIR_DOMAIN_AFFECT_LIVE],
--            }
--        )
-+        if isinstance(mem, dict):
-+            # setMemoryFlags takes memory amount in KiB
-+            new_mem = (
-+                int(_handle_unit(mem.get("current")) / 1024)
-+                if "current" in mem
-+                else None
-+            )
-+        elif isinstance(mem, int):
-+            new_mem = int(mem * 1024)
-+
-+        if old_mem != new_mem and new_mem is not None:
-+            commands.append(
-+                {
-+                    "device": "mem",
-+                    "cmd": "setMemoryFlags",
-+                    "args": [new_mem, libvirt.VIR_DOMAIN_AFFECT_LIVE],
-+                }
-+            )
-
-     # Look for removable device source changes
-     new_disks = []
-diff --git a/salt/states/virt.py b/salt/states/virt.py
-index df7ebb63e6..20ea1c25f1 100644
---- a/salt/states/virt.py
-+++ b/salt/states/virt.py
-@@ -289,6 +289,8 @@ def defined(
-     boot=None,
-     update=True,
-     boot_dev=None,
-+    stop_on_reboot=False,
-+    live=True,
- ):
-     """
-     Starts an existing guest, or defines and starts a new VM with specified arguments.
-@@ -297,7 +299,28 @@ def defined(
-
-     :param name: name of the virtual machine to run
-     :param cpu: number of CPUs for the virtual machine to create
--    :param mem: amount of memory in MiB for the new virtual machine
-+    :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
-+        contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
-+        ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
-+        structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
-+        Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
-+        an integer.
-+
-+        .. code-block:: python
-+
-+            {
-+                'boot': 1g,
-+                'current': 1g,
-+                'max': 1g,
-+                'slots': 10,
-+                'hard_limit': '1024'
-+                'soft_limit': '512m'
-+                'swap_hard_limit': '1g'
-+                'min_guarantee': '512mib'
-+            }
-+
-+        .. versionchanged:: Magnesium
-+
-     :param vm_type: force virtual machine type for the new VM. The default value is taken from
-         the host capabilities. This could be useful for example to use ``'qemu'`` type instead
-         of the ``'kvm'`` one.
-@@ -357,6 +380,20 @@ def defined(
-
-         .. versionadded:: Magnesium
-
-+    :param stop_on_reboot:
-+        If set to ``True`` the guest will stop instead of rebooting.
-+        This is specially useful when creating a virtual machine with an installation cdrom or
-+        an autoinstallation needing a special first boot configuration.
-+        Defaults to ``False``
-+
-+        .. versionadded:: Aluminium
-+
-+    :param live:
-+        If set to ``False`` the changes will not be applied live to the running instance, but will
-+        only apply at the next start. Note that reboot will not take those changes.
-+
-+        .. versionadded:: Aluminium
-+
-     .. rubric:: Example States
-
-     Make sure a virtual machine called ``domain_name`` is defined:
-@@ -414,13 +451,14 @@ def defined(
-                 nic_profile=nic_profile,
-                 interfaces=interfaces,
-                 graphics=graphics,
--                live=True,
-+                live=live,
-                 connection=connection,
-                 username=username,
-                 password=password,
-                 boot=boot,
-                 test=__opts__["test"],
-                 boot_dev=boot_dev,
-+                stop_on_reboot=stop_on_reboot,
-             )
-             ret["changes"][name] = status
-             if not status.get("definition"):
-@@ -456,6 +494,7 @@ def defined(
-                 boot=boot,
-                 start=False,
-                 boot_dev=boot_dev,
-+                stop_on_reboot=stop_on_reboot,
-             )
-             ret["changes"][name] = {"definition": True}
-             ret["comment"] = "Domain {} defined".format(name)
-@@ -489,6 +528,7 @@ def running(
-     arch=None,
-     boot=None,
-     boot_dev=None,
-+    stop_on_reboot=False,
- ):
-     """
-     Starts an existing guest, or defines and starts a new VM with specified arguments.
-@@ -497,7 +537,23 @@ def running(
-
-     :param name: name of the virtual machine to run
-     :param cpu: number of CPUs for the virtual machine to create
--    :param mem: amount of memory in MiB for the new virtual machine
-+    :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to
-+        contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``,
-+        ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The
-+        structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported.
-+        Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be
-+        an integer.
-+
-+        To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None``
-+        is mapped to ``null`` in sls file, pass ``null`` in sls file instead.
-+
-+        .. code-block:: yaml
-+
-+            - mem:
-+                hard_limit: null
-+                soft_limit: null
-+
-+        .. versionchanged:: Magnesium
-     :param vm_type: force virtual machine type for the new VM. The default value is taken from
-         the host capabilities. This could be useful for example to use ``'qemu'`` type instead
-         of the ``'kvm'`` one.
-@@ -608,6 +664,14 @@ def running(
-
-         .. versionadded:: Magnesium
-
-+    :param stop_on_reboot:
-+        If set to ``True`` the guest will stop instead of rebooting.
-+        This is specially useful when creating a virtual machine with an installation cdrom or
-+        an autoinstallation needing a special first boot configuration.
-+        Defaults to ``False``
-+
-+        .. versionadded:: Aluminium
-+
-     .. rubric:: Example States
-
-     Make sure an already-defined virtual machine called ``domain_name`` is running:
-@@ -676,6 +740,7 @@ def running(
-             boot=boot,
-             update=update,
-             boot_dev=boot_dev,
-+            stop_on_reboot=stop_on_reboot,
-             connection=connection,
-             username=username,
-             password=password,
-diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
-index 18728a75b5..fb4c9f40d0 100644
---- a/salt/templates/virt/libvirt_domain.jinja
-+++ b/salt/templates/virt/libvirt_domain.jinja
-@@ -2,9 +2,32 @@
-
-     {{ name }}
-     {{ cpu }}
--    {{ mem }}
--    {{ mem }}
--
-+    {%- if mem.max %}
-+    {{ mem.max }}
-+    {%- endif %}
-+    {%- if mem.boot %}
-+    {{ mem.boot }}
-+    {%- endif %}
-+    {%- if mem.current %}
-+    {{ mem.current }}
-+    {%- endif %}
-+    {%- if mem %}
-+
-+    {%- if 'hard_limit' in mem and mem.hard_limit %}
-+    {{ mem.hard_limit }}
-+    {%- endif %}
-+    {%- if 'soft_limit' in mem and mem.soft_limit %}
-+    {{ mem.soft_limit }}
-+    {%- endif %}
-+    {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %}
-+    {{ mem.swap_hard_limit }}
-+    {%- endif %}
-+    {%- if 'min_guarantee' in mem and mem.min_guarantee %}
-+    {{ mem.min_guarantee }}
-+    {%- endif %}
-+
-+    {%- endif %}
-+
-     {{ os_type }}
-     {% if boot %}
-     {% if 'kernel' in boot %}
-@@ -27,6 +50,7 @@
-
-     {% endfor %}
-
-+    {{ on_reboot }}
-
-     {% for disk in disks %}
-
-diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py
-index 111ca155d4..d25f5c8da5 100644
---- a/salt/utils/xmlutil.py
-+++ b/salt/utils/xmlutil.py
-@@ -299,7 +299,7 @@ def change_xml(doc, data, mapping):
-             if convert_fn:
-                 new_value = convert_fn(new_value)
-
--            if current_value != new_value:
-+            if str(current_value) != str(new_value):
-                 set_fn(node, new_value)
-                 need_update = True
-         else:
-diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index e214e406e2..fba821ea53 100644
---- a/tests/unit/modules/test_virt.py
-+++ b/tests/unit/modules/test_virt.py
-@@ -21,7 +21,6 @@ from salt.ext import six
-
- # pylint: disable=import-error
- from salt.ext.six.moves import range  # pylint: disable=redefined-builtin
--from tests.support.helpers import dedent
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.mock import MagicMock, patch
- from tests.support.unit import TestCase
-@@ -1856,6 +1855,25 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-             virt.update("my_vm"),
-         )
-
-+        # mem + cpu case
-+        define_mock.reset_mock()
-+        domain_mock.setMemoryFlags.return_value = 0
-+        domain_mock.setVcpusFlags.return_value = 0
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+                "mem": True,
-+                "cpu": True,
-+            },
-+            virt.update("my_vm", mem=2048, cpu=2),
-+        )
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual("2", setxml.find("vcpu").text)
-+        self.assertEqual("2147483648", setxml.find("memory").text)
-+        self.assertEqual(2048 * 1024, domain_mock.setMemoryFlags.call_args[0][0])
-+
-         # Same parameters passed than in default virt.defined state case
-         self.assertEqual(
-             {
-@@ -2001,6 +2019,50 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-         with self.assertRaises(SaltInvocationError):
-             virt.update("my_vm", boot={"efi": "Not a boolean value"})
-
-+        # Update memtune parameter case
-+        memtune = {
-+            "soft_limit": "0.5g",
-+            "hard_limit": "1024",
-+            "swap_hard_limit": "2048m",
-+            "min_guarantee": "1 g",
-+        }
-+
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("my_vm", mem=memtune),
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(
-+            setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3))
-+        )
-+        self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes")
-+        self.assertEqual(
-+            setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
-+        )
-+
-+        invalid_unit = {"soft_limit": "2HB"}
-+
-+        with self.assertRaises(SaltInvocationError):
-+            virt.update("my_vm", mem=invalid_unit)
-+
-+        invalid_number = {
-+            "soft_limit": "3.4.MB",
-+        }
-+
-+        with self.assertRaises(SaltInvocationError):
-+            virt.update("my_vm", mem=invalid_number)
-+
-         # Update memory case
-         setmem_mock = MagicMock(return_value=0)
-         domain_mock.setMemoryFlags = setmem_mock
-@@ -2015,10 +2077,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-             virt.update("my_vm", mem=2048),
-         )
-         setxml = ET.fromstring(define_mock.call_args[0][0])
--        self.assertEqual(setxml.find("memory").text, "2048")
--        self.assertEqual(setxml.find("memory").get("unit"), "MiB")
-+        self.assertEqual(setxml.find("memory").text, str(2048 * 1024 ** 2))
-+        self.assertEqual(setxml.find("memory").get("unit"), "bytes")
-         self.assertEqual(setmem_mock.call_args[0][0], 2048 * 1024)
-
-+        mem_dict = {"boot": "0.5g", "current": "2g", "max": "1g", "slots": 12}
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "mem": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("my_vm", mem=mem_dict),
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(setxml.find("memory").get("unit"), "bytes")
-+        self.assertEqual(setxml.find("memory").text, str(int(0.5 * 1024 ** 3)))
-+        self.assertEqual(setxml.find("maxMemory").text, str(1 * 1024 ** 3))
-+        self.assertEqual(setxml.find("currentMemory").text, str(2 * 1024 ** 3))
-+
-+        max_slot_reverse = {
-+            "slots": "10",
-+            "max": "3096m",
-+        }
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("my_vm", mem=max_slot_reverse),
-+        )
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
-+        self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
-+
-         # Update disks case
-         devattach_mock = MagicMock(return_value=0)
-         devdetach_mock = MagicMock(return_value=0)
-@@ -2533,7 +2628,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-         """
-         Test virt.update() with existing boot parameters.
-         """
--        root_dir = os.path.join(salt.syspaths.ROOT_DIR, "srv", "salt-images")
-         xml_boot = """
-             
-               vm_with_boot_param
-               1048576
-               1048576
-               1
-               
-               
-               
-               
-               
-               
-               
-               
-             
--            """.format(
--            root_dir, os.sep
--        )
-+            """
-         domain_mock_boot = self.set_mock_vm("vm_with_boot_param", xml_boot)
-         domain_mock_boot.OSType = MagicMock(return_value="hvm")
-         define_mock_boot = MagicMock(return_value=True)
-@@ -2694,6 +2786,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
-         self.assertEqual(setxml.find("os").find("loader"), None)
-         self.assertEqual(setxml.find("os").find("nvram"), None)
-
-+    def test_update_memtune_params(self):
-+        """
-+        Test virt.update() with memory tuning parameters.
-+        """
-+        xml_with_memtune_params = """
-+            
-+              vm_with_boot_param
-+              1048576
-+              1048576
-+              1048576
-+              1
-+              
-+                1048576
-+                2097152
-+                2621440
-+                671088
-+              
-+              
-+                hvm
-+              
-+            """
-+        domain_mock = self.set_mock_vm("vm_with_memtune_param", xml_with_memtune_params)
-+        domain_mock.OSType = MagicMock(return_value="hvm")
-+        define_mock = MagicMock(return_value=True)
-+        self.mock_conn.defineXML = define_mock
-+
-+        memtune_new_val = {
-+            "boot": "0.7g",
-+            "current": "2.5g",
-+            "max": "3096m",
-+            "slots": "10",
-+            "soft_limit": "2048m",
-+            "hard_limit": "1024",
-+            "swap_hard_limit": "2.5g",
-+            "min_guarantee": "1 g",
-+        }
-+
-+        domain_mock.setMemoryFlags.return_value = 0
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+                "mem": True,
-+            },
-+            virt.update("vm_with_memtune_param", mem=memtune_new_val),
-+        )
-+        self.assertEqual(
-+            domain_mock.setMemoryFlags.call_args[0][0], int(2.5 * 1024 ** 2)
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(
-+            setxml.find("memtune").find("soft_limit").text, str(2048 * 1024)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("hard_limit").text, str(1024 * 1024)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("swap_hard_limit").text,
-+            str(int(2.5 * 1024 ** 2)),
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("swap_hard_limit").get("unit"), "KiB",
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("min_guarantee").attrib.get("unit"), "bytes"
-+        )
-+        self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
-+        self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
-+        self.assertEqual(setxml.find("currentMemory").text, str(int(2.5 * 1024 ** 3)))
-+        self.assertEqual(setxml.find("memory").text, str(int(0.7 * 1024 ** 3)))
-+
-+        max_slot_reverse = {
-+            "slots": "10",
-+            "max": "3096m",
-+        }
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("vm_with_memtune_param", mem=max_slot_reverse),
-+        )
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2))
-+        self.assertEqual(setxml.find("maxMemory").get("unit"), "bytes")
-+        self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10")
-+
-+        max_swap_none = {
-+            "boot": "0.7g",
-+            "current": "2.5g",
-+            "max": None,
-+            "slots": "10",
-+            "soft_limit": "2048m",
-+            "hard_limit": "1024",
-+            "swap_hard_limit": None,
-+            "min_guarantee": "1 g",
-+        }
-+
-+        domain_mock.setMemoryFlags.reset_mock()
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+                "mem": True,
-+            },
-+            virt.update("vm_with_memtune_param", mem=max_swap_none),
-+        )
-+        self.assertEqual(
-+            domain_mock.setMemoryFlags.call_args[0][0], int(2.5 * 1024 ** 2)
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(
-+            setxml.find("memtune").find("soft_limit").text, str(2048 * 1024)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("hard_limit").text, str(1024 * 1024)
-+        )
-+        self.assertEqual(setxml.find("memtune").find("swap_hard_limit"), None)
-+        self.assertEqual(
-+            setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3)
-+        )
-+        self.assertEqual(
-+            setxml.find("memtune").find("min_guarantee").attrib.get("unit"), "bytes"
-+        )
-+        self.assertEqual(setxml.find("maxMemory").text, None)
-+        self.assertEqual(setxml.find("currentMemory").text, str(int(2.5 * 1024 ** 3)))
-+        self.assertEqual(setxml.find("memory").text, str(int(0.7 * 1024 ** 3)))
-+
-+        memtune_none = {
-+            "soft_limit": None,
-+            "hard_limit": None,
-+            "swap_hard_limit": None,
-+            "min_guarantee": None,
-+        }
-+
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("vm_with_memtune_param", mem=memtune_none),
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(setxml.find("memtune").find("soft_limit"), None)
-+        self.assertEqual(setxml.find("memtune").find("hard_limit"), None)
-+        self.assertEqual(setxml.find("memtune").find("swap_hard_limit"), None)
-+        self.assertEqual(setxml.find("memtune").find("min_guarantee"), None)
-+
-+        max_none = {
-+            "max": None,
-+        }
-+
-+        self.assertEqual(
-+            {
-+                "definition": True,
-+                "disk": {"attached": [], "detached": [], "updated": []},
-+                "interface": {"attached": [], "detached": []},
-+            },
-+            virt.update("vm_with_memtune_param", mem=max_none),
-+        )
-+
-+        setxml = ET.fromstring(define_mock.call_args[0][0])
-+        self.assertEqual(setxml.find("maxMemory"), None)
-+        self.assertEqual(setxml.find("currentMemory").text, str(int(1 * 1024 ** 2)))
-+        self.assertEqual(setxml.find("memory").text, str(int(1 * 1024 ** 2)))
-+
-+    def test_handle_unit(self):
-+        """
-+        Test regex function for handling units
-+        """
-+        valid_case = [
-+            ("2", 2097152),
-+            ("42", 44040192),
-+            ("5b", 5),
-+            ("2.3Kib", 2355),
-+            ("5.8Kb", 5800),
-+            ("16MiB", 16777216),
-+            ("20 GB", 20000000000),
-+            ("16KB", 16000),
-+            (".5k", 512),
-+            ("2.k", 2048),
-+        ]
-+
-+        for key, val in valid_case:
-+            self.assertEqual(virt._handle_unit(key), val)
-+
-+        invalid_case = [
-+            ("9ib", "invalid units"),
-+            ("8byte", "invalid units"),
-+            ("512bytes", "invalid units"),
-+            ("4 Kbytes", "invalid units"),
-+            ("3.4.MB", "invalid number"),
-+            ("", "invalid number"),
-+            ("bytes", "invalid number"),
-+            ("2HB", "invalid units"),
-+        ]
-+
-+        for key, val in invalid_case:
-+            with self.assertRaises(SaltInvocationError):
-+                virt._handle_unit(key)
-+
-     def test_mixed_dict_and_list_as_profile_objects(self):
-         """
-         Test virt._nic_profile with mixed dictionaries and lists as input.
-diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py
-index 8fe892f607..1923ae5c0f 100644
---- a/tests/unit/states/test_virt.py
-+++ b/tests/unit/states/test_virt.py
-@@ -8,7 +8,6 @@ import tempfile
- import salt.states.virt as virt
- import salt.utils.files
- from salt.exceptions import CommandExecutionError, SaltInvocationError
--from salt.ext import six
- from tests.support.mixins import LoaderModuleMockMixin
- from tests.support.mock import MagicMock, mock_open, patch
- from tests.support.runtests import RUNTIME_VARS
-@@ -346,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 install=False,
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-+                stop_on_reboot=True,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -371,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 start=False,
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-+                stop_on_reboot=True,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -484,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 password=None,
-                 boot=None,
-                 test=False,
-+                stop_on_reboot=False,
-             )
-
-         # Failed definition update case
-@@ -554,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 install=False,
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-+                stop_on_reboot=False,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -596,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 boot=None,
-                 test=True,
-                 boot_dev=None,
-+                stop_on_reboot=False,
-             )
-
-         # No changes case
-@@ -631,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 boot=None,
-                 test=True,
-                 boot_dev=None,
-+                stop_on_reboot=False,
-             )
-
-     def test_running(self):
-@@ -708,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 pub_key=None,
-                 priv_key=None,
-                 boot_dev=None,
-+                stop_on_reboot=False,
-                 connection=None,
-                 username=None,
-                 password=None,
-@@ -770,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-                 boot_dev="network hd",
-+                stop_on_reboot=True,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -795,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-                 boot_dev="network hd",
-+                stop_on_reboot=True,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -940,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 boot=None,
-                 test=False,
-                 boot_dev=None,
-+                stop_on_reboot=False,
-             )
-
-         # Failed definition update case
-@@ -1013,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 install=False,
-                 pub_key="/path/to/key.pub",
-                 priv_key="/path/to/key",
-+                stop_on_reboot=True,
-                 connection="someconnection",
-                 username="libvirtuser",
-                 password="supersecret",
-@@ -1059,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
-                 boot=None,
-                 test=True,
-                 boot_dev=None,
-+                stop_on_reboot=False,
-             )
-             start_mock.assert_not_called()
-
-@@ 
-1096,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - boot=None, - test=True, - boot_dev=None, -+ stop_on_reboot=False, - ) - - def test_stopped(self): --- -2.29.2 - - diff --git a/backport-of-upstream-pr59492-to-3002.2-404.patch b/backport-of-upstream-pr59492-to-3002.2-404.patch deleted file mode 100644 index ffb1cde..0000000 --- a/backport-of-upstream-pr59492-to-3002.2-404.patch +++ /dev/null @@ -1,227 +0,0 @@ -From fba6631e0a66a5f8ea76a104e9acf385ce06471c Mon Sep 17 00:00:00 2001 -From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> -Date: Wed, 18 Aug 2021 15:05:30 +0300 -Subject: [PATCH] Backport of upstream PR59492 to 3002.2 (#404) - -* Fix failing integration tests - -* Fix unless logic and failing tests - -* Revert some of the changes in the onlyif code - -Co-authored-by: twangboy ---- - salt/state.py | 24 +++++++++------ - .../files/file/base/issue-35384.sls | 7 +++++ - tests/unit/test_state.py | 30 ++++++++++++++----- - 3 files changed, 44 insertions(+), 17 deletions(-) - -diff --git a/salt/state.py b/salt/state.py -index 070a914636..64c5225728 100644 ---- a/salt/state.py -+++ b/salt/state.py -@@ -929,7 +929,8 @@ class State: - - def _run_check_onlyif(self, low_data, cmd_opts): - """ -- Check that unless doesn't return 0, and that onlyif returns a 0. -+ Make sure that all commands return True for the state to run. If any -+ command returns False (non 0), the state will not run - """ - ret = {"result": False} - -@@ -938,7 +939,9 @@ class State: - else: - low_data_onlyif = low_data["onlyif"] - -+ # If any are False the state will NOT run - def _check_cmd(cmd): -+ # Don't run condition (False) - if cmd != 0 and ret["result"] is False: - ret.update( - { -@@ -1001,7 +1004,8 @@ class State: - - def _run_check_unless(self, low_data, cmd_opts): - """ -- Check that unless doesn't return 0, and that onlyif returns a 0. -+ Check if any of the commands return False (non 0). If any are False the -+ state will run. 
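The rule this new docstring states reduces to a one-liner. A minimal sketch of the decision logic, assuming each ``unless`` entry has already been executed and yielded a return code (illustrative only, not Salt's actual implementation):

.. code-block:: python

    def runs_despite_unless(retcodes):
        """State runs if any unless command failed (non-zero exit)."""
        return any(rc != 0 for rc in retcodes)

    assert runs_despite_unless([0, 1, 0])      # one failure -> state runs
    assert not runs_despite_unless([0, 0, 0])  # all succeed -> skip_watch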
- """ - ret = {"result": False} - -@@ -1010,8 +1014,10 @@ class State: - else: - low_data_unless = low_data["unless"] - -+ # If any are False the state will run - def _check_cmd(cmd): -- if cmd == 0 and ret["result"] is False: -+ # Don't run condition -+ if cmd == 0: - ret.update( - { - "comment": "unless condition is true", -@@ -1020,9 +1026,10 @@ class State: - } - ) - return False -- elif cmd != 0: -+ else: -+ ret.pop("skip_watch", None) - ret.update({"comment": "unless condition is false", "result": False}) -- return True -+ return True - - for entry in low_data_unless: - if isinstance(entry, str): -@@ -1034,7 +1041,7 @@ class State: - except CommandExecutionError: - # Command failed, so notify unless to skip the item - cmd = 0 -- if not _check_cmd(cmd): -+ if _check_cmd(cmd): - return ret - elif isinstance(entry, dict): - if "fun" not in entry: -@@ -1047,7 +1054,7 @@ class State: - if get_return: - result = salt.utils.data.traverse_dict_and_list(result, get_return) - if self.state_con.get("retcode", 0): -- if not _check_cmd(self.state_con["retcode"]): -+ if _check_cmd(self.state_con["retcode"]): - return ret - elif result: - ret.update( -@@ -1057,11 +1064,11 @@ class State: - "result": True, - } - ) -- return ret - else: - ret.update( - {"comment": "unless condition is false", "result": False} - ) -+ return ret - else: - ret.update( - { -@@ -1069,7 +1076,6 @@ class State: - "result": False, - } - ) -- return ret - - # No reason to stop, return ret - return ret -diff --git a/tests/integration/files/file/base/issue-35384.sls b/tests/integration/files/file/base/issue-35384.sls -index 3c41617ca8..2aa436bb37 100644 ---- a/tests/integration/files/file/base/issue-35384.sls -+++ b/tests/integration/files/file/base/issue-35384.sls -@@ -2,5 +2,12 @@ cmd_run_unless_multiple: - cmd.run: - - name: echo "hello" - - unless: -+ {% if grains["os"] == "Windows" %} -+ - "exit 0" -+ - "exit 1" -+ - "exit 0" -+ {% else %} - - "$(which true)" - - "$(which false)" -+ - "$(which true)" -+ {% endif %} -diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py -index 95018a9cf3..79a261d837 100644 ---- a/tests/unit/test_state.py -+++ b/tests/unit/test_state.py -@@ -142,7 +142,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - def test_verify_onlyif_cmd_error(self): - """ - Simulates a failure in cmd.retcode from onlyif -- This could occur is runas is specified with a user that does not exist -+ This could occur if runas is specified with a user that does not exist - """ - low_data = { - "onlyif": "somecommand", -@@ -175,7 +175,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - def test_verify_unless_cmd_error(self): - """ - Simulates a failure in cmd.retcode from unless -- This could occur is runas is specified with a user that does not exist -+ This could occur if runas is specified with a user that does not exist - """ - low_data = { - "unless": "somecommand", -@@ -206,6 +206,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - self.assertEqual(expected_result, return_result) - - def test_verify_unless_list_cmd(self): -+ """ -+ If any of the unless commands return False (non 0) then the state should -+ run (no skip_watch). 
-+ """ - low_data = { - "state": "cmd", - "name": 'echo "something"', -@@ -217,9 +221,8 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - "fun": "run", - } - expected_result = { -- "comment": "unless condition is true", -- "result": True, -- "skip_watch": True, -+ "comment": "unless condition is false", -+ "result": False, - } - with patch("salt.state.State._gather_pillar") as state_patch: - minion_opts = self.get_temp_config("minion") -@@ -228,6 +231,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - self.assertEqual(expected_result, return_result) - - def test_verify_unless_list_cmd_different_order(self): -+ """ -+ If any of the unless commands return False (non 0) then the state should -+ run (no skip_watch). The order shouldn't matter. -+ """ - low_data = { - "state": "cmd", - "name": 'echo "something"', -@@ -239,9 +246,8 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - "fun": "run", - } - expected_result = { -- "comment": "unless condition is true", -- "result": True, -- "skip_watch": True, -+ "comment": "unless condition is false", -+ "result": False, - } - with patch("salt.state.State._gather_pillar") as state_patch: - minion_opts = self.get_temp_config("minion") -@@ -272,6 +278,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - self.assertEqual(expected_result, return_result) - - def test_verify_unless_list_cmd_valid(self): -+ """ -+ If any of the unless commands return False (non 0) then the state should -+ run (no skip_watch). This tests all commands return False. -+ """ - low_data = { - "state": "cmd", - "name": 'echo "something"', -@@ -308,6 +318,10 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin): - self.assertEqual(expected_result, return_result) - - def test_verify_unless_list_cmd_invalid(self): -+ """ -+ If any of the unless commands return False (non 0) then the state should -+ run (no skip_watch). 
This tests all commands return True -+ """ - low_data = { - "state": "cmd", - "name": 'echo "something"', --- -2.32.0 - - diff --git a/backport-thread.is_alive-fix-390.patch b/backport-thread.is_alive-fix-390.patch deleted file mode 100644 index a4bc135..0000000 --- a/backport-thread.is_alive-fix-390.patch +++ /dev/null @@ -1,127 +0,0 @@ -From a782af246a2f3d4b91afee2ee847c87f71e8904b Mon Sep 17 00:00:00 2001 -From: Alexander Graul -Date: Fri, 25 Jun 2021 13:34:38 +0200 -Subject: [PATCH] Backport Thread.is_alive fix (#390) - -* Change thread.isAlive() to thread.is_alive() - -(cherry picked from commit b1dc0cee03896c8abad55a609805b0be6c7aaefa) - -* Run pre-commit on salt/utils/timed_subprocess.py - -(cherry picked from commit 178e3b83e6c21abf5d6db454c19c104ceb8bd92c) - -* Fix the six removal made by pre-commit - -(cherry picked from commit aaa8ca3b7f129568637799d6d49d7ad3708f73bc) - -* Remove the PY2 code in salt/utils/timed_subprocess.py - -(cherry picked from commit 3a702a510b965e9af1ad318c953e19114925357e) - -Co-authored-by: Petr Messner -Co-authored-by: Petr Messner ---- - salt/utils/timed_subprocess.py | 39 ++++++++++++++-------------------- - 1 file changed, 16 insertions(+), 23 deletions(-) - -diff --git a/salt/utils/timed_subprocess.py b/salt/utils/timed_subprocess.py -index 5c4ac35ac3..b043a3bde2 100644 ---- a/salt/utils/timed_subprocess.py -+++ b/salt/utils/timed_subprocess.py -@@ -1,8 +1,6 @@ --# -*- coding: utf-8 -*- - """ - For running command line executables with a timeout - """ --from __future__ import absolute_import, print_function, unicode_literals - - import shlex - import subprocess -@@ -10,10 +8,9 @@ import threading - - import salt.exceptions - import salt.utils.data --from salt.ext import six - - --class TimedProc(object): -+class TimedProc: - """ - Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs - """ -@@ -46,7 +43,7 @@ class TimedProc(object): - - if self.timeout and not isinstance(self.timeout, (int, float)): - raise salt.exceptions.TimedProcTimeoutError( -- "Error: timeout {0} must be a number".format(self.timeout) -+ "Error: timeout {} must be a number".format(self.timeout) - ) - if kwargs.get("shell", False): - args = salt.utils.data.decode(args, to_str=True) -@@ -59,28 +56,24 @@ class TimedProc(object): - try: - args = shlex.split(args) - except AttributeError: -- args = shlex.split(six.text_type(args)) -+ args = shlex.split(str(args)) - str_args = [] - for arg in args: -- if not isinstance(arg, six.string_types): -- str_args.append(six.text_type(arg)) -+ if not isinstance(arg, str): -+ str_args.append(str(arg)) - else: - str_args.append(arg) - args = str_args - else: -- if not isinstance(args, (list, tuple, six.string_types)): -+ if not isinstance(args, (list, tuple, str)): - # Handle corner case where someone does a 'cmd.run 3' -- args = six.text_type(args) -+ args = str(args) - # Ensure that environment variables are strings -- for key, val in six.iteritems(kwargs.get("env", {})): -- if not isinstance(val, six.string_types): -- kwargs["env"][key] = six.text_type(val) -- if not isinstance(key, six.string_types): -- kwargs["env"][six.text_type(key)] = kwargs["env"].pop(key) -- if six.PY2 and "env" in kwargs: -- # Ensure no unicode in custom env dict, as it can cause -- # problems with subprocess. 
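The ``isAlive`` to ``is_alive`` rename this patch backports matters because TimedProc relies on the join-then-check idiom to enforce its timeout. A standalone sketch of that idiom; the command and the timeout value are placeholders, not taken from the patch:

.. code-block:: python

    import subprocess
    import threading

    proc = subprocess.Popen(["sleep", "60"])
    reader = threading.Thread(target=proc.communicate)
    reader.start()
    reader.join(5)  # wait up to 5 seconds for the subprocess to finish
    if reader.is_alive():  # isAlive() was removed in Python 3.9
        proc.kill()  # best-effort cleanup, as TimedProc does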
-- kwargs["env"] = salt.utils.data.encode_dict(kwargs["env"]) -+ for key, val in kwargs.get("env", {}).items(): -+ if not isinstance(val, str): -+ kwargs["env"][key] = str(val) -+ if not isinstance(key, str): -+ kwargs["env"][str(key)] = kwargs["env"].pop(key) - args = salt.utils.data.decode(args) - self.process = subprocess.Popen(args, **kwargs) - self.command = args -@@ -103,18 +96,18 @@ class TimedProc(object): - rt = threading.Thread(target=receive) - rt.start() - rt.join(self.timeout) -- if rt.isAlive(): -+ if rt.is_alive(): - # Subprocess cleanup (best effort) - self.process.kill() - - def terminate(): -- if rt.isAlive(): -+ if rt.is_alive(): - self.process.terminate() - - threading.Timer(10, terminate).start() - raise salt.exceptions.TimedProcTimeoutError( -- "{0} : Timed out after {1} seconds".format( -- self.command, six.text_type(self.timeout), -+ "{} : Timed out after {} seconds".format( -+ self.command, str(self.timeout), - ) - ) - return self.process.returncode --- -2.32.0 - - diff --git a/backport-virt-patches-from-3001-256.patch b/backport-virt-patches-from-3001-256.patch deleted file mode 100644 index 4a32152..0000000 --- a/backport-virt-patches-from-3001-256.patch +++ /dev/null @@ -1,1659 +0,0 @@ -From 32559016ba2bd306a3a027a2191857f24258fc46 Mon Sep 17 00:00:00 2001 -From: Cedric Bosdonnat -Date: Mon, 7 Sep 2020 15:00:40 +0200 -Subject: [PATCH] Backport virt patches from 3001+ (#256) - -* Fix various spelling mistakes in master branch (#55954) - -* Fix typo of additional - -Signed-off-by: Benjamin Drung - -* Fix typo of against - -Signed-off-by: Benjamin Drung - -* Fix typo of amount - -Signed-off-by: Benjamin Drung - -* Fix typo of argument - -Signed-off-by: Benjamin Drung - -* Fix typo of attempt - -Signed-off-by: Benjamin Drung - -* Fix typo of bandwidth - -Signed-off-by: Benjamin Drung - -* Fix typo of caught - -Signed-off-by: Benjamin Drung - -* Fix typo of compatibility - -Signed-off-by: Benjamin Drung - -* Fix typo of consistency - -Signed-off-by: Benjamin Drung - -* Fix typo of conversions - -Signed-off-by: Benjamin Drung - -* Fix typo of corresponding - -Signed-off-by: Benjamin Drung - -* Fix typo of dependent - -Signed-off-by: Benjamin Drung - -* Fix typo of dictionary - -Signed-off-by: Benjamin Drung - -* Fix typo of disabled - -Signed-off-by: Benjamin Drung - -* Fix typo of adapters - -Signed-off-by: Benjamin Drung - -* Fix typo of disassociates - -Signed-off-by: Benjamin Drung - -* Fix typo of changes - -Signed-off-by: Benjamin Drung - -* Fix typo of command - -Signed-off-by: Benjamin Drung - -* Fix typo of communicate - -Signed-off-by: Benjamin Drung - -* Fix typo of community - -Signed-off-by: Benjamin Drung - -* Fix typo of configuration - -Signed-off-by: Benjamin Drung - -* Fix typo of default - -Signed-off-by: Benjamin Drung - -* Fix typo of absence - -Signed-off-by: Benjamin Drung - -* Fix typo of attribute - -Signed-off-by: Benjamin Drung - -* Fix typo of container - -Signed-off-by: Benjamin Drung - -* Fix typo of described - -Signed-off-by: Benjamin Drung - -* Fix typo of existence - -Signed-off-by: Benjamin Drung - -* Fix typo of explicit - -Signed-off-by: Benjamin Drung - -* Fix typo of formatted - -Signed-off-by: Benjamin Drung - -* Fix typo of guarantees - -Signed-off-by: Benjamin Drung - -* Fix typo of hexadecimal - -Signed-off-by: Benjamin Drung - -* Fix typo of hierarchy - -Signed-off-by: Benjamin Drung - -* Fix typo of initialize - -Signed-off-by: Benjamin Drung - -* Fix typo of label - -Signed-off-by: Benjamin Drung - -* Fix typo of 
management - -Signed-off-by: Benjamin Drung - -* Fix typo of mismatch - -Signed-off-by: Benjamin Drung - -* Fix typo of don't - -Signed-off-by: Benjamin Drung - -* Fix typo of manually - -Signed-off-by: Benjamin Drung - -* Fix typo of getting - -Signed-off-by: Benjamin Drung - -* Fix typo of information - -Signed-off-by: Benjamin Drung - -* Fix typo of meant - -Signed-off-by: Benjamin Drung - -* Fix typo of nonexistent - -Signed-off-by: Benjamin Drung - -* Fix typo of occur - -Signed-off-by: Benjamin Drung - -* Fix typo of omitted - -Signed-off-by: Benjamin Drung - -* Fix typo of normally - -Signed-off-by: Benjamin Drung - -* Fix typo of overridden - -Signed-off-by: Benjamin Drung - -* Fix typo of repository - -Signed-off-by: Benjamin Drung - -* Fix typo of separate - -Signed-off-by: Benjamin Drung - -* Fix typo of separator - -Signed-off-by: Benjamin Drung - -* Fix typo of specific - -Signed-off-by: Benjamin Drung - -* Fix typo of successful - -Signed-off-by: Benjamin Drung - -* Fix typo of succeeded - -Signed-off-by: Benjamin Drung - -* Fix typo of support - -Signed-off-by: Benjamin Drung - -* Fix typo of version - -Signed-off-by: Benjamin Drung - -* Fix typo of that's - -Signed-off-by: Benjamin Drung - -* Fix typo of "will be removed" - -Signed-off-by: Benjamin Drung - -* Fix typo of release - -Signed-off-by: Benjamin Drung - -* Fix typo of synchronize - -Signed-off-by: Benjamin Drung - -* Fix typo of python - -Signed-off-by: Benjamin Drung - -* Fix typo of usually - -Signed-off-by: Benjamin Drung - -* Fix typo of override - -Signed-off-by: Benjamin Drung - -* Fix typo of running - -Signed-off-by: Benjamin Drung - -* Fix typo of whether - -Signed-off-by: Benjamin Drung - -* Fix typo of package - -Signed-off-by: Benjamin Drung - -* Fix typo of persist - -Signed-off-by: Benjamin Drung - -* Fix typo of preferred - -Signed-off-by: Benjamin Drung - -* Fix typo of present - -Signed-off-by: Benjamin Drung - -* Fix typo of run - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of "allows someone to..." - -"Allows to" is not correct English. It must either be "allows someone -to" or "allows doing". - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of "number of times" - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of msgpack - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of daemonized - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of daemons - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of extemporaneous - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of instead - -Signed-off-by: Benjamin Drung - -* Fix spelling mistake of returning - -Signed-off-by: Benjamin Drung - -* Fix literal comparissons - -* virt: Convert cpu_baseline ElementTree to string - -In commit 0f5184c (Remove minidom use in virt module) the value -of `cpu` become `xml.etree.ElementTree.Element` and no longer -has a method `toxml()`. 
This results in the following error: - -$ salt '*' virt.cpu_baseline -host2: - The minion function caused an exception: Traceback (most recent call last): - File "/usr/lib/python3.7/site-packages/salt/minion.py", line 1675, in _thread_return - return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) - File "/usr/lib/python3.7/site-packages/salt/executors/direct_call.py", line 12, in execute - return func(*args, **kwargs) - File "/usr/lib/python3.7/site-packages/salt/modules/virt.py", line 4410, in cpu_baseline - return cpu.toxml() - AttributeError: 'xml.etree.ElementTree.Element' object has no attribute 'toxml' - -Signed-off-by: Radostin Stoyanov - -* PR#57374 backport - -virt: pool secret should be undefined in pool_undefine not pool_delete - -virt: handle build differently depending on the pool type - -virt: don't fail if the pool secret has been removed - -* PR #57396 backport - -add firmware auto select feature - -* virt: Update dependencies - -Closes: #57641 - -Signed-off-by: Radostin Stoyanov - -* use null in sls file to map None object - -add sls file example - -reword doc - -* Update virt module and states and their tests to python3 - -* PR #57545 backport - -Move virt.init boot_dev parameter away from the kwargs - -virt: handle boot device in virt.update() - -virt: add boot_dev parameter to virt.running state - -* PR #57431 backport - -virt: Handle no available hypervisors - -virt: Remove unused imports - -* Blacken salt - -* Add method to remove circular references in data objects and add test (#54930) - -* Add method to remove circular references in data objects and add test - -* remove trailing whitespace - -* Blacken changed files - -Co-authored-by: xeacott -Co-authored-by: Frode Gundersen -Co-authored-by: Daniel A. Wozniak - -* PR #58332 backport - -virt: add debug log with VM XML definition - -Add xmlutil.get_xml_node() helper function - -Add salt.utils.data.get_value function - -Add change_xml() function to xmlutil - -virt.update: refactor the XML diffing code - -virt.test_update: move some code to make test more readable - -Co-authored-by: Benjamin Drung -Co-authored-by: Pedro Algarvio -Co-authored-by: Radostin Stoyanov -Co-authored-by: Firefly -Co-authored-by: Blacken Salt -Co-authored-by: Joe Eacott <31625359+xeacott@users.noreply.github.com> -Co-authored-by: xeacott -Co-authored-by: Frode Gundersen -Co-authored-by: Daniel A. 
Wozniak ---- - changelog/56454.fixed | 1 + - changelog/57544.added | 1 + - changelog/58331.fixed | 1 + - salt/modules/virt.py | 270 +++++++++++++---------- - salt/states/virt.py | 88 ++++++-- - salt/templates/virt/libvirt_domain.jinja | 29 +-- - salt/utils/xmlutil.py | 4 +- - tests/unit/modules/test_virt.py | 159 +++++++++---- - tests/unit/states/test_virt.py | 93 +++++++- - tests/unit/utils/test_data.py | 32 --- - 10 files changed, 441 insertions(+), 237 deletions(-) - create mode 100644 changelog/56454.fixed - create mode 100644 changelog/57544.added - create mode 100644 changelog/58331.fixed - -diff --git a/changelog/56454.fixed b/changelog/56454.fixed -new file mode 100644 -index 0000000000..978b4b6e03 ---- /dev/null -+++ b/changelog/56454.fixed -@@ -0,0 +1 @@ -+Better handle virt.pool_rebuild in virt.pool_running and virt.pool_defined states -diff --git a/changelog/57544.added b/changelog/57544.added -new file mode 100644 -index 0000000000..52071cf2c7 ---- /dev/null -+++ b/changelog/57544.added -@@ -0,0 +1 @@ -+Allow setting VM boot devices order in virt.running and virt.defined states -diff --git a/changelog/58331.fixed b/changelog/58331.fixed -new file mode 100644 -index 0000000000..4b8f78dd53 ---- /dev/null -+++ b/changelog/58331.fixed -@@ -0,0 +1 @@ -+Leave boot parameters untouched if boot parameter is set to None in virt.update -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index fb27397baa..ec40f08359 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -94,17 +94,13 @@ from xml.sax import saxutils - import jinja2.exceptions - import salt.utils.files - import salt.utils.json --import salt.utils.network - import salt.utils.path - import salt.utils.stringutils - import salt.utils.templates --import salt.utils.validate.net --import salt.utils.versions - import salt.utils.xmlutil as xmlutil - import salt.utils.yaml - from salt._compat import ipaddress - from salt.exceptions import CommandExecutionError, SaltInvocationError --from salt.ext import six - from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin - from salt.ext.six.moves.urllib.parse import urlparse, urlunparse - from salt.utils.virt import check_remote, download_remote -@@ -647,6 +643,7 @@ def _gen_xml( - arch, - graphics=None, - boot=None, -+ boot_dev=None, - **kwargs - ): - """ -@@ -680,15 +677,17 @@ def _gen_xml( - graphics = None - context["graphics"] = graphics - -- if "boot_dev" in kwargs: -- context["boot_dev"] = [] -- for dev in kwargs["boot_dev"].split(): -- context["boot_dev"].append(dev) -- else: -- context["boot_dev"] = ["hd"] -+ context["boot_dev"] = boot_dev.split() if boot_dev is not None else ["hd"] - - context["boot"] = boot if boot else {} - -+ # if efi parameter is specified, prepare os_attrib -+ efi_value = context["boot"].get("efi", None) if boot else None -+ if efi_value is True: -+ context["boot"]["os_attrib"] = "firmware='efi'" -+ elif efi_value is not None and type(efi_value) != bool: -+ raise SaltInvocationError("Invalid efi value") -+ - if os_type == "xen": - # Compute the Xen PV boot method - if __grains__["os_family"] == "Suse": -@@ -1519,17 +1518,24 @@ def _handle_remote_boot_params(orig_boot): - new_boot = orig_boot.copy() - keys = orig_boot.keys() - cases = [ -+ {"efi"}, -+ {"kernel", "initrd", "efi"}, -+ {"kernel", "initrd", "cmdline", "efi"}, - {"loader", "nvram"}, - {"kernel", "initrd"}, - {"kernel", "initrd", "cmdline"}, -- {"loader", "nvram", "kernel", "initrd"}, -- {"loader", "nvram", "kernel", "initrd", "cmdline"}, -+ 
{"kernel", "initrd", "loader", "nvram"}, -+ {"kernel", "initrd", "cmdline", "loader", "nvram"}, - ] - - try: - if keys in cases: - for key in keys: -- if orig_boot.get(key) is not None and check_remote(orig_boot.get(key)): -+ if key == "efi" and type(orig_boot.get(key)) == bool: -+ new_boot[key] = orig_boot.get(key) -+ elif orig_boot.get(key) is not None and check_remote( -+ orig_boot.get(key) -+ ): - if saltinst_dir is None: - os.makedirs(CACHE_DIR) - saltinst_dir = CACHE_DIR -@@ -1537,12 +1543,41 @@ def _handle_remote_boot_params(orig_boot): - return new_boot - else: - raise SaltInvocationError( -- "Invalid boot parameters, (kernel, initrd) or/and (loader, nvram) must be both present" -+ "Invalid boot parameters,It has to follow this combination: [(kernel, initrd) or/and cmdline] or/and [(loader, nvram) or efi]" - ) - except Exception as err: # pylint: disable=broad-except - raise err - - -+def _handle_efi_param(boot, desc): -+ """ -+ Checks if boot parameter contains efi boolean value, if so, handles the firmware attribute. -+ :param boot: The boot parameters passed to the init or update functions. -+ :param desc: The XML description of that domain. -+ :return: A boolean value. -+ """ -+ efi_value = boot.get("efi", None) if boot else None -+ parent_tag = desc.find("os") -+ os_attrib = parent_tag.attrib -+ -+ # newly defined vm without running, loader tag might not be filled yet -+ if efi_value is False and os_attrib != {}: -+ parent_tag.attrib.pop("firmware", None) -+ return True -+ -+ # check the case that loader tag might be present. This happens after the vm ran -+ elif type(efi_value) == bool and os_attrib == {}: -+ if efi_value is True and parent_tag.find("loader") is None: -+ parent_tag.set("firmware", "efi") -+ if efi_value is False and parent_tag.find("loader") is not None: -+ parent_tag.remove(parent_tag.find("loader")) -+ parent_tag.remove(parent_tag.find("nvram")) -+ return True -+ elif type(efi_value) != bool: -+ raise SaltInvocationError("Invalid efi value") -+ return False -+ -+ - def init( - name, - cpu, -@@ -1563,6 +1598,7 @@ def init( - os_type=None, - arch=None, - boot=None, -+ boot_dev=None, - **kwargs - ): - """ -@@ -1632,7 +1668,8 @@ def init( - This is an optional parameter, all of the keys are optional within the dictionary. The structure of - the dictionary is documented in :ref:`init-boot-def`. If a remote path is provided to kernel or initrd, - salt will handle the downloading of the specified remote file and modify the XML accordingly. -- To boot VM with UEFI, specify loader and nvram path. -+ To boot VM with UEFI, specify loader and nvram path or specify 'efi': ``True`` if your libvirtd version -+ is >= 5.2.0 and QEMU >= 3.0.0. - - .. versionadded:: 3000 - -@@ -1646,6 +1683,12 @@ def init( - 'nvram': '/usr/share/OVMF/OVMF_VARS.ms.fd' - } - -+ :param boot_dev: -+ Space separated list of devices to boot from sorted by decreasing priority. -+ Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. -+ -+ By default, the value will ``"hd"``. -+ - .. _init-boot-def: - - .. rubric:: Boot parameters definition -@@ -1671,6 +1714,11 @@ def init( - - .. versionadded:: sodium - -+ efi -+ A boolean value. -+ -+ .. versionadded:: sodium -+ - .. _init-nic-def: - - .. 
rubric:: Network Interfaces Definitions -@@ -1855,6 +1903,8 @@ def init( - for x in y - } - ) -+ if len(hypervisors) == 0: -+ raise SaltInvocationError("No supported hypervisors were found") - virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0] - - # esxi used to be a possible value for the hypervisor: map it to vmware since it's the same -@@ -1962,8 +2012,10 @@ def init( - arch, - graphics, - boot, -+ boot_dev, - **kwargs - ) -+ log.debug("New virtual machine definition: %s", vm_xml) - conn.defineXML(vm_xml) - except libvirt.libvirtError as err: - conn.close() -@@ -2189,6 +2241,7 @@ def update( - live=True, - boot=None, - test=False, -+ boot_dev=None, - **kwargs - ): - """ -@@ -2248,11 +2301,28 @@ def update( - - Refer to :ref:`init-boot-def` for the complete boot parameter description. - -- To update any boot parameters, specify the new path for each. To remove any boot parameters, -- pass a None object, for instance: 'kernel': ``None``. -+ To update any boot parameters, specify the new path for each. To remove any boot parameters, pass ``None`` object, -+ for instance: 'kernel': ``None``. To switch back to BIOS boot, specify ('loader': ``None`` and 'nvram': ``None``) -+ or 'efi': ``False``. Please note that ``None`` is mapped to ``null`` in sls file, pass ``null`` in sls file instead. -+ -+ SLS file Example: -+ -+ .. code-block:: yaml -+ -+ - boot: -+ loader: null -+ nvram: null - - .. versionadded:: 3000 - -+ :param boot_dev: -+ Space separated list of devices to boot from sorted by decreasing priority. -+ Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. -+ -+ By default, the value will ``"hd"``. -+ -+ .. versionadded:: Magnesium -+ - :param test: run in dry-run mode if set to True - - .. versionadded:: sodium -@@ -2327,67 +2397,54 @@ def update( - cpu_node.set("current", str(cpu)) - need_update = True - -- # Update the kernel boot parameters -- boot_tags = ["kernel", "initrd", "cmdline", "loader", "nvram"] -- parent_tag = desc.find("os") -- -- # We need to search for each possible subelement, and update it. -- for tag in boot_tags: -- # The Existing Tag... -- found_tag = parent_tag.find(tag) -- -- # The new value -- boot_tag_value = boot.get(tag, None) if boot else None -- -- # Existing tag is found and values don't match -- if found_tag is not None and found_tag.text != boot_tag_value: -- -- # If the existing tag is found, but the new value is None -- # remove it. If the existing tag is found, and the new value -- # doesn't match update it. In either case, mark for update. -- if boot_tag_value is None and boot is not None and parent_tag is not None: -- parent_tag.remove(found_tag) -- else: -- found_tag.text = boot_tag_value -- -- # If the existing tag is loader or nvram, we need to update the corresponding attribute -- if found_tag.tag == "loader" and boot_tag_value is not None: -- found_tag.set("readonly", "yes") -- found_tag.set("type", "pflash") -- -- if found_tag.tag == "nvram" and boot_tag_value is not None: -- found_tag.set("template", found_tag.text) -- found_tag.text = None -+ def _set_loader(node, value): -+ salt.utils.xmlutil.set_node_text(node, value) -+ if value is not None: -+ node.set("readonly", "yes") -+ node.set("type", "pflash") - -- need_update = True -+ def _set_nvram(node, value): -+ node.set("template", value) - -- # Need to check for parent tag, and add it if it does not exist. -- # Add a subelement and set the value to the new value, and then -- # mark for update. 
-- if parent_tag is not None: -- child_tag = ElementTree.SubElement(parent_tag, tag) -- else: -- new_parent_tag = ElementTree.Element("os") -- child_tag = ElementTree.SubElement(new_parent_tag, tag) -- -- child_tag.text = boot_tag_value -- -- # If the newly created tag is loader or nvram, we need to update the corresponding attribute -- if child_tag.tag == "loader": -- child_tag.set("readonly", "yes") -- child_tag.set("type", "pflash") -+ def _set_with_mib_unit(node, value): -+ node.text = str(value) -+ node.set("unit", "MiB") - -- if child_tag.tag == "nvram": -- child_tag.set("template", child_tag.text) -- child_tag.text = None -+ # Update the kernel boot parameters -+ params_mapping = [ -+ {"path": "boot:kernel", "xpath": "os/kernel"}, -+ {"path": "boot:initrd", "xpath": "os/initrd"}, -+ {"path": "boot:cmdline", "xpath": "os/cmdline"}, -+ {"path": "boot:loader", "xpath": "os/loader", "set": _set_loader}, -+ {"path": "boot:nvram", "xpath": "os/nvram", "set": _set_nvram}, -+ # Update the memory, note that libvirt outputs all memory sizes in KiB -+ { -+ "path": "mem", -+ "xpath": "memory", -+ "get": lambda n: int(n.text) / 1024, -+ "set": _set_with_mib_unit, -+ }, -+ { -+ "path": "mem", -+ "xpath": "currentMemory", -+ "get": lambda n: int(n.text) / 1024, -+ "set": _set_with_mib_unit, -+ }, -+ { -+ "path": "boot_dev:{dev}", -+ "xpath": "os/boot[$dev]", -+ "get": lambda n: n.get("dev"), -+ "set": lambda n, v: n.set("dev", v), -+ "del": salt.utils.xmlutil.del_attribute("dev"), -+ }, -+ ] - -- # Update the memory, note that libvirt outputs all memory sizes in KiB -- for mem_node_name in ["memory", "currentMemory"]: -- mem_node = desc.find(mem_node_name) -- if mem and int(mem_node.text) != mem * 1024: -- mem_node.text = str(mem) -- mem_node.set("unit", "MiB") -- need_update = True -+ data = {k: v for k, v in locals().items() if bool(v)} -+ if boot_dev: -+ data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} -+ need_update = need_update or salt.utils.xmlutil.change_xml( -+ desc, data, params_mapping -+ ) - - # Update the XML definition with the new disks and diff changes - devices_node = desc.find("devices") -@@ -2434,9 +2491,9 @@ def update( - _disk_volume_create(conn, all_disks[idx]) - - if not test: -- conn.defineXML( -- salt.utils.stringutils.to_str(ElementTree.tostring(desc)) -- ) -+ xml_desc = ElementTree.tostring(desc) -+ log.debug("Update virtual machine definition: %s", xml_desc) -+ conn.defineXML(salt.utils.stringutils.to_str(xml_desc)) - status["definition"] = True - except libvirt.libvirtError as err: - conn.close() -@@ -3218,24 +3275,19 @@ def get_profiles(hypervisor=None, **kwargs): - for x in y - } - ) -- default_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0] -+ if len(hypervisors) == 0: -+ raise SaltInvocationError("No supported hypervisors were found") - - if not hypervisor: -- hypervisor = default_hypervisor -+ hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0] - virtconf = __salt__["config.get"]("virt", {}) - for typ in ["disk", "nic"]: - _func = getattr(sys.modules[__name__], "_{}_profile".format(typ)) -- ret[typ] = { -- "default": _func( -- "default", hypervisor if hypervisor else default_hypervisor -- ) -- } -+ ret[typ] = {"default": _func("default", hypervisor)} - if typ in virtconf: - ret.setdefault(typ, {}) - for prf in virtconf[typ]: -- ret[typ][prf] = _func( -- prf, hypervisor if hypervisor else default_hypervisor -- ) -+ ret[typ][prf] = _func(prf, hypervisor) - return ret - - -@@ -4043,7 +4095,7 @@ def purge(vm_, 
dirs=False, removables=False, **kwargs): - directories.add(os.path.dirname(disks[disk]["file"])) - else: - # We may have a volume to delete here -- matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"],) -+ matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"]) - if matcher: - pool_name = matcher.group("pool") - pool = None -@@ -5431,7 +5483,7 @@ def list_networks(**kwargs): - - def network_info(name=None, **kwargs): - """ -- Return informations on a virtual network provided its name. -+ Return information on a virtual network provided its name. - - :param name: virtual network name - :param connection: libvirt connection URI, overriding defaults -@@ -6028,15 +6080,19 @@ def _pool_set_secret( - if secret_type: - # Get the previously defined secret if any - secret = None -- if usage: -- usage_type = ( -- libvirt.VIR_SECRET_USAGE_TYPE_CEPH -- if secret_type == "ceph" -- else libvirt.VIR_SECRET_USAGE_TYPE_ISCSI -- ) -- secret = conn.secretLookupByUsage(usage_type, usage) -- elif uuid: -- secret = conn.secretLookupByUUIDString(uuid) -+ try: -+ if usage: -+ usage_type = ( -+ libvirt.VIR_SECRET_USAGE_TYPE_CEPH -+ if secret_type == "ceph" -+ else libvirt.VIR_SECRET_USAGE_TYPE_ISCSI -+ ) -+ secret = conn.secretLookupByUsage(usage_type, usage) -+ elif uuid: -+ secret = conn.secretLookupByUUIDString(uuid) -+ except libvirt.libvirtError as err: -+ # For some reason the secret has been removed. Don't fail since we'll recreate it -+ log.info("Secret not found: %s", err.get_error_message()) - - # Create secret if needed - if not secret: -@@ -6288,7 +6344,7 @@ def list_pools(**kwargs): - - def pool_info(name=None, **kwargs): - """ -- Return informations on a storage pool provided its name. -+ Return information on a storage pool provided its name. - - :param name: libvirt storage pool name - :param connection: libvirt connection URI, overriding defaults -@@ -6505,22 +6561,6 @@ def pool_delete(name, **kwargs): - conn = __get_conn(**kwargs) - try: - pool = conn.storagePoolLookupByName(name) -- desc = ElementTree.fromstring(pool.XMLDesc()) -- -- # Is there a secret that we generated and would need to be removed? -- # Don't remove the other secrets -- auth_node = desc.find("source/auth") -- if auth_node is not None: -- auth_types = { -- "ceph": libvirt.VIR_SECRET_USAGE_TYPE_CEPH, -- "iscsi": libvirt.VIR_SECRET_USAGE_TYPE_ISCSI, -- } -- secret_type = auth_types[auth_node.get("type")] -- secret_usage = auth_node.find("secret").get("usage") -- if secret_type and "pool_{}".format(name) == secret_usage: -- secret = conn.secretLookupByUsage(secret_type, secret_usage) -- secret.undefine() -- - return not bool(pool.delete(libvirt.VIR_STORAGE_POOL_DELETE_NORMAL)) - finally: - conn.close() -diff --git a/salt/states/virt.py b/salt/states/virt.py -index cb15d57d8f..b45cf72ed3 100644 ---- a/salt/states/virt.py -+++ b/salt/states/virt.py -@@ -33,6 +33,8 @@ except ImportError: - - __virtualname__ = "virt" - -+log = logging.getLogger(__name__) -+ - - def __virtual__(): - """ -@@ -285,6 +287,7 @@ def defined( - arch=None, - boot=None, - update=True, -+ boot_dev=None, - ): - """ - Starts an existing guest, or defines and starts a new VM with specified arguments. -@@ -345,6 +348,14 @@ def defined( - - .. deprecated:: sodium - -+ :param boot_dev: -+ Space separated list of devices to boot from sorted by decreasing priority. -+ Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. -+ -+ By default, the value will ``"hd"``. -+ -+ .. versionadded:: Magnesium -+ - .. 
rubric:: Example States - - Make sure a virtual machine called ``domain_name`` is defined: -@@ -355,6 +366,7 @@ def defined( - virt.defined: - - cpu: 2 - - mem: 2048 -+ - boot_dev: network hd - - disk_profile: prod - - disks: - - name: system -@@ -407,6 +419,7 @@ def defined( - password=password, - boot=boot, - test=__opts__["test"], -+ boot_dev=boot_dev, - ) - ret["changes"][name] = status - if not status.get("definition"): -@@ -441,6 +454,7 @@ def defined( - password=password, - boot=boot, - start=False, -+ boot_dev=boot_dev, - ) - ret["changes"][name] = {"definition": True} - ret["comment"] = "Domain {} defined".format(name) -@@ -473,6 +487,7 @@ def running( - os_type=None, - arch=None, - boot=None, -+ boot_dev=None, - ): - """ - Starts an existing guest, or defines and starts a new VM with specified arguments. -@@ -584,6 +599,14 @@ def running( - - .. versionadded:: 3000 - -+ :param boot_dev: -+ Space separated list of devices to boot from sorted by decreasing priority. -+ Values can be ``hd``, ``fd``, ``cdrom`` or ``network``. -+ -+ By default, the value will ``"hd"``. -+ -+ .. versionadded:: Magnesium -+ - .. rubric:: Example States - - Make sure an already-defined virtual machine called ``domain_name`` is running: -@@ -651,6 +674,7 @@ def running( - arch=arch, - boot=boot, - update=update, -+ boot_dev=boot_dev, - connection=connection, - username=username, - password=password, -@@ -1218,14 +1242,24 @@ def pool_defined( - - action = "" - if info[name]["state"] != "running": -- if not __opts__["test"]: -- __salt__["virt.pool_build"]( -- name, -- connection=connection, -- username=username, -- password=password, -- ) -- action = ", built" -+ if ptype in BUILDABLE_POOL_TYPES: -+ if not __opts__["test"]: -+ # Storage pools build like disk or logical will fail if the disk or LV group -+ # was already existing. Since we can't easily figure that out, just log the -+ # possible libvirt error. -+ try: -+ __salt__["virt.pool_build"]( -+ name, -+ connection=connection, -+ username=username, -+ password=password, -+ ) -+ except libvirt.libvirtError as err: -+ log.warning( -+ "Failed to build libvirt storage pool: %s", -+ err.get_error_message(), -+ ) -+ action = ", built" - - action = ( - "{}, autostart flag changed".format(action) -@@ -1261,9 +1295,22 @@ def pool_defined( - password=password, - ) - -- __salt__["virt.pool_build"]( -- name, connection=connection, username=username, password=password -- ) -+ if ptype in BUILDABLE_POOL_TYPES: -+ # Storage pools build like disk or logical will fail if the disk or LV group -+ # was already existing. Since we can't easily figure that out, just log the -+ # possible libvirt error. 
-+ try: -+ __salt__["virt.pool_build"]( -+ name, -+ connection=connection, -+ username=username, -+ password=password, -+ ) -+ except libvirt.libvirtError as err: -+ log.warning( -+ "Failed to build libvirt storage pool: %s", -+ err.get_error_message(), -+ ) - if needs_autostart: - ret["changes"][name] = "Pool defined, marked for autostart" - ret["comment"] = "Pool {} defined, marked for autostart".format(name) -@@ -1370,7 +1417,7 @@ def pool_running( - is_running = info.get(name, {}).get("state", "stopped") == "running" - if is_running: - if updated: -- action = "built, restarted" -+ action = "restarted" - if not __opts__["test"]: - __salt__["virt.pool_stop"]( - name, -@@ -1378,13 +1425,16 @@ def pool_running( - username=username, - password=password, - ) -- if not __opts__["test"]: -- __salt__["virt.pool_build"]( -- name, -- connection=connection, -- username=username, -- password=password, -- ) -+ # if the disk or LV group is already existing build will fail (issue #56454) -+ if ptype in BUILDABLE_POOL_TYPES - {"disk", "logical"}: -+ if not __opts__["test"]: -+ __salt__["virt.pool_build"]( -+ name, -+ connection=connection, -+ username=username, -+ password=password, -+ ) -+ action = "built, {}".format(action) - else: - action = "already running" - result = True -diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja -index 439ed83f7f..2a2f5e4141 100644 ---- a/salt/templates/virt/libvirt_domain.jinja -+++ b/salt/templates/virt/libvirt_domain.jinja -@@ -2,32 +2,9 @@ - - {{ name }} - {{ cpu }} -- {%- if mem.max %} -- {{ mem.max }} -- {%- endif %} -- {%- if mem.boot %} -- {{ mem.boot }} -- {%- endif %} -- {%- if mem.current %} -- {{ mem.current }} -- {%- endif %} -- {%- if mem %} -- -- {%- if 'hard_limit' in mem and mem.hard_limit %} -- {{ mem.hard_limit }} -- {%- endif %} -- {%- if 'soft_limit' in mem and mem.soft_limit %} -- {{ mem.soft_limit }} -- {%- endif %} -- {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %} -- {{ mem.swap_hard_limit }} -- {%- endif %} -- {%- if 'min_guarantee' in mem and mem.min_guarantee %} -- {{ mem.min_guarantee }} -- {%- endif %} -- -- {%- endif %} -- -+ {{ mem }} -+ {{ mem }} -+ - {{ os_type }} - {% if boot %} - {% if 'kernel' in boot %} -diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py -index e5c8ad4eec..b9f047820b 100644 ---- a/salt/utils/xmlutil.py -+++ b/salt/utils/xmlutil.py -@@ -25,7 +25,7 @@ def _to_dict(xmltree): - """ - Converts an XML ElementTree to a dictionary that only contains items. - This is the default behavior in version 2017.7. This will default to prevent -- unexpected parsing issues on modules dependent on this. -+ unexpected parsing issues on modules dependant on this. - """ - # If this object has no children, the for..loop below will return nothing - # for it, so just return a single dict representing it. 
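The ``change_xml`` comparison fixed just below is easiest to see with a concrete mapping. This sketch reuses the ``params_mapping`` entry shape from the ``virt.py`` hunk earlier in this patch; the XML snippet and target value are illustrative only:

.. code-block:: python

    import xml.etree.ElementTree as ET

    import salt.utils.xmlutil as xmlutil

    desc = ET.fromstring("<domain><memory unit='KiB'>1048576</memory></domain>")

    def _set_with_mib_unit(node, value):
        node.text = str(value)
        node.set("unit", "MiB")

    mapping = [
        {
            "path": "mem",
            "xpath": "memory",
            "get": lambda n: int(n.text) / 1024,  # libvirt stores KiB
            "set": _set_with_mib_unit,
        }
    ]
    # 1048576 KiB is 1024 MiB, so asking for 2048 MiB triggers an update
    assert xmlutil.change_xml(desc, {"mem": 2048}, mapping)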
-@@ -298,7 +298,7 @@ def change_xml(doc, data, mapping): - if convert_fn: - new_value = convert_fn(new_value) - -- if str(current_value) != str(new_value): -+ if current_value != new_value: - set_fn(node, new_value) - need_update = True - else: -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index f53b4a85c1..4775fec31f 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -1843,17 +1843,36 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/", - } - -- boot_uefi = { -- "loader": "/usr/share/OVMF/OVMF_CODE.fd", -- "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd", -- } -+ # Update boot devices case -+ define_mock.reset_mock() -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", boot_dev="cdrom network hd"), -+ ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual( -+ ["cdrom", "network", "hd"], -+ [node.get("dev") for node in setxml.findall("os/boot")], -+ ) - -- invalid_boot = { -- "loader": "/usr/share/OVMF/OVMF_CODE.fd", -- "initrd": "/root/f8-i386-initrd", -- } -+ # Update unchanged boot devices case -+ define_mock.reset_mock() -+ self.assertEqual( -+ { -+ "definition": False, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", boot_dev="hd"), -+ ) -+ define_mock.assert_not_called() - - # Update with boot parameter case -+ define_mock.reset_mock() - self.assertEqual( - { - "definition": True, -@@ -1877,6 +1896,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "console=ttyS0 ks=http://example.com/f8-i386/os/", - ) - -+ boot_uefi = { -+ "loader": "/usr/share/OVMF/OVMF_CODE.fd", -+ "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd", -+ } -+ - self.assertEqual( - { - "definition": True, -@@ -1896,9 +1920,28 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "/usr/share/OVMF/OVMF_VARS.ms.fd", - ) - -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", boot={"efi": True}), -+ ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi") -+ -+ invalid_boot = { -+ "loader": "/usr/share/OVMF/OVMF_CODE.fd", -+ "initrd": "/root/f8-i386-initrd", -+ } -+ - with self.assertRaises(SaltInvocationError): - virt.update("my_vm", boot=invalid_boot) - -+ with self.assertRaises(SaltInvocationError): -+ virt.update("my_vm", boot={"efi": "Not a boolean value"}) -+ - # Update memory case - setmem_mock = MagicMock(return_value=0) - domain_mock.setMemoryFlags = setmem_mock -@@ -2390,6 +2433,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - ], - ) - -+ def test_update_xen_boot_params(self): -+ """ -+ Test virt.update() a Xen definition no boot parameter. 
-+ """ -+ root_dir = os.path.join(salt.syspaths.ROOT_DIR, "srv", "salt-images") -+ xml_boot = """ -+ -+ vm -+ 1048576 -+ 1048576 -+ 1 -+ -+ hvm -+ /usr/lib/xen/boot/hvmloader -+ -+ -+ """ -+ domain_mock_boot = self.set_mock_vm("vm", xml_boot) -+ domain_mock_boot.OSType = MagicMock(return_value="hvm") -+ define_mock_boot = MagicMock(return_value=True) -+ define_mock_boot.setVcpusFlags = MagicMock(return_value=0) -+ self.mock_conn.defineXML = define_mock_boot -+ self.assertEqual( -+ { -+ "cpu": False, -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("vm", cpu=2), -+ ) -+ setxml = ET.fromstring(define_mock_boot.call_args[0][0]) -+ self.assertEqual(setxml.find("os").find("loader").attrib.get("type"), "rom") -+ self.assertEqual( -+ setxml.find("os").find("loader").text, "/usr/lib/xen/boot/hvmloader" -+ ) -+ - def test_update_existing_boot_params(self): - """ - Test virt.update() with existing boot parameters. -@@ -2530,6 +2610,18 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(setxml.find("os").find("initrd"), None) - self.assertEqual(setxml.find("os").find("cmdline"), None) - -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("vm_with_boot_param", boot={"efi": False}), -+ ) -+ setxml = ET.fromstring(define_mock_boot.call_args[0][0]) -+ self.assertEqual(setxml.find("os").find("nvram"), None) -+ self.assertEqual(setxml.find("os").find("loader"), None) -+ - self.assertEqual( - { - "definition": True, -@@ -4248,7 +4340,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - """ - mock_pool = MagicMock() - mock_pool.delete = MagicMock(return_value=0) -- mock_pool.XMLDesc.return_value = "" - self.mock_conn.storagePoolLookupByName = MagicMock(return_value=mock_pool) - - res = virt.pool_delete("test-pool") -@@ -4262,12 +4353,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL - ) - -- def test_pool_delete_secret(self): -+ def test_pool_undefine_secret(self): - """ -- Test virt.pool_delete function where the pool has a secret -+ Test virt.pool_undefine function where the pool has a secret - """ - mock_pool = MagicMock() -- mock_pool.delete = MagicMock(return_value=0) -+ mock_pool.undefine = MagicMock(return_value=0) - mock_pool.XMLDesc.return_value = """ - - test-ses -@@ -4284,16 +4375,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - mock_undefine = MagicMock(return_value=0) - self.mock_conn.secretLookupByUsage.return_value.undefine = mock_undefine - -- res = virt.pool_delete("test-ses") -+ res = virt.pool_undefine("test-ses") - self.assertTrue(res) - - self.mock_conn.storagePoolLookupByName.assert_called_once_with("test-ses") -- -- # Shouldn't be called with another parameter so far since those are not implemented -- # and thus throwing exceptions. 
-- mock_pool.delete.assert_called_once_with( -- self.mock_libvirt.VIR_STORAGE_POOL_DELETE_NORMAL -- ) -+ mock_pool.undefine.assert_called_once_with() - - self.mock_conn.secretLookupByUsage.assert_called_once_with( - self.mock_libvirt.VIR_SECRET_USAGE_TYPE_CEPH, "pool_test-ses" -@@ -4562,24 +4648,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - - """ - -- expected_xml = ( -- '' -- "default" -- "20fbe05c-ab40-418a-9afa-136d512f0ede" -- '1999421108224' -- '713207042048' -- '1286214066176' -- "" -- '' -- '' -- '' -- '' -- "" -- "iscsi-images" -- "" -- "" -- ) -- - mock_secret = MagicMock() - self.mock_conn.secretLookupByUUIDString = MagicMock(return_value=mock_secret) - -@@ -4600,6 +4668,23 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.mock_conn.storagePoolDefineXML.assert_not_called() - mock_secret.setValue.assert_called_once_with(b"secret") - -+ # Case where the secret can't be found -+ self.mock_conn.secretLookupByUUIDString = MagicMock( -+ side_effect=self.mock_libvirt.libvirtError("secret not found") -+ ) -+ self.assertFalse( -+ virt.pool_update( -+ "default", -+ "rbd", -+ source_name="iscsi-images", -+ source_hosts=["ses4.tf.local", "ses5.tf.local"], -+ source_auth={"username": "libvirt", "password": "c2VjcmV0"}, -+ ) -+ ) -+ self.mock_conn.storagePoolDefineXML.assert_not_called() -+ self.mock_conn.secretDefineXML.assert_called_once() -+ mock_secret.setValue.assert_called_once_with(b"secret") -+ - def test_pool_update_password_create(self): - """ - Test the pool_update function, where the password only is changed -diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py -index 6d38829870..8fe892f607 100644 ---- a/tests/unit/states/test_virt.py -+++ b/tests/unit/states/test_virt.py -@@ -333,6 +333,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - "myvm", - cpu=2, - mem=2048, -+ boot_dev="cdrom hd", - os_type="linux", - arch="i686", - vm_type="qemu", -@@ -355,6 +356,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - "myvm", - cpu=2, - mem=2048, -+ boot_dev="cdrom hd", - os_type="linux", - arch="i686", - disk="prod", -@@ -463,10 +465,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - "comment": "Domain myvm updated with live update(s) failures", - } - ) -- self.assertDictEqual(virt.defined("myvm", cpu=2), ret) -+ self.assertDictEqual( -+ virt.defined("myvm", cpu=2, boot_dev="cdrom hd"), ret -+ ) - update_mock.assert_called_with( - "myvm", - cpu=2, -+ boot_dev="cdrom hd", - mem=None, - disk_profile=None, - disks=None, -@@ -590,6 +595,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password=None, - boot=None, - test=True, -+ boot_dev=None, - ) - - # No changes case -@@ -624,6 +630,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password=None, - boot=None, - test=True, -+ boot_dev=None, - ) - - def test_running(self): -@@ -700,6 +707,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - install=True, - pub_key=None, - priv_key=None, -+ boot_dev=None, - connection=None, - username=None, - password=None, -@@ -761,6 +769,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - install=False, - pub_key="/path/to/key.pub", - priv_key="/path/to/key", -+ boot_dev="network hd", - connection="someconnection", - username="libvirtuser", - password="supersecret", -@@ -785,6 +794,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - start=False, - pub_key="/path/to/key.pub", - priv_key="/path/to/key", -+ boot_dev="network hd", - connection="someconnection", - 
username="libvirtuser", - password="supersecret", -@@ -929,6 +939,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password=None, - boot=None, - test=False, -+ boot_dev=None, - ) - - # Failed definition update case -@@ -1047,6 +1058,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password=None, - boot=None, - test=True, -+ boot_dev=None, - ) - start_mock.assert_not_called() - -@@ -1083,6 +1095,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password=None, - boot=None, - test=True, -+ boot_dev=None, - ) - - def test_stopped(self): -@@ -1970,6 +1983,72 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - password="secret", - ) - -+ # Define a pool that doesn't handle build -+ for mock in mocks: -+ mocks[mock].reset_mock() -+ with patch.dict( -+ virt.__salt__, -+ { # pylint: disable=no-member -+ "virt.pool_info": MagicMock( -+ side_effect=[ -+ {}, -+ {"mypool": {"state": "stopped", "autostart": True}}, -+ ] -+ ), -+ "virt.pool_define": mocks["define"], -+ "virt.pool_build": mocks["build"], -+ "virt.pool_set_autostart": mocks["autostart"], -+ }, -+ ): -+ ret.update( -+ { -+ "changes": {"mypool": "Pool defined, marked for autostart"}, -+ "comment": "Pool mypool defined, marked for autostart", -+ } -+ ) -+ self.assertDictEqual( -+ virt.pool_defined( -+ "mypool", -+ ptype="rbd", -+ source={ -+ "name": "libvirt-pool", -+ "hosts": ["ses2.tf.local", "ses3.tf.local"], -+ "auth": { -+ "username": "libvirt", -+ "password": "AQAz+PRdtquBBRAASMv7nlMZYfxIyLw3St65Xw==", -+ }, -+ }, -+ autostart=True, -+ ), -+ ret, -+ ) -+ mocks["define"].assert_called_with( -+ "mypool", -+ ptype="rbd", -+ target=None, -+ permissions=None, -+ source_devices=None, -+ source_dir=None, -+ source_adapter=None, -+ source_hosts=["ses2.tf.local", "ses3.tf.local"], -+ source_auth={ -+ "username": "libvirt", -+ "password": "AQAz+PRdtquBBRAASMv7nlMZYfxIyLw3St65Xw==", -+ }, -+ source_name="libvirt-pool", -+ source_format=None, -+ source_initiator=None, -+ start=False, -+ transient=False, -+ connection=None, -+ username=None, -+ password=None, -+ ) -+ mocks["autostart"].assert_called_with( -+ "mypool", state="on", connection=None, username=None, password=None, -+ ) -+ mocks["build"].assert_not_called() -+ - mocks["update"] = MagicMock(return_value=False) - for mock in mocks: - mocks[mock].reset_mock() -@@ -2019,6 +2098,9 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - for mock in mocks: - mocks[mock].reset_mock() - mocks["update"] = MagicMock(return_value=True) -+ mocks["build"] = MagicMock( -+ side_effect=self.mock_libvirt.libvirtError("Existing VG") -+ ) - with patch.dict( - virt.__salt__, - { # pylint: disable=no-member -@@ -2122,6 +2204,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - ), - ret, - ) -+ mocks["build"].assert_not_called() - mocks["update"].assert_called_with( - "mypool", - ptype="logical", -@@ -2469,8 +2552,8 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - ): - ret.update( - { -- "changes": {"mypool": "Pool updated, built, restarted"}, -- "comment": "Pool mypool updated, built, restarted", -+ "changes": {"mypool": "Pool updated, restarted"}, -+ "comment": "Pool mypool updated, restarted", - "result": True, - } - ) -@@ -2496,9 +2579,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): - mocks["start"].assert_called_with( - "mypool", connection=None, username=None, password=None - ) -- mocks["build"].assert_called_with( -- "mypool", connection=None, username=None, password=None -- ) -+ mocks["build"].assert_not_called() - 
mocks["update"].assert_called_with( - "mypool", - ptype="logical", -diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py -index 9206979284..aff7384232 100644 ---- a/tests/unit/utils/test_data.py -+++ b/tests/unit/utils/test_data.py -@@ -220,38 +220,6 @@ class DataTestCase(TestCase): - ), - ) - -- # Traverse and match integer key in a nested dict -- # https://github.com/saltstack/salt/issues/56444 -- self.assertEqual( -- "it worked", -- salt.utils.data.traverse_dict_and_list( -- {"foo": {1234: "it worked"}}, "foo:1234", "it didn't work", -- ), -- ) -- # Make sure that we properly return the default value when the initial -- # attempt fails and YAML-loading the target key doesn't change its -- # value. -- self.assertEqual( -- "default", -- salt.utils.data.traverse_dict_and_list( -- {"foo": {"baz": "didn't work"}}, "foo:bar", "default", -- ), -- ) -- -- def test_issue_39709(self): -- test_two_level_dict_and_list = { -- "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}] -- } -- -- self.assertEqual( -- "sit", -- salt.utils.data.traverse_dict_and_list( -- test_two_level_dict_and_list, -- ["foo", "lorem", "ipsum", "dolor"], -- {"not_found": "not_found"}, -- ), -- ) -- - def test_compare_dicts(self): - ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"}) - self.assertEqual(ret, {}) --- -2.29.2 - - diff --git a/batch-async-catch-exceptions-and-safety-unregister-a.patch b/batch-async-catch-exceptions-and-safety-unregister-a.patch deleted file mode 100644 index 8b84c50..0000000 --- a/batch-async-catch-exceptions-and-safety-unregister-a.patch +++ /dev/null @@ -1,238 +0,0 @@ -From 1606379714f4776e2b529fb1d45891266985c896 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 28 Feb 2020 15:11:53 +0000 -Subject: [PATCH] Batch Async: Catch exceptions and safety unregister - and close instances - ---- - salt/cli/batch_async.py | 160 ++++++++++++++++++++++++---------------- - 1 file changed, 96 insertions(+), 64 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 1e2ac5b0d3..3dc04826d1 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -107,22 +107,25 @@ class BatchAsync: - def __event_handler(self, raw): - if not self.event: - return -- mtag, data = self.event.unpack(raw, self.event.serial) -- for (pattern, op) in self.patterns: -- if mtag.startswith(pattern[:-1]): -- minion = data["id"] -- if op == "ping_return": -- self.minions.add(minion) -- if self.targeted_minions == self.minions: -- self.event.io_loop.spawn_callback(self.start_batch) -- elif op == "find_job_return": -- if data.get("return", None): -- self.find_job_returned.add(minion) -- elif op == "batch_run": -- if minion in self.active: -- self.active.remove(minion) -- self.done_minions.add(minion) -- self.event.io_loop.spawn_callback(self.schedule_next) -+ try: -+ mtag, data = self.event.unpack(raw, self.event.serial) -+ for (pattern, op) in self.patterns: -+ if mtag.startswith(pattern[:-1]): -+ minion = data["id"] -+ if op == "ping_return": -+ self.minions.add(minion) -+ if self.targeted_minions == self.minions: -+ self.event.io_loop.spawn_callback(self.start_batch) -+ elif op == "find_job_return": -+ if data.get("return", None): -+ self.find_job_returned.add(minion) -+ elif op == "batch_run": -+ if minion in self.active: -+ self.active.remove(minion) -+ self.done_minions.add(minion) -+ self.event.io_loop.spawn_callback(self.schedule_next) -+ except Exception as ex: -+ log.error("Exception 
occurred while processing event: {}".format(ex)) - - def _get_next(self): - to_run = ( -@@ -154,53 +157,67 @@ class BatchAsync: - if timedout_minions: - self.schedule_next() - -- if running: -+ if self.event and running: - self.find_job_returned = self.find_job_returned.difference(running) - self.event.io_loop.spawn_callback(self.find_job, running) - - @tornado.gen.coroutine - def find_job(self, minions): -- not_done = minions.difference(self.done_minions).difference( -- self.timedout_minions -- ) -- -- if not_done: -- jid = self.jid_gen() -- find_job_return_pattern = "salt/job/{}/ret/*".format(jid) -- self.patterns.add((find_job_return_pattern, "find_job_return")) -- self.event.subscribe(find_job_return_pattern, match_type="glob") -- -- ret = yield self.local.run_job_async( -- not_done, -- "saltutil.find_job", -- [self.batch_jid], -- "list", -- gather_job_timeout=self.opts["gather_job_timeout"], -- jid=jid, -- **self.eauth -+ if self.event: -+ not_done = minions.difference(self.done_minions).difference( -+ self.timedout_minions - ) -- yield tornado.gen.sleep(self.opts["gather_job_timeout"]) -- self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid) -+ try: -+ if not_done: -+ jid = self.jid_gen() -+ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) -+ self.patterns.add((find_job_return_pattern, "find_job_return")) -+ self.event.subscribe(find_job_return_pattern, match_type="glob") -+ ret = yield self.local.run_job_async( -+ not_done, -+ "saltutil.find_job", -+ [self.batch_jid], -+ "list", -+ gather_job_timeout=self.opts["gather_job_timeout"], -+ jid=jid, -+ **self.eauth -+ ) -+ yield tornado.gen.sleep(self.opts["gather_job_timeout"]) -+ if self.event: -+ self.event.io_loop.spawn_callback( -+ self.check_find_job, not_done, jid -+ ) -+ except Exception as ex: -+ log.error( -+ "Exception occurred handling batch async: {}. 
Aborting execution.".format( -+ ex -+ ) -+ ) -+ self.close_safe() - - @tornado.gen.coroutine - def start(self): -- self.__set_event_handler() -- ping_return = yield self.local.run_job_async( -- self.opts["tgt"], -- "test.ping", -- [], -- self.opts.get("selected_target_option", self.opts.get("tgt_type", "glob")), -- gather_job_timeout=self.opts["gather_job_timeout"], -- jid=self.ping_jid, -- metadata=self.metadata, -- **self.eauth -- ) -- self.targeted_minions = set(ping_return["minions"]) -- # start batching even if not all minions respond to ping -- yield tornado.gen.sleep( -- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] -- ) -- self.event.io_loop.spawn_callback(self.start_batch) -+ if self.event: -+ self.__set_event_handler() -+ ping_return = yield self.local.run_job_async( -+ self.opts["tgt"], -+ "test.ping", -+ [], -+ self.opts.get( -+ "selected_target_option", self.opts.get("tgt_type", "glob") -+ ), -+ gather_job_timeout=self.opts["gather_job_timeout"], -+ jid=self.ping_jid, -+ metadata=self.metadata, -+ **self.eauth -+ ) -+ self.targeted_minions = set(ping_return["minions"]) -+ # start batching even if not all minions respond to ping -+ yield tornado.gen.sleep( -+ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] -+ ) -+ if self.event: -+ self.event.io_loop.spawn_callback(self.start_batch) - - @tornado.gen.coroutine - def start_batch(self): -@@ -215,7 +232,8 @@ class BatchAsync: - ret = self.event.fire_event( - data, "salt/batch/{}/start".format(self.batch_jid) - ) -- self.event.io_loop.spawn_callback(self.run_next) -+ if self.event: -+ self.event.io_loop.spawn_callback(self.run_next) - - @tornado.gen.coroutine - def end_batch(self): -@@ -232,11 +250,21 @@ class BatchAsync: - "metadata": self.metadata, - } - self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) -- for (pattern, label) in self.patterns: -- if label in ["ping_return", "batch_run"]: -- self.event.unsubscribe(pattern, match_type="glob") -- del self -- gc.collect() -+ -+ # release to the IOLoop to allow the event to be published -+ # before closing batch async execution -+ yield tornado.gen.sleep(1) -+ self.close_safe() -+ -+ def close_safe(self): -+ for (pattern, label) in self.patterns: -+ self.event.unsubscribe(pattern, match_type="glob") -+ self.event.remove_event_handler(self.__event_handler) -+ self.event = None -+ self.local = None -+ self.ioloop = None -+ del self -+ gc.collect() - - @tornado.gen.coroutine - def schedule_next(self): -@@ -244,7 +272,8 @@ class BatchAsync: - self.scheduled = True - # call later so that we maybe gather more returns - yield tornado.gen.sleep(self.batch_delay) -- self.event.io_loop.spawn_callback(self.run_next) -+ if self.event: -+ self.event.io_loop.spawn_callback(self.run_next) - - @tornado.gen.coroutine - def run_next(self): -@@ -266,17 +295,20 @@ class BatchAsync: - ) - - yield tornado.gen.sleep(self.opts["timeout"]) -- self.event.io_loop.spawn_callback(self.find_job, set(next_batch)) -+ -+ # The batch can be done already at this point, which means no self.event -+ if self.event: -+ self.event.io_loop.spawn_callback(self.find_job, set(next_batch)) - except Exception as ex: -- log.error("Error in scheduling next batch: %s", ex) -+ log.error("Error in scheduling next batch: %s. 
Aborting execution", ex) - self.active = self.active.difference(next_batch) -+ self.close_safe() - else: - yield self.end_batch() - gc.collect() - - def __del__(self): - self.local = None -- self.event.remove_event_handler(self.__event_handler) - self.event = None - self.ioloop = None - gc.collect() --- -2.29.2 - - diff --git a/batch_async-avoid-using-fnmatch-to-match-event-217.patch b/batch_async-avoid-using-fnmatch-to-match-event-217.patch deleted file mode 100644 index d97e8cf..0000000 --- a/batch_async-avoid-using-fnmatch-to-match-event-217.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 31fedcb3173f73fbffc3b053bc64c94a7b608118 Mon Sep 17 00:00:00 2001 -From: Silvio Moioli -Date: Mon, 2 Mar 2020 11:23:59 +0100 -Subject: [PATCH] batch_async: avoid using fnmatch to match event - (#217) - ---- - salt/cli/batch_async.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 8d2601e636..1e2ac5b0d3 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -109,7 +109,7 @@ class BatchAsync: - return - mtag, data = self.event.unpack(raw, self.event.serial) - for (pattern, op) in self.patterns: -- if fnmatch.fnmatch(mtag, pattern): -+ if mtag.startswith(pattern[:-1]): - minion = data["id"] - if op == "ping_return": - self.minions.add(minion) --- -2.29.2 - - diff --git a/better-handling-of-bad-public-keys-from-minions-bsc-.patch b/better-handling-of-bad-public-keys-from-minions-bsc-.patch index 62f7386..a32e8c9 100644 --- a/better-handling-of-bad-public-keys-from-minions-bsc-.patch +++ b/better-handling-of-bad-public-keys-from-minions-bsc-.patch @@ -1,4 +1,4 @@ -From cd64b9a063771829f85d6be0e42259825cfb10c8 Mon Sep 17 00:00:00 2001 +From f0025c6d00f174db587726bb15b78713cbbcf996 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 2 Aug 2021 13:50:37 -0700 Subject: [PATCH] Better handling of bad public keys from minions @@ -31,7 +31,7 @@ index 0000000000..0cd55b19a6 @@ -0,0 +1 @@ +Better handling of bad RSA public keys from minions diff --git a/salt/crypt.py b/salt/crypt.py -index 0a8b728f50..e6e4f3181e 100644 +index f3da78f9ba..789c562e25 100644 --- a/salt/crypt.py +++ b/salt/crypt.py @@ -36,6 +36,7 @@ import salt.utils.verify @@ -42,7 +42,7 @@ index 0a8b728f50..e6e4f3181e 100644 MasterExit, SaltClientError, SaltReqTimeoutError, -@@ -217,10 +218,16 @@ def get_rsa_pub_key(path): +@@ -220,10 +221,16 @@ def get_rsa_pub_key(path): with salt.utils.files.fopen(path, "rb") as f: data = f.read().replace(b"RSA ", b"") bio = BIO.MemoryBuffer(data) @@ -79,18 +79,18 @@ index 033a19cc54..1da15f9e69 100644 """ Used in modules or grains when a required binary is not available diff --git a/salt/key.py b/salt/key.py -index 75777ede06..59090c979c 100644 +index 16d20b1303..3b931152cc 100644 --- a/salt/key.py +++ b/salt/key.py -@@ -11,6 +11,7 @@ import fnmatch +@@ -9,6 +9,7 @@ import itertools import logging import os import shutil +import sys - # Import salt libs import salt.cache -@@ -652,17 +653,27 @@ class Key(object): + import salt.client +@@ -643,17 +644,27 @@ class Key: keydirs.append(self.REJ) if include_denied: keydirs.append(self.DEN) @@ -112,7 +112,7 @@ index 75777ede06..59090c979c 100644 ) eload = {"result": True, "act": "accept", "id": key} self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except (IOError, OSError): + except OSError: pass + for keydir, key in invalid_keys: + matches[keydir].remove(key) @@ -121,10 +121,10 @@ index 75777ede06..59090c979c 100644 def accept_all(self): diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py -index 003cbd8275..0f0c615408 100644 +index 102af568f3..29b38d3027 100644 --- a/salt/transport/mixins/auth.py +++ b/salt/transport/mixins/auth.py -@@ -184,11 +184,11 @@ class AESReqServerMixin(object): +@@ -174,11 +174,11 @@ class AESReqServerMixin: tagged "auth" and returns a dict with information about the auth event @@ -141,7 +141,7 @@ index 003cbd8275..0f0c615408 100644 """ if not salt.utils.verify.valid_id(self.opts, load["id"]): -@@ -460,7 +460,7 @@ class AESReqServerMixin(object): +@@ -450,7 +450,7 @@ class AESReqServerMixin: # and an empty request comes in try: pub = salt.crypt.get_rsa_pub_key(pubfn) @@ -151,10 +151,10 @@ index 003cbd8275..0f0c615408 100644 return {"enc": "clear", "load": {"ret": False}} diff --git a/tests/pytests/integration/cli/test_salt_key.py b/tests/pytests/integration/cli/test_salt_key.py -index 0edb2cf86c..2583348ce6 100644 +index 3ec87fe580..8f29929747 100644 --- a/tests/pytests/integration/cli/test_salt_key.py +++ b/tests/pytests/integration/cli/test_salt_key.py -@@ -328,3 +328,31 @@ def test_keys_generation_keysize_max(salt_key_cli): +@@ -316,3 +316,31 @@ def test_keys_generation_keysize_max(salt_key_cli): ) assert ret.exitcode != 0 assert "error: The maximum value for keysize is 32768" in ret.stderr @@ -213,6 +213,6 @@ index 0000000000..aa8f439b8c + with pytest.raises(salt.crypt.InvalidKeyError): + salt.crypt.get_rsa_pub_key(key_path) -- -2.32.0 +2.33.0 diff --git a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch b/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch deleted file mode 100644 index eff0ef5..0000000 --- a/calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch +++ /dev/null @@ -1,107 +0,0 @@ -From d9362f10681a2dfdb057939eee1ffae3a35d4a8d Mon Sep 17 00:00:00 2001 
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 12 Apr 2019 16:47:03 +0100 -Subject: [PATCH] Calculate FQDNs in parallel to avoid blockings - (bsc#1129079) - -Fix pylint issue --- - salt/grains/core.py | 55 +++++++++++++++++++++++++++++++++------------ - 1 file changed, 41 insertions(+), 14 deletions(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index 006878f806..883e3ebe09 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -20,8 +20,10 @@ import socket - import sys - import time - import uuid -+import warnings - import zlib - from errno import EACCES, EPERM -+from multiprocessing.dummy import Pool as ThreadPool - - import distro - import salt.exceptions -@@ -44,6 +46,14 @@ import salt.utils.versions - from salt.ext.six.moves import range - from salt.utils.network import _get_interfaces - -+# pylint: disable=import-error -+try: -+ import dateutil.tz -+ -+ _DATEUTIL_TZ = True -+except ImportError: -+ _DATEUTIL_TZ = False -+ - - # rewrite distro.linux_distribution to allow best=True kwarg in version(), needed to get the minor version numbers in CentOS - def _linux_distribution(): -@@ -2402,22 +2412,12 @@ def fqdns(): - grains = {} - fqdns = set() - -- addresses = salt.utils.network.ip_addrs( -- include_loopback=False, interface_data=_get_interfaces() -- ) -- addresses.extend( -- salt.utils.network.ip_addrs6( -- include_loopback=False, interface_data=_get_interfaces() -- ) -- ) -- err_message = "Exception during resolving address: %s" -- for ip in addresses: -+ def _lookup_fqdn(ip): - try: - name, aliaslist, addresslist = socket.gethostbyaddr(ip) -- fqdns.update( -- [socket.getfqdn(name)] -- + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] -- ) -+ return [socket.getfqdn(name)] + [ -+ als for als in aliaslist if salt.utils.network.is_fqdn(als) -+ ] - except socket.herror as err: - if err.errno in (0, HOST_NOT_FOUND, NO_DATA): - # No FQDN for this IP address, so we don't need to know this all the time. -@@ -2427,6 +2427,33 @@ - log.debug(err_message, ip, err) - except (OSError, socket.gaierror, socket.timeout) as err: - log.error(err_message, ip, err) - -+ start = time.time() -+ -+ addresses = salt.utils.network.ip_addrs( -+ include_loopback=False, interface_data=_get_interfaces() -+ ) -+ addresses.extend( -+ salt.utils.network.ip_addrs6( -+ include_loopback=False, interface_data=_get_interfaces() -+ ) -+ ) -+ err_message = "Exception during resolving address: %s" -+ -+ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. -+ # This avoids blocking the execution when the "fqdn" is not defined for certain IP addresses, which caused -+ # "socket.timeout" to be reached multiple times sequentially, blocking execution for several seconds. 
-+ pool = ThreadPool(8) -+ results = pool.map(_lookup_fqdn, addresses) -+ pool.close() -+ pool.join() -+ -+ for item in results: -+ if item: -+ fqdns.update(item) -+ -+ elapsed = time.time() - start -+ log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed)) -+ - return {"fqdns": sorted(list(fqdns))} - - --- -2.29.2 - - diff --git a/changed-imports-to-vendored-tornado.patch b/changed-imports-to-vendored-tornado.patch deleted file mode 100644 index 403546a..0000000 --- a/changed-imports-to-vendored-tornado.patch +++ /dev/null @@ -1,252 +0,0 @@ -From 5db9ccdb4f557cdbff670b18c45e55124e29c57c Mon Sep 17 00:00:00 2001 -From: Jochen Breuer -Date: Tue, 10 Mar 2020 14:02:17 +0100 -Subject: [PATCH] Changed imports to vendored Tornado - ---- - salt/cli/batch_async.py | 25 ++++++++++++----------- - salt/master.py | 2 +- - tests/unit/cli/test_batch_async.py | 32 +++++++++++++++--------------- - 3 files changed, 30 insertions(+), 29 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 3dc04826d1..09aa85258b 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -8,6 +8,7 @@ import gc - import logging - - import salt.client -+import salt.ext.tornado - import tornado - from salt.cli.batch import batch_get_eauth, batch_get_opts, get_bnum - -@@ -46,7 +47,7 @@ class BatchAsync: - """ - - def __init__(self, parent_opts, jid_gen, clear_load): -- ioloop = tornado.ioloop.IOLoop.current() -+ ioloop = salt.ext.tornado.ioloop.IOLoop.current() - self.local = salt.client.get_local_client( - parent_opts["conf_file"], io_loop=ioloop - ) -@@ -161,7 +162,7 @@ class BatchAsync: - self.find_job_returned = self.find_job_returned.difference(running) - self.event.io_loop.spawn_callback(self.find_job, running) - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def find_job(self, minions): - if self.event: - not_done = minions.difference(self.done_minions).difference( -@@ -182,7 +183,7 @@ class BatchAsync: - jid=jid, - **self.eauth - ) -- yield tornado.gen.sleep(self.opts["gather_job_timeout"]) -+ yield salt.ext.tornado.gen.sleep(self.opts["gather_job_timeout"]) - if self.event: - self.event.io_loop.spawn_callback( - self.check_find_job, not_done, jid -@@ -195,7 +196,7 @@ class BatchAsync: - ) - self.close_safe() - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def start(self): - if self.event: - self.__set_event_handler() -@@ -213,13 +214,13 @@ class BatchAsync: - ) - self.targeted_minions = set(ping_return["minions"]) - # start batching even if not all minions respond to ping -- yield tornado.gen.sleep( -+ yield salt.ext.tornado.gen.sleep( - self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] - ) - if self.event: - self.event.io_loop.spawn_callback(self.start_batch) - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def start_batch(self): - if not self.initialized: - self.batch_size = get_bnum(self.opts, self.minions, True) -@@ -235,7 +236,7 @@ class BatchAsync: - if self.event: - self.event.io_loop.spawn_callback(self.run_next) - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def end_batch(self): - left = self.minions.symmetric_difference( - self.done_minions.union(self.timedout_minions) -@@ -253,7 +254,7 @@ class BatchAsync: - - # release to the IOLoop to allow the event to be published - # before closing batch async execution -- yield tornado.gen.sleep(1) -+ yield salt.ext.tornado.gen.sleep(1) - self.close_safe() - - def close_safe(self): -@@ -266,16 +267,16 @@ class BatchAsync: - del 
self - gc.collect() - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def schedule_next(self): - if not self.scheduled: - self.scheduled = True - # call later so that we maybe gather more returns -- yield tornado.gen.sleep(self.batch_delay) -+ yield salt.ext.tornado.gen.sleep(self.batch_delay) - if self.event: - self.event.io_loop.spawn_callback(self.run_next) - -- @tornado.gen.coroutine -+ @salt.ext.tornado.gen.coroutine - def run_next(self): - self.scheduled = False - next_batch = self._get_next() -@@ -294,7 +295,7 @@ class BatchAsync: - metadata=self.metadata, - ) - -- yield tornado.gen.sleep(self.opts["timeout"]) -+ yield salt.ext.tornado.gen.sleep(self.opts["timeout"]) - - # The batch can be done already at this point, which means no self.event - if self.event: -diff --git a/salt/master.py b/salt/master.py -index 7a99af357a..ab85c7f5c6 100644 ---- a/salt/master.py -+++ b/salt/master.py -@@ -2237,7 +2237,7 @@ class ClearFuncs(TransportMethods): - functools.partial(self._prep_jid, clear_load, {}), - batch_load, - ) -- ioloop = tornado.ioloop.IOLoop.current() -+ ioloop = salt.ext.tornado.ioloop.IOLoop.current() - ioloop.add_callback(batch.start) - - return { -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index dcee9a87bd..82a712b15b 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -1,8 +1,8 @@ --import tornado -+import salt.ext.tornado - from salt.cli.batch_async import BatchAsync -+from salt.ext.tornado.testing import AsyncTestCase - from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch - from tests.support.unit import TestCase, skipIf --from tornado.testing import AsyncTestCase - - - @skipIf(NO_MOCK, NO_MOCK_REASON) -@@ -52,10 +52,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.start_batch() - self.assertEqual(self.batch.batch_size, 2) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_start_on_batch_presence_ping_timeout(self): - self.batch.event = MagicMock() -- future = tornado.gen.Future() -+ future = salt.ext.tornado.gen.Future() - future.set_result({"minions": ["foo", "bar"]}) - self.batch.local.run_job_async.return_value = future - ret = self.batch.start() -@@ -71,10 +71,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - # assert targeted_minions == all minions matched by tgt - self.assertEqual(self.batch.targeted_minions, {"foo", "bar"}) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_start_on_gather_job_timeout(self): - self.batch.event = MagicMock() -- future = tornado.gen.Future() -+ future = salt.ext.tornado.gen.Future() - future.set_result({"minions": ["foo", "bar"]}) - self.batch.local.run_job_async.return_value = future - self.batch.batch_presence_ping_timeout = None -@@ -103,7 +103,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ), - ) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_start_batch_calls_next(self): - self.batch.run_next = MagicMock(return_value=MagicMock()) - self.batch.event = MagicMock() -@@ -160,14 +160,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(len(event.unsubscribe.mock_calls), 2) - self.assertEqual(len(event.remove_event_handler.mock_calls), 1) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_next(self): - self.batch.event = MagicMock() - self.batch.opts["fun"] = "my.fun" - self.batch.opts["arg"] = [] - 
self.batch._get_next = MagicMock(return_value={"foo", "bar"}) - self.batch.batch_size = 2 -- future = tornado.gen.Future() -+ future = salt.ext.tornado.gen.Future() - future.set_result({"minions": ["foo", "bar"]}) - self.batch.local.run_job_async.return_value = future - self.batch.run_next() -@@ -290,38 +290,38 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch._BatchAsync__event_handler(MagicMock()) - self.assertEqual(self.batch.find_job_returned, {"foo"}) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_run_next_end_batch_when_no_next(self): - self.batch.end_batch = MagicMock() - self.batch._get_next = MagicMock(return_value={}) - self.batch.run_next() - self.assertEqual(len(self.batch.end_batch.mock_calls), 1) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_find_job(self): - self.batch.event = MagicMock() -- future = tornado.gen.Future() -+ future = salt.ext.tornado.gen.Future() - future.set_result({}) - self.batch.local.run_job_async.return_value = future - self.batch.minions = {"foo", "bar"} - self.batch.jid_gen = MagicMock(return_value="1234") -- tornado.gen.sleep = MagicMock(return_value=future) -+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({"foo", "bar"}) - self.assertEqual( - self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.check_find_job, {"foo", "bar"}, "1234"), - ) - -- @tornado.testing.gen_test -+ @salt.ext.tornado.testing.gen_test - def test_batch_find_job_with_done_minions(self): - self.batch.done_minions = {"bar"} - self.batch.event = MagicMock() -- future = tornado.gen.Future() -+ future = salt.ext.tornado.gen.Future() - future.set_result({}) - self.batch.local.run_job_async.return_value = future - self.batch.minions = {"foo", "bar"} - self.batch.jid_gen = MagicMock(return_value="1234") -- tornado.gen.sleep = MagicMock(return_value=future) -+ salt.ext.tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({"foo", "bar"}) - self.assertEqual( - self.batch.event.io_loop.spawn_callback.call_args[0], --- -2.29.2 - - diff --git a/debian-info_installed-compatibility-50453.patch b/debian-info_installed-compatibility-50453.patch index 7c45e35..925950e 100644 --- a/debian-info_installed-compatibility-50453.patch +++ b/debian-info_installed-compatibility-50453.patch @@ -1,4 +1,4 @@ -From 36f4465d22f8cdf05be20ba72756757f5725e509 Mon Sep 17 00:00:00 2001 +From 1ccca51897eb7c7cf1bace7015a4307aa0be7215 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 20 Nov 2018 16:06:31 +0100 Subject: [PATCH] Debian info_installed compatibility (#50453) @@ -48,18 +48,25 @@ Lintfix: too many empty lines Adjust architecture getter according to the lowpkg info Fix wrong Git merge: missing function signature + +Reintroducing reverted changes + +Reintroducing changes from commit e20362f6f053eaa4144583604e6aac3d62838419 +that got partially reverted by this commit: +https://github.com/openSUSE/salt/commit/d0ef24d113bdaaa29f180031b5da384cffe08c64#diff-820e6ce667fe3afddbc1b9cf1682fdef --- - salt/modules/aptpkg.py | 24 ++- - salt/modules/dpkg_lowpkg.py | 110 ++++++++++-- - tests/unit/modules/test_aptpkg.py | 235 ++++++++++++++++++------- - tests/unit/modules/test_dpkg_lowpkg.py | 189 +++++++++++--------- - 4 files changed, 396 insertions(+), 162 deletions(-) + salt/modules/aptpkg.py | 24 +- + salt/modules/dpkg_lowpkg.py | 108 ++- + tests/unit/modules/test_aptpkg.py | 981 +++++++++++++++++++++++++ + tests/unit/modules/test_dpkg_lowpkg.py | 189 
+++-- + 4 files changed, 1203 insertions(+), 99 deletions(-) + create mode 100644 tests/unit/modules/test_aptpkg.py diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 70e173806a..bf90d0614f 100644 +index 86c85bb95c..06db908d3d 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -2902,6 +2902,15 @@ def info_installed(*names, **kwargs): +@@ -2920,6 +2920,15 @@ def info_installed(*names, **kwargs): .. versionadded:: 2016.11.3 @@ -72,10 +79,10 @@ index 70e173806a..bf90d0614f 100644 + + .. versionadded:: Neon + - CLI example: + CLI Example: .. code-block:: bash -@@ -2912,11 +2921,19 @@ def info_installed(*names, **kwargs): +@@ -2930,11 +2939,19 @@ def info_installed(*names, **kwargs): """ kwargs = salt.utils.args.clean_kwargs(**kwargs) failhard = kwargs.pop("failhard", True) @@ -96,7 +103,7 @@ index 70e173806a..bf90d0614f 100644 t_nfo = dict() if pkg_nfo.get("status", "ii")[1] != "i": continue # return only packages that are really installed -@@ -2937,7 +2954,10 @@ def info_installed(*names, **kwargs): +@@ -2955,7 +2972,10 @@ def info_installed(*names, **kwargs): else: t_nfo[key] = value @@ -109,24 +116,10 @@ index 70e173806a..bf90d0614f 100644 return ret diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py -index d569e04995..7447637774 100644 +index 6a88573a8f..afbd619490 100644 --- a/salt/modules/dpkg_lowpkg.py +++ b/salt/modules/dpkg_lowpkg.py -@@ -2,13 +2,11 @@ - Support for DEB packages - """ - --# Import python libs - import datetime - import logging - import os - import re - --# Import salt libs - import salt.utils.args - import salt.utils.data - import salt.utils.files -@@ -236,6 +234,44 @@ def file_dict(*packages, **kwargs): +@@ -234,6 +234,44 @@ def file_dict(*packages, **kwargs): return {"errors": errors, "packages": ret} @@ -171,7 +164,7 @@ index d569e04995..7447637774 100644 def _get_pkg_info(*packages, **kwargs): """ Return list of package information. If 'packages' parameter is empty, -@@ -259,7 +295,7 @@ def _get_pkg_info(*packages, **kwargs): +@@ -257,7 +295,7 @@ def _get_pkg_info(*packages, **kwargs): cmd = ( "dpkg-query -W -f='package:" + bin_var + "\\n" "revision:${binary:Revision}\\n" @@ -180,7 +173,7 @@ index d569e04995..7447637774 100644 "maintainer:${Maintainer}\\n" "summary:${Summary}\\n" "source:${source:Package}\\n" -@@ -298,9 +334,16 @@ def _get_pkg_info(*packages, **kwargs): +@@ -296,9 +334,16 @@ def _get_pkg_info(*packages, **kwargs): key, value = pkg_info_line.split(":", 1) if value: pkg_data[key] = value @@ -200,7 +193,7 @@ index d569e04995..7447637774 100644 pkg_data["description"] = pkg_descr.split(":", 1)[-1] ret.append(pkg_data) -@@ -326,24 +369,34 @@ def _get_pkg_license(pkg): +@@ -324,24 +369,34 @@ def _get_pkg_license(pkg): return ", ".join(sorted(licenses)) @@ -246,7 +239,7 @@ index d569e04995..7447637774 100644 def _get_pkg_ds_avail(): -@@ -393,6 +446,15 @@ def info(*packages, **kwargs): +@@ -391,6 +446,15 @@ def info(*packages, **kwargs): .. versionadded:: 2016.11.3 @@ -259,10 +252,10 @@ index d569e04995..7447637774 100644 + + .. versionadded:: Neon + - CLI example: + CLI Example: .. 
code-block:: bash -@@ -407,6 +469,10 @@ def info(*packages, **kwargs): +@@ -405,6 +469,10 @@ def info(*packages, **kwargs): kwargs = salt.utils.args.clean_kwargs(**kwargs) failhard = kwargs.pop("failhard", True) @@ -273,7 +266,7 @@ index d569e04995..7447637774 100644 if kwargs: salt.utils.args.invalid_kwargs(kwargs) -@@ -434,6 +500,14 @@ def info(*packages, **kwargs): +@@ -432,6 +500,14 @@ def info(*packages, **kwargs): lic = _get_pkg_license(pkg["package"]) if lic: pkg["license"] = lic @@ -290,21 +283,196 @@ index d569e04995..7447637774 100644 return ret diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index a7b7a34166..77d8b84896 100644 ---- a/tests/unit/modules/test_aptpkg.py +new file mode 100644 +index 0000000000..3c9744e224 +--- /dev/null +++ b/tests/unit/modules/test_aptpkg.py -@@ -13,6 +13,7 @@ import textwrap - import pytest - import salt.modules.aptpkg as aptpkg - from salt.exceptions import CommandExecutionError, SaltInvocationError +@@ -0,0 +1,981 @@ ++""" ++ :synopsis: Unit Tests for Advanced Packaging Tool module 'module.aptpkg' ++ :platform: Linux ++ :maturity: develop ++ versionadded:: 2017.7.0 ++""" ++ ++ ++import copy ++import logging ++import textwrap ++ ++import pytest ++import salt.modules.aptpkg as aptpkg ++from salt.exceptions import CommandExecutionError, SaltInvocationError +from salt.ext import six - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.mock import MagicMock, Mock, call, patch - from tests.support.unit import TestCase, skipIf -@@ -182,49 +183,54 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - return {aptpkg: {"__grains__": {}}} - ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import MagicMock, Mock, call, patch ++from tests.support.unit import TestCase, skipIf ++ ++log = logging.getLogger(__name__) ++ ++ ++APT_KEY_LIST = r""" ++pub:-:1024:17:46181433FBB75451:1104433784:::-:::scSC: ++fpr:::::::::C5986B4F1257FFA86632CBA746181433FBB75451: ++uid:-::::1104433784::B4D41942D4B35FF44182C7F9D00C99AF27B93AD0::Ubuntu CD Image Automatic Signing Key : ++""" ++ ++REPO_KEYS = { ++ "46181433FBB75451": { ++ "algorithm": 17, ++ "bits": 1024, ++ "capability": "scSC", ++ "date_creation": 1104433784, ++ "date_expiration": None, ++ "fingerprint": "C5986B4F1257FFA86632CBA746181433FBB75451", ++ "keyid": "46181433FBB75451", ++ "uid": "Ubuntu CD Image Automatic Signing Key ", ++ "uid_hash": "B4D41942D4B35FF44182C7F9D00C99AF27B93AD0", ++ "validity": "-", ++ } ++} ++ ++PACKAGES = {"wget": "1.15-1ubuntu1.14.04.2"} ++ ++LOWPKG_FILES = { ++ "errors": {}, ++ "packages": { ++ "wget": [ ++ "/.", ++ "/etc", ++ "/etc/wgetrc", ++ "/usr", ++ "/usr/bin", ++ "/usr/bin/wget", ++ "/usr/share", ++ "/usr/share/info", ++ "/usr/share/info/wget.info.gz", ++ "/usr/share/doc", ++ "/usr/share/doc/wget", ++ "/usr/share/doc/wget/MAILING-LIST", ++ "/usr/share/doc/wget/NEWS.gz", ++ "/usr/share/doc/wget/AUTHORS", ++ "/usr/share/doc/wget/copyright", ++ "/usr/share/doc/wget/changelog.Debian.gz", ++ "/usr/share/doc/wget/README", ++ "/usr/share/man", ++ "/usr/share/man/man1", ++ "/usr/share/man/man1/wget.1.gz", ++ ] ++ }, ++} ++ ++LOWPKG_INFO = { ++ "wget": { ++ "architecture": "amd64", ++ "description": "retrieves files from the web", ++ "homepage": "http://www.gnu.org/software/wget/", ++ "install_date": "2016-08-30T22:20:15Z", ++ "maintainer": "Ubuntu Developers ", ++ "name": "wget", ++ "section": "web", ++ "source": "wget", ++ "version": "1.15-1ubuntu1.14.04.2", ++ 
"status": "ii", ++ }, ++ "apache2": { ++ "architecture": "amd64", ++ "description": """Apache HTTP Server ++ The Apache HTTP Server Project's goal is to build a secure, efficient and ++ extensible HTTP server as standards-compliant open source software. The ++ result has long been the number one web server on the Internet. ++ . ++ Installing this package results in a full installation, including the ++ configuration files, init scripts and support scripts.""", ++ "homepage": "http://httpd.apache.org/", ++ "install_date": "2016-08-30T22:20:15Z", ++ "maintainer": "Ubuntu Developers ", ++ "name": "apache2", ++ "section": "httpd", ++ "source": "apache2", ++ "version": "2.4.18-2ubuntu3.9", ++ "status": "rc", ++ }, ++} ++ ++APT_Q_UPDATE = """ ++Get:1 http://security.ubuntu.com trusty-security InRelease [65 kB] ++Get:2 http://security.ubuntu.com trusty-security/main Sources [120 kB] ++Get:3 http://security.ubuntu.com trusty-security/main amd64 Packages [548 kB] ++Get:4 http://security.ubuntu.com trusty-security/main i386 Packages [507 kB] ++Hit http://security.ubuntu.com trusty-security/main Translation-en ++Fetched 1240 kB in 10s (124 kB/s) ++Reading package lists... ++""" ++ ++APT_Q_UPDATE_ERROR = """ ++Err http://security.ubuntu.com trusty InRelease ++ ++Err http://security.ubuntu.com trusty Release.gpg ++Unable to connect to security.ubuntu.com:http: ++Reading package lists... ++W: Failed to fetch http://security.ubuntu.com/ubuntu/dists/trusty/InRelease ++ ++W: Failed to fetch http://security.ubuntu.com/ubuntu/dists/trusty/Release.gpg Unable to connect to security.ubuntu.com:http: ++ ++W: Some index files failed to download. They have been ignored, or old ones used instead. ++""" ++ ++AUTOREMOVE = """ ++Reading package lists... Done ++Building dependency tree ++Reading state information... Done ++0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. ++""" ++ ++UPGRADE = """ ++Reading package lists... ++Building dependency tree... ++Reading state information... ++0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. ++""" ++ ++UNINSTALL = {"tmux": {"new": "", "old": "1.8-5"}} ++INSTALL = {"tmux": {"new": "1.8-5", "old": ""}} ++ ++ ++def _get_uri(repo): ++ """ ++ Get the URI portion of the a string ++ """ ++ splits = repo.split() ++ for val in splits: ++ if any(val.startswith(x) for x in ("http://", "https://", "ftp://")): ++ return val ++ ++ ++class MockSourceEntry: ++ def __init__(self, uri, source_type, line, invalid, file=None): ++ self.uri = uri ++ self.type = source_type ++ self.line = line ++ self.invalid = invalid ++ self.file = file ++ self.disabled = False ++ self.dist = "" ++ ++ def mysplit(self, line): ++ return line.split() ++ ++ ++class MockSourceList: ++ def __init__(self): ++ self.list = [] ++ ++ ++class AptPkgTestCase(TestCase, LoaderModuleMockMixin): ++ """ ++ Test cases for salt.modules.aptpkg ++ """ ++ ++ def setup_loader_modules(self): ++ return {aptpkg: {"__grains__": {}}} ++ + @patch( + "salt.modules.aptpkg.__salt__", + { @@ -313,89 +481,65 @@ index a7b7a34166..77d8b84896 100644 + ) + }, + ) - def test_version(self): - """ - Test - Returns a string representing the package version or an empty string if - not installed. 
- """ -- version = LOWPKG_INFO["wget"]["version"] -- mock = MagicMock(return_value=version) -- with patch.dict(aptpkg.__salt__, {"pkg_resource.version": mock}): -- self.assertEqual(aptpkg.version(*["wget"]), version) ++ def test_version(self): ++ """ ++ Test - Returns a string representing the package version or an empty string if ++ not installed. ++ """ + assert aptpkg.version(*["wget"]) == aptpkg.__salt__["pkg_resource.version"]() - ++ + @patch("salt.modules.aptpkg.latest_version", MagicMock(return_value="")) - def test_upgrade_available(self): - """ - Test - Check whether or not an upgrade is available for a given package. - """ -- with patch("salt.modules.aptpkg.latest_version", MagicMock(return_value="")): -- self.assertFalse(aptpkg.upgrade_available("wget")) ++ def test_upgrade_available(self): ++ """ ++ Test - Check whether or not an upgrade is available for a given package. ++ """ + assert not aptpkg.upgrade_available("wget") - ++ + @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)) + @patch( + "salt.modules.aptpkg.__salt__", + {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})}, + ) - def test_add_repo_key(self): - """ - Test - Add a repo key. - """ -- with patch( -- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS) -- ): -- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"}) -- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): -- self.assertTrue( -- aptpkg.add_repo_key( -- keyserver="keyserver.ubuntu.com", keyid="FBB75451" -- ) -- ) ++ def test_add_repo_key(self): ++ """ ++ Test - Add a repo key. ++ """ + assert aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com", keyid="FBB75451") - ++ + @patch("salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS)) + @patch( + "salt.modules.aptpkg.__salt__", + {"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "OK"})}, + ) - def test_add_repo_key_failed(self): - """ - Test - Add a repo key using incomplete input data. - """ -- with patch( -- "salt.modules.aptpkg.get_repo_keys", MagicMock(return_value=REPO_KEYS) -- ): -- kwargs = {"keyserver": "keyserver.ubuntu.com"} -- mock = MagicMock(return_value={"retcode": 0, "stdout": "OK"}) -- with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): -- self.assertRaises(SaltInvocationError, aptpkg.add_repo_key, **kwargs) ++ def test_add_repo_key_failed(self): ++ """ ++ Test - Add a repo key using incomplete input data. ++ """ + with pytest.raises(SaltInvocationError) as ex: + aptpkg.add_repo_key(keyserver="keyserver.ubuntu.com") + assert ( + " No keyid or keyid too short for keyserver: keyserver.ubuntu.com" + in str(ex) + ) - - def test_get_repo_keys(self): - """ -@@ -234,35 +240,48 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): - self.assertEqual(aptpkg.get_repo_keys(), REPO_KEYS) - ++ ++ def test_get_repo_keys(self): ++ """ ++ Test - List known repo key details. ++ """ ++ mock = MagicMock(return_value={"retcode": 0, "stdout": APT_KEY_LIST}) ++ with patch.dict(aptpkg.__salt__, {"cmd.run_all": mock}): ++ self.assertEqual(aptpkg.get_repo_keys(), REPO_KEYS) ++ + @patch( + "salt.modules.aptpkg.__salt__", + {"lowpkg.file_dict": MagicMock(return_value=LOWPKG_FILES)}, + ) - def test_file_dict(self): - """ - Test - List the files that belong to a package, grouped by package. 
- """ -- mock = MagicMock(return_value=LOWPKG_FILES) -- with patch.dict(aptpkg.__salt__, {"lowpkg.file_dict": mock}): -- self.assertEqual(aptpkg.file_dict("wget"), LOWPKG_FILES) ++ def test_file_dict(self): ++ """ ++ Test - List the files that belong to a package, grouped by package. ++ """ + assert aptpkg.file_dict("wget") == LOWPKG_FILES - ++ + @patch( + "salt.modules.aptpkg.__salt__", + { @@ -407,53 +551,42 @@ index a7b7a34166..77d8b84896 100644 + ) + }, + ) - def test_file_list(self): - """ -- Test - List the files that belong to a package. ++ def test_file_list(self): ++ """ + Test 'file_list' function, which is just an alias to the lowpkg 'file_list' + - """ -- files = { -- "errors": LOWPKG_FILES["errors"], -- "files": LOWPKG_FILES["packages"]["wget"], -- } -- mock = MagicMock(return_value=files) -- with patch.dict(aptpkg.__salt__, {"lowpkg.file_list": mock}): -- self.assertEqual(aptpkg.file_list("wget"), files) ++ """ + assert aptpkg.file_list("wget") == aptpkg.__salt__["lowpkg.file_list"]() - ++ + @patch( + "salt.modules.aptpkg.__salt__", + {"cmd.run_stdout": MagicMock(return_value="wget\t\t\t\t\t\tinstall")}, + ) - def test_get_selections(self): - """ - Test - View package state from the dpkg database. - """ -- selections = {"install": ["wget"]} -- mock = MagicMock(return_value="wget\t\t\t\t\t\tinstall") -- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}): -- self.assertEqual(aptpkg.get_selections("wget"), selections) ++ def test_get_selections(self): ++ """ ++ Test - View package state from the dpkg database. ++ """ + assert aptpkg.get_selections("wget") == {"install": ["wget"]} - ++ + @patch( + "salt.modules.aptpkg.__salt__", + {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, + ) - def test_info_installed(self): - """ - Test - Return the information of the named package(s) installed on the system. -@@ -274,21 +293,101 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - if installed["wget"].get(names[name], False): - installed["wget"][name] = installed["wget"].pop(names[name]) - -- mock = MagicMock(return_value=LOWPKG_INFO) -- with patch.dict(aptpkg.__salt__, {"lowpkg.info": mock}): -- del installed["wget"]["status"] -- self.assertEqual(aptpkg.info_installed("wget"), installed) -- self.assertEqual(len(aptpkg.info_installed()), 1) -+ assert aptpkg.info_installed("wget") == installed - ++ def test_info_installed(self): ++ """ ++ Test - Return the information of the named package(s) installed on the system. ++ """ ++ names = {"group": "section", "packager": "maintainer", "url": "homepage"} ++ ++ installed = copy.deepcopy({"wget": LOWPKG_INFO["wget"]}) ++ for name in names: ++ if installed["wget"].get(names[name], False): ++ installed["wget"][name] = installed["wget"].pop(names[name]) ++ ++ del installed["wget"]["status"] ++ self.assertEqual(aptpkg.info_installed("wget"), installed) ++ self.assertEqual(len(aptpkg.info_installed()), 1) ++ + @patch( + "salt.modules.aptpkg.__salt__", + {"lowpkg.info": MagicMock(return_value=LOWPKG_INFO)}, @@ -531,16 +664,12 @@ index a7b7a34166..77d8b84896 100644 + "salt.modules.aptpkg.__salt__", + {"cmd.run_stdout": MagicMock(return_value="wget: /usr/bin/wget")}, + ) - def test_owner(self): - """ - Test - Return the name of the package that owns the file. 
- """ -- paths = ["/usr/bin/wget"] -- mock = MagicMock(return_value="wget: /usr/bin/wget") -- with patch.dict(aptpkg.__salt__, {"cmd.run_stdout": mock}): -- self.assertEqual(aptpkg.owner(*paths), "wget") ++ def test_owner(self): ++ """ ++ Test - Return the name of the package that owns the file. ++ """ + assert aptpkg.owner("/usr/bin/wget") == "wget" - ++ + @patch("salt.utils.pkg.clear_rtag", MagicMock()) + @patch( + "salt.modules.aptpkg.__salt__", @@ -551,13 +680,25 @@ index a7b7a34166..77d8b84896 100644 + "config.get": MagicMock(return_value=False), + }, + ) - def test_refresh_db(self): - """ - Test - Updates the APT database to latest packages based upon repositories. -@@ -308,6 +407,16 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - ): - self.assertEqual(aptpkg.refresh_db(), refresh_db) - ++ def test_refresh_db(self): ++ """ ++ Test - Updates the APT database to latest packages based upon repositories. ++ """ ++ refresh_db = { ++ "http://security.ubuntu.com trusty-security InRelease": True, ++ "http://security.ubuntu.com trusty-security/main Sources": True, ++ "http://security.ubuntu.com trusty-security/main Translation-en": None, ++ "http://security.ubuntu.com trusty-security/main amd64 Packages": True, ++ "http://security.ubuntu.com trusty-security/main i386 Packages": True, ++ } ++ mock = MagicMock(return_value={"retcode": 0, "stdout": APT_Q_UPDATE}) ++ with patch("salt.utils.pkg.clear_rtag", MagicMock()): ++ with patch.dict( ++ aptpkg.__salt__, ++ {"cmd.run_all": mock, "config.get": MagicMock(return_value=False)}, ++ ): ++ self.assertEqual(aptpkg.refresh_db(), refresh_db) ++ + @patch("salt.utils.pkg.clear_rtag", MagicMock()) + @patch( + "salt.modules.aptpkg.__salt__", @@ -568,39 +709,50 @@ index a7b7a34166..77d8b84896 100644 + "config.get": MagicMock(return_value=False), + }, + ) - def test_refresh_db_failed(self): - """ - Test - Update the APT database using unreachable repositories. -@@ -340,29 +449,33 @@ class AptPkgTestCase(TestCase, LoaderModuleMockMixin): - assert aptpkg.autoremove(list_only=True) == [] - assert aptpkg.autoremove(list_only=True, purge=True) == [] - -- def test_install(self): -- """ -- Test - Install packages. -- """ -- with patch("salt.modules.aptpkg.install", MagicMock(return_value=INSTALL)): -- self.assertEqual(aptpkg.install(name="tmux"), INSTALL) -- kwargs = {"force_conf_new": True} -- self.assertEqual(aptpkg.install(name="tmux", **kwargs), INSTALL) -- ++ def test_refresh_db_failed(self): ++ """ ++ Test - Update the APT database using unreachable repositories. ++ """ ++ kwargs = {"failhard": True} ++ mock = MagicMock(return_value={"retcode": 0, "stdout": APT_Q_UPDATE_ERROR}) ++ with patch("salt.utils.pkg.clear_rtag", MagicMock()): ++ with patch.dict( ++ aptpkg.__salt__, ++ {"cmd.run_all": mock, "config.get": MagicMock(return_value=False)}, ++ ): ++ self.assertRaises(CommandExecutionError, aptpkg.refresh_db, **kwargs) ++ ++ def test_autoremove(self): ++ """ ++ Test - Remove packages not required by another package. 
++ """ ++ with patch("salt.modules.aptpkg.list_pkgs", MagicMock(return_value=PACKAGES)): ++ patch_kwargs = { ++ "__salt__": { ++ "config.get": MagicMock(return_value=True), ++ "cmd.run_all": MagicMock( ++ return_value=MagicMock(return_value=AUTOREMOVE) ++ ), ++ } ++ } ++ with patch.multiple(aptpkg, **patch_kwargs): ++ assert aptpkg.autoremove() == {} ++ assert aptpkg.autoremove(purge=True) == {} ++ assert aptpkg.autoremove(list_only=True) == [] ++ assert aptpkg.autoremove(list_only=True, purge=True) == [] ++ + @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)) - def test_remove(self): - """ - Test - Remove packages. - """ -- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)): -- self.assertEqual(aptpkg.remove(name="tmux"), UNINSTALL) ++ def test_remove(self): ++ """ ++ Test - Remove packages. ++ """ + assert aptpkg.remove(name="tmux") == UNINSTALL - ++ + @patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)) - def test_purge(self): - """ - Test - Remove packages along with all configuration files. - """ -- with patch("salt.modules.aptpkg._uninstall", MagicMock(return_value=UNINSTALL)): -- self.assertEqual(aptpkg.purge(name="tmux"), UNINSTALL) -- ++ def test_purge(self): ++ """ ++ Test - Remove packages along with all configuration files. ++ """ + assert aptpkg.purge(name="tmux") == UNINSTALL + + @patch("salt.utils.pkg.clear_rtag", MagicMock()) @@ -616,9 +768,507 @@ index a7b7a34166..77d8b84896 100644 + } + } + ) - def test_upgrade(self): - """ - Test - Upgrades all packages. ++ def test_upgrade(self): ++ """ ++ Test - Upgrades all packages. ++ """ ++ with patch("salt.utils.pkg.clear_rtag", MagicMock()): ++ with patch( ++ "salt.modules.aptpkg.list_pkgs", MagicMock(return_value=UNINSTALL) ++ ): ++ mock_cmd = MagicMock(return_value={"retcode": 0, "stdout": UPGRADE}) ++ patch_kwargs = { ++ "__salt__": { ++ "config.get": MagicMock(return_value=True), ++ "cmd.run_all": mock_cmd, ++ } ++ } ++ with patch.multiple(aptpkg, **patch_kwargs): ++ self.assertEqual(aptpkg.upgrade(), dict()) ++ kwargs = {"force_conf_new": True} ++ self.assertEqual(aptpkg.upgrade(**kwargs), dict()) ++ ++ def test_upgrade_downloadonly(self): ++ """ ++ Tests the download-only options for upgrade. ++ """ ++ with patch("salt.utils.pkg.clear_rtag", MagicMock()): ++ with patch( ++ "salt.modules.aptpkg.list_pkgs", MagicMock(return_value=UNINSTALL) ++ ): ++ mock_cmd = MagicMock(return_value={"retcode": 0, "stdout": UPGRADE}) ++ patch_kwargs = { ++ "__salt__": { ++ "config.get": MagicMock(return_value=True), ++ "cmd.run_all": mock_cmd, ++ }, ++ } ++ with patch.multiple(aptpkg, **patch_kwargs): ++ aptpkg.upgrade() ++ args_matching = [ ++ True ++ for args in patch_kwargs["__salt__"]["cmd.run_all"].call_args[0] ++ if "--download-only" in args ++ ] ++ # Here we shouldn't see the parameter and args_matching should be empty. ++ self.assertFalse(any(args_matching)) ++ ++ aptpkg.upgrade(downloadonly=True) ++ args_matching = [ ++ True ++ for args in patch_kwargs["__salt__"]["cmd.run_all"].call_args[0] ++ if "--download-only" in args ++ ] ++ # --download-only should be in the args list and we should have at least on True in the list. ++ self.assertTrue(any(args_matching)) ++ ++ aptpkg.upgrade(download_only=True) ++ args_matching = [ ++ True ++ for args in patch_kwargs["__salt__"]["cmd.run_all"].call_args[0] ++ if "--download-only" in args ++ ] ++ # --download-only should be in the args list and we should have at least on True in the list. 
++ self.assertTrue(any(args_matching)) ++ ++ def test_show(self): ++ """ ++ Test that the pkg.show function properly parses apt-cache show output. ++ This test uses an abridged output per package, for simplicity. ++ """ ++ show_mock_success = MagicMock( ++ return_value={ ++ "retcode": 0, ++ "pid": 12345, ++ "stderr": "", ++ "stdout": textwrap.dedent( ++ """\ ++ Package: foo1.0 ++ Architecture: amd64 ++ Version: 1.0.5-3ubuntu4 ++ Description: A silly package (1.0 release cycle) ++ Provides: foo ++ Suggests: foo-doc ++ ++ Package: foo1.0 ++ Architecture: amd64 ++ Version: 1.0.4-2ubuntu1 ++ Description: A silly package (1.0 release cycle) ++ Provides: foo ++ Suggests: foo-doc ++ ++ Package: foo-doc ++ Architecture: all ++ Version: 1.0.5-3ubuntu4 ++ Description: Silly documentation for a silly package (1.0 release cycle) ++ ++ Package: foo-doc ++ Architecture: all ++ Version: 1.0.4-2ubuntu1 ++ Description: Silly documentation for a silly package (1.0 release cycle) ++ ++ """ ++ ), ++ } ++ ) ++ ++ show_mock_failure = MagicMock( ++ return_value={ ++ "retcode": 1, ++ "pid": 12345, ++ "stderr": textwrap.dedent( ++ """\ ++ N: Unable to locate package foo* ++ N: Couldn't find any package by glob 'foo*' ++ N: Couldn't find any package by regex 'foo*' ++ E: No packages found ++ """ ++ ), ++ "stdout": "", ++ } ++ ) ++ ++ refresh_mock = Mock() ++ ++ expected = { ++ "foo1.0": { ++ "1.0.5-3ubuntu4": { ++ "Architecture": "amd64", ++ "Description": "A silly package (1.0 release cycle)", ++ "Provides": "foo", ++ "Suggests": "foo-doc", ++ }, ++ "1.0.4-2ubuntu1": { ++ "Architecture": "amd64", ++ "Description": "A silly package (1.0 release cycle)", ++ "Provides": "foo", ++ "Suggests": "foo-doc", ++ }, ++ }, ++ "foo-doc": { ++ "1.0.5-3ubuntu4": { ++ "Architecture": "all", ++ "Description": "Silly documentation for a silly package (1.0 release cycle)", ++ }, ++ "1.0.4-2ubuntu1": { ++ "Architecture": "all", ++ "Description": "Silly documentation for a silly package (1.0 release cycle)", ++ }, ++ }, ++ } ++ ++ # Make a copy of the above dict and strip out some keys to produce the ++ # expected filtered result. 
++ filtered = copy.deepcopy(expected) ++ for k1 in filtered: ++ for k2 in filtered[k1]: ++ # Using list() because we will modify the dict during iteration ++ for k3 in list(filtered[k1][k2]): ++ if k3 not in ("Description", "Provides"): ++ filtered[k1][k2].pop(k3) ++ ++ with patch.dict( ++ aptpkg.__salt__, {"cmd.run_all": show_mock_success} ++ ), patch.object(aptpkg, "refresh_db", refresh_mock): ++ ++ # Test success (no refresh) ++ self.assertEqual(aptpkg.show("foo*"), expected) ++ refresh_mock.assert_not_called() ++ refresh_mock.reset_mock() ++ ++ # Test success (with refresh) ++ self.assertEqual(aptpkg.show("foo*", refresh=True), expected) ++ self.assert_called_once(refresh_mock) ++ refresh_mock.reset_mock() ++ ++ # Test filtered return ++ self.assertEqual( ++ aptpkg.show("foo*", filter="description,provides"), filtered ++ ) ++ refresh_mock.assert_not_called() ++ refresh_mock.reset_mock() ++ ++ with patch.dict( ++ aptpkg.__salt__, {"cmd.run_all": show_mock_failure} ++ ), patch.object(aptpkg, "refresh_db", refresh_mock): ++ ++ # Test failure (no refresh) ++ self.assertEqual(aptpkg.show("foo*"), {}) ++ refresh_mock.assert_not_called() ++ refresh_mock.reset_mock() ++ ++ # Test failure (with refresh) ++ self.assertEqual(aptpkg.show("foo*", refresh=True), {}) ++ self.assert_called_once(refresh_mock) ++ refresh_mock.reset_mock() ++ ++ def test_mod_repo_enabled(self): ++ """ ++ Checks if a repo is enabled or disabled depending on the passed kwargs. ++ """ ++ with patch.dict( ++ aptpkg.__salt__, ++ {"config.option": MagicMock(), "no_proxy": MagicMock(return_value=False)}, ++ ): ++ with patch("salt.modules.aptpkg._check_apt", MagicMock(return_value=True)): ++ with patch( ++ "salt.modules.aptpkg.refresh_db", MagicMock(return_value={}) ++ ): ++ with patch( ++ "salt.utils.data.is_true", MagicMock(return_value=True) ++ ) as data_is_true: ++ with patch( ++ "salt.modules.aptpkg.sourceslist", MagicMock(), create=True ++ ): ++ repo = aptpkg.mod_repo("foo", enabled=False) ++ data_is_true.assert_called_with(False) ++ # with disabled=True; should call salt.utils.data.is_true True ++ data_is_true.reset_mock() ++ repo = aptpkg.mod_repo("foo", disabled=True) ++ data_is_true.assert_called_with(True) ++ # with enabled=True; should call salt.utils.data.is_true with False ++ data_is_true.reset_mock() ++ repo = aptpkg.mod_repo("foo", enabled=True) ++ data_is_true.assert_called_with(True) ++ # with disabled=True; should call salt.utils.data.is_true False ++ data_is_true.reset_mock() ++ repo = aptpkg.mod_repo("foo", disabled=False) ++ data_is_true.assert_called_with(False) ++ ++ @patch( ++ "salt.utils.path.os_walk", MagicMock(return_value=[("test", "test", "test")]) ++ ) ++ @patch("os.path.getsize", MagicMock(return_value=123456)) ++ @patch("os.path.getctime", MagicMock(return_value=1234567890.123456)) ++ @patch( ++ "fnmatch.filter", ++ MagicMock(return_value=["/var/cache/apt/archive/test_package.rpm"]), ++ ) ++ def test_list_downloaded(self): ++ """ ++ Test downloaded packages listing. 
++
++    @patch(
++        "salt.utils.path.os_walk", MagicMock(return_value=[("test", "test", "test")])
++    )
++    @patch("os.path.getsize", MagicMock(return_value=123456))
++    @patch("os.path.getctime", MagicMock(return_value=1234567890.123456))
++    @patch(
++        "fnmatch.filter",
++        MagicMock(return_value=["/var/cache/apt/archive/test_package.rpm"]),
++    )
++    def test_list_downloaded(self):
++        """
++        Test downloaded packages listing.
++        :return:
++        """
++        DOWNLOADED_RET = {
++            "test-package": {
++                "1.0": {
++                    "path": "/var/cache/apt/archive/test_package.rpm",
++                    "size": 123456,
++                    "creation_date_time_t": 1234567890,
++                    "creation_date_time": "2009-02-13T23:31:30",
++                }
++            }
++        }
++
++        with patch.dict(
++            aptpkg.__salt__,
++            {
++                "lowpkg.bin_pkg_info": MagicMock(
++                    return_value={"name": "test-package", "version": "1.0"}
++                )
++            },
++        ):
++            list_downloaded = aptpkg.list_downloaded()
++            self.assertEqual(len(list_downloaded), 1)
++            self.assertDictEqual(list_downloaded, DOWNLOADED_RET)
++
++    def test__skip_source(self):
++        """
++        Test __skip_source.
++        :return:
++        """
++        # Valid source
++        source_type = "deb"
++        source_uri = "http://cdn-aws.deb.debian.org/debian"
++        source_line = "deb http://cdn-aws.deb.debian.org/debian stretch main\n"
++
++        mock_source = MockSourceEntry(source_uri, source_type, source_line, False)
++
++        ret = aptpkg._skip_source(mock_source)
++        self.assertFalse(ret)
++
++        # Invalid source type
++        source_type = "ded"
++        source_uri = "http://cdn-aws.deb.debian.org/debian"
++        source_line = "deb http://cdn-aws.deb.debian.org/debian stretch main\n"
++
++        mock_source = MockSourceEntry(source_uri, source_type, source_line, True)
++
++        ret = aptpkg._skip_source(mock_source)
++        self.assertTrue(ret)
++
++        # Invalid source entry (valid type, URI wrapped in brackets), not skipped
++        source_type = "deb"
++        source_uri = "http://cdn-aws.deb.debian.org/debian"
++        source_line = "deb [http://cdn-aws.deb.debian.org/debian] stretch main\n"
++
++        mock_source = MockSourceEntry(source_uri, source_type, source_line, True)
++
++        ret = aptpkg._skip_source(mock_source)
++        self.assertFalse(ret)
++
++    def test_normalize_name(self):
++        """
++        Test that package is normalized only when it should be
++        """
++        with patch.dict(aptpkg.__grains__, {"osarch": "amd64"}):
++            result = aptpkg.normalize_name("foo")
++            assert result == "foo", result
++            result = aptpkg.normalize_name("foo:amd64")
++            assert result == "foo", result
++            result = aptpkg.normalize_name("foo:any")
++            assert result == "foo", result
++            result = aptpkg.normalize_name("foo:i386")
++            assert result == "foo:i386", result
++
++    def test_list_repos(self):
++        """
++        Checks results from list_repos
++        """
++        # Valid source
++        source_type = "deb"
++        source_uri = "http://cdn-aws.deb.debian.org/debian/"
++        source_line = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
++
++        mock_source = MockSourceEntry(source_uri, source_type, source_line, False)
++        mock_source_list = MockSourceList()
++        mock_source_list.list = [mock_source]
++
++        with patch("salt.modules.aptpkg._check_apt", MagicMock(return_value=True)):
++            with patch("salt.modules.aptpkg.sourceslist", MagicMock(), create=True):
++                with patch(
++                    "salt.modules.aptpkg.sourceslist.SourcesList",
++                    MagicMock(return_value=mock_source_list),
++                    create=True,
++                ):
++                    repos = aptpkg.list_repos()
++                    self.assertIn(source_uri, repos)
++
++                    assert isinstance(repos[source_uri], list)
++                    assert len(repos[source_uri]) == 1
++
++                    # Make sure the last character of the URI in the line is still a /
++                    self.assertIn("line", repos[source_uri][0])
++                    _uri = _get_uri(repos[source_uri][0]["line"])
++                    self.assertEqual(_uri[-1], "/")
++
++                    # Make sure the last character of the URI is still a /
++                    self.assertIn("uri", repos[source_uri][0])
++                    self.assertEqual(repos[source_uri][0]["uri"][-1], "/")
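++
++    # list_repos keys its result by source URI; the two trailing-slash
++    # checks above guard against the URI being rewritten when the source
++    # line is parsed and reassembled.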
++
++    def test_expand_repo_def(self):
++        """
++        Checks results from expand_repo_def
++        """
++        source_type = "deb"
++        source_uri = "http://cdn-aws.deb.debian.org/debian/"
++        source_line = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
++        source_file = "/etc/apt/sources.list"
++
++        mock_source = MockSourceEntry(
++            source_uri, source_type, source_line, False, file=source_file
++        )
++
++        # Valid source
++        with patch("salt.modules.aptpkg._check_apt", MagicMock(return_value=True)):
++            with patch("salt.modules.aptpkg.sourceslist", MagicMock(), create=True):
++                with patch(
++                    "salt.modules.aptpkg.sourceslist.SourceEntry",
++                    MagicMock(return_value=mock_source),
++                    create=True,
++                ):
++                    repo = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
++                    sanitized = aptpkg.expand_repo_def(repo=repo, file=source_file)
++
++                    assert isinstance(sanitized, dict)
++                    self.assertIn("uri", sanitized)
++
++                    # Make sure the last character of the URI is still a /
++                    self.assertEqual(sanitized["uri"][-1], "/")
++
++
++@skipIf(pytest is None, "PyTest is missing")
++class AptUtilsTestCase(TestCase, LoaderModuleMockMixin):
++    """
++    apt utils test case
++    """
++
++    def setup_loader_modules(self):
++        return {aptpkg: {}}
++
++    def test_call_apt_default(self):
++        """
++        Call default apt.
++        :return:
++        """
++        with patch.dict(
++            aptpkg.__salt__,
++            {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
++        ):
++            aptpkg._call_apt(["apt-get", "install", "emacs"])  # pylint: disable=W0106
++            aptpkg.__salt__["cmd.run_all"].assert_called_once_with(
++                ["apt-get", "install", "emacs"],
++                env={},
++                output_loglevel="trace",
++                python_shell=False,
++            )
++
++    @patch("salt.utils.systemd.has_scope", MagicMock(return_value=True))
++    def test_call_apt_in_scope(self):
++        """
++        Call apt within the scope.
++        :return:
++        """
++        with patch.dict(
++            aptpkg.__salt__,
++            {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=True)},
++        ):
++            aptpkg._call_apt(["apt-get", "purge", "vim"])  # pylint: disable=W0106
++            aptpkg.__salt__["cmd.run_all"].assert_called_once_with(
++                [
++                    "systemd-run",
++                    "--scope",
++                    "--description",
++                    '"salt.modules.aptpkg"',
++                    "apt-get",
++                    "purge",
++                    "vim",
++                ],
++                env={},
++                output_loglevel="trace",
++                python_shell=False,
++            )
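++
++    # The argv asserted above documents the scope-wrapper contract: when
++    # config.get reports a truthy "systemd.scope" setting and
++    # salt.utils.systemd.has_scope detects support, _call_apt runs apt
++    # inside a transient scope unit via
++    # ["systemd-run", "--scope", "--description", '"salt.modules.aptpkg"'].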
++
++    def test_call_apt_with_kwargs(self):
++        """
++        Call apt with the optional keyword arguments.
++        :return:
++        """
++        with patch.dict(
++            aptpkg.__salt__,
++            {"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
++        ):
++            aptpkg._call_apt(
++                ["dpkg", "-l", "python"],
++                python_shell=True,
++                output_loglevel="quiet",
++                ignore_retcode=False,
++                username="Darth Vader",
++            )  # pylint: disable=W0106
++            aptpkg.__salt__["cmd.run_all"].assert_called_once_with(
++                ["dpkg", "-l", "python"],
++                env={},
++                ignore_retcode=False,
++                output_loglevel="quiet",
++                python_shell=True,
++                username="Darth Vader",
++            )
++
++    def test_call_apt_dpkg_lock(self):
++        """
++        Call apt and ensure the dpkg locking is handled
++        :return:
++        """
++        cmd_side_effect = [
++            {"stderr": "Could not get lock"},
++            {"stderr": "Could not get lock"},
++            {"stderr": "Could not get lock"},
++            {"stderr": "Could not get lock"},
++            {"stderr": "", "stdout": ""},
++        ]
++
++        cmd_mock = MagicMock(side_effect=cmd_side_effect)
++        cmd_call = call(
++            ["dpkg", "-l", "python"],
++            env={},
++            ignore_retcode=False,
++            output_loglevel="quiet",
++            python_shell=True,
++            username="Darth Vader",
++        )
++        expected_calls = [cmd_call] * 5
++
++        with patch.dict(
++            aptpkg.__salt__,
++            {"cmd.run_all": cmd_mock, "config.get": MagicMock(return_value=False)},
++        ):
++            with patch("salt.modules.aptpkg.time.sleep", MagicMock()) as sleep_mock:
++                aptpkg._call_apt(
++                    ["dpkg", "-l", "python"],
++                    python_shell=True,
++                    output_loglevel="quiet",
++                    ignore_retcode=False,
++                    username="Darth Vader",
++                )  # pylint: disable=W0106
++
++                # We should have slept at least 4 times
++                assert sleep_mock.call_count >= 4
++
++                # We should attempt to call the cmd 5 times
++                self.assertEqual(cmd_mock.call_count, 5)
++                cmd_mock.assert_has_calls(expected_calls)
 diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py
 index 071c0f0742..160bbcd5b1 100644
 --- a/tests/unit/modules/test_dpkg_lowpkg.py
@@ -842,6 +1492,6 @@ index 071c0f0742..160bbcd5b1 100644
 +    assert ret["emacs"]["license"] == "BSD v3"
 +    assert ret["emacs"]["version"] == "46.1"
 --
-2.29.2
+2.33.0
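
Note on the dpkg-lock test above: it drives ``_call_apt`` through four "Could not get lock" results before a clean one, asserting that the command is attempted exactly five times with a sleep between attempts. As a rough illustrative sketch only (the helper name, retry count, and delay are assumptions, not Salt's actual implementation), the retried call pattern under test looks like:

    import time

    def call_with_dpkg_lock_retry(run_cmd, cmd, attempts=5, delay=2):
        """Re-run ``run_cmd`` while stderr reports a held dpkg lock."""
        ret = {}
        for attempt in range(attempts):
            ret = run_cmd(cmd)
            if "Could not get lock" not in ret.get("stderr", ""):
                break  # lock released, or an unrelated result: stop retrying
            if attempt < attempts - 1:
                time.sleep(delay)  # wait before the next attempt
        return ret

diff --git a/do-noop-for-services-states-when-running-systemd-in-.patch b/do-noop-for-services-states-when-running-systemd-in-.patch
deleted file mode 100644
index e05741b..0000000
--- a/do-noop-for-services-states-when-running-systemd-in-.patch
+++ /dev/null
@@ -1,631 +0,0 @@
-From 6837044f5a207cf39f3064428b0ed276226a5e39 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
-
-Date: Fri, 9 Jul 2021 09:05:55 +0100
-Subject: [PATCH] Do noop for services states when running systemd in
- offline mode (bsc#1187787)
-
-transactional_updates: do not execute states in parallel but use a queue (bsc#1188170)
-
-Add changes suggested by pre-commit
-
-Fix unit tests for transactional_updates module
-
-Add unit tests to cover queue cases on transaction_update states
-
-Refactor offline checkers and add unit tests
-
-Fix regression that always consider offline mode
-
-Add proper mocking and skip tests when running in offline mode
----
- salt/modules/systemd_service.py | 5 +
- salt/modules/transactional_update.py | 56 +++-
- salt/states/service.py | 14 +
- tests/integration/states/test_service.py | 4 +
- .../unit/modules/test_transactional_update.py | 264 +++++++++++++++++-
- tests/unit/states/test_service.py | 43 ++-
- 6 files changed, 377 insertions(+), 9 deletions(-)
-
-diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
-index 49e5bd813f..8d495433f8 100644
---- a/salt/modules/systemd_service.py
-+++ 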
b/salt/modules/systemd_service.py -@@ -102,6 +102,11 @@ def _check_available(name): - """ - Returns boolean telling whether or not the named service is available - """ -+ if offline(): -+ raise CommandExecutionError( -+ "Cannot run in offline mode. Failed to get information on unit '%s'" % name -+ ) -+ - _status = _systemctl_status(name) - sd_version = salt.utils.systemd.version(__context__) - if sd_version is not None and sd_version >= 231: -diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py -index 9cdaddb91a..3af9d91822 100644 ---- a/salt/modules/transactional_update.py -+++ b/salt/modules/transactional_update.py -@@ -281,10 +281,14 @@ import os - import sys - import tempfile - -+# required by _check_queue invocation later -+import time # pylint: disable=unused-import -+ - import salt.client.ssh.state - import salt.client.ssh.wrapper.state - import salt.exceptions - import salt.utils.args -+from salt.modules.state import _check_queue, _prior_running_states, _wait, running - - __func_alias__ = {"apply_": "apply"} - -@@ -295,7 +299,14 @@ def __virtual__(): - """ - transactional-update command is required. - """ -+ global _check_queue, _wait, _prior_running_states, running - if __utils__["path.which"]("transactional-update"): -+ _check_queue = salt.utils.functools.namespaced_function(_check_queue, globals()) -+ _wait = salt.utils.functools.namespaced_function(_wait, globals()) -+ _prior_running_states = salt.utils.functools.namespaced_function( -+ _prior_running_states, globals() -+ ) -+ running = salt.utils.functools.namespaced_function(running, globals()) - return True - else: - return (False, "Module transactional_update requires a transactional system") -@@ -1068,7 +1079,13 @@ def _create_and_execute_salt_state( - - - def sls( -- mods, saltenv="base", test=None, exclude=None, activate_transaction=False, **kwargs -+ mods, -+ saltenv="base", -+ test=None, -+ exclude=None, -+ activate_transaction=False, -+ queue=False, -+ **kwargs - ): - """Execute the states in one or more SLS files inside a transaction. - -@@ -1093,6 +1110,13 @@ def sls( - (i.e there is a new snaphot in the system), a new reboot will - be scheduled (default False) - -+ queue -+ Instead of failing immediately when another state run is in progress, -+ queue the new state run to begin running once the other has finished. -+ -+ This option starts a new thread for each queued state run, so use this -+ option sparingly. (Default: False) -+ - For a formal description of the possible parameters accepted in - this function, check `state.sls` documentation. - -@@ -1104,6 +1128,10 @@ def sls( - salt microos transactional_update.sls stuff activate_transaction=True - - """ -+ conflict = _check_queue(queue, kwargs) -+ if conflict is not None: -+ return conflict -+ - # Get a copy of the pillar data, to avoid overwriting the current - # pillar, instead the one delegated - pillar = copy.deepcopy(__pillar__) -@@ -1156,7 +1184,7 @@ def sls( - ) - - --def highstate(activate_transaction=False, **kwargs): -+def highstate(activate_transaction=False, queue=False, **kwargs): - """Retrieve the state data from the salt master for this minion and - execute it inside a transaction. - -@@ -1168,6 +1196,13 @@ def highstate(activate_transaction=False, **kwargs): - (i.e there is a new snaphot in the system), a new reboot will - be scheduled (default False) - -+ queue -+ Instead of failing immediately when another state run is in progress, -+ queue the new state run to begin running once the other has finished. 
-+ -+ This option starts a new thread for each queued state run, so use this -+ option sparingly. (Default: False) -+ - CLI Example: - - .. code-block:: bash -@@ -1177,6 +1212,10 @@ def highstate(activate_transaction=False, **kwargs): - salt microos transactional_update.highstate activate_transaction=True - - """ -+ conflict = _check_queue(queue, kwargs) -+ if conflict is not None: -+ return conflict -+ - # Get a copy of the pillar data, to avoid overwriting the current - # pillar, instead the one delegated - pillar = copy.deepcopy(__pillar__) -@@ -1210,7 +1249,7 @@ def highstate(activate_transaction=False, **kwargs): - ) - - --def single(fun, name, test=None, activate_transaction=False, **kwargs): -+def single(fun, name, test=None, activate_transaction=False, queue=False, **kwargs): - """Execute a single state function with the named kwargs, returns - False if insufficient data is sent to the command - -@@ -1224,6 +1263,13 @@ def single(fun, name, test=None, activate_transaction=False, **kwargs): - (i.e there is a new snaphot in the system), a new reboot will - be scheduled (default False) - -+ queue -+ Instead of failing immediately when another state run is in progress, -+ queue the new state run to begin running once the other has finished. -+ -+ This option starts a new thread for each queued state run, so use this -+ option sparingly. (Default: False) -+ - CLI Example: - - .. code-block:: bash -@@ -1232,6 +1278,10 @@ def single(fun, name, test=None, activate_transaction=False, **kwargs): - salt microos transactional_update.single pkg.installed name=emacs activate_transaction=True - - """ -+ conflict = _check_queue(queue, kwargs) -+ if conflict is not None: -+ return conflict -+ - # Get a copy of the pillar data, to avoid overwriting the current - # pillar, instead the one delegated - pillar = copy.deepcopy(__pillar__) -diff --git a/salt/states/service.py b/salt/states/service.py -index 4ea36a78f6..3a216920f4 100644 ---- a/salt/states/service.py -+++ b/salt/states/service.py -@@ -342,6 +342,10 @@ def _disable(name, started, result=True, **kwargs): - return ret - - -+def _offline(): -+ return "service.offline" in __salt__ and __salt__["service.offline"]() -+ -+ - def _available(name, ret): - """ - Check if the service is available -@@ -436,6 +440,11 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs): - if isinstance(enable, str): - enable = salt.utils.data.is_true(enable) - -+ if _offline(): -+ ret["result"] = True -+ ret["comment"] = "Running in OFFLINE mode. Nothing to do" -+ return ret -+ - # Check if the service is available - try: - if not _available(name, ret): -@@ -631,6 +640,11 @@ def dead(name, enable=None, sig=None, init_delay=None, **kwargs): - if isinstance(enable, str): - enable = salt.utils.data.is_true(enable) - -+ if _offline(): -+ ret["result"] = True -+ ret["comment"] = "Running in OFFLINE mode. 
Nothing to do" -+ return ret -+ - # Check if the service is available - try: - if not _available(name, ret): -diff --git a/tests/integration/states/test_service.py b/tests/integration/states/test_service.py -index 81359d44ea..9c89d2cfd0 100644 ---- a/tests/integration/states/test_service.py -+++ b/tests/integration/states/test_service.py -@@ -26,6 +26,7 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin): - cmd_name = "crontab" - os_family = self.run_function("grains.get", ["os_family"]) - os_release = self.run_function("grains.get", ["osrelease"]) -+ is_systemd = self.run_function("grains.get", ["systemd"]) - self.stopped = False - self.running = True - if os_family == "RedHat": -@@ -53,6 +54,9 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin): - if os_family != "Windows" and salt.utils.path.which(cmd_name) is None: - self.skipTest("{} is not installed".format(cmd_name)) - -+ if is_systemd and self.run_function("service.offline"): -+ self.skipTest("systemd is OFFLINE") -+ - def tearDown(self): - if self.post_srv_disable: - self.run_function("service.disable", name=self.service_name) -diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py -index 2d30f296d7..6f8587baa0 100644 ---- a/tests/unit/modules/test_transactional_update.py -+++ b/tests/unit/modules/test_transactional_update.py -@@ -1,6 +1,7 @@ - import sys - - import pytest -+import salt.modules.state as statemod - import salt.modules.transactional_update as tu - import salt.utils.platform - from salt.exceptions import CommandExecutionError -@@ -16,7 +17,10 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - """ - - def setup_loader_modules(self): -- return {tu: {"__salt__": {}, "__utils__": {}}} -+ return { -+ tu: {"__salt__": {}, "__utils__": {}}, -+ statemod: {"__salt__": {}, "__context__": {}}, -+ } - - def test__global_params_no_self_update(self): - """Test transactional_update._global_params without self_update""" -@@ -643,11 +647,103 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - opts_mock = { - "hash_type": "md5", - } -+ salt_mock = { -+ "saltutil.is_running": MagicMock(return_value=[]), -+ } - get_sls_opts.return_value = opts_mock -- with patch.dict(tu.__opts__, opts_mock): -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): - assert tu.sls("module") == "result" - _create_and_execute_salt_state.assert_called_once() - -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") -+ @patch("salt.fileclient.get_file_client") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_sls_queue_true( -+ self, -+ get_sls_opts, -+ get_file_client, -+ TransactionalUpdateHighstate, -+ _create_and_execute_salt_state, -+ ): -+ """Test transactional_update.sls""" -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate -+ TransactionalUpdateHighstate.render_highstate.return_value = (None, []) -+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, []) -+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, []) -+ TransactionalUpdateHighstate.state.verify_high.return_value = [] -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ 
} -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.sls("module", queue=True) == "result" -+ _create_and_execute_salt_state.assert_called_once() -+ -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") -+ @patch("salt.fileclient.get_file_client") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_sls_queue_false_failing( -+ self, -+ get_sls_opts, -+ get_file_client, -+ TransactionalUpdateHighstate, -+ _create_and_execute_salt_state, -+ ): -+ """Test transactional_update.sls""" -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate -+ TransactionalUpdateHighstate.render_highstate.return_value = (None, []) -+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, []) -+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, []) -+ TransactionalUpdateHighstate.state.verify_high.return_value = [] -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ } -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.sls("module", queue=False) == [ -+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096' -+ ] -+ - @patch("salt.modules.transactional_update._create_and_execute_salt_state") - @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") - @patch("salt.fileclient.get_file_client") -@@ -666,11 +762,95 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - opts_mock = { - "hash_type": "md5", - } -+ salt_mock = { -+ "saltutil.is_running": MagicMock(return_value=[]), -+ } - get_sls_opts.return_value = opts_mock -- with patch.dict(tu.__opts__, opts_mock): -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): - assert tu.highstate() == "result" - _create_and_execute_salt_state.assert_called_once() - -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") -+ @patch("salt.fileclient.get_file_client") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_highstate_queue_true( -+ self, -+ get_sls_opts, -+ get_file_client, -+ TransactionalUpdateHighstate, -+ _create_and_execute_salt_state, -+ ): -+ """Test transactional_update.highstage""" -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ } -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.highstate(queue=True) == "result" -+ _create_and_execute_salt_state.assert_called_once() -+ -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ 
@patch("salt.modules.transactional_update.TransactionalUpdateHighstate") -+ @patch("salt.fileclient.get_file_client") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_highstate_queue_false_failing( -+ self, -+ get_sls_opts, -+ get_file_client, -+ TransactionalUpdateHighstate, -+ _create_and_execute_salt_state, -+ ): -+ """Test transactional_update.highstage""" -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ } -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.highstate(queue=False) == [ -+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096' -+ ] -+ - @patch("salt.modules.transactional_update._create_and_execute_salt_state") - @patch("salt.client.ssh.state.SSHState") - @patch("salt.utils.state.get_sls_opts") -@@ -683,7 +863,83 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - opts_mock = { - "hash_type": "md5", - } -+ salt_mock = { -+ "saltutil.is_running": MagicMock(return_value=[]), -+ } - get_sls_opts.return_value = opts_mock -- with patch.dict(tu.__opts__, opts_mock): -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): - assert tu.single("pkg.installed", name="emacs") == "result" - _create_and_execute_salt_state.assert_called_once() -+ -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ @patch("salt.client.ssh.state.SSHState") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_single_queue_false_failing( -+ self, get_sls_opts, SSHState, _create_and_execute_salt_state -+ ): -+ """Test transactional_update.single""" -+ SSHState.return_value = SSHState -+ SSHState.verify_data.return_value = None -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ } -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.single("pkg.installed", name="emacs", queue=False) == [ -+ 'The function "state.running" is running as PID 4126 and was started at 2015, Mar 25 12:34:07.204096 with jid 20150325123407204096' -+ ] -+ -+ @patch("salt.modules.transactional_update._create_and_execute_salt_state") -+ @patch("salt.client.ssh.state.SSHState") -+ @patch("salt.utils.state.get_sls_opts") -+ def test_single_queue_true( -+ self, get_sls_opts, SSHState, _create_and_execute_salt_state -+ ): -+ """Test transactional_update.single""" -+ SSHState.return_value = SSHState -+ SSHState.verify_data.return_value = None -+ -+ _create_and_execute_salt_state.return_value = "result" -+ opts_mock = { -+ "hash_type": "md5", -+ } -+ salt_mock = { -+ "saltutil.is_running": MagicMock( -+ side_effect=[ -+ [ -+ { -+ "fun": "state.running", -+ "pid": "4126", -+ "jid": "20150325123407204096", -+ } -+ ], -+ [], -+ ] -+ ), -+ } -+ get_sls_opts.return_value = opts_mock -+ with patch.dict(tu.__opts__, opts_mock), 
patch.dict( -+ statemod.__salt__, salt_mock -+ ): -+ assert tu.single("pkg.installed", name="emacs", queue=True) == "result" -+ _create_and_execute_salt_state.assert_called_once() -diff --git a/tests/unit/states/test_service.py b/tests/unit/states/test_service.py -index 51755fc5a1..de09f2f8ab 100644 ---- a/tests/unit/states/test_service.py -+++ b/tests/unit/states/test_service.py -@@ -304,6 +304,24 @@ class ServiceTestCase(TestCase, LoaderModuleMockMixin): - service.__context__, {"service.state": "running"} - ) - -+ def test_running_in_offline_mode(self): -+ """ -+ Tests the case in which a service.running state is executed on an offline environemnt -+ -+ """ -+ name = "thisisnotarealservice" -+ with patch.object(service, "_offline", MagicMock(return_value=True)): -+ ret = service.running(name=name) -+ self.assertDictEqual( -+ ret, -+ { -+ "changes": {}, -+ "comment": "Running in OFFLINE mode. Nothing to do", -+ "result": True, -+ "name": name, -+ }, -+ ) -+ - def test_dead(self): - """ - Test to ensure that the named service is dead -@@ -443,6 +461,24 @@ class ServiceTestCase(TestCase, LoaderModuleMockMixin): - }, - ) - -+ def test_dead_in_offline_mode(self): -+ """ -+ Tests the case in which a service.dead state is executed on an offline environemnt -+ -+ """ -+ name = "thisisnotarealservice" -+ with patch.object(service, "_offline", MagicMock(return_value=True)): -+ ret = service.dead(name=name) -+ self.assertDictEqual( -+ ret, -+ { -+ "changes": {}, -+ "comment": "Running in OFFLINE mode. Nothing to do", -+ "result": True, -+ "name": name, -+ }, -+ ) -+ - def test_enabled(self): - """ - Test to verify that the service is enabled -@@ -567,8 +603,11 @@ class ServiceTestCaseFunctional(TestCase, LoaderModuleMockMixin): - @slowTest - def test_running_with_reload(self): - with patch.dict(service.__opts__, {"test": False}): -- service.dead(self.service_name, enable=False) -- result = service.running(name=self.service_name, enable=True, reload=False) -+ with patch("salt.utils.systemd.offline", MagicMock(return_value=False)): -+ service.dead(self.service_name, enable=False) -+ result = service.running( -+ name=self.service_name, enable=True, reload=False -+ ) - - if salt.utils.platform.is_windows(): - comment = "Started Service {}".format(self.service_name) --- -2.32.0 - - diff --git a/do-not-break-repo-files-with-multiple-line-values-on.patch b/do-not-break-repo-files-with-multiple-line-values-on.patch deleted file mode 100644 index 5db42ff..0000000 --- a/do-not-break-repo-files-with-multiple-line-values-on.patch +++ /dev/null @@ -1,77 +0,0 @@ -From e986ed8fc0d5da74374d9ded82e10c16fc984ca8 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 29 May 2019 11:03:16 +0100 -Subject: [PATCH] Do not break repo files with multiple line values on - yumpkg (bsc#1135360) - ---- - tests/integration/modules/test_pkg.py | 51 +++++++++++++++++++++++++++ - 1 file changed, 51 insertions(+) - -diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py -index 7a720523da..e32013800d 100644 ---- a/tests/integration/modules/test_pkg.py -+++ b/tests/integration/modules/test_pkg.py -@@ -194,6 +194,57 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): - if repo is not None: - self.run_function("pkg.del_repo", [repo]) - -+ def test_mod_del_repo_multiline_values(self): -+ """ -+ test modifying and deleting a software repository defined with multiline values -+ """ -+ os_grain = self.run_function("grains.item", ["os"])["os"] -+ repo = None 
-+ try: -+ if os_grain in ["CentOS", "RedHat", "SUSE"]: -+ my_baseurl = ( -+ "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" -+ ) -+ expected_get_repo_baseurl = ( -+ "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" -+ ) -+ major_release = int( -+ self.run_function("grains.item", ["osmajorrelease"])[ -+ "osmajorrelease" -+ ] -+ ) -+ repo = "fakerepo" -+ name = "Fake repo for RHEL/CentOS/SUSE" -+ baseurl = my_baseurl -+ gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub" -+ failovermethod = "priority" -+ gpgcheck = 1 -+ enabled = 1 -+ ret = self.run_function( -+ "pkg.mod_repo", -+ [repo], -+ name=name, -+ baseurl=baseurl, -+ gpgkey=gpgkey, -+ gpgcheck=gpgcheck, -+ enabled=enabled, -+ failovermethod=failovermethod, -+ ) -+ # return data from pkg.mod_repo contains the file modified at -+ # the top level, so use next(iter(ret)) to get that key -+ self.assertNotEqual(ret, {}) -+ repo_info = ret[next(iter(ret))] -+ self.assertIn(repo, repo_info) -+ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl) -+ ret = self.run_function("pkg.get_repo", [repo]) -+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) -+ self.run_function("pkg.mod_repo", [repo]) -+ ret = self.run_function("pkg.get_repo", [repo]) -+ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) -+ finally: -+ if repo is not None: -+ self.run_function("pkg.del_repo", [repo]) -+ - @requires_salt_modules("pkg.owner") - def test_owner(self): - """ --- -2.29.2 - - diff --git a/do-not-crash-when-there-are-ipv6-established-connect.patch b/do-not-crash-when-there-are-ipv6-established-connect.patch deleted file mode 100644 index 2af9dca..0000000 --- a/do-not-crash-when-there-are-ipv6-established-connect.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 998136ffd4c8442e0c3a7030af3d8196abec6be1 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Tue, 7 May 2019 15:33:51 +0100 -Subject: [PATCH] Do not crash when there are IPv6 established - connections (bsc#1130784) - -Add unit test for '_netlink_tool_remote_on' ---- - salt/utils/network.py | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/salt/utils/network.py b/salt/utils/network.py -index dd7fceb91a..d253ded3ab 100644 ---- a/salt/utils/network.py -+++ b/salt/utils/network.py -@@ -1623,8 +1623,13 @@ def _netlink_tool_remote_on(port, which_end): - elif "ESTAB" not in line: - continue - chunks = line.split() -+ local_host, local_port = chunks[3].rsplit(":", 1) - remote_host, remote_port = chunks[4].rsplit(":", 1) - -+ if which_end == "remote_port" and int(remote_port) != port: -+ continue -+ if which_end == "local_port" and int(local_port) != port: -+ continue - remotes.add(remote_host.strip("[]")) - - if valid is False: --- -2.29.2 - - diff --git a/do-not-crash-when-unexpected-cmd-output-at-listing-p.patch b/do-not-crash-when-unexpected-cmd-output-at-listing-p.patch index 423c0cb..9134653 100644 --- a/do-not-crash-when-unexpected-cmd-output-at-listing-p.patch +++ b/do-not-crash-when-unexpected-cmd-output-at-listing-p.patch @@ -1,4 +1,4 @@ -From cec95ba8f9b561d7ca4c66be9483e4b9386cb741 Mon Sep 17 00:00:00 2001 +From f9a66dbf39345b2b371b18e8bf9d89835d6381b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Mon, 25 Jan 2021 12:15:59 +0000 @@ -7,15 +7,15 @@ Subject: [PATCH] Do not crash when unexpected cmd output at listing Add unit tests to cover unexpected output when listing patches --- - salt/modules/yumpkg.py | 20 ++++++++-- - tests/unit/modules/test_yumpkg.py | 63 
+++++++++++++++++++
- 2 files changed, 80 insertions(+), 3 deletions(-)
+ salt/modules/yumpkg.py | 20 +++++++--
+ tests/pytests/unit/modules/test_yumpkg.py | 53 +++++++++++++++++++++++
+ 2 files changed, 70 insertions(+), 3 deletions(-)
 
 diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
-index df174e737d..82adbbd59d 100644
+index fd79109e40..c800dafa82 100644
 --- a/salt/modules/yumpkg.py
 +++ b/salt/modules/yumpkg.py
-@@ -3291,10 +3291,17 @@ def _get_patches(installed_only=False):
+@@ -3325,10 +3325,17 @@ def _get_patches(installed_only=False):
      cmd = [_yum(), "--quiet", "updateinfo", "list", "all"]
      ret = __salt__["cmd.run_stdout"](cmd, python_shell=False, env={"SALT_RUNNING": "1"})
@@ -36,7 +36,7 @@ index df174e737d..82adbbd59d 100644
      if advisory_id not in patches:
          patches[advisory_id] = {
              "installed": True if inst == "i" else False,
-@@ -3305,6 +3312,13 @@ def _get_patches(installed_only=False):
+@@ -3339,6 +3346,13 @@ def _get_patches(installed_only=False):
      if inst != "i":
          patches[advisory_id]["installed"] = False
 
      if installed_only:
          patches = {k: v for k, v in patches.items() if v["installed"]}
      return patches
-diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py
-index b97e82d307..96d3f12b17 100644
---- a/tests/unit/modules/test_yumpkg.py
-+++ b/tests/unit/modules/test_yumpkg.py
-@@ -383,6 +383,69 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
-             _patch in patches["my-fake-patch-installed-1234"]["summary"]
-         )
-
-+    def test_list_patches_with_unexpected_output(self):
-+        """
-+        Test patches listin with unexpected output from updateinfo list
-+
-+        :return:
-+        """
-+        yum_out = [
-+            "Update notice RHBA-2014:0722 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.",
-+            "You should report this problem to the owner of the rhel7-dev-rhel7-rpm-x86_64 repository.",
-+            'To help pinpoint the issue, please attach the output of "yum updateinfo --verbose" to the report.',
-+            "Update notice RHSA-2014:1971 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.",
-+            "Update notice RHSA-2015:1981 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.",
-+            "Update notice RHSA-2015:0067 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping",
-+            "i my-fake-patch-not-installed-1234 recommended spacewalk-usix-2.7.5.2-2.2.noarch",
-+            "  my-fake-patch-not-installed-1234 recommended spacewalksd-5.0.26.2-21.2.x86_64",
-+            "i my-fake-patch-not-installed-1234 recommended suseRegisterInfo-3.1.1-18.2.x86_64",
-+            "i my-fake-patch-installed-1234 recommended my-package-one-1.1-0.1.x86_64",
-+            "i my-fake-patch-installed-1234 recommended my-package-two-1.1-0.1.x86_64",
-+        ]
-+
-+        expected_patches = {
-+            "my-fake-patch-not-installed-1234": {
-+                "installed": False,
-+                "summary": [
-+                    "spacewalk-usix-2.7.5.2-2.2.noarch",
-+                    "spacewalksd-5.0.26.2-21.2.x86_64",
-+                    "suseRegisterInfo-3.1.1-18.2.x86_64",
-+                ],
-+            },
-+            "my-fake-patch-installed-1234": {
-+                "installed": True,
-+                "summary": [
-+                    "my-package-one-1.1-0.1.x86_64",
-+                    "my-package-two-1.1-0.1.x86_64",
-+                ],
-+            },
-+        }
-+
-+        with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
-+            yumpkg.__salt__,
-+            {"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))},
-+        ):
-+            patches = yumpkg.list_patches()
-+            self.assertFalse(patches["my-fake-patch-not-installed-1234"]["installed"])
-+            self.assertTrue(
-+                len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3
-+            )
-+            for _patch in expected_patches["my-fake-patch-not-installed-1234"][
-+                "summary"
-+            ]:
-+                self.assertTrue(
-+                    _patch in patches["my-fake-patch-not-installed-1234"]["summary"]
-+                )
-+
-+            self.assertTrue(patches["my-fake-patch-installed-1234"]["installed"])
-+            self.assertTrue(
-+                len(patches["my-fake-patch-installed-1234"]["summary"]) == 2
-+            )
-+            for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]:
-+                self.assertTrue(
-+                    _patch in patches["my-fake-patch-installed-1234"]["summary"]
-+                )
-+
-     def test_latest_version_with_options(self):
-         with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})):
+diff --git a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py
+index ef7100fe9d..df01853927 100644
+--- a/tests/pytests/unit/modules/test_yumpkg.py
++++ b/tests/pytests/unit/modules/test_yumpkg.py
+@@ -420,6 +420,59 @@ def test_list_patches():
+     assert _patch in patches["my-fake-patch-installed-1234"]["summary"]
+
+
++def test_list_patches_with_unexpected_output():
++    """
++    Test patches listing with unexpected output from updateinfo list
+
++    :return:
++    """
++    yum_out = [
++        "Update notice RHBA-2014:0722 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.",
++        "You should report this problem to the owner of the rhel7-dev-rhel7-rpm-x86_64 repository.",
++        'To help pinpoint the issue, please attach the output of 
"yum updateinfo --verbose" to the report.', ++ "Update notice RHSA-2014:1971 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.", ++ "Update notice RHSA-2015:1981 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping.", ++ "Update notice RHSA-2015:0067 (from rhel7-dev-rhel7-rpm-x86_64) is broken, or a bad duplicate, skipping", ++ "i my-fake-patch-not-installed-1234 recommended spacewalk-usix-2.7.5.2-2.2.noarch", ++ " my-fake-patch-not-installed-1234 recommended spacewalksd-5.0.26.2-21.2.x86_64", ++ "i my-fake-patch-not-installed-1234 recommended suseRegisterInfo-3.1.1-18.2.x86_64", ++ "i my-fake-patch-installed-1234 recommended my-package-one-1.1-0.1.x86_64", ++ "i my-fake-patch-installed-1234 recommended my-package-two-1.1-0.1.x86_64", ++ ] + -+ expected_patches = { -+ "my-fake-patch-not-installed-1234": { -+ "installed": False, -+ "summary": [ -+ "spacewalk-usix-2.7.5.2-2.2.noarch", -+ "spacewalksd-5.0.26.2-21.2.x86_64", -+ "suseRegisterInfo-3.1.1-18.2.x86_64", -+ ], -+ }, -+ "my-fake-patch-installed-1234": { -+ "installed": True, -+ "summary": [ -+ "my-package-one-1.1-0.1.x86_64", -+ "my-package-two-1.1-0.1.x86_64", -+ ], -+ }, -+ } ++ expected_patches = { ++ "my-fake-patch-not-installed-1234": { ++ "installed": False, ++ "summary": [ ++ "spacewalk-usix-2.7.5.2-2.2.noarch", ++ "spacewalksd-5.0.26.2-21.2.x86_64", ++ "suseRegisterInfo-3.1.1-18.2.x86_64", ++ ], ++ }, ++ "my-fake-patch-installed-1234": { ++ "installed": True, ++ "summary": [ ++ "my-package-one-1.1-0.1.x86_64", ++ "my-package-two-1.1-0.1.x86_64", ++ ], ++ }, ++ } + -+ with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict( -+ yumpkg.__salt__, -+ {"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))}, -+ ): -+ patches = yumpkg.list_patches() -+ self.assertFalse(patches["my-fake-patch-not-installed-1234"]["installed"]) -+ self.assertTrue( -+ len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3 -+ ) -+ for _patch in expected_patches["my-fake-patch-not-installed-1234"][ -+ "summary" -+ ]: -+ self.assertTrue( -+ _patch in patches["my-fake-patch-not-installed-1234"]["summary"] -+ ) ++ with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict( ++ yumpkg.__salt__, ++ {"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))}, ++ ): ++ patches = yumpkg.list_patches() ++ assert not patches["my-fake-patch-not-installed-1234"]["installed"] ++ assert len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3 ++ for _patch in expected_patches["my-fake-patch-not-installed-1234"]["summary"]: ++ assert _patch in patches["my-fake-patch-not-installed-1234"]["summary"] ++ assert patches["my-fake-patch-installed-1234"]["installed"] ++ assert len(patches["my-fake-patch-installed-1234"]["summary"]) == 2 ++ for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]: ++ assert _patch in patches["my-fake-patch-installed-1234"]["summary"] + -+ self.assertTrue(patches["my-fake-patch-installed-1234"]["installed"]) -+ self.assertTrue( -+ len(patches["my-fake-patch-installed-1234"]["summary"]) == 2 -+ ) -+ for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]: -+ self.assertTrue( -+ _patch in patches["my-fake-patch-installed-1234"]["summary"] -+ ) + - def test_latest_version_with_options(self): - with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})): + def test_latest_version_with_options(): + with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})): -- -2.29.2 +2.33.0 diff --git 
a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch index f4925d1..63e7b3c 100644 --- a/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch +++ b/do-not-load-pip-state-if-there-is-no-3rd-party-depen.patch @@ -1,4 +1,4 @@ -From 57f9da0bd7727c46eab866941fee46a3eaf8c8ea Mon Sep 17 00:00:00 2001 +From e0b91c626c10b29d328fa92415393cd57bb4c962 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 21 Sep 2018 17:31:39 +0200 Subject: [PATCH] Do not load pip state if there is no 3rd party @@ -6,39 +6,16 @@ Subject: [PATCH] Do not load pip state if there is no 3rd party Safe import 3rd party dependency --- - salt/modules/pip.py | 93 ++++++++++++++++++++++++--------------------- - 1 file changed, 50 insertions(+), 43 deletions(-) + salt/modules/pip.py | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/modules/pip.py b/salt/modules/pip.py -index f7c101f6e4..742e0dd48a 100644 +index f68cafaeaf..14cfafed4b 100644 --- a/salt/modules/pip.py +++ b/salt/modules/pip.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - r""" - Install Python packages with pip to either the system or a virtualenv - -@@ -77,9 +76,7 @@ of the 2015.5 branch: - The issue is described here: https://github.com/saltstack/salt/issues/46163 - - """ --from __future__ import absolute_import, print_function, unicode_literals - --# Import python libs - import logging - import os - import re -@@ -89,7 +86,6 @@ import tempfile - - import pkg_resources # pylint: disable=3rd-party-module-not-gated - --# Import Salt libs - import salt.utils.data - import salt.utils.files - import salt.utils.json -@@ -101,6 +97,12 @@ import salt.utils.versions +@@ -96,6 +96,12 @@ import salt.utils.url + import salt.utils.versions from salt.exceptions import CommandExecutionError, CommandNotFoundError - from salt.ext import six +try: + import pkg_resources @@ -47,9 +24,9 @@ index f7c101f6e4..742e0dd48a 100644 + + # This needs to be named logger so we don't shadow it in pip.install - logger = logging.getLogger(__name__) # pylint: disable=C0103 + logger = logging.getLogger(__name__) # pylint: disable=invalid-name -@@ -118,7 +120,12 @@ def __virtual__(): +@@ -113,7 +119,12 @@ def __virtual__(): entire filesystem. If it's not installed in a conventional location, the user is required to provide the location of pip each time it is used. 
""" @@ -63,298 +40,7 @@ index f7c101f6e4..742e0dd48a 100644 def _pip_bin_env(cwd, bin_env): -@@ -140,7 +147,7 @@ def _clear_context(bin_env=None): - """ - contextkey = "pip.version" - if bin_env is not None: -- contextkey = "{0}.{1}".format(contextkey, bin_env) -+ contextkey = "{}.{}".format(contextkey, bin_env) - __context__.pop(contextkey, None) - - -@@ -196,7 +203,7 @@ def _get_pip_bin(bin_env): - bin_path, - ) - raise CommandNotFoundError( -- "Could not find a pip binary in virtualenv {0}".format(bin_env) -+ "Could not find a pip binary in virtualenv {}".format(bin_env) - ) - - # bin_env is the python or pip binary -@@ -209,11 +216,11 @@ def _get_pip_bin(bin_env): - return [os.path.normpath(bin_env)] - - raise CommandExecutionError( -- "Could not find a pip binary within {0}".format(bin_env) -+ "Could not find a pip binary within {}".format(bin_env) - ) - else: - raise CommandNotFoundError( -- "Access denied to {0}, could not find a pip binary".format(bin_env) -+ "Access denied to {}, could not find a pip binary".format(bin_env) - ) - - -@@ -283,7 +290,7 @@ def _resolve_requirements_chain(requirements): - - chain = [] - -- if isinstance(requirements, six.string_types): -+ if isinstance(requirements, str): - requirements = [requirements] - - for req_file in requirements: -@@ -300,7 +307,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user): - cleanup_requirements = [] - - if requirements is not None: -- if isinstance(requirements, six.string_types): -+ if isinstance(requirements, str): - requirements = [r.strip() for r in requirements.split(",")] - elif not isinstance(requirements, list): - raise TypeError("requirements must be a string or list") -@@ -314,7 +321,7 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user): - if not cached_requirements: - ret = { - "result": False, -- "comment": "pip requirements file '{0}' not found".format( -+ "comment": "pip requirements file '{}' not found".format( - requirement - ), - } -@@ -412,15 +419,15 @@ def _format_env_vars(env_vars): - ret = {} - if env_vars: - if isinstance(env_vars, dict): -- for key, val in six.iteritems(env_vars): -- if not isinstance(key, six.string_types): -+ for key, val in env_vars.items(): -+ if not isinstance(key, str): - key = str(key) # future lint: disable=blacklisted-function -- if not isinstance(val, six.string_types): -+ if not isinstance(val, str): - val = str(val) # future lint: disable=blacklisted-function - ret[key] = val - else: - raise CommandExecutionError( -- "env_vars {0} is not a dictionary".format(env_vars) -+ "env_vars {} is not a dictionary".format(env_vars) - ) - return ret - -@@ -762,9 +769,9 @@ def install( - - if log: - if os.path.isdir(log): -- raise IOError("'{0}' is a directory. Use --log path_to_file".format(log)) -+ raise OSError("'{}' is a directory. 
Use --log path_to_file".format(log)) - elif not os.access(log, os.W_OK): -- raise IOError("'{0}' is not writeable".format(log)) -+ raise OSError("'{}' is not writeable".format(log)) - - cmd.extend(["--log", log]) - -@@ -790,12 +797,12 @@ def install( - int(timeout) - except ValueError: - raise ValueError( -- "'{0}' is not a valid timeout, must be an integer".format(timeout) -+ "'{}' is not a valid timeout, must be an integer".format(timeout) - ) - cmd.extend(["--timeout", timeout]) - - if find_links: -- if isinstance(find_links, six.string_types): -+ if isinstance(find_links, str): - find_links = [l.strip() for l in find_links.split(",")] - - for link in find_links: -@@ -803,7 +810,7 @@ def install( - salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link) - ): - raise CommandExecutionError( -- "'{0}' is not a valid URL or path".format(link) -+ "'{}' is not a valid URL or path".format(link) - ) - cmd.extend(["--find-links", link]) - -@@ -815,13 +822,13 @@ def install( - - if index_url: - if not salt.utils.url.validate(index_url, VALID_PROTOS): -- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url)) -+ raise CommandExecutionError("'{}' is not a valid URL".format(index_url)) - cmd.extend(["--index-url", index_url]) - - if extra_index_url: - if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): - raise CommandExecutionError( -- "'{0}' is not a valid URL".format(extra_index_url) -+ "'{}' is not a valid URL".format(extra_index_url) - ) - cmd.extend(["--extra-index-url", extra_index_url]) - -@@ -836,13 +843,13 @@ def install( - " use index_url and/or extra_index_url instead" - ) - -- if isinstance(mirrors, six.string_types): -+ if isinstance(mirrors, str): - mirrors = [m.strip() for m in mirrors.split(",")] - - cmd.append("--use-mirrors") - for mirror in mirrors: - if not mirror.startswith("http://"): -- raise CommandExecutionError("'{0}' is not a valid URL".format(mirror)) -+ raise CommandExecutionError("'{}' is not a valid URL".format(mirror)) - cmd.extend(["--mirrors", mirror]) - - if disable_version_check: -@@ -883,7 +890,7 @@ def install( - if exists_action.lower() not in ("s", "i", "w", "b"): - raise CommandExecutionError( - "The exists_action pip option only supports the values " -- "s, i, w, and b. '{0}' is not valid.".format(exists_action) -+ "s, i, w, and b. 
'{}' is not valid.".format(exists_action) - ) - cmd.extend(["--exists-action", exists_action]) - -@@ -911,14 +918,14 @@ def install( - cmd.extend(["--cert", cert]) - - if global_options: -- if isinstance(global_options, six.string_types): -+ if isinstance(global_options, str): - global_options = [go.strip() for go in global_options.split(",")] - - for opt in global_options: - cmd.extend(["--global-option", opt]) - - if install_options: -- if isinstance(install_options, six.string_types): -+ if isinstance(install_options, str): - install_options = [io.strip() for io in install_options.split(",")] - - for opt in install_options: -@@ -929,7 +936,7 @@ def install( - try: - pkgs = [p.strip() for p in pkgs.split(",")] - except AttributeError: -- pkgs = [p.strip() for p in six.text_type(pkgs).split(",")] -+ pkgs = [p.strip() for p in str(pkgs).split(",")] - pkgs = salt.utils.data.stringify(salt.utils.data.decode_list(pkgs)) - - # It's possible we replaced version-range commas with semicolons so -@@ -945,7 +952,7 @@ def install( - - if editable: - egg_match = re.compile(r"(?:#|#.*?&)egg=([^&]*)") -- if isinstance(editable, six.string_types): -+ if isinstance(editable, str): - editable = [e.strip() for e in editable.split(",")] - - for entry in editable: -@@ -964,14 +971,14 @@ def install( - cmd.append("--allow-all-external") - - if allow_external: -- if isinstance(allow_external, six.string_types): -+ if isinstance(allow_external, str): - allow_external = [p.strip() for p in allow_external.split(",")] - - for pkg in allow_external: - cmd.extend(["--allow-external", pkg]) - - if allow_unverified: -- if isinstance(allow_unverified, six.string_types): -+ if isinstance(allow_unverified, str): - allow_unverified = [p.strip() for p in allow_unverified.split(",")] - - for pkg in allow_unverified: -@@ -1106,8 +1113,8 @@ def uninstall( - try: - # TODO make this check if writeable - os.path.exists(log) -- except IOError: -- raise IOError("'{0}' is not writeable".format(log)) -+ except OSError: -+ raise OSError("'{}' is not writeable".format(log)) - - cmd.extend(["--log", log]) - -@@ -1133,12 +1140,12 @@ def uninstall( - int(timeout) - except ValueError: - raise ValueError( -- "'{0}' is not a valid timeout, must be an integer".format(timeout) -+ "'{}' is not a valid timeout, must be an integer".format(timeout) - ) - cmd.extend(["--timeout", timeout]) - - if pkgs: -- if isinstance(pkgs, six.string_types): -+ if isinstance(pkgs, str): - pkgs = [p.strip() for p in pkgs.split(",")] - if requirements: - for requirement in requirements: -@@ -1323,7 +1330,7 @@ def version(bin_env=None, cwd=None, user=None): - cwd = _pip_bin_env(cwd, bin_env) - contextkey = "pip.version" - if bin_env is not None: -- contextkey = "{0}.{1}".format(contextkey, bin_env) -+ contextkey = "{}.{}".format(contextkey, bin_env) - - if contextkey in __context__: - return __context__[contextkey] -@@ -1402,7 +1409,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None): - if match: - name, version_ = match.groups() - else: -- logger.error("Can't parse line '{0}'".format(line)) -+ logger.error("Can't parse line '{}'".format(line)) - continue - packages[name] = version_ - -@@ -1414,7 +1421,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None): - raise CommandExecutionError("Invalid JSON", info=result) - - for pkg in pkgs: -- packages[pkg["name"]] = "{0} [{1}]".format( -+ packages[pkg["name"]] = "{} [{}]".format( - pkg["latest_version"], pkg["latest_filetype"] - ) - -@@ -1602,17 +1609,17 @@ def list_all_versions( - """ - cwd = 
_pip_bin_env(cwd, bin_env) - cmd = _get_pip_bin(bin_env) -- cmd.extend(["install", "{0}==versions".format(pkg)]) -+ cmd.extend(["install", "{}==versions".format(pkg)]) - - if index_url: - if not salt.utils.url.validate(index_url, VALID_PROTOS): -- raise CommandExecutionError("'{0}' is not a valid URL".format(index_url)) -+ raise CommandExecutionError("'{}' is not a valid URL".format(index_url)) - cmd.extend(["--index-url", index_url]) - - if extra_index_url: - if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): - raise CommandExecutionError( -- "'{0}' is not a valid URL".format(extra_index_url) -+ "'{}' is not a valid URL".format(extra_index_url) - ) - cmd.extend(["--extra-index-url", extra_index_url]) - -@@ -1632,7 +1639,7 @@ def list_all_versions( - if not include_rc: - filtered.append("rc") - if filtered: -- excludes = re.compile(r"^((?!{0}).)*$".format("|".join(filtered))) -+ excludes = re.compile(r"^((?!{}).)*$".format("|".join(filtered))) - else: - excludes = re.compile(r"") - -- -2.29.2 +2.33.0 diff --git a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch b/do-not-make-ansiblegate-to-crash-on-python3-minions.patch deleted file mode 100644 index b3a8df1..0000000 --- a/do-not-make-ansiblegate-to-crash-on-python3-minions.patch +++ /dev/null @@ -1,570 +0,0 @@ -From 5d465a5b392efa1b4df7870161b32e0125efa4af Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 28 Jun 2019 15:17:56 +0100 -Subject: [PATCH] Do not make ansiblegate to crash on Python3 minions - -Fix pylint issues - -Move MockTimedProc implementation to tests.support.mock - -Add unit test for ansible caller ---- - salt/modules/ansiblegate.py | 7 +- - tests/support/mock.py | 128 +++++++++------- - tests/unit/modules/test_ansiblegate.py | 201 +++++++++++++++++++++++++ - tests/unit/modules/test_cmdmod.py | 1 + - 4 files changed, 280 insertions(+), 57 deletions(-) - create mode 100644 tests/unit/modules/test_ansiblegate.py - -diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py -index 0279a26017..5d4b986ec2 100644 ---- a/salt/modules/ansiblegate.py -+++ b/salt/modules/ansiblegate.py -@@ -160,6 +160,7 @@ class AnsibleModuleCaller: - :param kwargs: keywords to the module - :return: - """ -+ python_exec = "python3" - - module = self._resolver.load_module(module) - if not hasattr(module, "main"): -@@ -182,9 +183,9 @@ class AnsibleModuleCaller: - timeout=self.timeout, - ) - proc_out.run() -- proc_out_stdout = salt.utils.stringutils.to_str(proc_out.stdout) -+ proc_out_stdout = proc_out.stdout.decode() - proc_exc = salt.utils.timed_subprocess.TimedProc( -- [sys.executable, module.__file__], -+ [python_exec, module.__file__], - stdin=proc_out_stdout, - stdout=subprocess.PIPE, - timeout=self.timeout, -@@ -298,7 +299,7 @@ def help(module=None, *args): - 'Available sections on module "{}"'.format( - module.__name__.replace("ansible.modules.", "") - ) -- ] = list(doc) -+ ] = [i for i in doc.keys()] - else: - for arg in args: - info = doc.get(arg) -diff --git a/tests/support/mock.py b/tests/support/mock.py -index 7ef02e0701..87d052c399 100644 ---- a/tests/support/mock.py -+++ b/tests/support/mock.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - :codeauthor: Pedro Algarvio (pedro@algarvio.me) - -@@ -14,7 +13,6 @@ - """ - # pylint: disable=unused-import,function-redefined,blacklisted-module,blacklisted-external-module - --from __future__ import absolute_import - - import collections - import copy -@@ -42,8 +40,6 @@ from mock import ( - patch, - sentinel, - ) -- 
--# Import salt libs - from salt.ext import six - - # pylint: disable=no-name-in-module,no-member -@@ -57,7 +53,7 @@ if sys.version_info < (3, 6) and __mock_version < (2,): - raise ImportError("Please install mock>=2.0.0") - - --class MockFH(object): -+class MockFH: - def __init__(self, filename, read_data, *args, **kwargs): - self.filename = filename - self.read_data = read_data -@@ -89,7 +85,7 @@ class MockFH(object): - """ - # Newline will always be a bytestring on PY2 because mock_open will have - # normalized it to one. -- newline = b"\n" if isinstance(read_data, six.binary_type) else "\n" -+ newline = b"\n" if isinstance(read_data, bytes) else "\n" - - read_data = [line + newline for line in read_data.split(newline)] - -@@ -103,8 +99,7 @@ class MockFH(object): - # newline that we added in the list comprehension. - read_data[-1] = read_data[-1][:-1] - -- for line in read_data: -- yield line -+ yield from read_data - - @property - def write_calls(self): -@@ -126,18 +121,18 @@ class MockFH(object): - def __check_read_data(self): - if not self.__read_data_ok: - if self.binary_mode: -- if not isinstance(self.read_data, six.binary_type): -+ if not isinstance(self.read_data, bytes): - raise TypeError( -- "{0} opened in binary mode, expected read_data to be " -- "bytes, not {1}".format( -+ "{} opened in binary mode, expected read_data to be " -+ "bytes, not {}".format( - self.filename, type(self.read_data).__name__ - ) - ) - else: - if not isinstance(self.read_data, str): - raise TypeError( -- "{0} opened in non-binary mode, expected read_data to " -- "be str, not {1}".format( -+ "{} opened in non-binary mode, expected read_data to " -+ "be str, not {}".format( - self.filename, type(self.read_data).__name__ - ) - ) -@@ -147,8 +142,8 @@ class MockFH(object): - def _read(self, size=0): - self.__check_read_data() - if not self.read_mode: -- raise IOError("File not open for reading") -- if not isinstance(size, six.integer_types) or size < 0: -+ raise OSError("File not open for reading") -+ if not isinstance(size, int) or size < 0: - raise TypeError("a positive integer is required") - - joined = self.empty_string.join(self.read_data_iter) -@@ -169,7 +164,7 @@ class MockFH(object): - # TODO: Implement "size" argument - self.__check_read_data() - if not self.read_mode: -- raise IOError("File not open for reading") -+ raise OSError("File not open for reading") - ret = list(self.read_data_iter) - self.__loc += sum(len(x) for x in ret) - return ret -@@ -178,7 +173,7 @@ class MockFH(object): - # TODO: Implement "size" argument - self.__check_read_data() - if not self.read_mode: -- raise IOError("File not open for reading") -+ raise OSError("File not open for reading") - try: - ret = next(self.read_data_iter) - self.__loc += len(ret) -@@ -189,7 +184,7 @@ class MockFH(object): - def __iter__(self): - self.__check_read_data() - if not self.read_mode: -- raise IOError("File not open for reading") -+ raise OSError("File not open for reading") - while True: - try: - ret = next(self.read_data_iter) -@@ -200,30 +195,22 @@ class MockFH(object): - - def _write(self, content): - if not self.write_mode: -- raise IOError("File not open for writing") -- if six.PY2: -- if isinstance(content, six.text_type): -- # encoding intentionally not specified to force a -- # UnicodeEncodeError when non-ascii unicode type is passed -- content.encode() -- else: -- content_type = type(content) -- if self.binary_mode and content_type is not bytes: -- raise TypeError( -- "a bytes-like object is required, not '{0}'".format( -- 
content_type.__name__ -- ) -- ) -- elif not self.binary_mode and content_type is not str: -- raise TypeError( -- "write() argument must be str, not {0}".format( -- content_type.__name__ -- ) -+ raise OSError("File not open for writing") -+ content_type = type(content) -+ if self.binary_mode and content_type is not bytes: -+ raise TypeError( -+ "a bytes-like object is required, not '{}'".format( -+ content_type.__name__ - ) -+ ) -+ elif not self.binary_mode and content_type is not str: -+ raise TypeError( -+ "write() argument must be str, not {}".format(content_type.__name__) -+ ) - - def _writelines(self, lines): - if not self.write_mode: -- raise IOError("File not open for writing") -+ raise OSError("File not open for writing") - for line in lines: - self._write(line) - -@@ -234,26 +221,24 @@ class MockFH(object): - pass - - --class MockCall(object): -+class MockCall: - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - def __repr__(self): - # future lint: disable=blacklisted-function -- ret = str("MockCall(") -+ ret = "MockCall(" - for arg in self.args: -- ret += repr(arg) + str(", ") -+ ret += repr(arg) + ", " - if not self.kwargs: - if self.args: - # Remove trailing ', ' - ret = ret[:-2] - else: -- for key, val in six.iteritems(self.kwargs): -- ret += str("{0}={1}").format( -- salt.utils.stringutils.to_str(key), repr(val) -- ) -- ret += str(")") -+ for key, val in self.kwargs.items(): -+ ret += "{}={}".format(salt.utils.stringutils.to_str(key), repr(val)) -+ ret += ")" - return ret - # future lint: enable=blacklisted-function - -@@ -264,7 +249,7 @@ class MockCall(object): - return self.args == other.args and self.kwargs == other.kwargs - - --class MockOpen(object): -+class MockOpen: - r''' - This class can be used to mock the use of ``open()``. - -@@ -379,7 +364,7 @@ class MockOpen(object): - # .__class__() used here to preserve the dict class in the event that - # an OrderedDict was used. - new_read_data = read_data.__class__() -- for key, val in six.iteritems(read_data): -+ for key, val in read_data.items(): - try: - val = salt.utils.data.decode(val, to_str=True) - except TypeError: -@@ -424,7 +409,7 @@ class MockOpen(object): - except IndexError: - # We've run out of file contents, abort! - raise RuntimeError( -- "File matching expression '{0}' opened more times than " -+ "File matching expression '{}' opened more times than " - "expected".format(matched_pattern) - ) - -@@ -443,7 +428,7 @@ class MockOpen(object): - except KeyError: - # No matching glob in read_data, treat this as a file that does - # not exist and raise the appropriate exception. -- raise IOError(errno.ENOENT, "No such file or directory", name) -+ raise OSError(errno.ENOENT, "No such file or directory", name) - - def write_calls(self, path=None): - """ -@@ -451,7 +436,7 @@ class MockOpen(object): - the results to files matching a given pattern. - """ - ret = [] -- for filename, handles in six.iteritems(self.filehandles): -+ for filename, handles in self.filehandles.items(): - if path is None or fnmatch.fnmatch(filename, path): - for fh_ in handles: - ret.extend(fh_.write_calls) -@@ -463,19 +448,54 @@ class MockOpen(object): - narrow the results to files matching a given pattern. 
- """ - ret = [] -- for filename, handles in six.iteritems(self.filehandles): -+ for filename, handles in self.filehandles.items(): - if path is None or fnmatch.fnmatch(filename, path): - for fh_ in handles: - ret.extend(fh_.writelines_calls) - return ret - - --class MockTimedProc(object): -+class MockTimedProc: -+ """ -+ Class used as a stand-in for salt.utils.timed_subprocess.TimedProc -+ """ -+ -+ class _Process: -+ """ -+ Used to provide a dummy "process" attribute -+ """ -+ -+ def __init__(self, returncode=0, pid=12345): -+ self.returncode = returncode -+ self.pid = pid -+ -+ def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345): -+ if stdout is not None and not isinstance(stdout, bytes): -+ raise TypeError("Must pass stdout to MockTimedProc as bytes") -+ if stderr is not None and not isinstance(stderr, bytes): -+ raise TypeError("Must pass stderr to MockTimedProc as bytes") -+ self._stdout = stdout -+ self._stderr = stderr -+ self.process = self._Process(returncode=returncode, pid=pid) -+ -+ def run(self): -+ pass -+ -+ @property -+ def stdout(self): -+ return self._stdout -+ -+ @property -+ def stderr(self): -+ return self._stderr -+ -+ -+class MockTimedProc: - """ - Class used as a stand-in for salt.utils.timed_subprocess.TimedProc - """ - -- class _Process(object): -+ class _Process: - """ - Used to provide a dummy "process" attribute - """ -diff --git a/tests/unit/modules/test_ansiblegate.py b/tests/unit/modules/test_ansiblegate.py -new file mode 100644 -index 0000000000..61aad44b5c ---- /dev/null -+++ b/tests/unit/modules/test_ansiblegate.py -@@ -0,0 +1,201 @@ -+# -+# Author: Bo Maryniuk -+# -+# Copyright 2017 SUSE LLC -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+import os -+ -+import salt.modules.ansiblegate as ansible -+import salt.utils.platform -+from salt.exceptions import LoaderError -+from salt.ext import six -+from tests.support.mixins import LoaderModuleMockMixin -+from tests.support.mock import MagicMock, MockTimedProc, patch -+from tests.support.unit import TestCase, skipIf -+ -+try: -+ import pytest -+except ImportError as import_error: -+ pytest = None -+NO_PYTEST = not bool(pytest) -+ -+ -+@skipIf(NO_PYTEST, False) -+@skipIf(salt.utils.platform.is_windows(), "Not supported on Windows") -+class AnsiblegateTestCase(TestCase, LoaderModuleMockMixin): -+ def setUp(self): -+ self.resolver = ansible.AnsibleModuleResolver({}) -+ self.resolver._modules_map = { -+ "one.two.three": os.sep + os.path.join("one", "two", "three.py"), -+ "four.five.six": os.sep + os.path.join("four", "five", "six.py"), -+ "three.six.one": os.sep + os.path.join("three", "six", "one.py"), -+ } -+ -+ def tearDown(self): -+ self.resolver = None -+ -+ def setup_loader_modules(self): -+ return {ansible: {}} -+ -+ def test_ansible_module_help(self): -+ """ -+ Test help extraction from the module -+ :return: -+ """ -+ -+ class Module: -+ """ -+ An ansible module mock. 
-+ """ -+ -+ __name__ = "foo" -+ DOCUMENTATION = """ -+--- -+one: -+ text here -+--- -+two: -+ text here -+description: -+ describe the second part -+ """ -+ -+ with patch.object(ansible, "_resolver", self.resolver), patch.object( -+ ansible._resolver, "load_module", MagicMock(return_value=Module()) -+ ): -+ ret = ansible.help("dummy") -+ assert sorted( -+ ret.get('Available sections on module "{}"'.format(Module().__name__)) -+ ) == ["one", "two"] -+ assert ret.get("Description") == "describe the second part" -+ -+ def test_module_resolver_modlist(self): -+ """ -+ Test Ansible resolver modules list. -+ :return: -+ """ -+ assert self.resolver.get_modules_list() == [ -+ "four.five.six", -+ "one.two.three", -+ "three.six.one", -+ ] -+ for ptr in ["five", "fi", "ve"]: -+ assert self.resolver.get_modules_list(ptr) == ["four.five.six"] -+ for ptr in ["si", "ix", "six"]: -+ assert self.resolver.get_modules_list(ptr) == [ -+ "four.five.six", -+ "three.six.one", -+ ] -+ assert self.resolver.get_modules_list("one") == [ -+ "one.two.three", -+ "three.six.one", -+ ] -+ assert self.resolver.get_modules_list("one.two") == ["one.two.three"] -+ assert self.resolver.get_modules_list("four") == ["four.five.six"] -+ -+ def test_resolver_module_loader_failure(self): -+ """ -+ Test Ansible module loader. -+ :return: -+ """ -+ mod = "four.five.six" -+ with pytest.raises(ImportError) as import_error: -+ self.resolver.load_module(mod) -+ -+ mod = "i.even.do.not.exist.at.all" -+ with pytest.raises(LoaderError) as loader_error: -+ self.resolver.load_module(mod) -+ -+ def test_resolver_module_loader(self): -+ """ -+ Test Ansible module loader. -+ :return: -+ """ -+ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch( -+ "salt.modules.ansiblegate.importlib.import_module", lambda x: x -+ ): -+ assert ( -+ self.resolver.load_module("four.five.six") -+ == "ansible.modules.four.five.six" -+ ) -+ -+ def test_resolver_module_loader_import_failure(self): -+ """ -+ Test Ansible module loader failure. -+ :return: -+ """ -+ with patch("salt.modules.ansiblegate.importlib", MagicMock()), patch( -+ "salt.modules.ansiblegate.importlib.import_module", lambda x: x -+ ): -+ with pytest.raises(LoaderError) as loader_error: -+ self.resolver.load_module("something.strange") -+ -+ def test_virtual_function(self): -+ """ -+ Test Ansible module __virtual__ when ansible is not installed on the minion. -+ :return: -+ """ -+ with patch("salt.modules.ansiblegate.ansible", None): -+ assert ansible.__virtual__() == "ansible" -+ -+ def test_ansible_module_call(self): -+ """ -+ Test Ansible module call from ansible gate module -+ -+ :return: -+ """ -+ -+ class Module: -+ """ -+ An ansible module mock. 
-+ """ -+ -+ __name__ = "one.two.three" -+ __file__ = "foofile" -+ -+ def main(): -+ pass -+ -+ ANSIBLE_MODULE_ARGS = '{"ANSIBLE_MODULE_ARGS": ["arg_1", {"kwarg1": "foobar"}]}' -+ -+ proc = MagicMock( -+ side_effect=[ -+ MockTimedProc(stdout=ANSIBLE_MODULE_ARGS.encode(), stderr=None), -+ MockTimedProc(stdout=b'{"completed": true}', stderr=None), -+ ] -+ ) -+ -+ with patch.object(ansible, "_resolver", self.resolver), patch.object( -+ ansible._resolver, "load_module", MagicMock(return_value=Module()) -+ ): -+ _ansible_module_caller = ansible.AnsibleModuleCaller(ansible._resolver) -+ with patch("salt.utils.timed_subprocess.TimedProc", proc): -+ ret = _ansible_module_caller.call( -+ "one.two.three", "arg_1", kwarg1="foobar" -+ ) -+ proc.assert_any_call( -+ [ -+ "echo", -+ '{"ANSIBLE_MODULE_ARGS": {"kwarg1": "foobar", "_raw_params": "arg_1"}}', -+ ], -+ stdout=-1, -+ timeout=1200, -+ ) -+ proc.assert_any_call( -+ ["python3", "foofile"], -+ stdin=ANSIBLE_MODULE_ARGS, -+ stdout=-1, -+ timeout=1200, -+ ) -+ assert ret == {"completed": True, "timeout": 1200} -diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py -index 15b97f8568..f3348bc379 100644 ---- a/tests/unit/modules/test_cmdmod.py -+++ b/tests/unit/modules/test_cmdmod.py -@@ -24,6 +24,7 @@ DEFAULT_SHELL = "foo/bar" - MOCK_SHELL_FILE = "# List of acceptable shells\n" "\n" "/bin/bash\n" - - -+@skipIf(NO_MOCK, NO_MOCK_REASON) - class CMDMODTestCase(TestCase, LoaderModuleMockMixin): - """ - Unit tests for the salt.modules.cmdmod module --- -2.29.2 - - diff --git a/do-not-monkey-patch-yaml-bsc-1177474.patch b/do-not-monkey-patch-yaml-bsc-1177474.patch index 56ca8e6..34a72b9 100644 --- a/do-not-monkey-patch-yaml-bsc-1177474.patch +++ b/do-not-monkey-patch-yaml-bsc-1177474.patch @@ -1,4 +1,4 @@ -From 8eaeb751d4077d6514577b53a9dbe23df231018e Mon Sep 17 00:00:00 2001 +From e82c8832aed9ef46f5021558758ef9d944d89214 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Mon, 8 Mar 2021 12:35:14 +0000 @@ -127,19 +127,19 @@ index e9d80fc4ad..f98fdcb0e9 100644 ), value_node.start_mark, diff --git a/tests/unit/utils/test_yamlloader.py b/tests/unit/utils/test_yamlloader.py -index a1e17af760..3f2e4403ba 100644 +index e1d60aeed0..a5de963817 100644 --- a/tests/unit/utils/test_yamlloader.py +++ b/tests/unit/utils/test_yamlloader.py -@@ -13,7 +13,7 @@ import salt.utils.files +@@ -5,7 +5,7 @@ + import textwrap - # Import 3rd-party libs - from salt.ext import six + import salt.utils.files -from salt.utils.yamlloader import SaltYamlSafeLoader +from salt.utils.yamlloader import SaltYamlSafeLoader, yaml from tests.support.mock import mock_open, patch - - # Import Salt Testing Libs -@@ -177,3 +177,7 @@ class YamlLoaderTestCase(TestCase): + from tests.support.unit import TestCase + from yaml.constructor import ConstructorError +@@ -133,3 +133,7 @@ class YamlLoaderTestCase(TestCase): ), {"foo": {"b": {"foo": "bar", "one": 1, "list": [1, "two", 3]}}}, ) @@ -148,6 +148,6 @@ index a1e17af760..3f2e4403ba 100644 + if hasattr(yaml, "CSafeLoader"): + assert yaml.SafeLoader != yaml.CSafeLoader -- -2.30.1 +2.33.0 diff --git a/do-not-raise-streamclosederror-traceback-but-only-lo.patch b/do-not-raise-streamclosederror-traceback-but-only-lo.patch deleted file mode 100644 index b9cba9d..0000000 --- a/do-not-raise-streamclosederror-traceback-but-only-lo.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 81d0105b0c0464c375070ffbc863a020a67e7965 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: 
Wed, 26 Aug 2020 10:24:58 +0100 -Subject: [PATCH] Do not raise StreamClosedError traceback but only log - it (bsc#1175549) - ---- - salt/transport/ipc.py | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py -index f411907da2..5ff0956dde 100644 ---- a/salt/transport/ipc.py -+++ b/salt/transport/ipc.py -@@ -688,7 +688,6 @@ class IPCMessageSubscriber(IPCClient): - except StreamClosedError as exc: - log.trace("Subscriber disconnected from IPC %s", self.socket_path) - self._read_stream_future = None -- exc_to_raise = exc - except Exception as exc: # pylint: disable=broad-except - log.error("Exception occurred in Subscriber while handling stream: %s", exc) - self._read_stream_future = None --- -2.29.2 - - diff --git a/drop-wrong-mock-from-chroot-unit-test.patch b/drop-wrong-mock-from-chroot-unit-test.patch deleted file mode 100644 index e9eb834..0000000 --- a/drop-wrong-mock-from-chroot-unit-test.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 3dc61b426cee5c40976ee25a0357fd07244a630b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Tue, 13 Oct 2020 12:02:00 +0100 -Subject: [PATCH] Drop wrong mock from chroot unit test - ---- - tests/unit/modules/test_chroot.py | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py -index 196e3ad27f..a0f3f8e6af 100644 ---- a/tests/unit/modules/test_chroot.py -+++ b/tests/unit/modules/test_chroot.py -@@ -71,7 +71,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin): - self.assertTrue(chroot.create("/chroot")) - makedirs.assert_called() - -- @patch("salt.modules.chroot.exist") - @patch("salt.utils.files.fopen") - def test_in_chroot(self, fopen): - """ --- -2.29.2 - - diff --git a/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch b/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch deleted file mode 100644 index fd02a36..0000000 --- a/drop-wrong-virt-capabilities-code-after-rebasing-pat.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 79ae019ac7515614c6fbc620e66575f015bc447d Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Tue, 5 Jan 2021 09:34:45 +0000 -Subject: [PATCH] Drop wrong virt capabilities code after rebasing - patches - ---- - salt/modules/virt.py | 66 -------------------------------------------- - 1 file changed, 66 deletions(-) - -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index e3960a5a90..786bfa1e58 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -143,7 +143,6 @@ import salt.utils.xmlutil as xmlutil - import salt.utils.yaml - from salt._compat import ElementTree, ipaddress, saxutils - from salt.exceptions import CommandExecutionError, SaltInvocationError --from salt.ext import six - from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin - from salt.ext.six.moves.urllib.parse import urlparse, urlunparse - from salt.utils.virt import check_remote, download_remote -@@ -5416,71 +5415,6 @@ def _parse_domain_caps(caps): - return result - - --def _parse_domain_caps(caps): -- """ -- Parse the XML document of domain capabilities into a structure. 
-- """ -- result = { -- "emulator": caps.find("path").text if caps.find("path") is not None else None, -- "domain": caps.find("domain").text if caps.find("domain") is not None else None, -- "machine": caps.find("machine").text -- if caps.find("machine") is not None -- else None, -- "arch": caps.find("arch").text if caps.find("arch") is not None else None, -- } -- -- --def all_capabilities(**kwargs): -- """ -- Return the host and domain capabilities in a single call. -- -- .. versionadded:: 3001 -- -- :param connection: libvirt connection URI, overriding defaults -- :param username: username to connect with, overriding defaults -- :param password: password to connect with, overriding defaults -- -- CLI Example: -- -- .. code-block:: bash -- -- salt '*' virt.all_capabilities -- -- """ -- conn = __get_conn(**kwargs) -- try: -- host_caps = ElementTree.fromstring(conn.getCapabilities()) -- domains = [ -- [ -- (guest.get("arch", {}).get("name", None), key) -- for key in guest.get("arch", {}).get("domains", {}).keys() -- ] -- for guest in [ -- _parse_caps_guest(guest) for guest in host_caps.findall("guest") -- ] -- ] -- flattened = [pair for item in (x for x in domains) for pair in item] -- result = { -- "host": { -- "host": _parse_caps_host(host_caps.find("host")), -- "guests": [ -- _parse_caps_guest(guest) for guest in host_caps.findall("guest") -- ], -- }, -- "domains": [ -- _parse_domain_caps( -- ElementTree.fromstring( -- conn.getDomainCapabilities(None, arch, None, domain) -- ) -- ) -- for (arch, domain) in flattened -- ], -- } -- return result -- finally: -- conn.close() -- -- - def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs): - """ - Return the domain capabilities given an emulator, architecture, machine or virtualization type. --- -2.29.2 - - diff --git a/early-feature-support-config.patch b/early-feature-support-config.patch index 297c074..acdcae5 100644 --- a/early-feature-support-config.patch +++ b/early-feature-support-config.patch @@ -1,4 +1,4 @@ -From 550db5157741b0a252bfc684f3496a7fd6d674ad Mon Sep 17 00:00:00 2001 +From a20d99db6de68bd4e7a4d2eda4c1c590aea449e7 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 10 Jul 2018 12:06:33 +0200 Subject: [PATCH] early feature: support-config @@ -290,13 +290,236 @@ Lintfix: mute fopen warning Remove development stub. Ughh... Removed blacklist of pkg_resources + +Make profiles a package. 
+
+Add UTF-8 encoding
+
+Add a docstring
+
+Support-config fixes for non-root permission issues (U#50095)
+
+Do not crash if there is no configuration available at all
+
+Handle CLI and log errors
+
+Catch overwriting existing archive error by other users
+
+Suppress excessive tracebacks on error log level
+
+Add multi-file support and globbing to the filetree (U#50018)
+
+Add more possible logs
+
+Support grabbing multiple files
+
+Collect system logs and boot logs
+
+Support globbing in filetree
+
+Add supportconfig module for remote calls and SaltSSH
+
+Add log collector for remote purposes
+
+Implement default archive name
+
+Fix imports
+
+Implement runner function
+
+Remove targets data collector function, as it is now called by a module instead
+
+Add external method decorator marker
+
+Add utility class for detecting exportable methods
+
+Mark run method as an external function
+
+Implement function setter
+
+Fix imports
+
+Set up config from __opts__
+
+Use utility class
+
+Remove utils class
+
+Allow specifying a profile directly from the API parameter
+
+Rename module by virtual name
+
+Bypass parent subclass
+
+Implement profiles listing (local only for now)
+
+Specify profile from the state/call
+
+Set default or personalised archive name
+
+Add archives lister
+
+Add personalised name element to the archive name
+
+Use proper args/kwargs to the exported function
+
+Add archives deletion function
+
+Change log level when debugging rendered profiles
+
+Add ability to directly pass profile source when taking local data
+
+Add pillar profile support
+
+Remove extra line
+
+Fix header
+
+Change output format for deleting archives
+
+Refactor logger output format
+
+Add time/milliseconds to each log notification
+
+Fix imports
+
+Switch output destination by context
+
+Add last archive function
+
+Lintfix
+
+Return consistent type
+
+Change output format for deleted archives report
+
+Implement syncing of report archives to the reporting node
+
+Send multiple files at once via rsync, instead of sending one after another
+
+Add sync stats formatter
+
+Change signature: cleanup -> move. Update docstring.
+
+Flush empty data from the output format
+
+Report archive files activity
+
+Refactor imports
+
+Do not remove retcode if it is EX_OK
+
+Do not raise rsync error for undefined archives.
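The rsync notes above describe batching all archives into a single transfer. A minimal stand-alone sketch of that ``--files-from`` pattern follows; it shells out to the ``rsync`` binary directly, whereas the patch itself goes through Salt's ``rsync.rsync`` execution module, and all names here are illustrative only.

.. code-block:: python

    import os
    import subprocess
    import tempfile

    def sync_archives(archives, destination):
        """Ship several archive files in one rsync run."""
        src_dir = os.path.dirname(archives[0])
        fd, file_list = tempfile.mkstemp()
        try:
            with os.fdopen(fd, "w") as fhandle:
                # rsync reads the relative file names from this list.
                fhandle.write("\n".join(os.path.basename(arc) for arc in archives))
            subprocess.run(
                ["rsync", "--stats", "--files-from={}".format(file_list),
                 src_dir, destination],
                check=True,
            )
        finally:
            os.unlink(file_list)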
+
+Update header
+
+Add salt-support state module
+
+Move all functions into a callable class object
+
+Support __call__ function in state and command modules as the default entrance that does not need to be specified in SLS state syntax
+
+Allow outside access only to permitted class methods
+
+Pre-create the destination of the archive, preventing a single archive from being copied as the group name
+
+Handle function exceptions
+
+Add unit test scaffold
+
+Add LogCollector UT for testing regular message
+
+Add LogCollector UT for testing INFO message
+
+Add LogCollector UT for testing WARNING message
+
+Replace hardcoded variables with defined constants
+
+Add LogCollector UT for testing ERROR message
+
+Test title attribute in msg method of LogCollector
+
+Add UT for LogCollector on highlighter method
+
+Add UT for LogCollector on put method
+
+Fix docstrings
+
+Add UT for archive name generator
+
+Add UT for custom archive name
+
+Fix docstring for the UT
+
+Add UT for checking profiles list format
+
+Add Unit Test for existing archives listing
+
+Add UT for the last archive function
+
+Create instance of the support class
+
+Add UT for successfully deleting all archives
+
+Add UT for deleting archives with failures
+
+Add UT for formatting sync stats and order preservation
+
+Add UT for testing sync failure when no archives have been specified
+
+Add UT for last picked archive not found
+
+Add UT for last specified archive not found
+
+Bugfix: do not create an array with a None element in it
+
+Fix UT for found bugfix
+
+Add UT for syncing-no-archives failure
+
+Add UT for sync function
+
+Add UT for run support function
+
+Fix docstring for function "run"
+
+Lintfix: use 'salt.support.mock' and 'patch()'
+
+Rewrite subdirectory creation and do not rely on Python3-only code
+
+Lintfix: remove unused imports
+
+Lintfix: regexp strings
+
+Break down one-liner if/else clause
+
+Use ordered dictionary to preserve the order of the state.
+
+This has a transparent effect on the current process: OrderedDict is the
+same as a plain Python dict, except that it preserves the order of the state
+chunks.
+
+Refactor state processing class.
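The ordered-dictionary note above and the ``__call__`` notes around it combine into the dispatch idea sketched here: when an SLS block names no function explicitly, a module-level ``__call__`` acts as the entrance and receives the compiled chunk as an ``OrderedDict``, so execution follows the order written in the SLS file. This toy version only echoes what it would run; the actual wiring lives in the ``salt/state.py`` hunk further down.

.. code-block:: python

    from collections import OrderedDict

    def __call__(*args, **kwargs):
        # Mirrors the call shape used by the state compiler:
        # states[full](cdata["args"], module=None, state=cdata["kwargs"])
        state = kwargs.get("state", OrderedDict())
        results = OrderedDict()
        for fun, argsets in state.items():
            merged = {}
            for argset in argsets:  # each argset is a single-key dict
                merged.update(argset)
            results[fun] = "would run {} with {}".format(fun, merged)
        return results

    chunk = OrderedDict(
        [("taken", [{"profile": "default"}]), ("collected", [{"group": "somewhere"}])]
    )
    print(__call__(chunk, module=None, state=chunk))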
+ +Add __call__ function to process single-id syntax + +Add backward-compatibility with default SLS syntax (id-per-call) + +Lintfix: E1120 no value in argument 'name' for class constructor + +Remove unused import + +Check last function by full name --- + doc/ref/modules/all/index.rst | 1 + + doc/ref/states/all/index.rst | 1 + salt/cli/support/__init__.py | 76 +++ - salt/cli/support/collector.py | 538 +++++++++++++++++++++ + salt/cli/support/collector.py | 563 ++++++++++++++++++++++ salt/cli/support/console.py | 184 +++++++ - salt/cli/support/intfunc.py | 40 ++ + salt/cli/support/intfunc.py | 51 ++ salt/cli/support/localrunner.py | 33 ++ - salt/cli/support/profiles/default.yml | 71 +++ + salt/cli/support/profiles/__init__.py | 4 + + salt/cli/support/profiles/default.yml | 78 +++ salt/cli/support/profiles/jobs-active.yml | 3 + salt/cli/support/profiles/jobs-last.yml | 3 + salt/cli/support/profiles/jobs-trace.yml | 7 + @@ -304,17 +527,25 @@ Removed blacklist of pkg_resources salt/cli/support/profiles/postgres.yml | 11 + salt/cli/support/profiles/salt.yml | 9 + salt/cli/support/profiles/users.yml | 22 + + salt/loader.py | 6 +- + salt/modules/saltsupport.py | 405 ++++++++++++++++ salt/scripts.py | 15 + + salt/state.py | 38 +- + salt/states/saltsupport.py | 225 +++++++++ + salt/utils/args.py | 3 +- + salt/utils/decorators/__init__.py | 68 +-- salt/utils/parsers.py | 114 +++++ scripts/salt-support | 11 + setup.py | 2 + - tests/unit/cli/test_support.py | 553 ++++++++++++++++++++++ - 18 files changed, 1719 insertions(+) + tests/unit/cli/test_support.py | 553 +++++++++++++++++++++ + tests/unit/modules/test_saltsupport.py | 496 +++++++++++++++++++ + 28 files changed, 2973 insertions(+), 36 deletions(-) create mode 100644 salt/cli/support/__init__.py create mode 100644 salt/cli/support/collector.py create mode 100644 salt/cli/support/console.py create mode 100644 salt/cli/support/intfunc.py create mode 100644 salt/cli/support/localrunner.py + create mode 100644 salt/cli/support/profiles/__init__.py create mode 100644 salt/cli/support/profiles/default.yml create mode 100644 salt/cli/support/profiles/jobs-active.yml create mode 100644 salt/cli/support/profiles/jobs-last.yml @@ -323,12 +554,39 @@ Removed blacklist of pkg_resources create mode 100644 salt/cli/support/profiles/postgres.yml create mode 100644 salt/cli/support/profiles/salt.yml create mode 100644 salt/cli/support/profiles/users.yml + create mode 100644 salt/modules/saltsupport.py + create mode 100644 salt/states/saltsupport.py create mode 100755 scripts/salt-support create mode 100644 tests/unit/cli/test_support.py + create mode 100644 tests/unit/modules/test_saltsupport.py +diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst +index d9501ae4d9..3fff7ad636 100644 +--- a/doc/ref/modules/all/index.rst ++++ b/doc/ref/modules/all/index.rst +@@ -414,6 +414,7 @@ execution modules + salt_version + saltcheck + saltcloudmod ++ saltsupport + saltutil + schedule + scp_mod +diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst +index 914b63d0fb..e40b32a6e8 100644 +--- a/doc/ref/states/all/index.rst ++++ b/doc/ref/states/all/index.rst +@@ -280,6 +280,7 @@ state modules + rvm + salt_proxy + saltmod ++ saltsupport + saltutil + schedule + selinux diff --git a/salt/cli/support/__init__.py b/salt/cli/support/__init__.py new file mode 100644 -index 0000000000..4fdf44186f +index 0000000000..59c2609e07 --- /dev/null +++ b/salt/cli/support/__init__.py @@ -0,0 +1,76 @@ @@ -381,7 +639,7 @@ index 0000000000..4fdf44186f + if 
os.path.exists(profile_path): + try: + rendered_template = _render_profile(profile_path, caller, runner) -+ log.trace("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) ++ log.debug("\n{d}\n{t}\n{d}\n".format(d="-" * 80, t=rendered_template)) + data.update(yaml.load(rendered_template)) + except Exception as ex: + log.debug(ex, exc_info=True) @@ -410,10 +668,10 @@ index 0000000000..4fdf44186f + return sorted(profiles) diff --git a/salt/cli/support/collector.py b/salt/cli/support/collector.py new file mode 100644 -index 0000000000..bfbf491f5b +index 0000000000..1879cc5220 --- /dev/null +++ b/salt/cli/support/collector.py -@@ -0,0 +1,538 @@ +@@ -0,0 +1,563 @@ +import builtins as exceptions +import copy +import json @@ -540,6 +798,31 @@ index 0000000000..bfbf491f5b + self.__current_section = [] + self.__current_section_name = name + ++ def _printout(self, data, output): ++ """ ++ Use salt outputter to printout content. ++ ++ :return: ++ """ ++ opts = {"extension_modules": "", "color": False} ++ try: ++ printout = salt.output.get_printout(output, opts)(data) ++ if printout is not None: ++ return printout.rstrip() ++ except (KeyError, AttributeError, TypeError) as err: ++ log.debug(err, exc_info=True) ++ try: ++ printout = salt.output.get_printout("nested", opts)(data) ++ if printout is not None: ++ return printout.rstrip() ++ except (KeyError, AttributeError, TypeError) as err: ++ log.debug(err, exc_info=True) ++ printout = salt.output.get_printout("raw", opts)(data) ++ if printout is not None: ++ return printout.rstrip() ++ ++ return salt.output.try_printout(data, output, opts) ++ + def write(self, title, data, output=None): + """ + Add a data to the current opened section. @@ -553,9 +836,7 @@ index 0000000000..bfbf491f5b + try: + if isinstance(data, dict) and "return" in data: + data = data["return"] -+ content = salt.output.try_printout( -+ data, output, {"extension_modules": "", "color": False} -+ ) ++ content = self._printout(data, output) + except Exception: # Fall-back to just raw YAML + content = None + else: @@ -755,7 +1036,7 @@ index 0000000000..bfbf491f5b + + return data + -+ def collect_local_data(self): ++ def collect_local_data(self, profile=None, profile_source=None): + """ + Collects master system data. + :return: @@ -781,8 +1062,8 @@ index 0000000000..bfbf491f5b + self._local_run({"fun": func, "arg": args, "kwarg": kwargs}) + ) + -+ scenario = salt.cli.support.get_profile( -+ self.config["support_profile"], call, run ++ scenario = profile_source or salt.cli.support.get_profile( ++ profile or self.config["support_profile"], call, run + ) + for category_name in scenario: + self.out.put(category_name) @@ -834,13 +1115,6 @@ index 0000000000..bfbf491f5b + + return action_name.split(":")[0] or None + -+ def collect_targets_data(self): -+ """ -+ Collects minion targets data -+ :return: -+ """ -+ # TODO: remote collector? 
-+ + def _cleanup(self): + """ + Cleanup if crash/exception @@ -852,7 +1126,11 @@ index 0000000000..bfbf491f5b + and os.path.exists(self.config["support_archive"]) + ): + self.out.warning("Terminated earlier, cleaning up") -+ os.unlink(self.config["support_archive"]) ++ try: ++ os.unlink(self.config["support_archive"]) ++ except Exception as err: ++ log.debug(err) ++ self.out.error("{} while cleaning up.".format(err)) + + def _check_existing_archive(self): + """ @@ -868,7 +1146,13 @@ index 0000000000..bfbf491f5b + self.config["support_archive"] + ) + ) -+ os.unlink(self.config["support_archive"]) ++ try: ++ os.unlink(self.config["support_archive"]) ++ except Exception as err: ++ log.debug(err) ++ self.out.error( ++ "{} while trying to overwrite existing archive.".format(err) ++ ) + ret = True + else: + self.out.warning( @@ -934,7 +1218,6 @@ index 0000000000..bfbf491f5b + self.collector.open() + self.collect_local_data() + self.collect_internal_data() -+ self.collect_targets_data() + self.collector.close() + + archive_path = self.collector.archive_path @@ -1144,15 +1427,16 @@ index 0000000000..266b645479 + return wrapper.wrap(txt) diff --git a/salt/cli/support/intfunc.py b/salt/cli/support/intfunc.py new file mode 100644 -index 0000000000..d3d8f83cb8 +index 0000000000..a9f76a6003 --- /dev/null +++ b/salt/cli/support/intfunc.py -@@ -0,0 +1,40 @@ +@@ -0,0 +1,51 @@ +""" +Internal functions. +""" +# Maybe this needs to be a modules in a future? + ++import glob +import os + +import salt.utils.files @@ -1161,7 +1445,7 @@ index 0000000000..d3d8f83cb8 +out = MessagesOutput() + + -+def filetree(collector, path): ++def filetree(collector, *paths): + """ + Add all files in the tree. If the "path" is a file, + only that file will be added. @@ -1169,25 +1453,35 @@ index 0000000000..d3d8f83cb8 + :param path: File or directory + :return: + """ -+ if not path: -+ out.error("Path not defined", ident=2) -+ else: -+ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. -+ # pylint: disable=W8470 -+ if os.path.isfile(path): -+ filename = os.path.basename(path) -+ try: -+ file_ref = salt.utils.files.fopen(path) # pylint: disable=W -+ out.put("Add {}".format(filename), indent=2) -+ collector.add(filename) -+ collector.link(title=path, path=file_ref) -+ except Exception as err: -+ out.error(err, ident=4) -+ # pylint: enable=W8470 ++ _paths = [] ++ # Unglob ++ for path in paths: ++ _paths += glob.glob(path) ++ for path in set(_paths): ++ if not path: ++ out.error("Path not defined", ident=2) ++ elif not os.path.exists(path): ++ out.warning("Path {} does not exists".format(path)) + else: -+ for fname in os.listdir(path): -+ fname = os.path.join(path, fname) -+ filetree(collector, fname) ++ # The filehandler needs to be explicitly passed here, so PyLint needs to accept that. 
++ # pylint: disable=W8470 ++ if os.path.isfile(path): ++ filename = os.path.basename(path) ++ try: ++ file_ref = salt.utils.files.fopen(path) # pylint: disable=W ++ out.put("Add {}".format(filename), indent=2) ++ collector.add(filename) ++ collector.link(title=path, path=file_ref) ++ except Exception as err: ++ out.error(err, ident=4) ++ # pylint: enable=W8470 ++ else: ++ try: ++ for fname in os.listdir(path): ++ fname = os.path.join(path, fname) ++ filetree(collector, [fname]) ++ except Exception as err: ++ out.error(err, ident=4) diff --git a/salt/cli/support/localrunner.py b/salt/cli/support/localrunner.py new file mode 100644 index 0000000000..ad10eda0b0 @@ -1227,12 +1521,22 @@ index 0000000000..ad10eda0b0 + low["__tag__"] = tag + + return self.low(fun, low, print_event=False, full_return=False) +diff --git a/salt/cli/support/profiles/__init__.py b/salt/cli/support/profiles/__init__.py +new file mode 100644 +index 0000000000..b86aef30b8 +--- /dev/null ++++ b/salt/cli/support/profiles/__init__.py +@@ -0,0 +1,4 @@ ++# coding=utf-8 ++''' ++Profiles for salt-support. ++''' diff --git a/salt/cli/support/profiles/default.yml b/salt/cli/support/profiles/default.yml new file mode 100644 -index 0000000000..01d9a26193 +index 0000000000..3defb5eef3 --- /dev/null +++ b/salt/cli/support/profiles/default.yml -@@ -0,0 +1,71 @@ +@@ -0,0 +1,78 @@ +sysinfo: + - description: | + Get the Salt grains of the current system. @@ -1297,12 +1601,19 @@ index 0000000000..01d9a26193 + - ps.top: + info: Top CPU consuming processes + ++boot_log: ++ - filetree: ++ info: Collect boot logs ++ args: ++ - /var/log/boot.* ++ +system.log: + # This works on any file system object. + - filetree: + info: Add system log + args: + - /var/log/syslog ++ - /var/log/messages + diff --git a/salt/cli/support/profiles/jobs-active.yml b/salt/cli/support/profiles/jobs-active.yml new file mode 100644 @@ -1428,6 +1739,434 @@ index 0000000000..391acdb606 + - group.getent: + info: List of all available groups + output: table +diff --git a/salt/loader.py b/salt/loader.py +index 7a32f4c90d..491768a028 100644 +--- a/salt/loader.py ++++ b/salt/loader.py +@@ -2019,8 +2019,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): + } + + for attr in getattr(mod, "__load__", dir(mod)): +- if attr.startswith("_"): +- # private functions are skipped ++ if attr.startswith("_") and attr != "__call__": ++ # private functions are skipped, ++ # except __call__ which is default entrance ++ # for multi-function batch-like state syntax + continue + func = getattr(mod, attr) + if not inspect.isfunction(func) and not isinstance(func, functools.partial): +diff --git a/salt/modules/saltsupport.py b/salt/modules/saltsupport.py +new file mode 100644 +index 0000000000..e800e3bf1f +--- /dev/null ++++ b/salt/modules/saltsupport.py +@@ -0,0 +1,405 @@ ++# ++# Author: Bo Maryniuk ++# ++# Copyright 2018 SUSE LLC ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++""" ++:codeauthor: :email:`Bo Maryniuk ` ++ ++Module to run salt-support within Salt. 
++""" ++# pylint: disable=W0231,W0221 ++ ++ ++import datetime ++import logging ++import os ++import re ++import sys ++import tempfile ++import time ++ ++import salt.cli.support ++import salt.cli.support.intfunc ++import salt.defaults.exitcodes ++import salt.exceptions ++import salt.utils.decorators ++import salt.utils.dictupdate ++import salt.utils.odict ++import salt.utils.path ++import salt.utils.stringutils ++from salt.cli.support.collector import SaltSupport, SupportDataCollector ++ ++__virtualname__ = "support" ++log = logging.getLogger(__name__) ++ ++ ++class LogCollector: ++ """ ++ Output collector. ++ """ ++ ++ INFO = "info" ++ WARNING = "warning" ++ ERROR = "error" ++ ++ class MessagesList(list): ++ def append(self, obj): ++ list.append( ++ self, ++ "{} - {}".format( ++ datetime.datetime.utcnow().strftime("%T.%f")[:-3], obj ++ ), ++ ) ++ ++ __call__ = append ++ ++ def __init__(self): ++ self.messages = { ++ self.INFO: self.MessagesList(), ++ self.WARNING: self.MessagesList(), ++ self.ERROR: self.MessagesList(), ++ } ++ ++ def msg(self, message, *args, **kwargs): ++ title = kwargs.get("title") ++ if title: ++ message = "{}: {}".format(title, message) ++ self.messages[self.INFO](message) ++ ++ def info(self, message, *args, **kwargs): ++ self.msg(message) ++ ++ def warning(self, message, *args, **kwargs): ++ self.messages[self.WARNING](message) ++ ++ def error(self, message, *args, **kwargs): ++ self.messages[self.ERROR](message) ++ ++ def put(self, message, *args, **kwargs): ++ self.messages[self.INFO](message) ++ ++ def highlight(self, message, *values, **kwargs): ++ self.msg(message.format(*values)) ++ ++ ++class SaltSupportModule(SaltSupport): ++ """ ++ Salt Support module class. ++ """ ++ ++ def __init__(self): ++ """ ++ Constructor ++ """ ++ self.config = self.setup_config() ++ ++ def setup_config(self): ++ """ ++ Return current configuration ++ :return: ++ """ ++ return __opts__ ++ ++ def _get_archive_name(self, archname=None): ++ """ ++ Create default archive name. ++ ++ :return: ++ """ ++ archname = re.sub("[^a-z0-9]", "", (archname or "").lower()) or "support" ++ for grain in ["fqdn", "host", "localhost", "nodename"]: ++ host = __grains__.get(grain) ++ if host: ++ break ++ if not host: ++ host = "localhost" ++ ++ return os.path.join( ++ tempfile.gettempdir(), ++ "{hostname}-{archname}-{date}-{time}.bz2".format( ++ archname=archname, ++ hostname=host, ++ date=time.strftime("%Y%m%d"), ++ time=time.strftime("%H%M%S"), ++ ), ++ ) ++ ++ @salt.utils.decorators.external ++ def profiles(self): ++ """ ++ Get list of profiles. ++ ++ :return: ++ """ ++ return { ++ "standard": salt.cli.support.get_profiles(self.config), ++ "custom": [], ++ } ++ ++ @salt.utils.decorators.external ++ def archives(self): ++ """ ++ Get list of existing archives. 
++ :return: ++ """ ++ arc_files = [] ++ tmpdir = tempfile.gettempdir() ++ for filename in os.listdir(tmpdir): ++ mtc = re.match(r"\w+-\w+-\d+-\d+\.bz2", filename) ++ if mtc and len(filename) == mtc.span()[-1]: ++ arc_files.append(os.path.join(tmpdir, filename)) ++ ++ return arc_files ++ ++ @salt.utils.decorators.external ++ def last_archive(self): ++ """ ++ Get the last available archive ++ :return: ++ """ ++ archives = {} ++ for archive in self.archives(): ++ archives[int(archive.split(".")[0].split("-")[-1])] = archive ++ ++ return archives and archives[max(archives)] or None ++ ++ @salt.utils.decorators.external ++ def delete_archives(self, *archives): ++ """ ++ Delete archives ++ :return: ++ """ ++ # Remove paths ++ _archives = [] ++ for archive in archives: ++ _archives.append(os.path.basename(archive)) ++ archives = _archives[:] ++ ++ ret = {"files": {}, "errors": {}} ++ for archive in self.archives(): ++ arc_dir = os.path.dirname(archive) ++ archive = os.path.basename(archive) ++ if archives and archive in archives or not archives: ++ archive = os.path.join(arc_dir, archive) ++ try: ++ os.unlink(archive) ++ ret["files"][archive] = "removed" ++ except Exception as err: ++ ret["errors"][archive] = str(err) ++ ret["files"][archive] = "left" ++ ++ return ret ++ ++ def format_sync_stats(self, cnt): ++ """ ++ Format stats of the sync output. ++ ++ :param cnt: ++ :return: ++ """ ++ stats = salt.utils.odict.OrderedDict() ++ if cnt.get("retcode") == salt.defaults.exitcodes.EX_OK: ++ for line in cnt.get("stdout", "").split(os.linesep): ++ line = line.split(": ") ++ if len(line) == 2: ++ stats[line[0].lower().replace(" ", "_")] = line[1] ++ cnt["transfer"] = stats ++ del cnt["stdout"] ++ ++ # Remove empty ++ empty_sections = [] ++ for section in cnt: ++ if not cnt[section] and section != "retcode": ++ empty_sections.append(section) ++ for section in empty_sections: ++ del cnt[section] ++ ++ return cnt ++ ++ @salt.utils.decorators.depends("rsync") ++ @salt.utils.decorators.external ++ def sync(self, group, name=None, host=None, location=None, move=False, all=False): ++ """ ++ Sync the latest archive to the host on given location. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt '*' support.sync group=test ++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 ++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan ++ salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan location=/opt/ ++ ++ :param group: name of the local directory to which sync is going to put the result files ++ :param name: name of the archive. Latest, if not specified. ++ :param host: name of the destination host for rsync. Default is master, if not specified. ++ :param location: local destination directory, default temporary if not specified ++ :param move: move archive file[s]. Default is False. ++ :param all: work with all available archives. Default is False (i.e. latest available) ++ ++ :return: ++ """ ++ tfh, tfn = tempfile.mkstemp() ++ processed_archives = [] ++ src_uri = uri = None ++ ++ last_arc = self.last_archive() ++ if name: ++ archives = [name] ++ elif all: ++ archives = self.archives() ++ elif last_arc: ++ archives = [last_arc] ++ else: ++ archives = [] ++ ++ for name in archives: ++ err = None ++ if not name: ++ err = "No support archive has been defined." 
++ elif not os.path.exists(name): ++ err = 'Support archive "{}" was not found'.format(name) ++ if err is not None: ++ log.error(err) ++ raise salt.exceptions.SaltInvocationError(err) ++ ++ if not uri: ++ src_uri = os.path.dirname(name) ++ uri = "{host}:{loc}".format( ++ host=host or __opts__["master"], ++ loc=os.path.join(location or tempfile.gettempdir(), group), ++ ) ++ ++ os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name))) ++ os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep)) ++ processed_archives.append(name) ++ log.debug("Syncing {filename} to {uri}".format(filename=name, uri=uri)) ++ os.close(tfh) ++ ++ if not processed_archives: ++ raise salt.exceptions.SaltInvocationError("No archives found to transfer.") ++ ++ ret = __salt__["rsync.rsync"]( ++ src=src_uri, ++ dst=uri, ++ additional_opts=["--stats", "--files-from={}".format(tfn)], ++ ) ++ ret["files"] = {} ++ for name in processed_archives: ++ if move: ++ salt.utils.dictupdate.update(ret, self.delete_archives(name)) ++ log.debug("Deleting {filename}".format(filename=name)) ++ ret["files"][name] = "moved" ++ else: ++ ret["files"][name] = "copied" ++ ++ try: ++ os.unlink(tfn) ++ except OSError as err: ++ log.error( ++ "Cannot remove temporary rsync file {fn}: {err}".format(fn=tfn, err=err) ++ ) ++ ++ return self.format_sync_stats(ret) ++ ++ @salt.utils.decorators.external ++ def run(self, profile="default", pillar=None, archive=None, output="nested"): ++ """ ++ Run Salt Support on the minion. ++ ++ profile ++ Set available profile name. Default is "default". ++ ++ pillar ++ Set available profile from the pillars. ++ ++ archive ++ Override archive name. Default is "support". This results to "hostname-support-YYYYMMDD-hhmmss.bz2". ++ ++ output ++ Change the default outputter. Default is "nested". ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt '*' support.run ++ salt '*' support.run profile=network ++ salt '*' support.run pillar=something_special ++ """ ++ ++ class outputswitch: ++ """ ++ Output switcher on context ++ """ ++ ++ def __init__(self, output_device): ++ self._tmp_out = output_device ++ self._orig_out = None ++ ++ def __enter__(self): ++ self._orig_out = salt.cli.support.intfunc.out ++ salt.cli.support.intfunc.out = self._tmp_out ++ ++ def __exit__(self, *args): ++ salt.cli.support.intfunc.out = self._orig_out ++ ++ self.out = LogCollector() ++ with outputswitch(self.out): ++ self.collector = SupportDataCollector( ++ archive or self._get_archive_name(archname=archive), output ++ ) ++ self.collector.out = self.out ++ self.collector.open() ++ self.collect_local_data( ++ profile=profile, profile_source=__pillar__.get(pillar) ++ ) ++ self.collect_internal_data() ++ self.collector.close() ++ ++ return {"archive": self.collector.archive_path, "messages": self.out.messages} ++ ++ ++def __virtual__(): ++ """ ++ Set method references as module functions aliases ++ :return: ++ """ ++ support = SaltSupportModule() ++ ++ def _set_function(obj): ++ """ ++ Create a Salt function for the SaltSupport class. ++ """ ++ ++ def _cmd(*args, **kwargs): ++ """ ++ Call support method as a function from the Salt. 
++ """ ++ _kwargs = {} ++ for kw in kwargs: ++ if not kw.startswith("__"): ++ _kwargs[kw] = kwargs[kw] ++ return obj(*args, **_kwargs) ++ ++ _cmd.__doc__ = obj.__doc__ ++ return _cmd ++ ++ for m_name in dir(support): ++ obj = getattr(support, m_name) ++ if getattr(obj, "external", False): ++ setattr(sys.modules[__name__], m_name, _set_function(obj)) ++ ++ return __virtualname__ diff --git a/salt/scripts.py b/salt/scripts.py index 8f3cde8477..e5c248f011 100644 --- a/salt/scripts.py @@ -1451,8 +2190,501 @@ index 8f3cde8477..e5c248f011 100644 + client = salt.cli.support.collector.SaltSupport() + _install_signal_handlers(client) + client.run() +diff --git a/salt/state.py b/salt/state.py +index f999191e6d..f5ac3199c7 100644 +--- a/salt/state.py ++++ b/salt/state.py +@@ -1565,7 +1565,9 @@ class State: + names = [] + if state.startswith("__"): + continue +- chunk = {"state": state, "name": name} ++ chunk = OrderedDict() ++ chunk["state"] = state ++ chunk["name"] = name + if orchestration_jid is not None: + chunk["__orchestration_jid__"] = orchestration_jid + if "__sls__" in body: +@@ -2168,9 +2170,16 @@ class State: + ret = self.call_parallel(cdata, low) + else: + self.format_slots(cdata) +- ret = self.states[cdata["full"]]( +- *cdata["args"], **cdata["kwargs"] +- ) ++ if cdata["full"].split(".")[-1] == "__call__": ++ # __call__ requires OrderedDict to preserve state order ++ # kwargs are also invalid overall ++ ret = self.states[cdata["full"]]( ++ cdata["args"], module=None, state=cdata["kwargs"] ++ ) ++ else: ++ ret = self.states[cdata["full"]]( ++ *cdata["args"], **cdata["kwargs"] ++ ) + self.states.inject_globals = {} + if ( + "check_cmd" in low +@@ -3242,10 +3251,31 @@ class State: + running.update(errors) + return running + ++ def inject_default_call(self, high): ++ """ ++ Sets .call function to a state, if not there. ++ ++ :param high: ++ :return: ++ """ ++ for chunk in high: ++ state = high[chunk] ++ for state_ref in state: ++ needs_default = True ++ for argset in state[state_ref]: ++ if isinstance(argset, str): ++ needs_default = False ++ break ++ if needs_default: ++ order = state[state_ref].pop(-1) ++ state[state_ref].append("__call__") ++ state[state_ref].append(order) ++ + def call_high(self, high, orchestration_jid=None): + """ + Process a high data call and ensure the defined states. + """ ++ self.inject_default_call(high) + errors = [] + # If there is extension data reconcile it + high, ext_errors = self.reconcile_extend(high) +diff --git a/salt/states/saltsupport.py b/salt/states/saltsupport.py +new file mode 100644 +index 0000000000..fb0c9e0372 +--- /dev/null ++++ b/salt/states/saltsupport.py +@@ -0,0 +1,225 @@ ++# ++# Author: Bo Maryniuk ++# ++# Copyright 2018 SUSE LLC ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++ ++r""" ++:codeauthor: :email:`Bo Maryniuk ` ++ ++Execution of Salt Support from within states ++============================================ ++ ++State to collect support data from the systems: ++ ++.. 
code-block:: yaml ++ ++ examine_my_systems: ++ support.taken: ++ - profile: default ++ ++ support.collected: ++ - group: somewhere ++ - move: true ++ ++""" ++import logging ++import os ++import tempfile ++ ++import salt.exceptions ++ ++# Import salt modules ++import salt.fileclient ++import salt.utils.decorators.path ++import salt.utils.odict ++ ++log = logging.getLogger(__name__) ++__virtualname__ = "support" ++ ++ ++class SaltSupportState: ++ """ ++ Salt-support. ++ """ ++ ++ EXPORTED = ["collected", "taken"] ++ ++ def get_kwargs(self, data): ++ kwargs = {} ++ for keyset in data: ++ kwargs.update(keyset) ++ ++ return kwargs ++ ++ def __call__(self, state): ++ """ ++ Call support. ++ ++ :param args: ++ :param kwargs: ++ :return: ++ """ ++ ret = { ++ "name": state.pop("name"), ++ "changes": {}, ++ "result": True, ++ "comment": "", ++ } ++ ++ out = {} ++ functions = ["Functions:"] ++ try: ++ for ref_func, ref_kwargs in state.items(): ++ if ref_func not in self.EXPORTED: ++ raise salt.exceptions.SaltInvocationError( ++ "Function {} is not found".format(ref_func) ++ ) ++ out[ref_func] = getattr(self, ref_func)(**self.get_kwargs(ref_kwargs)) ++ functions.append(" - {}".format(ref_func)) ++ ret["comment"] = "\n".join(functions) ++ except Exception as ex: ++ ret["comment"] = str(ex) ++ ret["result"] = False ++ ret["changes"] = out ++ ++ return ret ++ ++ def check_destination(self, location, group): ++ """ ++ Check destination for the archives. ++ :return: ++ """ ++ # Pre-create destination, since rsync will ++ # put one file named as group ++ try: ++ destination = os.path.join(location, group) ++ if os.path.exists(destination) and not os.path.isdir(destination): ++ raise salt.exceptions.SaltException( ++ 'Destination "{}" should be directory!'.format(destination) ++ ) ++ if not os.path.exists(destination): ++ os.makedirs(destination) ++ log.debug("Created destination directory for archives: %s", destination) ++ else: ++ log.debug( ++ "Archives destination directory %s already exists", destination ++ ) ++ except OSError as err: ++ log.error(err) ++ ++ def collected( ++ self, group, filename=None, host=None, location=None, move=True, all=True ++ ): ++ """ ++ Sync archives to a central place. ++ ++ :param name: ++ :param group: ++ :param filename: ++ :param host: ++ :param location: ++ :param move: ++ :param all: ++ :return: ++ """ ++ ret = { ++ "name": "support.collected", ++ "changes": {}, ++ "result": True, ++ "comment": "", ++ } ++ location = location or tempfile.gettempdir() ++ self.check_destination(location, group) ++ ret["changes"] = __salt__["support.sync"]( ++ group, name=filename, host=host, location=location, move=move, all=all ++ ) ++ ++ return ret ++ ++ def taken(self, profile="default", pillar=None, archive=None, output="nested"): ++ """ ++ Takes minion support config data. 
++ ++ :param profile: ++ :param pillar: ++ :param archive: ++ :param output: ++ :return: ++ """ ++ ret = { ++ "name": "support.taken", ++ "changes": {}, ++ "result": True, ++ } ++ ++ result = __salt__["support.run"]( ++ profile=profile, pillar=pillar, archive=archive, output=output ++ ) ++ if result.get("archive"): ++ ret[ ++ "comment" ++ ] = "Information about this system has been saved to {} file.".format( ++ result["archive"] ++ ) ++ ret["changes"]["archive"] = result["archive"] ++ ret["changes"]["messages"] = {} ++ for key in ["info", "error", "warning"]: ++ if result.get("messages", {}).get(key): ++ ret["changes"]["messages"][key] = result["messages"][key] ++ else: ++ ret["comment"] = "" ++ ++ return ret ++ ++ ++_support_state = SaltSupportState() ++ ++ ++def __call__(*args, **kwargs): ++ """ ++ SLS single-ID syntax processing. ++ ++ module: ++ This module reference, equals to sys.modules[__name__] ++ ++ state: ++ Compiled state in preserved order. The function supposed to look ++ at first level array of functions. ++ ++ :param cdata: ++ :param kwargs: ++ :return: ++ """ ++ return _support_state(kwargs.get("state", {})) ++ ++ ++def taken(name, profile="default", pillar=None, archive=None, output="nested"): ++ return _support_state.taken( ++ profile=profile, pillar=pillar, archive=archive, output=output ++ ) ++ ++ ++def collected( ++ name, group, filename=None, host=None, location=None, move=True, all=True ++): ++ return _support_state.collected( ++ group=group, filename=filename, host=host, location=location, move=move, all=all ++ ) ++ ++ ++def __virtual__(): ++ """ ++ Salt Support state ++ """ ++ return __virtualname__ +diff --git a/salt/utils/args.py b/salt/utils/args.py +index ba50aff126..4e5ca0eedf 100644 +--- a/salt/utils/args.py ++++ b/salt/utils/args.py +@@ -15,6 +15,7 @@ import salt.utils.jid + import salt.utils.versions + import salt.utils.yaml + from salt.exceptions import SaltInvocationError ++from salt.utils.odict import OrderedDict + + log = logging.getLogger(__name__) + +@@ -437,7 +438,7 @@ def format_call( + ret = initial_ret is not None and initial_ret or {} + + ret["args"] = [] +- ret["kwargs"] = {} ++ ret["kwargs"] = OrderedDict() + + aspec = get_function_argspec(fun, is_class_method=is_class_method) + +diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py +index 940d0a90f2..b06cf0abc8 100644 +--- a/salt/utils/decorators/__init__.py ++++ b/salt/utils/decorators/__init__.py +@@ -1,10 +1,7 @@ +-# -*- coding: utf-8 -*- + """ + Helpful decorators for module writing + """ + +-# Import python libs +-from __future__ import absolute_import, print_function, unicode_literals + + import errno + import inspect +@@ -15,13 +12,10 @@ import time + from collections import defaultdict + from functools import wraps + +-# Import salt libs + import salt.utils.args + import salt.utils.data + import salt.utils.versions + from salt.exceptions import CommandExecutionError, SaltConfigurationError +- +-# Import 3rd-party libs + from salt.ext import six + from salt.log import LOG_LEVELS + +@@ -32,7 +26,7 @@ if getattr(sys, "getwindowsversion", False): + log = logging.getLogger(__name__) + + +-class Depends(object): ++class Depends: + """ + This decorator will check the module when it is loaded and check that the + dependencies passed in are in the globals of the module. 
If not, it will +@@ -121,7 +115,7 @@ class Depends(object): + + @staticmethod + def run_command(dependency, mod_name, func_name): +- full_name = "{0}.{1}".format(mod_name, func_name) ++ full_name = "{}.{}".format(mod_name, func_name) + log.trace("Running '%s' for '%s'", dependency, full_name) + if IS_WINDOWS: + args = salt.utils.args.shlex_split(dependency, posix=False) +@@ -145,8 +139,8 @@ class Depends(object): + It will modify the "functions" dict and remove/replace modules that + are missing dependencies. + """ +- for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]): +- for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict): ++ for dependency, dependent_dict in cls.dependency_dict[kind].items(): ++ for (mod_name, func_name), (frame, params) in dependent_dict.items(): + if mod_name != tgt_mod: + continue + # Imports from local context take presedence over those from the global context. +@@ -232,7 +226,7 @@ class Depends(object): + except (AttributeError, KeyError): + pass + +- mod_key = "{0}.{1}".format(mod_name, func_name) ++ mod_key = "{}.{}".format(mod_name, func_name) + + # if we don't have this module loaded, skip it! + if mod_key not in functions: +@@ -267,9 +261,7 @@ def timing(function): + mod_name = function.__module__[16:] + else: + mod_name = function.__module__ +- fstr = "Function %s.%s took %.{0}f seconds to execute".format( +- sys.float_info.dig +- ) ++ fstr = "Function %s.%s took %.{}f seconds to execute".format(sys.float_info.dig) + log.profile(fstr, mod_name, function.__name__, end_time - start_time) + return ret + +@@ -291,13 +283,13 @@ def memoize(func): + def _memoize(*args, **kwargs): + str_args = [] + for arg in args: +- if not isinstance(arg, six.string_types): +- str_args.append(six.text_type(arg)) ++ if not isinstance(arg, str): ++ str_args.append(str(arg)) + else: + str_args.append(arg) + + args_ = ",".join( +- list(str_args) + ["{0}={1}".format(k, kwargs[k]) for k in sorted(kwargs)] ++ list(str_args) + ["{}={}".format(k, kwargs[k]) for k in sorted(kwargs)] + ) + if args_ not in cache: + cache[args_] = func(*args, **kwargs) +@@ -306,7 +298,7 @@ def memoize(func): + return _memoize + + +-class _DeprecationDecorator(object): ++class _DeprecationDecorator: + """ + Base mix-in class for the deprecation decorator. + Takes care of a common functionality, used in its derivatives. +@@ -359,7 +351,7 @@ class _DeprecationDecorator(object): + try: + return self._function(*args, **kwargs) + except TypeError as error: +- error = six.text_type(error).replace( ++ error = str(error).replace( + self._function, self._orig_f_name + ) # Hide hidden functions + log.error( +@@ -374,7 +366,7 @@ class _DeprecationDecorator(object): + self._function.__name__, + error, + ) +- six.reraise(*sys.exc_info()) ++ raise + else: + raise CommandExecutionError( + "Function is deprecated, but the successor function was not found." +@@ -626,11 +618,11 @@ class _WithDeprecated(_DeprecationDecorator): + + if use_deprecated and use_superseded: + raise SaltConfigurationError( +- "Function '{0}' is mentioned both in deprecated " ++ "Function '{}' is mentioned both in deprecated " + "and superseded sections. 
Please remove any of that.".format(full_name) + ) + old_function = self._globals.get( +- self._with_name or "_{0}".format(function.__name__) ++ self._with_name or "_{}".format(function.__name__) + ) + if self._policy == self.OPT_IN: + self._function = function if use_superseded else old_function +@@ -782,12 +774,30 @@ def ensure_unicode_args(function): + + @wraps(function) + def wrapped(*args, **kwargs): +- if six.PY2: +- return function( +- *salt.utils.data.decode_list(args), +- **salt.utils.data.decode_dict(kwargs) +- ) +- else: +- return function(*args, **kwargs) ++ return function(*args, **kwargs) + + return wrapped ++ ++ ++def external(func): ++ """ ++ Mark function as external. ++ ++ :param func: ++ :return: ++ """ ++ ++ def f(*args, **kwargs): ++ """ ++ Stub. ++ ++ :param args: ++ :param kwargs: ++ :return: ++ """ ++ return func(*args, **kwargs) ++ ++ f.external = True ++ f.__doc__ = func.__doc__ ++ ++ return f diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py -index 952f9aebc5..c1422a9556 100644 +index 2a5abb25d5..31a2bf0b64 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -17,6 +17,7 @@ import optparse @@ -1471,7 +2703,7 @@ index 952f9aebc5..c1422a9556 100644 import salt.utils.platform import salt.utils.process import salt.utils.stringutils -@@ -2049,6 +2051,118 @@ class SyndicOptionParser( +@@ -2066,6 +2068,118 @@ class SyndicOptionParser( return opts @@ -1581,7 +2813,7 @@ index 952f9aebc5..c1422a9556 100644 + """ + _opts, _args = optparse.OptionParser.parse_args(self) + configs = self.find_existing_configs(_opts.support_unit) -+ if cfg not in configs: ++ if configs and cfg not in configs: + cfg = configs[0] + + return config.master_config(self.get_config_file_path(cfg)) @@ -1608,10 +2840,10 @@ index 0000000000..4e0e79f3ea +if __name__ == "__main__": + salt_support() diff --git a/setup.py b/setup.py -index 08c84344ea..39a66fefba 100755 +index 031f2bc492..6f3c1abd76 100755 --- a/setup.py +++ b/setup.py -@@ -1253,6 +1253,7 @@ class SaltDistribution(distutils.dist.Distribution): +@@ -1251,6 +1251,7 @@ class SaltDistribution(distutils.dist.Distribution): "scripts/salt-master", "scripts/salt-minion", "scripts/salt-proxy", @@ -1619,7 +2851,7 @@ index 08c84344ea..39a66fefba 100755 "scripts/salt-ssh", "scripts/salt-syndic", "scripts/salt-unity", -@@ -1299,6 +1300,7 @@ class SaltDistribution(distutils.dist.Distribution): +@@ -1297,6 +1298,7 @@ class SaltDistribution(distutils.dist.Distribution): "salt-key = salt.scripts:salt_key", "salt-master = salt.scripts:salt_master", "salt-minion = salt.scripts:salt_minion", @@ -2186,7 +3418,509 @@ index 0000000000..dc0e99bb3d + == "Details on JID 0000" + ) + assert jobs_trace["jobs-details"][0]["run:jobs.list_job"]["args"] == [0] +diff --git a/tests/unit/modules/test_saltsupport.py b/tests/unit/modules/test_saltsupport.py +new file mode 100644 +index 0000000000..f9ce7be29a +--- /dev/null ++++ b/tests/unit/modules/test_saltsupport.py +@@ -0,0 +1,496 @@ ++""" ++ :codeauthor: Bo Maryniuk ++""" ++ ++ ++import datetime ++ ++import salt.exceptions ++from salt.modules import saltsupport ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch ++from tests.support.unit import TestCase, skipIf ++ ++try: ++ import pytest ++except ImportError: ++ pytest = None ++ ++ ++@skipIf(not bool(pytest), "Pytest required") ++@skipIf(NO_MOCK, NO_MOCK_REASON) ++class SaltSupportModuleTestCase(TestCase, LoaderModuleMockMixin): ++ """ ++ Test cases for 
salt.modules.support::SaltSupportModule ++ """ ++ ++ def setup_loader_modules(self): ++ return {saltsupport: {}} ++ ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) ++ @patch("time.strftime", MagicMock(return_value="000")) ++ def test_get_archive_name(self): ++ """ ++ Test archive name construction. ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ assert support._get_archive_name() == "/mnt/storage/c-3po-support-000-000.bz2" ++ ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch("salt.modules.saltsupport.__grains__", {"fqdn": "c-3po"}) ++ @patch("time.strftime", MagicMock(return_value="000")) ++ def test_get_custom_archive_name(self): ++ """ ++ Test get custom archive name. ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ temp_name = support._get_archive_name(archname="Darth Wader") ++ assert temp_name == "/mnt/storage/c-3po-darthwader-000-000.bz2" ++ temp_name = support._get_archive_name(archname="Яйця з сіллю") ++ assert temp_name == "/mnt/storage/c-3po-support-000-000.bz2" ++ temp_name = support._get_archive_name(archname="!@#$%^&*()Fillip J. Fry") ++ assert temp_name == "/mnt/storage/c-3po-fillipjfry-000-000.bz2" ++ ++ @patch( ++ "salt.cli.support.get_profiles", ++ MagicMock(return_value={"message": "Feature was not beta tested"}), ++ ) ++ def test_profiles_format(self): ++ """ ++ Test profiles format. ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ profiles = support.profiles() ++ assert "custom" in profiles ++ assert "standard" in profiles ++ assert "message" in profiles["standard"] ++ assert profiles["custom"] == [] ++ assert profiles["standard"]["message"] == "Feature was not beta tested" ++ ++ @patch("tempfile.gettempdir", MagicMock(return_value="/mnt/storage")) ++ @patch( ++ "os.listdir", ++ MagicMock( ++ return_value=[ ++ "one-support-000-000.bz2", ++ "two-support-111-111.bz2", ++ "trash.bz2", ++ "hostname-000-000.bz2", ++ "three-support-wrong222-222.bz2", ++ "000-support-000-000.bz2", ++ ] ++ ), ++ ) ++ def test_get_existing_archives(self): ++ """ ++ Get list of existing archives. 
++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ out = support.archives() ++ assert len(out) == 3 ++ for name in [ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/000-support-000-000.bz2", ++ ]: ++ assert name in out ++ ++ def test_last_archive(self): ++ """ ++ Get last archive name ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ assert support.last_archive() == "/mnt/storage/three-support-222-222.bz2" ++ ++ @patch("os.unlink", MagicMock(return_value=True)) ++ def test_delete_all_archives_success(self): ++ """ ++ Test delete archives ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ ret = support.delete_archives() ++ assert "files" in ret ++ assert "errors" in ret ++ assert not bool(ret["errors"]) ++ assert bool(ret["files"]) ++ assert isinstance(ret["errors"], dict) ++ assert isinstance(ret["files"], dict) ++ ++ for arc in support.archives(): ++ assert ret["files"][arc] == "removed" ++ ++ @patch( ++ "os.unlink", ++ MagicMock( ++ return_value=False, ++ side_effect=[ ++ OSError("Decreasing electron flux"), ++ OSError("Solar flares interference"), ++ None, ++ ], ++ ), ++ ) ++ def test_delete_all_archives_failure(self): ++ """ ++ Test delete archives failure ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ ret = support.delete_archives() ++ assert "files" in ret ++ assert "errors" in ret ++ assert bool(ret["errors"]) ++ assert bool(ret["files"]) ++ assert isinstance(ret["errors"], dict) ++ assert isinstance(ret["files"], dict) ++ ++ assert ret["files"]["/mnt/storage/three-support-222-222.bz2"] == "removed" ++ assert ret["files"]["/mnt/storage/one-support-000-000.bz2"] == "left" ++ assert ret["files"]["/mnt/storage/two-support-111-111.bz2"] == "left" ++ ++ assert len(ret["errors"]) == 2 ++ assert ( ++ ret["errors"]["/mnt/storage/one-support-000-000.bz2"] ++ == "Decreasing electron flux" ++ ) ++ assert ( ++ ret["errors"]["/mnt/storage/two-support-111-111.bz2"] ++ == "Solar flares interference" ++ ) ++ ++ def test_format_sync_stats(self): ++ """ ++ Test format rsync stats for preserving ordering of the keys ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ stats = """ ++robot: Bender ++cute: Leela ++weird: Zoidberg ++professor: Farnsworth ++ """ ++ f_stats = support.format_sync_stats({"retcode": 0, "stdout": stats}) ++ assert list(f_stats["transfer"].keys()) == [ ++ "robot", ++ "cute", ++ "weird", ++ "professor", ++ ] ++ assert list(f_stats["transfer"].values()) == [ ++ "Bender", ++ "Leela", ++ "Zoidberg", ++ "Farnsworth", ++ ] ++ ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.close", MagicMock()) ++ def test_sync_no_archives_failure(self): ++ """ ++ Test sync failed when no archives specified. 
++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock(return_value=[]) ++ ++ with pytest.raises(salt.exceptions.SaltInvocationError) as err: ++ support.sync("group-name") ++ assert "No archives found to transfer" in str(err) ++ ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) ++ def test_sync_last_picked_archive_not_found_failure(self): ++ """ ++ Test sync failed when archive was not found (last picked) ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ ++ with pytest.raises(salt.exceptions.SaltInvocationError) as err: ++ support.sync("group-name") ++ assert ( ++ ' Support archive "/mnt/storage/three-support-222-222.bz2" was not found' ++ in str(err) ++ ) ++ ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) ++ def test_sync_specified_archive_not_found_failure(self): ++ """ ++ Test sync failed when archive was not found (last picked) ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ ++ with pytest.raises(salt.exceptions.SaltInvocationError) as err: ++ support.sync("group-name", name="lost.bz2") ++ assert ' Support archive "lost.bz2" was not found' in str(err) ++ ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=False)) ++ @patch("os.close", MagicMock()) ++ def test_sync_no_archive_to_transfer_failure(self): ++ """ ++ Test sync failed when no archive was found to transfer ++ ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock(return_value=[]) ++ with pytest.raises(salt.exceptions.SaltInvocationError) as err: ++ support.sync("group-name", all=True) ++ assert "No archives found to transfer" in str(err) ++ ++ @patch("tempfile.mkstemp", MagicMock(return_value=(0, "dummy"))) ++ @patch("os.path.exists", MagicMock(return_value=True)) ++ @patch("os.close", MagicMock()) ++ @patch("os.write", MagicMock()) ++ @patch("os.unlink", MagicMock()) ++ @patch( ++ "salt.modules.saltsupport.__salt__", {"rsync.rsync": MagicMock(return_value={})} ++ ) ++ def test_sync_archives(self): ++ """ ++ Test sync archives ++ :return: ++ """ ++ support = saltsupport.SaltSupportModule() ++ support.archives = MagicMock( ++ return_value=[ ++ "/mnt/storage/one-support-000-000.bz2", ++ "/mnt/storage/two-support-111-111.bz2", ++ "/mnt/storage/three-support-222-222.bz2", ++ ] ++ ) ++ out = support.sync("group-name", host="buzz", all=True, move=False) ++ assert "files" in out ++ for arc_name in out["files"]: ++ assert out["files"][arc_name] == "copied" ++ assert saltsupport.os.unlink.call_count == 1 ++ assert saltsupport.os.unlink.call_args_list[0][0][0] == "dummy" ++ calls = [] ++ for call in saltsupport.os.write.call_args_list: ++ assert len(call) == 2 ++ calls.append(call[0]) ++ assert calls == [ ++ (0, b"one-support-000-000.bz2"), ++ (0, b"\n"), ++ (0, b"two-support-111-111.bz2"), ++ (0, b"\n"), ++ (0, b"three-support-222-222.bz2"), ++ (0, b"\n"), ++ ] ++ ++ @patch("salt.modules.saltsupport.__pillar__", {}) 
++ @patch("salt.modules.saltsupport.SupportDataCollector", MagicMock()) ++ def test_run_support(self): ++ """ ++ Test run support ++ :return: ++ """ ++ saltsupport.SupportDataCollector(None, None).archive_path = "dummy" ++ support = saltsupport.SaltSupportModule() ++ support.collect_internal_data = MagicMock() ++ support.collect_local_data = MagicMock() ++ out = support.run() ++ ++ for section in ["messages", "archive"]: ++ assert section in out ++ assert out["archive"] == "dummy" ++ for section in ["warning", "error", "info"]: ++ assert section in out["messages"] ++ ld_call = support.collect_local_data.call_args_list[0][1] ++ assert "profile" in ld_call ++ assert ld_call["profile"] == "default" ++ assert "profile_source" in ld_call ++ assert ld_call["profile_source"] is None ++ assert support.collector.open.call_count == 1 ++ assert support.collector.close.call_count == 1 ++ assert support.collect_internal_data.call_count == 1 ++ ++ ++@skipIf(not bool(pytest), "Pytest required") ++@skipIf(NO_MOCK, NO_MOCK_REASON) ++class LogCollectorTestCase(TestCase, LoaderModuleMockMixin): ++ """ ++ Test cases for salt.modules.support::LogCollector ++ """ ++ ++ def setup_loader_modules(self): ++ return {saltsupport: {}} ++ ++ def test_msg(self): ++ """ ++ Test message to the log collector. ++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ msg = "Upgrading /dev/null device" ++ out = saltsupport.LogCollector() ++ out.msg(msg, title="Here") ++ assert saltsupport.LogCollector.INFO in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}: {}".format("Here", msg) ++ ] ++ ++ def test_info_message(self): ++ """ ++ Test info message to the log collector. ++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ msg = "SIMM crosstalk during tectonic stress" ++ out = saltsupport.LogCollector() ++ out.info(msg) ++ assert saltsupport.LogCollector.INFO in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] ++ ++ def test_put_message(self): ++ """ ++ Test put message to the log collector. ++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ msg = "Webmaster kidnapped by evil cult" ++ out = saltsupport.LogCollector() ++ out.put(msg) ++ assert saltsupport.LogCollector.INFO in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] ++ ++ def test_warning_message(self): ++ """ ++ Test warning message to the log collector. 
++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ msg = "Your e-mail is now being delivered by USPS" ++ out = saltsupport.LogCollector() ++ out.warning(msg) ++ assert saltsupport.LogCollector.WARNING in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.WARNING]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.WARNING] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] ++ ++ def test_error_message(self): ++ """ ++ Test error message to the log collector. ++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ msg = "Learning curve appears to be fractal" ++ out = saltsupport.LogCollector() ++ out.error(msg) ++ assert saltsupport.LogCollector.ERROR in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.ERROR]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.ERROR] == [ ++ "00:00:00.000 - {}".format(msg) ++ ] ++ ++ def test_hl_message(self): ++ """ ++ Test highlighter message to the log collector. ++ ++ :return: ++ """ ++ utcmock = MagicMock() ++ utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0)) ++ with patch("datetime.datetime", utcmock): ++ out = saltsupport.LogCollector() ++ out.highlight("The {} TTYs became {} TTYs and vice versa", "real", "pseudo") ++ assert saltsupport.LogCollector.INFO in out.messages ++ assert ( ++ type(out.messages[saltsupport.LogCollector.INFO]) ++ == saltsupport.LogCollector.MessagesList ++ ) ++ assert out.messages[saltsupport.LogCollector.INFO] == [ ++ "00:00:00.000 - The real TTYs became " "pseudo TTYs and vice versa" ++ ] -- -2.29.2 +2.33.0 diff --git a/enhance-openscap-module-add-xccdf_eval-call-386.patch b/enhance-openscap-module-add-xccdf_eval-call-386.patch index 814523d..fcf4f08 100644 --- a/enhance-openscap-module-add-xccdf_eval-call-386.patch +++ b/enhance-openscap-module-add-xccdf_eval-call-386.patch @@ -1,4 +1,4 @@ -From 1fd51c17bc03e679a040f2c6d9ac107a2c57b7c8 Mon Sep 17 00:00:00 2001 +From 9071189b7395284f0328b59c999c18919e12ae32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 7 Jul 2021 15:41:48 +0100 @@ -13,11 +13,13 @@ Subject: [PATCH] Enhance openscap module: add "xccdf_eval" call (#386) * Add changes suggested by pre-commit Co-authored-by: Michael Calmer + +Fix error handling in openscap module (bsc#1188647) (#409) --- changelog/59756.added | 1 + - salt/modules/openscap.py | 120 ++++++++++++- - tests/unit/modules/test_openscap.py | 262 +++++++++++++++++++++++++--- - 3 files changed, 353 insertions(+), 30 deletions(-) + salt/modules/openscap.py | 126 +++++++++++++-- + tests/unit/modules/test_openscap.py | 234 ++++++++++++++++++++++++++++ + 3 files changed, 352 insertions(+), 9 deletions(-) create mode 100644 changelog/59756.added diff --git a/changelog/59756.added b/changelog/59756.added @@ -28,7 +30,7 @@ index 0000000000..a59fb21eef @@ -0,0 +1 @@ +adding new call for openscap xccdf eval supporting new parameters diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py -index 6f8ff4a76d..f75e1c5e6b 100644 +index 6f8ff4a76d..216fd89eef 100644 --- a/salt/modules/openscap.py +++ b/salt/modules/openscap.py @@ -1,20 +1,15 @@ @@ -62,7 +64,7 @@ index 6f8ff4a76d..f75e1c5e6b 100644 
self.add_argument("action", choices=["eval"]) add_arg = None for params, kwparams in _XCCDF_MAP["eval"]["parser_arguments"]: -@@ -61,6 +56,115 @@ _OSCAP_EXIT_CODES_MAP = { +@@ -61,6 +56,117 @@ _OSCAP_EXIT_CODES_MAP = { } @@ -163,7 +165,9 @@ index 6f8ff4a76d..f75e1c5e6b 100644 + tempdir = tempfile.mkdtemp() + proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir) + (stdoutdata, error) = proc.communicate() -+ success = _OSCAP_EXIT_CODES_MAP[proc.returncode] ++ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) ++ if proc.returncode < 0: ++ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii') + returncode = proc.returncode + if success: + __salt__["cp.push_dir"](tempdir) @@ -178,7 +182,7 @@ index 6f8ff4a76d..f75e1c5e6b 100644 def xccdf(params): """ Run ``oscap xccdf`` commands on minions. -@@ -91,7 +195,7 @@ def xccdf(params): +@@ -91,14 +197,16 @@ def xccdf(params): args, argv = _ArgumentParser(action=action).parse_known_args(args=params) except Exception as err: # pylint: disable=broad-except success = False @@ -187,30 +191,21 @@ index 6f8ff4a76d..f75e1c5e6b 100644 if success: cmd = _XCCDF_MAP[action]["cmd_pattern"].format(args.profile, policy) + tempdir = tempfile.mkdtemp() + proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir) + (stdoutdata, error) = proc.communicate() +- success = _OSCAP_EXIT_CODES_MAP[proc.returncode] ++ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) ++ if proc.returncode < 0: ++ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii') + returncode = proc.returncode + if success: + __salt__["cp.push_dir"](tempdir) diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py -index 04cf00a1d3..e5be151bf2 100644 +index 045c37f7c9..301c1869ec 100644 --- a/tests/unit/modules/test_openscap.py +++ b/tests/unit/modules/test_openscap.py -@@ -1,18 +1,8 @@ --# -*- coding: utf-8 -*- -- --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals -- - from subprocess import PIPE - --# Import salt libs - import salt.modules.openscap as openscap -- --# Import 3rd-party libs - from salt.ext import six - from tests.support.mock import MagicMock, Mock, patch -- --# Import salt test libs - from tests.support.unit import TestCase - - -@@ -32,6 +22,7 @@ class OpenscapTestCase(TestCase): +@@ -21,6 +21,7 @@ class OpenscapTestCase(TestCase): "salt.modules.openscap.tempfile.mkdtemp", Mock(return_value=self.random_temp_dir), ), @@ -218,60 +213,7 @@ index 04cf00a1d3..e5be151bf2 100644 ] for patcher in patchers: self.apply_patch(patcher) -@@ -50,7 +41,7 @@ class OpenscapTestCase(TestCase): - ), - ): - response = openscap.xccdf( -- "eval --profile Default {0}".format(self.policy_file) -+ "eval --profile Default {}".format(self.policy_file) - ) - - self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1) -@@ -97,7 +88,7 @@ class OpenscapTestCase(TestCase): - ), - ): - response = openscap.xccdf( -- "eval --profile Default {0}".format(self.policy_file) -+ "eval --profile Default {}".format(self.policy_file) - ) - - self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1) -@@ -136,10 +127,7 @@ class OpenscapTestCase(TestCase): - - def test_openscap_xccdf_eval_fail_no_profile(self): - response = openscap.xccdf("eval --param Default /unknown/param") -- if six.PY2: -- error = "argument --profile is required" -- else: -- error = "the following arguments are required: --profile" -+ error = "the following arguments are required: --profile" - self.assertEqual( 
- response, - {"error": error, "upload_dir": None, "success": False, "returncode": None}, -@@ -199,7 +187,7 @@ class OpenscapTestCase(TestCase): - ), - ): - response = openscap.xccdf( -- "eval --profile Default {0}".format(self.policy_file) -+ "eval --profile Default {}".format(self.policy_file) - ) - - self.assertEqual( -@@ -213,11 +201,8 @@ class OpenscapTestCase(TestCase): - ) - - def test_openscap_xccdf_eval_fail_not_implemented_action(self): -- response = openscap.xccdf("info {0}".format(self.policy_file)) -- if six.PY2: -- mock_err = "argument action: invalid choice: 'info' (choose from u'eval')" -- else: -- mock_err = "argument action: invalid choice: 'info' (choose from 'eval')" -+ response = openscap.xccdf("info {}".format(self.policy_file)) -+ mock_err = "argument action: invalid choice: 'info' (choose from 'eval')" - - self.assertEqual( - response, -@@ -228,3 +213,236 @@ class OpenscapTestCase(TestCase): +@@ -211,3 +212,236 @@ class OpenscapTestCase(TestCase): "returncode": None, }, ) @@ -310,8 +252,8 @@ index 04cf00a1d3..e5be151bf2 100644 + openscap.Popen.assert_called_once_with( + expected_cmd, + cwd=openscap.tempfile.mkdtemp.return_value, -+ stderr=PIPE, -+ stdout=PIPE, ++ stderr=subprocess.PIPE, ++ stdout=subprocess.PIPE, + ) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + self.random_temp_dir @@ -364,8 +306,8 @@ index 04cf00a1d3..e5be151bf2 100644 + openscap.Popen.assert_called_once_with( + expected_cmd, + cwd=openscap.tempfile.mkdtemp.return_value, -+ stderr=PIPE, -+ stdout=PIPE, ++ stderr=subprocess.PIPE, ++ stdout=subprocess.PIPE, + ) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + self.random_temp_dir @@ -415,8 +357,8 @@ index 04cf00a1d3..e5be151bf2 100644 + openscap.Popen.assert_called_once_with( + expected_cmd, + cwd=openscap.tempfile.mkdtemp.return_value, -+ stderr=PIPE, -+ stdout=PIPE, ++ stderr=subprocess.PIPE, ++ stdout=subprocess.PIPE, + ) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + self.random_temp_dir @@ -475,8 +417,8 @@ index 04cf00a1d3..e5be151bf2 100644 + openscap.Popen.assert_called_once_with( + expected_cmd, + cwd=openscap.tempfile.mkdtemp.return_value, -+ stderr=PIPE, -+ stdout=PIPE, ++ stderr=subprocess.PIPE, ++ stdout=subprocess.PIPE, + ) + + def test_new_openscap_xccdf_eval_evaluation_error(self): @@ -509,6 +451,6 @@ index 04cf00a1d3..e5be151bf2 100644 + }, + ) -- -2.32.0 +2.33.0 diff --git a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch deleted file mode 100644 index fafe014..0000000 --- a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 1cea7d065d8da7c713af8136162c21187d5186f5 Mon Sep 17 00:00:00 2001 -From: Cedric Bosdonnat -Date: Wed, 14 Oct 2020 12:39:16 +0200 -Subject: [PATCH] Ensure virt.update stop_on_reboot is updated with its - default value (#280) - -While all virt.update properties default values should not be used when -updating the XML definition, the stop_on_reboot default value (False) -needs to be passed still or the user will never be able to update with -this value. 
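The pattern described above is worth spelling out: virt.update builds its change set with a truthiness filter over locals(), so any argument whose legitimate value is falsy (stop_on_reboot=False in particular) is silently dropped and could never be set back to its default. A minimal sketch of the failure mode, using a hypothetical cut-down signature rather than the real virt.update one:

    def update(cpu=None, mem=None, stop_on_reboot=False):
        # bool(v) filters out False/None/0/"" -- including a deliberate False flag
        data = {k: v for k, v in locals().items() if bool(v)}
        # the one-line fix from the patch: re-add the flag unconditionally
        data["stop_on_reboot"] = stop_on_reboot
        return data

    print(update(mem=1024))  # {'mem': 1024, 'stop_on_reboot': False}
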
---- - salt/modules/virt.py | 1 + - tests/unit/modules/test_virt.py | 2 ++ - 2 files changed, 3 insertions(+) - -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 8e2180608a..e3960a5a90 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -2738,6 +2738,7 @@ def update( - ] - - data = {k: v for k, v in locals().items() if bool(v)} -+ data["stop_on_reboot"] = stop_on_reboot - if boot_dev: - data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} - need_update = ( -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index fba821ea53..83152eda6e 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -1777,6 +1777,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - 1048576 - 1048576 - 1 -+ restart - - hvm - -@@ -2349,6 +2350,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - 1048576 - 1048576 - 1 -+ restart - - hvm - --- -2.29.2 - - diff --git a/exclude-the-full-path-of-a-download-url-to-prevent-i.patch b/exclude-the-full-path-of-a-download-url-to-prevent-i.patch deleted file mode 100644 index dfc3a5a..0000000 --- a/exclude-the-full-path-of-a-download-url-to-prevent-i.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 57ed9c41a177f57e3d56465662750617ac36cc95 Mon Sep 17 00:00:00 2001 -From: Joe Eacott -Date: Mon, 28 Jun 2021 16:46:35 -0600 -Subject: [PATCH] Exclude the full path of a download URL to prevent - injection of malicious code (bsc#1190265) (CVE-2021-21996) - ---- - salt/fileclient.py | 7 +++++++ - tests/unit/test_fileclient.py | 18 ++++++++++++++++++ - 2 files changed, 25 insertions(+) - -diff --git a/salt/fileclient.py b/salt/fileclient.py -index 88dcf1668d..bdf450ffe6 100644 ---- a/salt/fileclient.py -+++ b/salt/fileclient.py -@@ -28,6 +28,7 @@ import salt.utils.platform - import salt.utils.stringutils - import salt.utils.templates - import salt.utils.url -+import salt.utils.verify - import salt.utils.versions - from salt.exceptions import CommandExecutionError, MinionError - -@@ -858,6 +859,12 @@ class Client: - else: - file_name = url_data.path - -+ # clean_path returns an empty string if the check fails -+ root_path = salt.utils.path.join(cachedir, "extrn_files", saltenv, netloc) -+ new_path = os.path.sep.join([root_path, file_name]) -+ if not salt.utils.verify.clean_path(root_path, new_path, subdir=True): -+ return "Invalid path" -+ - if len(file_name) > MAX_FILENAME_LENGTH: - file_name = salt.utils.hashutils.sha256_digest(file_name) - -diff --git a/tests/unit/test_fileclient.py b/tests/unit/test_fileclient.py -index 3aa7b7cf84..b6cc84a871 100644 ---- a/tests/unit/test_fileclient.py -+++ b/tests/unit/test_fileclient.py -@@ -63,6 +63,24 @@ class FileclientTestCase(TestCase): - ) as c_ref_itr: - assert c_ref_itr == "/__test__/files/base/testfile" - -+ def test_cache_extrn_path_valid(self): -+ """ -+ Tests for extrn_filepath for a given url -+ """ -+ file_name = "http://localhost:8000/test/location/src/dev/usr/file" -+ -+ ret = fileclient.Client(self.opts)._extrn_path(file_name, "base") -+ assert ret == os.path.join("__test__", "extrn_files", "base", ret) -+ -+ def test_cache_extrn_path_invalid(self): -+ """ -+ Tests for extrn_filepath for a given url -+ """ -+ file_name = "http://localhost:8000/../../../../../usr/bin/bad" -+ -+ ret = fileclient.Client(self.opts)._extrn_path(file_name, "base") -+ assert ret == "Invalid path" -+ - def test_extrn_path_with_long_filename(self): - safe_file_name = os.path.split( - fileclient.Client(self.opts)._extrn_path( --- -2.33.0 - - 
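The guard added by the patch above hinges on a containment check: the cache root is joined with the untrusted file name taken from the URL, both paths are canonicalised, and anything that resolves outside the root is refused with "Invalid path". A self-contained sketch of that idea (a hypothetical helper, not the actual salt.utils.verify.clean_path implementation, and assuming POSIX paths):

    import os

    def is_contained(root, untrusted_name):
        # canonicalise both sides so ".." segments and symlinks cannot escape the root
        root = os.path.realpath(root)
        candidate = os.path.realpath(os.path.join(root, untrusted_name.lstrip(os.sep)))
        return os.path.commonpath([root, candidate]) == root

    print(is_contained("/var/cache/salt", "extrn_files/base/localhost/file"))  # True
    print(is_contained("/var/cache/salt", "../../../../usr/bin/bad"))          # False
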
diff --git a/fall-back-to-pymysql.patch b/fall-back-to-pymysql.patch deleted file mode 100644 index e3e0e9f..0000000 --- a/fall-back-to-pymysql.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 188a97fc20c3e24950b82dc6fcd0da878509cf7a Mon Sep 17 00:00:00 2001 -From: Maximilian Meister -Date: Thu, 5 Apr 2018 13:23:23 +0200 -Subject: [PATCH] fall back to PyMySQL - -same is already done in modules (see #26803) - -Signed-off-by: Maximilian Meister ---- - salt/modules/mysql.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py -index fdfe35158a..385e4d92a3 100644 ---- a/salt/modules/mysql.py -+++ b/salt/modules/mysql.py -@@ -55,7 +55,7 @@ try: - import MySQLdb.cursors - import MySQLdb.converters - from MySQLdb.constants import FIELD_TYPE, FLAG -- from MySQLdb import OperationalError -+ from MySQLdb.connections import OperationalError - except ImportError: - try: - # MySQLdb import failed, try to import PyMySQL -@@ -66,7 +66,7 @@ except ImportError: - import MySQLdb.cursors - import MySQLdb.converters - from MySQLdb.constants import FIELD_TYPE, FLAG -- from MySQLdb import OperationalError -+ from MySQLdb.err import OperationalError - except ImportError: - MySQLdb = None - --- -2.29.2 - - diff --git a/figure-out-python-interpreter-to-use-inside-containe.patch b/figure-out-python-interpreter-to-use-inside-containe.patch index 609a338..3126396 100644 --- a/figure-out-python-interpreter-to-use-inside-containe.patch +++ b/figure-out-python-interpreter-to-use-inside-containe.patch @@ -1,9 +1,8 @@ -From 271826b0baa6b2281bc2eac9118a0fcc4675f106 Mon Sep 17 00:00:00 2001 +From 1b54843abe5fad0bac844d6d5d9707df3e501aae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Wed, 19 May 2021 16:24:27 +0100 -Subject: [PATCH] Figure out Python interpreter to use inside - containers +Subject: [PATCH] Figure out Python interpreter to use inside containers Fix unit test for dockermod.call function --- @@ -12,10 +11,10 @@ Fix unit test for dockermod.call function 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py -index 176b4db926..cad307e7af 100644 +index ab2296a945..6d60a9a5aa 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py -@@ -217,7 +217,6 @@ import re +@@ -209,7 +209,6 @@ import re import shutil import string import subprocess @@ -23,7 +22,7 @@ index 176b4db926..cad307e7af 100644 import time import uuid -@@ -6865,9 +6864,32 @@ def call(name, function, *args, **kwargs): +@@ -6728,9 +6727,32 @@ def call(name, function, *args, **kwargs): name, thin_path, os.path.join(thin_dest_path, os.path.basename(thin_path)) ) @@ -55,22 +54,22 @@ index 176b4db926..cad307e7af 100644 - "python", + container_python_bin, "-c", - ("import tarfile; " 'tarfile.open("{0}/{1}").extractall(path="{0}")').format( - thin_dest_path, os.path.basename(thin_path) -@@ -6880,7 +6902,7 @@ def call(name, function, *args, **kwargs): + "import tarfile; " + 'tarfile.open("{0}/{1}").extractall(path="{0}")'.format( +@@ -6744,7 +6766,7 @@ def call(name, function, *args, **kwargs): try: salt_argv = ( [ -- "python{0}".format(sys.version_info[0]), +- "python{}".format(sys.version_info[0]), + container_python_bin, os.path.join(thin_dest_path, "salt-call"), "--metadata", "--local", diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py -index 48526acb71..ebe97a83f5 100644 +index 2c3665de85..fcedaf9272 100644 --- 
a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py -@@ -1049,33 +1049,35 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): +@@ -987,33 +987,35 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): # [ call(name, [args]), ... self.maxDiff = None self.assertIn("mkdir", docker_run_all_mock.mock_calls[0][1][1]) @@ -121,6 +120,6 @@ index 48526acb71..ebe97a83f5 100644 self.assertEqual({"retcode": 0, "comment": "container cmd"}, ret) -- -2.31.1 +2.33.0 diff --git a/fix-__mount_device-wrapper-254.patch b/fix-__mount_device-wrapper-254.patch deleted file mode 100644 index d0a191d..0000000 --- a/fix-__mount_device-wrapper-254.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 1e00e2b72321b5312efb7b8b426a037c8db72b79 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Wed, 29 Jul 2020 16:11:47 +0200 -Subject: [PATCH] Fix __mount_device wrapper (#254) - -Some recent change in Salt is now doing the right thing, and calling the -different states with separated args and kwargs. This change trigger a -hidden bug in the __mount_device decorator, that expect those parameter -to be in kwargs, as is happening during the test. - -This patch change the way that the wrapper inside the decorator search -for the name and device parameters, first looking into kwargs and later -in args if possible. A new test is introduced to exercise both cases. - -Fix #58012 - -(cherry picked from commit 2089645e2478751dc795127cfd14d0385c2e0899) ---- - changelog/58012.fixed | 1 + - salt/states/btrfs.py | 4 ++-- - tests/unit/states/test_btrfs.py | 27 +++++++++++++++++++++++++++ - 3 files changed, 30 insertions(+), 2 deletions(-) - create mode 100644 changelog/58012.fixed - -diff --git a/changelog/58012.fixed b/changelog/58012.fixed -new file mode 100644 -index 0000000000..13a1ef747d ---- /dev/null -+++ b/changelog/58012.fixed -@@ -0,0 +1 @@ -+Fix btrfs state decorator, that produces exceptions when creating subvolumes. 
-\ No newline at end of file -diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py -index 1374bbffb4..62a3553758 100644 ---- a/salt/states/btrfs.py -+++ b/salt/states/btrfs.py -@@ -103,8 +103,8 @@ def __mount_device(action): - - @functools.wraps(action) - def wrapper(*args, **kwargs): -- name = kwargs["name"] -- device = kwargs["device"] -+ name = kwargs.get("name", args[0] if args else None) -+ device = kwargs.get("device", args[1] if len(args) > 1 else None) - use_default = kwargs.get("use_default", False) - - ret = { -diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py -index b8f70bccfe..dceb971aa1 100644 ---- a/tests/unit/states/test_btrfs.py -+++ b/tests/unit/states/test_btrfs.py -@@ -231,6 +231,33 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): - mount.assert_called_once() - umount.assert_called_once() - -+ @skipIf(salt.utils.platform.is_windows(), "Skip on Windows") -+ @patch("salt.states.btrfs._umount") -+ @patch("salt.states.btrfs._mount") -+ def test_subvolume_created_exists_decorator(self, mount, umount): -+ """ -+ Test creating a subvolume using a non-kwargs call -+ """ -+ mount.return_value = "/tmp/xxx" -+ salt_mock = { -+ "btrfs.subvolume_exists": MagicMock(return_value=True), -+ } -+ opts_mock = { -+ "test": False, -+ } -+ with patch.dict(btrfs.__salt__, salt_mock), patch.dict( -+ btrfs.__opts__, opts_mock -+ ): -+ assert btrfs.subvolume_created("@/var", "/dev/sda1") == { -+ "name": "@/var", -+ "result": True, -+ "changes": {}, -+ "comment": ["Subvolume @/var already present"], -+ } -+ salt_mock["btrfs.subvolume_exists"].assert_called_with("/tmp/xxx/@/var") -+ mount.assert_called_once() -+ umount.assert_called_once() -+ - @patch("salt.states.btrfs._umount") - @patch("salt.states.btrfs._mount") - def test_subvolume_created_exists_test(self, mount, umount): --- -2.29.2 - - diff --git a/fix-a-wrong-rebase-in-test_core.py-180.patch b/fix-a-wrong-rebase-in-test_core.py-180.patch deleted file mode 100644 index 309c53e..0000000 --- a/fix-a-wrong-rebase-in-test_core.py-180.patch +++ /dev/null @@ -1,144 +0,0 @@ -From 3d5f3cff6b43d7aba35063e970d016401bb82921 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Fri, 25 Oct 2019 15:43:16 +0200 -Subject: [PATCH] Fix a wrong rebase in test_core.py (#180) - -* core: ignore wrong product_name files - -Some firmwares (like some NUC machines), do not provide valid -/sys/class/dmi/id/product_name strings. In those cases an -UnicodeDecodeError exception happens. - -This patch ignore this kind of issue during the grains creation. 
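The defensive read described above comes down to a few lines: treat an undecodable product_name as absent instead of letting UnicodeDecodeError abort grain collection. An illustrative, self-contained version of the pattern (not the actual salt.grains.core code path):

    import logging

    log = logging.getLogger(__name__)

    def read_product_name(path="/sys/class/dmi/id/product_name"):
        try:
            with open(path) as fhr:
                return fhr.read().strip()
        except UnicodeDecodeError:
            # some firmwares write non-decodable bytes into this file
            log.debug("The content in %s is not valid", path)
        except OSError:
            # file absent or unreadable on this platform
            pass
        return None
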
- -(cherry picked from commit 27b001bd5408359aa5dd219bfd900095ed592fe8) - -* core: remove duplicate dead code - -(cherry picked from commit bd0213bae00b737b24795bec3c030ebfe476e0d8) ---- - salt/grains/core.py | 8 +++- - tests/unit/grains/test_core.py | 80 ---------------------------------- - 2 files changed, 6 insertions(+), 82 deletions(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index a2983e388b..5dff6ecfd4 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -1066,7 +1066,9 @@ def _virtual(osdata): - except UnicodeDecodeError: - # Some firmwares provide non-valid 'product_name' - # files, ignore them -- pass -+ log.debug( -+ "The content in /sys/devices/virtual/dmi/id/product_name is not valid" -+ ) - except OSError: - pass - elif osdata["kernel"] == "FreeBSD": -@@ -2716,7 +2718,9 @@ def _hw_data(osdata): - except UnicodeDecodeError: - # Some firmwares provide non-valid 'product_name' - # files, ignore them -- pass -+ log.debug( -+ "The content in /sys/devices/virtual/dmi/id/product_name is not valid" -+ ) - except OSError as err: - # PermissionError is new to Python 3, but corresponds to the EACESS and - # EPERM error numbers. Use those instead here for PY2 compatibility. -diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 0dc3423646..85d434dd9d 100644 ---- a/tests/unit/grains/test_core.py -+++ b/tests/unit/grains/test_core.py -@@ -2047,86 +2047,6 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - result = core.path() - assert result == {"path": path, "systempath": comps}, result - -- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") -- @patch("os.path.exists") -- @patch("salt.utils.platform.is_proxy") -- def test_kernelparams_return(self): -- expectations = [ -- ( -- "BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64", -- { -- "kernelparams": [ -- ("BOOT_IMAGE", "/vmlinuz-3.10.0-693.2.2.el7.x86_64") -- ] -- }, -- ), -- ( -- "root=/dev/mapper/centos_daemon-root", -- {"kernelparams": [("root", "/dev/mapper/centos_daemon-root")]}, -- ), -- ( -- "rhgb quiet ro", -- {"kernelparams": [("rhgb", None), ("quiet", None), ("ro", None)]}, -- ), -- ('param="value1"', {"kernelparams": [("param", "value1")]}), -- ( -- 'param="value1 value2 value3"', -- {"kernelparams": [("param", "value1 value2 value3")]}, -- ), -- ( -- 'param="value1 value2 value3" LANG="pl" ro', -- { -- "kernelparams": [ -- ("param", "value1 value2 value3"), -- ("LANG", "pl"), -- ("ro", None), -- ] -- }, -- ), -- ("ipv6.disable=1", {"kernelparams": [("ipv6.disable", "1")]}), -- ( -- 'param="value1:value2:value3"', -- {"kernelparams": [("param", "value1:value2:value3")]}, -- ), -- ( -- 'param="value1,value2,value3"', -- {"kernelparams": [("param", "value1,value2,value3")]}, -- ), -- ( -- 'param="value1" param="value2" param="value3"', -- { -- "kernelparams": [ -- ("param", "value1"), -- ("param", "value2"), -- ("param", "value3"), -- ] -- }, -- ), -- ] -- -- for cmdline, expectation in expectations: -- with patch("salt.utils.files.fopen", mock_open(read_data=cmdline)): -- self.assertEqual(core.kernelparams(), expectation) -- -- @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") -- @patch("os.path.exists") -- @patch("salt.utils.platform.is_proxy") -- def test__hw_data_linux_empty(self, is_proxy, exists): -- is_proxy.return_value = False -- exists.return_value = True -- with patch("salt.utils.files.fopen", mock_open(read_data="")): -- self.assertEqual( -- core._hw_data({"kernel": "Linux"}), -- { -- "biosreleasedate": "", -- 
"biosversion": "", -- "manufacturer": "", -- "productname": "", -- "serialnumber": "", -- "uuid": "", -- }, -- ) -- - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") - @skipIf(six.PY2, "UnicodeDecodeError is throw in Python 3") - @patch("os.path.exists") --- -2.29.2 - - diff --git a/fix-aptpkg-systemd-call-bsc-1143301.patch b/fix-aptpkg-systemd-call-bsc-1143301.patch deleted file mode 100644 index 0890e7f..0000000 --- a/fix-aptpkg-systemd-call-bsc-1143301.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 5dadda6822323f409c99112244c2c809e58126e1 Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Wed, 31 Jul 2019 15:29:03 +0200 -Subject: [PATCH] Fix aptpkg systemd call (bsc#1143301) - ---- - salt/modules/aptpkg.py | 2 +- - tests/unit/modules/test_aptpkg.py | 3 +-- - 2 files changed, 2 insertions(+), 3 deletions(-) - -diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index bf90d0614f..c47ee852f4 100644 ---- a/salt/modules/aptpkg.py -+++ b/salt/modules/aptpkg.py -@@ -160,7 +160,7 @@ def _call_apt(args, scope=True, **kwargs): - and salt.utils.systemd.has_scope(__context__) - and __salt__["config.get"]("systemd.scope", True) - ): -- cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)]) -+ cmd.extend(["systemd-run", "--scope", '--description "{}"'.format(__name__)]) - cmd.extend(args) - - params = { -diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py -index 77d8b84896..c3769a7df1 100644 ---- a/tests/unit/modules/test_aptpkg.py -+++ b/tests/unit/modules/test_aptpkg.py -@@ -896,8 +896,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin): - [ - "systemd-run", - "--scope", -- "--description", -- '"salt.modules.aptpkg"', -+ '--description "salt.modules.aptpkg"', - "apt-get", - "purge", - "vim", --- -2.29.2 - - diff --git a/fix-async-batch-multiple-done-events.patch b/fix-async-batch-multiple-done-events.patch deleted file mode 100644 index d655138..0000000 --- a/fix-async-batch-multiple-done-events.patch +++ /dev/null @@ -1,140 +0,0 @@ -From 85b8666b138cab170327f0217c799277371b2e80 Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Tue, 7 May 2019 12:24:35 +0200 -Subject: [PATCH] Fix async-batch multiple done events - ---- - salt/cli/batch_async.py | 19 ++++++++++++------- - tests/unit/cli/test_batch_async.py | 20 +++++++++++++------- - 2 files changed, 25 insertions(+), 14 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index b0ab9d9f47..7225491228 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -86,6 +86,7 @@ class BatchAsync: - io_loop=ioloop, - keep_loop=True, - ) -+ self.scheduled = False - - def __set_event_handler(self): - ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) -@@ -118,10 +119,7 @@ class BatchAsync: - if minion in self.active: - self.active.remove(minion) - self.done_minions.add(minion) -- # call later so that we maybe gather more returns -- self.event.io_loop.call_later( -- self.batch_delay, self.schedule_next -- ) -+ self.schedule_next() - - def _get_next(self): - to_run = ( -@@ -146,7 +144,7 @@ class BatchAsync: - self.timedout_minions - ) - if timedout_minions: -- self.event.io_loop.call_later(self.batch_delay, self.schedule_next) -+ self.schedule_next() - if running: - self.event.io_loop.add_callback(self.find_job, running) - -@@ -197,7 +195,7 @@ class BatchAsync: - "metadata": self.metadata, - } - self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) -- yield self.schedule_next() -+ yield 
self.run_next() - - def end_batch(self): - left = self.minions.symmetric_difference( -@@ -214,8 +212,14 @@ class BatchAsync: - self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) - self.event.remove_event_handler(self.__event_handler) - -- @tornado.gen.coroutine - def schedule_next(self): -+ if not self.scheduled: -+ self.scheduled = True -+ # call later so that we maybe gather more returns -+ self.event.io_loop.call_later(self.batch_delay, self.run_next) -+ -+ @tornado.gen.coroutine -+ def run_next(self): - next_batch = self._get_next() - if next_batch: - self.active = self.active.union(next_batch) -@@ -238,3 +242,4 @@ class BatchAsync: - self.active = self.active.difference(next_batch) - else: - self.end_batch() -+ self.scheduled = False -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index d6a4bfcf60..66332a548a 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -105,14 +105,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - - @tornado.testing.gen_test - def test_start_batch_calls_next(self): -- self.batch.schedule_next = MagicMock(return_value=MagicMock()) -+ self.batch.run_next = MagicMock(return_value=MagicMock()) - self.batch.event = MagicMock() - future = tornado.gen.Future() - future.set_result(None) -- self.batch.schedule_next = MagicMock(return_value=future) -+ self.batch.run_next = MagicMock(return_value=future) - self.batch.start_batch() - self.assertEqual(self.batch.initialized, True) -- self.assertEqual(len(self.batch.schedule_next.mock_calls), 1) -+ self.assertEqual(len(self.batch.run_next.mock_calls), 1) - - def test_batch_fire_done_event(self): - self.batch.targeted_minions = {"foo", "baz", "bar"} -@@ -147,7 +147,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - future = tornado.gen.Future() - future.set_result({"minions": ["foo", "bar"]}) - self.batch.local.run_job_async.return_value = future -- ret = self.batch.schedule_next().result() -+ ret = self.batch.run_next().result() - self.assertEqual( - self.batch.local.run_job_async.call_args[0], - ({"foo", "bar"}, "my.fun", [], "list"), -@@ -250,7 +250,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.done_minions, {"foo"}) - self.assertEqual( - self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_delay, self.batch.schedule_next), -+ (self.batch.batch_delay, self.batch.run_next), - ) - - def test_batch__event_handler_find_job_return(self): -@@ -262,10 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.find_job_returned, {"foo"}) - - @tornado.testing.gen_test -- def test_batch_schedule_next_end_batch_when_no_next(self): -+ def test_batch_run_next_end_batch_when_no_next(self): - self.batch.end_batch = MagicMock() - self.batch._get_next = MagicMock(return_value={}) -- self.batch.schedule_next() -+ self.batch.run_next() - self.assertEqual(len(self.batch.end_batch.mock_calls), 1) - - @tornado.testing.gen_test -@@ -345,3 +345,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.event.io_loop.add_callback.call_args[0], - (self.batch.find_job, {"foo"}), - ) -+ -+ def test_only_on_run_next_is_scheduled(self): -+ self.batch.event = MagicMock() -+ self.batch.scheduled = True -+ self.batch.schedule_next() -+ self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0) --- -2.29.2 - - diff --git a/fix-async-batch-race-conditions.patch b/fix-async-batch-race-conditions.patch deleted file mode 100644 
index f7ced1f..0000000 --- a/fix-async-batch-race-conditions.patch +++ /dev/null @@ -1,273 +0,0 @@ -From 4b3badeb52a9de10d6085ee3cc7598a827d1e68f Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Thu, 11 Apr 2019 15:57:59 +0200 -Subject: [PATCH] Fix async batch race conditions - -Close batching when there is no next batch ---- - salt/cli/batch_async.py | 96 +++++++++++++++--------------- - tests/unit/cli/test_batch_async.py | 38 +++++------- - 2 files changed, 62 insertions(+), 72 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 1557e5105b..b0ab9d9f47 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -32,14 +32,14 @@ class BatchAsync: - - tag: salt/batch//start - - data: { - "available_minions": self.minions, -- "down_minions": self.down_minions -+ "down_minions": targeted_minions - presence_ping_minions - } - - When the batch ends, an `done` event is fired: - - tag: salt/batch//done - - data: { - "available_minions": self.minions, -- "down_minions": self.down_minions, -+ "down_minions": targeted_minions - presence_ping_minions - "done_minions": self.done_minions, - "timedout_minions": self.timedout_minions - } -@@ -68,7 +68,7 @@ class BatchAsync: - self.eauth = batch_get_eauth(clear_load["kwargs"]) - self.metadata = clear_load["kwargs"].get("metadata", {}) - self.minions = set() -- self.down_minions = set() -+ self.targeted_minions = set() - self.timedout_minions = set() - self.done_minions = set() - self.active = set() -@@ -110,8 +110,7 @@ class BatchAsync: - minion = data["id"] - if op == "ping_return": - self.minions.add(minion) -- self.down_minions.remove(minion) -- if not self.down_minions: -+ if self.targeted_minions == self.minions: - self.event.io_loop.spawn_callback(self.start_batch) - elif op == "find_job_return": - self.find_job_returned.add(minion) -@@ -124,11 +123,6 @@ class BatchAsync: - self.batch_delay, self.schedule_next - ) - -- if self.initialized and self.done_minions == self.minions.difference( -- self.timedout_minions -- ): -- self.end_batch() -- - def _get_next(self): - to_run = ( - self.minions.difference(self.done_minions) -@@ -142,20 +136,17 @@ class BatchAsync: - return set(list(to_run)[:next_batch_size]) - - @tornado.gen.coroutine -- def check_find_job(self, minions): -- did_not_return = minions.difference(self.find_job_returned) -- if did_not_return: -- for minion in did_not_return: -- if minion in self.find_job_returned: -- self.find_job_returned.remove(minion) -- if minion in self.active: -- self.active.remove(minion) -- self.timedout_minions.add(minion) -- running = ( -- minions.difference(did_not_return) -- .difference(self.done_minions) -- .difference(self.timedout_minions) -+ def check_find_job(self, batch_minions): -+ timedout_minions = batch_minions.difference(self.find_job_returned).difference( -+ self.done_minions - ) -+ self.timedout_minions = self.timedout_minions.union(timedout_minions) -+ self.active = self.active.difference(self.timedout_minions) -+ running = batch_minions.difference(self.done_minions).difference( -+ self.timedout_minions -+ ) -+ if timedout_minions: -+ self.event.io_loop.call_later(self.batch_delay, self.schedule_next) - if running: - self.event.io_loop.add_callback(self.find_job, running) - -@@ -193,7 +184,7 @@ class BatchAsync: - metadata=self.metadata, - **self.eauth - ) -- self.down_minions = set(ping_return["minions"]) -+ self.targeted_minions = set(ping_return["minions"]) - - @tornado.gen.coroutine - def start_batch(self): -@@ -202,39 +193,48 @@ class 
BatchAsync: - self.initialized = True - data = { - "available_minions": self.minions, -- "down_minions": self.down_minions, -+ "down_minions": self.targeted_minions.difference(self.minions), - "metadata": self.metadata, - } - self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) - yield self.schedule_next() - - def end_batch(self): -- data = { -- "available_minions": self.minions, -- "down_minions": self.down_minions, -- "done_minions": self.done_minions, -- "timedout_minions": self.timedout_minions, -- "metadata": self.metadata, -- } -- self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) -- self.event.remove_event_handler(self.__event_handler) -+ left = self.minions.symmetric_difference( -+ self.done_minions.union(self.timedout_minions) -+ ) -+ if not left: -+ data = { -+ "available_minions": self.minions, -+ "down_minions": self.targeted_minions.difference(self.minions), -+ "done_minions": self.done_minions, -+ "timedout_minions": self.timedout_minions, -+ "metadata": self.metadata, -+ } -+ self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) -+ self.event.remove_event_handler(self.__event_handler) - - @tornado.gen.coroutine - def schedule_next(self): - next_batch = self._get_next() - if next_batch: -- yield self.local.run_job_async( -- next_batch, -- self.opts["fun"], -- self.opts["arg"], -- "list", -- raw=self.opts.get("raw", False), -- ret=self.opts.get("return", ""), -- gather_job_timeout=self.opts["gather_job_timeout"], -- jid=self.batch_jid, -- metadata=self.metadata, -- ) -- self.event.io_loop.call_later( -- self.opts["timeout"], self.find_job, set(next_batch) -- ) - self.active = self.active.union(next_batch) -+ try: -+ yield self.local.run_job_async( -+ next_batch, -+ self.opts["fun"], -+ self.opts["arg"], -+ "list", -+ raw=self.opts.get("raw", False), -+ ret=self.opts.get("return", ""), -+ gather_job_timeout=self.opts["gather_job_timeout"], -+ jid=self.batch_jid, -+ metadata=self.metadata, -+ ) -+ self.event.io_loop.call_later( -+ self.opts["timeout"], self.find_job, set(next_batch) -+ ) -+ except Exception as ex: -+ self.active = self.active.difference(next_batch) -+ else: -+ self.end_batch() -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index 3f8626a2dd..d6a4bfcf60 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -68,8 +68,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual( - self.batch.local.run_job_async.call_args[0], ("*", "test.ping", [], "glob") - ) -- # assert down_minions == all minions matched by tgt -- self.assertEqual(self.batch.down_minions, {"foo", "bar"}) -+ # assert targeted_minions == all minions matched by tgt -+ self.assertEqual(self.batch.targeted_minions, {"foo", "bar"}) - - @tornado.testing.gen_test - def test_batch_start_on_gather_job_timeout(self): -@@ -115,7 +115,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(len(self.batch.schedule_next.mock_calls), 1) - - def test_batch_fire_done_event(self): -+ self.batch.targeted_minions = {"foo", "baz", "bar"} - self.batch.minions = {"foo", "bar"} -+ self.batch.done_minions = {"foo"} -+ self.batch.timedout_minions = {"bar"} - self.batch.event = MagicMock() - self.batch.metadata = {"mykey": "myvalue"} - self.batch.end_batch() -@@ -124,9 +127,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ( - { - "available_minions": {"foo", "bar"}, -- "done_minions": set(), -- "down_minions": set(), -- "timedout_minions": 
set(), -+ "done_minions": self.batch.done_minions, -+ "down_minions": {"baz"}, -+ "timedout_minions": self.batch.timedout_minions, - "metadata": self.batch.metadata, - }, - "salt/batch/1235/done", -@@ -205,7 +208,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch._get_next(), set()) - - def test_batch__event_handler_ping_return(self): -- self.batch.down_minions = {"foo"} -+ self.batch.targeted_minions = {"foo"} - self.batch.event = MagicMock( - unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) - ) -@@ -216,7 +219,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.done_minions, set()) - - def test_batch__event_handler_call_start_batch_when_all_pings_return(self): -- self.batch.down_minions = {"foo"} -+ self.batch.targeted_minions = {"foo"} - self.batch.event = MagicMock( - unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) - ) -@@ -228,7 +231,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ) - - def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self): -- self.batch.down_minions = {"foo", "bar"} -+ self.batch.targeted_minions = {"foo", "bar"} - self.batch.event = MagicMock( - unpack=MagicMock(return_value=("salt/job/1234/ret/foo", {"id": "foo"})) - ) -@@ -259,23 +262,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.find_job_returned, {"foo"}) - - @tornado.testing.gen_test -- def test_batch__event_handler_end_batch(self): -- self.batch.event = MagicMock( -- unpack=MagicMock( -- return_value=("salt/job/not-my-jid/ret/foo", {"id": "foo"}) -- ) -- ) -- future = tornado.gen.Future() -- future.set_result({"minions": ["foo", "bar", "baz"]}) -- self.batch.local.run_job_async.return_value = future -- self.batch.start() -- self.batch.initialized = True -- self.assertEqual(self.batch.down_minions, {"foo", "bar", "baz"}) -+ def test_batch_schedule_next_end_batch_when_no_next(self): - self.batch.end_batch = MagicMock() -- self.batch.minions = {"foo", "bar", "baz"} -- self.batch.done_minions = {"foo", "bar"} -- self.batch.timedout_minions = {"baz"} -- self.batch._BatchAsync__event_handler(MagicMock()) -+ self.batch._get_next = MagicMock(return_value={}) -+ self.batch.schedule_next() - self.assertEqual(len(self.batch.end_batch.mock_calls), 1) - - @tornado.testing.gen_test --- -2.29.2 - - diff --git a/fix-batch_async-obsolete-test.patch b/fix-batch_async-obsolete-test.patch deleted file mode 100644 index 86d5f8e..0000000 --- a/fix-batch_async-obsolete-test.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 5a83801b7733f09c35a7ff0abb5aa32d4c857e4b Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Tue, 3 Dec 2019 11:22:42 +0100 -Subject: [PATCH] Fix batch_async obsolete test - ---- - tests/unit/cli/test_batch_async.py | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index c18b42be57..b04965268a 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -134,7 +134,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - "salt/batch/1235/done", - ), - ) -- self.assertEqual(len(self.batch.event.remove_event_handler.mock_calls), 1) -+ -+ def test_batch__del__(self): -+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) -+ event = MagicMock() -+ batch.event = event -+ batch.__del__() -+ self.assertEqual(len(event.remove_event_handler.mock_calls), 1) - - @tornado.testing.gen_test - def 
test_batch_next(self): --- -2.29.2 - - diff --git a/fix-bsc-1065792.patch b/fix-bsc-1065792.patch index 67d82e1..19767e6 100644 --- a/fix-bsc-1065792.patch +++ b/fix-bsc-1065792.patch @@ -1,34 +1,17 @@ -From 1b9a160f578cf446f5ae622a450d23022e7e3ca5 Mon Sep 17 00:00:00 2001 +From 22fe4809712dbc59ba2d8c3c2045f531f81bc517 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Thu, 14 Dec 2017 16:21:40 +0100 Subject: [PATCH] Fix bsc#1065792 --- - salt/states/service.py | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) + salt/states/service.py | 1 + + 1 file changed, 1 insertion(+) diff --git a/salt/states/service.py b/salt/states/service.py -index d19c245756..4ea36a78f6 100644 +index 536e64a430..27595f7703 100644 --- a/salt/states/service.py +++ b/salt/states/service.py -@@ -56,16 +56,12 @@ set the reload value to True: - :ref:`Requisites ` documentation. - - """ --# Import Python libs - - import time - --# Import Salt libs - import salt.utils.data - import salt.utils.platform - from salt.exceptions import CommandExecutionError -- --# Import 3rd-party libs - from salt.utils.args import get_function_argspec as _argspec - from salt.utils.systemd import booted - -@@ -79,6 +75,7 @@ def __virtual__(): +@@ -78,6 +78,7 @@ def __virtual__(): Only make these states available if a service provider has been detected or assigned for this minion """ @@ -37,6 +20,6 @@ index d19c245756..4ea36a78f6 100644 return __virtualname__ else: -- -2.29.2 +2.33.0 diff --git a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch b/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch deleted file mode 100644 index c27fedf..0000000 --- a/fix-cve-2020-25592-and-add-tests-bsc-1178319.patch +++ /dev/null @@ -1,120 +0,0 @@ -From bc7acab857b952353a959339b06c79d851a9d879 Mon Sep 17 00:00:00 2001 -From: "Daniel A. Wozniak" -Date: Wed, 16 Sep 2020 00:25:10 +0000 -Subject: [PATCH] Fix CVE-2020-25592 and add tests (bsc#1178319) - -Properly validate eauth credentials and tokens on SSH calls made by Salt API - -(bsc#1178319) (bsc#1178362) (bsc#1178361) (CVE-2020-25592) (CVE-2020-17490) (CVE-2020-16846) ---- - salt/netapi/__init__.py | 43 +++++++++++++++++++++++++ - tests/integration/netapi/test_client.py | 13 ++++++-- - 2 files changed, 53 insertions(+), 3 deletions(-) - -diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py -index dec19b37ef..cba1ec574f 100644 ---- a/salt/netapi/__init__.py -+++ b/salt/netapi/__init__.py -@@ -109,6 +109,49 @@ class NetapiClient: - "Authorization error occurred." 
- ) - -+ def _prep_auth_info(self, clear_load): -+ sensitive_load_keys = [] -+ key = None -+ if "token" in clear_load: -+ auth_type = "token" -+ err_name = "TokenAuthenticationError" -+ sensitive_load_keys = ["token"] -+ return auth_type, err_name, key, sensitive_load_keys -+ elif "eauth" in clear_load: -+ auth_type = "eauth" -+ err_name = "EauthAuthenticationError" -+ sensitive_load_keys = ["username", "password"] -+ return auth_type, err_name, key, sensitive_load_keys -+ raise salt.exceptions.EauthAuthenticationError( -+ "No authentication credentials given" -+ ) -+ -+ def _authorize_ssh(self, low): -+ auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(low) -+ auth_check = self.loadauth.check_authentication(low, auth_type, key=key) -+ auth_list = auth_check.get("auth_list", []) -+ error = auth_check.get("error") -+ if error: -+ raise salt.exceptions.EauthAuthenticationError(error) -+ delimiter = low.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM) -+ _res = self.ckminions.check_minions( -+ low["tgt"], low.get("tgt_type", "glob"), delimiter -+ ) -+ minions = _res.get("minions", list()) -+ missing = _res.get("missing", list()) -+ authorized = self.ckminions.auth_check( -+ auth_list, -+ low["fun"], -+ low.get("arg", []), -+ low["tgt"], -+ low.get("tgt_type", "glob"), -+ minions=minions, -+ ) -+ if not authorized: -+ raise salt.exceptions.EauthAuthenticationError( -+ "Authorization error occurred." -+ ) -+ - def run(self, low): - """ - Execute the specified function in the specified client by passing the -diff --git a/tests/integration/netapi/test_client.py b/tests/integration/netapi/test_client.py -index 70471d3148..9eb6e52920 100644 ---- a/tests/integration/netapi/test_client.py -+++ b/tests/integration/netapi/test_client.py -@@ -15,10 +15,12 @@ from tests.support.helpers import ( - SKIP_IF_NOT_RUNNING_PYTEST, - SaveRequestsPostHandler, - Webserver, -+ requires_sshd_server, - slowTest, - ) - from tests.support.mixins import AdaptedConfigurationTestCaseMixin - from tests.support.mock import patch -+from tests.support.paths import TMP, TMP_CONF_DIR - from tests.support.runtests import RUNTIME_VARS - from tests.support.unit import TestCase, skipIf - -@@ -178,7 +180,12 @@ class NetapiSSHClientTest(SSHCase): - """ - opts = AdaptedConfigurationTestCaseMixin.get_config("client_config").copy() - self.netapi = salt.netapi.NetapiClient(opts) -- self.priv_file = os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key") -+ opts = salt.config.client_config(os.path.join(TMP_CONF_DIR, "master")) -+ naopts = copy.deepcopy(opts) -+ naopts["ignore_host_keys"] = True -+ self.netapi = salt.netapi.NetapiClient(naopts) -+ -+ self.priv_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test") - self.rosters = os.path.join(RUNTIME_VARS.TMP_CONF_DIR) - self.roster_file = os.path.join(self.rosters, "roster") - -@@ -325,7 +332,7 @@ class NetapiSSHClientTest(SSHCase): - "roster": "cache", - "client": "ssh", - "tgt": "root|id>{} #@127.0.0.1".format(path), -- "roster_file": self.roster_file, -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", - "rosters": "/", - "fun": "test.ping", - "eauth": "auto", -@@ -355,7 +362,7 @@ class NetapiSSHClientTest(SSHCase): - "eauth": "auto", - "username": "saltdev_auto", - "password": "saltdev", -- "roster_file": self.roster_file, -+ "roster_file": "/tmp/salt-tests-tmpdir/config/roaster", - "rosters": "/", - "ssh_options": ["|id>{} #".format(path), "lol"], - } --- -2.29.2 - - diff --git a/fix-error-handling-in-openscap-module-bsc-1188647-40.patch 
b/fix-error-handling-in-openscap-module-bsc-1188647-40.patch deleted file mode 100644 index 7fe2229..0000000 --- a/fix-error-handling-in-openscap-module-bsc-1188647-40.patch +++ /dev/null @@ -1,40 +0,0 @@ -From b7d11d8caf3eb4fb39a070201be87bb1b3abd525 Mon Sep 17 00:00:00 2001 -From: Vladimir Nadvornik -Date: Wed, 11 Aug 2021 12:19:09 +0200 -Subject: [PATCH] Fix error handling in openscap module (bsc#1188647) - (#409) - ---- - salt/modules/openscap.py | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py -index f75e1c5e6b..216fd89eef 100644 ---- a/salt/modules/openscap.py -+++ b/salt/modules/openscap.py -@@ -153,7 +153,9 @@ def xccdf_eval(xccdffile, ovalfiles=None, **kwargs): - tempdir = tempfile.mkdtemp() - proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir) - (stdoutdata, error) = proc.communicate() -- success = _OSCAP_EXIT_CODES_MAP[proc.returncode] -+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) -+ if proc.returncode < 0: -+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii') - returncode = proc.returncode - if success: - __salt__["cp.push_dir"](tempdir) -@@ -202,7 +204,9 @@ def xccdf(params): - tempdir = tempfile.mkdtemp() - proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir) - (stdoutdata, error) = proc.communicate() -- success = _OSCAP_EXIT_CODES_MAP[proc.returncode] -+ success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) -+ if proc.returncode < 0: -+ error += "\nKilled by signal {}\n".format(proc.returncode).encode('ascii') - returncode = proc.returncode - if success: - __salt__["cp.push_dir"](tempdir) --- -2.32.0 - - diff --git a/fix-exception-in-yumpkg.remove-for-not-installed-pac.patch b/fix-exception-in-yumpkg.remove-for-not-installed-pac.patch index 19b0de4..4108f23 100644 --- a/fix-exception-in-yumpkg.remove-for-not-installed-pac.patch +++ b/fix-exception-in-yumpkg.remove-for-not-installed-pac.patch @@ -1,19 +1,19 @@ -From 30a2c8c042f0fe57253a8ab47220d897bc89bd17 Mon Sep 17 00:00:00 2001 +From 9413059223107924c6594e6c72e50fcbcc441e60 Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Thu, 24 Jun 2021 13:17:13 +0300 Subject: [PATCH] Fix exception in yumpkg.remove for not installed package (#380) --- - salt/modules/yumpkg.py | 2 ++ - tests/unit/modules/test_yumpkg.py | 25 +++++++++++++++++++++++++ - 2 files changed, 27 insertions(+) + salt/modules/yumpkg.py | 2 ++ + tests/pytests/unit/modules/test_yumpkg.py | 37 +++++++++++++++++++++++ + 2 files changed, 39 insertions(+) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py -index 0fb41a0400..c9f9f2c2d3 100644 +index dd81c6f1e9..273f0fb370 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py -@@ -2051,6 +2051,8 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613 +@@ -2087,6 +2087,8 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613 old = list_pkgs() targets = [] for target in pkg_params: @@ -22,43 +22,55 @@ index 0fb41a0400..c9f9f2c2d3 100644 version_to_remove = pkg_params[target] installed_versions = old[target].split(",") -diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py -index e22c0b9251..373d2e09cb 100644 ---- a/tests/unit/modules/test_yumpkg.py -+++ b/tests/unit/modules/test_yumpkg.py -@@ -1099,6 +1099,31 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): - call = cmd_mock.mock_calls[0][1][0] - assert call == expected, call +diff --git 
a/tests/pytests/unit/modules/test_yumpkg.py b/tests/pytests/unit/modules/test_yumpkg.py +index 7e3ed517ea..b5572db123 100644 +--- a/tests/pytests/unit/modules/test_yumpkg.py ++++ b/tests/pytests/unit/modules/test_yumpkg.py +@@ -1219,6 +1219,43 @@ def test_install_error_reporting(): + assert exc_info.value.info == expected, exc_info.value.info -+ def test_remove_not_existing(self): -+ """ -+ Test if no exception on removing not installed package -+ """ -+ name = "foo" -+ def list_pkgs_mock(): -+ return {} -+ cmd_mock = MagicMock( -+ return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""} -+ ) -+ salt_mock = { -+ "cmd.run_all": cmd_mock, -+ "lowpkg.version_cmp": rpm.version_cmp, -+ "pkg_resource.parse_targets": MagicMock( -+ return_value=({name: None}, "repository") -+ ), -+ } -+ with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch( -+ "salt.utils.systemd.has_scope", MagicMock(return_value=False) -+ ), patch.dict(yumpkg.__salt__, salt_mock): + ++def test_remove_not_installed(): ++ """ ++ Tests that no exception raised on removing not installed package ++ """ ++ name = "foo" ++ list_pkgs_mock = MagicMock(return_value={}) ++ cmd_mock = MagicMock( ++ return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""} ++ ) ++ salt_mock = { ++ "cmd.run_all": cmd_mock, ++ "lowpkg.version_cmp": rpm.version_cmp, ++ "pkg_resource.parse_targets": MagicMock( ++ return_value=({name: None}, "repository") ++ ), ++ } ++ with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch( ++ "salt.utils.systemd.has_scope", MagicMock(return_value=False) ++ ), patch.dict(yumpkg.__salt__, salt_mock): + -+ with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}): -+ yumpkg.remove(name) -+ cmd_mock.assert_not_called() ++ # Test yum ++ with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict( ++ yumpkg.__grains__, {"os": "CentOS", "osrelease": 7} ++ ): ++ yumpkg.remove(name) ++ cmd_mock.assert_not_called() + - def test_install_with_epoch(self): - """ - Tests that we properly identify a version containing an epoch as an ++ # Test dnf ++ yumpkg.__context__.pop("yum_bin") ++ cmd_mock.reset_mock() ++ with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( ++ yumpkg.__grains__, {"os": "Fedora", "osrelease": 27} ++ ): ++ yumpkg.remove(name) ++ cmd_mock.assert_not_called() ++ ++ + def test_upgrade_with_options(): + with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch( + "salt.utils.systemd.has_scope", MagicMock(return_value=False) -- -2.32.0 +2.33.0 diff --git a/fix-failing-unit-tests-for-batch-async.patch b/fix-failing-unit-tests-for-batch-async.patch deleted file mode 100644 index c5246fe..0000000 --- a/fix-failing-unit-tests-for-batch-async.patch +++ /dev/null @@ -1,196 +0,0 @@ -From 3b96edd8d23c65c6788a9980114a7e1c220c9640 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Fri, 4 Oct 2019 15:00:50 +0100 -Subject: [PATCH] Fix failing unit tests for batch async - ---- - salt/cli/batch_async.py | 2 +- - tests/unit/cli/test_batch_async.py | 66 +++++++++++++++++------------- - 2 files changed, 39 insertions(+), 29 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 89405ba917..b2d04f9d4d 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -91,7 +91,7 @@ class BatchAsync: - keep_loop=True, - ) - self.scheduled = False -- self.patterns = {} -+ self.patterns = set() - - def __set_event_handler(self): - ping_return_pattern = 
"salt/job/{}/ret/*".format(self.ping_jid) -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index 66332a548a..c18b42be57 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -61,8 +61,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ret = self.batch.start() - # assert start_batch is called later with batch_presence_ping_timeout as param - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_presence_ping_timeout, self.batch.start_batch), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.start_batch,), - ) - # assert test.ping called - self.assertEqual( -@@ -81,8 +81,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - ret = self.batch.start() - # assert start_batch is called later with gather_job_timeout as param - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts["gather_job_timeout"], self.batch.start_batch), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.start_batch,), - ) - - def test_batch_fire_start_event(self): -@@ -107,12 +107,11 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - def test_start_batch_calls_next(self): - self.batch.run_next = MagicMock(return_value=MagicMock()) - self.batch.event = MagicMock() -- future = tornado.gen.Future() -- future.set_result(None) -- self.batch.run_next = MagicMock(return_value=future) - self.batch.start_batch() - self.assertEqual(self.batch.initialized, True) -- self.assertEqual(len(self.batch.run_next.mock_calls), 1) -+ self.assertEqual( -+ self.batch.event.io_loop.spawn_callback.call_args[0], (self.batch.run_next,) -+ ) - - def test_batch_fire_done_event(self): - self.batch.targeted_minions = {"foo", "baz", "bar"} -@@ -147,14 +146,14 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - future = tornado.gen.Future() - future.set_result({"minions": ["foo", "bar"]}) - self.batch.local.run_job_async.return_value = future -- ret = self.batch.run_next().result() -+ self.batch.run_next() - self.assertEqual( - self.batch.local.run_job_async.call_args[0], - ({"foo", "bar"}, "my.fun", [], "list"), - ) - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts["timeout"], self.batch.find_job, {"foo", "bar"}), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.find_job, {"foo", "bar"}), - ) - self.assertEqual(self.batch.active, {"bar", "foo"}) - -@@ -249,15 +248,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.assertEqual(self.batch.active, set()) - self.assertEqual(self.batch.done_minions, {"foo"}) - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.batch_delay, self.batch.run_next), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.schedule_next,), - ) - - def test_batch__event_handler_find_job_return(self): - self.batch.event = MagicMock( -- unpack=MagicMock(return_value=("salt/job/1236/ret/foo", {"id": "foo"})) -+ unpack=MagicMock( -+ return_value=( -+ "salt/job/1236/ret/foo", -+ {"id": "foo", "return": "deadbeaf"}, -+ ) -+ ) - ) - self.batch.start() -+ self.batch.patterns.add(("salt/job/1236/ret/*", "find_job_return")) - self.batch._BatchAsync__event_handler(MagicMock()) - self.assertEqual(self.batch.find_job_returned, {"foo"}) - -@@ -274,14 +279,13 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - future = tornado.gen.Future() - future.set_result({}) - 
self.batch.local.run_job_async.return_value = future -+ self.batch.minions = {"foo", "bar"} -+ self.batch.jid_gen = MagicMock(return_value="1234") -+ tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({"foo", "bar"}) - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- ( -- self.batch.opts["gather_job_timeout"], -- self.batch.check_find_job, -- {"foo", "bar"}, -- ), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.check_find_job, {"foo", "bar"}, "1234"), - ) - - @tornado.testing.gen_test -@@ -291,17 +295,21 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - future = tornado.gen.Future() - future.set_result({}) - self.batch.local.run_job_async.return_value = future -+ self.batch.minions = {"foo", "bar"} -+ self.batch.jid_gen = MagicMock(return_value="1234") -+ tornado.gen.sleep = MagicMock(return_value=future) - self.batch.find_job({"foo", "bar"}) - self.assertEqual( -- self.batch.event.io_loop.call_later.call_args[0], -- (self.batch.opts["gather_job_timeout"], self.batch.check_find_job, {"foo"}), -+ self.batch.event.io_loop.spawn_callback.call_args[0], -+ (self.batch.check_find_job, {"foo"}, "1234"), - ) - - def test_batch_check_find_job_did_not_return(self): - self.batch.event = MagicMock() - self.batch.active = {"foo"} - self.batch.find_job_returned = set() -- self.batch.check_find_job({"foo"}) -+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} -+ self.batch.check_find_job({"foo"}, jid="1234") - self.assertEqual(self.batch.find_job_returned, set()) - self.assertEqual(self.batch.active, set()) - self.assertEqual(len(self.batch.event.io_loop.add_callback.mock_calls), 0) -@@ -309,9 +317,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - def test_batch_check_find_job_did_return(self): - self.batch.event = MagicMock() - self.batch.find_job_returned = {"foo"} -- self.batch.check_find_job({"foo"}) -+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} -+ self.batch.check_find_job({"foo"}, jid="1234") - self.assertEqual( -- self.batch.event.io_loop.add_callback.call_args[0], -+ self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.find_job, {"foo"}), - ) - -@@ -332,7 +341,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - # both not yet done but only 'foo' responded to find_job - not_done = {"foo", "bar"} - -- self.batch.check_find_job(not_done) -+ self.batch.patterns = {("salt/job/1234/ret/*", "find_job_return")} -+ self.batch.check_find_job(not_done, jid="1234") - - # assert 'bar' removed from active - self.assertEqual(self.batch.active, {"foo"}) -@@ -342,7 +352,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - - # assert 'find_job' schedueled again only for 'foo' - self.assertEqual( -- self.batch.event.io_loop.add_callback.call_args[0], -+ self.batch.event.io_loop.spawn_callback.call_args[0], - (self.batch.find_job, {"foo"}), - ) - -@@ -350,4 +360,4 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.event = MagicMock() - self.batch.scheduled = True - self.batch.schedule_next() -- self.assertEqual(len(self.batch.event.io_loop.call_later.mock_calls), 0) -+ self.assertEqual(len(self.batch.event.io_loop.spawn_callback.mock_calls), 0) --- -2.29.2 - - diff --git a/fix-failing-unit-tests-for-systemd.patch b/fix-failing-unit-tests-for-systemd.patch deleted file mode 100644 index a5159d8..0000000 --- a/fix-failing-unit-tests-for-systemd.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 74d8f5f2d896e5e8bbf7d3fb614ae32f2cf489a5 Mon Sep 17 00:00:00 2001 
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 11 Aug 2021 11:44:54 +0100 -Subject: [PATCH] Fix failing unit tests for systemd - ---- - tests/unit/modules/test_systemd_service.py | 24 ++++++++++++++++------ - 1 file changed, 18 insertions(+), 6 deletions(-) - -diff --git a/tests/unit/modules/test_systemd_service.py b/tests/unit/modules/test_systemd_service.py -index bbd89bb3d0..51be130d29 100644 ---- a/tests/unit/modules/test_systemd_service.py -+++ b/tests/unit/modules/test_systemd_service.py -@@ -165,21 +165,27 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin): - - # systemd < 231 - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertTrue(systemd.available("sshd.service")) - self.assertFalse(systemd.available("foo.service")) - - # systemd >= 231 - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}): - with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertTrue(systemd.available("sshd.service")) - self.assertFalse(systemd.available("bar.service")) - - # systemd < 231 with retcode/output changes backported (e.g. RHEL 7.3) - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}): - with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertTrue(systemd.available("sshd.service")) - self.assertFalse(systemd.available("bar.service")) - -@@ -191,21 +197,27 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin): - - # systemd < 231 - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertFalse(systemd.missing("sshd.service")) - self.assertTrue(systemd.missing("foo.service")) - - # systemd >= 231 - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}): - with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertFalse(systemd.missing("sshd.service")) - self.assertTrue(systemd.missing("bar.service")) - - # systemd < 231 with retcode/output changes backported (e.g. 
RHEL 7.3) - with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}): - with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231): -- with patch.object(systemd, "_systemctl_status", mock): -+ with patch.object(systemd, "_systemctl_status", mock), patch.object( -+ systemd, "offline", MagicMock(return_value=False) -+ ): - self.assertFalse(systemd.missing("sshd.service")) - self.assertTrue(systemd.missing("bar.service")) - --- -2.32.0 - - diff --git a/fix-for-log-checking-in-x509-test.patch b/fix-for-log-checking-in-x509-test.patch deleted file mode 100644 index 53679d8..0000000 --- a/fix-for-log-checking-in-x509-test.patch +++ /dev/null @@ -1,31 +0,0 @@ -From b4f54187ae7d231250f72244ffd874cc2c846150 Mon Sep 17 00:00:00 2001 -From: Jochen Breuer -Date: Thu, 28 Nov 2019 15:23:36 +0100 -Subject: [PATCH] Fix for log checking in x509 test - -We are logging in debug and not in trace mode here. ---- - tests/unit/modules/test_x509.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/tests/unit/modules/test_x509.py b/tests/unit/modules/test_x509.py -index 40aea12272..e7503395eb 100644 ---- a/tests/unit/modules/test_x509.py -+++ b/tests/unit/modules/test_x509.py -@@ -127,9 +127,9 @@ class X509TestCase(TestCase, LoaderModuleMockMixin): - - subj = FakeSubject() - x509._parse_subject(subj) -- assert x509.log.trace.call_args[0][0] == "Missing attribute '%s'. Error: %s" -- assert x509.log.trace.call_args[0][1] == list(subj.nid.keys())[0] -- assert isinstance(x509.log.trace.call_args[0][2], TypeError) -+ assert x509.log.debug.call_args[0][0] == "Missing attribute '%s'. Error: %s" -+ assert x509.log.debug.call_args[0][1] == list(subj.nid.keys())[0] -+ assert isinstance(x509.log.debug.call_args[0][2], TypeError) - - @skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable") - def test_get_pem_entry(self): --- -2.29.2 - - diff --git a/fix-for-some-cves-bsc1181550.patch b/fix-for-some-cves-bsc1181550.patch deleted file mode 100644 index 6722ed0..0000000 --- a/fix-for-some-cves-bsc1181550.patch +++ /dev/null @@ -1,5284 +0,0 @@ -From a74b74a640da563618783f309fe1eef391a98f41 Mon Sep 17 00:00:00 2001 -From: "Daniel A. 
Wozniak" -Date: Fri, 29 Jan 2021 14:30:27 -0700 -Subject: [PATCH] Fix for some cves bsc1181550 - -CVE-2020-28243 CVE-2020-28972 CVE-2020-35662 CVE-2021-3148 CVE-2021-3144 -CVE-2021-25281 CVE-2021-25282 CVE-2021-25283 CVE-2021-25284 -CVE-2021-3197 ---- - salt/auth/__init__.py | 1 + - salt/client/mixins.py | 71 ++-- - salt/client/ssh/client.py | 46 +++ - salt/cloud/clouds/qingcloud.py | 57 +-- - salt/cloud/clouds/vmware.py | 158 ++++---- - salt/config/schemas/vcenter.py | 8 +- - salt/master.py | 2 +- - salt/modules/bigip.py | 25 +- - salt/modules/cmdmod.py | 29 +- - salt/modules/glassfish.py | 32 +- - salt/modules/keystone.py | 148 ++++---- - salt/modules/restartcheck.py | 4 +- - salt/modules/vsphere.py | 660 ++++++++++++++++++++++++++++----- - salt/modules/zenoss.py | 26 +- - salt/pillar/vmware_pillar.py | 26 +- - salt/proxy/cimc.py | 31 +- - salt/proxy/panos.py | 28 +- - salt/proxy/vcenter.py | 6 +- - salt/returners/splunk.py | 34 +- - salt/runners/asam.py | 19 +- - salt/states/esxi.py | 228 ++++++------ - salt/utils/http.py | 20 + - salt/utils/thin.py | 4 +- - salt/utils/vmware.py | 128 ++++--- - salt/wheel/__init__.py | 12 +- - salt/wheel/pillar_roots.py | 21 +- - 26 files changed, 1201 insertions(+), 623 deletions(-) - -diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py -index 22c54e8048..56f8bd57c8 100644 ---- a/salt/auth/__init__.py -+++ b/salt/auth/__init__.py -@@ -270,6 +270,7 @@ class LoadAuth: - - if rm_tok: - self.rm_token(tok) -+ return {} - - return tdata - -diff --git a/salt/client/mixins.py b/salt/client/mixins.py -index b33ee54f27..6f408adbba 100644 ---- a/salt/client/mixins.py -+++ b/salt/client/mixins.py -@@ -1,10 +1,7 @@ --# coding: utf-8 - """ - A collection of mixins useful for the various *Client interfaces - """ - --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals, with_statement - - import copy as pycopy - import fnmatch -@@ -14,10 +11,7 @@ import traceback - import weakref - from collections.abc import Mapping, MutableMapping - --# Import Salt libs - import salt.exceptions -- --# Import 3rd-party libs - import salt.ext.tornado.stack_context - import salt.log.setup - import salt.minion -@@ -122,7 +116,7 @@ class ClientFuncsDict(MutableMapping): - return iter(self.client.functions) - - --class SyncClientMixin(object): -+class SyncClientMixin: - """ - A mixin for *Client interfaces to abstract common function execution - """ -@@ -182,7 +176,7 @@ class SyncClientMixin(object): - ) - if ret is None: - raise salt.exceptions.SaltClientTimeout( -- "RunnerClient job '{0}' timed out".format(job["jid"]), -+ "RunnerClient job '{}' timed out".format(job["jid"]), - jid=job["jid"], - ) - -@@ -281,7 +275,7 @@ class SyncClientMixin(object): - return True - - try: -- return self.opts["{0}_returns".format(class_name)] -+ return self.opts["{}_returns".format(class_name)] - except KeyError: - # No such option, assume this isn't one we care about gating and - # just return True. -@@ -308,7 +302,7 @@ class SyncClientMixin(object): - tag = low.get("__tag__", salt.utils.event.tagify(jid, prefix=self.tag_prefix)) - - data = { -- "fun": "{0}.{1}".format(self.client, fun), -+ "fun": "{}.{}".format(self.client, fun), - "jid": jid, - "user": low.get("__user__", "UNKNOWN"), - } -@@ -353,14 +347,14 @@ class SyncClientMixin(object): - # namespace only once per module-- not per func - completed_funcs = [] - -- for mod_name in six.iterkeys(self_functions): -+ for mod_name in self_functions.keys(): - if "." 
not in mod_name: - continue - mod, _ = mod_name.split(".", 1) - if mod in completed_funcs: - continue - completed_funcs.append(mod) -- for global_key, value in six.iteritems(func_globals): -+ for global_key, value in func_globals.items(): - self.functions[mod_name].__globals__[global_key] = value - - # There are some discrepancies of what a "low" structure is in the -@@ -398,7 +392,7 @@ class SyncClientMixin(object): - except TypeError as exc: - data[ - "return" -- ] = "\nPassed invalid arguments: {0}\n\nUsage:\n{1}".format( -+ ] = "\nPassed invalid arguments: {}\n\nUsage:\n{}".format( - exc, func.__doc__ - ) - try: -@@ -413,9 +407,9 @@ class SyncClientMixin(object): - ) - except (Exception, SystemExit) as ex: # pylint: disable=broad-except - if isinstance(ex, salt.exceptions.NotImplemented): -- data["return"] = six.text_type(ex) -+ data["return"] = str(ex) - else: -- data["return"] = "Exception occurred in {0} {1}: {2}".format( -+ data["return"] = "Exception occurred in {} {}: {}".format( - self.client, fun, traceback.format_exc(), - ) - data["success"] = False -@@ -477,7 +471,7 @@ class SyncClientMixin(object): - return salt.utils.doc.strip_rst(docs) - - --class AsyncClientMixin(object): -+class AsyncClientMixin: - """ - A mixin for *Client interfaces to enable easy asynchronous function execution - """ -@@ -485,10 +479,34 @@ class AsyncClientMixin(object): - client = None - tag_prefix = None - -+ def _proc_function_remote(self, fun, low, user, tag, jid, daemonize=True): -+ """ -+ Run this method in a multiprocess target to execute the function on the -+ master and fire the return data on the event bus -+ """ -+ if daemonize and not salt.utils.platform.is_windows(): -+ # Shutdown the multiprocessing before daemonizing -+ salt.log.setup.shutdown_multiprocessing_logging() -+ -+ salt.utils.process.daemonize() -+ -+ # Reconfigure multiprocessing logging after daemonizing -+ salt.log.setup.setup_multiprocessing_logging() -+ -+ # pack a few things into low -+ low["__jid__"] = jid -+ low["__user__"] = user -+ low["__tag__"] = tag -+ -+ try: -+ return self.cmd_sync(low) -+ except salt.exceptions.EauthAuthenticationError as exc: -+ log.error(exc) -+ - def _proc_function(self, fun, low, user, tag, jid, daemonize=True): - """ -- Run this method in a multiprocess target to execute the function in a -- multiprocess and fire the return data on the event bus -+ Run this method in a multiprocess target to execute the function -+ locally and fire the return data on the event bus - """ - if daemonize and not salt.utils.platform.is_windows(): - # Shutdown the multiprocessing before daemonizing -@@ -504,7 +522,7 @@ class AsyncClientMixin(object): - low["__user__"] = user - low["__tag__"] = tag - -- return self.low(fun, low, full_return=False) -+ return self.low(fun, low) - - def cmd_async(self, low): - """ -@@ -532,14 +550,18 @@ class AsyncClientMixin(object): - tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix) - return {"tag": tag, "jid": jid} - -- def asynchronous(self, fun, low, user="UNKNOWN", pub=None): -+ def asynchronous(self, fun, low, user="UNKNOWN", pub=None, local=True): - """ - Execute the function in a multiprocess and return the event tag to use - to watch for the return - """ -+ if local: -+ proc_func = self._proc_function -+ else: -+ proc_func = self._proc_function_remote - async_pub = pub if pub is not None else self._gen_async_pub() - proc = salt.utils.process.SignalHandlingProcess( -- target=self._proc_function, -+ target=proc_func, - name="ProcessFunc", - args=(fun, low, user, 
async_pub["tag"], async_pub["jid"]), - ) -@@ -577,9 +599,10 @@ class AsyncClientMixin(object): - if suffix == "ret": - # Check if outputter was passed in the return data. If this is the case, - # then the return data will be a dict two keys: 'data' and 'outputter' -- if isinstance(event.get("return"), dict) and set(event["return"]) == set( -- ("data", "outputter") -- ): -+ if isinstance(event.get("return"), dict) and set(event["return"]) == { -+ "data", -+ "outputter", -+ }: - event_data = event["return"]["data"] - outputter = event["return"]["outputter"] - else: -diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py -index d2dbdeb00e..2cf42f53e7 100644 ---- a/salt/client/ssh/client.py -+++ b/salt/client/ssh/client.py -@@ -43,12 +43,58 @@ class SSHClient: - # Salt API should never offer a custom roster! - self.opts["__disable_custom_roster"] = disable_custom_roster - -+ def sanitize_kwargs(self, kwargs): -+ roster_vals = [ -+ ("host", str), -+ ("ssh_user", str), -+ ("ssh_passwd", str), -+ ("ssh_port", int), -+ ("ssh_sudo", bool), -+ ("ssh_sudo_user", str), -+ ("ssh_priv", str), -+ ("ssh_priv_passwd", str), -+ ("ssh_identities_only", bool), -+ ("ssh_remote_port_forwards", str), -+ ("ssh_options", list), -+ ("roster_file", str), -+ ("rosters", list), -+ ("ignore_host_keys", bool), -+ ("raw_shell", bool), -+ ] -+ sane_kwargs = {} -+ for name, kind in roster_vals: -+ if name not in kwargs: -+ continue -+ try: -+ val = kind(kwargs[name]) -+ except ValueError: -+ log.warn("Unable to cast kwarg %s", name) -+ continue -+ if kind is bool or kind is int: -+ sane_kwargs[name] = val -+ elif kind is str: -+ if val.find("ProxyCommand") != -1: -+ log.warn("Filter unsafe value for kwarg %s", name) -+ continue -+ sane_kwargs[name] = val -+ elif kind is list: -+ sane_val = [] -+ for item in val: -+ # This assumes the values are strings -+ if item.find("ProxyCommand") != -1: -+ log.warn("Filter unsafe value for kwarg %s", name) -+ continue -+ sane_val.append(item) -+ sane_kwargs[name] = sane_val -+ return sane_kwargs -+ - def _prep_ssh( - self, tgt, fun, arg=(), timeout=None, tgt_type="glob", kwarg=None, **kwargs - ): - """ - Prepare the arguments - """ -+ kwargs = self.sanitize_kwargs(kwargs) - opts = copy.deepcopy(self.opts) - opts.update(kwargs) - if timeout: -diff --git a/salt/cloud/clouds/qingcloud.py b/salt/cloud/clouds/qingcloud.py -index b388840dd5..f4632e167c 100644 ---- a/salt/cloud/clouds/qingcloud.py -+++ b/salt/cloud/clouds/qingcloud.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - QingCloud Cloud Module - ====================== -@@ -26,8 +25,6 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or - :depends: requests - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import base64 - import hmac -@@ -46,13 +43,9 @@ from salt.exceptions import ( - SaltCloudNotFound, - SaltCloudSystemExit, - ) -- --# Import Salt Libs --from salt.ext import six - from salt.ext.six.moves import range - from salt.ext.six.moves.urllib.parse import quote as _quote - --# Import Third Party Libs - try: - import requests - -@@ -110,12 +103,12 @@ def _compute_signature(parameters, access_key_secret, method, path): - """ - parameters["signature_method"] = "HmacSHA256" - -- string_to_sign = "{0}\n{1}\n".format(method.upper(), path) -+ string_to_sign = "{}\n{}\n".format(method.upper(), path) - - keys = sorted(parameters.keys()) - pairs = [] - for key in keys: -- val = six.text_type(parameters[key]).encode("utf-8") -+ val = 
str(parameters[key]).encode("utf-8") - pairs.append(_quote(key, safe="") + "=" + _quote(val, safe="-_~")) - qs = "&".join(pairs) - string_to_sign += qs -@@ -141,6 +134,14 @@ def query(params=None): - "secret_access_key", get_configured_provider(), __opts__, search_global=False - ) - -+ verify_ssl = config.get_cloud_config_value( -+ "verify_ssl", -+ get_configured_provider(), -+ __opts__, -+ default=True, -+ search_global=False, -+ ) -+ - # public interface parameters - real_parameters = { - "access_key_id": access_key_id, -@@ -158,9 +159,9 @@ def query(params=None): - for sk, sv in value[i - 1].items(): - if isinstance(sv, dict) or isinstance(sv, list): - sv = salt.utils.json.dumps(sv, separators=(",", ":")) -- real_parameters["{0}.{1}.{2}".format(key, i, sk)] = sv -+ real_parameters["{}.{}.{}".format(key, i, sk)] = sv - else: -- real_parameters["{0}.{1}".format(key, i)] = value[i - 1] -+ real_parameters["{}.{}".format(key, i)] = value[i - 1] - else: - real_parameters[key] = value - -@@ -171,15 +172,15 @@ def query(params=None): - # print('parameters:') - # pprint.pprint(real_parameters) - -- request = requests.get(path, params=real_parameters, verify=False) -+ request = requests.get(path, params=real_parameters, verify=verify_ssl) - - # print('url:') - # print(request.url) - - if request.status_code != 200: - raise SaltCloudSystemExit( -- "An error occurred while querying QingCloud. HTTP Code: {0} " -- "Error: '{1}'".format(request.status_code, request.text) -+ "An error occurred while querying QingCloud. HTTP Code: {} " -+ "Error: '{}'".format(request.status_code, request.text) - ) - - log.debug(request.url) -@@ -222,7 +223,7 @@ def avail_locations(call=None): - for region in items["zone_set"]: - result[region["zone_id"]] = {} - for key in region: -- result[region["zone_id"]][key] = six.text_type(region[key]) -+ result[region["zone_id"]][key] = str(region[key]) - - return result - -@@ -233,7 +234,7 @@ def _get_location(vm_=None): - """ - locations = avail_locations() - -- vm_location = six.text_type( -+ vm_location = str( - config.get_cloud_config_value("zone", vm_, __opts__, search_global=False) - ) - -@@ -244,7 +245,7 @@ def _get_location(vm_=None): - return vm_location - - raise SaltCloudNotFound( -- "The specified location, '{0}', could not be found.".format(vm_location) -+ "The specified location, '{}', could not be found.".format(vm_location) - ) - - -@@ -302,7 +303,7 @@ def _get_image(vm_): - Return the VM's image. Used by create(). 
- """ - images = avail_images() -- vm_image = six.text_type( -+ vm_image = str( - config.get_cloud_config_value("image", vm_, __opts__, search_global=False) - ) - -@@ -313,7 +314,7 @@ def _get_image(vm_): - return vm_image - - raise SaltCloudNotFound( -- "The specified image, '{0}', could not be found.".format(vm_image) -+ "The specified image, '{}', could not be found.".format(vm_image) - ) - - -@@ -424,7 +425,7 @@ def _get_size(vm_): - """ - sizes = avail_sizes() - -- vm_size = six.text_type( -+ vm_size = str( - config.get_cloud_config_value("size", vm_, __opts__, search_global=False) - ) - -@@ -435,7 +436,7 @@ def _get_size(vm_): - return vm_size - - raise SaltCloudNotFound( -- "The specified size, '{0}', could not be found.".format(vm_size) -+ "The specified size, '{}', could not be found.".format(vm_size) - ) - - -@@ -616,7 +617,7 @@ def show_instance(instance_id, call=None, kwargs=None): - - if items["total_count"] == 0: - raise SaltCloudNotFound( -- "The specified instance, '{0}', could not be found.".format(instance_id) -+ "The specified instance, '{}', could not be found.".format(instance_id) - ) - - full_node = items["instance_set"][0] -@@ -668,7 +669,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "starting create", -- "salt/cloud/{0}/creating".format(vm_["name"]), -+ "salt/cloud/{}/creating".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "creating", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -693,7 +694,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "requesting instance", -- "salt/cloud/{0}/requesting".format(vm_["name"]), -+ "salt/cloud/{}/requesting".format(vm_["name"]), - args={ - "kwargs": __utils__["cloud.filter_event"]( - "requesting", params, list(params) -@@ -724,7 +725,7 @@ def create(vm_): - except SaltCloudSystemExit: - pass - finally: -- raise SaltCloudSystemExit(six.text_type(exc)) -+ raise SaltCloudSystemExit(str(exc)) - - private_ip = data["private_ips"][0] - -@@ -742,7 +743,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "created instance", -- "salt/cloud/{0}/created".format(vm_["name"]), -+ "salt/cloud/{}/created".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "created", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -868,7 +869,7 @@ def destroy(instance_id, call=None): - __utils__["cloud.fire_event"]( - "event", - "destroying instance", -- "salt/cloud/{0}/destroying".format(name), -+ "salt/cloud/{}/destroying".format(name), - args={"name": name}, - sock_dir=__opts__["sock_dir"], - transport=__opts__["transport"], -@@ -884,7 +885,7 @@ def destroy(instance_id, call=None): - __utils__["cloud.fire_event"]( - "event", - "destroyed instance", -- "salt/cloud/{0}/destroyed".format(name), -+ "salt/cloud/{}/destroyed".format(name), - args={"name": name}, - sock_dir=__opts__["sock_dir"], - transport=__opts__["transport"], -diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py -index edaca9618b..851579bf74 100644 ---- a/salt/cloud/clouds/vmware.py -+++ b/salt/cloud/clouds/vmware.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - # pylint: disable=C0302 - """ - VMware Cloud Module -@@ -114,8 +113,6 @@ To test the connection for ``my-vmware-config`` specified in the cloud - configuration, run :py:func:`test_vcenter_connection` - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import logging - import os.path -@@ -125,10 +122,7 @@ import subprocess - import time - from random import randint 
- --# Import salt cloud libs - import salt.config as config -- --# Import salt libs - import salt.utils.cloud - import salt.utils.network - import salt.utils.stringutils -@@ -136,9 +130,6 @@ import salt.utils.vmware - import salt.utils.xmlutil - from salt.exceptions import SaltCloudSystemExit - --# Import 3rd-party libs --from salt.ext import six -- - try: - # Attempt to import pyVmomi libs - from pyVmomi import vim # pylint: disable=no-name-in-module -@@ -230,7 +221,7 @@ def _str_to_bool(var): - if isinstance(var, bool): - return var - -- if isinstance(var, six.string_types): -+ if isinstance(var, str): - return True if var.lower() == "true" else False - - return None -@@ -260,9 +251,15 @@ def _get_si(): - port = config.get_cloud_config_value( - "port", get_configured_provider(), __opts__, search_global=False, default=443 - ) -- -+ verify_ssl = config.get_cloud_config_value( -+ "verify_ssl", -+ get_configured_provider(), -+ __opts__, -+ search_global=False, -+ default=True, -+ ) - return salt.utils.vmware.get_service_instance( -- url, username, password, protocol=protocol, port=port -+ url, username, password, protocol=protocol, port=port, verify_ssl=verify_ssl - ) - - -@@ -299,7 +296,7 @@ def _add_new_hard_disk_helper( - disk_spec.device.key = random_key - disk_spec.device.deviceInfo = vim.Description() - disk_spec.device.deviceInfo.label = disk_label -- disk_spec.device.deviceInfo.summary = "{0} GB".format(size_gb) -+ disk_spec.device.deviceInfo.summary = "{} GB".format(size_gb) - - disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() - disk_spec.device.backing.thinProvisioned = thin_provision -@@ -320,7 +317,7 @@ def _add_new_hard_disk_helper( - if not datastore_cluster_ref: - # datastore/datastore cluster specified does not exist - raise SaltCloudSystemExit( -- "Specified datastore/datastore cluster ({0}) for disk ({1}) does not exist".format( -+ "Specified datastore/datastore cluster ({}) for disk ({}) does not exist".format( - datastore, disk_label - ) - ) -@@ -351,12 +348,12 @@ def _add_new_hard_disk_helper( - if not datastore_ref: - # datastore cluster specified does not have any accessible datastores - raise SaltCloudSystemExit( -- "Specified datastore cluster ({0}) for disk ({1}) does not have any accessible datastores available".format( -+ "Specified datastore cluster ({}) for disk ({}) does not have any accessible datastores available".format( - datastore, disk_label - ) - ) - -- datastore_path = "[" + six.text_type(datastore_ref.name) + "] " + vm_name -+ datastore_path = "[" + str(datastore_ref.name) + "] " + vm_name - disk_spec.device.backing.fileName = datastore_path + "/" + disk_label + ".vmdk" - disk_spec.device.backing.datastore = datastore_ref - log.trace( -@@ -429,11 +426,11 @@ def _edit_existing_network_adapter( - else: - # If switch type not specified or does not match, show error and return - if not switch_type: -- err_msg = "The switch type to be used by '{0}' has not been specified".format( -+ err_msg = "The switch type to be used by '{}' has not been specified".format( - network_adapter.deviceInfo.label - ) - else: -- err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( -+ err_msg = "Cannot create '{}'. 
Invalid/unsupported switch type '{}'".format( - network_adapter.deviceInfo.label, switch_type - ) - raise SaltCloudSystemExit(err_msg) -@@ -516,11 +513,11 @@ def _add_new_network_adapter_helper( - else: - # If switch type not specified or does not match, show error and return - if not switch_type: -- err_msg = "The switch type to be used by '{0}' has not been specified".format( -+ err_msg = "The switch type to be used by '{}' has not been specified".format( - network_adapter_label - ) - else: -- err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( -+ err_msg = "Cannot create '{}'. Invalid/unsupported switch type '{}'".format( - network_adapter_label, switch_type - ) - raise SaltCloudSystemExit(err_msg) -@@ -572,11 +569,11 @@ def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_numbe - else: - # If type not specified or does not match, show error and return - if not adapter_type: -- err_msg = "The type of '{0}' has not been specified".format( -+ err_msg = "The type of '{}' has not been specified".format( - scsi_controller_label - ) - else: -- err_msg = "Cannot create '{0}'. Invalid/unsupported type '{1}'".format( -+ err_msg = "Cannot create '{}'. Invalid/unsupported type '{}'".format( - scsi_controller_label, adapter_type - ) - raise SaltCloudSystemExit(err_msg) -@@ -653,7 +650,7 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path): - if datastore_ref: - drive.backing.datastore = datastore_ref - -- drive.deviceInfo.summary = "ISO {0}".format(iso_path) -+ drive.deviceInfo.summary = "ISO {}".format(iso_path) - - elif device_type == "client_device": - if mode == "passthrough": -@@ -735,8 +732,8 @@ def _set_network_adapter_mapping(adapter_specs): - gateway = adapter_specs["gateway"] - adapter_mapping.adapter.gateway = gateway - if "ip" in list(adapter_specs.keys()): -- ip = six.text_type(adapter_specs["ip"]) -- subnet_mask = six.text_type(adapter_specs["subnet_mask"]) -+ ip = str(adapter_specs["ip"]) -+ subnet_mask = str(adapter_specs["subnet_mask"]) - adapter_mapping.adapter.ip = vim.vm.customization.FixedIp(ipAddress=ip) - adapter_mapping.adapter.subnetMask = subnet_mask - else: -@@ -823,8 +820,8 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None): - - if device.capacityInKB > size_kb: - raise SaltCloudSystemExit( -- "The specified disk size '{0}GB' for '{1}' is " -- "smaller than the disk image size '{2}GB'. It must " -+ "The specified disk size '{}GB' for '{}' is " -+ "smaller than the disk image size '{}GB'. 
It must " - "be equal to or greater than the disk image".format( - float( - devices["disk"][device.deviceInfo.label]["size"] -@@ -908,7 +905,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None): - else None - ) - if bus_sharing and bus_sharing in ["virtual", "physical", "no"]: -- bus_sharing = "{0}Sharing".format(bus_sharing) -+ bus_sharing = "{}Sharing".format(bus_sharing) - if bus_sharing != device.sharedBus: - # Only edit the SCSI controller if bus_sharing is different - scsi_spec = _edit_existing_scsi_controller( -@@ -1112,7 +1109,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None): - ide_controllers[controller_key] = 0 - break - else: -- for ide_controller_key, num_devices in six.iteritems(ide_controllers): -+ for ide_controller_key, num_devices in ide_controllers.items(): - if num_devices < 2: - controller_key = ide_controller_key - break -@@ -1145,10 +1142,7 @@ def _wait_for_vmware_tools(vm_ref, max_wait): - vm_ref.name, - time_counter, - ) -- if ( -- six.text_type(vm_ref.summary.guest.toolsRunningStatus) -- == "guestToolsRunning" -- ): -+ if str(vm_ref.summary.guest.toolsRunningStatus) == "guestToolsRunning": - log.info( - "[ %s ] Successfully got VMware tools running on the guest in " - "%s seconds", -@@ -1314,23 +1308,21 @@ def _format_instance_info_select(vm, selection): - vm_select_info["id"] = vm["name"] - - if "image" in selection: -- vm_select_info["image"] = "{0} (Detected)".format( -+ vm_select_info["image"] = "{} (Detected)".format( - defaultto(vm, "config.guestFullName") - ) - - if "size" in selection: - cpu = defaultto(vm, "config.hardware.numCPU") -- ram = "{0} MB".format(defaultto(vm, "config.hardware.memoryMB")) -- vm_select_info["size"] = "cpu: {0}\nram: {1}".format(cpu, ram) -+ ram = "{} MB".format(defaultto(vm, "config.hardware.memoryMB")) -+ vm_select_info["size"] = "cpu: {}\nram: {}".format(cpu, ram) - vm_select_info["size_dict"] = { - "cpu": cpu, - "memory": ram, - } - - if "state" in selection: -- vm_select_info["state"] = six.text_type( -- defaultto(vm, "summary.runtime.powerState") -- ) -+ vm_select_info["state"] = str(defaultto(vm, "summary.runtime.powerState")) - - if "guest_id" in selection: - vm_select_info["guest_id"] = defaultto(vm, "config.guestId") -@@ -1342,9 +1334,7 @@ def _format_instance_info_select(vm, selection): - vm_select_info["path"] = defaultto(vm, "config.files.vmPathName") - - if "tools_status" in selection: -- vm_select_info["tools_status"] = six.text_type( -- defaultto(vm, "guest.toolsStatus") -- ) -+ vm_select_info["tools_status"] = str(defaultto(vm, "guest.toolsStatus")) - - if "private_ips" in selection or "networks" in selection: - network_full_info = {} -@@ -1585,18 +1575,18 @@ def _format_instance_info(vm): - - cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = ( -- "{0} MB".format(vm["config.hardware.memoryMB"]) -+ "{} MB".format(vm["config.hardware.memoryMB"]) - if "config.hardware.memoryMB" in vm - else "N/A" - ) - vm_full_info = { -- "id": six.text_type(vm["name"]), -- "image": "{0} (Detected)".format(vm["config.guestFullName"]) -+ "id": str(vm["name"]), -+ "image": "{} (Detected)".format(vm["config.guestFullName"]) - if "config.guestFullName" in vm - else "N/A", -- "size": "cpu: {0}\nram: {1}".format(cpu, ram), -+ "size": "cpu: {}\nram: {}".format(cpu, ram), - "size_dict": {"cpu": cpu, "memory": ram}, -- "state": six.text_type(vm["summary.runtime.powerState"]) -+ "state": str(vm["summary.runtime.powerState"]) - if 
"summary.runtime.powerState" in vm - else "N/A", - "private_ips": ip_addresses, -@@ -1604,16 +1594,14 @@ def _format_instance_info(vm): - "devices": device_full_info, - "storage": storage_full_info, - "files": file_full_info, -- "guest_id": six.text_type(vm["config.guestId"]) -- if "config.guestId" in vm -- else "N/A", -- "hostname": six.text_type(vm["object"].guest.hostName), -+ "guest_id": str(vm["config.guestId"]) if "config.guestId" in vm else "N/A", -+ "hostname": str(vm["object"].guest.hostName), - "mac_addresses": device_mac_addresses, - "networks": network_full_info, -- "path": six.text_type(vm["config.files.vmPathName"]) -+ "path": str(vm["config.files.vmPathName"]) - if "config.files.vmPathName" in vm - else "N/A", -- "tools_status": six.text_type(vm["guest.toolsStatus"]) -+ "tools_status": str(vm["guest.toolsStatus"]) - if "guest.toolsStatus" in vm - else "N/A", - } -@@ -1624,11 +1612,11 @@ def _format_instance_info(vm): - def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""): - snapshots = {} - for snapshot in snapshot_list: -- snapshot_path = "{0}/{1}".format(parent_snapshot_path, snapshot.name) -+ snapshot_path = "{}/{}".format(parent_snapshot_path, snapshot.name) - snapshots[snapshot_path] = { - "name": snapshot.name, - "description": snapshot.description, -- "created": six.text_type(snapshot.createTime).split(".")[0], -+ "created": str(snapshot.createTime).split(".")[0], - "state": snapshot.state, - "path": snapshot_path, - } -@@ -1760,7 +1748,7 @@ def test_vcenter_connection(kwargs=None, call=None): - # Get the service instance object - _get_si() - except Exception as exc: # pylint: disable=broad-except -- return "failed to connect: {0}".format(exc) -+ return "failed to connect: {}".format(exc) - - return "connection successful" - -@@ -2000,18 +1988,18 @@ def list_nodes(kwargs=None, call=None): - for vm in vm_list: - cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = ( -- "{0} MB".format(vm["config.hardware.memoryMB"]) -+ "{} MB".format(vm["config.hardware.memoryMB"]) - if "config.hardware.memoryMB" in vm - else "N/A" - ) - vm_info = { - "id": vm["name"], -- "image": "{0} (Detected)".format(vm["config.guestFullName"]) -+ "image": "{} (Detected)".format(vm["config.guestFullName"]) - if "config.guestFullName" in vm - else "N/A", -- "size": "cpu: {0}\nram: {1}".format(cpu, ram), -+ "size": "cpu: {}\nram: {}".format(cpu, ram), - "size_dict": {"cpu": cpu, "memory": ram}, -- "state": six.text_type(vm["summary.runtime.powerState"]) -+ "state": str(vm["summary.runtime.powerState"]) - if "summary.runtime.powerState" in vm - else "N/A", - "private_ips": [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], -@@ -2660,7 +2648,7 @@ def destroy(name, call=None): - __utils__["cloud.fire_event"]( - "event", - "destroying instance", -- "salt/cloud/{0}/destroying".format(name), -+ "salt/cloud/{}/destroying".format(name), - args={"name": name}, - sock_dir=__opts__["sock_dir"], - transport=__opts__["transport"], -@@ -2706,7 +2694,7 @@ def destroy(name, call=None): - __utils__["cloud.fire_event"]( - "event", - "destroyed instance", -- "salt/cloud/{0}/destroyed".format(name), -+ "salt/cloud/{}/destroyed".format(name), - args={"name": name}, - sock_dir=__opts__["sock_dir"], - transport=__opts__["transport"], -@@ -2748,7 +2736,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "starting create", -- "salt/cloud/{0}/creating".format(vm_["name"]), -+ "salt/cloud/{}/creating".format(vm_["name"]), - 
args=__utils__["cloud.filter_event"]( - "creating", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -2825,10 +2813,10 @@ def create(vm_): - "win_run_once", vm_, __opts__, search_global=False, default=None - ) - cpu_hot_add = config.get_cloud_config_value( -- 'cpu_hot_add', vm_, __opts__, search_global=False, default=None -+ "cpu_hot_add", vm_, __opts__, search_global=False, default=None - ) - mem_hot_add = config.get_cloud_config_value( -- 'mem_hot_add', vm_, __opts__, search_global=False, default=None -+ "mem_hot_add", vm_, __opts__, search_global=False, default=None - ) - - # Get service instance object -@@ -2988,7 +2976,7 @@ def create(vm_): - ) - if not datastore_ref: - raise SaltCloudSystemExit( -- "Specified datastore: '{0}' does not exist".format(datastore) -+ "Specified datastore: '{}' does not exist".format(datastore) - ) - - if host: -@@ -3004,7 +2992,7 @@ def create(vm_): - # If the hardware version is specified and if it is different from the current - # hardware version, then schedule a hardware version upgrade - if hardware_version and object_ref is not None: -- hardware_version = "vmx-{0:02}".format(hardware_version) -+ hardware_version = "vmx-{:02}".format(hardware_version) - if hardware_version != object_ref.config.version: - log.debug( - "Scheduling hardware version upgrade from %s to %s", -@@ -3034,7 +3022,7 @@ def create(vm_): - elif memory_unit.lower() == "gb": - memory_mb = int(float(memory_num) * 1024.0) - else: -- err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) -+ err_msg = "Invalid memory type specified: '{}'".format(memory_unit) - log.error(err_msg) - return {"Error": err_msg} - except (TypeError, ValueError): -@@ -3048,19 +3036,19 @@ def create(vm_): - ) - config_spec.deviceChange = specs["device_specs"] - -- if cpu_hot_add and hasattr(config_spec, 'cpuHotAddEnabled'): -+ if cpu_hot_add and hasattr(config_spec, "cpuHotAddEnabled"): - config_spec.cpuHotAddEnabled = bool(cpu_hot_add) - -- if mem_hot_add and hasattr(config_spec, 'memoryHotAddEnabled'): -+ if mem_hot_add and hasattr(config_spec, "memoryHotAddEnabled"): - config_spec.memoryHotAddEnabled = bool(mem_hot_add) - - if extra_config: -- for key, value in six.iteritems(extra_config): -+ for key, value in extra_config.items(): - option = vim.option.OptionValue(key=key, value=value) - config_spec.extraConfig.append(option) - - if annotation: -- config_spec.annotation = six.text_type(annotation) -+ config_spec.annotation = str(annotation) - - if "clonefrom" in vm_: - clone_spec = handle_snapshot(config_spec, object_ref, reloc_spec, template, vm_) -@@ -3137,7 +3125,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "requesting instance", -- "salt/cloud/{0}/requesting".format(vm_["name"]), -+ "salt/cloud/{}/requesting".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "requesting", event_kwargs, list(event_kwargs) - ), -@@ -3190,7 +3178,7 @@ def create(vm_): - task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref) - salt.utils.vmware.wait_for_task(task, vm_name, "create", 15, "info") - except Exception as exc: # pylint: disable=broad-except -- err_msg = "Error creating {0}: {1}".format(vm_["name"], exc) -+ err_msg = "Error creating {}: {}".format(vm_["name"], exc) - log.error( - err_msg, - # Show the traceback if the debug logging level is enabled -@@ -3235,7 +3223,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "created instance", -- "salt/cloud/{0}/created".format(vm_["name"]), -+ "salt/cloud/{}/created".format(vm_["name"]), - 
args=__utils__["cloud.filter_event"]( - "created", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -3267,7 +3255,7 @@ def handle_snapshot(config_spec, object_ref, reloc_spec, template, vm_): - raise SaltCloudSystemExit( - "Invalid disk move type specified" - " supported types are" -- " {0}".format(" ".join(allowed_types)) -+ " {}".format(" ".join(allowed_types)) - ) - return clone_spec - -@@ -3470,7 +3458,7 @@ def rescan_hba(kwargs=None, call=None): - if hba: - log.info("Rescanning HBA %s on host %s", hba, host_name) - host_ref.configManager.storageSystem.RescanHba(hba) -- ret = "rescanned HBA {0}".format(hba) -+ ret = "rescanned HBA {}".format(hba) - else: - log.info("Rescanning all HBAs on host %s", host_name) - host_ref.configManager.storageSystem.RescanAllHba() -@@ -3749,7 +3737,7 @@ def list_hbas(kwargs=None, call=None): - - if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]: - raise SaltCloudSystemExit( -- "Specified hba type {0} currently not supported.".format(hba_type) -+ "Specified hba type {} currently not supported.".format(hba_type) - ) - - host_list = salt.utils.vmware.get_mors_with_properties( -@@ -4124,10 +4112,10 @@ def revert_to_snapshot(name, kwargs=None, call=None): - task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on) - else: - log.debug("Reverting VM %s to snapshot %s", name, snapshot_name) -- msg = "reverted to snapshot {0}".format(snapshot_name) -+ msg = "reverted to snapshot {}".format(snapshot_name) - snapshot_ref = _get_snapshot_ref_by_name(vm_ref, snapshot_name) - if snapshot_ref is None: -- return "specified snapshot '{0}' does not exist".format(snapshot_name) -+ return "specified snapshot '{}' does not exist".format(snapshot_name) - task = snapshot_ref.snapshot.Revert(suppressPowerOn=suppress_power_on) - - salt.utils.vmware.wait_for_task(task, name, "revert to snapshot", 5, "info") -@@ -4265,7 +4253,7 @@ def convert_to_template(name, kwargs=None, call=None): - vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) - - if vm_ref.config.template: -- raise SaltCloudSystemExit("{0} already a template".format(name)) -+ raise SaltCloudSystemExit("{} already a template".format(name)) - - try: - vm_ref.MarkAsTemplate() -@@ -4279,7 +4267,7 @@ - ) - return "failed to convert to template" - -- return "{0} converted to template".format(name) -+ return "{} converted to template".format(name) - - - def add_host(kwargs=None, call=None): -@@ -4399,7 +4387,7 @@ def add_host(kwargs=None, call=None): - ("echo", "-n"), stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - p2 = subprocess.Popen( -- ("openssl", "s_client", "-connect", "{0}:443".format(host_name)), -+ ("openssl", "s_client", "-connect", "{}:443".format(host_name)), - stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, -@@ -4429,12 +4417,12 @@ - try: - if cluster_name: - task = cluster_ref.AddHost(spec=spec, asConnected=True) -- ret = "added host system to cluster {0}".format(cluster_name) -+ ret = "added host system to cluster {}".format(cluster_name) - if datacenter_name: - task = datacenter_ref.hostFolder.AddStandaloneHost( - spec=spec, addConnected=True - ) -- ret = "added host system to datacenter {0}".format(datacenter_name) -+ ret = "added host system to datacenter {}".format(datacenter_name) - salt.utils.vmware.wait_for_task(task, host_name, "add host system", 5, "info") - except Exception as exc: # pylint:
disable=broad-except - if isinstance(exc, vim.fault.SSLVerifyFault): -diff --git a/salt/config/schemas/vcenter.py b/salt/config/schemas/vcenter.py -index 7db8b67c41..bd82bd1761 100644 ---- a/salt/config/schemas/vcenter.py -+++ b/salt/config/schemas/vcenter.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - :codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)` - :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` -@@ -9,11 +8,8 @@ - VCenter configuration schemas - """ - --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - --# Import Salt libs --from salt.utils.schema import ArrayItem, IntegerItem, Schema, StringItem -+from salt.utils.schema import ArrayItem, BooleanItem, IntegerItem, Schema, StringItem - - - class VCenterEntitySchema(Schema): -@@ -48,6 +44,8 @@ class VCenterProxySchema(Schema): - mechanism = StringItem(required=True, enum=["userpass", "sspi"]) - username = StringItem() - passwords = ArrayItem(min_items=1, items=StringItem(), unique_items=True) -+ verify_ssl = BooleanItem() -+ ca_bundle = StringItem() - - domain = StringItem() - principal = StringItem(default="host") -diff --git a/salt/master.py b/salt/master.py -index 59bb19ce75..fc103ac489 100644 ---- a/salt/master.py -+++ b/salt/master.py -@@ -2126,7 +2126,7 @@ class ClearFuncs(TransportMethods): - fun = clear_load.pop("fun") - runner_client = salt.runner.RunnerClient(self.opts) - return runner_client.asynchronous( -- fun, clear_load.get("kwarg", {}), username -+ fun, clear_load.get("kwarg", {}), username, local=True - ) - except Exception as exc: # pylint: disable=broad-except - log.error("Exception occurred while introspecting %s: %s", fun, exc) -diff --git a/salt/modules/bigip.py b/salt/modules/bigip.py -index 2b54e4d27c..36168d66b4 100644 ---- a/salt/modules/bigip.py -+++ b/salt/modules/bigip.py -@@ -1,21 +1,14 @@ --# -*- coding: utf-8 -*- - """ - An execution module which can manipulate an f5 bigip via iControl REST - :maturity: develop - :platform: f5_bigip_11.6 - """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - --# Import salt libs - import salt.exceptions - import salt.utils.json -- --# Import 3rd-party libs - from salt.ext import six - --# Import third party libs - try: - import requests - import requests.exceptions -@@ -52,7 +45,7 @@ def _build_session(username, password, trans_label=None): - - bigip = requests.session() - bigip.auth = (username, password) -- bigip.verify = False -+ bigip.verify = True - bigip.headers.update({"Content-Type": "application/json"}) - - if trans_label: -@@ -109,7 +102,7 @@ def _loop_payload(params): - payload = {} - - # set the payload -- for param, value in six.iteritems(params): -+ for param, value in params.items(): - if value is not None: - payload[param] = value - -@@ -153,7 +146,7 @@ def _determine_toggles(payload, toggles): - Figure out what it likes to hear without confusing the user. - """ - -- for toggle, definition in six.iteritems(toggles): -+ for toggle, definition in toggles.items(): - # did the user specify anything? 
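The hunks above and below all apply the same Python-3-only modernization: explicit ``{0}``/``{1}`` format indices become auto-numbered ``{}``, ``six.iteritems``/``six.iterkeys`` give way to plain ``dict`` methods, and ``six.text_type`` becomes ``str``. A minimal, self-contained sketch of the equivalences (illustrative only, not taken from the patch):

.. code-block:: python

    extra_config = {"guestinfo.foo": "1"}

    # six.iteritems(d) returned a lazy iterator on Python 2; on Python 3
    # d.items() is already a view, so the six wrapper is redundant.
    for key, value in extra_config.items():
        option = "{}={}".format(key, value)  # was "{0}={1}".format(...)

    # six.text_type was unicode on Python 2 and is simply str on Python 3.
    annotation = str(42)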
- if definition["value"] is not None: - # test for yes_no toggle -@@ -1046,7 +1039,7 @@ def replace_pool_members(hostname, username, password, name, members): - # specify members if provided - if members is not None: - -- if isinstance(members, six.string_types): -+ if isinstance(members, str): - members = members.split(",") - - pool_members = [] -@@ -1583,7 +1576,7 @@ def create_virtual( - payload["vlans"] = "none" - elif vlans == "default": - payload["vlans"] = "default" -- elif isinstance(vlans, six.string_types) and ( -+ elif isinstance(vlans, str) and ( - vlans.startswith("enabled") or vlans.startswith("disabled") - ): - try: -@@ -2016,7 +2009,7 @@ def create_monitor(hostname, username, password, monitor_type, name, **kwargs): - - # there's a ton of different monitors and a ton of options for each type of monitor. - # this logic relies that the end user knows which options are meant for which monitor types -- for key, value in six.iteritems(kwargs): -+ for key, value in kwargs.items(): - if not key.startswith("__"): - if key not in ["hostname", "username", "password", "type"]: - key = key.replace("_", "-") -@@ -2067,7 +2060,7 @@ def modify_monitor(hostname, username, password, monitor_type, name, **kwargs): - - # there's a ton of different monitors and a ton of options for each type of monitor. - # this logic relies that the end user knows which options are meant for which monitor types -- for key, value in six.iteritems(kwargs): -+ for key, value in kwargs.items(): - if not key.startswith("__"): - if key not in ["hostname", "username", "password", "type", "name"]: - key = key.replace("_", "-") -@@ -2231,7 +2224,7 @@ def create_profile(hostname, username, password, profile_type, name, **kwargs): - - # there's a ton of different profiles and a ton of options for each type of profile. - # this logic relies that the end user knows which options are meant for which profile types -- for key, value in six.iteritems(kwargs): -+ for key, value in kwargs.items(): - if not key.startswith("__"): - if key not in ["hostname", "username", "password", "profile_type"]: - key = key.replace("_", "-") -@@ -2322,7 +2315,7 @@ def modify_profile(hostname, username, password, profile_type, name, **kwargs): - - # there's a ton of different profiles and a ton of options for each type of profile. - # this logic relies that the end user knows which options are meant for which profile types -- for key, value in six.iteritems(kwargs): -+ for key, value in kwargs.items(): - if not key.startswith("__"): - if key not in ["hostname", "username", "password", "profile_type"]: - key = key.replace("_", "-") -diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py -index c8eb4d2305..bbc303c3f8 100644 ---- a/salt/modules/cmdmod.py -+++ b/salt/modules/cmdmod.py -@@ -77,6 +77,12 @@ def __virtual__(): - return __virtualname__ - - -+def _log_cmd(cmd): -+ if not isinstance(cmd, list): -+ return cmd.split()[0].strip() -+ return cmd[0].strip() -+ -+ - def _check_cb(cb_): - """ - If the callback is None or is not callable, return a lambda that returns -@@ -386,22 +392,13 @@ def _run( - ) - env[bad_env_key] = "" - -- def _get_stripped(cmd): -- # Return stripped command string copies to improve logging. -- if isinstance(cmd, list): -- return [x.strip() if isinstance(x, str) else x for x in cmd] -- elif isinstance(cmd, str): -- return cmd.strip() -- else: -- return cmd -- - if output_loglevel is not None: - # Always log the shell commands at INFO unless quiet logging is - # requested. 
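With ``_get_stripped`` removed, the new ``_log_cmd`` helper logs only the first token of a command, so credentials passed as arguments no longer leak into the minion log at INFO or ERROR level. A standalone check of the helper's behavior as defined in the hunk above (the example commands are made up):

.. code-block:: python

    def _log_cmd(cmd):
        # Keep only the executable name; drop every argument.
        if not isinstance(cmd, list):
            return cmd.split()[0].strip()
        return cmd[0].strip()

    assert _log_cmd("mysql -u root -psecret") == "mysql"
    assert _log_cmd(["curl", "-H", "Authorization: Bearer token"]) == "curl"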
The command output is what will be controlled by the - # 'loglevel' parameter. - msg = "Executing command {}{}{} {}{}in directory '{}'{}".format( - "'" if not isinstance(cmd, list) else "", -- _get_stripped(cmd), -+ _log_cmd(cmd), - "'" if not isinstance(cmd, list) else "", - "as user '{}' ".format(runas) if runas else "", - "in group '{}' ".format(group) if group else "", -@@ -723,7 +720,7 @@ def _run( - log.error( - "Failed to decode stdout from command %s, non-decodable " - "characters have been replaced", -- cmd, -+ _log_cmd(cmd), - ) - - try: -@@ -741,7 +738,7 @@ def _run( - log.error( - "Failed to decode stderr from command %s, non-decodable " - "characters have been replaced", -- cmd, -+ _log_cmd(cmd), - ) - - if rstrip: -@@ -841,7 +838,9 @@ def _run( - if not ignore_retcode and ret["retcode"] != 0: - if output_loglevel < LOG_LEVELS["error"]: - output_loglevel = LOG_LEVELS["error"] -- msg = "Command '{}' failed with return code: {}".format(cmd, ret["retcode"]) -+ msg = "Command '{}' failed with return code: {}".format( -+ _log_cmd(cmd), ret["retcode"] -+ ) - log.error(log_callback(msg)) - if ret["stdout"]: - log.log(output_loglevel, "stdout: %s", log_callback(ret["stdout"])) -@@ -1211,7 +1210,9 @@ def run( - if not ignore_retcode and ret["retcode"] != 0: - if lvl < LOG_LEVELS["error"]: - lvl = LOG_LEVELS["error"] -- msg = "Command '{}' failed with return code: {}".format(cmd, ret["retcode"]) -+ msg = "Command '{}' failed with return code: {}".format( -+ _log_cmd(cmd), ret["retcode"] -+ ) - log.error(log_callback(msg)) - if raise_err: - raise CommandExecutionError( -diff --git a/salt/modules/glassfish.py b/salt/modules/glassfish.py -index 44df1d3cbb..59a171d2e6 100644 ---- a/salt/modules/glassfish.py -+++ b/salt/modules/glassfish.py -@@ -1,10 +1,8 @@ --# -*- coding: utf-8 -*- - """ - Module for working with the Glassfish/Payara 4.x management API - .. 
versionadded:: Carbon - :depends: requests - """ --from __future__ import absolute_import, print_function, unicode_literals - - import salt.defaults.exitcodes - import salt.utils.json -@@ -42,7 +40,7 @@ def __virtual__(): - else: - return ( - False, -- 'The "{0}" module could not be loaded: ' -+ 'The "{}" module could not be loaded: ' - '"requests" is not installed.'.format(__virtualname__), - ) - -@@ -73,9 +71,9 @@ def _get_url(ssl, url, port, path): - Returns the URL of the endpoint - """ - if ssl: -- return "https://{0}:{1}/management/domain/{2}".format(url, port, path) -+ return "https://{}:{}/management/domain/{}".format(url, port, path) - else: -- return "http://{0}:{1}/management/domain/{2}".format(url, port, path) -+ return "http://{}:{}/management/domain/{}".format(url, port, path) - - - def _get_server(server): -@@ -128,7 +126,7 @@ def _api_get(path, server=None): - url=_get_url(server["ssl"], server["url"], server["port"], path), - auth=_get_auth(server["user"], server["password"]), - headers=_get_headers(), -- verify=False, -+ verify=True, - ) - return _api_response(response) - -@@ -143,7 +141,7 @@ def _api_post(path, data, server=None): - auth=_get_auth(server["user"], server["password"]), - headers=_get_headers(), - data=salt.utils.json.dumps(data), -- verify=False, -+ verify=True, - ) - return _api_response(response) - -@@ -158,7 +156,7 @@ def _api_delete(path, data, server=None): - auth=_get_auth(server["user"], server["password"]), - headers=_get_headers(), - params=data, -- verify=False, -+ verify=True, - ) - return _api_response(response) - -@@ -183,7 +181,7 @@ def _get_element_properties(name, element_type, server=None): - Get an element's properties - """ - properties = {} -- data = _api_get("{0}/{1}/property".format(element_type, name), server) -+ data = _api_get("{}/{}/property".format(element_type, name), server) - - # Get properties into a dict - if any(data["extraProperties"]["properties"]): -@@ -199,7 +197,7 @@ def _get_element(name, element_type, server=None, with_properties=True): - """ - element = {} - name = quote(name, safe="") -- data = _api_get("{0}/{1}".format(element_type, name), server) -+ data = _api_get("{}/{}".format(element_type, name), server) - - # Format data, get properties if asked, and return the whole thing - if any(data["extraProperties"]["entity"]): -@@ -220,9 +218,9 @@ def _create_element(name, element_type, data, server=None): - data["property"] = "" - for key, value in data["properties"].items(): - if not data["property"]: -- data["property"] += "{0}={1}".format(key, value.replace(":", "\\:")) -+ data["property"] += "{}={}".format(key, value.replace(":", "\\:")) - else: -- data["property"] += ":{0}={1}".format(key, value.replace(":", "\\:")) -+ data["property"] += ":{}={}".format(key, value.replace(":", "\\:")) - del data["properties"] - - # Send request -@@ -242,7 +240,7 @@ def _update_element(name, element_type, data, server=None): - properties = [] - for key, value in data["properties"].items(): - properties.append({"name": key, "value": value}) -- _api_post("{0}/{1}/property".format(element_type, name), properties, server) -+ _api_post("{}/{}/property".format(element_type, name), properties, server) - del data["properties"] - - # If the element only contained properties -@@ -255,10 +253,10 @@ def _update_element(name, element_type, data, server=None): - update_data.update(data) - else: - __context__["retcode"] = salt.defaults.exitcodes.SALT_BUILD_FAIL -- raise CommandExecutionError("Cannot update {0}".format(name)) -+ raise 
CommandExecutionError("Cannot update {}".format(name)) - - # Finally, update the element -- _api_post("{0}/{1}".format(element_type, name), _clean_data(update_data), server) -+ _api_post("{}/{}".format(element_type, name), _clean_data(update_data), server) - return unquote(name) - - -@@ -266,7 +264,7 @@ def _delete_element(name, element_type, data, server=None): - """ - Delete an element - """ -- _api_delete("{0}/{1}".format(element_type, quote(name, safe="")), data, server) -+ _api_delete("{}/{}".format(element_type, quote(name, safe="")), data, server) - return name - - -@@ -692,4 +690,4 @@ def delete_system_properties(name, server=None): - """ - Delete a system property - """ -- _api_delete("system-properties/{0}".format(name), None, server) -+ _api_delete("system-properties/{}".format(name), None, server) -diff --git a/salt/modules/keystone.py b/salt/modules/keystone.py -index 52cb461339..e8dd2fd99d 100644 ---- a/salt/modules/keystone.py -+++ b/salt/modules/keystone.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Module for handling openstack keystone calls. - -@@ -13,6 +12,7 @@ Module for handling openstack keystone calls. - keystone.tenant: admin - keystone.tenant_id: f80919baedab48ec8931f200c65a50df - keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' -+ keystone.verify_ssl: True - - OR (for token based authentication) - -@@ -32,6 +32,7 @@ Module for handling openstack keystone calls. - keystone.tenant: admin - keystone.tenant_id: f80919baedab48ec8931f200c65a50df - keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' -+ keystone.verify_ssl: True - - openstack2: - keystone.user: admin -@@ -39,6 +40,7 @@ Module for handling openstack keystone calls. - keystone.tenant: admin - keystone.tenant_id: f80919baedab48ec8931f200c65a50df - keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' -+ keystone.verify_ssl: True - - With this configuration in place, any of the keystone functions can make use - of a configuration profile by declaring it explicitly. -@@ -49,17 +51,11 @@ Module for handling openstack keystone calls. 
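The keystone hunks that follow wire the new per-profile ``keystone.verify_ssl`` option (shown in the configuration examples above) through ``_get_kwargs`` and into ``salt.utils.http.query``, which previously hard-coded ``verify_ssl=False``. A rough sketch of the lookup-with-secure-default pattern, using a plain dict in place of Salt's real config resolution:

.. code-block:: python

    profile = {
        "keystone.user": "admin",
        "keystone.auth_url": "http://127.0.0.1:5000/v2.0/",
        # "keystone.verify_ssl" intentionally left unset
    }

    def get(key, default=None, prefix="keystone."):
        # Stand-in for the get() helper used inside _get_kwargs().
        return profile.get(prefix + key, default)

    # Unset means verify: certificate checking is only skipped when the
    # operator opts out explicitly.
    verify_ssl = get("verify_ssl", True)
    assert verify_ssl is True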
- salt '*' keystone.tenant_list profile=openstack1 - """ - --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - - import logging - --# Import Salt Libs - import salt.utils.http - --# Import 3rd-party libs --from salt.ext import six -- - HAS_KEYSTONE = False - try: - # pylint: disable=import-error -@@ -125,6 +121,7 @@ def _get_kwargs(profile=None, **connection_args): - endpoint = get("endpoint", "http://127.0.0.1:35357/v2.0") - user_domain_name = get("user_domain_name", "Default") - project_domain_name = get("project_domain_name", "Default") -+ verify_ssl = get("verify_ssl", True) - if token: - kwargs = {"token": token, "endpoint": endpoint} - else: -@@ -141,6 +138,7 @@ def _get_kwargs(profile=None, **connection_args): - # this ensures it's only passed in when defined - if insecure: - kwargs["insecure"] = True -+ kwargs["verify_ssl"] = verify_ssl - return kwargs - - -@@ -158,7 +156,7 @@ def api_version(profile=None, **connection_args): - auth_url = kwargs.get("auth_url", kwargs.get("endpoint", None)) - try: - return salt.utils.http.query( -- auth_url, decode=True, decode_type="json", verify_ssl=False -+ auth_url, decode=True, decode_type="json", verify_ssl=kwargs["verify_ssl"] - )["dict"]["version"]["id"] - except KeyError: - return None -@@ -269,7 +267,7 @@ def ec2_credentials_delete( - if not user_id: - return {"Error": "Could not resolve User ID"} - kstone.ec2.delete(user_id, access_key) -- return 'ec2 key "{0}" deleted under user id "{1}"'.format(access_key, user_id) -+ return 'ec2 key "{}" deleted under user id "{}"'.format(access_key, user_id) - - - def ec2_credentials_get( -@@ -373,7 +371,7 @@ def endpoint_get(service, region=None, profile=None, interface=None, **connectio - ] - if len(e) > 1: - return { -- "Error": "Multiple endpoints found ({0}) for the {1} service. Please specify region.".format( -+ "Error": "Multiple endpoints found ({}) for the {} service. 
Please specify region.".format( - e, service - ) - } -@@ -396,12 +394,12 @@ def endpoint_list(profile=None, **connection_args): - ret = {} - - for endpoint in kstone.endpoints.list(): -- ret[endpoint.id] = dict( -- (value, getattr(endpoint, value)) -+ ret[endpoint.id] = { -+ value: getattr(endpoint, value) - for value in dir(endpoint) - if not value.startswith("_") -- and isinstance(getattr(endpoint, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(endpoint, value), (str, dict, bool)) -+ } - return ret - - -@@ -487,7 +485,7 @@ def role_create(name, profile=None, **connection_args): - - kstone = auth(profile, **connection_args) - if "Error" not in role_get(name=name, profile=profile, **connection_args): -- return {"Error": 'Role "{0}" already exists'.format(name)} -+ return {"Error": 'Role "{}" already exists'.format(name)} - kstone.roles.create(name) - return role_get(name=name, profile=profile, **connection_args) - -@@ -518,9 +516,9 @@ def role_delete(role_id=None, name=None, profile=None, **connection_args): - role = kstone.roles.get(role_id) - kstone.roles.delete(role) - -- ret = "Role ID {0} deleted".format(role_id) -+ ret = "Role ID {} deleted".format(role_id) - if name: -- ret += " ({0})".format(name) -+ ret += " ({})".format(name) - return ret - - -@@ -564,12 +562,12 @@ def role_list(profile=None, **connection_args): - kstone = auth(profile, **connection_args) - ret = {} - for role in kstone.roles.list(): -- ret[role.name] = dict( -- (value, getattr(role, value)) -+ ret[role.name] = { -+ value: getattr(role, value) - for value in dir(role) - if not value.startswith("_") -- and isinstance(getattr(role, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(role, value), (str, dict, bool)) -+ } - return ret - - -@@ -608,7 +606,7 @@ def service_delete(service_id=None, name=None, profile=None, **connection_args): - "id" - ] - kstone.services.delete(service_id) -- return 'Keystone service ID "{0}" deleted'.format(service_id) -+ return 'Keystone service ID "{}" deleted'.format(service_id) - - - def service_get(service_id=None, name=None, profile=None, **connection_args): -@@ -633,12 +631,12 @@ def service_get(service_id=None, name=None, profile=None, **connection_args): - if not service_id: - return {"Error": "Unable to resolve service id"} - service = kstone.services.get(service_id) -- ret[service.name] = dict( -- (value, getattr(service, value)) -+ ret[service.name] = { -+ value: getattr(service, value) - for value in dir(service) - if not value.startswith("_") -- and isinstance(getattr(service, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(service, value), (str, dict, bool)) -+ } - return ret - - -@@ -655,12 +653,12 @@ def service_list(profile=None, **connection_args): - kstone = auth(profile, **connection_args) - ret = {} - for service in kstone.services.list(): -- ret[service.name] = dict( -- (value, getattr(service, value)) -+ ret[service.name] = { -+ value: getattr(service, value) - for value in dir(service) - if not value.startswith("_") -- and isinstance(getattr(service, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(service, value), (str, dict, bool)) -+ } - return ret - - -@@ -741,10 +739,10 @@ def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args): - if not tenant_id: - return {"Error": "Unable to resolve tenant id"} - getattr(kstone, _TENANTS, None).delete(tenant_id) -- ret = "Tenant ID {0} deleted".format(tenant_id) -+ ret = "Tenant ID {} deleted".format(tenant_id) - 
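The repeated rewrite above and below, from ``dict((value, getattr(obj, value)) for ...)`` to a dict comprehension, changes nothing functionally; the comprehension is simply the idiomatic Python 3 spelling and avoids passing a generator to ``dict()``. Illustrated with a hypothetical stand-in for a keystoneclient object:

.. code-block:: python

    class Endpoint:
        # Hypothetical stand-in; real objects come from keystoneclient.
        id = "abc123"
        region = "RegionOne"
        _private = "skipped"

    endpoint = Endpoint()
    ret = {
        value: getattr(endpoint, value)
        for value in dir(endpoint)
        if not value.startswith("_")
        and isinstance(getattr(endpoint, value), (str, dict, bool))
    }
    assert ret == {"id": "abc123", "region": "RegionOne"}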
if name: - -- ret += " ({0})".format(name) -+ ret += " ({})".format(name) - return ret - - -@@ -805,12 +803,12 @@ def tenant_get(tenant_id=None, name=None, profile=None, **connection_args): - if not tenant_id: - return {"Error": "Unable to resolve tenant id"} - tenant = getattr(kstone, _TENANTS, None).get(tenant_id) -- ret[tenant.name] = dict( -- (value, getattr(tenant, value)) -+ ret[tenant.name] = { -+ value: getattr(tenant, value) - for value in dir(tenant) - if not value.startswith("_") -- and isinstance(getattr(tenant, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(tenant, value), (str, dict, bool)) -+ } - return ret - - -@@ -863,12 +861,12 @@ def tenant_list(profile=None, **connection_args): - ret = {} - - for tenant in getattr(kstone, _TENANTS, None).list(): -- ret[tenant.name] = dict( -- (value, getattr(tenant, value)) -+ ret[tenant.name] = { -+ value: getattr(tenant, value) - for value in dir(tenant) - if not value.startswith("_") -- and isinstance(getattr(tenant, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(tenant, value), (str, dict, bool)) -+ } - return ret - - -@@ -938,12 +936,12 @@ def tenant_update( - tenant_id, name=name, description=description, enabled=enabled - ) - -- return dict( -- (value, getattr(updated, value)) -+ return { -+ value: getattr(updated, value) - for value in dir(updated) - if not value.startswith("_") -- and isinstance(getattr(updated, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(updated, value), (str, dict, bool)) -+ } - - - def project_update( -@@ -1034,12 +1032,12 @@ def user_list(profile=None, **connection_args): - kstone = auth(profile, **connection_args) - ret = {} - for user in kstone.users.list(): -- ret[user.name] = dict( -- (value, getattr(user, value, None)) -+ ret[user.name] = { -+ value: getattr(user, value, None) - for value in dir(user) - if not value.startswith("_") -- and isinstance(getattr(user, value, None), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(user, value, None), (str, dict, bool)) -+ } - tenant_id = getattr(user, "tenantId", None) - if tenant_id: - ret[user.name]["tenant_id"] = tenant_id -@@ -1070,16 +1068,16 @@ def user_get(user_id=None, name=None, profile=None, **connection_args): - try: - user = kstone.users.get(user_id) - except keystoneclient.exceptions.NotFound: -- msg = "Could not find user '{0}'".format(user_id) -+ msg = "Could not find user '{}'".format(user_id) - log.error(msg) - return {"Error": msg} - -- ret[user.name] = dict( -- (value, getattr(user, value, None)) -+ ret[user.name] = { -+ value: getattr(user, value, None) - for value in dir(user) - if not value.startswith("_") -- and isinstance(getattr(user, value, None), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(user, value, None), (str, dict, bool)) -+ } - - tenant_id = getattr(user, "tenantId", None) - if tenant_id: -@@ -1153,10 +1151,10 @@ def user_delete(user_id=None, name=None, profile=None, **connection_args): - if not user_id: - return {"Error": "Unable to resolve user id"} - kstone.users.delete(user_id) -- ret = "User ID {0} deleted".format(user_id) -+ ret = "User ID {} deleted".format(user_id) - if name: - -- ret += " ({0})".format(name) -+ ret += " ({})".format(name) - return ret - - -@@ -1204,7 +1202,7 @@ def user_update( - if description is None: - description = getattr(user, "description", None) - else: -- description = six.text_type(description) -+ description = str(description) - - project_id = None - if project: -@@ -1235,7 +1233,7 
@@ def user_update( - if tenant_id: - kstone.users.update_tenant(user_id, tenant_id) - -- ret = "Info updated for user ID {0}".format(user_id) -+ ret = "Info updated for user ID {}".format(user_id) - return ret - - -@@ -1313,9 +1311,9 @@ def user_password_update( - kstone.users.update(user=user_id, password=password) - else: - kstone.users.update_password(user=user_id, password=password) -- ret = "Password updated for user ID {0}".format(user_id) -+ ret = "Password updated for user ID {}".format(user_id) - if name: -- ret += " ({0})".format(name) -+ ret += " ({})".format(name) - return ret - - -@@ -1356,9 +1354,9 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - "id" - ) - else: -- user = next( -- six.iterkeys(user_get(user_id, profile=profile, **connection_args)) -- )["name"] -+ user = next(iter(user_get(user_id, profile=profile, **connection_args).keys()))[ -+ "name" -+ ] - if not user_id: - return {"Error": "Unable to resolve user id"} - -@@ -1368,7 +1366,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - ].get("id") - else: - tenant = next( -- six.iterkeys(tenant_get(tenant_id, profile=profile, **connection_args)) -+ iter(tenant_get(tenant_id, profile=profile, **connection_args).keys()) - )["name"] - if not tenant_id: - return {"Error": "Unable to resolve tenant/project id"} -@@ -1376,9 +1374,9 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - if role: - role_id = role_get(name=role, profile=profile, **connection_args)[role]["id"] - else: -- role = next( -- six.iterkeys(role_get(role_id, profile=profile, **connection_args)) -- )["name"] -+ role = next(iter(role_get(role_id, profile=profile, **connection_args).keys()))[ -+ "name" -+ ] - if not role_id: - return {"Error": "Unable to resolve role id"} - -@@ -1427,9 +1425,9 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - "id" - ) - else: -- user = next( -- six.iterkeys(user_get(user_id, profile=profile, **connection_args)) -- )["name"] -+ user = next(iter(user_get(user_id, profile=profile, **connection_args).keys()))[ -+ "name" -+ ] - if not user_id: - return {"Error": "Unable to resolve user id"} - -@@ -1439,7 +1437,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - ].get("id") - else: - tenant = next( -- six.iterkeys(tenant_get(tenant_id, profile=profile, **connection_args)) -+ iter(tenant_get(tenant_id, profile=profile, **connection_args).keys()) - )["name"] - if not tenant_id: - return {"Error": "Unable to resolve tenant/project id"} -@@ -1447,7 +1445,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4 - if role: - role_id = role_get(name=role, profile=profile, **connection_args)[role]["id"] - else: -- role = next(six.iterkeys(role_get(role_id)))["name"] -+ role = next(iter(role_get(role_id).keys()))["name"] - if not role_id: - return {"Error": "Unable to resolve role id"} - -@@ -1504,12 +1502,12 @@ tenant_id=7167a092ece84bae8cead4bf9d15bb3b - - if _OS_IDENTITY_API_VERSION > 2: - for role in kstone.roles.list(user=user_id, project=tenant_id): -- ret[role.name] = dict( -- (value, getattr(role, value)) -+ ret[role.name] = { -+ value: getattr(role, value) - for value in dir(role) - if not value.startswith("_") -- and isinstance(getattr(role, value), (six.string_types, dict, bool)) -- ) -+ and isinstance(getattr(role, value), (str, dict, bool)) -+ } - else: - for role in kstone.roles.roles_for_user(user=user_id, tenant=tenant_id): - ret[role.name] = { -diff --git a/salt/modules/restartcheck.py b/salt/modules/restartcheck.py -index 4d541da357..c996e39dc7 100644 ---- a/salt/modules/restartcheck.py -+++ b/salt/modules/restartcheck.py -@@ -11,6 +11,7 @@ 
https://packages.debian.org/debian-goodies) and psdel by Sam Morris. - """ - import os - import re -+import shlex - import subprocess - import sys - import time -@@ -612,7 +613,8 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs): - for package in packages: - _check_timeout(start_time, timeout) - cmd = cmd_pkg_query + package -- paths = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) -+ cmd = shlex.split(cmd) -+ paths = subprocess.Popen(cmd, stdout=subprocess.PIPE) - - while True: - _check_timeout(start_time, timeout) -diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py -index b3de8afb64..8fc2c410f2 100644 ---- a/salt/modules/vsphere.py -+++ b/salt/modules/vsphere.py -@@ -336,7 +336,7 @@ def _get_proxy_connection_details(): - details = __salt__["esxvm.get_details"]() - else: - raise CommandExecutionError("'{}' proxy is not supported" "".format(proxytype)) -- return ( -+ proxy_details = [ - details.get("vcenter") if "vcenter" in details else details.get("host"), - details.get("username"), - details.get("password"), -@@ -345,7 +345,10 @@ - details.get("mechanism"), - details.get("principal"), - details.get("domain"), -- ) -+ ] -+ if "verify_ssl" in details: -+ proxy_details.append(details.get("verify_ssl")) -+ return tuple(proxy_details) - - - def supports_proxies(*proxy_types): -@@ -429,7 +432,7 @@ def gets_service_instance_via_proxy(fn): - # case 1: The call was made with enough positional - # parameters to include 'service_instance' - if not args[idx]: -- local_service_instance = salt.utils.vmware.get_service_instance( -+ local_service_instance = salt.utils.vmware.get_service_instance( # pylint: disable=no-value-for-parameter - *connection_details - ) - # Tuples are immutable, so if we want to change what -@@ -440,7 +443,7 @@ def gets_service_instance_via_proxy(fn): - # case 2: Not enough positional parameters so - # 'service_instance' must be a named parameter - if not kwargs.get("service_instance"): -- local_service_instance = salt.utils.vmware.get_service_instance( -+ local_service_instance = salt.utils.vmware.get_service_instance( # pylint: disable=no-value-for-parameter - *connection_details - ) - kwargs["service_instance"] = local_service_instance -@@ -448,7 +451,7 @@ def gets_service_instance_via_proxy(fn): - # 'service_instance' is not a parameter in the function definition - # but it will be caught by the **kwargs parameter - if not kwargs.get("service_instance"): -- local_service_instance = salt.utils.vmware.get_service_instance( -+ local_service_instance = salt.utils.vmware.get_service_instance( # pylint: disable=no-value-for-parameter - *connection_details - ) - kwargs["service_instance"] = local_service_instance -@@ -485,7 +488,9 @@ def get_service_instance_via_proxy(service_instance=None): - See note above - """ - connection_details = _get_proxy_connection_details() -- return salt.utils.vmware.get_service_instance(*connection_details) -+ return salt.utils.vmware.get_service_instance( # pylint: disable=no-value-for-parameter -+ *connection_details -+ ) - - - @depends(HAS_PYVMOMI) -@@ -1587,7 +1592,7 @@ def upload_ssh_key( - ssh_key_file=None, - protocol=None, - port=None, -- certificate_verify=False, -+ certificate_verify=None, - ): - """ - Upload an ssh key for root to an ESXi host via http PUT.
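In the ``restartcheck`` hunk above, the package-query command is now tokenized with ``shlex.split`` so that ``subprocess.Popen`` can run it without ``shell=True``; a crafted package name is then passed as a literal argument instead of being interpreted by ``/bin/sh``. Roughly (a harmless ``echo`` stands in for the real package-manager query):

.. code-block:: python

    import shlex
    import subprocess

    cmd = "echo 'rpm -ql some-package'"
    argv = shlex.split(cmd)  # ['echo', 'rpm -ql some-package']
    # No shell is involved, so quoting, globbing and $(...) inside the
    # package name have no effect.
    out, _ = subprocess.Popen(argv, stdout=subprocess.PIPE).communicate()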
-@@ -1604,7 +1609,7 @@ def upload_ssh_key( - :param protocol: defaults to https, can be http if ssl is disabled on ESXi - :param port: defaults to 443 for https - :param certificate_verify: If true require that the SSL connection present -- a valid certificate -+ a valid certificate. Default: True - :return: Dictionary with a 'status' key, True if upload is successful. - If upload is unsuccessful, 'status' key will be False and - an 'Error' key will have an informative message. -@@ -1620,6 +1625,8 @@ def upload_ssh_key( - protocol = "https" - if port is None: - port = 443 -+ if certificate_verify is None: -+ certificate_verify = True - - url = "{}://{}:{}/host/ssh_root_authorized_keys".format(protocol, host, port) - ret = {} -@@ -1662,7 +1669,7 @@ def upload_ssh_key( - - @ignores_kwargs("credstore") - def get_ssh_key( -- host, username, password, protocol=None, port=None, certificate_verify=False -+ host, username, password, protocol=None, port=None, certificate_verify=None - ): - """ - Retrieve the authorized_keys entry for root. -@@ -1674,7 +1681,7 @@ def get_ssh_key( - :param protocol: defaults to https, can be http if ssl is disabled on ESXi - :param port: defaults to 443 for https - :param certificate_verify: If true require that the SSL connection present -- a valid certificate -+ a valid certificate. Default: True - :return: True if upload is successful - - CLI Example: -@@ -1688,6 +1695,8 @@ def get_ssh_key( - protocol = "https" - if port is None: - port = 443 -+ if certificate_verify is None: -+ certificate_verify = True - - url = "{}://{}:{}/host/ssh_root_authorized_keys".format(protocol, host, port) - ret = {} -@@ -1717,7 +1726,7 @@ def get_ssh_key( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_host_datetime( -- host, username, password, protocol=None, port=None, host_names=None -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True - ): - """ - Get the date/time information for a given host or list of host_names. -@@ -1748,6 +1757,9 @@ def get_host_datetime( - ``host`` location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -1760,7 +1772,12 @@ def get_host_datetime( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -1775,7 +1792,9 @@ def get_host_datetime( - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def get_ntp_config(host, username, password, protocol=None, port=None, host_names=None): -+def get_ntp_config( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Get the NTP configuration information for a given host or list of host_names. - -@@ -1805,6 +1824,9 @@ def get_ntp_config(host, username, password, protocol=None, port=None, host_name - ``host`` location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -1817,7 +1839,12 @@ def get_ntp_config(host, username, password, protocol=None, port=None, host_name - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -1832,7 +1859,14 @@ def get_ntp_config(host, username, password, protocol=None, port=None, host_name - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_service_policy( -- host, username, password, service_name, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ service_name, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Get the service name's policy for a given host or list of hosts. -@@ -1879,6 +1913,9 @@ def get_service_policy( - for the ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -1891,7 +1928,12 @@ def get_service_policy( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - valid_services = [ - "DCUI", -@@ -1959,7 +2001,14 @@ def get_service_policy( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_service_running( -- host, username, password, service_name, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ service_name, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Get the service name's running state for a given host or list of hosts. -@@ -2006,6 +2055,9 @@ def get_service_running( - for the ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2018,7 +2070,12 @@ def get_service_running( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - valid_services = [ - "DCUI", -@@ -2086,7 +2143,13 @@ def get_service_running( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_vmotion_enabled( -- host, username, password, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Get the VMotion enabled status for a given host or a list of host_names. Returns ``True`` -@@ -2118,6 +2181,9 @@ def get_vmotion_enabled( - ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2130,7 +2196,12 @@ def get_vmotion_enabled( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -2148,7 +2219,13 @@ def get_vmotion_enabled( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_vsan_enabled( -- host, username, password, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Get the VSAN enabled status for a given host or a list of host_names. Returns ``True`` -@@ -2181,6 +2258,9 @@ def get_vsan_enabled( - ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2193,7 +2273,12 @@ def get_vsan_enabled( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -2215,7 +2300,13 @@ def get_vsan_enabled( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def get_vsan_eligible_disks( -- host, username, password, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Returns a list of VSAN-eligible disks for a given host or list of host_names. -@@ -2246,6 +2337,9 @@ def get_vsan_eligible_disks( - for the ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2258,7 +2352,12 @@ def get_vsan_eligible_disks( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - response = _get_vsan_eligible_disks(service_instance, host, host_names) -@@ -2310,7 +2409,9 @@ def test_vcenter_connection(service_instance=None): - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def system_info(host, username, password, protocol=None, port=None): -+def system_info( -+ host, username, password, protocol=None, port=None, verify_ssl=True, -+): - """ - Return system information about a VMware environment. - -@@ -2331,6 +2432,9 @@ def system_info(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2338,7 +2442,12 @@ def system_info(host, username, password, protocol=None, port=None): - salt '*' vsphere.system_info 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - ret = salt.utils.vmware.get_inventory(service_instance).about.__dict__ - if "apiType" in ret: -@@ -2351,7 +2460,9 @@ def system_info(host, username, password, protocol=None, port=None): - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_datacenters(host, username, password, protocol=None, port=None): -+def list_datacenters( -+ host, username, password, protocol=None, port=None, verify_ssl=True -+): - """ - Returns a list of datacenters for the specified host. - -@@ -2372,6 +2483,9 @@ def list_datacenters(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2380,14 +2494,19 @@ def list_datacenters(host, username, password, protocol=None, port=None): - - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_datacenters(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_clusters(host, username, password, protocol=None, port=None): -+def list_clusters(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of clusters for the specified host. - -@@ -2408,6 +2527,9 @@ def list_clusters(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2416,14 +2538,21 @@ def list_clusters(host, username, password, protocol=None, port=None): - - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_clusters(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_datastore_clusters(host, username, password, protocol=None, port=None): -+def list_datastore_clusters( -+ host, username, password, protocol=None, port=None, verify_ssl=True -+): - """ - Returns a list of datastore clusters for the specified host. - -@@ -2444,6 +2573,9 @@ def list_datastore_clusters(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2451,14 +2583,21 @@ def list_datastore_clusters(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_datastore_clusters 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_datastore_clusters(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_datastores(host, username, password, protocol=None, port=None): -+def list_datastores( -+ host, username, password, protocol=None, port=None, verify_ssl=True -+): - """ - Returns a list of datastores for the specified host. - -@@ -2479,6 +2618,9 @@ def list_datastores(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2486,14 +2628,19 @@ def list_datastores(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_datastores 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_datastores(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_hosts(host, username, password, protocol=None, port=None): -+def list_hosts(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of hosts for the specified VMware environment. - -@@ -2514,6 +2661,9 @@ def list_hosts(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2521,14 +2671,21 @@ def list_hosts(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_hosts 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_hosts(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_resourcepools(host, username, password, protocol=None, port=None): -+def list_resourcepools( -+ host, username, password, protocol=None, port=None, verify_ssl=True -+): - """ - Returns a list of resource pools for the specified host. - -@@ -2549,6 +2706,9 @@ def list_resourcepools(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2556,14 +2716,19 @@ def list_resourcepools(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_resourcepools 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_resourcepools(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_networks(host, username, password, protocol=None, port=None): -+def list_networks(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of networks for the specified host. - -@@ -2584,6 +2749,9 @@ def list_networks(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2591,14 +2759,19 @@ def list_networks(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_networks 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_networks(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_vms(host, username, password, protocol=None, port=None): -+def list_vms(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of VMs for the specified host. - -@@ -2619,6 +2792,9 @@ def list_vms(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2626,14 +2802,19 @@ def list_vms(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_vms 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_vms(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_folders(host, username, password, protocol=None, port=None): -+def list_folders(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of folders for the specified host. - -@@ -2654,6 +2835,9 @@ def list_folders(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2661,14 +2845,19 @@ def list_folders(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_folders 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_folders(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_dvs(host, username, password, protocol=None, port=None): -+def list_dvs(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of distributed virtual switches for the specified host. - -@@ -2689,6 +2878,9 @@ def list_dvs(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2696,14 +2888,19 @@ def list_dvs(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_dvs 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_dvs(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_vapps(host, username, password, protocol=None, port=None): -+def list_vapps(host, username, password, protocol=None, port=None, verify_ssl=True): - """ - Returns a list of vApps for the specified host. - -@@ -2724,6 +2921,9 @@ def list_vapps(host, username, password, protocol=None, port=None): - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2732,14 +2932,21 @@ def list_vapps(host, username, password, protocol=None, port=None): - salt '*' vsphere.list_vapps 1.2.3.4 root bad-password - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - return salt.utils.vmware.list_vapps(service_instance) - - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_ssds(host, username, password, protocol=None, port=None, host_names=None): -+def list_ssds( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Returns a list of SSDs for the given host or list of host_names. - -@@ -2769,6 +2976,9 @@ def list_ssds(host, username, password, protocol=None, port=None, host_names=Non - ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -2781,7 +2991,12 @@ def list_ssds(host, username, password, protocol=None, port=None, host_names=Non - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -2798,7 +3013,9 @@ def list_ssds(host, username, password, protocol=None, port=None, host_names=Non - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def list_non_ssds(host, username, password, protocol=None, port=None, host_names=None): -+def list_non_ssds( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Returns a list of Non-SSD disks for the given host or list of host_names. - -@@ -2835,6 +3052,9 @@ def list_non_ssds(host, username, password, protocol=None, port=None, host_names - ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2847,7 +3067,12 @@ def list_non_ssds(host, username, password, protocol=None, port=None, host_names - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -2865,7 +3090,14 @@ def list_non_ssds(host, username, password, protocol=None, port=None, host_names - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def set_ntp_config( -- host, username, password, ntp_servers, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ ntp_servers, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Set NTP configuration for a given host of list of host_names. -@@ -2900,6 +3132,9 @@ def set_ntp_config( - ``host`` location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -2912,7 +3147,12 @@ def set_ntp_config( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - if not isinstance(ntp_servers, list): - raise CommandExecutionError("'ntp_servers' must be a list.") -@@ -2947,7 +3187,14 @@ def set_ntp_config( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def service_start( -- host, username, password, service_name, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ service_name, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Start the named service for the given host or list of hosts. -@@ -2994,6 +3241,9 @@ def service_start( - location instead. 
This is useful for when service instance connection information - is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3006,7 +3256,12 @@ def service_start( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - valid_services = [ -@@ -3074,7 +3329,14 @@ def service_start( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def service_stop( -- host, username, password, service_name, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ service_name, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Stop the named service for the given host or list of hosts. -@@ -3121,6 +3383,9 @@ def service_stop( - location instead. This is useful for when service instance connection information - is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3133,7 +3398,12 @@ def service_stop( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - valid_services = [ -@@ -3199,7 +3469,14 @@ def service_stop( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def service_restart( -- host, username, password, service_name, protocol=None, port=None, host_names=None -+ host, -+ username, -+ password, -+ service_name, -+ protocol=None, -+ port=None, -+ host_names=None, -+ verify_ssl=True, - ): - """ - Restart the named service for the given host or list of hosts. -@@ -3246,6 +3523,9 @@ def service_restart( - location instead. This is useful for when service instance connection information - is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3258,7 +3538,12 @@ def service_restart( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - valid_services = [ -@@ -3334,6 +3619,7 @@ def set_service_policy( - protocol=None, - port=None, - host_names=None, -+ verify_ssl=True, - ): - """ - Set the service name's policy for a given host or list of hosts. -@@ -3383,6 +3669,9 @@ def set_service_policy( - for the ``host`` location instead. This is useful for when service instance - connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -3395,7 +3684,12 @@ def set_service_policy( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - valid_services = [ -@@ -3481,7 +3775,7 @@ def set_service_policy( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def update_host_datetime( -- host, username, password, protocol=None, port=None, host_names=None -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True - ): - """ - Update the date/time on the given host or list of host_names. This function should be -@@ -3513,6 +3807,9 @@ def update_host_datetime( - location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3525,7 +3822,12 @@ def update_host_datetime( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -3550,7 +3852,7 @@ def update_host_datetime( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def update_host_password( -- host, username, password, new_password, protocol=None, port=None -+ host, username, password, new_password, protocol=None, port=None, verify_ssl=True - ): - """ - Update the password for a given host. -@@ -3577,6 +3879,9 @@ def update_host_password( - Optionally set to alternate port if the host is not using the default - port. Default port is ``443``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3585,7 +3890,12 @@ def update_host_password( - - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - # Get LocalAccountManager object - account_manager = salt.utils.vmware.get_inventory(service_instance).accountManager -@@ -3615,7 +3925,7 @@ def update_host_password( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def vmotion_disable( -- host, username, password, protocol=None, port=None, host_names=None -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True - ): - """ - Disable vMotion for a given host or list of host_names. -@@ -3646,6 +3956,9 @@ def vmotion_disable( - location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. 
code-block:: bash -@@ -3658,7 +3971,12 @@ def vmotion_disable( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -3683,7 +4001,14 @@ def vmotion_disable( - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") - def vmotion_enable( -- host, username, password, protocol=None, port=None, host_names=None, device="vmk0" -+ host, -+ username, -+ password, -+ protocol=None, -+ port=None, -+ host_names=None, -+ device="vmk0", -+ verify_ssl=True, - ): - """ - Enable vMotion for a given host or list of host_names. -@@ -3718,6 +4043,9 @@ def vmotion_enable( - The device that uniquely identifies the VirtualNic that will be used for - VMotion for each host. Defaults to ``vmk0``. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3730,7 +4058,12 @@ def vmotion_enable( - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - ret = {} -@@ -3754,7 +4087,9 @@ def vmotion_enable( - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None): -+def vsan_add_disks( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names. - -@@ -3785,6 +4120,9 @@ def vsan_add_disks(host, username, password, protocol=None, port=None, host_name - VSAN system for the ``host`` location instead. This is useful for when service - instance connection information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3797,7 +4135,12 @@ def vsan_add_disks(host, username, password, protocol=None, port=None, host_name - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - host_names = _check_hosts(service_instance, host, host_names) - response = _get_vsan_eligible_disks(service_instance, host, host_names) -@@ -3872,7 +4215,9 @@ def vsan_add_disks(host, username, password, protocol=None, port=None, host_name - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def vsan_disable(host, username, password, protocol=None, port=None, host_names=None): -+def vsan_disable( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Disable VSAN for a given host or list of host_names. - -@@ -3902,6 +4247,9 @@ def vsan_disable(host, username, password, protocol=None, port=None, host_names= - location instead. This is useful for when service instance connection - information is used for a single ESXi host. 
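Every hunk in this stretch applies the same mechanical change: the wrapper gains a ``verify_ssl=True`` keyword, documents it, and forwards it verbatim to ``salt.utils.vmware.get_service_instance``. A minimal runnable sketch of that pattern, with a stub standing in for the real ``get_service_instance`` (the stub body is an assumption; only the keyword threading is taken from the hunks):

.. code-block:: python

    # Stub for salt.utils.vmware.get_service_instance; the real function
    # opens a pyVmomi connection. Only the signature matters here.
    def get_service_instance(host, username, password, protocol=None, port=None, verify_ssl=True):
        return {"host": host, "verify_ssl": verify_ssl}

    # Shape shared by list_networks, list_vms, vsan_disable, etc. after the
    # patch: the keyword defaults to True and is passed straight through.
    def list_networks(host, username, password, protocol=None, port=None, verify_ssl=True):
        return get_service_instance(
            host=host,
            username=username,
            password=password,
            protocol=protocol,
            port=port,
            verify_ssl=verify_ssl,
        )

    assert list_networks("1.2.3.4", "root", "pw")["verify_ssl"] is True
    assert list_networks("1.2.3.4", "root", "pw", verify_ssl=False)["verify_ssl"] is False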
- -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -3914,7 +4262,12 @@ def vsan_disable(host, username, password, protocol=None, port=None, host_names= - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - # Create a VSAN Configuration Object and set the enabled attribute to True - vsan_config = vim.vsan.host.ConfigInfo() -@@ -3961,7 +4314,9 @@ def vsan_disable(host, username, password, protocol=None, port=None, host_names= - - @depends(HAS_PYVMOMI) - @ignores_kwargs("credstore") --def vsan_enable(host, username, password, protocol=None, port=None, host_names=None): -+def vsan_enable( -+ host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True -+): - """ - Enable VSAN for a given host or list of host_names. - -@@ -3991,6 +4346,9 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N - location instead. This is useful for when service instance connection - information is used for a single ESXi host. - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -4003,7 +4361,12 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N - host_names='[esxi-1.host.com, esxi-2.host.com]' - """ - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - # Create a VSAN Configuration Object and set the enabled attribute to True - vsan_config = vim.vsan.host.ConfigInfo() -@@ -7489,6 +7852,7 @@ def add_host_to_dvs( - protocol=None, - port=None, - host_names=None, -+ verify_ssl=True, - ): - """ - Adds an ESXi host to a vSphere Distributed Virtual Switch and migrates -@@ -7531,6 +7895,9 @@ def add_host_to_dvs( - host_names: - An array of VMware host names to migrate - -+ verify_ssl -+ Verify the SSL certificate. Default: True -+ - CLI Example: - - .. code-block:: bash -@@ -7658,7 +8025,12 @@ def add_host_to_dvs( - ret["success"] = True - ret["message"] = [] - service_instance = salt.utils.vmware.get_service_instance( -- host=host, username=username, password=password, protocol=protocol, port=port -+ host=host, -+ username=username, -+ password=password, -+ protocol=protocol, -+ port=port, -+ verify_ssl=verify_ssl, - ) - dvs = salt.utils.vmware._get_dvs(service_instance, dvs_name) - if not dvs: -@@ -9926,7 +10298,7 @@ def _delete_device(device): - return device_spec - - --def _get_client(server, username, password): -+def _get_client(server, username, password, verify_ssl=None, ca_bundle=None): - """ - Establish client through proxy or with user provided credentials. - -@@ -9936,12 +10308,17 @@ def _get_client(server, username, password): - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :returns: - vSphere Client instance. 
- :rtype: - vSphere.Client - """ - # Get salted vSphere Client -+ details = None - if not (server and username and password): - # User didn't provide CLI args so use proxy information - details = __salt__["vcenter.get_details"]() -@@ -9949,9 +10326,32 @@ def _get_client(server, username, password): - username = details["username"] - password = details["password"] - -+ if verify_ssl is None: -+ if details is None: -+ details = __salt__["vcenter.get_details"]() -+ verify_ssl = details.get("verify_ssl", True) -+ if verify_ssl is None: -+ verify_ssl = True -+ -+ if ca_bundle is None: -+ if details is None: -+ details = __salt__["vcenter.get_details"]() -+ ca_bundle = details.get("ca_bundle", None) -+ -+ if verify_ssl is False and ca_bundle is not None: -+ log.error("Cannot set verify_ssl to False and ca_bundle together") -+ return False -+ -+ if ca_bundle: -+ ca_bundle = salt.utils.http.get_ca_bundle({"ca_bundle": ca_bundle}) -+ - # Establish connection with client - client = salt.utils.vmware.get_vsphere_client( -- server=server, username=username, password=password -+ server=server, -+ username=username, -+ password=password, -+ verify_ssl=verify_ssl, -+ ca_bundle=ca_bundle, - ) - # Will return None if utility function causes Unauthenticated error - return client -@@ -9961,7 +10361,12 @@ def _get_client(server, username, password): - @supports_proxies("vcenter") - @gets_service_instance_via_proxy - def list_tag_categories( -- server=None, username=None, password=None, service_instance=None -+ server=None, -+ username=None, -+ password=None, -+ service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - List existing categories a user has access to. -@@ -9978,13 +10383,19 @@ def list_tag_categories( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :returns: - Value(s) of category_id. - :rtype: - list of str - """ - categories = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - categories = client.tagging.Category.list() -@@ -9994,7 +10405,14 @@ def list_tag_categories( - @depends(HAS_PYVMOMI, HAS_VSPHERE_SDK) - @supports_proxies("vcenter") - @gets_service_instance_via_proxy --def list_tags(server=None, username=None, password=None, service_instance=None): -+def list_tags( -+ server=None, -+ username=None, -+ password=None, -+ service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, -+): - """ - List existing tags a user has access to. - -@@ -10010,13 +10428,19 @@ def list_tags(server=None, username=None, password=None, service_instance=None): - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :return: - Value(s) of tag_id. 
- :rtype: - list of str - """ - tags = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - tags = client.tagging.Tag.list() -@@ -10034,6 +10458,8 @@ def attach_tag( - username=None, - password=None, - service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - Attach an existing tag to an input object. -@@ -10066,6 +10492,10 @@ def attach_tag( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :return: - The list of all tag identifiers that correspond to the - tags attached to the given object. -@@ -10077,7 +10507,9 @@ def attach_tag( - if the user can not be authenticated. - """ - tag_attached = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - # Create dynamic id object associated with a type and an id. -@@ -10110,6 +10542,8 @@ def list_attached_tags( - username=None, - password=None, - service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - List existing tags a user has access to. -@@ -10132,6 +10566,10 @@ def list_attached_tags( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :return: - The list of all tag identifiers that correspond to the - tags attached to the given object. -@@ -10143,7 +10581,9 @@ def list_attached_tags( - if the user can not be authenticated. - """ - attached_tags = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - # Create dynamic id object associated with a type and an id. -@@ -10175,6 +10615,8 @@ def create_tag_category( - username=None, - password=None, - service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - Create a category with given cardinality. -@@ -10197,6 +10639,10 @@ def create_tag_category( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :return: - Identifier of the created category. - :rtype: -@@ -10210,7 +10656,9 @@ def create_tag_category( - if you do not have the privilege to create a category. 
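The most involved change in this file is ``_get_client`` above: ``verify_ssl`` and ``ca_bundle`` are resolved lazily from the proxy's ``vcenter.get_details`` only when the caller did not supply them, an explicit null still falls back to verification, and the contradictory pair of ``verify_ssl=False`` plus a ``ca_bundle`` is rejected. A runnable mirror of that resolution order (``get_details`` stands in for ``__salt__["vcenter.get_details"]``; the real code logs an error and returns ``False`` rather than raising):

.. code-block:: python

    def resolve_tls_options(verify_ssl, ca_bundle, get_details):
        details = None
        if verify_ssl is None:
            details = get_details()
            verify_ssl = details.get("verify_ssl", True)
            if verify_ssl is None:  # explicit null in proxy config also means "verify"
                verify_ssl = True
        if ca_bundle is None:
            if details is None:  # fetch the proxy details at most once
                details = get_details()
            ca_bundle = details.get("ca_bundle")
        if verify_ssl is False and ca_bundle is not None:
            raise ValueError("Cannot set verify_ssl to False and ca_bundle together")
        return verify_ssl, ca_bundle

    assert resolve_tls_options(None, None, dict) == (True, None)
    assert resolve_tls_options(False, None, dict) == (False, None)
    assert resolve_tls_options(None, None, lambda: {"verify_ssl": None}) == (True, None)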
- """ - category_created = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - if cardinality == "SINGLE": -@@ -10241,7 +10689,13 @@ def create_tag_category( - @supports_proxies("vcenter") - @gets_service_instance_via_proxy - def delete_tag_category( -- category_id, server=None, username=None, password=None, service_instance=None -+ category_id, -+ server=None, -+ username=None, -+ password=None, -+ service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - Delete a category. -@@ -10262,6 +10716,10 @@ def delete_tag_category( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :raise: NotFound - if the tag for the given tag_id does not exist in the system. - :raise: Unauthorized -@@ -10270,7 +10728,9 @@ def delete_tag_category( - if the user can not be authenticated. - """ - category_deleted = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - try: -@@ -10294,6 +10754,8 @@ def create_tag( - username=None, - password=None, - service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - Create a tag under a category with given description. -@@ -10316,6 +10778,10 @@ def create_tag( - Given description of tag category. - :param str category_id: - Value of category_id representative of the category created previously. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :return: - The identifier of the created tag. - :rtype: -@@ -10332,7 +10798,9 @@ def create_tag( - if you do not have the privilege to create tag. - """ - tag_created = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - create_spec = client.tagging.Tag.CreateSpec() -@@ -10353,7 +10821,13 @@ def create_tag( - @supports_proxies("vcenter") - @gets_service_instance_via_proxy - def delete_tag( -- tag_id, server=None, username=None, password=None, service_instance=None -+ tag_id, -+ server=None, -+ username=None, -+ password=None, -+ service_instance=None, -+ verify_ssl=None, -+ ca_bundle=None, - ): - """ - Delete a tag. -@@ -10374,6 +10848,10 @@ def delete_tag( - Username associated with the vCenter center. - :param basestring password: - Password associated with the vCenter center. -+ :param boolean verify_ssl: -+ Verify the SSL certificate. Default: True -+ :param basestring ca_bundle: -+ Path to the ca bundle to use when verifying SSL certificates. - :raise: AlreadyExists - if the name provided in the create_spec is the name of an already - existing category. -@@ -10383,7 +10861,9 @@ def delete_tag( - if you do not have the privilege to create a category. 
- """ - tag_deleted = None -- client = _get_client(server, username, password) -+ client = _get_client( -+ server, username, password, verify_ssl=verify_ssl, ca_bundle=ca_bundle -+ ) - - if client: - try: -diff --git a/salt/modules/zenoss.py b/salt/modules/zenoss.py -index 9c6b7de7b5..5cb64bed18 100644 ---- a/salt/modules/zenoss.py -+++ b/salt/modules/zenoss.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Module for working with the Zenoss API - -@@ -16,18 +15,19 @@ Module for working with the Zenoss API - hostname: https://zenoss.example.com - username: admin - password: admin123 -+ verify_ssl: True -+ ca_bundle: /etc/ssl/certs/ca-certificates.crt - """ - - --from __future__ import absolute_import, print_function, unicode_literals -- - import logging - import re - -+import salt.utils.http - import salt.utils.json - - try: -- import requests -+ import requests # pylint: disable=unused-import - - HAS_LIBS = True - except ImportError: -@@ -53,7 +53,7 @@ def __virtual__(): - else: - return ( - False, -- "The '{0}' module could not be loaded: " -+ "The '{}' module could not be loaded: " - "'requests' is not installed.".format(__virtualname__), - ) - -@@ -79,11 +79,13 @@ def _session(): - """ - - config = __salt__["config.option"]("zenoss") -- session = requests.session() -- session.auth = (config.get("username"), config.get("password")) -- session.verify = False -- session.headers.update({"Content-type": "application/json; charset=utf-8"}) -- return session -+ return salt.utils.http.session( -+ user=config.get("username"), -+ password=config.get("password"), -+ verify_ssl=config.get("verify_ssl", True), -+ ca_bundle=config.get("ca_bundle"), -+ headers={"Content-type": "application/json; charset=utf-8"}, -+ ) - - - def _router_request(router, method, data=None): -@@ -99,7 +101,7 @@ def _router_request(router, method, data=None): - - config = __salt__["config.option"]("zenoss") - log.debug("Making request to router %s with method %s", router, method) -- url = "{0}/zport/dmd/{1}_router".format(config.get("hostname"), ROUTERS[router]) -+ url = "{}/zport/dmd/{}_router".format(config.get("hostname"), ROUTERS[router]) - response = _session().post(url, data=req_data) - - # The API returns a 200 response code even whe auth is bad. -@@ -212,7 +214,7 @@ def set_prod_state(prod_state, device=None): - device_object = find_device(device) - - if not device_object: -- return "Unable to find a device in Zenoss for {0}".format(device) -+ return "Unable to find a device in Zenoss for {}".format(device) - - log.info("Setting prodState to %d on %s device", prod_state, device) - data = dict( -diff --git a/salt/pillar/vmware_pillar.py b/salt/pillar/vmware_pillar.py -index a33b394500..08bdb18e56 100644 ---- a/salt/pillar/vmware_pillar.py -+++ b/salt/pillar/vmware_pillar.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Pillar data from vCenter or an ESXi host - -@@ -142,18 +141,12 @@ Optionally, the following keyword arguments can be passed to the ext_pillar for - part of the pillar regardless of this setting. 
- - """ --from __future__ import absolute_import, print_function, unicode_literals - --# Import python libs - import logging - --# Import salt libs - import salt.utils.dictupdate as dictupdate - import salt.utils.vmware - --# Import 3rd-party libs --from salt.ext import six -- - try: - # pylint: disable=no-name-in-module - from pyVmomi import vim -@@ -370,7 +363,12 @@ def ext_pillar(minion_id, pillar, **kwargs): # pylint: disable=W0613 - vmware_pillar[pillar_key] = {} - try: - _conn = salt.utils.vmware.get_service_instance( -- host, username, password, protocol, port -+ host, -+ username, -+ password, -+ protocol, -+ port, -+ verify_ssl=kwargs.get("verify_ssl", True), - ) - if _conn: - data = None -@@ -410,12 +408,10 @@ def ext_pillar(minion_id, pillar, **kwargs): # pylint: disable=W0613 - ) - except RuntimeError: - log.error( -- ( -- "A runtime error occurred in the vmware_pillar, " -- "this is likely caused by an infinite recursion in " -- "a requested attribute. Verify your requested attributes " -- "and reconfigure the pillar." -- ) -+ "A runtime error occurred in the vmware_pillar, " -+ "this is likely caused by an infinite recursion in " -+ "a requested attribute. Verify your requested attributes " -+ "and reconfigure the pillar." - ) - - return vmware_pillar -@@ -435,7 +431,7 @@ def _recurse_config_to_dict(t_data): - return t_list - elif isinstance(t_data, dict): - t_dict = {} -- for k, v in six.iteritems(t_data): -+ for k, v in t_data.items(): - t_dict[k] = _recurse_config_to_dict(v) - return t_dict - else: -diff --git a/salt/proxy/cimc.py b/salt/proxy/cimc.py -index f302eaa6cc..a6002440ef 100644 ---- a/salt/proxy/cimc.py -+++ b/salt/proxy/cimc.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Proxy Minion interface module for managing Cisco Integrated Management Controller devices - ========================================================================================= -@@ -40,6 +39,7 @@ the ID. - host: - username: - password: -+ verify_ssl: True - - proxytype - ^^^^^^^^^ -@@ -66,13 +66,10 @@ password - The password used to login to the cimc host. Required. - """ - --from __future__ import absolute_import, print_function, unicode_literals - --# Import Python Libs - import logging - import re - --# Import Salt Libs - import salt.exceptions - from salt._compat import ElementTree as ET - -@@ -102,9 +99,7 @@ def _validate_response_code(response_code_to_check, cookie_to_logout=None): - if formatted_response_code not in ["200", "201", "202", "204"]: - if cookie_to_logout: - logout(cookie_to_logout) -- log.error( -- "Received error HTTP status code: {0}".format(formatted_response_code) -- ) -+ log.error("Received error HTTP status code: {}".format(formatted_response_code)) - raise salt.exceptions.CommandExecutionError( - "Did not receive a valid response from host." 
- ) -@@ -125,7 +120,7 @@ def init(opts): - log.critical("No 'passwords' key found in pillar for this proxy.") - return False - -- DETAILS["url"] = "https://{0}/nuova".format(opts["proxy"]["host"]) -+ DETAILS["url"] = "https://{}/nuova".format(opts["proxy"]["host"]) - DETAILS["headers"] = { - "Content-Type": "application/x-www-form-urlencoded", - "Content-Length": 62, -@@ -136,6 +131,10 @@ def init(opts): - DETAILS["host"] = opts["proxy"]["host"] - DETAILS["username"] = opts["proxy"].get("username") - DETAILS["password"] = opts["proxy"].get("password") -+ verify_ssl = opts["proxy"].get("verify_ssl") -+ if verify_ssl is None: -+ verify_ssl = True -+ DETAILS["verify_ssl"] = verify_ssl - - # Ensure connectivity to the device - log.debug("Attempting to connect to cimc proxy host.") -@@ -158,8 +157,8 @@ def set_config_modify(dn=None, inconfig=None, hierarchical=False): - h = "true" - - payload = ( -- '' -- "{3}".format(cookie, h, dn, inconfig) -+ '' -+ "{}".format(cookie, h, dn, inconfig) - ) - r = __utils__["http.query"]( - DETAILS["url"], -@@ -167,7 +166,7 @@ def set_config_modify(dn=None, inconfig=None, hierarchical=False): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - raise_error=True, - status=True, - headers=DETAILS["headers"], -@@ -195,7 +194,7 @@ def get_config_resolver_class(cid=None, hierarchical=False): - if hierarchical is True: - h = "true" - -- payload = ''.format( -+ payload = ''.format( - cookie, h, cid - ) - r = __utils__["http.query"]( -@@ -204,7 +203,7 @@ def get_config_resolver_class(cid=None, hierarchical=False): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - raise_error=True, - status=True, - headers=DETAILS["headers"], -@@ -226,7 +225,7 @@ def logon(): - Logs into the cimc device and returns the session cookie. - """ - content = {} -- payload = "".format( -+ payload = "".format( - DETAILS["username"], DETAILS["password"] - ) - r = __utils__["http.query"]( -@@ -235,7 +234,7 @@ def logon(): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - raise_error=False, - status=True, - headers=DETAILS["headers"], -@@ -265,7 +264,7 @@ def logout(cookie=None): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - raise_error=True, - headers=DETAILS["headers"], - ) -diff --git a/salt/proxy/panos.py b/salt/proxy/panos.py -index 5c298b4f7d..50a4639911 100644 ---- a/salt/proxy/panos.py -+++ b/salt/proxy/panos.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Proxy Minion interface module for managing Palo Alto firewall devices - ===================================================================== -@@ -53,6 +52,7 @@ the device with username and password. - host: - username: - password: -+ verify_ssl: True - - proxytype - ^^^^^^^^^ -@@ -203,17 +203,12 @@ apikey - The generated XML API key for the Panorama server. Required. - """ - --from __future__ import absolute_import, print_function, unicode_literals - --# Import Python Libs - import logging - - import salt.exceptions - import salt.utils.xmlutil as xml -- --# Import Salt Libs - from salt._compat import ElementTree as ET --from salt.ext import six - - # This must be present or the Salt loader won't load this module. 
- __proxyenabled__ = ["panos"] -@@ -270,10 +265,11 @@ def init(opts): - log.critical("No 'passwords' key found in pillar for this proxy.") - return False - -- DETAILS["url"] = "https://{0}/api/".format(opts["proxy"]["host"]) -+ DETAILS["url"] = "https://{}/api/".format(opts["proxy"]["host"]) - - # Set configuration details - DETAILS["host"] = opts["proxy"]["host"] -+ DETAILS["verify_ssl"] = opts["proxy"].get("verify_ssl", True) - if "serial" in opts["proxy"]: - DETAILS["serial"] = opts["proxy"].get("serial") - if "apikey" in opts["proxy"]: -@@ -321,7 +317,7 @@ def call(payload=None): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - status=True, - raise_error=True, - ) -@@ -335,7 +331,7 @@ def call(payload=None): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - status=True, - raise_error=True, - ) -@@ -352,7 +348,7 @@ def call(payload=None): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - status=True, - raise_error=True, - ) -@@ -368,7 +364,7 @@ def call(payload=None): - method="POST", - decode_type="plain", - decode=True, -- verify_ssl=False, -+ verify_ssl=DETAILS["verify_ssl"], - status=True, - raise_error=True, - ) -@@ -382,21 +378,21 @@ def call(payload=None): - "Did not receive a valid response from host." - ) - -- if six.text_type(r["status"]) not in ["200", "201", "204"]: -- if six.text_type(r["status"]) == "400": -+ if str(r["status"]) not in ["200", "201", "204"]: -+ if str(r["status"]) == "400": - raise salt.exceptions.CommandExecutionError( - "The server cannot process the request due to a client error." - ) -- elif six.text_type(r["status"]) == "401": -+ elif str(r["status"]) == "401": - raise salt.exceptions.CommandExecutionError( - "The server cannot process the request because it lacks valid authentication " - "credentials for the target resource." - ) -- elif six.text_type(r["status"]) == "403": -+ elif str(r["status"]) == "403": - raise salt.exceptions.CommandExecutionError( - "The server refused to authorize the request." - ) -- elif six.text_type(r["status"]) == "404": -+ elif str(r["status"]) == "404": - raise salt.exceptions.CommandExecutionError( - "The requested resource could not be found." - ) -diff --git a/salt/proxy/vcenter.py b/salt/proxy/vcenter.py -index fa1d090bd2..4bbdb0ee66 100644 ---- a/salt/proxy/vcenter.py -+++ b/salt/proxy/vcenter.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Proxy Minion interface module for managing VMWare vCenters. - -@@ -182,13 +181,10 @@ and that host would reach out over the network and communicate with the ESXi - host. 
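Two slightly different defaulting idioms appear across these proxy and runner hunks: panos reads ``opts["proxy"].get("verify_ssl", True)``, while cimc (above) and asam (further below) fetch the raw value and coalesce ``None`` to ``True`` in a second step. The difference only shows when the key is present but null, as this self-contained comparison illustrates:

.. code-block:: python

    def get_default(conf):
        # panos-style: the .get() default applies only when the key is absent
        return conf.get("verify_ssl", True)

    def get_coalescing(conf):
        # cimc/asam-style: an explicit null is also treated as "unset"
        verify_ssl = conf.get("verify_ssl")
        if verify_ssl is None:
            verify_ssl = True
        return verify_ssl

    assert get_default({}) is True and get_coalescing({}) is True
    assert get_default({"verify_ssl": False}) is False
    assert get_default({"verify_ssl": None}) is None      # null leaks through
    assert get_coalescing({"verify_ssl": None}) is True   # null is coalesced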
- """ - --# Import Python Libs --from __future__ import absolute_import, print_function, unicode_literals - - import logging - import os - --# Import Salt Libs - import salt.exceptions - from salt.config.schemas.vcenter import VCenterProxySchema - from salt.utils.dictupdate import merge -@@ -277,6 +273,8 @@ def init(opts): - # Save optional - DETAILS["protocol"] = proxy_conf.get("protocol") - DETAILS["port"] = proxy_conf.get("port") -+ DETAILS["verify_ssl"] = proxy_conf.get("verify_ssl") -+ DETAILS["ca_bundle"] = proxy_conf.get("ca_bundle") - - # Test connection - if DETAILS["mechanism"] == "userpass": -diff --git a/salt/returners/splunk.py b/salt/returners/splunk.py -index 509eab3cf7..fe4194485e 100644 ---- a/salt/returners/splunk.py -+++ b/salt/returners/splunk.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - - Send json response data to Splunk via the HTTP Event Collector -@@ -11,29 +10,23 @@ Requires the following config values to be specified in config or pillar: - indexer: - sourcetype: - index: -+ verify_ssl: true - - Run a test by using ``salt-call test.ping --return splunk`` - - Written by Scott Pack (github.com/scottjpack) - - """ --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - - import logging - import socket - import time - - import requests -- --# Import salt libs - import salt.utils.json -- --# Import 3rd-party libs - from salt.ext import six - - _max_content_bytes = 100000 --http_event_collector_SSL_verify = False - http_event_collector_debug = False - - log = logging.getLogger(__name__) -@@ -62,6 +55,9 @@ def _get_options(): - indexer = __salt__["config.get"]("splunk_http_forwarder:indexer") - sourcetype = __salt__["config.get"]("splunk_http_forwarder:sourcetype") - index = __salt__["config.get"]("splunk_http_forwarder:index") -+ verify_ssl = __salt__["config.get"]( -+ "splunk_http_forwarder:verify_ssl", default=True -+ ) - except Exception: # pylint: disable=broad-except - log.error("Splunk HTTP Forwarder parameters not present in config.") - return None -@@ -70,6 +66,7 @@ def _get_options(): - "indexer": indexer, - "sourcetype": sourcetype, - "index": index, -+ "verify_ssl": verify_ssl, - } - return splunk_opts - -@@ -84,14 +81,17 @@ def _send_splunk(event, index_override=None, sourcetype_override=None): - # Get Splunk Options - opts = _get_options() - log.info( -- str("Options: %s"), # future lint: disable=blacklisted-function -+ "Options: %s", # future lint: disable=blacklisted-function - salt.utils.json.dumps(opts), - ) - http_event_collector_key = opts["token"] - http_event_collector_host = opts["indexer"] -+ http_event_collector_verify_ssl = opts["verify_ssl"] - # Set up the collector - splunk_event = http_event_collector( -- http_event_collector_key, http_event_collector_host -+ http_event_collector_key, -+ http_event_collector_host, -+ verify_ssl=http_event_collector_verify_ssl, - ) - # init the payload - payload = {} -@@ -109,7 +109,7 @@ def _send_splunk(event, index_override=None, sourcetype_override=None): - # Add the event - payload.update({"event": event}) - log.info( -- str("Payload: %s"), # future lint: disable=blacklisted-function -+ "Payload: %s", # future lint: disable=blacklisted-function - salt.utils.json.dumps(payload), - ) - # Fire it off -@@ -120,7 +120,7 @@ def _send_splunk(event, index_override=None, sourcetype_override=None): - # Thanks to George Starcher for the http_event_collector class (https://github.com/georgestarcher/) - - --class http_event_collector(object): -+class 
http_event_collector: - def __init__( - self, - token, -@@ -129,11 +129,13 @@ class http_event_collector(object): - http_event_port="8088", - http_event_server_ssl=True, - max_bytes=_max_content_bytes, -+ verify_ssl=True, - ): - self.token = token - self.batchEvents = [] - self.maxByteLength = max_bytes - self.currentByteLength = 0 -+ self.verify_ssl = verify_ssl - - # Set host to specified value or default to localhostname if no value provided - if host: -@@ -164,7 +166,7 @@ class http_event_collector(object): - - # If eventtime in epoch not passed as optional argument use current system time in epoch - if not eventtime: -- eventtime = six.text_type(int(time.time())) -+ eventtime = str(int(time.time())) - - # Fill in local hostname if not manually populated - if "host" not in payload: -@@ -179,7 +181,7 @@ class http_event_collector(object): - self.server_uri, - data=salt.utils.json.dumps(data), - headers=headers, -- verify=http_event_collector_SSL_verify, -+ verify=self.verify_ssl, - ) - - # Print debug info if flag set -@@ -207,7 +209,7 @@ class http_event_collector(object): - - # If eventtime in epoch not passed as optional argument use current system time in epoch - if not eventtime: -- eventtime = six.text_type(int(time.time())) -+ eventtime = str(int(time.time())) - - # Update time value on payload if need to use system time - data = {"time": eventtime} -@@ -224,7 +226,7 @@ class http_event_collector(object): - self.server_uri, - data=" ".join(self.batchEvents), - headers=headers, -- verify=http_event_collector_SSL_verify, -+ verify=self.verify_ssl, - ) - self.batchEvents = [] - self.currentByteLength = 0 -diff --git a/salt/runners/asam.py b/salt/runners/asam.py -index f53dfba69d..4c999d3ba2 100644 ---- a/salt/runners/asam.py -+++ b/salt/runners/asam.py -@@ -17,9 +17,11 @@ master configuration at ``/etc/salt/master`` or ``/etc/salt/master.d/asam.conf`` - prov1.domain.com - username: "testuser" - password: "verybadpass" -+ verify_ssl: true - prov2.domain.com - username: "testuser" - password: "verybadpass" -+ verify_ssl: true - - .. 
note:: - -@@ -84,6 +86,10 @@ def _get_asam_configuration(driver_url=""): - password = service_config.get("password", None) - protocol = service_config.get("protocol", "https") - port = service_config.get("port", 3451) -+ verify_ssl = service_config.get("verify_ssl") -+ -+ if verify_ssl is None: -+ verify_ssl = True - - if not username or not password: - log.error( -@@ -108,6 +114,7 @@ def _get_asam_configuration(driver_url=""): - ), - "username": username, - "password": password, -+ "verify_ssl": verify_ssl, - } - - if (not driver_url) or (driver_url == asam_server): -@@ -206,7 +213,7 @@ def remove_platform(name, server_url): - auth = (config["username"], config["password"]) - - try: -- html_content = _make_post_request(url, data, auth, verify=False) -+ html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"]) - except Exception as exc: # pylint: disable=broad-except - err_msg = "Failed to look up existing platforms on {}".format(server_url) - log.error("%s:\n%s", err_msg, exc) -@@ -222,7 +229,9 @@ def remove_platform(name, server_url): - data["postType"] = "platformRemove" - data["Submit"] = "Yes" - try: -- html_content = _make_post_request(url, data, auth, verify=False) -+ html_content = _make_post_request( -+ url, data, auth, verify=config["verify_ssl"] -+ ) - except Exception as exc: # pylint: disable=broad-except - err_msg = "Failed to delete platform from {}".format(server_url) - log.error("%s:\n%s", err_msg, exc) -@@ -261,7 +270,7 @@ def list_platforms(server_url): - auth = (config["username"], config["password"]) - - try: -- html_content = _make_post_request(url, data, auth, verify=False) -+ html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"]) - except Exception as exc: # pylint: disable=broad-except - err_msg = "Failed to look up existing platforms" - log.error("%s:\n%s", err_msg, exc) -@@ -299,7 +308,7 @@ def list_platform_sets(server_url): - auth = (config["username"], config["password"]) - - try: -- html_content = _make_post_request(url, data, auth, verify=False) -+ html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"]) - except Exception as exc: # pylint: disable=broad-except - err_msg = "Failed to look up existing platform sets" - log.error("%s:\n%s", err_msg, exc) -@@ -351,7 +360,7 @@ def add_platform(name, platform_set, server_url): - auth = (config["username"], config["password"]) - - try: -- html_content = _make_post_request(url, data, auth, verify=False) -+ html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"]) - except Exception as exc: # pylint: disable=broad-except - err_msg = "Failed to add platform on {}".format(server_url) - log.error("%s:\n%s", err_msg, exc) -diff --git a/salt/states/esxi.py b/salt/states/esxi.py -index 6f4d44306b..12a592dc29 100644 ---- a/salt/states/esxi.py -+++ b/salt/states/esxi.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Manage VMware ESXi Hosts. - -@@ -91,8 +90,6 @@ configuration examples, dependency installation instructions, how to run remote - execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state - example. 
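With the flag resolved into the per-server config dict, every asam call site above switches from a hard-coded ``verify=False`` to ``verify=config["verify_ssl"]``. ``_make_post_request`` itself is not shown in the hunk, so the helper below is an illustrative stand-in for how such a call shape typically wraps ``requests.post``:

.. code-block:: python

    import requests

    def _make_post_request(url, data, auth, verify=True):
        # Assumed body: the real helper lives in salt/runners/asam.py outside
        # this hunk; only the keyword plumbing is taken from the patch.
        response = requests.post(url, data=data, auth=auth, verify=verify)
        response.raise_for_status()
        return response.text

    config = {"username": "testuser", "password": "verybadpass", "verify_ssl": True}
    auth = (config["username"], config["password"])
    # html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"])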
- """ --# Import Python Libs --from __future__ import absolute_import, print_function, unicode_literals - - import logging - import re -@@ -108,8 +105,6 @@ from salt.exceptions import ( - VMwareObjectRetrievalError, - VMwareSaltError, - ) -- --# Import Salt Libs - from salt.ext import six - from salt.utils.decorators import depends - -@@ -201,7 +196,7 @@ def coredump_configured(name, enabled, dump_ip, host_vnic="vmk0", dump_port=6500 - current_config = __salt__[esxi_cmd]("get_coredump_network_config").get(host) - error = current_config.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - current_config = current_config.get("Coredump Config") -@@ -217,7 +212,7 @@ def coredump_configured(name, enabled, dump_ip, host_vnic="vmk0", dump_port=6500 - ).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - # Allow users to disable core dump, but then return since -@@ -252,9 +247,9 @@ def coredump_configured(name, enabled, dump_ip, host_vnic="vmk0", dump_port=6500 - changes = True - - current_port = current_config.get("port") -- if current_port != six.text_type(dump_port): -+ if current_port != str(dump_port): - ret["changes"].update( -- {"dump_port": {"old": current_port, "new": six.text_type(dump_port)}} -+ {"dump_port": {"old": current_port, "new": str(dump_port)}} - ) - changes = True - -@@ -270,7 +265,7 @@ def coredump_configured(name, enabled, dump_ip, host_vnic="vmk0", dump_port=6500 - msg = response.get("stderr") - if not msg: - msg = response.get("stdout") -- ret["comment"] = "Error: {0}".format(msg) -+ ret["comment"] = "Error: {}".format(msg) - return ret - - ret["result"] = True -@@ -328,7 +323,7 @@ def password_present(name, password): - __salt__[esxi_cmd]("update_host_password", new_password=password) - except CommandExecutionError as err: - ret["result"] = False -- ret["comment"] = "Error: {0}".format(err) -+ ret["comment"] = "Error: {}".format(err) - return ret - - return ret -@@ -400,7 +395,7 @@ def ntp_configured( - ntp_running = __salt__[esxi_cmd]("get_service_running", service_name=ntpd).get(host) - error = ntp_running.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ntp_running = ntp_running.get(ntpd) - -@@ -413,7 +408,7 @@ def ntp_configured( - ).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - # Set changes dictionary for ntp_servers - ret["changes"].update({"ntp_servers": {"old": ntp_config, "new": ntp_servers}}) -@@ -429,7 +424,7 @@ def ntp_configured( - ) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - # Stop ntpd if service_running=False - else: -@@ -438,7 +433,7 @@ def ntp_configured( - ) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"service_running": {"old": ntp_running, "new": service_running}} -@@ -451,7 +446,7 @@ def ntp_configured( - ).get(host) - error = current_service_policy.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - current_service_policy = 
current_service_policy.get(ntpd) - -@@ -465,7 +460,7 @@ def ntp_configured( - ).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - { -@@ -483,7 +478,7 @@ def ntp_configured( - response = __salt__[esxi_cmd]("update_host_datetime").get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"update_datetime": {"old": "", "new": "Host datetime was updated."}} -@@ -498,7 +493,7 @@ def ntp_configured( - ) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"service_restart": {"old": "", "new": "NTP Daemon Restarted."}} -@@ -559,14 +554,14 @@ def vmotion_configured(name, enabled, device="vmk0"): - response = __salt__[esxi_cmd]("vmotion_enable", device=device).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - # Disable VMotion if enabled=False - else: - response = __salt__[esxi_cmd]("vmotion_disable").get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"enabled": {"old": current_vmotion_enabled, "new": enabled}} -@@ -618,7 +613,7 @@ def vsan_configured(name, enabled, add_disks_to_vsan=False): - current_vsan_enabled = __salt__[esxi_cmd]("get_vsan_enabled").get(host) - error = current_vsan_enabled.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - current_vsan_enabled = current_vsan_enabled.get("VSAN Enabled") - -@@ -631,14 +626,14 @@ def vsan_configured(name, enabled, add_disks_to_vsan=False): - response = __salt__[esxi_cmd]("vsan_enable").get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - # Disable VSAN if enabled=False - else: - response = __salt__[esxi_cmd]("vsan_disable").get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"enabled": {"old": current_vsan_enabled, "new": enabled}} -@@ -649,7 +644,7 @@ def vsan_configured(name, enabled, add_disks_to_vsan=False): - current_eligible_disks = __salt__[esxi_cmd]("get_vsan_eligible_disks").get(host) - error = current_eligible_disks.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - disks = current_eligible_disks.get("Eligible") -@@ -659,7 +654,7 @@ def vsan_configured(name, enabled, add_disks_to_vsan=False): - response = __salt__[esxi_cmd]("vsan_add_disks").get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - ret["changes"].update({"add_disks_to_vsan": {"old": "", "new": disks}}) -@@ -683,7 +678,7 @@ def ssh_configured( - ssh_key_file=None, - service_policy=None, - service_restart=False, -- certificate_verify=False, -+ certificate_verify=None, - ): - """ - Manage the SSH configuration for a host including 
whether or not SSH is running or -@@ -724,7 +719,7 @@ def ssh_configured( - - certificate_verify - If set to ``True``, the SSL connection must present a valid certificate. -- Default is ``False``. -+ Default is ``True``. - - Example: - -@@ -739,6 +734,8 @@ def ssh_configured( - - certificate_verify: True - - """ -+ if certificate_verify is None: -+ certificate_verify = True - ret = {"name": name, "result": False, "changes": {}, "comment": ""} - esxi_cmd = "esxi.cmd" - host = __pillar__["proxy"]["host"] -@@ -747,7 +744,7 @@ def ssh_configured( - ssh_running = __salt__[esxi_cmd]("get_service_running", service_name=ssh).get(host) - error = ssh_running.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ssh_running = ssh_running.get(ssh) - -@@ -760,14 +757,14 @@ def ssh_configured( - enable = __salt__[esxi_cmd]("service_start", service_name=ssh).get(host) - error = enable.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - # Disable SSH if service_running=False - else: - disable = __salt__[esxi_cmd]("service_stop", service_name=ssh).get(host) - error = disable.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - ret["changes"].update( -@@ -783,7 +780,7 @@ def ssh_configured( - ) - error = current_ssh_key.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - current_ssh_key = current_ssh_key.get("key") - if current_ssh_key: -@@ -822,7 +819,7 @@ def ssh_configured( - ) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - { -@@ -840,7 +837,7 @@ def ssh_configured( - ).get(host) - error = current_service_policy.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - current_service_policy = current_service_policy.get(ssh) - -@@ -854,7 +851,7 @@ def ssh_configured( - ).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - { -@@ -872,7 +869,7 @@ def ssh_configured( - response = __salt__[esxi_cmd]("service_restart", service_name=ssh).get(host) - error = response.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - ret["changes"].update( - {"service_restart": {"old": "", "new": "SSH service restarted."}} -@@ -965,17 +962,17 @@ def syslog_configured( - reset = __salt__[esxi_cmd]( - "reset_syslog_config", syslog_config=reset_configs - ).get(host) -- for key, val in six.iteritems(reset): -+ for key, val in reset.items(): - if isinstance(val, bool): - continue - if not val.get("success"): - msg = val.get("message") - if not msg: - msg = ( -- "There was an error resetting a syslog config '{0}'." -+ "There was an error resetting a syslog config '{}'." 
- "Please check debug logs.".format(val) - ) -- ret["comment"] = "Error: {0}".format(msg) -+ ret["comment"] = "Error: {}".format(msg) - return ret - - ret["changes"].update( -@@ -985,7 +982,7 @@ def syslog_configured( - current_firewall = __salt__[esxi_cmd]("get_firewall_status").get(host) - error = current_firewall.get("Error") - if error: -- ret["comment"] = "Error: {0}".format(error) -+ ret["comment"] = "Error: {}".format(error) - return ret - - current_firewall = current_firewall.get("rulesets").get("syslog") -@@ -1000,23 +997,23 @@ def syslog_configured( - if enabled.get("retcode") != 0: - err = enabled.get("stderr") - out = enabled.get("stdout") -- ret["comment"] = "Error: {0}".format(err if err else out) -+ ret["comment"] = "Error: {}".format(err if err else out) - return ret - - ret["changes"].update({"firewall": {"old": current_firewall, "new": firewall}}) - - current_syslog_config = __salt__[esxi_cmd]("get_syslog_config").get(host) -- for key, val in six.iteritems(syslog_configs): -+ for key, val in syslog_configs.items(): - # The output of get_syslog_config has different keys than the keys - # Used to set syslog_config values. We need to look them up first. - try: - lookup_key = _lookup_syslog_config(key) - except KeyError: -- ret["comment"] = "'{0}' is not a valid config variable.".format(key) -+ ret["comment"] = "'{}' is not a valid config variable.".format(key) - return ret - - current_val = current_syslog_config[lookup_key] -- if six.text_type(current_val) != six.text_type(val): -+ if str(current_val) != str(val): - # Only run the command if not using test=True - if not __opts__["test"]: - response = __salt__[esxi_cmd]( -@@ -1031,7 +1028,7 @@ def syslog_configured( - msg = response.get(key).get("message") - if not msg: - msg = ( -- "There was an error setting syslog config '{0}'. " -+ "There was an error setting syslog config '{}'. 
" - "Please check debug logs.".format(key) - ) - ret["comment"] = msg -@@ -1101,7 +1098,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - if not proxy_details.get("vcenter") - else proxy_details["esxi_host"] - ) -- log.info("Running state {0} for host '{1}'".format(name, hostname)) -+ log.info("Running state {} for host '{}'".format(name, hostname)) - # Variable used to return the result of the invocation - ret = {"name": name, "result": None, "changes": {}, "comments": None} - # Signals if errors have been encountered -@@ -1124,23 +1121,20 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - host_disks = __salt__["vsphere.list_disks"](service_instance=si) - if not host_disks: - raise VMwareObjectRetrievalError( -- "No disks retrieved from host '{0}'".format(hostname) -+ "No disks retrieved from host '{}'".format(hostname) - ) - scsi_addr_to_disk_map = {d["scsi_address"]: d for d in host_disks} -- log.trace("scsi_addr_to_disk_map = {0}".format(scsi_addr_to_disk_map)) -+ log.trace("scsi_addr_to_disk_map = {}".format(scsi_addr_to_disk_map)) - existing_diskgroups = __salt__["vsphere.list_diskgroups"](service_instance=si) - cache_disk_to_existing_diskgroup_map = { - dg["cache_disk"]: dg for dg in existing_diskgroups - } - except CommandExecutionError as err: -- log.error("Error: {0}".format(err)) -+ log.error("Error: {}".format(err)) - if si: - __salt__["vsphere.disconnect"](si) - ret.update( -- { -- "result": False if not __opts__["test"] else None, -- "comment": six.text_type(err), -- } -+ {"result": False if not __opts__["test"] else None, "comment": str(err),} - ) - return ret - -@@ -1149,7 +1143,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - # Check for cache disk - if not dg["cache_scsi_addr"] in scsi_addr_to_disk_map: - comments.append( -- "No cache disk with scsi address '{0}' was " -+ "No cache disk with scsi address '{}' was " - "found.".format(dg["cache_scsi_addr"]) - ) - log.error(comments[-1]) -@@ -1158,7 +1152,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - - # Check for capacity disks - cache_disk_id = scsi_addr_to_disk_map[dg["cache_scsi_addr"]]["id"] -- cache_disk_display = "{0} (id:{1})".format(dg["cache_scsi_addr"], cache_disk_id) -+ cache_disk_display = "{} (id:{})".format(dg["cache_scsi_addr"], cache_disk_id) - bad_scsi_addrs = [] - capacity_disk_ids = [] - capacity_disk_displays = [] -@@ -1168,13 +1162,13 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - continue - capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]["id"]) - capacity_disk_displays.append( -- "{0} (id:{1})".format(scsi_addr, capacity_disk_ids[-1]) -+ "{} (id:{})".format(scsi_addr, capacity_disk_ids[-1]) - ) - if bad_scsi_addrs: - comments.append( -- "Error in diskgroup #{0}: capacity disks with " -- "scsi addresses {1} were not found." -- "".format(idx, ", ".join(["'{0}'".format(a) for a in bad_scsi_addrs])) -+ "Error in diskgroup #{}: capacity disks with " -+ "scsi addresses {} were not found." 
-+ "".format(idx, ", ".join(["'{}'".format(a) for a in bad_scsi_addrs])) - ) - log.error(comments[-1]) - errors = True -@@ -1182,14 +1176,14 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - - if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): - # A new diskgroup needs to be created -- log.trace("erase_disks = {0}".format(erase_disks)) -+ log.trace("erase_disks = {}".format(erase_disks)) - if erase_disks: - if __opts__["test"]: - comments.append( -- "State {0} will " -- "erase all disks of disk group #{1}; " -- "cache disk: '{2}', " -- "capacity disk(s): {3}." -+ "State {} will " -+ "erase all disks of disk group #{}; " -+ "cache disk: '{}', " -+ "capacity disk(s): {}." - "".format( - name, - idx, -@@ -1206,13 +1200,13 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - disk_id=disk_id, service_instance=si - ) - comments.append( -- "Erased disks of diskgroup #{0}; " -- "cache disk: '{1}', capacity disk(s): " -- "{2}".format( -+ "Erased disks of diskgroup #{}; " -+ "cache disk: '{}', capacity disk(s): " -+ "{}".format( - idx, - cache_disk_display, - ", ".join( -- ["'{0}'".format(a) for a in capacity_disk_displays] -+ ["'{}'".format(a) for a in capacity_disk_displays] - ), - ) - ) -@@ -1220,13 +1214,13 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - - if __opts__["test"]: - comments.append( -- "State {0} will create " -- "the disk group #{1}; cache disk: '{2}', " -- "capacity disk(s): {3}.".format( -+ "State {} will create " -+ "the disk group #{}; cache disk: '{}', " -+ "capacity disk(s): {}.".format( - name, - idx, - cache_disk_display, -- ", ".join(["'{0}'".format(a) for a in capacity_disk_displays]), -+ ", ".join(["'{}'".format(a) for a in capacity_disk_displays]), - ) - ) - log.info(comments[-1]) -@@ -1241,15 +1235,15 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - ) - except VMwareSaltError as err: - comments.append( -- "Error creating disk group #{0}: " "{1}.".format(idx, err) -+ "Error creating disk group #{}: " "{}.".format(idx, err) - ) - log.error(comments[-1]) - errors = True - continue - -- comments.append("Created disk group #'{0}'.".format(idx)) -+ comments.append("Created disk group #'{}'.".format(idx)) - log.info(comments[-1]) -- diskgroup_changes[six.text_type(idx)] = { -+ diskgroup_changes[str(idx)] = { - "new": {"cache": cache_disk_display, "capacity": capacity_disk_displays} - } - changes = True -@@ -1257,12 +1251,12 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - - # The diskgroup exists; checking the capacity disks - log.debug( -- "Disk group #{0} exists. Checking capacity disks: " -- "{1}.".format(idx, capacity_disk_displays) -+ "Disk group #{} exists. 
Checking capacity disks: " -+ "{}.".format(idx, capacity_disk_displays) - ) - existing_diskgroup = cache_disk_to_existing_diskgroup_map.get(cache_disk_id) - existing_capacity_disk_displays = [ -- "{0} (id:{1})".format( -+ "{} (id:{})".format( - [d["scsi_address"] for d in host_disks if d["id"] == disk_id][0], - disk_id, - ) -@@ -1280,7 +1274,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - ][0] - added_capacity_disk_ids.append(disk_id) - added_capacity_disk_displays.append( -- "{0} (id:{1})".format(disk_scsi_addr, disk_id) -+ "{} (id:{})".format(disk_scsi_addr, disk_id) - ) - for disk_id in existing_diskgroup["capacity_disks"]: - if disk_id not in capacity_disk_ids: -@@ -1289,12 +1283,12 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - ][0] - removed_capacity_disk_ids.append(disk_id) - removed_capacity_disk_displays.append( -- "{0} (id:{1})".format(disk_scsi_addr, disk_id) -+ "{} (id:{})".format(disk_scsi_addr, disk_id) - ) - - log.debug( -- "Disk group #{0}: existing capacity disk ids: {1}; added " -- "capacity disk ids: {2}; removed capacity disk ids: {3}" -+ "Disk group #{}: existing capacity disk ids: {}; added " -+ "capacity disk ids: {}; removed capacity disk ids: {}" - "".format( - idx, - existing_capacity_disk_displays, -@@ -1306,11 +1300,11 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - # TODO revisit this when removing capacity disks is supported - if removed_capacity_disk_ids: - comments.append( -- "Error removing capacity disk(s) {0} from disk group #{1}; " -+ "Error removing capacity disk(s) {} from disk group #{}; " - "operation is not supported." - "".format( - ", ".join( -- ["'{0}'".format(id) for id in removed_capacity_disk_displays] -+ ["'{}'".format(id) for id in removed_capacity_disk_displays] - ), - idx, - ) -@@ -1324,11 +1318,11 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - - # Building a string representation of the capacity disks - # that need to be added -- s = ", ".join(["'{0}'".format(id) for id in added_capacity_disk_displays]) -+ s = ", ".join(["'{}'".format(id) for id in added_capacity_disk_displays]) - if __opts__["test"]: - comments.append( -- "State {0} will add " -- "capacity disk(s) {1} to disk group #{2}." -+ "State {} will add " -+ "capacity disk(s) {} to disk group #{}." - "".format(name, s, idx) - ) - log.info(comments[-1]) -@@ -1343,17 +1337,17 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - ) - except VMwareSaltError as err: - comments.append( -- "Error adding capacity disk(s) {0} to " -- "disk group #{1}: {2}.".format(s, idx, err) -+ "Error adding capacity disk(s) {} to " -+ "disk group #{}: {}.".format(s, idx, err) - ) - log.error(comments[-1]) - errors = True - continue - -- com = "Added capacity disk(s) {0} to disk group #{1}" "".format(s, idx) -+ com = "Added capacity disk(s) {} to disk group #{}" "".format(s, idx) - log.info(com) - comments.append(com) -- diskgroup_changes[six.text_type(idx)] = { -+ diskgroup_changes[str(idx)] = { - "new": { - "cache": cache_disk_display, - "capacity": capacity_disk_displays, -@@ -1367,9 +1361,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): - continue - - # No capacity needs to be added -- s = "Disk group #{0} is correctly configured. Nothing to be done." "".format( -- idx -- ) -+ s = "Disk group #{} is correctly configured. Nothing to be done." 
"".format(idx) - log.info(s) - comments.append(s) - __salt__["vsphere.disconnect"](si) -@@ -1532,11 +1524,11 @@ def host_cache_configured( - ) - if not existing_disks: - raise VMwareObjectRetrievalError( -- "Disk with scsi address '{0}' was not found in host '{1}'" -+ "Disk with scsi address '{}' was not found in host '{}'" - "".format(datastore["backing_disk_scsi_addr"], hostname) - ) - backing_disk = existing_disks[0] -- backing_disk_display = "{0} (id:{1})".format( -+ backing_disk_display = "{} (id:{})".format( - backing_disk["scsi_address"], backing_disk["id"] - ) - log.trace("backing_disk = %s", backing_disk_display) -@@ -1547,8 +1539,8 @@ def host_cache_configured( - if erase_backing_disk: - if __opts__["test"]: - comments.append( -- "State {0} will erase " -- "the backing disk '{1}' on host '{2}'." -+ "State {} will erase " -+ "the backing disk '{}' on host '{}'." - "".format(name, backing_disk_display, hostname) - ) - log.info(comments[-1]) -@@ -1558,16 +1550,16 @@ def host_cache_configured( - disk_id=backing_disk["id"], service_instance=si - ) - comments.append( -- "Erased backing disk '{0}' on host " -- "'{1}'.".format(backing_disk_display, hostname) -+ "Erased backing disk '{}' on host " -+ "'{}'.".format(backing_disk_display, hostname) - ) - log.info(comments[-1]) - # Create the datastore - if __opts__["test"]: - comments.append( -- "State {0} will create " -- "the datastore '{1}', with backing disk " -- "'{2}', on host '{3}'." -+ "State {} will create " -+ "the datastore '{}', with backing disk " -+ "'{}', on host '{}'." - "".format(name, datastore["name"], backing_disk_display, hostname) - ) - log.info(comments[-1]) -@@ -1582,7 +1574,7 @@ def host_cache_configured( - non_mbr_partitions = [p for p in partitions if p["format"] != "mbr"] - if len(non_mbr_partitions) > 0: - raise VMwareApiError( -- "Backing disk '{0}' has unexpected partitions" -+ "Backing disk '{}' has unexpected partitions" - "".format(backing_disk_display) - ) - __salt__["vsphere.create_vmfs_datastore"]( -@@ -1592,8 +1584,8 @@ def host_cache_configured( - service_instance=si, - ) - comments.append( -- "Created vmfs datastore '{0}', backed by " -- "disk '{1}', on host '{2}'." -+ "Created vmfs datastore '{}', backed by " -+ "disk '{}', on host '{}'." - "".format(datastore["name"], backing_disk_display, hostname) - ) - log.info(comments[-1]) -@@ -1615,21 +1607,21 @@ def host_cache_configured( - # Check datastore is backed by the correct disk - if not existing_datastores[0].get("backing_disk_ids"): - raise VMwareSaltError( -- "Datastore '{0}' doesn't have a " -+ "Datastore '{}' doesn't have a " - "backing disk" - "".format(datastore["name"]) - ) - if backing_disk["id"] not in existing_datastores[0]["backing_disk_ids"]: - - raise VMwareSaltError( -- "Datastore '{0}' is not backed by the correct disk: " -- "expected '{1}'; got {2}" -+ "Datastore '{}' is not backed by the correct disk: " -+ "expected '{}'; got {}" - "".format( - datastore["name"], - backing_disk["id"], - ", ".join( - [ -- "'{0}'".format(disk) -+ "'{}'".format(disk) - for disk in existing_datastores[0]["backing_disk_ids"] - ] - ), -@@ -1637,8 +1629,8 @@ def host_cache_configured( - ) - - comments.append( -- "Datastore '{0}' already exists on host '{1}' " -- "and is backed by disk '{2}'. Nothing to be " -+ "Datastore '{}' already exists on host '{}' " -+ "and is backed by disk '{}'. 
Nothing to be " - "done.".format(datastore["name"], hostname, backing_disk_display) - ) - existing_datastore = existing_datastores[0] -@@ -1686,8 +1678,8 @@ def host_cache_configured( - if needs_setting: - if __opts__["test"]: - comments.append( -- "State {0} will configure " -- "the host cache on host '{1}' to: {2}." -+ "State {} will configure " -+ "the host cache on host '{}' to: {}." - "".format( - name, - hostname, -@@ -1702,8 +1694,8 @@ def host_cache_configured( - if (existing_datastore["capacity"] / 1024.0 ** 2) < swap_size_MiB: - - raise ArgumentValueError( -- "Capacity of host cache datastore '{0}' ({1} MiB) is " -- "smaller than the required swap size ({2} MiB)" -+ "Capacity of host cache datastore '{}' ({} MiB) is " -+ "smaller than the required swap size ({} MiB)" - "".format( - existing_datastore["name"], - existing_datastore["capacity"] / 1024.0 ** 2, -@@ -1717,11 +1709,11 @@ def host_cache_configured( - service_instance=si, - ) - comments.append( -- "Host cache configured on host " "'{0}'.".format(hostname) -+ "Host cache configured on host " "'{}'.".format(hostname) - ) - else: - comments.append( -- "Host cache on host '{0}' is already correctly " -+ "Host cache on host '{}' is already correctly " - "configured. Nothing to be done.".format(hostname) - ) - result = True -diff --git a/salt/utils/http.py b/salt/utils/http.py -index 9522bd6ee4..c532da63d5 100644 ---- a/salt/utils/http.py -+++ b/salt/utils/http.py -@@ -1062,3 +1062,23 @@ def _sanitize_url_components(comp_list, field): - ret = "{}&".format(comp_list[0]) - comp_list.remove(comp_list[0]) - return ret + _sanitize_url_components(comp_list, field) -+ -+ -+def session(user=None, password=None, verify_ssl=True, ca_bundle=None, headers=None): -+ """ -+ create a requests session -+ """ -+ session = requests.session() -+ if user and password: -+ session.auth = (user, password) -+ if ca_bundle and not verify_ssl: -+ log.error("You cannot use both ca_bundle and verify_ssl False together") -+ return False -+ if ca_bundle: -+ opts = {"ca_bundle": ca_bundle} -+ session.verify = get_ca_bundle(opts) -+ if not verify_ssl: -+ session.verify = False -+ if headers: -+ session.headers.update(headers) -+ return session -diff --git a/salt/utils/thin.py b/salt/utils/thin.py -index ce48957374..60ddd0e67c 100644 ---- a/salt/utils/thin.py -+++ b/salt/utils/thin.py -@@ -217,8 +217,8 @@ def get_tops_python(py_ver, exclude=None, ext_py_ver=None): - "{} does not exist. 
-             "{} does not exist. Could not auto detect dependencies".format(py_ver)
-         )
-         return {}
--    py_shell_cmd = "{0} -c 'import {1}; print({1}.__file__)'".format(py_ver, mod)
--    cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
-+    py_shell_cmd = [py_ver, "-c", "import {0}; print({0}.__file__)".format(mod)]
-+    cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE)
-     stdout, _ = cmd.communicate()
-     mod_file = os.path.abspath(salt.utils.data.decode(stdout).rstrip("\n"))
-
-diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py
-index 57aa2aaa69..f801ba2aab 100644
---- a/salt/utils/vmware.py
-+++ b/salt/utils/vmware.py
-@@ -80,7 +80,6 @@ import ssl
- import time
- from http.client import BadStatusLine
-
--import requests
- import salt.exceptions
- import salt.modules.cmdmod
- import salt.utils.path
-@@ -182,7 +181,9 @@ def esxcli(
-     return ret
-
-
--def get_vsphere_client(server, username, password, session=None):
-+def get_vsphere_client(
-+    server, username, password, session=None, verify_ssl=True, ca_bundle=None
-+):
-     """
-     Internal helper method to create an instance of the vSphere API client.
-     Please provide username and password to authenticate.
-@@ -196,6 +197,10 @@ def get_vsphere_client(
-     :param Session session:
-         Request HTTP session instance. If not specified, one
-         is automatically created and used
-+    :param boolean verify_ssl:
-+        Verify the SSL certificate. Default: True
-+    :param basestring ca_bundle:
-+        Path to the ca bundle to use when verifying SSL certificates.
-
-     :returns:
-         Vsphere Client instance
-@@ -204,9 +209,7 @@ def get_vsphere_client(
-     """
-     if not session:
-         # Create an https session to be used for a vSphere client
--        session = requests.session()
--        # If client uses own SSL cert, session should not verify
--        session.verify = False
-+        session = salt.utils.http.session(verify_ssl=verify_ssl, ca_bundle=ca_bundle)
-     client = None
-     try:
-         client = create_vsphere_client(
-@@ -218,7 +221,15 @@ def get_vsphere_client(
-
-
- def _get_service_instance(
--    host, username, password, protocol, port, mechanism, principal, domain
-+    host,
-+    username,
-+    password,
-+    protocol,
-+    port,
-+    mechanism,
-+    principal,
-+    domain,
-+    verify_ssl=True,
- ):
-     """
-     Internal method to authenticate with a vCenter server or ESX/ESXi host
-@@ -253,21 +264,26 @@ def _get_service_instance(
-         raise salt.exceptions.CommandExecutionError(
-             "Unsupported mechanism: '{}'".format(mechanism)
-         )
-+
-+    log.trace(
-+        "Connecting using the '%s' mechanism, with username '%s'", mechanism, username,
-+    )
-+    default_msg = (
-+        "Could not connect to host '{}'. "
-+        "Please check the debug log for more information.".format(host)
-+    )
-+
-     try:
--        log.trace(
--            "Connecting using the '%s' mechanism, with username '%s'",
--            mechanism,
--            username,
--        )
--        service_instance = SmartConnect(
--            host=host,
--            user=username,
--            pwd=password,
--            protocol=protocol,
--            port=port,
--            b64token=token,
--            mechanism=mechanism,
--        )
-+        if verify_ssl:
-+            service_instance = SmartConnect(
-+                host=host,
-+                user=username,
-+                pwd=password,
-+                protocol=protocol,
-+                port=port,
-+                b64token=token,
-+                mechanism=mechanism,
-+            )
-     except TypeError as exc:
-         if "unexpected keyword argument" in exc.message:
-             log.error(
-@@ -280,30 +296,33 @@ def _get_service_instance(
-             raise
-     except Exception as exc:  # pylint: disable=broad-except
-         # pyVmomi's SmartConnect() actually raises Exception in some cases.
--        default_msg = (
--            "Could not connect to host '{}'. "
--            "Please check the debug log for more information.".format(host)
--        )
-+        if (
-+            isinstance(exc, vim.fault.HostConnectFault)
-+            and "[SSL: CERTIFICATE_VERIFY_FAILED]" in exc.msg
-+        ) or "[SSL: CERTIFICATE_VERIFY_FAILED]" in str(exc):
-+            err_msg = (
-+                "Could not verify the SSL certificate. You can use "
-+                "verify_ssl: False if you do not want to verify the "
-+                "SSL certificate. This is not recommended as it is "
-+                "considered insecure."
-+            )
-+        else:
-+            log.exception(exc)
-+            err_msg = exc.msg if hasattr(exc, "msg") else default_msg
-+        raise salt.exceptions.VMwareConnectionError(err_msg)
-
-+    if not verify_ssl:
-         try:
--            if (
--                isinstance(exc, vim.fault.HostConnectFault)
--                and "[SSL: CERTIFICATE_VERIFY_FAILED]" in exc.msg
--            ) or "[SSL: CERTIFICATE_VERIFY_FAILED]" in str(exc):
--                service_instance = SmartConnect(
--                    host=host,
--                    user=username,
--                    pwd=password,
--                    protocol=protocol,
--                    port=port,
--                    sslContext=ssl._create_unverified_context(),
--                    b64token=token,
--                    mechanism=mechanism,
--                )
--            else:
--                log.exception(exc)
--                err_msg = exc.msg if hasattr(exc, "msg") else default_msg
--                raise salt.exceptions.VMwareConnectionError(err_msg)
-+            service_instance = SmartConnect(
-+                host=host,
-+                user=username,
-+                pwd=password,
-+                protocol=protocol,
-+                port=port,
-+                sslContext=ssl._create_unverified_context(),
-+                b64token=token,
-+                mechanism=mechanism,
-+            )
-         except Exception as exc:  # pylint: disable=broad-except
-             # pyVmomi's SmartConnect() actually raises Exception in some cases.
-             if "certificate verify failed" in str(exc):
-@@ -330,6 +349,7 @@ def _get_service_instance(
-             err_msg = exc.msg if hasattr(exc, "msg") else default_msg
-             log.trace(exc)
-             raise salt.exceptions.VMwareConnectionError(err_msg)
-+
-     atexit.register(Disconnect, service_instance)
-     return service_instance
-
-@@ -384,6 +404,7 @@ def get_service_instance(
-     mechanism="userpass",
-     principal=None,
-     domain=None,
-+    verify_ssl=True,
- ):
-     """
-     Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
-@@ -416,6 +437,9 @@ def get_service_instance(
-
-     domain
-         Kerberos user domain. Required if mechanism is ``sspi``
-+
-+    verify_ssl
-+        Verify the SSL certificate. Default: True
-     """
-
-     if protocol is None:
-@@ -438,7 +462,15 @@ def get_service_instance(
-
-     if not service_instance:
-         service_instance = _get_service_instance(
--            host, username, password, protocol, port, mechanism, principal, domain
-+            host,
-+            username,
-+            password,
-+            protocol,
-+            port,
-+            mechanism,
-+            principal,
-+            domain,
-+            verify_ssl=verify_ssl,
-         )
-
-     # Test if data can actually be retrieved or connection has gone stale
-@@ -449,7 +481,15 @@ def get_service_instance(
Reconnecting") - Disconnect(service_instance) - service_instance = _get_service_instance( -- host, username, password, protocol, port, mechanism, principal, domain -+ host, -+ username, -+ password, -+ protocol, -+ port, -+ mechanism, -+ principal, -+ domain, -+ verify_ssl=verify_ssl, - ) - except vim.fault.NoPermission as exc: - log.exception(exc) -diff --git a/salt/wheel/__init__.py b/salt/wheel/__init__.py -index 38792a10f6..53c3d8527f 100644 ---- a/salt/wheel/__init__.py -+++ b/salt/wheel/__init__.py -@@ -1,8 +1,6 @@ --# -*- coding: utf-8 -*- - """ - Modules used to control the master itself - """ --from __future__ import absolute_import, print_function, unicode_literals - - from collections.abc import Mapping - -@@ -15,7 +13,7 @@ import salt.utils.zeromq - - - class WheelClient( -- salt.client.mixins.SyncClientMixin, salt.client.mixins.AsyncClientMixin, object -+ salt.client.mixins.SyncClientMixin, salt.client.mixins.AsyncClientMixin - ): - """ - An interface to Salt's wheel modules -@@ -123,8 +121,8 @@ class WheelClient( - }) - {'jid': '20131219224744416681', 'tag': 'salt/wheel/20131219224744416681'} - """ -- fun = low.pop("fun") -- return self.asynchronous(fun, low) -+ fun = low.get("fun") -+ return self.asynchronous(fun, low, local=False) - - def cmd( - self, -@@ -143,9 +141,7 @@ class WheelClient( - >>> wheel.cmd('key.finger', ['jerry']) - {'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}} - """ -- return super(WheelClient, self).cmd( -- fun, arg, pub_data, kwarg, print_event, full_return -- ) -+ return super().cmd(fun, arg, pub_data, kwarg, print_event, full_return) - - - Wheel = WheelClient # for backward-compat -diff --git a/salt/wheel/pillar_roots.py b/salt/wheel/pillar_roots.py -index 2c242ef3a7..7504d28777 100644 ---- a/salt/wheel/pillar_roots.py -+++ b/salt/wheel/pillar_roots.py -@@ -1,19 +1,14 @@ --# -*- coding: utf-8 -*- - """ - The `pillar_roots` wheel module is used to manage files under the pillar roots - directories on the master server. 
- """ - --# Import python libs --from __future__ import absolute_import, print_function, unicode_literals - - import os - --# Import salt libs - import salt.utils.files - import salt.utils.path -- --# Import 3rd-party libs -+import salt.utils.verify - from salt.ext import six - - -@@ -86,7 +81,7 @@ def read(path, saltenv="base"): - ret = [] - files = find(path, saltenv) - for fn_ in files: -- full = next(six.iterkeys(fn_)) -+ full = next(iter(fn_.keys())) - form = fn_[full] - if form == "txt": - with salt.utils.files.fopen(full, "rb") as fp_: -@@ -100,19 +95,23 @@ def write(data, path, saltenv="base", index=0): - index of the file can be specified to write to a lower priority file root - """ - if saltenv not in __opts__["pillar_roots"]: -- return "Named environment {0} is not present".format(saltenv) -+ return "Named environment {} is not present".format(saltenv) - if len(__opts__["pillar_roots"][saltenv]) <= index: -- return "Specified index {0} in environment {1} is not present".format( -+ return "Specified index {} in environment {} is not present".format( - index, saltenv - ) - if os.path.isabs(path): - return ( -- "The path passed in {0} is not relative to the environment " "{1}" -+ "The path passed in {} is not relative to the environment " "{}" - ).format(path, saltenv) -+ roots_dir = __opts__["pillar_roots"][saltenv][index] -+ dest = os.path.join(roots_dir, path) -+ if not salt.utils.verify.clean_path(roots_dir, dest): -+ return "Invalid path" - dest = os.path.join(__opts__["pillar_roots"][saltenv][index], path) - dest_dir = os.path.dirname(dest) - if not os.path.isdir(dest_dir): - os.makedirs(dest_dir) - with salt.utils.files.fopen(dest, "w+") as fp_: - fp_.write(salt.utils.stringutils.to_str(data)) -- return "Wrote data to file {0}".format(dest) -+ return "Wrote data to file {}".format(dest) --- -2.30.1 - - diff --git a/fix-for-temp-folder-definition-in-loader-unit-test.patch b/fix-for-temp-folder-definition-in-loader-unit-test.patch deleted file mode 100644 index 548f3ff..0000000 --- a/fix-for-temp-folder-definition-in-loader-unit-test.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 33766e59bd53fac2c75e6ccfa1f363e2f7b1b65f Mon Sep 17 00:00:00 2001 -From: Jochen Breuer -Date: Mon, 16 Mar 2020 15:25:42 +0100 -Subject: [PATCH] Fix for temp folder definition in loader unit test - ---- - tests/unit/test_loader.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py -index 863e2182b9..5b23ad83e3 100644 ---- a/tests/unit/test_loader.py -+++ b/tests/unit/test_loader.py -@@ -240,12 +240,12 @@ class LazyLoaderUtilsTest(TestCase): - def setUpClass(cls): - cls.opts = salt.config.minion_config(None) - cls.opts["grains"] = salt.loader.grains(cls.opts) -- if not os.path.isdir(TMP): -- os.makedirs(TMP) -+ if not os.path.isdir(RUNTIME_VARS.TMP): -+ os.makedirs(RUNTIME_VARS.TMP) - - def setUp(self): - # Setup the module -- self.module_dir = tempfile.mkdtemp(dir=TMP) -+ self.module_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP) - self.module_file = os.path.join( - self.module_dir, "{}.py".format(self.module_name) - ) -@@ -254,7 +254,7 @@ class LazyLoaderUtilsTest(TestCase): - fh.flush() - os.fsync(fh.fileno()) - -- self.utils_dir = tempfile.mkdtemp(dir=TMP) -+ self.utils_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP) - self.utils_file = os.path.join(self.utils_dir, "{}.py".format(self.utils_name)) - with salt.utils.files.fopen(self.utils_file, "w") as fh: - fh.write(salt.utils.stringutils.to_str(loader_template_utils)) --- 
-2.29.2
-
-
diff --git a/fix-git_pillar-merging-across-multiple-__env__-repos.patch b/fix-git_pillar-merging-across-multiple-__env__-repos.patch
deleted file mode 100644
index 046b837..0000000
--- a/fix-git_pillar-merging-across-multiple-__env__-repos.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From f5c9527aeee190a66a908037770c80a75e911d8c Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
- <psuarezhernandez@suse.com>
-Date: Tue, 6 Nov 2018 16:38:54 +0000
-Subject: [PATCH] Fix git_pillar merging across multiple __env__
- repositories (bsc#1112874)
-
-Resolve target branch when using __env__
-
-Test git ext_pillar across multiple repos using __env__
-
-Remove unicode references
----
- tests/integration/pillar/test_git_pillar.py | 55 +++++++++++++++++++++
- 1 file changed, 55 insertions(+)
-
-diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py
-index c0362127f6..979dfebb94 100644
---- a/tests/integration/pillar/test_git_pillar.py
-+++ b/tests/integration/pillar/test_git_pillar.py
-@@ -1600,6 +1600,61 @@ class TestPygit2SSH(GitPillarSSHTestBase):
-             },
-         )
-
-+
-+@skipIf(NO_MOCK, NO_MOCK_REASON)
-+@skipIf(_windows_or_mac(), "minion is windows or mac")
-+@skip_if_not_root
-+@skipIf(
-+    not HAS_PYGIT2,
-+    "pygit2 >= {} and libgit2 >= {} required".format(PYGIT2_MINVER, LIBGIT2_MINVER),
-+)
-+@skipIf(not HAS_NGINX, "nginx not present")
-+@skipIf(not HAS_VIRTUALENV, "virtualenv not present")
-+class TestPygit2HTTP(GitPillarHTTPTestBase):
-+    """
-+    Test git_pillar with pygit2 using SSH authentication
-+    """
-+
-+    def test_single_source(self):
-+        """
-+        Test with git_pillar_includes enabled and using "__env__" as the branch
-+        name for the configured repositories.
-+        The "gitinfo" repository contains top.sls file with a local reference
-+        and also referencing external "nowhere.foo" which is provided by "webinfo"
-+        repository mounted as "nowhere".
-+        """
-+        ret = self.get_pillar(
-+            """\
-+            file_ignore_regex: []
-+            file_ignore_glob: []
-+            git_pillar_provider: pygit2
-+            git_pillar_pubkey: {pubkey_nopass}
-+            git_pillar_privkey: {privkey_nopass}
-+            cachedir: {cachedir}
-+            extension_modules: {extmods}
-+            ext_pillar:
-+              - git:
-+                - __env__ {url_extra_repo}:
-+                  - name: gitinfo
-+                - __env__ {url}:
-+                  - name: webinfo
-+                  - mountpoint: nowhere
-+            """
-+        )
-+        self.assertEqual(
-+            ret,
-+            {
-+                "branch": "master",
-+                "motd": "The force will be with you. Always.",
-+                "mylist": ["master"],
-+                "mydict": {
-+                    "master": True,
-+                    "nested_list": ["master"],
-+                    "nested_dict": {"master": True},
-+                },
-+            },
-+        )
-+
-     @slowTest
-     def test_root_parameter(self):
-         """
--- 
-2.29.2
-
-
diff --git a/fix-grains.test_core-unit-test-277.patch b/fix-grains.test_core-unit-test-277.patch
deleted file mode 100644
index 8e9371b..0000000
--- a/fix-grains.test_core-unit-test-277.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From e2ff2f339ce7938ecdadf867f285a559bc2431dd Mon Sep 17 00:00:00 2001
-From: Dominik Gedon
-Date: Tue, 6 Oct 2020 14:00:55 +0200
-Subject: [PATCH] Fix grains.test_core unit test (#277)
-
-This reverts 63b94ae and fixes the grains test_core unit test. The
-changes are aligned with upstream.
----
- tests/unit/grains/test_core.py | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
-diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
-index 918a9155cb..15de4e363e 100644
---- a/tests/unit/grains/test_core.py
-+++ b/tests/unit/grains/test_core.py
-@@ -60,11 +60,10 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
-         with salt.utils.files.fopen(
-             os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")
-         ) as os_release_file:
--            os_release_content = os_release_file.readlines()
--        with patch("salt.utils.files.fopen", mock_open()) as os_release_file:
--            os_release_file.return_value.__iter__.return_value = os_release_content
-+            os_release_content = os_release_file.read()
-+        with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)):
-             os_release = core._parse_os_release(
--                ["/etc/os-release", "/usr/lib/os-release"]
-+                "/etc/os-release", "/usr/lib/os-release"
-             )
-         self.assertEqual(
-             os_release,
-@@ -174,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
-     def test_missing_os_release(self):
-         with patch("salt.utils.files.fopen", mock_open(read_data={})):
-             os_release = core._parse_os_release(
--                ["/etc/os-release", "/usr/lib/os-release"]
-+                "/etc/os-release", "/usr/lib/os-release"
-             )
-         self.assertEqual(os_release, {})
-
--- 
-2.29.2
-
-
diff --git a/fix-ipv6-scope-bsc-1108557.patch b/fix-ipv6-scope-bsc-1108557.patch
deleted file mode 100644
index 634cc49..0000000
--- a/fix-ipv6-scope-bsc-1108557.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From 082bb6a25b2b025a5c7c6fdbf7dbcbe64a39da2c Mon Sep 17 00:00:00 2001
-From: Bo Maryniuk
-Date: Fri, 28 Sep 2018 15:22:33 +0200
-Subject: [PATCH] Fix IPv6 scope (bsc#1108557)
-
-Fix ipaddress imports
-
-Remove unused import
-
-Fix ipaddress import
-
-Fix unicode imports in compat
-
-Override standard IPv6Address class
-
-Check version via object
-
-Isolate Py2 and Py3 mode
-
-Add logging
-
-Add debugging to the ip_address method (py2 and py3)
-
-Remove multiple returns and add check for address syntax
-
-Remove unnecessary variable for import detection
-
-Remove duplicated code
-
-Remove unnecessary operator
-
-Remove multiple returns
-
-Use ternary operator instead
-
-Remove duplicated code
-
-Move docstrings to their native places
-
-Add real exception message
-
-Add logging to the ip_interface
-
-Add scope on str
-
-Lintfix: mute not called constructors
-
-Add extra detection for hexadecimal packed bytes on Python2. This cannot be detected with type comparison, because bytes == str and at the same time bytes != str if compatibility is not around
-
-Fix py2 case where the same class cannot initialise itself on Python2 via super.
-
-Simplify checking clause
-
-Do not use introspection for method swap
-
-Fix wrong type swap
-
-Add Py3.4 old implementation's fix
-
-Lintfix
-
-Lintfix refactor: remove duplicate returns as not needed
-
-Revert method remapping with pylint updates
-
-Remove unnecessary manipulation with IPv6 scope outside of the IPv6Address object instance
-
-Lintfix: W0611
-
-Reverse skipping tests: if no ipaddress
----
- salt/_compat.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 74 insertions(+)
-
-diff --git a/salt/_compat.py b/salt/_compat.py
-index 011eb8af9e..d9425523cf 100644
---- a/salt/_compat.py
-+++ b/salt/_compat.py
-@@ -242,7 +242,81 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
-         self.hostmask = self.network.hostmask
-
-
-+def ip_address(address):
-+    """Take an IP string/int and return an object of the correct type.
-+
-+    Args:
-+        address: A string or integer, the IP address. Either IPv4 or
-+        IPv6 addresses may be supplied; integers less than 2**32 will
-+        be considered to be IPv4 by default.
-+
-+    Returns:
-+        An IPv4Address or IPv6Address object.
-+
-+    Raises:
-+        ValueError: if the *address* passed isn't either a v4 or a v6
-+        address
-+
-+    """
-+    try:
-+        return ipaddress.IPv4Address(address)
-+    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
-+        log.debug('Error while parsing IPv4 address: %s', address)
-+        log.debug(err)
-+
-+    try:
-+        return IPv6AddressScoped(address)
-+    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
-+        log.debug('Error while parsing IPv6 address: %s', address)
-+        log.debug(err)
-+
-+    if isinstance(address, bytes):
-+        raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
-+                                          'Did you pass in a bytes (str in Python 2) instead '
-+                                          'of a unicode object?'.format(repr(address)))
-+
-+    raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
-+
-+
-+def ip_interface(address):
-+    """Take an IP string/int and return an object of the correct type.
-+
-+    Args:
-+        address: A string or integer, the IP address. Either IPv4 or
-+        IPv6 addresses may be supplied; integers less than 2**32 will
-+        be considered to be IPv4 by default.
-+
-+    Returns:
-+        An IPv4Interface or IPv6Interface object.
-+
-+    Raises:
-+        ValueError: if the string passed isn't either a v4 or a v6
-+        address.
-+
-+    Notes:
-+        The IPv?Interface classes describe an Address on a particular
-+        Network, so they're basically a combination of both the Address
-+        and Network classes.
-+
-+    """
-+    try:
-+        return ipaddress.IPv4Interface(address)
-+    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
-+        log.debug('Error while getting IPv4 interface for address %s', address)
-+        log.debug(err)
-+
-+    try:
-+        return ipaddress.IPv6Interface(address)
-+    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
-+        log.debug('Error while getting IPv6 interface for address %s', address)
-+        log.debug(err)
-+
-+    raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
-+
-+
- if ipaddress:
-     ipaddress.IPv6Address = IPv6AddressScoped
-     if sys.version_info.major == 2:
-         ipaddress.IPv6Interface = IPv6InterfaceScoped
-+    ipaddress.ip_address = ip_address
-+    ipaddress.ip_interface = ip_interface
--- 
-2.29.2
-
-
diff --git a/fix-issue-parsing-errors-in-ansiblegate-state-module.patch b/fix-issue-parsing-errors-in-ansiblegate-state-module.patch
deleted file mode 100644
index 78897c5..0000000
--- a/fix-issue-parsing-errors-in-ansiblegate-state-module.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From cc017f6ed279af7fe02c890e4a7725e6903f364d Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
- <psuarezhernandez@suse.com>
-Date: Mon, 26 Apr 2021 12:13:59 +0100
-Subject: [PATCH] Fix issue parsing errors in ansiblegate state module
-
----
- salt/states/ansiblegate.py | 12 ++++++++++--
- 1 file changed, 10 insertions(+), 2 deletions(-)
-
-diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py
-index 5daba0f37f..bd00653928 100644
---- a/salt/states/ansiblegate.py
-+++ b/salt/states/ansiblegate.py
-@@ -183,7 +183,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
-         checks = __salt__["ansible.playbooks"](
-             name, rundir=rundir, check=True, diff=True, **ansible_kwargs
-         )
--        if all(
-+        if "stats" not in checks:
-+            ret["comment"] = checks.get("stderr", checks)
-+            ret["result"] = False
-+            ret["changes"] = {}
-+        elif all(
-             not check["changed"]
-             and not check["failures"]
-             and not check["unreachable"]
-@@ -212,7 +216,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=
-         results = __salt__["ansible.playbooks"](
-             name, rundir=rundir, diff=True, **ansible_kwargs
-         )
--        if all(
-+        if "stats" not in results:
-+            ret["comment"] = results.get("stderr", results)
-+            ret["result"] = False
-+            ret["changes"] = {}
-+        elif all(
-             not check["changed"]
-             and not check["failures"]
-             and not check["unreachable"]
--- 
-2.31.1
-
-
diff --git a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch b/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
deleted file mode 100644
index 4f71f90..0000000
--- a/fix-memory-leak-produced-by-batch-async-find_jobs-me.patch
+++ /dev/null
@@ -1,189 +0,0 @@
-From 00c538383e463febba492e74577ae64be80d4d00 Mon Sep 17 00:00:00 2001
-From: Mihai Dinca
-Date: Mon, 16 Sep 2019 11:27:30 +0200
-Subject: [PATCH] Fix memory leak produced by batch async find_jobs
- mechanism (bsc#1140912)
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Multiple fixes:
-
-- use different JIDs per find_job
-- fix bug in detection of find_job returns
-- fix timeout passed from request payload
-- better cleanup at the end of batching
-
-Co-authored-by: Pablo Suárez Hernández
----
- salt/cli/batch_async.py | 59 ++++++++++++++++++++++++++++-------------
- salt/client/__init__.py | 1 +
- salt/master.py | 2 --
- 3 files changed, 41 insertions(+), 21 deletions(-)
-
-diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
-index 7225491228..388b709416 100644
---- a/salt/cli/batch_async.py
-+++ b/salt/cli/batch_async.py
-@@ -73,6 +73,7 @@ class BatchAsync:
-         self.done_minions = set()
-         self.active = set()
-         self.initialized = False
-+        self.jid_gen = jid_gen
-         self.ping_jid = jid_gen()
-         self.batch_jid = jid_gen()
-         self.find_job_jid = jid_gen()
-@@ -91,14 +92,11 @@ class BatchAsync:
-     def __set_event_handler(self):
-         ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid)
-         batch_return_pattern = "salt/job/{}/ret/*".format(self.batch_jid)
--        find_job_return_pattern = "salt/job/{}/ret/*".format(self.find_job_jid)
-         self.event.subscribe(ping_return_pattern, match_type="glob")
-         self.event.subscribe(batch_return_pattern, match_type="glob")
--        self.event.subscribe(find_job_return_pattern, match_type="glob")
--        self.event.patterns = {
-+        self.patterns = {
-             (ping_return_pattern, "ping_return"),
-             (batch_return_pattern, "batch_run"),
--            (find_job_return_pattern, "find_job_return"),
-         }
-         self.event.set_event_handler(self.__event_handler)
-
-@@ -106,7 +104,7 @@ class BatchAsync:
-         if not self.event:
-             return
-         mtag, data = self.event.unpack(raw, self.event.serial)
--        for (pattern, op) in self.event.patterns:
-+        for (pattern, op) in self.patterns:
-             if fnmatch.fnmatch(mtag, pattern):
-                 minion = data["id"]
-                 if op == "ping_return":
-@@ -114,7 +112,8 @@ class BatchAsync:
-                     if self.targeted_minions == self.minions:
-                         self.event.io_loop.spawn_callback(self.start_batch)
-                 elif op == "find_job_return":
--                    self.find_job_returned.add(minion)
-+                    if data.get("return", None):
-+                        self.find_job_returned.add(minion)
-                 elif op == "batch_run":
-                     if minion in self.active:
-                         self.active.remove(minion)
-@@ -134,7 +133,11 @@ class BatchAsync:
-         return set(list(to_run)[:next_batch_size])
-
-     @tornado.gen.coroutine
--    def check_find_job(self, batch_minions):
-+    def check_find_job(self, batch_minions, jid):
-+        find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
-+        self.event.unsubscribe(find_job_return_pattern, match_type="glob")
-+        self.patterns.remove((find_job_return_pattern, "find_job_return"))
-+
-         timedout_minions = batch_minions.difference(self.find_job_returned).difference(
-             self.done_minions
-         )
-@@ -143,27 +146,39 @@ class BatchAsync:
-         running = batch_minions.difference(self.done_minions).difference(
-             self.timedout_minions
-         )
-+
-         if timedout_minions:
-             self.schedule_next()
-+
-         if running:
-+            self.find_job_returned = self.find_job_returned.difference(running)
-             self.event.io_loop.add_callback(self.find_job, running)
-
-     @tornado.gen.coroutine
-     def find_job(self, minions):
--        not_done = minions.difference(self.done_minions)
--        ping_return = yield self.local.run_job_async(
--            not_done,
--            "saltutil.find_job",
--            [self.batch_jid],
--            "list",
--            gather_job_timeout=self.opts["gather_job_timeout"],
--            jid=self.find_job_jid,
--            **self.eauth
--        )
--        self.event.io_loop.call_later(
--            self.opts["gather_job_timeout"], self.check_find_job, not_done
-+        not_done = minions.difference(self.done_minions).difference(
-+            self.timedout_minions
-         )
-
-+        if not_done:
-+            jid = self.jid_gen()
-+            find_job_return_pattern = "salt/job/{}/ret/*".format(jid)
-+            self.patterns.add((find_job_return_pattern, "find_job_return"))
-+            self.event.subscribe(find_job_return_pattern, match_type="glob")
-+
-+            ret = yield self.local.run_job_async(
-+                not_done,
-+                "saltutil.find_job",
-+                [self.batch_jid],
-+                "list",
-+                gather_job_timeout=self.opts["gather_job_timeout"],
-+                jid=jid,
-+                **self.eauth
-+            )
-+            self.event.io_loop.call_later(
-+                self.opts["gather_job_timeout"], self.check_find_job, not_done, jid
-+            )
-+
-     @tornado.gen.coroutine
-     def start(self):
-         self.__set_event_handler()
-@@ -211,6 +226,9 @@ class BatchAsync:
-         }
-         self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid))
-         self.event.remove_event_handler(self.__event_handler)
-+        for (pattern, label) in self.patterns:
-+            if label in ["ping_return", "batch_run"]:
-+                self.event.unsubscribe(pattern, match_type="glob")
-
-     def schedule_next(self):
-         if not self.scheduled:
-@@ -235,11 +253,14 @@ class BatchAsync:
-                     jid=self.batch_jid,
-                     metadata=self.metadata,
-                 )
-+
-                 self.event.io_loop.call_later(
-                     self.opts["timeout"], self.find_job, set(next_batch)
-                 )
-             except Exception as ex:
-+                log.error("Error in scheduling next batch: %s", ex)
-                 self.active = self.active.difference(next_batch)
-         else:
-             self.end_batch()
-         self.scheduled = False
-+        yield
-diff --git a/salt/client/__init__.py b/salt/client/__init__.py
-index 1e9f11df4c..cc8fd4048d 100644
---- a/salt/client/__init__.py
-+++ b/salt/client/__init__.py
-@@ -1776,6 +1776,7 @@ class LocalClient:
-             "key": self.key,
-             "tgt_type": tgt_type,
-             "ret": ret,
-+            "timeout": timeout,
-             "jid": jid,
-         }
-
-diff --git a/salt/master.py b/salt/master.py
-index b9bc1a7a67..7a99af357a 100644
---- a/salt/master.py
-+++ b/salt/master.py
-@@ -2232,8 +2232,6 @@ class ClearFuncs(TransportMethods):
-     def publish_batch(self, clear_load, minions, missing):
-         batch_load = {}
-         batch_load.update(clear_load)
--        import salt.cli.batch_async
--
-         batch = salt.cli.batch_async.BatchAsync(
-             self.local.opts,
-             functools.partial(self._prep_jid, clear_load, {}),
--- 
-2.29.2
-
-
diff --git a/fix-novendorchange-option-284.patch b/fix-novendorchange-option-284.patch
deleted file mode 100644
index 5c3178a..0000000
--- a/fix-novendorchange-option-284.patch
+++ /dev/null
@@ -1,191 +0,0 @@
-From 4123cf7b9428af1442f4aa0a54489e5c0deb4aaa Mon Sep 17 00:00:00 2001
-From: Martin Seidl
-Date: Tue, 27 Oct 2020 16:12:29 +0100
-Subject: [PATCH] Fix novendorchange option (#284)
-
-* Fixed novendorchange handling in zypperpkg
-
-* refactor handling of novendorchange and fix tests
----
- salt/modules/zypperpkg.py | 21 +++---
- tests/unit/modules/test_zypperpkg.py | 100 ++++++++++++++++++++++++++-
- 2 files changed, 108 insertions(+), 13 deletions(-)
-
-diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index 5369a0342e..d06c265202 100644
---- a/salt/modules/zypperpkg.py
-+++ b/salt/modules/zypperpkg.py
-@@ -1707,7 +1707,7 @@ def upgrade(
-     dryrun=False,
-     dist_upgrade=False,
-     fromrepo=None,
--    novendorchange=False,
-+    novendorchange=True,
-     skip_verify=False,
-     no_recommends=False,
-     root=None,
-@@ -1794,19 +1794,18 @@ def upgrade(
-         log.info("Targeting repos: %s", fromrepo)
-
-     if dist_upgrade:
--        if novendorchange:
--            # TODO: Grains validation should be moved to Zypper class
--            if __grains__["osrelease_info"][0] > 11:
-+        # TODO: Grains validation should be moved to Zypper class
-+        if __grains__["osrelease_info"][0] > 11:
-+            if novendorchange:
-                 cmd_update.append("--no-allow-vendor-change")
-                 log.info("Disabling vendor changes")
-             else:
--                log.warning(
--                    "Disabling vendor changes is not supported on this Zypper version"
--                )
--
--        if no_recommends:
--            cmd_update.append("--no-recommends")
--            log.info("Disabling recommendations")
-+                cmd_update.append("--allow-vendor-change")
-+                log.info("Enabling vendor changes")
-+        else:
-+            log.warning(
Zypper version" -+ ) - - if no_recommends: - cmd_update.append("--no-recommends") -diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py -index a60b1546c6..eaa4d9a76a 100644 ---- a/tests/unit/modules/test_zypperpkg.py -+++ b/tests/unit/modules/test_zypperpkg.py -@@ -642,7 +642,9 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - ret = zypper.upgrade(dist_upgrade=True) - self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) - zypper_mock.assert_any_call( -- "dist-upgrade", "--auto-agree-with-licenses" -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--no-allow-vendor-change", - ) - - with patch( -@@ -660,6 +662,76 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "--debug-solver", - ) - -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False -+ ) -+ zypper_mock.assert_any_call( -+ "update", -+ "--auto-agree-with-licenses", -+ "--repo", -+ "Dummy", -+ "--repo", -+ "Dummy2", -+ ) -+ -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=True, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--no-allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--no-allow-vendor-change", -+ ) -+ -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ dryrun=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=False, -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ ) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--dry-run", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ "--debug-solver", -+ ) -+ - with patch( - "salt.modules.zypperpkg.list_pkgs", - MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]), -@@ -728,6 +800,26 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin): - "--no-allow-vendor-change", - ) - -+ with patch( -+ "salt.modules.zypperpkg.list_pkgs", -+ MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]), -+ ): -+ ret = zypper.upgrade( -+ dist_upgrade=True, -+ fromrepo=["Dummy", "Dummy2"], -+ novendorchange=False, -+ ) -+ self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}}) -+ zypper_mock.assert_any_call( -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--from", -+ "Dummy", -+ "--from", -+ "Dummy2", -+ "--allow-vendor-change", -+ ) -+ - def test_upgrade_kernel(self): - """ - Test kernel package upgrade success. -@@ -815,7 +907,11 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
- self.assertEqual(cmd_exc.exception.info["changes"], {}) - self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out) - zypper_mock.noraise.call.assert_called_with( -- "dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY" -+ "dist-upgrade", -+ "--auto-agree-with-licenses", -+ "--from", -+ "DUMMY", -+ "--no-allow-vendor-change", - ) - - def test_upgrade_available(self): --- -2.29.2 - - diff --git a/fix-onlyif-unless-when-multiple-conditions-bsc-11808.patch b/fix-onlyif-unless-when-multiple-conditions-bsc-11808.patch deleted file mode 100644 index 9bc9d4b..0000000 --- a/fix-onlyif-unless-when-multiple-conditions-bsc-11808.patch +++ /dev/null @@ -1,287 +0,0 @@ -From 828ca76e2083d87ace12b488277e51d4e30c8c9a Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 21 Jan 2021 11:19:07 +0000 -Subject: [PATCH] Fix onlyif/unless when multiple conditions - (bsc#1180818) - -Add unit tests to ensure right onlyif/unless behavior - -Add extra unit test to cover valid cases - -Add unit tests cases to ensure proper onlyif/unless behavior - -Change tests to use 'exit' cmd and work outside Linux ---- - salt/state.py | 20 ++++-- - tests/unit/test_state.py | 148 ++++++++++++++++++++++++++++++++++++++- - 2 files changed, 163 insertions(+), 5 deletions(-) - -diff --git a/salt/state.py b/salt/state.py -index cc6db7e1b2..070a914636 100644 ---- a/salt/state.py -+++ b/salt/state.py -@@ -947,8 +947,10 @@ class State: - "result": True, - } - ) -+ return False - elif cmd == 0: - ret.update({"comment": "onlyif condition is true", "result": False}) -+ return True - - for entry in low_data_onlyif: - if isinstance(entry, str): -@@ -960,7 +962,8 @@ class State: - # Command failed, notify onlyif to skip running the item - cmd = 100 - log.debug("Last command return code: %s", cmd) -- _check_cmd(cmd) -+ if not _check_cmd(cmd): -+ return ret - elif isinstance(entry, dict): - if "fun" not in entry: - ret["comment"] = "no `fun` argument in onlyif: {}".format(entry) -@@ -972,7 +975,8 @@ class State: - if get_return: - result = salt.utils.data.traverse_dict_and_list(result, get_return) - if self.state_con.get("retcode", 0): -- _check_cmd(self.state_con["retcode"]) -+ if not _check_cmd(self.state_con["retcode"]): -+ return ret - elif not result: - ret.update( - { -@@ -981,6 +985,7 @@ class State: - "result": True, - } - ) -+ return ret - else: - ret.update({"comment": "onlyif condition is true", "result": False}) - -@@ -991,6 +996,7 @@ class State: - "result": False, - } - ) -+ return ret - return ret - - def _run_check_unless(self, low_data, cmd_opts): -@@ -1013,8 +1019,10 @@ class State: - "result": True, - } - ) -+ return False - elif cmd != 0: - ret.update({"comment": "unless condition is false", "result": False}) -+ return True - - for entry in low_data_unless: - if isinstance(entry, str): -@@ -1026,7 +1034,8 @@ class State: - except CommandExecutionError: - # Command failed, so notify unless to skip the item - cmd = 0 -- _check_cmd(cmd) -+ if not _check_cmd(cmd): -+ return ret - elif isinstance(entry, dict): - if "fun" not in entry: - ret["comment"] = "no `fun` argument in unless: {}".format(entry) -@@ -1038,7 +1047,8 @@ class State: - if get_return: - result = salt.utils.data.traverse_dict_and_list(result, get_return) - if self.state_con.get("retcode", 0): -- _check_cmd(self.state_con["retcode"]) -+ if not _check_cmd(self.state_con["retcode"]): -+ return ret - elif result: - ret.update( - { -@@ -1047,6 +1057,7 @@ class State: - "result": True, - } - ) -+ return 
-+                    return ret
-                 else:
-                     ret.update(
-                         {"comment": "unless condition is false", "result": False}
-@@ -1058,6 +1069,7 @@ class State:
-                         "result": False,
-                     }
-                 )
-+                return ret
-
-         # No reason to stop, return ret
-         return ret
-diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py
-index b1bcf8fe83..95018a9cf3 100644
---- a/tests/unit/test_state.py
-+++ b/tests/unit/test_state.py
-@@ -205,6 +205,152 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
-             )
-             self.assertEqual(expected_result, return_result)
-
-+    def test_verify_unless_list_cmd(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check unless",
-+            "unless": ["exit 0", "exit 1"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {
-+            "comment": "unless condition is true",
-+            "result": True,
-+            "skip_watch": True,
-+        }
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_unless(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_unless_list_cmd_different_order(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check unless",
-+            "unless": ["exit 1", "exit 0"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {
-+            "comment": "unless condition is true",
-+            "result": True,
-+            "skip_watch": True,
-+        }
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_unless(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_onlyif_list_cmd_different_order(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check onlyif",
-+            "onlyif": ["exit 1", "exit 0"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {
-+            "comment": "onlyif condition is false",
-+            "result": True,
-+            "skip_watch": True,
-+        }
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_onlyif(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_unless_list_cmd_valid(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check unless",
-+            "unless": ["exit 1", "exit 1"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {"comment": "unless condition is false", "result": False}
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_unless(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_onlyif_list_cmd_valid(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check onlyif",
-+            "onlyif": ["exit 0", "exit 0"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {"comment": "onlyif condition is true", "result": False}
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_onlyif(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_unless_list_cmd_invalid(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check unless",
-+            "unless": ["exit 0", "exit 0"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {
-+            "comment": "unless condition is true",
-+            "result": True,
-+            "skip_watch": True,
-+        }
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_unless(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-+    def test_verify_onlyif_list_cmd_invalid(self):
-+        low_data = {
-+            "state": "cmd",
-+            "name": 'echo "something"',
-+            "__sls__": "tests.cmd",
-+            "__env__": "base",
-+            "__id__": "check onlyif",
-+            "onlyif": ["exit 1", "exit 1"],
-+            "order": 10001,
-+            "fun": "run",
-+        }
-+        expected_result = {
-+            "comment": "onlyif condition is false",
-+            "result": True,
-+            "skip_watch": True,
-+        }
-+        with patch("salt.state.State._gather_pillar") as state_patch:
-+            minion_opts = self.get_temp_config("minion")
-+            state_obj = salt.state.State(minion_opts)
-+            return_result = state_obj._run_check_onlyif(low_data, {})
-+            self.assertEqual(expected_result, return_result)
-+
-     def test_verify_unless_parse(self):
-         low_data = {
-             "unless": [{"fun": "test.arg", "args": ["arg1", "arg2"]}],
-@@ -376,7 +522,7 @@ class StateCompilerTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
-             "__sls__": "tests.cmd",
-             "__env__": "base",
-             "__id__": "check onlyif",
--            "onlyif": ["/bin/true", "/bin/false"],
-+            "onlyif": ["exit 0", "exit 1"],
-             "order": 10001,
-             "fun": "run",
-         }
--- 
-2.29.2
-
-
diff --git a/fix-regression-on-cmd.run-when-passing-tuples-as-cmd.patch b/fix-regression-on-cmd.run-when-passing-tuples-as-cmd.patch
deleted file mode 100644
index 802e424..0000000
--- a/fix-regression-on-cmd.run-when-passing-tuples-as-cmd.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From d8538a57553d94290870671db1d5a4fcd4d7e709 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
- <psuarezhernandez@suse.com>
-Date: Fri, 26 Feb 2021 09:15:03 +0000
-Subject: [PATCH] Fix regression on cmd.run when passing tuples as cmd
- (bsc#1182740)
-
-(cherry picked from commit 9a76246adedb60e24a75682077654a352a965cb9)
---
- salt/modules/cmdmod.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
-index bbc303c3f8..f24e7cc9ae 100644
---- a/salt/modules/cmdmod.py
-+++ b/salt/modules/cmdmod.py
-@@ -78,7 +78,7 @@ def __virtual__():
-
-
- def _log_cmd(cmd):
--    if not isinstance(cmd, list):
-+    if isinstance(cmd, str):
-         return cmd.split()[0].strip()
-     return cmd[0].strip()
-
--- 
-2.30.1
-
-
diff --git a/fix-the-removed-six.itermitems-and-six.-_type-262.patch b/fix-the-removed-six.itermitems-and-six.-_type-262.patch
deleted file mode 100644
index 9781d37..0000000
--- a/fix-the-removed-six.itermitems-and-six.-_type-262.patch
+++ /dev/null
@@ -1,180 +0,0 @@
-From 01e2e60a5aba609d219b73f1018f12517a294a64 Mon Sep 17 00:00:00 2001
-From: Cedric Bosdonnat
-Date: Tue, 15 Sep 2020 13:46:06 +0200
-Subject: [PATCH] Fix the removed six.itermitems and six.*_type* (#262)
-
-* Fix the removed six.itermitems and six.*_type*
-
-Upstream py2 to py3 cleanup tool removes
a bunch of six calls that we -still need when backporting since our Salt minion might still be running -on python 2.7. - -* fixup! Fix the removed six.itermitems and six.*_type* ---- - salt/_compat.py | 25 ++++++++++++++++--------- - salt/modules/virt.py | 11 ++++------- - salt/states/virt.py | 1 + - salt/utils/xmlutil.py | 3 ++- - tests/unit/modules/test_virt.py | 2 +- - 5 files changed, 24 insertions(+), 18 deletions(-) - -diff --git a/salt/_compat.py b/salt/_compat.py -index d9425523cf..de100de3fa 100644 ---- a/salt/_compat.py -+++ b/salt/_compat.py -@@ -7,6 +7,7 @@ Salt compatibility code - import binascii - import logging - import sys -+import xml.sax.saxutils as saxutils - - from salt.exceptions import SaltException - from salt.ext.six import binary_type, integer_types, string_types, text_type -@@ -261,21 +262,25 @@ def ip_address(address): - try: - return ipaddress.IPv4Address(address) - except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: -- log.debug('Error while parsing IPv4 address: %s', address) -+ log.debug("Error while parsing IPv4 address: %s", address) - log.debug(err) - - try: - return IPv6AddressScoped(address) - except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: -- log.debug('Error while parsing IPv6 address: %s', address) -+ log.debug("Error while parsing IPv6 address: %s", address) - log.debug(err) - - if isinstance(address, bytes): -- raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. ' -- 'Did you pass in a bytes (str in Python 2) instead ' -- 'of a unicode object?'.format(repr(address))) -+ raise ipaddress.AddressValueError( -+ "{} does not appear to be an IPv4 or IPv6 address. " -+ "Did you pass in a bytes (str in Python 2) instead " -+ "of a unicode object?".format(repr(address)) -+ ) - -- raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address))) -+ raise ValueError( -+ "{} does not appear to be an IPv4 or IPv6 address".format(repr(address)) -+ ) - - - def ip_interface(address): -@@ -302,16 +307,18 @@ def ip_interface(address): - try: - return ipaddress.IPv4Interface(address) - except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: -- log.debug('Error while getting IPv4 interface for address %s', address) -+ log.debug("Error while getting IPv4 interface for address %s", address) - log.debug(err) - - try: - return ipaddress.IPv6Interface(address) - except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err: -- log.debug('Error while getting IPv6 interface for address %s', address) -+ log.debug("Error while getting IPv6 interface for address %s", address) - log.debug(err) - -- raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address)) -+ raise ValueError( -+ "{} does not appear to be an IPv4 or IPv6 interface".format(address) -+ ) - - - if ipaddress: -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index ec40f08359..c042738370 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -88,8 +88,6 @@ import string # pylint: disable=deprecated-module - import subprocess - import sys - import time --from xml.etree import ElementTree --from xml.sax import saxutils - - import jinja2.exceptions - import salt.utils.files -@@ -99,8 +97,9 @@ import salt.utils.stringutils - import salt.utils.templates - import salt.utils.xmlutil as xmlutil - import salt.utils.yaml --from salt._compat import ipaddress -+from salt._compat import ElementTree, ipaddress, saxutils - from salt.exceptions import 
CommandExecutionError, SaltInvocationError -+from salt.ext import six - from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin - from salt.ext.six.moves.urllib.parse import urlparse, urlunparse - from salt.utils.virt import check_remote, download_remote -@@ -1516,7 +1515,7 @@ def _handle_remote_boot_params(orig_boot): - """ - saltinst_dir = None - new_boot = orig_boot.copy() -- keys = orig_boot.keys() -+ keys = set(orig_boot.keys()) - cases = [ - {"efi"}, - {"kernel", "initrd", "efi"}, -@@ -2559,9 +2558,7 @@ def update( - - # Attaching device - if source_file: -- ElementTree.SubElement( -- updated_disk, "source", attrib={"file": source_file} -- ) -+ ElementTree.SubElement(updated_disk, "source", file=source_file) - - changes["disk"]["new"] = new_disks - -diff --git a/salt/states/virt.py b/salt/states/virt.py -index b45cf72ed3..df7ebb63e6 100644 ---- a/salt/states/virt.py -+++ b/salt/states/virt.py -@@ -22,6 +22,7 @@ import salt.utils.files - import salt.utils.stringutils - import salt.utils.versions - from salt.exceptions import CommandExecutionError, SaltInvocationError -+from salt.ext import six - - try: - import libvirt # pylint: disable=import-error -diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py -index b9f047820b..111ca155d4 100644 ---- a/salt/utils/xmlutil.py -+++ b/salt/utils/xmlutil.py -@@ -7,6 +7,7 @@ import string # pylint: disable=deprecated-module - from xml.etree import ElementTree - - import salt.utils.data -+from salt.ext import six - - - def _conv_name(x): -@@ -160,7 +161,7 @@ def clean_node(parent_map, node, ignored=None): - has_text = node.text is not None and node.text.strip() - parent = parent_map.get(node) - if ( -- len(node.attrib.keys() - (ignored or [])) == 0 -+ len(set(node.attrib.keys()) - set(ignored or [])) == 0 - and not list(node) - and not has_text - ): -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index 4775fec31f..4a4c0395a7 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -45,7 +45,7 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors - """ - - def __init__(self, msg): -- super().__init__(msg) -+ super(Exception, self).__init__(msg) - self.msg = msg - - def get_error_message(self): --- -2.29.2 - - diff --git a/fix-unit-test-for-grains-core.patch b/fix-unit-test-for-grains-core.patch deleted file mode 100644 index 95364fd..0000000 --- a/fix-unit-test-for-grains-core.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 192bac1ae2f20b098384264c8802034a340cd124 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Thu, 11 Oct 2018 16:20:40 +0200 -Subject: [PATCH] Fix unit test for grains core - ---- - tests/unit/grains/test_core.py | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) - -diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 34aaa4f5bc..7dbf34deac 100644 ---- a/tests/unit/grains/test_core.py -+++ b/tests/unit/grains/test_core.py -@@ -59,10 +59,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - with salt.utils.files.fopen( - os.path.join(OS_RELEASE_DIR, "ubuntu-17.10") - ) as os_release_file: -- os_release_content = os_release_file.read() -- with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)): -+ os_release_content = os_release_file.readlines() -+ with patch("salt.utils.files.fopen", mock_open()) as os_release_file: -+ os_release_file.return_value.__iter__.return_value = os_release_content - os_release = core._parse_os_release( -- 
"/etc/os-release", "/usr/lib/os-release" -+ ["/etc/os-release", "/usr/lib/os-release"] - ) - self.assertEqual( - os_release, -@@ -172,7 +173,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - def test_missing_os_release(self): - with patch("salt.utils.files.fopen", mock_open(read_data={})): - os_release = core._parse_os_release( -- "/etc/os-release", "/usr/lib/os-release" -+ ["/etc/os-release", "/usr/lib/os-release"] - ) - self.assertEqual(os_release, {}) - --- -2.29.2 - - diff --git a/fix-unit-tests-for-batch-async-after-refactor.patch b/fix-unit-tests-for-batch-async-after-refactor.patch deleted file mode 100644 index c3d6b36..0000000 --- a/fix-unit-tests-for-batch-async-after-refactor.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 09a871c197be4933475ee4582755d9b0cb5a700e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Wed, 4 Mar 2020 10:13:43 +0000 -Subject: [PATCH] Fix unit tests for batch async after refactor - ---- - tests/unit/cli/test_batch_async.py | 20 +++++++++++++++++++- - 1 file changed, 19 insertions(+), 1 deletion(-) - -diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py -index b04965268a..dcee9a87bd 100644 ---- a/tests/unit/cli/test_batch_async.py -+++ b/tests/unit/cli/test_batch_async.py -@@ -120,9 +120,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - self.batch.timedout_minions = {"bar"} - self.batch.event = MagicMock() - self.batch.metadata = {"mykey": "myvalue"} -+ old_event = self.batch.event - self.batch.end_batch() - self.assertEqual( -- self.batch.event.fire_event.call_args[0], -+ old_event.fire_event.call_args[0], - ( - { - "available_minions": {"foo", "bar"}, -@@ -140,6 +141,23 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase): - event = MagicMock() - batch.event = event - batch.__del__() -+ self.assertEqual(batch.local, None) -+ self.assertEqual(batch.event, None) -+ self.assertEqual(batch.ioloop, None) -+ -+ def test_batch_close_safe(self): -+ batch = BatchAsync(MagicMock(), MagicMock(), MagicMock()) -+ event = MagicMock() -+ batch.event = event -+ batch.patterns = { -+ ("salt/job/1234/ret/*", "find_job_return"), -+ ("salt/job/4321/ret/*", "find_job_return"), -+ } -+ batch.close_safe() -+ self.assertEqual(batch.local, None) -+ self.assertEqual(batch.event, None) -+ self.assertEqual(batch.ioloop, None) -+ self.assertEqual(len(event.unsubscribe.mock_calls), 2) - self.assertEqual(len(event.remove_event_handler.mock_calls), 1) - - @tornado.testing.gen_test --- -2.29.2 - - diff --git a/fix-virt.update-with-cpu-defined-263.patch b/fix-virt.update-with-cpu-defined-263.patch deleted file mode 100644 index 450d941..0000000 --- a/fix-virt.update-with-cpu-defined-263.patch +++ /dev/null @@ -1,31 +0,0 @@ -From c05d571058b9520dbaf4aba3de001b1aefe8e2c2 Mon Sep 17 00:00:00 2001 -From: Cedric Bosdonnat -Date: Tue, 15 Sep 2020 16:03:30 +0200 -Subject: [PATCH] Fix virt.update with cpu defined (#263) - -In case the cpu was defined the rest of the definition wasn't completely -updated. 
---- - salt/modules/virt.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index c042738370..c1a73fcb7f 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -2441,9 +2441,9 @@ def update( - data = {k: v for k, v in locals().items() if bool(v)} - if boot_dev: - data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} -- need_update = need_update or salt.utils.xmlutil.change_xml( -+ need_update = salt.utils.xmlutil.change_xml( - desc, data, params_mapping -- ) -+ ) or need_update - - # Update the XML definition with the new disks and diff changes - devices_node = desc.find("devices") --- -2.29.2 - - diff --git a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch index 8670aa3..9e4c705 100644 --- a/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch +++ b/fix-wrong-test_mod_del_repo_multiline_values-test-af.patch @@ -1,35 +1,35 @@ -From f3ac041e34952a4b753e4afc9dc4b6adaa1d0ff2 Mon Sep 17 00:00:00 2001 +From e3ef9165b66c3d74a3c3dbfe82ba58f7fa1613e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Thu, 12 Mar 2020 13:26:51 +0000 -Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test - after rebase +Subject: [PATCH] Fix wrong test_mod_del_repo_multiline_values test after + rebase --- - tests/integration/modules/test_pkg.py | 63 ++++----------------------- - 1 file changed, 8 insertions(+), 55 deletions(-) + tests/integration/modules/test_pkg.py | 34 +++++++++++++++++++++------ + 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py -index 3ece73074b..933755a9ec 100644 +index ccf69998fc..6a84ea0bc3 100644 --- a/tests/integration/modules/test_pkg.py +++ b/tests/integration/modules/test_pkg.py -@@ -143,6 +143,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): +@@ -138,6 +138,10 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): self.run_function("pkg.del_repo", [repo]) - @slowTest -+ @destructiveTest -+ @requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo") -+ @requires_network() + @pytest.mark.slow_test ++ @pytest.mark.destructive_test ++ @pytest.mark.requires_salt_modules("pkg.mod_repo", "pkg.del_repo", "pkg.get_repo") ++ @pytest.mark.requires_network() + @requires_system_grains def test_mod_del_repo_multiline_values(self): """ test modifying and deleting a software repository defined with multiline values -@@ -150,10 +154,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): +@@ -145,10 +149,13 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): os_grain = self.run_function("grains.item", ["os"])["os"] repo = None try: -- if os_grain in ["CentOS", "RedHat"]: -+ if os_grain in ["CentOS", "RedHat", "SUSE"]: +- if os_grain in ["CentOS", "RedHat", "VMware Photon OS"]: ++ if os_grain in ["CentOS", "RedHat", "VMware Photon OS", "SUSE"]: my_baseurl = ( "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" ) @@ -39,68 +39,44 @@ index 3ece73074b..933755a9ec 100644 expected_get_repo_baseurl = ( "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" ) -@@ -207,60 +214,6 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): - if repo is not None: - self.run_function("pkg.del_repo", [repo]) - -- def test_mod_del_repo_multiline_values(self): -- """ -- test modifying and deleting a software repository defined with 
multiline values -- """ -- os_grain = self.run_function("grains.item", ["os"])["os"] -- repo = None -- try: -- if os_grain in ["CentOS", "RedHat", "SUSE"]: -- my_baseurl = ( -- "http://my.fake.repo/foo/bar/\n http://my.fake.repo.alt/foo/bar/" -- ) -- expected_get_repo_baseurl_zypp = ( -- "http://my.fake.repo/foo/bar/%0A%20http://my.fake.repo.alt/foo/bar/" -- ) -- expected_get_repo_baseurl = ( -- "http://my.fake.repo/foo/bar/\nhttp://my.fake.repo.alt/foo/bar/" -- ) -- major_release = int( -- self.run_function("grains.item", ["osmajorrelease"])[ -- "osmajorrelease" -- ] -- ) -- repo = "fakerepo" -- name = "Fake repo for RHEL/CentOS/SUSE" -- baseurl = my_baseurl -- gpgkey = "https://my.fake.repo/foo/bar/MY-GPG-KEY.pub" -- failovermethod = "priority" -- gpgcheck = 1 -- enabled = 1 -- ret = self.run_function( -- "pkg.mod_repo", -- [repo], -- name=name, -- baseurl=baseurl, -- gpgkey=gpgkey, -- gpgcheck=gpgcheck, -- enabled=enabled, -- failovermethod=failovermethod, -- ) +@@ -174,17 +181,30 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): + enabled=enabled, + failovermethod=failovermethod, + ) - # return data from pkg.mod_repo contains the file modified at - # the top level, so use next(iter(ret)) to get that key -- self.assertNotEqual(ret, {}) + self.assertNotEqual(ret, {}) - repo_info = ret[next(iter(ret))] -- self.assertIn(repo, repo_info) ++ repo_info = {repo: ret} + self.assertIn(repo, repo_info) - self.assertEqual(repo_info[repo]["baseurl"], my_baseurl) -- ret = self.run_function("pkg.get_repo", [repo]) ++ if os_grain == "SUSE": ++ self.assertEqual( ++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp ++ ) ++ else: ++ self.assertEqual(repo_info[repo]["baseurl"], my_baseurl) + ret = self.run_function("pkg.get_repo", [repo]) - self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) -- self.run_function("pkg.mod_repo", [repo]) -- ret = self.run_function("pkg.get_repo", [repo]) ++ if os_grain == "SUSE": ++ self.assertEqual( ++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp ++ ) ++ else: ++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) + self.run_function("pkg.mod_repo", [repo]) + ret = self.run_function("pkg.get_repo", [repo]) - self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) -- finally: -- if repo is not None: -- self.run_function("pkg.del_repo", [repo]) -- - @requires_salt_modules("pkg.owner") - def test_owner(self): - """ ++ if os_grain == "SUSE": ++ self.assertEqual( ++ repo_info[repo]["baseurl"], expected_get_repo_baseurl_zypp ++ ) ++ else: ++ self.assertEqual(ret["baseurl"], expected_get_repo_baseurl) + finally: + if repo is not None: + self.run_function("pkg.del_repo", [repo]) -- -2.29.2 +2.33.0 diff --git a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch b/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch deleted file mode 100644 index abac036..0000000 --- a/fix-zypper-pkg.list_pkgs-expectation-and-dpkg-mockin.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 81f38c8cb16634b2c86b3e1e7c745870f90771d0 Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Thu, 13 Jun 2019 17:48:55 +0200 -Subject: [PATCH] Fix zypper pkg.list_pkgs expectation and dpkg mocking - ---- - tests/unit/modules/test_dpkg_lowpkg.py | 22 ++++++++++++++++------ - 1 file changed, 16 insertions(+), 6 deletions(-) - -diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py -index 160bbcd5b1..dadbc30dfa 100644 ---- a/tests/unit/modules/test_dpkg_lowpkg.py -+++ b/tests/unit/modules/test_dpkg_lowpkg.py -@@ -308,9 
+308,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - dpkg.bin_pkg_info("package.deb")["name"], "package_name" - ) - -- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) -- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) -- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) -+ @patch( -+ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail", -+ MagicMock(return_value=dselect_pkg), -+ ) -+ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) -+ @patch( -+ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3") -+ ) - def test_info(self): - """ - Test info -@@ -359,9 +364,14 @@ class DpkgTestCase(TestCase, LoaderModuleMockMixin): - ) - assert pkg_data["license"] == "BSD v3" - -- @patch("salt.modules.dpkg._get_pkg_ds_avail", MagicMock(return_value=dselect_pkg)) -- @patch("salt.modules.dpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) -- @patch("salt.modules.dpkg._get_pkg_license", MagicMock(return_value="BSD v3")) -+ @patch( -+ "salt.modules.dpkg_lowpkg._get_pkg_ds_avail", -+ MagicMock(return_value=dselect_pkg), -+ ) -+ @patch("salt.modules.dpkg_lowpkg._get_pkg_info", MagicMock(return_value=pkgs_info)) -+ @patch( -+ "salt.modules.dpkg_lowpkg._get_pkg_license", MagicMock(return_value="BSD v3") -+ ) - def test_info_attr(self): - """ - Test info with 'attr' parameter --- -2.29.2 - - diff --git a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch b/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch deleted file mode 100644 index 766c8ec..0000000 --- a/fix-zypper.list_pkgs-to-be-aligned-with-pkg-state.patch +++ /dev/null @@ -1,58 +0,0 @@ -From b9ba6875945e1ffafdeb862d8b2ac7fccd9cccf5 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Mon, 25 Jun 2018 13:06:40 +0100 -Subject: [PATCH] Fix zypper.list_pkgs to be aligned with pkg state - -Handle packages with multiple version properly with zypper - -Add unit test coverage for multiple version packages on Zypper - -Fix '_find_remove_targets' after aligning Zypper with pkg state ---- - salt/states/pkg.py | 21 --------------------- - 1 file changed, 21 deletions(-) - -diff --git a/salt/states/pkg.py b/salt/states/pkg.py -index a1b2a122bb..f7327a33e3 100644 ---- a/salt/states/pkg.py -+++ b/salt/states/pkg.py -@@ -477,16 +477,6 @@ def _find_remove_targets( - - if __grains__["os"] == "FreeBSD" and origin: - cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname] -- elif __grains__["os_family"] == "Suse": -- # On SUSE systems. Zypper returns packages without "arch" in name -- try: -- namepart, archpart = pkgname.rsplit(".", 1) -- except ValueError: -- cver = cur_pkgs.get(pkgname, []) -- else: -- if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",): -- pkgname = namepart -- cver = cur_pkgs.get(pkgname, []) - else: - cver = cur_pkgs.get(pkgname, []) - -@@ -930,17 +920,6 @@ def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None): - cver = new_pkgs.get(pkgname.split("%")[0]) - elif __grains__["os_family"] == "Debian": - cver = new_pkgs.get(pkgname.split("=")[0]) -- elif __grains__["os_family"] == "Suse": -- # On SUSE systems. 
Zypper returns packages without "arch" in name -- try: -- namepart, archpart = pkgname.rsplit(".", 1) -- except ValueError: -- cver = new_pkgs.get(pkgname) -- else: -- if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",): -- cver = new_pkgs.get(namepart) -- else: -- cver = new_pkgs.get(pkgname) - else: - cver = new_pkgs.get(pkgname) - if not cver and pkgname in new_caps: --- -2.29.2 - - diff --git a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch b/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch deleted file mode 100644 index 08a9c4b..0000000 --- a/fixed-bug-lvm-has-no-parttion-type.-the-scipt-later-.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 17ad05e3cbb3718ca12cef20600be81aa5d42d33 Mon Sep 17 00:00:00 2001 -From: tyl0re -Date: Wed, 17 Jul 2019 10:13:09 +0200 -Subject: [PATCH] Fixed Bug LVM has no Parttion Type. the Scipt Later - it is checked if fs_type: cmd = ('parted', '-m', '-s', '--', device, - 'mkpart', part_type, fs_type, start, end) else: cmd = ('parted', '-m', '-s', - '--', device, 'mkpart', part_type, start, end) But never reached. The Check - was in earlier Versions with parted.py 443 if fs_type and fs_type not in - set(['ext2', 'fat32', 'fat16', 'linux-swap', 'reiserfs', 444 'hfs', 'hfs+', - 'hfsx', 'NTFS', 'ufs', 'xfs', 'zfs']): - -So the check on not defined fs_type is missing ---- - salt/modules/parted_partition.py | 19 ++++++++++++++++--- - 1 file changed, 16 insertions(+), 3 deletions(-) - -diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py -index 015d4cbc29..bb34cd58b4 100644 ---- a/salt/modules/parted_partition.py -+++ b/salt/modules/parted_partition.py -@@ -552,10 +552,23 @@ def mkpartfs(device, part_type, fs_type=None, start=None, end=None): - - .. code-block:: bash - -- salt '*' partition.mkpartfs /dev/sda primary fs_type=fat32 start=0 end=639 -- salt '*' partition.mkpartfs /dev/sda primary start=0 end=639 -+ salt '*' partition.mkpartfs /dev/sda logical ext2 440 670 - """ -- out = mkpart(device, part_type, fs_type, start, end) -+ _validate_device(device) -+ -+ if part_type not in {"primary", "logical", "extended"}: -+ raise CommandExecutionError("Invalid part_type passed to partition.mkpartfs") -+ -+ if fs_type and not _is_fstype(fs_type): -+ raise CommandExecutionError("Invalid fs_type passed to partition.mkpartfs") -+ -+ _validate_partition_boundary(start) -+ _validate_partition_boundary(end) -+ -+ cmd = "parted -m -s -- {} mkpart {} {} {} {}".format( -+ device, part_type, fs_type, start, end -+ ) -+ out = __salt__["cmd.run"](cmd).splitlines() - return out - - --- -2.29.2 - - diff --git a/fixes-56144-to-enable-hotadd-profile-support.patch b/fixes-56144-to-enable-hotadd-profile-support.patch index 11aa9a8..f2d6edf 100644 --- a/fixes-56144-to-enable-hotadd-profile-support.patch +++ b/fixes-56144-to-enable-hotadd-profile-support.patch @@ -1,4 +1,4 @@ -From 5761a11227c8d78df62d1a1552a50c0a4b76ae33 Mon Sep 17 00:00:00 2001 +From 0def15837c3470f20ce85ec81e2c1d42cd933c23 Mon Sep 17 00:00:00 2001 From: nicholasmhughes Date: Fri, 14 Feb 2020 22:03:42 -0500 Subject: [PATCH] fixes #56144 to enable hotadd profile support @@ -9,7 +9,7 @@ Subject: [PATCH] fixes #56144 to enable hotadd profile support 2 files changed, 20 insertions(+) diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst -index e4cb607e8d..0ac7c255a8 100644 +index bbc5cdff11..1a18ebf226 100644 --- a/doc/topics/cloud/vmware.rst +++ b/doc/topics/cloud/vmware.rst @@ -457,6 +457,14 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or @@ 
-28,10 +28,10 @@ index e4cb607e8d..0ac7c255a8 100644 Specifies the additional configuration information for the virtual machine. This describes a set of modifications to the additional options. If the key is already diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py -index 5ebf448abc..edaca9618b 100644 +index 1e9943ad78..4999ca089f 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py -@@ -2824,6 +2824,12 @@ def create(vm_): +@@ -2821,6 +2821,12 @@ def create(vm_): win_run_once = config.get_cloud_config_value( "win_run_once", vm_, __opts__, search_global=False, default=None ) @@ -44,7 +44,7 @@ index 5ebf448abc..edaca9618b 100644 # Get service instance object si = _get_si() -@@ -3042,6 +3048,12 @@ def create(vm_): +@@ -3039,6 +3045,12 @@ def create(vm_): ) config_spec.deviceChange = specs["device_specs"] @@ -55,9 +55,9 @@ index 5ebf448abc..edaca9618b 100644 + config_spec.memoryHotAddEnabled = bool(mem_hot_add) + if extra_config: - for key, value in six.iteritems(extra_config): + for key, value in extra_config.items(): option = vim.option.OptionValue(key=key, value=value) -- -2.29.2 +2.33.0 diff --git a/fixes-cve-2018-15750-cve-2018-15751.patch b/fixes-cve-2018-15750-cve-2018-15751.patch deleted file mode 100644 index 9c8999a..0000000 --- a/fixes-cve-2018-15750-cve-2018-15751.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 731a53bd241240e08c455a8cb3a59e4d65a6abb5 Mon Sep 17 00:00:00 2001 -From: Erik Johnson -Date: Fri, 24 Aug 2018 10:35:55 -0500 -Subject: [PATCH] Fixes: CVE-2018-15750, CVE-2018-15751 - -Ensure that tokens are hex to avoid hanging/errors in cherrypy - -Add empty token salt-api integration tests - -Handle Auth exceptions in run_job - -Update tornado test to correct authentication message ---- - salt/netapi/rest_cherrypy/app.py | 7 ------- - tests/integration/netapi/rest_tornado/test_app.py | 8 ++++++-- - 2 files changed, 6 insertions(+), 9 deletions(-) - -diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py -index e7641ccbc5..5dfbadf759 100644 ---- a/salt/netapi/rest_cherrypy/app.py -+++ b/salt/netapi/rest_cherrypy/app.py -@@ -1181,13 +1181,6 @@ class LowDataAdapter: - except (TypeError, ValueError): - raise cherrypy.HTTPError(401, "Invalid token") - -- if "token" in chunk: -- # Make sure that auth token is hex -- try: -- int(chunk["token"], 16) -- except (TypeError, ValueError): -- raise cherrypy.HTTPError(401, "Invalid token") -- - if client: - chunk["client"] = client - -diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py -index e3ad8820d3..4e5e741f1d 100644 ---- a/tests/integration/netapi/rest_tornado/test_app.py -+++ b/tests/integration/netapi/rest_tornado/test_app.py -@@ -326,8 +326,12 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase): - self.assertIn("jid", ret[0]) # the first 2 are regular returns - self.assertIn("jid", ret[1]) - self.assertIn("Failed to authenticate", ret[2]) # bad auth -- self.assertEqual(ret[0]["minions"], sorted(["minion", "sub_minion"])) -- self.assertEqual(ret[1]["minions"], sorted(["minion", "sub_minion"])) -+ self.assertEqual( -+ ret[0]["minions"], sorted(["minion", "sub_minion", "localhost"]) -+ ) -+ self.assertEqual( -+ ret[1]["minions"], sorted(["minion", "sub_minion", "localhost"]) -+ ) - - @slowTest - def test_simple_local_async_post_no_tgt(self): --- -2.29.2 - - diff --git a/fixing-streamclosed-issue.patch b/fixing-streamclosed-issue.patch deleted file mode 100644 index da2e635..0000000 --- 
a/fixing-streamclosed-issue.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 82d1cadff4fa6248a9d891a3c228fc415207d8d6 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Mihai=20Dinc=C4=83?= -Date: Tue, 26 Nov 2019 18:26:31 +0100 -Subject: [PATCH] Fixing StreamClosed issue - ---- - salt/cli/batch_async.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index f3d92b88f1..8d2601e636 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -232,7 +232,6 @@ class BatchAsync: - "metadata": self.metadata, - } - self.event.fire_event(data, "salt/batch/{}/done".format(self.batch_jid)) -- self.event.remove_event_handler(self.__event_handler) - for (pattern, label) in self.patterns: - if label in ["ping_return", "batch_run"]: - self.event.unsubscribe(pattern, match_type="glob") -@@ -277,6 +276,7 @@ class BatchAsync: - - def __del__(self): - self.local = None -+ self.event.remove_event_handler(self.__event_handler) - self.event = None - self.ioloop = None - gc.collect() --- -2.29.2 - - diff --git a/get-os_arch-also-without-rpm-package-installed.patch b/get-os_arch-also-without-rpm-package-installed.patch deleted file mode 100644 index decb600..0000000 --- a/get-os_arch-also-without-rpm-package-installed.patch +++ /dev/null @@ -1,92 +0,0 @@ -From e987664551debb9affce8ce5a70593ef0750dcd5 Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Wed, 14 Nov 2018 17:36:23 +0100 -Subject: [PATCH] Get os_arch also without RPM package installed - -backport pkg.rpm test - -Add pkg.rpm unit test case - -Fix docstring - -Add UT for getting OS architecture fallback, when no RPM found (initrd, e.g.) - -Add UT for OS architecture detection on fallback, when no CPU arch can be determined - -Add UT for OS arch detection when no CPU arch or machine can be determined - -Remove unsupported testcase ---- - tests/unit/utils/test_pkg.py | 53 ++++++------------------------------ - 1 file changed, 8 insertions(+), 45 deletions(-) - -diff --git a/tests/unit/utils/test_pkg.py b/tests/unit/utils/test_pkg.py -index b4a67b8e57..404b01b12b 100644 ---- a/tests/unit/utils/test_pkg.py -+++ b/tests/unit/utils/test_pkg.py -@@ -1,53 +1,16 @@ --# -*- coding: utf-8 -*- -- --from __future__ import absolute_import, print_function, unicode_literals -- - import salt.utils.pkg - from salt.utils.pkg import rpm --from tests.support.mock import MagicMock, patch --from tests.support.unit import TestCase -- -+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch -+from tests.support.unit import TestCase, skipIf - --class PkgUtilsTestCase(TestCase): -- """ -- TestCase for salt.utils.pkg module -- """ -- -- test_parameters = [ -- ("16.0.0.49153-0+f1", "", "16.0.0.49153-0+f1"), -- ("> 15.0.0", ">", "15.0.0"), -- ("< 15.0.0", "<", "15.0.0"), -- ("<< 15.0.0", "<<", "15.0.0"), -- (">> 15.0.0", ">>", "15.0.0"), -- (">= 15.0.0", ">=", "15.0.0"), -- ("<= 15.0.0", "<=", "15.0.0"), -- ("!= 15.0.0", "!=", "15.0.0"), -- ("<=> 15.0.0", "<=>", "15.0.0"), -- ("<> 15.0.0", "<>", "15.0.0"), -- ("= 15.0.0", "=", "15.0.0"), -- (">15.0.0", ">", "15.0.0"), -- ("<15.0.0", "<", "15.0.0"), -- ("<<15.0.0", "<<", "15.0.0"), -- (">>15.0.0", ">>", "15.0.0"), -- (">=15.0.0", ">=", "15.0.0"), -- ("<=15.0.0", "<=", "15.0.0"), -- ("!=15.0.0", "!=", "15.0.0"), -- ("<=>15.0.0", "<=>", "15.0.0"), -- ("<>15.0.0", "<>", "15.0.0"), -- ("=15.0.0", "=", "15.0.0"), -- ("", "", ""), -- ] -- -- def test_split_comparison(self): -- """ -- Tests salt.utils.pkg.split_comparison -- """ -- for 
test_parameter in self.test_parameters: -- oper, verstr = salt.utils.pkg.split_comparison(test_parameter[0]) -- self.assertEqual(test_parameter[1], oper) -- self.assertEqual(test_parameter[2], verstr) -+try: -+ import pytest -+except ImportError: -+ pytest = None - - -+@skipIf(NO_MOCK, NO_MOCK_REASON) -+@skipIf(pytest is None, "PyTest is missing") - class PkgRPMTestCase(TestCase): - """ - Test case for pkg.rpm utils --- -2.29.2 - - diff --git a/grains-master-can-read-grains.patch b/grains-master-can-read-grains.patch deleted file mode 100644 index 0f91120..0000000 --- a/grains-master-can-read-grains.patch +++ /dev/null @@ -1,34 +0,0 @@ -From d9618fed8ff241c6f127f08ec59fea9c8b8e12a6 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Tue, 27 Oct 2020 13:16:37 +0100 -Subject: [PATCH] grains: master can read grains - ---- - salt/grains/extra.py | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index d25faac3b7..7729a5c0a5 100644 ---- a/salt/grains/extra.py -+++ b/salt/grains/extra.py -@@ -76,8 +76,14 @@ def __secure_boot(): - enabled = False - sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data") - if len(sboot) == 1: -- with salt.utils.files.fopen(sboot[0], "rb") as fd: -- enabled = fd.read()[-1:] == b"\x01" -+ # The minion is usually running as a privileged user, but is -+ # not the case for the master. Seems that the master can also -+ # pick the grains, and this file can only be readed by "root" -+ try: -+ with salt.utils.files.fopen(sboot[0], "rb") as fd: -+ enabled = fd.read()[-1:] == b"\x01" -+ except PermissionError: -+ pass - return enabled - - --- -2.29.2 - - diff --git a/grains.extra-support-old-non-intel-kernels-bsc-11806.patch b/grains.extra-support-old-non-intel-kernels-bsc-11806.patch deleted file mode 100644 index 6000526..0000000 --- a/grains.extra-support-old-non-intel-kernels-bsc-11806.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 27c7a9f62b1a589365785c9428293653ac76fee3 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Mon, 10 May 2021 16:26:02 +0200 -Subject: [PATCH] grains.extra: support old non-intel kernels - (bsc#1180650) (#368) - ---- - salt/grains/extra.py | 16 ++++++++++++---- - 1 file changed, 12 insertions(+), 4 deletions(-) - -diff --git a/salt/grains/extra.py b/salt/grains/extra.py -index 7729a5c0a5..f2abd1281c 100644 ---- a/salt/grains/extra.py -+++ b/salt/grains/extra.py -@@ -71,10 +71,10 @@ def suse_backported_capabilities(): - } - - --def __secure_boot(): -+def __secure_boot(efivars_dir): - """Detect if secure-boot is enabled.""" - enabled = False -- sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data") -+ sboot = glob.glob(os.path.join(efivars_dir, "SecureBoot-*/data")) - if len(sboot) == 1: - # The minion is usually running as a privileged user, but is - # not the case for the master. 
Seems that the master can also -@@ -89,9 +89,17 @@ def __secure_boot(): - - def uefi(): - """Populate UEFI grains.""" -+ efivars_dir = next( -+ iter( -+ filter( -+ os.path.exists, ["/sys/firmware/efi/efivars", "/sys/firmware/efi/vars"] -+ ) -+ ), -+ None, -+ ) - grains = { -- "efi": os.path.exists("/sys/firmware/efi/systab"), -- "efi-secure-boot": __secure_boot(), -+ "efi": bool(efivars_dir), -+ "efi-secure-boot": __secure_boot(efivars_dir) if efivars_dir else False, - } - - return grains --- -2.31.1 - - diff --git a/handle-master-tops-data-when-states-are-applied-by-t.patch b/handle-master-tops-data-when-states-are-applied-by-t.patch deleted file mode 100644 index da3bb96..0000000 --- a/handle-master-tops-data-when-states-are-applied-by-t.patch +++ /dev/null @@ -1,109 +0,0 @@ -From e0b7511e30da289b4100aa156b67b652681afc03 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 8 Jul 2021 08:57:13 +0100 -Subject: [PATCH] Handle "master tops" data when states are applied by - "transactional_update" (bsc#1187787) (#398) - -* Handle master tops data when states are applied by transactional_update (bsc#1187787) - -* Fix unit tests for transactional_update module ---- - salt/modules/transactional_update.py | 9 +++++++-- - .../unit/modules/test_transactional_update.py | 20 +++++++++---------- - 2 files changed, 17 insertions(+), 12 deletions(-) - -diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py -index 7bbdb697b8..9cdaddb91a 100644 ---- a/salt/modules/transactional_update.py -+++ b/salt/modules/transactional_update.py -@@ -301,6 +301,11 @@ def __virtual__(): - return (False, "Module transactional_update requires a transactional system") - - -+class TransactionalUpdateHighstate(salt.client.ssh.state.SSHHighState): -+ def _master_tops(self): -+ return self.client.master_tops() -+ -+ - def _global_params(self_update, snapshot=None, quiet=False): - """Utility function to prepare common global parameters.""" - params = ["--non-interactive", "--drop-if-no-change"] -@@ -1107,7 +1112,7 @@ def sls( - # Clone the options data and apply some default values. May not be - # needed, as this module just delegate - opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) -- st_ = salt.client.ssh.state.SSHHighState( -+ st_ = TransactionalUpdateHighstate( - opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__) - ) - -@@ -1180,7 +1185,7 @@ def highstate(activate_transaction=False, **kwargs): - # Clone the options data and apply some default values. 
May not be - # needed, as this module just delegate - opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) -- st_ = salt.client.ssh.state.SSHHighState( -+ st_ = TransactionalUpdateHighstate( - opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__) - ) - -diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py -index 19e477d02f..2d30f296d7 100644 ---- a/tests/unit/modules/test_transactional_update.py -+++ b/tests/unit/modules/test_transactional_update.py -@@ -622,22 +622,22 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - utils_mock["files.rm_rf"].assert_called_once() - - @patch("salt.modules.transactional_update._create_and_execute_salt_state") -- @patch("salt.client.ssh.state.SSHHighState") -+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") - @patch("salt.fileclient.get_file_client") - @patch("salt.utils.state.get_sls_opts") - def test_sls( - self, - get_sls_opts, - get_file_client, -- SSHHighState, -+ TransactionalUpdateHighstate, - _create_and_execute_salt_state, - ): - """Test transactional_update.sls""" -- SSHHighState.return_value = SSHHighState -- SSHHighState.render_highstate.return_value = (None, []) -- SSHHighState.state.reconcile_extend.return_value = (None, []) -- SSHHighState.state.requisite_in.return_value = (None, []) -- SSHHighState.state.verify_high.return_value = [] -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate -+ TransactionalUpdateHighstate.render_highstate.return_value = (None, []) -+ TransactionalUpdateHighstate.state.reconcile_extend.return_value = (None, []) -+ TransactionalUpdateHighstate.state.requisite_in.return_value = (None, []) -+ TransactionalUpdateHighstate.state.verify_high.return_value = [] - - _create_and_execute_salt_state.return_value = "result" - opts_mock = { -@@ -649,18 +649,18 @@ class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): - _create_and_execute_salt_state.assert_called_once() - - @patch("salt.modules.transactional_update._create_and_execute_salt_state") -- @patch("salt.client.ssh.state.SSHHighState") -+ @patch("salt.modules.transactional_update.TransactionalUpdateHighstate") - @patch("salt.fileclient.get_file_client") - @patch("salt.utils.state.get_sls_opts") - def test_highstate( - self, - get_sls_opts, - get_file_client, -- SSHHighState, -+ TransactionalUpdateHighstate, - _create_and_execute_salt_state, - ): - """Test transactional_update.highstage""" -- SSHHighState.return_value = SSHHighState -+ TransactionalUpdateHighstate.return_value = TransactionalUpdateHighstate - - _create_and_execute_salt_state.return_value = "result" - opts_mock = { --- -2.32.0 - - diff --git a/handle-volumes-on-stopped-pools-in-virt.vm_info-373.patch b/handle-volumes-on-stopped-pools-in-virt.vm_info-373.patch deleted file mode 100644 index 6a1081c..0000000 --- a/handle-volumes-on-stopped-pools-in-virt.vm_info-373.patch +++ /dev/null @@ -1,152 +0,0 @@ -From b154f0a17c85c2fe0b85226dfeb3919bd833a85c Mon Sep 17 00:00:00 2001 -From: Cedric Bosdonnat -Date: Fri, 21 May 2021 13:04:46 +0200 -Subject: [PATCH] Handle volumes on stopped pools in virt.vm_info - (#373) - -For VMs having at least a disk on a stopped volume, we don't want the -user to get an exception when running virt.vm_info. Instead just provide -less information. 
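The shape of the fix is a try/except around the volume lookup so that a stopped pool degrades to partial data instead of raising to the caller. A minimal sketch of that pattern, assuming plain dicts and ``LookupError`` as stand-ins for libvirt pool objects and ``libvirt.libvirtError``:

.. code-block:: python

    import logging

    log = logging.getLogger(__name__)

    def describe_volume(pool, volume_name):
        # Always computable, even when the pool is not running.
        qemu_target = "{}/{}".format(pool["name"], volume_name)
        extra_properties = {}
        try:
            # Raises KeyError (a LookupError) when the pool is stopped,
            # standing in for storageVolLookupByName() raising libvirtError.
            vol = pool["volumes"][volume_name]
            extra_properties["virtual size"] = vol["capacity"]
        except LookupError:
            # Degrade gracefully: log and return the partial result.
            log.info(
                "Couldn't extract all volume information: "
                "pool is likely not running"
            )
        return qemu_target, extra_properties

    running = {"name": "default", "volumes": {"srv01_data": {"capacity": 1 << 30}}}
    stopped = {"name": "stopped", "volumes": {}}
    print(describe_volume(running, "srv01_data"))  # ('default/srv01_data', {...})
    print(describe_volume(stopped, "vm05_data"))   # ('stopped/vm05_data', {})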
---- - changelog/60132.fixed | 1 + - salt/modules/virt.py | 73 +++++++++++-------- - .../pytests/unit/modules/virt/test_domain.py | 9 ++- - 3 files changed, 50 insertions(+), 33 deletions(-) - create mode 100644 changelog/60132.fixed - -diff --git a/changelog/60132.fixed b/changelog/60132.fixed -new file mode 100644 -index 0000000000..1e3bc96b98 ---- /dev/null -+++ b/changelog/60132.fixed -@@ -0,0 +1 @@ -+Gracefuly handle errors in virt.vm_info -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 6409089109..d8a8c51ce5 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -515,41 +515,50 @@ def _get_disks(conn, dom): - def _get_disk_volume_data(pool_name, volume_name): - qemu_target = "{}/{}".format(pool_name, volume_name) - pool = conn.storagePoolLookupByName(pool_name) -- vol = pool.storageVolLookupByName(volume_name) -- vol_info = vol.info() -- extra_properties = { -- "virtual size": vol_info[1], -- "disk size": vol_info[2], -- } -- -- backing_files = [ -- { -- "file": node.find("source").get("file"), -- "file format": node.find("format").get("type"), -+ extra_properties = {} -+ try: -+ vol = pool.storageVolLookupByName(volume_name) -+ vol_info = vol.info() -+ extra_properties = { -+ "virtual size": vol_info[1], -+ "disk size": vol_info[2], - } -- for node in elem.findall(".//backingStore[source]") -- ] - -- if backing_files: -- # We had the backing files in a flat list, nest them again. -- extra_properties["backing file"] = backing_files[0] -- parent = extra_properties["backing file"] -- for sub_backing_file in backing_files[1:]: -- parent["backing file"] = sub_backing_file -- parent = sub_backing_file -+ backing_files = [ -+ { -+ "file": node.find("source").get("file"), -+ "file format": node.find("format").get("type"), -+ } -+ for node in elem.findall(".//backingStore[source]") -+ ] - -- else: -- # In some cases the backing chain is not displayed by the domain definition -- # Try to see if we have some of it in the volume definition. -- vol_desc = ElementTree.fromstring(vol.XMLDesc()) -- backing_path = vol_desc.find("./backingStore/path") -- backing_format = vol_desc.find("./backingStore/format") -- if backing_path is not None: -- extra_properties["backing file"] = {"file": backing_path.text} -- if backing_format is not None: -- extra_properties["backing file"][ -- "file format" -- ] = backing_format.get("type") -+ if backing_files: -+ # We had the backing files in a flat list, nest them again. -+ extra_properties["backing file"] = backing_files[0] -+ parent = extra_properties["backing file"] -+ for sub_backing_file in backing_files[1:]: -+ parent["backing file"] = sub_backing_file -+ parent = sub_backing_file -+ -+ else: -+ # In some cases the backing chain is not displayed by the domain definition -+ # Try to see if we have some of it in the volume definition. 
-+ vol_desc = ElementTree.fromstring(vol.XMLDesc()) -+ backing_path = vol_desc.find("./backingStore/path") -+ backing_format = vol_desc.find("./backingStore/format") -+ if backing_path is not None: -+ extra_properties["backing file"] = { -+ "file": backing_path.text -+ } -+ if backing_format is not None: -+ extra_properties["backing file"][ -+ "file format" -+ ] = backing_format.get("type") -+ except libvirt.libvirtError: -+ # The volume won't be found if the pool is not started, just output less infos -+ log.info( -+ "Couldn't extract all volume informations: pool is likely not running or refreshed" -+ ) - return (qemu_target, extra_properties) - - if disk_type == "file": -diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py -index 76433eaef4..a9453e4a66 100644 ---- a/tests/pytests/unit/modules/virt/test_domain.py -+++ b/tests/pytests/unit/modules/virt/test_domain.py -@@ -192,6 +192,11 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool): - -
- -+ -+ -+ -+ -+ - - - -@@ -205,11 +210,12 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool): - - - """ -- domain_mock = make_mock_vm(vm_def) -+ make_mock_vm(vm_def) - - pool_mock = make_mock_storage_pool( - "default", "dir", ["srv01_system", "srv01_data", "vm05_system"] - ) -+ make_mock_storage_pool("stopped", "dir", []) - - # Append backing store to srv01_data volume XML description - srv1data_mock = pool_mock.storageVolLookupByName("srv01_data") -@@ -256,6 +262,7 @@ def test_get_disks(make_mock_vm, make_mock_storage_pool): - }, - }, - }, -+ "vdd": {"type": "disk", "file": "stopped/vm05_data", "file format": "qcow2"}, - "hda": { - "type": "cdrom", - "file format": "raw", --- -2.31.1 - - diff --git a/implement-network.fqdns-module-function-bsc-1134860-.patch b/implement-network.fqdns-module-function-bsc-1134860-.patch deleted file mode 100644 index 6492017..0000000 --- a/implement-network.fqdns-module-function-bsc-1134860-.patch +++ /dev/null @@ -1,281 +0,0 @@ -From ac34a8d839f91285f4ced605250422a1ecf5cb55 Mon Sep 17 00:00:00 2001 -From: EricS <54029547+ESiebigteroth@users.noreply.github.com> -Date: Tue, 3 Sep 2019 11:22:53 +0200 -Subject: [PATCH] Implement network.fqdns module function (bsc#1134860) - (#172) - -* Duplicate fqdns logic in module.network -* Move _get_interfaces to utils.network -* Reuse network.fqdns in grains.core.fqdns -* Return empty list when fqdns grains is disabled - -Co-authored-by: Eric Siebigteroth ---- - salt/grains/core.py | 58 +++------------------------------- - salt/modules/network.py | 12 +++---- - salt/utils/network.py | 2 +- - tests/unit/grains/test_core.py | 55 ++++++++++++-------------------- - 4 files changed, 31 insertions(+), 96 deletions(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index 5f18ba4a58..0dc1d97f97 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -23,7 +23,6 @@ import uuid - import warnings - import zlib - from errno import EACCES, EPERM --from multiprocessing.pool import ThreadPool - - import distro - import salt.exceptions -@@ -2406,59 +2405,10 @@ def fqdns(): - then trying to reverse resolve them (excluding 'lo' interface). - To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file. - """ -- # Provides: -- # fqdns -- -- grains = {} -- fqdns = set() -- -- def _lookup_fqdn(ip): -- try: -- name, aliaslist, addresslist = socket.gethostbyaddr(ip) -- return [socket.getfqdn(name)] + [ -- als for als in aliaslist if salt.utils.network.is_fqdn(als) -- ] -- except socket.herror as err: -- if err.errno in (0, HOST_NOT_FOUND, NO_DATA): -- # No FQDN for this IP address, so we don't need to know this all the time. -- log.debug("Unable to resolve address %s: %s", ip, err) -- else: -- log.error(err_message, ip, err) -- except (OSError, socket.gaierror, socket.timeout) as err: -- log.error(err_message, ip, err) -- -- start = time.time() -- -- addresses = salt.utils.network.ip_addrs( -- include_loopback=False, interface_data=_get_interfaces() -- ) -- addresses.extend( -- salt.utils.network.ip_addrs6( -- include_loopback=False, interface_data=_get_interfaces() -- ) -- ) -- err_message = "Exception during resolving address: %s" -- -- # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. -- # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing -- # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. 
-- -- try: -- pool = ThreadPool(8) -- results = pool.map(_lookup_fqdn, addresses) -- pool.close() -- pool.join() -- except Exception as exc: -- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) -- -- for item in results: -- if item: -- fqdns.update(item) -- -- elapsed = time.time() - start -- log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed)) -- -- return {"fqdns": sorted(list(fqdns))} -+ opt = {"fqdns": []} -+ if __opts__.get("enable_fqdns_grains", True) == True: -+ opt = __salt__["network.fqdns"]() -+ return opt - - - def ip_fqdn(): -diff --git a/salt/modules/network.py b/salt/modules/network.py -index 2e1410c288..59ed43bba6 100644 ---- a/salt/modules/network.py -+++ b/salt/modules/network.py -@@ -2,7 +2,6 @@ - Module for gathering and managing network information - """ - --# Import python libs - import datetime - import hashlib - import logging -@@ -12,7 +11,6 @@ import socket - import time - from multiprocessing.pool import ThreadPool - --# Import salt libs - import salt.utils.decorators.path - import salt.utils.functools - import salt.utils.network -@@ -20,8 +18,6 @@ import salt.utils.platform - import salt.utils.validate.net - from salt._compat import ipaddress - from salt.exceptions import CommandExecutionError -- --# Import 3rd-party libs - from salt.ext.six.moves import range - - log = logging.getLogger(__name__) -@@ -2076,7 +2072,10 @@ def fqdns(): - - def _lookup_fqdn(ip): - try: -- return [socket.getfqdn(socket.gethostbyaddr(ip)[0])] -+ name, aliaslist, addresslist = socket.gethostbyaddr(ip) -+ return [socket.getfqdn(name)] + [ -+ als for als in aliaslist if salt.utils.network.is_fqdn(als) -+ ] - except socket.herror as err: - if err.errno in (0, HOST_NOT_FOUND, NO_DATA): - # No FQDN for this IP address, so we don't need to know this all the time. -@@ -2102,13 +2101,12 @@ def fqdns(): - # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing - # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. - -- results = [] - try: - pool = ThreadPool(8) - results = pool.map(_lookup_fqdn, addresses) - pool.close() - pool.join() -- except Exception as exc: # pylint: disable=broad-except -+ except Exception as exc: - log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) - - for item in results: -diff --git a/salt/utils/network.py b/salt/utils/network.py -index d253ded3ab..25b2d06758 100644 ---- a/salt/utils/network.py -+++ b/salt/utils/network.py -@@ -49,7 +49,7 @@ except (ImportError, OSError, AttributeError, TypeError): - _INTERFACES = {} - - --def _get_interfaces(): -+def _get_interfaces(): #! 
function - """ - Provide a dict of the connected interfaces and their ip addresses - """ -diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index d760e57a54..a5ceeb8317 100644 ---- a/tests/unit/grains/test_core.py -+++ b/tests/unit/grains/test_core.py -@@ -18,6 +18,7 @@ import salt.utils.network - import salt.utils.path - import salt.utils.platform - from salt._compat import ipaddress -+from salt.ext import six - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.mock import MagicMock, Mock, mock_open, patch - from tests.support.unit import TestCase, skipIf -@@ -1293,14 +1294,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ): - assert core.dns() == ret - -- def test_enable_fqdns_false(self): -+ def test_enablefqdnsFalse(self): - """ - tests enable_fqdns_grains is set to False - """ - with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": False}): - assert core.fqdns() == {"fqdns": []} - -- def test_enable_fqdns_true(self): -+ def test_enablefqdnsTrue(self): - """ - testing that grains uses network.fqdns module - """ -@@ -1311,14 +1312,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": True}): - assert core.fqdns() == "my.fake.domain" - -- def test_enable_fqdns_none(self): -+ def test_enablefqdnsNone(self): - """ - testing default fqdns grains is returned when enable_fqdns_grains is None - """ - with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": None}): - assert core.fqdns() == {"fqdns": []} - -- def test_enable_fqdns_without_patching(self): -+ def test_enablefqdnswithoutpaching(self): - """ - testing fqdns grains is enabled by default - """ -@@ -1326,23 +1327,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - "salt.grains.core.__salt__", - {"network.fqdns": MagicMock(return_value="my.fake.domain")}, - ): -- # fqdns is disabled by default on Windows -- if salt.utils.platform.is_windows(): -- assert core.fqdns() == {"fqdns": []} -- else: -- assert core.fqdns() == "my.fake.domain" -- -- def test_enable_fqdns_false_is_proxy(self): -- """ -- testing fqdns grains is disabled by default for proxy minions -- """ -- with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True): -- with patch.dict( -- "salt.grains.core.__salt__", -- {"network.fqdns": MagicMock(return_value="my.fake.domain")}, -- ): -- # fqdns is disabled by default on proxy minions -- assert core.fqdns() == {"fqdns": []} -+ assert core.fqdns() == "my.fake.domain" - - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") - @patch( -@@ -1367,11 +1352,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ("bluesniff.foo.bar", [], ["fe80::a8b2:93ff:dead:beef"]), - ] - ret = {"fqdns": ["bluesniff.foo.bar", "foo.bar.baz", "rinzler.evil-corp.com"]} -- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -- fqdns = core.fqdns() -- assert "fqdns" in fqdns -- assert len(fqdns["fqdns"]) == len(ret["fqdns"]) -- assert set(fqdns["fqdns"]) == set(ret["fqdns"]) -+ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): -+ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -+ fqdns = core.fqdns() -+ assert "fqdns" in fqdns -+ assert len(fqdns["fqdns"]) == len(ret["fqdns"]) -+ assert set(fqdns["fqdns"]) == set(ret["fqdns"]) - - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") - @patch("salt.utils.network.ip_addrs", 
MagicMock(return_value=["1.2.3.4"])) -@@ -1437,14 +1423,15 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ["fe80::a8b2:93ff:dead:beef"], - ), - ] -- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -- fqdns = core.fqdns() -- assert "fqdns" in fqdns -- for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: -- assert alias in fqdns["fqdns"] -- -- for alias in ["throwmeaway", "false-hostname", "badaliass"]: -- assert alias not in fqdns["fqdns"] -+ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): -+ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -+ fqdns = core.fqdns() -+ assert "fqdns" in fqdns -+ for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: -+ assert alias in fqdns["fqdns"] -+ -+ for alias in ["throwmeaway", "false-hostname", "badaliass"]: -+ assert alias not in fqdns["fqdns"] - - def test_core_virtual(self): - """ --- -2.29.2 - - diff --git a/implementation-of-held-unheld-functions-for-state-pk.patch b/implementation-of-held-unheld-functions-for-state-pk.patch index f8d08a0..83680e0 100644 --- a/implementation-of-held-unheld-functions-for-state-pk.patch +++ b/implementation-of-held-unheld-functions-for-state-pk.patch @@ -1,4 +1,4 @@ -From 2ee360753c8fa937d9c81bf7da24f457041650bc Mon Sep 17 00:00:00 2001 +From f6e5a6bd16fa49cceadde9a9f46fefd12d92316b Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:39:26 +0300 Subject: [PATCH] Implementation of held/unheld functions for state pkg @@ -6,19 +6,17 @@ Subject: [PATCH] Implementation of held/unheld functions for state pkg * Implementation of held/unheld functions for state pkg --- - salt/modules/zypperpkg.py | 201 +++++++++--- + salt/modules/zypperpkg.py | 117 ++++++- salt/states/pkg.py | 310 +++++++++++++++++++ - tests/pytests/unit/modules/test_zypperpkg.py | 142 +++++++++ - tests/pytests/unit/states/test_pkg.py | 155 ++++++++++ - 4 files changed, 760 insertions(+), 48 deletions(-) - create mode 100644 tests/pytests/unit/modules/test_zypperpkg.py - create mode 100644 tests/pytests/unit/states/test_pkg.py + tests/pytests/unit/modules/test_zypperpkg.py | 133 ++++++++ + tests/pytests/unit/states/test_pkg.py | 137 ++++++++ + 4 files changed, 684 insertions(+), 13 deletions(-) diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py -index e064e2cb4e..932b30bac5 100644 +index 863be3c894..9c7ffcf5da 100644 --- a/salt/modules/zypperpkg.py +++ b/salt/modules/zypperpkg.py -@@ -2071,6 +2071,76 @@ def purge( +@@ -2092,6 +2092,76 @@ def purge( return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root) @@ -95,72 +93,48 @@ index e064e2cb4e..932b30bac5 100644 def list_locks(root=None): """ List current package locks. -@@ -2141,43 +2211,68 @@ def clean_locks(root=None): +@@ -2162,7 +2232,7 @@ def clean_locks(root=None): return out -def unhold(name=None, pkgs=None, **kwargs): +def unhold(name=None, pkgs=None, root=None, **kwargs): """ -- Remove specified package lock. -+ Remove a package hold. -+ -+ name -+ A package name to unhold, or a comma-separated list of package names to -+ unhold. -+ -+ pkgs -+ A list of packages to unhold. The ``name`` parameter will be ignored if -+ this option is passed. + .. versionadded:: 3003 - root -- operate on a different root directory. +@@ -2176,6 +2246,8 @@ def unhold(name=None, pkgs=None, **kwargs): + A list of packages to unhold. The ``name`` parameter will be ignored if + this option is passed. 
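# Not part of the patch: a minimal, self-contained sketch of the target
# normalization the reworked zypper hold/unhold code paths use. Each entry in
# ``pkgs`` may be a plain package name or a one-item {name: version} mapping,
# unpacked with the same ``next(iter(target.items()))`` idiom as in the hunks
# around this spot. The function name and sample data are illustrative only.
def _normalize_hold_targets(name=None, pkgs=None):
    targets = []
    if pkgs:
        targets.extend(pkgs)
    else:
        targets.append(name)
    normalized = []
    for target in targets:
        version = None
        if isinstance(target, dict):
            # single-item mapping, e.g. {"bar": "2.3.4"}
            (target, version) = next(iter(target.items()))
        normalized.append((target, version))
    return normalized

# _normalize_hold_targets(pkgs=["foo", {"bar": "2.3.4"}])
# -> [("foo", None), ("bar", "2.3.4")]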
+ ++ root + Operate on a different root directory. CLI Example: - .. code-block:: bash +@@ -2191,24 +2263,38 @@ def unhold(name=None, pkgs=None, **kwargs): -- salt '*' pkg.remove_lock -- salt '*' pkg.remove_lock ,, -- salt '*' pkg.remove_lock pkgs='["foo", "bar"]' -+ salt '*' pkg.unhold -+ salt '*' pkg.unhold ,, -+ salt '*' pkg.unhold pkgs='["foo", "bar"]' - """ - ret = {} -- root = kwargs.get("root") -- if (not name and not pkgs) or (name and pkgs): -+ if not name and not pkgs: - raise CommandExecutionError("Name or packages must be specified.") -- elif name: -- pkgs = [name] - -- locks = list_locks(root) -- try: -- pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) -- except MinionError as exc: -- raise CommandExecutionError(exc) -+ targets = [] -+ if pkgs: + targets = [] + if pkgs: +- for pkg in salt.utils.data.repack_dictlist(pkgs): +- targets.append(pkg) + targets.extend(pkgs) -+ else: -+ targets.append(name) + else: + targets.append(name) -+ locks = list_locks() + locks = list_locks() removed = [] - missing = [] -- for pkg in pkgs: -- if locks.get(pkg): -- removed.append(pkg) -- ret[pkg]["comment"] = "Package {} is no longer held.".format(pkg) -+ -+ for target in targets: + + for target in targets: + version = None + if isinstance(target, dict): + (target, version) = next(iter(target.items())) -+ ret[target] = {"name": target, "changes": {}, "result": True, "comment": ""} -+ if locks.get(target): + ret[target] = {"name": target, "changes": {}, "result": True, "comment": ""} + if locks.get(target): +- removed.append(target) +- ret[target]["changes"]["new"] = "" +- ret[target]["changes"]["old"] = "hold" +- ret[target]["comment"] = "Package {} is no longer held.".format(target) + lock_ver = None + if "version" in locks.get(target): + lock_ver = locks.get(target)["version"] @@ -180,104 +154,61 @@ index e064e2cb4e..932b30bac5 100644 + ret[target]["changes"]["old"] = "hold" + ret[target]["comment"] = "Package {} is no longer held.".format(target) else: -- missing.append(pkg) -- ret[pkg]["comment"] = "Package {} unable to be unheld.".format(pkg) -+ ret[target]["comment"] = "Package {} was already unheld.".format(target) +- missing.append(target) + ret[target]["comment"] = "Package {} was already unheld.".format(target) if removed: - __zypper__(root=root).call("rl", *removed) -@@ -2223,47 +2318,57 @@ def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argume +@@ -2261,7 +2347,7 @@ def remove_lock(name, root=None, **kwargs): return {"removed": len(removed), "not_found": missing} -def hold(name=None, pkgs=None, **kwargs): +def hold(name=None, pkgs=None, root=None, **kwargs): """ -- Add a package lock. Specify packages to lock by exact name. -+ Add a package hold. Specify one of ``name`` and ``pkgs``. -+ -+ name -+ A package name to hold, or a comma-separated list of package names to -+ hold. -+ -+ pkgs -+ A list of packages to hold. The ``name`` parameter will be ignored if -+ this option is passed. + .. versionadded:: 3003 - root -- operate on a different root directory. +@@ -2275,6 +2361,9 @@ def hold(name=None, pkgs=None, **kwargs): + A list of packages to hold. The ``name`` parameter will be ignored if + this option is passed. + ++ root + Operate on a different root directory. + CLI Example: - .. 
code-block:: bash +@@ -2290,8 +2379,7 @@ def hold(name=None, pkgs=None, **kwargs): -- salt '*' pkg.add_lock -- salt '*' pkg.add_lock ,, -- salt '*' pkg.add_lock pkgs='["foo", "bar"]' -- -- :param name: -- :param pkgs: -- :param kwargs: -- :return: -+ salt '*' pkg.hold -+ salt '*' pkg.hold ,, -+ salt '*' pkg.hold pkgs='["foo", "bar"]' - """ - ret = {} -- root = kwargs.get("root") -- if (not name and not pkgs) or (name and pkgs): -+ if not name and not pkgs: - raise CommandExecutionError("Name or packages must be specified.") -- elif name: -- pkgs = [name] - -- locks = list_locks(root=root) -+ targets = [] -+ if pkgs: + targets = [] + if pkgs: +- for pkg in salt.utils.data.repack_dictlist(pkgs): +- targets.append(pkg) + targets.extend(pkgs) -+ else: -+ targets.append(name) -+ -+ locks = list_locks() - added = [] -- try: -- pkgs = list(__salt__["pkg_resource.parse_targets"](pkgs)[0].keys()) -- except MinionError as exc: -- raise CommandExecutionError(exc) + else: + targets.append(name) -- for pkg in pkgs: -- ret[pkg] = {"name": pkg, "changes": {}, "result": False, "comment": ""} -- if not locks.get(pkg): -- added.append(pkg) -- ret[pkg]["comment"] = "Package {} is now being held.".format(pkg) -+ for target in targets: +@@ -2299,9 +2387,12 @@ def hold(name=None, pkgs=None, **kwargs): + added = [] + + for target in targets: + version = None + if isinstance(target, dict): + (target, version) = next(iter(target.items())) -+ ret[target] = {"name": target, "changes": {}, "result": True, "comment": ""} -+ if not locks.get(target): + ret[target] = {"name": target, "changes": {}, "result": True, "comment": ""} + if not locks.get(target): +- added.append(target) + added.append(target if not version else "{}={}".format(target, version)) -+ ret[target]["changes"]["new"] = "hold" -+ ret[target]["changes"]["old"] = "" -+ ret[target]["comment"] = "Package {} is now being held.".format(target) - else: -- ret[pkg]["comment"] = "Package {} is already set to be held.".format(pkg) -+ ret[target]["comment"] = "Package {} is already set to be held.".format( -+ target -+ ) - - if added: - __zypper__(root=root).call("al", *added) + ret[target]["changes"]["new"] = "hold" + ret[target]["changes"]["old"] = "" + ret[target]["comment"] = "Package {} is now being held.".format(target) diff --git a/salt/states/pkg.py b/salt/states/pkg.py -index f7327a33e3..0ef3f056c5 100644 +index fd6808a2dc..a8ffe25a77 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py -@@ -3550,3 +3550,313 @@ def mod_watch(name, **kwargs): - "comment": "pkg.{} does not work with the watch requisite".format(sfun), - "result": False, - } +@@ -3607,3 +3607,313 @@ def mod_beacon(name, **kwargs): + ), + "result": False, + } + + +def held(name, version=None, pkgs=None, replace=False, **kwargs): @@ -589,20 +520,13 @@ index f7327a33e3..0ef3f056c5 100644 + + return ret diff --git a/tests/pytests/unit/modules/test_zypperpkg.py b/tests/pytests/unit/modules/test_zypperpkg.py -new file mode 100644 -index 0000000000..464fae1f47 ---- /dev/null +index 37bbef87b7..dbe09976b2 100644 +--- a/tests/pytests/unit/modules/test_zypperpkg.py +++ b/tests/pytests/unit/modules/test_zypperpkg.py -@@ -0,0 +1,142 @@ -+import pytest -+import salt.modules.pkg_resource as pkg_resource -+import salt.modules.zypperpkg as zypper -+from tests.support.mock import MagicMock, patch -+ -+ -+@pytest.fixture -+def configure_loader_modules(): -+ return {zypper: {"rpm": None}, pkg_resource: {}} +@@ -119,3 +119,136 @@ def test_del_repo_key(): + with patch.dict(zypper.__salt__, salt_mock): + 
assert zypper.del_repo_key(keyid="keyid", root="/mnt") + salt_mock["lowpkg.remove_gpg_key"].assert_called_once_with("keyid", "/mnt") + + +def test_pkg_hold(): @@ -737,29 +661,13 @@ index 0000000000..464fae1f47 + assert len(ret) == 1 + assert "bar-2:2.3.4-2.1.*" in ret diff --git a/tests/pytests/unit/states/test_pkg.py b/tests/pytests/unit/states/test_pkg.py -new file mode 100644 -index 0000000000..faf42c4681 ---- /dev/null +index 8e5ae42ed8..a2e63d5157 100644 +--- a/tests/pytests/unit/states/test_pkg.py +++ b/tests/pytests/unit/states/test_pkg.py -@@ -0,0 +1,155 @@ -+import pytest -+import salt.states.pkg as pkg -+from tests.support.mock import MagicMock, patch -+ -+ -+@pytest.fixture -+def configure_loader_modules(): -+ return { -+ pkg: { -+ "__env__": "base", -+ "__salt__": {}, -+ "__grains__": {"os": "CentOS"}, -+ "__opts__": {"test": False, "cachedir": ""}, -+ "__instance_id__": "", -+ "__low__": {}, -+ "__utils__": {}, -+ }, -+ } +@@ -352,3 +352,140 @@ def test_mod_beacon(): + } + + assert ret == expected + + +@pytest.mark.parametrize( @@ -898,6 +806,6 @@ index 0000000000..faf42c4681 + unhold_mock.assert_any_call(name="held-test", pkgs=["baz"]) + unhold_mock.assert_any_call(name="held-test", pkgs=["bar"]) -- -2.32.0 +2.33.0 diff --git a/implementation-of-suse_ip-execution-module-bsc-10999.patch b/implementation-of-suse_ip-execution-module-bsc-10999.patch index d4157ad..330b543 100644 --- a/implementation-of-suse_ip-execution-module-bsc-10999.patch +++ b/implementation-of-suse_ip-execution-module-bsc-10999.patch @@ -1,4 +1,4 @@ -From fc15e6791deaac9b5ac52268b218e202481440a4 Mon Sep 17 00:00:00 2001 +From ebf90aaad969a61708673a9681d0d534134e16f8 Mon Sep 17 00:00:00 2001 From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> Date: Thu, 18 Feb 2021 15:56:01 +0300 Subject: [PATCH] Implementation of suse_ip execution module @@ -8,12 +8,12 @@ Subject: [PATCH] Implementation of suse_ip execution module salt/modules/linux_ip.py | 2 + salt/modules/rh_ip.py | 2 +- salt/modules/suse_ip.py | 1151 ++++++++++++++++++++++++++ - salt/states/network.py | 35 +- + salt/states/network.py | 28 +- salt/templates/suse_ip/ifcfg.jinja | 34 + salt/templates/suse_ip/ifroute.jinja | 8 + salt/templates/suse_ip/network.jinja | 30 + setup.py | 1 + - 8 files changed, 1248 insertions(+), 15 deletions(-) + 8 files changed, 1248 insertions(+), 8 deletions(-) create mode 100644 salt/modules/suse_ip.py create mode 100644 salt/templates/suse_ip/ifcfg.jinja create mode 100644 salt/templates/suse_ip/ifroute.jinja @@ -33,10 +33,10 @@ index bac0665de2..e7a268694d 100644 return (False, "Module linux_ip: RedHat systems are not supported.") if __grains__["os_family"] == "Debian": diff --git a/salt/modules/rh_ip.py b/salt/modules/rh_ip.py -index 2da954bdd0..fa13cc85d1 100644 +index d3bab3a1f8..790241a82e 100644 --- a/salt/modules/rh_ip.py +++ b/salt/modules/rh_ip.py -@@ -543,7 +543,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface): +@@ -551,7 +551,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface): """ result = {"name": iface} if "proto" in opts: @@ -1203,7 +1203,7 @@ index 0000000000..92dad50351 + + return _read_file(_SUSE_NETWORK_FILE) diff --git a/salt/states/network.py b/salt/states/network.py -index 30bd07810e..04d6a71f34 100644 +index f20863113b..49d7857f1d 100644 --- a/salt/states/network.py +++ b/salt/states/network.py @@ -504,6 +504,8 @@ def managed(name, enabled=True, **kwargs): @@ -1215,9 +1215,9 @@ index 30bd07810e..04d6a71f34 100644 # Build interface try: old = 
__salt__["ip.get_interface"](name) -@@ -649,25 +651,30 @@ def managed(name, enabled=True, **kwargs): +@@ -649,17 +651,29 @@ def managed(name, enabled=True, **kwargs): present_slaves = __salt__["cmd.run"]( - ["cat", "/sys/class/net/{0}/bonding/slaves".format(name)] + ["cat", "/sys/class/net/{}/bonding/slaves".format(name)] ).split() - desired_slaves = kwargs["slaves"].split() + if isinstance(kwargs['slaves'], list): @@ -1250,16 +1250,8 @@ index 30bd07810e..04d6a71f34 100644 + ret["changes"]["enslave"] = "Added slaves '{0}' to master '{1}'".format( + " ".join(missing_slaves), name ) -- cmd = [ifenslave_path, name] + list(missing_slaves) -- __salt__["cmd.run"](cmd, python_shell=False) -- else: -- log.error("Command 'ifenslave' not found") -- ret["changes"]["enslave"] = "Added slaves '{0}' to master '{1}'".format( -- " ".join(missing_slaves), name -- ) - else: - log.info( - "All slaves '%s' are already added to the master %s" + cmd = [ifenslave_path, name] + list(missing_slaves) + __salt__["cmd.run"](cmd, python_shell=False) diff --git a/salt/templates/suse_ip/ifcfg.jinja b/salt/templates/suse_ip/ifcfg.jinja new file mode 100644 index 0000000000..8384d0eab7 @@ -1351,10 +1343,10 @@ index 0000000000..64ae911271 +{%endif%}{% if wireless_regulatory_domain %}WIRELESS_REGULATORY_DOMAIN="{{wireless_regulatory_domain}}" +{%endif%} diff --git a/setup.py b/setup.py -index d9c3d6e303..c6bd4a3c03 100755 +index e13e5485ed..866e8d91f9 100755 --- a/setup.py +++ b/setup.py -@@ -1108,6 +1108,7 @@ class SaltDistribution(distutils.dist.Distribution): +@@ -1106,6 +1106,7 @@ class SaltDistribution(distutils.dist.Distribution): package_data = { "salt.templates": [ "rh_ip/*.jinja", @@ -1363,6 +1355,6 @@ index d9c3d6e303..c6bd4a3c03 100755 "virt/*.jinja", "git/*", -- -2.30.0 +2.33.0 diff --git a/improve-batch_async-to-release-consumed-memory-bsc-1.patch b/improve-batch_async-to-release-consumed-memory-bsc-1.patch deleted file mode 100644 index 34cf8ee..0000000 --- a/improve-batch_async-to-release-consumed-memory-bsc-1.patch +++ /dev/null @@ -1,205 +0,0 @@ -From e53d50ce5fabf67eeb5344f7be9cccbb09d0179b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Thu, 26 Sep 2019 10:41:06 +0100 -Subject: [PATCH] Improve batch_async to release consumed memory - (bsc#1140912) - ---- - salt/cli/batch_async.py | 89 ++++++++++++++++++++++++----------------- - 1 file changed, 52 insertions(+), 37 deletions(-) - -diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py -index 388b709416..0a0b8f5f83 100644 ---- a/salt/cli/batch_async.py -+++ b/salt/cli/batch_async.py -@@ -2,7 +2,7 @@ - Execute a job on the targeted minions by using a moving window of fixed size `batch`. 
- """ - --import fnmatch -+import gc - - # pylint: enable=import-error,no-name-in-module,redefined-builtin - import logging -@@ -78,6 +78,7 @@ class BatchAsync: - self.batch_jid = jid_gen() - self.find_job_jid = jid_gen() - self.find_job_returned = set() -+ self.ended = False - self.event = salt.utils.event.get_event( - "master", - self.opts["sock_dir"], -@@ -88,6 +89,7 @@ class BatchAsync: - keep_loop=True, - ) - self.scheduled = False -+ self.patterns = {} - - def __set_event_handler(self): - ping_return_pattern = "salt/job/{}/ret/*".format(self.ping_jid) -@@ -118,7 +120,7 @@ class BatchAsync: - if minion in self.active: - self.active.remove(minion) - self.done_minions.add(minion) -- self.schedule_next() -+ self.event.io_loop.spawn_callback(self.schedule_next) - - def _get_next(self): - to_run = ( -@@ -132,27 +134,27 @@ class BatchAsync: - ) - return set(list(to_run)[:next_batch_size]) - -- @tornado.gen.coroutine - def check_find_job(self, batch_minions, jid): -- find_job_return_pattern = "salt/job/{}/ret/*".format(jid) -- self.event.unsubscribe(find_job_return_pattern, match_type="glob") -- self.patterns.remove((find_job_return_pattern, "find_job_return")) -+ if self.event: -+ find_job_return_pattern = "salt/job/{}/ret/*".format(jid) -+ self.event.unsubscribe(find_job_return_pattern, match_type="glob") -+ self.patterns.remove((find_job_return_pattern, "find_job_return")) - -- timedout_minions = batch_minions.difference(self.find_job_returned).difference( -- self.done_minions -- ) -- self.timedout_minions = self.timedout_minions.union(timedout_minions) -- self.active = self.active.difference(self.timedout_minions) -- running = batch_minions.difference(self.done_minions).difference( -- self.timedout_minions -- ) -+ timedout_minions = batch_minions.difference( -+ self.find_job_returned -+ ).difference(self.done_minions) -+ self.timedout_minions = self.timedout_minions.union(timedout_minions) -+ self.active = self.active.difference(self.timedout_minions) -+ running = batch_minions.difference(self.done_minions).difference( -+ self.timedout_minions -+ ) - -- if timedout_minions: -- self.schedule_next() -+ if timedout_minions: -+ self.schedule_next() - -- if running: -- self.find_job_returned = self.find_job_returned.difference(running) -- self.event.io_loop.add_callback(self.find_job, running) -+ if running: -+ self.find_job_returned = self.find_job_returned.difference(running) -+ self.event.io_loop.spawn_callback(self.find_job, running) - - @tornado.gen.coroutine - def find_job(self, minions): -@@ -175,18 +177,12 @@ class BatchAsync: - jid=jid, - **self.eauth - ) -- self.event.io_loop.call_later( -- self.opts["gather_job_timeout"], self.check_find_job, not_done, jid -- ) -+ yield tornado.gen.sleep(self.opts["gather_job_timeout"]) -+ self.event.io_loop.spawn_callback(self.check_find_job, not_done, jid) - - @tornado.gen.coroutine - def start(self): - self.__set_event_handler() -- # start batching even if not all minions respond to ping -- self.event.io_loop.call_later( -- self.batch_presence_ping_timeout or self.opts["gather_job_timeout"], -- self.start_batch, -- ) - ping_return = yield self.local.run_job_async( - self.opts["tgt"], - "test.ping", -@@ -198,6 +194,11 @@ class BatchAsync: - **self.eauth - ) - self.targeted_minions = set(ping_return["minions"]) -+ # start batching even if not all minions respond to ping -+ yield tornado.gen.sleep( -+ self.batch_presence_ping_timeout or self.opts["gather_job_timeout"] -+ ) -+ self.event.io_loop.spawn_callback(self.start_batch) - - 
@tornado.gen.coroutine - def start_batch(self): -@@ -209,14 +210,18 @@ class BatchAsync: - "down_minions": self.targeted_minions.difference(self.minions), - "metadata": self.metadata, - } -- self.event.fire_event(data, "salt/batch/{}/start".format(self.batch_jid)) -- yield self.run_next() -+ ret = self.event.fire_event( -+ data, "salt/batch/{}/start".format(self.batch_jid) -+ ) -+ self.event.io_loop.spawn_callback(self.run_next) - -+ @tornado.gen.coroutine - def end_batch(self): - left = self.minions.symmetric_difference( - self.done_minions.union(self.timedout_minions) - ) -- if not left: -+ if not left and not self.ended: -+ self.ended = True - data = { - "available_minions": self.minions, - "down_minions": self.targeted_minions.difference(self.minions), -@@ -229,20 +234,26 @@ class BatchAsync: - for (pattern, label) in self.patterns: - if label in ["ping_return", "batch_run"]: - self.event.unsubscribe(pattern, match_type="glob") -+ del self -+ gc.collect() -+ yield - -+ @tornado.gen.coroutine - def schedule_next(self): - if not self.scheduled: - self.scheduled = True - # call later so that we maybe gather more returns -- self.event.io_loop.call_later(self.batch_delay, self.run_next) -+ yield tornado.gen.sleep(self.batch_delay) -+ self.event.io_loop.spawn_callback(self.run_next) - - @tornado.gen.coroutine - def run_next(self): -+ self.scheduled = False - next_batch = self._get_next() - if next_batch: - self.active = self.active.union(next_batch) - try: -- yield self.local.run_job_async( -+ ret = yield self.local.run_job_async( - next_batch, - self.opts["fun"], - self.opts["arg"], -@@ -254,13 +265,17 @@ class BatchAsync: - metadata=self.metadata, - ) - -- self.event.io_loop.call_later( -- self.opts["timeout"], self.find_job, set(next_batch) -- ) -+ yield tornado.gen.sleep(self.opts["timeout"]) -+ self.event.io_loop.spawn_callback(self.find_job, set(next_batch)) - except Exception as ex: - log.error("Error in scheduling next batch: %s", ex) - self.active = self.active.difference(next_batch) - else: -- self.end_batch() -- self.scheduled = False -+ yield self.end_batch() -+ gc.collect() - yield -+ -+ def __del__(self): -+ self.event = None -+ self.ioloop = None -+ gc.collect() --- -2.29.2 - - diff --git a/improvements-on-ansiblegate-module-354.patch b/improvements-on-ansiblegate-module-354.patch index b56c907..e9cce7e 100644 --- a/improvements-on-ansiblegate-module-354.patch +++ b/improvements-on-ansiblegate-module-354.patch @@ -1,4 +1,4 @@ -From aa0f845e2bbc37332db04c583f475cfe25304db6 Mon Sep 17 00:00:00 2001 +From 90cc5349ed085729db43966bf290c76db5c7f6b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Tue, 20 Apr 2021 11:01:26 +0100 @@ -33,26 +33,29 @@ Subject: [PATCH] Improvements on "ansiblegate" module (#354) * Fix pylint issue * Fix issue in documentation + +Fix issue parsing errors in ansiblegate state module --- salt/modules/ansiblegate.py | 166 +++++++++++++++++- salt/roster/ansible.py | 18 +- + salt/states/ansiblegate.py | 12 +- salt/utils/ansible.py | 44 +++++ .../pytests/unit/modules/test_ansiblegate.py | 94 +++++++++- .../example_playbooks/example-playbook2/hosts | 7 + .../example-playbook2/site.yml | 28 +++ .../playbooks/example_playbooks/playbook1.yml | 5 + tests/unit/roster/test_ansible.py | 2 +- - 8 files changed, 354 insertions(+), 10 deletions(-) + 9 files changed, 364 insertions(+), 12 deletions(-) create mode 100644 salt/utils/ansible.py create mode 100644 tests/unit/files/playbooks/example_playbooks/example-playbook2/hosts create 
mode 100644 tests/unit/files/playbooks/example_playbooks/example-playbook2/site.yml create mode 100644 tests/unit/files/playbooks/example_playbooks/playbook1.yml diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py -index 5d4b986ec2..4f96607a07 100644 +index 0279a26017..2b4d4b0bdf 100644 --- a/salt/modules/ansiblegate.py +++ b/salt/modules/ansiblegate.py -@@ -426,7 +426,171 @@ def playbooks( +@@ -425,7 +425,171 @@ def playbooks( } ret = __salt__["cmd.run_all"](**cmd_kwargs) log.debug("Ansible Playbook Return: %s", ret) @@ -276,6 +279,36 @@ index f17316bdd7..cc61f6fb7d 100644 hosts.extend(_get_hosts_from_group(child)) return hosts +diff --git a/salt/states/ansiblegate.py b/salt/states/ansiblegate.py +index 5daba0f37f..bd00653928 100644 +--- a/salt/states/ansiblegate.py ++++ b/salt/states/ansiblegate.py +@@ -183,7 +183,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs= + checks = __salt__["ansible.playbooks"]( + name, rundir=rundir, check=True, diff=True, **ansible_kwargs + ) +- if all( ++ if "stats" not in checks: ++ ret["comment"] = checks.get("stderr", checks) ++ ret["result"] = False ++ ret["changes"] = {} ++ elif all( + not check["changed"] + and not check["failures"] + and not check["unreachable"] +@@ -212,7 +216,11 @@ def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs= + results = __salt__["ansible.playbooks"]( + name, rundir=rundir, diff=True, **ansible_kwargs + ) +- if all( ++ if "stats" not in results: ++ ret["comment"] = results.get("stderr", results) ++ ret["result"] = False ++ ret["changes"] = {} ++ elif all( + not check["changed"] + and not check["failures"] + and not check["unreachable"] diff --git a/salt/utils/ansible.py b/salt/utils/ansible.py new file mode 100644 index 0000000000..ee85cb656c @@ -516,6 +549,6 @@ index a5cdcbbdbc..8bc9c1c6f7 100644 return {ansible: {"__utils__": utils, "__opts__": {}, "__runner__": runner}} -- -2.31.1 +2.33.0 diff --git a/include-aliases-in-the-fqdns-grains.patch b/include-aliases-in-the-fqdns-grains.patch index f030206..3aa9efd 100644 --- a/include-aliases-in-the-fqdns-grains.patch +++ b/include-aliases-in-the-fqdns-grains.patch @@ -1,4 +1,4 @@ -From 3c956a1cf1de17c5c49f0856051cabe2ffb4d0f2 Mon Sep 17 00:00:00 2001 +From 0c0f470f0bc082316cf854c8c4f6f6500f80f3f0 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Tue, 29 Jan 2019 11:11:38 +0100 Subject: [PATCH] Include aliases in the fqdns grains @@ -14,114 +14,43 @@ Deprecate UnitTest assertion in favour of built-in assert keyword Add UT for fqdns aliases Leverage cached interfaces, if any. 
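# Not part of the patch: a condensed sketch of the alias handling the hunks
# below add to network.fqdns. Reverse resolution yields the tuple
# (name, aliaslist, addresslist), and only aliases that pass the new
# salt.utils.network.is_fqdn() check are kept; the regex is the one the patch
# introduces, with the length guard omitted for brevity.
import re
import socket

def _is_fqdn(hostname):
    compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    return "." in hostname and all(
        compliant.match(x) for x in hostname.rstrip(".").split(".")
    )

def _lookup_fqdns(ip):
    name, aliaslist, _ = socket.gethostbyaddr(ip)
    return [socket.getfqdn(name)] + [als for als in aliaslist if _is_fqdn(als)]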
---- - salt/grains/core.py | 69 +++++++++++++++++++++----------- - salt/utils/network.py | 16 ++++++++ - tests/unit/grains/test_core.py | 45 ++++++++++++++++++--- - tests/unit/utils/test_network.py | 37 +++++++++++++++++ - 4 files changed, 138 insertions(+), 29 deletions(-) -diff --git a/salt/grains/core.py b/salt/grains/core.py -index bc3cf129cd..006878f806 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -1733,29 +1733,31 @@ def _parse_cpe_name(cpe): +Implement network.fqdns module function (bsc#1134860) (#172) + +* Duplicate fqdns logic in module.network +* Move _get_interfaces to utils.network +* Reuse network.fqdns in grains.core.fqdns +* Return empty list when fqdns grains is disabled + +Co-authored-by: Eric Siebigteroth +--- + salt/modules/network.py | 5 ++- + salt/utils/network.py | 16 +++++++++ + tests/unit/grains/test_core.py | 60 +++++++++++++++++++++----------- + tests/unit/utils/test_network.py | 37 ++++++++++++++++++++ + 4 files changed, 97 insertions(+), 21 deletions(-) + +diff --git a/salt/modules/network.py b/salt/modules/network.py +index 9280a0f854..d8ff251271 100644 +--- a/salt/modules/network.py ++++ b/salt/modules/network.py +@@ -2073,7 +2073,10 @@ def fqdns(): - - def _parse_cpe_name(cpe): -- ''' -+ """ - Parse CPE_NAME data from the os-release - - Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe - - :param cpe: - :return: -- ''' -+ """ - part = { -- 'o': 'operating system', -- 'h': 'hardware', -- 'a': 'application', -+ "o": "operating system", -+ "h": "hardware", -+ "a": "application", - } - ret = {} -- cpe = (cpe or '').split(':') -- if len(cpe) > 4 and cpe[0] == 'cpe': -- if cpe[1].startswith('/'): # WFN to URI -- ret['vendor'], ret['product'], ret['version'] = cpe[2:5] -- ret['phase'] = cpe[5] if len(cpe) > 5 else None -- ret['part'] = part.get(cpe[1][1:]) -- elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string -- ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] -- ret['part'] = part.get(cpe[2]) -+ cpe = (cpe or "").split(":") -+ if len(cpe) > 4 and cpe[0] == "cpe": -+ if cpe[1].startswith("/"): # WFN to URI -+ ret["vendor"], ret["product"], ret["version"] = cpe[2:5] -+ ret["phase"] = cpe[5] if len(cpe) > 5 else None -+ ret["part"] = part.get(cpe[1][1:]) -+ elif len(cpe) == 13 and cpe[1] == "2.3": # WFN to a string -+ ret["vendor"], ret["product"], ret["version"], ret["phase"] = [ -+ x if x != "*" else None for x in cpe[3:7] -+ ] -+ ret["part"] = part.get(cpe[2]) - - return ret - -@@ -2396,15 +2398,36 @@ def fqdns(): - """ - # Provides: - # fqdns -- opt = {"fqdns": []} -- if __opts__.get( -- "enable_fqdns_grains", -- False -- if salt.utils.platform.is_windows() or salt.utils.platform.is_proxy() -- else True, -- ): -- opt = __salt__["network.fqdns"]() -- return opt -+ -+ grains = {} -+ fqdns = set() -+ -+ addresses = salt.utils.network.ip_addrs( -+ include_loopback=False, interface_data=_get_interfaces() -+ ) -+ addresses.extend( -+ salt.utils.network.ip_addrs6( -+ include_loopback=False, interface_data=_get_interfaces() -+ ) -+ ) -+ err_message = "Exception during resolving address: %s" -+ for ip in addresses: -+ try: + def _lookup_fqdn(ip): + try: +- return [socket.getfqdn(socket.gethostbyaddr(ip)[0])] + name, aliaslist, addresslist = socket.gethostbyaddr(ip) -+ fqdns.update( -+ [socket.getfqdn(name)] -+ + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] -+ ) -+ except socket.herror as err: -+ if err.errno in (0, 
HOST_NOT_FOUND, NO_DATA): -+ # No FQDN for this IP address, so we don't need to know this all the time. -+ log.debug("Unable to resolve address %s: %s", ip, err) -+ else: -+ log.error(err_message, ip, err) -+ except (OSError, socket.gaierror, socket.timeout) as err: -+ log.error(err_message, ip, err) -+ -+ return {"fqdns": sorted(list(fqdns))} - - - def ip_fqdn(): ++ return [socket.getfqdn(name)] + [ ++ als for als in aliaslist if salt.utils.network.is_fqdn(als) ++ ] + except socket.herror as err: + if err.errno in (0, HOST_NOT_FOUND, NO_DATA): + # No FQDN for this IP address, so we don't need to know this all the time. diff --git a/salt/utils/network.py b/salt/utils/network.py -index b3e8db3886..dd7fceb91a 100644 +index 144f9dc850..5fc9a34ca4 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py -@@ -2208,3 +2208,19 @@ def filter_by_networks(values, networks): +@@ -2286,3 +2286,19 @@ def filter_by_networks(values, networks): raise ValueError("Do not know how to filter a {}".format(type(values))) else: return values @@ -142,28 +71,69 @@ index b3e8db3886..dd7fceb91a 100644 + and all(compliant.match(x) for x in hostname.rstrip(".").split(".")) + ) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py -index 7dbf34deac..d760e57a54 100644 +index 914be531ed..7173f04979 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py -@@ -1367,12 +1367,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): - ("bluesniff.foo.bar", [], ["fe80::a8b2:93ff:dead:beef"]), - ] - ret = {"fqdns": ["bluesniff.foo.bar", "foo.bar.baz", "rinzler.evil-corp.com"]} -- with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): -- with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -- fqdns = core.fqdns() -- assert "fqdns" in fqdns -- assert len(fqdns["fqdns"]) == len(ret["fqdns"]) -- assert set(fqdns["fqdns"]) == set(ret["fqdns"]) -+ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -+ fqdns = core.fqdns() -+ assert "fqdns" in fqdns -+ assert len(fqdns["fqdns"]) == len(ret["fqdns"]) -+ assert set(fqdns["fqdns"]) == set(ret["fqdns"]) +@@ -18,6 +18,7 @@ import salt.utils.network + import salt.utils.path + import salt.utils.platform + from salt._compat import ipaddress ++from salt.ext import six + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, Mock, mock_open, patch + from tests.support.unit import TestCase, skipIf +@@ -1428,7 +1429,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": False}): + assert core.fqdns() == {"fqdns": []} - @skipIf(not salt.utils.platform.is_linux(), "System is not Linux") - @patch("salt.utils.network.ip_addrs", MagicMock(return_value=["1.2.3.4"])) -@@ -1413,6 +1412,40 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): +- def test_enable_fqdns_true(self): ++ def test_enablefqdnsTrue(self): + """ + testing that grains uses network.fqdns module + """ +@@ -1439,14 +1440,14 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": True}): + assert core.fqdns() == "my.fake.domain" + +- def test_enable_fqdns_none(self): ++ def test_enablefqdnsNone(self): + """ + testing default fqdns grains is returned when enable_fqdns_grains is None + """ + with patch.dict("salt.grains.core.__opts__", {"enable_fqdns_grains": None}): + assert core.fqdns() == {"fqdns": 
[]} + +- def test_enable_fqdns_without_patching(self): ++ def test_enablefqdnswithoutpaching(self): + """ + testing fqdns grains is enabled by default + """ +@@ -1454,23 +1455,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + "salt.grains.core.__salt__", + {"network.fqdns": MagicMock(return_value="my.fake.domain")}, + ): +- # fqdns is disabled by default on Windows +- if salt.utils.platform.is_windows(): +- assert core.fqdns() == {"fqdns": []} +- else: +- assert core.fqdns() == "my.fake.domain" +- +- def test_enable_fqdns_false_is_proxy(self): +- """ +- testing fqdns grains is disabled by default for proxy minions +- """ +- with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True): +- with patch.dict( +- "salt.grains.core.__salt__", +- {"network.fqdns": MagicMock(return_value="my.fake.domain")}, +- ): +- # fqdns is disabled by default on proxy minions +- assert core.fqdns() == {"fqdns": []} ++ assert core.fqdns() == "my.fake.domain" + + def test_enable_fqdns_false_is_aix(self): + """ +@@ -1577,6 +1562,41 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): mock_log.debug.assert_called_once() mock_log.error.assert_called() @@ -192,23 +162,24 @@ index 7dbf34deac..d760e57a54 100644 + ["fe80::a8b2:93ff:dead:beef"], + ), + ] -+ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): -+ fqdns = core.fqdns() -+ assert "fqdns" in fqdns -+ for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: -+ assert alias in fqdns["fqdns"] ++ with patch.dict(core.__salt__, {"network.fqdns": salt.modules.network.fqdns}): ++ with patch.object(socket, "gethostbyaddr", side_effect=reverse_resolv_mock): ++ fqdns = core.fqdns() ++ assert "fqdns" in fqdns ++ for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: ++ assert alias in fqdns["fqdns"] + -+ for alias in ["throwmeaway", "false-hostname", "badaliass"]: -+ assert alias not in fqdns["fqdns"] ++ for alias in ["throwmeaway", "false-hostname", "badaliass"]: ++ assert alias not in fqdns["fqdns"] + def test_core_virtual(self): """ test virtual grain with cmd virt-what diff --git a/tests/unit/utils/test_network.py b/tests/unit/utils/test_network.py -index 779fc0fc34..9a37a94d8f 100644 +index 6863ccd0c9..637d5e9811 100644 --- a/tests/unit/utils/test_network.py +++ b/tests/unit/utils/test_network.py -@@ -1274,3 +1274,40 @@ class NetworkTestCase(TestCase): +@@ -1273,3 +1273,40 @@ class NetworkTestCase(TestCase): ), ): self.assertEqual(network.get_fqhostname(), host) @@ -250,6 +221,6 @@ index 779fc0fc34..9a37a94d8f 100644 + ]: + assert not network.is_fqdn(fqdn) -- -2.29.2 +2.33.0 diff --git a/integration-of-msi-authentication-with-azurearm-clou.patch b/integration-of-msi-authentication-with-azurearm-clou.patch deleted file mode 100644 index 2f8a78c..0000000 --- a/integration-of-msi-authentication-with-azurearm-clou.patch +++ /dev/null @@ -1,387 +0,0 @@ -From bb2070d4f4e8fbb5a963c521d61feb7419abdec1 Mon Sep 17 00:00:00 2001 -From: ed lane -Date: Thu, 30 Aug 2018 06:07:08 -0600 -Subject: [PATCH] Integration of MSI authentication with azurearm cloud - driver (#105) - ---- - salt/cloud/clouds/azurearm.py | 98 +++++++++++++++-------------------- - 1 file changed, 43 insertions(+), 55 deletions(-) - -diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py -index 54fc7b497b..8b9254cecb 100644 ---- a/salt/cloud/clouds/azurearm.py -+++ b/salt/cloud/clouds/azurearm.py -@@ -1,4 +1,3 @@ --# -*- coding: utf-8 -*- - """ - Azure ARM Cloud Module - ====================== -@@ -61,6 
+60,9 @@ The Azure ARM cloud module is used to control access to Microsoft Azure Resource - virtual machine type will be "Windows". Only set this parameter on profiles which install Windows operating systems. - - -+ if using MSI-style authentication: -+ * ``subscription_id`` -+ - Example ``/etc/salt/cloud.providers`` or - ``/etc/salt/cloud.providers.d/azure.conf`` configuration: - -@@ -91,7 +93,6 @@ Example ``/etc/salt/cloud.providers`` or - - - # pylint: disable=wrong-import-position,wrong-import-order --from __future__ import absolute_import, print_function, unicode_literals - - import importlib - import logging -@@ -121,7 +122,6 @@ from salt.exceptions import ( - # Salt libs - from salt.ext import six - --# Import 3rd-party libs - HAS_LIBS = False - try: - import azure.mgmt.compute.models as compute_models -@@ -179,7 +179,7 @@ def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument - ) - - for resource in provider_query.resource_types: -- if six.text_type(resource.resource_type) == kwargs["resource_type"]: -+ if str(resource.resource_type) == kwargs["resource_type"]: - resource_dict = resource.as_dict() - api_versions = resource_dict["api_versions"] - except CloudError as exc: -@@ -263,6 +263,7 @@ def get_conn(client_type): - ) - - if tenant is not None: -+ # using Service Principle style authentication... - client_id = config.get_cloud_config_value( - "client_id", get_configured_provider(), __opts__, search_global=False - ) -@@ -319,7 +320,7 @@ def avail_locations(call=None): - ) - locations = [] - for resource in provider_query.resource_types: -- if six.text_type(resource.resource_type) == "virtualMachines": -+ if str(resource.resource_type) == "virtualMachines": - resource_dict = resource.as_dict() - locations = resource_dict["locations"] - for location in locations: -@@ -399,7 +400,7 @@ def avail_images(call=None): - results = pool.map_async(_get_publisher_images, publishers) - results.wait() - -- ret = {k: v for result in results.get() for k, v in six.iteritems(result)} -+ ret = {k: v for result in results.get() for k, v in result.items()} - - return ret - -@@ -529,7 +530,7 @@ def list_nodes_full(call=None): - results = pool.map_async(_get_node_info, nodes) - results.wait() - -- group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)} -+ group_ret = {k: v for result in results.get() for k, v in result.items()} - ret.update(group_ret) - - return ret -@@ -707,7 +708,7 @@ def create_network_interface(call=None, kwargs=None): - ) - - if kwargs.get("iface_name") is None: -- kwargs["iface_name"] = "{0}-iface0".format(vm_["name"]) -+ kwargs["iface_name"] = "{}-iface0".format(vm_["name"]) - - try: - subnet_obj = netconn.subnets.get( -@@ -717,7 +718,7 @@ def create_network_interface(call=None, kwargs=None): - ) - except CloudError as exc: - raise SaltCloudSystemExit( -- '{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format( -+ '{} (Resource Group: "{}", VNET: "{}", Subnet: "{}")'.format( - exc.message, - kwargs["network_resource_group"], - kwargs["network"], -@@ -740,11 +741,11 @@ def create_network_interface(call=None, kwargs=None): - ) - pool_ids.append({"id": lbbep_data.as_dict()["id"]}) - except CloudError as exc: -- log.error("There was a cloud error: %s", six.text_type(exc)) -+ log.error("There was a cloud error: %s", str(exc)) - except KeyError as exc: - log.error( - "There was an error getting the Backend Pool ID: %s", -- six.text_type(exc), -+ str(exc), - ) - ip_kwargs["load_balancer_backend_address_pools"] = pool_ids - -@@ 
-755,7 +756,7 @@ def create_network_interface(call=None, kwargs=None): - ip_kwargs["private_ip_allocation_method"] = IPAllocationMethod.dynamic - - if kwargs.get("allocate_public_ip") is True: -- pub_ip_name = "{0}-ip".format(kwargs["iface_name"]) -+ pub_ip_name = "{}-ip".format(kwargs["iface_name"]) - poller = netconn.public_ip_addresses.create_or_update( - resource_group_name=kwargs["resource_group"], - public_ip_address_name=pub_ip_name, -@@ -773,11 +774,11 @@ def create_network_interface(call=None, kwargs=None): - ) - if pub_ip_data.ip_address: # pylint: disable=no-member - ip_kwargs["public_ip_address"] = PublicIPAddress( -- id=six.text_type(pub_ip_data.id), # pylint: disable=no-member -+ id=str(pub_ip_data.id), # pylint: disable=no-member - ) - ip_configurations = [ - NetworkInterfaceIPConfiguration( -- name="{0}-ip".format(kwargs["iface_name"]), -+ name="{}-ip".format(kwargs["iface_name"]), - subnet=subnet_obj, - **ip_kwargs - ) -@@ -790,7 +791,7 @@ def create_network_interface(call=None, kwargs=None): - raise ValueError("Timed out waiting for public IP Address.") - time.sleep(5) - else: -- priv_ip_name = "{0}-ip".format(kwargs["iface_name"]) -+ priv_ip_name = "{}-ip".format(kwargs["iface_name"]) - ip_configurations = [ - NetworkInterfaceIPConfiguration( - name=priv_ip_name, subnet=subnet_obj, **ip_kwargs -@@ -900,7 +901,7 @@ def request_instance(vm_): - ) - vm_["iface_id"] = iface_data["id"] - -- disk_name = "{0}-vol0".format(vm_["name"]) -+ disk_name = "{}-vol0".format(vm_["name"]) - - vm_username = config.get_cloud_config_value( - "ssh_username", -@@ -922,8 +923,8 @@ def request_instance(vm_): - ssh_publickeyfile_contents = spkc_.read() - except Exception as exc: # pylint: disable=broad-except - raise SaltCloudConfigError( -- "Failed to read ssh publickey file '{0}': " -- "{1}".format(ssh_publickeyfile, exc.args[-1]) -+ "Failed to read ssh publickey file '{}': " -+ "{}".format(ssh_publickeyfile, exc.args[-1]) - ) - - disable_password_authentication = config.get_cloud_config_value( -@@ -941,7 +942,7 @@ def request_instance(vm_): - if not win_installer and ssh_publickeyfile_contents is not None: - sshpublickey = SshPublicKey( - key_data=ssh_publickeyfile_contents, -- path="/home/{0}/.ssh/authorized_keys".format(vm_username), -+ path="/home/{}/.ssh/authorized_keys".format(vm_username), - ) - sshconfiguration = SshConfiguration(public_keys=[sshpublickey],) - linuxconfiguration = LinuxConfiguration( -@@ -991,9 +992,9 @@ def request_instance(vm_): - availability_set = config.get_cloud_config_value( - "availability_set", vm_, __opts__, search_global=False, default=None - ) -- if availability_set is not None and isinstance(availability_set, six.string_types): -+ if availability_set is not None and isinstance(availability_set, str): - availability_set = { -- "id": "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}".format( -+ "id": "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}".format( - subscription_id, vm_["resource_group"], availability_set - ) - } -@@ -1004,7 +1005,7 @@ def request_instance(vm_): - - storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint - -- if isinstance(vm_.get("volumes"), six.string_types): -+ if isinstance(vm_.get("volumes"), str): - volumes = salt.utils.yaml.safe_load(vm_["volumes"]) - else: - volumes = vm_.get("volumes") -@@ -1018,16 +1019,14 @@ def request_instance(vm_): - lun = 0 - luns = [] - for volume in volumes: -- if isinstance(volume, six.string_types): -+ if 
isinstance(volume, str): - volume = {"name": volume} - - volume.setdefault( - "name", - volume.get( - "name", -- volume.get( -- "name", "{0}-datadisk{1}".format(vm_["name"], six.text_type(lun)) -- ), -+ volume.get("name", "{}-datadisk{}".format(vm_["name"], str(lun))), - ), - ) - -@@ -1050,7 +1049,7 @@ def request_instance(vm_): - del volume["media_link"] - elif volume.get("vhd") == "unmanaged": - volume["vhd"] = VirtualHardDisk( -- uri="https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd".format( -+ uri="https://{}.blob.{}/vhds/{}-datadisk{}.vhd".format( - vm_["storage_account"], - storage_endpoint_suffix, - vm_["name"], -@@ -1090,7 +1089,7 @@ def request_instance(vm_): - create_option=DiskCreateOptionTypes.from_image, - name=disk_name, - vhd=VirtualHardDisk( -- uri="https://{0}.blob.{1}/vhds/{2}.vhd".format( -+ uri="https://{}.blob.{}/vhds/{}.vhd".format( - vm_["storage_account"], storage_endpoint_suffix, disk_name, - ), - ), -@@ -1209,7 +1208,7 @@ def request_instance(vm_): - __utils__["cloud.fire_event"]( - "event", - "requesting instance", -- "salt/cloud/{0}/requesting".format(vm_["name"]), -+ "salt/cloud/{}/requesting".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "requesting", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -1260,7 +1259,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "starting create", -- "salt/cloud/{0}/creating".format(vm_["name"]), -+ "salt/cloud/{}/creating".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "creating", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -1278,9 +1277,7 @@ def create(vm_): - vm_request = request_instance(vm_=vm_) - - if not vm_request or "error" in vm_request: -- err_message = "Error creating VM {0}! ({1})".format( -- vm_["name"], six.text_type(vm_request) -- ) -+ err_message = "Error creating VM {}! 
({})".format(vm_["name"], str(vm_request)) - log.error(err_message) - raise SaltCloudSystemExit(err_message) - -@@ -1322,7 +1319,7 @@ def create(vm_): - try: - log.warning(exc) - finally: -- raise SaltCloudSystemExit(six.text_type(exc)) -+ raise SaltCloudSystemExit(str(exc)) - - vm_["ssh_host"] = data - if not vm_.get("ssh_username"): -@@ -1341,7 +1338,7 @@ def create(vm_): - __utils__["cloud.fire_event"]( - "event", - "created instance", -- "salt/cloud/{0}/created".format(vm_["name"]), -+ "salt/cloud/{}/created".format(vm_["name"]), - args=__utils__["cloud.filter_event"]( - "created", vm_, ["name", "profile", "provider", "driver"] - ), -@@ -1548,9 +1545,7 @@ def _get_cloud_environment(): - cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD") - except (AttributeError, ImportError): - raise SaltCloudSystemExit( -- "The azure {0} cloud environment is not available.".format( -- cloud_environment -- ) -+ "The azure {} cloud environment is not available.".format(cloud_environment) - ) - - return cloud_env -@@ -1585,7 +1580,7 @@ def _get_block_blob_service(kwargs=None): - resource_group, storage_account - ) - storage_keys = {v.key_name: v.value for v in storage_keys.keys} -- storage_key = next(six.itervalues(storage_keys)) -+ storage_key = next(iter(storage_keys.values())) - - cloud_env = _get_cloud_environment() - -@@ -1620,7 +1615,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument - "server_encrypted": blob.properties.server_encrypted, - } - except Exception as exc: # pylint: disable=broad-except -- log.warning(six.text_type(exc)) -+ log.warning(str(exc)) - - return ret - -@@ -1655,9 +1650,7 @@ def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argum - compconn.disks.delete(kwargs["resource_group"], kwargs["blob"]) - except Exception as exc: # pylint: disable=broad-except - log.error( -- "Error deleting managed disk %s - %s", -- kwargs.get("blob"), -- six.text_type(exc), -+ "Error deleting managed disk %s - %s", kwargs.get("blob"), str(exc), - ) - return False - -@@ -1834,7 +1827,7 @@ def create_or_update_vmextension( - except CloudError as exc: - __utils__["azurearm.log_cloud_error"]( - "compute", -- "Error attempting to create the VM extension: {0}".format(exc.message), -+ "Error attempting to create the VM extension: {}".format(exc.message), - ) - ret = {"error": exc.message} - -@@ -1881,11 +1874,9 @@ def stop(name, call=None): - ret = {"error": exc.message} - if not ret: - __utils__["azurearm.log_cloud_error"]( -- "compute", "Unable to find virtual machine with name: {0}".format(name) -+ "compute", "Unable to find virtual machine with name: {}".format(name) - ) -- ret = { -- "error": "Unable to find virtual machine with name: {0}".format(name) -- } -+ ret = {"error": "Unable to find virtual machine with name: {}".format(name)} - else: - try: - instance = compconn.virtual_machines.deallocate( -@@ -1896,7 +1887,7 @@ def stop(name, call=None): - ret = vm_result.as_dict() - except CloudError as exc: - __utils__["azurearm.log_cloud_error"]( -- "compute", "Error attempting to stop {0}: {1}".format(name, exc.message) -+ "compute", "Error attempting to stop {}: {}".format(name, exc.message) - ) - ret = {"error": exc.message} - -@@ -1945,11 +1936,9 @@ def start(name, call=None): - ret = {"error": exc.message} - if not ret: - __utils__["azurearm.log_cloud_error"]( -- "compute", "Unable to find virtual machine with name: {0}".format(name) -+ "compute", "Unable to find virtual machine with name: {}".format(name) - ) -- ret 
= { -- "error": "Unable to find virtual machine with name: {0}".format(name) -- } -+ ret = {"error": "Unable to find virtual machine with name: {}".format(name)} - else: - try: - instance = compconn.virtual_machines.start( -@@ -1960,8 +1949,7 @@ def start(name, call=None): - ret = vm_result.as_dict() - except CloudError as exc: - __utils__["azurearm.log_cloud_error"]( -- "compute", -- "Error attempting to start {0}: {1}".format(name, exc.message), -+ "compute", "Error attempting to start {}: {}".format(name, exc.message), - ) - ret = {"error": exc.message} - --- -2.29.2 - - diff --git a/invalidate-file-list-cache-when-cache-file-modified-.patch b/invalidate-file-list-cache-when-cache-file-modified-.patch deleted file mode 100644 index 981febd..0000000 --- a/invalidate-file-list-cache-when-cache-file-modified-.patch +++ /dev/null @@ -1,107 +0,0 @@ -From c9268ec731371cdd7b2fc129ad111d9f73800752 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= - -Date: Tue, 22 Sep 2020 15:15:51 +0100 -Subject: [PATCH] Invalidate file list cache when cache file modified - time is in the future (bsc#1176397) - -Add test_future_file_list_cache_file_ignored unit test ---- - salt/fileserver/__init__.py | 2 +- - tests/unit/test_fileserver.py | 53 +++++++++++++++++++++++------------ - 2 files changed, 36 insertions(+), 19 deletions(-) - -diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py -index c8c417168f..b9e345d8c3 100644 ---- a/salt/fileserver/__init__.py -+++ b/salt/fileserver/__init__.py -@@ -132,7 +132,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock): - current_time, - file_mtime, - ) -- age = 0 -+ age = -1 - else: - age = current_time - file_mtime - else: -diff --git a/tests/unit/test_fileserver.py b/tests/unit/test_fileserver.py -index 0bf30ee5cc..a1087bf4b0 100644 ---- a/tests/unit/test_fileserver.py -+++ b/tests/unit/test_fileserver.py -@@ -1,14 +1,15 @@ --# -*- coding: utf-8 -*- - """ - :codeauthor: Joao Mesquita - """ - --# Import Python libs --from __future__ import absolute_import, print_function, unicode_literals - --from salt import fileserver -+import datetime -+import os -+import time - --# Import Salt Testing libs -+import salt.utils.files -+from salt import fileserver -+from tests.support.helpers import with_tempdir - from tests.support.mixins import LoaderModuleMockMixin - from tests.support.unit import TestCase - -@@ -31,22 +32,38 @@ class MapDiffTestCase(TestCase): - assert fileserver.diff_mtime_map(map1, map2) is True - - --class VCSBackendWhitelistCase(TestCase, LoaderModuleMockMixin): -+class VCSBackendWhitelistCase(TestCase): - def setup_loader_modules(self): - return {fileserver: {}} - -- def test_whitelist(self): -+ @with_tempdir() -+ def test_future_file_list_cache_file_ignored(self, cachedir): - opts = { -- "fileserver_backend": ["roots", "git", "hgfs", "svn"], -+ "fileserver_backend": ["roots"], -+ "cachedir": cachedir, - "extension_modules": "", - } -- fs = fileserver.Fileserver(opts) -- assert fs.servers.whitelist == [ -- "git", -- "gitfs", -- "hg", -- "hgfs", -- "svn", -- "svnfs", -- "roots", -- ], fs.servers.whitelist -+ -+ back_cachedir = os.path.join(cachedir, "file_lists/roots") -+ os.makedirs(os.path.join(back_cachedir)) -+ -+ # Touch a couple files -+ for filename in ("base.p", "foo.txt"): -+ with salt.utils.files.fopen( -+ os.path.join(back_cachedir, filename), "wb" -+ ) as _f: -+ if filename == "base.p": -+ _f.write(b"\x80") -+ -+ # Set modification time to file list cache file to 1 year in the future -+ now 
= datetime.datetime.utcnow() -+ future = now + datetime.timedelta(days=365) -+ mod_time = time.mktime(future.timetuple()) -+ os.utime(os.path.join(back_cachedir, "base.p"), (mod_time, mod_time)) -+ -+ list_cache = os.path.join(back_cachedir, "base.p") -+ w_lock = os.path.join(back_cachedir, ".base.w") -+ ret = fileserver.check_file_list_cache(opts, "files", list_cache, w_lock) -+ assert ( -+ ret[1] is True -+ ), "Cache file list cache file is not refreshed when future modification time" --- -2.29.2 - - diff --git a/loop-fix-variable-names-for-until_no_eval.patch b/loop-fix-variable-names-for-until_no_eval.patch deleted file mode 100644 index 03d3416..0000000 --- a/loop-fix-variable-names-for-until_no_eval.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 239e897776b889105cfd6f54092100c86f52ce21 Mon Sep 17 00:00:00 2001 -From: Alberto Planas -Date: Tue, 24 Mar 2020 17:46:23 +0100 -Subject: [PATCH] loop: fix variable names for until_no_eval - ---- - salt/states/loop.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/salt/states/loop.py b/salt/states/loop.py -index de37b7d60c..533166c5dc 100644 ---- a/salt/states/loop.py -+++ b/salt/states/loop.py -@@ -182,10 +182,10 @@ def until_no_eval( - ) - if ret["comment"]: - return ret -- if not m_args: -- m_args = [] -- if not m_kwargs: -- m_kwargs = {} -+ if not args: -+ args = [] -+ if not kwargs: -+ kwargs = {} - - if init_wait: - time.sleep(init_wait) --- -2.29.2 - - diff --git a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch b/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch deleted file mode 100644 index fb57693..0000000 --- a/loosen-azure-sdk-dependencies-in-azurearm-cloud-driv.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 6381be1a6e6d863f85dd33c82b9b949b552a7e49 Mon Sep 17 00:00:00 2001 -From: Joachim Gleissner -Date: Tue, 18 Sep 2018 15:07:13 +0200 -Subject: [PATCH] loosen azure sdk dependencies in azurearm cloud - driver - -Remove dependency to azure-cli, which is not used at all. -Use azure-storage-sdk as fallback if multiapi version is not available. 
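# Not part of the patch: the optional-dependency probe used by the hunk below,
# shown in isolation. pkgutil.find_loader() checks whether a module is
# importable without actually importing it, so the newer multiapi storage SDK
# is preferred when installed and plain azure-storage remains the fallback.
import pkgutil

if pkgutil.find_loader("azure.multiapi"):
    from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
else:
    from azure.storage import CloudStorageAccount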
- -remove unused import from azurearm driver ---- - salt/cloud/clouds/azurearm.py | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py -index 8b9254cecb..0e92a56156 100644 ---- a/salt/cloud/clouds/azurearm.py -+++ b/salt/cloud/clouds/azurearm.py -@@ -98,6 +98,7 @@ import importlib - import logging - import os - import os.path -+import pkgutil - import pprint - import string - import time -@@ -129,6 +130,11 @@ try: - from azure.storage.blob.blockblobservice import BlockBlobService - from msrestazure.azure_exceptions import CloudError - -+ if pkgutil.find_loader("azure.multiapi"): -+ # use multiapi version if available -+ from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount -+ else: -+ from azure.storage import CloudStorageAccount - HAS_LIBS = True - except ImportError: - pass --- -2.29.2 - - diff --git a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch index 1e05b8b..fb9324a 100644 --- a/make-aptpkg.list_repos-compatible-on-enabled-disable.patch +++ b/make-aptpkg.list_repos-compatible-on-enabled-disable.patch @@ -1,18 +1,18 @@ -From 7d507f8f5879a1de3e707fdb5cadd618a150123f Mon Sep 17 00:00:00 2001 +From f31ab712a0838709bee0ba2420c99caa6700fbf4 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Fri, 16 Nov 2018 10:54:12 +0100 Subject: [PATCH] Make aptpkg.list_repos compatible on enabled/disabled output --- - salt/modules/aptpkg.py | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) + salt/modules/aptpkg.py | 3 +++ + 1 file changed, 3 insertions(+) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py -index 1e2866b47b..70e173806a 100644 +index 0a1c3b347c..1b4e311cee 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py -@@ -1681,10 +1681,13 @@ def list_repos(**kwargs): +@@ -1691,6 +1691,9 @@ def list_repos(**kwargs): repo["file"] = source.file repo["comps"] = getattr(source, "comps", []) repo["disabled"] = source.disabled @@ -21,14 +21,8 @@ index 1e2866b47b..70e173806a 100644 + ] # This is for compatibility with the other modules repo["dist"] = source.dist repo["type"] = source.type -- repo["uri"] = source.uri -- repo["line"] = source.line.strip() -+ repo["uri"] = source.uri.rstrip("/") -+ repo["line"] = salt.utils.pkg.deb.strip_uri(source.line.strip()) - repo["architectures"] = getattr(source, "architectures", []) - repos.setdefault(source.uri, []).append(repo) - return repos + repo["uri"] = source.uri -- -2.29.2 +2.33.0 diff --git a/make-profiles-a-package.patch b/make-profiles-a-package.patch deleted file mode 100644 index e10a211..0000000 --- a/make-profiles-a-package.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 44dfbc906e4c19eef6c9cfe96c76a99e6077c7ec Mon Sep 17 00:00:00 2001 -From: Bo Maryniuk -Date: Mon, 8 Oct 2018 17:52:07 +0200 -Subject: [PATCH] Make profiles a package. - -Add UTF-8 encoding - -Add a docstring ---- - salt/cli/support/profiles/__init__.py | 4 ++++ - 1 file changed, 4 insertions(+) - create mode 100644 salt/cli/support/profiles/__init__.py - -diff --git a/salt/cli/support/profiles/__init__.py b/salt/cli/support/profiles/__init__.py -new file mode 100644 -index 0000000000..b86aef30b8 ---- /dev/null -+++ b/salt/cli/support/profiles/__init__.py -@@ -0,0 +1,4 @@ -+# coding=utf-8 -+''' -+Profiles for salt-support. 
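# Not part of the patch: why the aptpkg.list_repos hunk above mirrors
# ``disabled`` into ``enabled``. Callers written against the yum/zypper repo
# dicts test ``enabled``, so apt repos gain the same key for compatibility.
# The helper below is a hypothetical consumer, not code from the patch.
def enabled_repos(repos):
    # ``repos`` maps a source URI to a list of repo dicts, as list_repos returns
    return [
        repo
        for entries in repos.values()
        for repo in entries
        if repo.get("enabled", not repo.get("disabled", False))
    ]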
-+''' --- -2.29.2 - - diff --git a/move-server_id-deprecation-warning-to-reduce-log-spa.patch b/move-server_id-deprecation-warning-to-reduce-log-spa.patch deleted file mode 100644 index 1053782..0000000 --- a/move-server_id-deprecation-warning-to-reduce-log-spa.patch +++ /dev/null @@ -1,61 +0,0 @@ -From caffb14059c2d4ab186cb24918f4e53332f697af Mon Sep 17 00:00:00 2001 -From: Mihai Dinca -Date: Fri, 14 Jun 2019 15:13:12 +0200 -Subject: [PATCH] Move server_id deprecation warning to reduce log - spamming (bsc#1135567) (bsc#1135732) - ---- - salt/grains/core.py | 7 ------- - salt/minion.py | 10 ++++++++++ - 2 files changed, 10 insertions(+), 7 deletions(-) - -diff --git a/salt/grains/core.py b/salt/grains/core.py -index d7d03c5e70..5f18ba4a58 100644 ---- a/salt/grains/core.py -+++ b/salt/grains/core.py -@@ -3066,13 +3066,6 @@ def get_server_id(): - & 0xFFFFFFFF - ) - else: -- salt.utils.versions.warn_until( -- "Sodium", -- "This server_id is computed nor by Adler32 neither by CRC32. " -- 'Please use "server_id_use_crc" option and define algorithm you' -- 'prefer (default "Adler32"). The server_id will be computed with' -- "Adler32 by default.", -- ) - id_hash = _get_hash_by_shell() - server_id = {"server_id": id_hash} - -diff --git a/salt/minion.py b/salt/minion.py -index 4da665a130..4d271c6d08 100644 ---- a/salt/minion.py -+++ b/salt/minion.py -@@ -82,6 +82,7 @@ from salt.utils.event import tagify - from salt.utils.network import parse_host_port - from salt.utils.odict import OrderedDict - from salt.utils.process import ProcessManager, SignalHandlingProcess, default_signals -+from salt.utils.versions import warn_until - from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq - - HAS_PSUTIL = False -@@ -1096,6 +1097,15 @@ class MinionManager(MinionBase): - ): - masters = [masters] - -+ if not self.opts.get("server_id_use_crc"): -+ warn_until( -+ "Sodium", -+ "This server_id is computed nor by Adler32 neither by CRC32. " -+ 'Please use "server_id_use_crc" option and define algorithm you' -+ 'prefer (default "Adler32"). The server_id will be computed with' -+ "Adler32 by default.", -+ ) -+ - beacons_leader = True - for master in masters: - s_opts = copy.deepcopy(self.opts) --- -2.29.2 - - diff --git a/open-suse-3002.2-bigvm-310.patch b/open-suse-3002.2-bigvm-310.patch deleted file mode 100644 index cf80e59..0000000 --- a/open-suse-3002.2-bigvm-310.patch +++ /dev/null @@ -1,6515 +0,0 @@ -From 0d606b481752d1112321046ce78d3a7f9d2a6604 Mon Sep 17 00:00:00 2001 -From: Cedric Bosdonnat -Date: Tue, 12 Jan 2021 10:48:27 +0100 -Subject: [PATCH] Open suse 3002.2 bigvm (#310) -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -* revert stop_on_reboot commits to help applying upstream patches - -* libvirt domain template memory config fixes - -Add unit tests for _gen_xml() on the recently added memory parameters. -Also fixes an issue with an optional attribute. - -* virt: support host numa tunning capability - -* fixup! precommit failure fix - -* virt: support cpu model and topology - -* virt: make context preprocessing more reusable in _gen_xml - -Introduce mapping structures in order to help reusing the common patterns -in the virt._gen_xml() context pre processing. - -* xmlutil.change_xml properly handle xpath node number - -In XPath the node numbers are counted from 1 rather than 0. -Thus /foo/bar[0] is invalid and should be /foo/bar[1]. 
- -Since in the change_xml function we are getting the index from python -lists in these cases, we need to offset these. - -* virt: support memory_backing - -* virt: support cpu tuning and IOThread allocation - -* xmlutil.change_xml: properly handle updated return value for removals - -When deleting an attribute that doesn't exist in the node we should not -report a change was made. - -* virt.update: properly handle nosharepages and locked elements - -When updating we shouldn't set the value as text in those elements. -Libvirt seems happy with it, but it forces modifying the VM definition -even if there was no change. - -* xmlutil: use a comparison function to update XML - -When updating an XML file, we may need to have a more intelligent -comparison of the current and new values. This typically fits for the -case of numeric values that may have a negligible delta. - -* virt.update: handle tiny difference in memory values - -Libvirt may round the memory values when defining or updating a VM. That -is perfectly fine, but then the values are slightly different from the -ones passed to the virt.update() function or the virt.running state. -In those cases the state would be reapplied even though there is no real -difference with the VM. - -In order to handle that case the memory parameters in the virt.update -mapping now have a comparison function that considers the tiny differences -as equal. - -This commit also factorizes the creation of the memory entries in the -virt.update() mapping. - -* virt.update: factorize the mapping value definition - -In the mapping passed to xmlutil.change_xml() in virt.update() there are -a lot of common patterns. Extract these into helper functions. Some of -them are common enough to even be defined in the xmlutil module. - -* virt: add kvm-hint-dedicated feature handling - -* virt: add clock configuration for guests - -* virt: add qemu guest agent channel - -For libvirt to be able to communicate with the QEMU Guest Agent if -installed in the guest, a channel named org.qemu.guest_agent.0 is -needed. - -Add this channel by default on all newly created KVM virtual machines. - -* virt: allow using IO threads on disks - -* Remove unneeded VM XML definition fragments in tests - -* virt: canonicalize cpuset before comparing - -Multiple libvirt cpuset notations can designate the same thing. We need -to expand those notations into an actual cpu list in order to be able to -properly compare. - -For instance if the libvirt definition has '0-5,^4', and we have -'0,1,2,3,5' passed to virt.update(), those should not trigger an update -of the definition since they are defining the same thing (a short sketch -of this expansion appears below). - -* virt: only live update vcpu max if there is a change - -* Add console and serial to update and running status - -* virt: cleanup the consoles and serials support - -* virt: add stop_on_reboot parameter in guest states and definition - -It can be needed to force a VM to stop instead of rebooting. A typical -example of this is when creating a VM using an install CDROM ISO or when -using an autoinstallation profile. Forcing a shutdown allows libvirt to -pick up another XML definition for the new start to remove the -firstboot-only options. - -* virt: expose live parameter in virt.defined state - -Allow updating the definition of a VM without touching the live -instance. This can be helpful since live update may change the device -names in the guest.
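The cpuset canonicalization described in the "virt: canonicalize cpuset before comparing" note above reduces to expanding ranges and ``^`` exclusions into a sorted list of IDs before comparing. A condensed, hypothetical sketch of what the ``_expand_cpuset`` helper added further down in this patch does (simplified names, no input validation):

.. code-block:: python

    import re

    def expand_cpuset(cpuset):
        """Expand a libvirt cpuset like '0-5,^4' into '0,1,2,3,5'."""
        kept, excluded = set(), set()
        for part in cpuset.split(","):
            bounds = re.match(r"([0-9]+)-([0-9]+)$", part)
            if bounds:
                kept |= set(range(int(bounds.group(1)), int(bounds.group(2)) + 1))
            elif part.startswith("^"):
                excluded.add(int(part[1:]))
            else:
                kept.add(int(part))
        return ",".join(str(cpu) for cpu in sorted(kept - excluded))

    # Both notations collapse to the same canonical form, so no update is triggered:
    assert expand_cpuset("0-5,^4") == expand_cpuset("0,1,2,3,5")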
- -* Ensure virt.update stop_on_reboot is updated with its default value - -While all virt.update properties default values should not be used when -updating the XML definition, the stop_on_reboot default value (False) -needs to be passed still or the user will never be able to update with -this value. - -Co-authored-by: gqlo -Co-authored-by: gqlo -Co-authored-by: marina2209 ---- - changelog/57880.added | 1 + - changelog/58844.added | 1 + - salt/modules/virt.py | 1232 ++++++- - salt/states/virt.py | 341 +- - salt/templates/virt/libvirt_chardevs.jinja | 16 + - salt/templates/virt/libvirt_domain.jinja | 268 +- - salt/utils/xmlutil.py | 79 +- - tests/pytests/unit/modules/virt/conftest.py | 126 + - .../pytests/unit/modules/virt/test_domain.py | 335 ++ - tests/pytests/unit/utils/test_xmlutil.py | 41 + - tests/unit/modules/test_virt.py | 2961 +++++++++++++++-- - tests/unit/states/test_virt.py | 57 + - 12 files changed, 4934 insertions(+), 524 deletions(-) - create mode 100644 changelog/57880.added - create mode 100644 changelog/58844.added - create mode 100644 salt/templates/virt/libvirt_chardevs.jinja - -diff --git a/changelog/57880.added b/changelog/57880.added -new file mode 100644 -index 0000000000..6fff4295fa ---- /dev/null -+++ b/changelog/57880.added -@@ -0,0 +1 @@ -+CPU model, topology and NUMA node tuning -diff --git a/changelog/58844.added b/changelog/58844.added -new file mode 100644 -index 0000000000..c8599125d2 ---- /dev/null -+++ b/changelog/58844.added -@@ -0,0 +1 @@ -+Enhance console and serial support in virt module -diff --git a/salt/modules/virt.py b/salt/modules/virt.py -index 786bfa1e58..b852f8175d 100644 ---- a/salt/modules/virt.py -+++ b/salt/modules/virt.py -@@ -788,11 +788,11 @@ def _handle_unit(s, def_unit="m"): - return int(value) - - --def nesthash(): -+def nesthash(value=None): - """ - create default dict that allows arbitrary level of nesting - """ -- return collections.defaultdict(nesthash) -+ return collections.defaultdict(nesthash, value or {}) - - - def _gen_xml( -@@ -808,6 +808,11 @@ def _gen_xml( - graphics=None, - boot=None, - boot_dev=None, -+ numatune=None, -+ hypervisor_features=None, -+ clock=None, -+ serials=None, -+ consoles=None, - stop_on_reboot=False, - **kwargs - ): -@@ -817,24 +822,36 @@ def _gen_xml( - context = { - "hypervisor": hypervisor, - "name": name, -- "cpu": str(cpu), -+ "hypervisor_features": hypervisor_features or {}, -+ "clock": clock or {}, - "on_reboot": "destroy" if stop_on_reboot else "restart", - } - -+ context["to_kib"] = lambda v: int(_handle_unit(v) / 1024) -+ context["yesno"] = lambda v: "yes" if v else "no" -+ - context["mem"] = nesthash() - if isinstance(mem, int): -- mem = int(mem) * 1024 # MB -- context["mem"]["boot"] = str(mem) -- context["mem"]["current"] = str(mem) -+ context["mem"]["boot"] = mem -+ context["mem"]["current"] = mem - elif isinstance(mem, dict): -- for tag, val in mem.items(): -- if val: -- if tag == "slots": -- context["mem"]["slots"] = "{}='{}'".format(tag, val) -- else: -- context["mem"][tag] = str(int(_handle_unit(val) / 1024)) -+ context["mem"] = nesthash(mem) -+ -+ context["cpu"] = nesthash() -+ context["cputune"] = nesthash() -+ if isinstance(cpu, int): -+ context["cpu"]["maximum"] = str(cpu) -+ elif isinstance(cpu, dict): -+ context["cpu"] = nesthash(cpu) -+ -+ if clock: -+ offset = "utc" if clock.get("utc", True) else "localtime" -+ if "timezone" in clock: -+ offset = "timezone" -+ context["clock"]["offset"] = offset - - if hypervisor in ["qemu", "kvm"]: -+ context["numatune"] = numatune if 
numatune else {} - context["controller_model"] = False - elif hypervisor == "vmware": - # TODO: make bus and model parameterized, this works for 64-bit Linux -@@ -873,18 +890,57 @@ def _gen_xml( - context["boot"]["kernel"] = "/usr/lib/grub2/x86_64-xen/grub.xen" - context["boot_dev"] = [] - -- if "serial_type" in kwargs: -- context["serial_type"] = kwargs["serial_type"] -- if "serial_type" in context and context["serial_type"] == "tcp": -- if "telnet_port" in kwargs: -- context["telnet_port"] = kwargs["telnet_port"] -- else: -- context["telnet_port"] = 23023 # FIXME: use random unused port -- if "serial_type" in context: -- if "console" in kwargs: -- context["console"] = kwargs["console"] -- else: -- context["console"] = True -+ default_port = 23023 -+ default_chardev_type = "tcp" -+ -+ chardev_types = ["serial", "console"] -+ for chardev_type in chardev_types: -+ context[chardev_type + "s"] = [] -+ parameter_value = locals()[chardev_type + "s"] -+ if parameter_value is not None: -+ for chardev in parameter_value: -+ chardev_context = chardev -+ chardev_context["type"] = chardev.get("type", default_chardev_type) -+ -+ if chardev_context["type"] == "tcp": -+ chardev_context["port"] = chardev.get("port", default_port) -+ chardev_context["protocol"] = chardev.get("protocol", "telnet") -+ context[chardev_type + "s"].append(chardev_context) -+ -+ # processing of deprecated parameters -+ old_port = kwargs.get("telnet_port") -+ if old_port: -+ salt.utils.versions.warn_until( -+ "Phosphorus", -+ "'telnet_port' parameter has been deprecated, use the 'serials' parameter with a value " -+ "like ``{{{{'type': 'tcp', 'protocol': 'telnet', 'port': {}}}}}`` instead and a similar `consoles` parameter. " -+ "It will be removed in {{version}}.".format(old_port), -+ ) -+ -+ old_serial_type = kwargs.get("serial_type") -+ if old_serial_type: -+ salt.utils.versions.warn_until( -+ "Phosphorus", -+ "'serial_type' parameter has been deprecated, use the 'serials' parameter with a value " -+ "like ``{{{{'type': '{}', 'protocol': 'telnet' }}}}`` instead and a similar `consoles` parameter. " -+ "It will be removed in {{version}}.".format(old_serial_type), -+ ) -+ serial_context = {"type": old_serial_type} -+ if serial_context["type"] == "tcp": -+ serial_context["port"] = old_port or default_port -+ serial_context["protocol"] = "telnet" -+ context["serials"].append(serial_context) -+ -+ old_console = kwargs.get("console") -+ if old_console: -+ salt.utils.versions.warn_until( -+ "Phosphorus", -+ "'console' parameter has been deprecated, use the 'serials' and 'consoles' parameters instead. 
" -+ "It will be removed in {version}.", -+ ) -+ if old_console is True: -+ context["consoles"].append(serial_context) - - context["disks"] = [] - disk_bus_map = {"virtio": "vd", "xen": "xvd", "fdc": "fd", "ide": "hd"} -@@ -897,6 +953,7 @@ def _gen_xml( - "disk_bus": disk["model"], - "format": disk.get("format", "raw"), - "index": str(i), -+ "io": "threads" if disk.get("iothreads", False) else "native", - } - targets.append(disk_context["target_dev"]) - if disk.get("source_file"): -@@ -946,7 +1003,6 @@ def _gen_xml( - - context["os_type"] = os_type - context["arch"] = arch -- - fn_ = "libvirt_domain.jinja" - try: - template = JINJA.get_template(fn_) -@@ -1751,6 +1807,11 @@ def init( - arch=None, - boot=None, - boot_dev=None, -+ numatune=None, -+ hypervisor_features=None, -+ clock=None, -+ serials=None, -+ consoles=None, - stop_on_reboot=False, - **kwargs - ): -@@ -1758,13 +1819,126 @@ def init( - Initialize a new vm - - :param name: name of the virtual machine to create -- :param cpu: Number of virtual CPUs to assign to the virtual machine -- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to -+ :param cpu: -+ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure -+ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is -+ documented in :ref:`init-cpu-def`. -+ -+ .. code-block:: yaml -+ -+ cpu: -+ placement: static -+ cpuset: 0-11 -+ current: 5 -+ maximum: 12 -+ vcpus: -+ 0: -+ enabled: True -+ hotpluggable: False -+ order: 1 -+ 1: -+ enabled: False -+ hotpluggable: True -+ match: minimum -+ mode: custom -+ check: full -+ vendor: Intel -+ model: -+ name: core2duo -+ fallback: allow -+ vendor_id: GenuineIntel -+ topology: -+ sockets: 1 -+ cores: 12 -+ threads: 1 -+ cache: -+ level: 3 -+ mode: emulate -+ features: -+ lahf: optional -+ pcid: require -+ numa: -+ 0: -+ cpus: 0-3 -+ memory: 1g -+ discard: True -+ distances: -+ 0: 10 # sibling id : value -+ 1: 21 -+ 2: 31 -+ 3: 41 -+ 1: -+ cpus: 4-6 -+ memory: 1g -+ memAccess: shared -+ distances: -+ 0: 21 -+ 1: 10 -+ 2: 21 -+ 3: 31 -+ tuning: -+ vcpupin: -+ 0: 1-4,^2 # vcpuid : cpuset -+ 1: 0,1 -+ 2: 2,3 -+ 3: 0,4 -+ emulatorpin: 1-3 -+ iothreadpin: -+ 1: 5,6 # iothread id: cpuset -+ 2: 7,8 -+ shares: 2048 -+ period: 1000000 -+ quota: -1 -+ global_period: 1000000 -+ global_quota: -1 -+ emulator_period: 1000000 -+ emulator_quota: -1 -+ iothread_period: 1000000 -+ iothread_quota: -1 -+ vcpusched: -+ - scheduler: fifo -+ priority: 1 -+ vcpus: 0,3-5 -+ - scheduler: rr -+ priority: 3 -+ iothreadsched: -+ - scheduler: idle -+ - scheduler: batch -+ iothreads: 2,3 -+ emulatorsched: -+ - scheduler: batch -+ cachetune: -+ 0-3: # vcpus set -+ 0: # cache id -+ level: 3 -+ type: both -+ size: 4 -+ 1: -+ level: 3 -+ type: both -+ size: 6 -+ monitor: -+ 1: 3 -+ 0-3: 3 -+ 4-5: -+ monitor: -+ 4: 3 # vcpus: level -+ 5: 3 -+ memorytune: -+ 0-3: # vcpus set -+ 0: 60 # node id: bandwidth -+ 4-5: -+ 0: 60 -+ iothreads: 4 -+ -+ .. versionadded:: Aluminium -+ -+ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to - contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, -- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The -- structure of the dictionary is documented in :ref:`init-mem-def`. 
Both decimal and binary base are supported. -- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be -- an integer. -+ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``, -+ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure -+ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit -+ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer. - - .. code-block:: python - -@@ -1773,10 +1947,17 @@ def init( - 'current': 1g, - 'max': 1g, - 'slots': 10, -- 'hard_limit': '1024' -- 'soft_limit': '512m' -- 'swap_hard_limit': '1g' -- 'min_guarantee': '512mib' -+ 'hard_limit': '1024', -+ 'soft_limit': '512m', -+ 'swap_hard_limit': '1g', -+ 'min_guarantee': '512mib', -+ 'hugepages': [{'nodeset': '0-3,^2', 'size': '1g'}, {'nodeset': '2', 'size': '2m'}], -+ 'nosharepages': True, -+ 'locked': True, -+ 'source': 'file', -+ 'access': 'shared', -+ 'allocation': 'immediate', -+ 'discard': True - } - - .. versionchanged:: Magnesium -@@ -1872,6 +2053,232 @@ def init( - - By default, the value will ``"hd"``. - -+ :param numatune: -+ The optional numatune element provides details of how to tune the performance of a NUMA host via controlling NUMA -+ policy for domain process. The optional ``memory`` element specifies how to allocate memory for the domain process -+ on a NUMA host. ``memnode`` elements can specify memory allocation policies per each guest NUMA node. The definition -+ used in the dictionary can be found at :ref:`init-cpu-def`. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: python -+ -+ { -+ 'memory': {'mode': 'strict', 'nodeset': '0-11'}, -+ 'memnodes': {0: {'mode': 'strict', 'nodeset': 1}, 1: {'mode': 'preferred', 'nodeset': 2}} -+ } -+ -+ :param hypervisor_features: -+ Enable or disable hypervisor-specific features on the virtual machine. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: yaml -+ -+ hypervisor_features: -+ kvm-hint-dedicated: True -+ -+ :param clock: -+ Configure the guest clock. -+ The value is a dictionary with the following keys: -+ -+ adjustment -+ time adjustment in seconds or ``reset`` -+ -+ utc -+ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``. -+ -+ timezone -+ synchronize the guest to the corresponding timezone -+ -+ timers -+ a dictionary associating the timer name with its configuration. -+ This configuration is a dictionary with the properties ``track``, ``tickpolicy``, -+ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``. -+ See `libvirt time keeping documentation `_ for the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ Set the clock to local time using an offset in seconds: -+ -+ .. code-block:: yaml -+ -+ clock: -+ adjustment: 3600 -+ utc: False -+ -+ Set the clock to a specific time zone: -+ -+ .. code-block:: yaml -+ -+ clock: -+ timezone: CEST -+ -+ Tweak guest timers: -+ -+ .. code-block:: yaml -+ -+ clock: -+ timers: -+ tsc: -+ frequency: 3504000000 -+ mode: native -+ rtc: -+ track: wall -+ tickpolicy: catchup -+ slew: 4636 -+ threshold: 123 -+ limit: 2342 -+ hpet: -+ present: False -+ -+ :param serials: -+ Dictionary providing details on the serials connection to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ ..
versionadded:: Aluminium -+ -+ :param consoles: -+ Dictionary providing details on the consoles device to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ .. _init-cpu-def: -+ -+ .. rubric:: cpu parameters definition -+ -+ The cpu parameters dictionary can contain the following properties: -+ -+ cpuset -+ a comma-separated list of physical CPU numbers that the domain process and virtual CPUs can be pinned to by default. -+ e.g. ``1-4,^3`` (cpuset 3 is excluded). -+ -+ current -+ the number of virtual cpus available at startup -+ -+ placement -+ indicates the CPU placement mode for the domain process. The value can be either ``static`` or ``auto`` -+ -+ vcpus -+ specify the state of individual vcpus. Possible attributes for each individual vcpu include: ``id``, ``enabled``, -+ ``hotpluggable`` and ``order``. Valid ``ids`` are from 0 to the maximum vCPU count minus 1. ``enabled`` takes -+ boolean values which control the state of the vcpu. ``hotpluggable`` takes a boolean value which controls whether -+ the given vCPU can be hotplugged and hotunplugged. ``order`` takes an integer value which specifies the order to add -+ the online vCPUs. -+ -+ match -+ The cpu ``match`` attribute specifies how strictly the virtual CPU provided to the guest matches the CPU -+ requirements, possible values are ``minimum``, ``exact`` or ``strict``. -+ -+ check -+ The optional cpu ``check`` attribute can be used to request a specific way of checking whether the virtual -+ CPU matches the specification, possible values are ``none``, ``partial`` and ``full``. -+ -+ mode -+ The optional cpu ``mode`` attribute may be used to make it easier to configure a guest CPU to be as close -+ to the host CPU as possible, possible values are ``custom``, ``host-model`` and ``host-passthrough``. -+ -+ model -+ specifies the CPU model requested by the guest. An optional ``fallback`` attribute can be used to forbid libvirt from -+ falling back to the closest model supported by the hypervisor, possible values are ``allow`` or ``forbid``. The ``vendor_id`` -+ attribute can be used to set the vendor id seen by the guest; its length must be exactly 12 characters. -+ -+ vendor -+ specifies the CPU vendor requested by the guest. -+ -+ topology -+ specifies the requested topology of the virtual CPU provided to the guest. Four possible attributes, ``sockets``, ``dies``, -+ ``cores``, and ``threads``, accept non-zero positive integer values. They refer to the number of CPU sockets per -+ NUMA node, number of dies per socket, number of cores per die, and number of threads per core, respectively. -+ -+ features -+ A dictionary containing a set of cpu features to fine-tune the features provided by the selected CPU model. Use the cpu -+ feature ``name`` as the key and the ``policy`` as the value. The ``policy`` attribute takes ``force``, ``require``, -+ ``optional``, ``disable`` or ``forbid``. -+ -+ cache -+ describes the virtual CPU cache. The optional ``level`` attribute takes an integer value which describes the cache level; -+ the ``mode`` attribute supports three possible values: ``emulate``, ``passthrough``, ``disable`` -+ -+ numa -+ specify the guest numa topology. The ``cell`` element specifies a NUMA cell or a NUMA node, ``cpus`` specifies the -+ CPU or range of CPUs that are part of the node, ``memory`` specifies the size of the node memory. All cells -+ should have an ``id`` attribute in case referring to some cell is necessary in the code.
optional attribute -+ ``memAccess`` control whether the memory is to be mapped as ``shared`` or ``private``, ``discard`` attribute which -+ fine tunes the discard feature for given numa node, possible values are ``True`` or ``False``. ``distances`` -+ element define the distance between NUMA cells and ``sibling`` sub-element is used to specify the distance value -+ between sibling NUMA cells. -+ -+ vcpupin -+ The optional vcpupin element specifies which of host's physical CPUs the domain vCPU will be pinned to. -+ -+ emulatorpin -+ The optional emulatorpin element specifies which of host physical CPUs the "emulator", a subset of a domain not -+ including vCPU or iothreads will be pinned to. -+ -+ iothreadpin -+ The optional iothreadpin element specifies which of host physical CPUs the IOThreads will be pinned to. -+ -+ shares -+ The optional shares element specifies the proportional weighted share for the domain. -+ -+ period -+ The optional period element specifies the enforcement interval (unit: microseconds). -+ -+ quota -+ The optional quota element specifies the maximum allowed bandwidth (unit: microseconds). -+ -+ global_period -+ The optional global_period element specifies the enforcement CFS scheduler interval (unit: microseconds) for the -+ whole domain in contrast with period which enforces the interval per vCPU. -+ -+ global_quota -+ The optional global_quota element specifies the maximum allowed bandwidth (unit: microseconds) within a period -+ for the whole domain. -+ -+ emulator_period -+ The optional emulator_period element specifies the enforcement interval (unit: microseconds). -+ -+ emulator_quota -+ The optional emulator_quota element specifies the maximum allowed bandwidth (unit: microseconds) for domain's -+ emulator threads (those excluding vCPUs). -+ -+ iothread_period -+ The optional iothread_period element specifies the enforcement interval (unit: microseconds) for IOThreads. -+ -+ iothread_quota -+ The optional iothread_quota element specifies the maximum allowed bandwidth (unit: microseconds) for IOThreads. -+ -+ vcpusched -+ specify the scheduler type for vCPUs. -+ The value is a list of dictionaries with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``) -+ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer and the -+ ``vcpus`` value is a cpu set like ``1-4,^3,6`` or simply the vcpu id. -+ -+ iothreadsched -+ specify the scheduler type for IO threads. -+ The value is a list of dictionaries with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``) -+ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer and the -+ ``vcpus`` value is a cpu set like ``1-4,^3,6`` or simply the vcpu id. -+ -+ emulatorsched -+ specify the scheduler type (values batch, idle, fifo, rr) for particular the emulator. -+ The value is a dictionary with the ``scheduler`` key (values ``batch``, ``idle``, ``fifo``, ``rr``) -+ and the optional ``priority`` and ``vcpus`` keys. The ``priority`` value usually is a positive integer. -+ -+ cachetune -+ Optional cachetune element can control allocations for CPU caches using the resctrl on the host. -+ -+ monitor -+ The optional element monitor creates the cache monitor(s) for current cache allocation. -+ -+ memorytune -+ Optional memorytune element can control allocations for memory bandwidth using the resctrl on the host. -+ -+ iothreads -+ Number of threads for supported disk devices to perform I/O requests. 
iothread id will be numbered from 1 to -+ the provided number (Default: None). -+ - .. _init-boot-def: - - .. rubric:: Boot parameters definition -@@ -1932,6 +2339,33 @@ def init( - min_guarantee - the guaranteed minimum memory allocation for the guest - -+ hugepages -+ memory allocated using ``hugepages`` instead of the normal native page size. It takes a list of -+ dictionaries with ``nodeset`` and ``size`` keys. -+ For example ``"hugepages": [{"nodeset": "1-4,^3", "size": "2m"}, {"nodeset": "3", "size": "1g"}]``. -+ -+ nosharepages -+ boolean value to instruct hypervisor to disable shared pages (memory merge, KSM) for this domain -+ -+ locked -+ boolean value that allows memory pages belonging to the domain will be locked in host's memory and the host will -+ not be allowed to swap them out, which might be required for some workloads such as real-time. -+ -+ source -+ possible values are ``file`` which utilizes file memorybacking, ``anonymous`` by default and ``memfd`` backing. -+ (QEMU/KVM only) -+ -+ access -+ specify if the memory is to be ``shared`` or ``private``. This can be overridden per numa node by memAccess. -+ -+ allocation -+ specify when to allocate the memory by supplying either ``immediate`` or ``ondemand``. -+ -+ discard -+ boolean value to ensure the memory content is discarded just before guest shuts down (or when DIMM module is -+ unplugged). Please note that this is just an optimization and is not guaranteed to work in all cases -+ (e.g. when hypervisor crashes). (QEMU/KVM only) -+ - .. _init-nic-def: - - .. rubric:: Network Interfaces Definitions -@@ -2051,6 +2485,10 @@ def init( - hostname_property: virt:hostname - sparse_volume: True - -+ iothreads -+ When ``True`` dedicated threads will be used for the I/O of the disk. -+ (Default: ``False``) -+ - .. _init-graphics-def: - - .. rubric:: Graphics Definition -@@ -2077,6 +2515,42 @@ def init( - By default, not setting the ``listen`` part of the dictionary will default to - listen on all addresses. - -+ .. _init-chardevs-def: -+ -+ .. rubric:: Serials and Consoles Definitions -+ -+ Serial dictionaries can contain the following properties: -+ -+ type -+ Type of the serial connection, like ``'tcp'``, ``'pty'``, ``'file'``, ``'udp'``, ``'dev'``, -+ ``'pipe'``, ``'unix'``. -+ -+ path -+ Path to the source device. Can be a log file, a host character device to pass through, -+ a unix socket, a named pipe path. -+ -+ host -+ The serial UDP or TCP host name. -+ (Default: 23023) -+ -+ port -+ The serial UDP or TCP port number. -+ (Default: 23023) -+ -+ protocol -+ Name of the TCP connection protocol. -+ (Default: telnet) -+ -+ tls -+ Boolean value indicating whether to use hypervisor TLS certificates environment for TCP devices. -+ -+ target_port -+ The guest device port number starting from 0 -+ -+ target_type -+ The guest device type. Common values are ``serial``, ``virtio`` or ``usb-serial``, but more are documented in -+ `the libvirt documentation `_. -+ - .. rubric:: CLI Example - - .. 
code-block:: bash -@@ -2226,6 +2700,11 @@ def init( - graphics, - boot, - boot_dev, -+ numatune, -+ hypervisor_features, -+ clock, -+ serials, -+ consoles, - stop_on_reboot, - **kwargs - ) -@@ -2249,19 +2728,15 @@ def _disks_equal(disk1, disk2): - """ - target1 = disk1.find("target") - target2 = disk2.find("target") -- source1 = ( -- disk1.find("source") -- if disk1.find("source") is not None -- else ElementTree.Element("source") -- ) -- source2 = ( -- disk2.find("source") -- if disk2.find("source") is not None -- else ElementTree.Element("source") -- ) - -- source1_dict = xmlutil.to_dict(source1, True) -- source2_dict = xmlutil.to_dict(source2, True) -+ disk1_dict = xmlutil.to_dict(disk1, True) -+ disk2_dict = xmlutil.to_dict(disk2, True) -+ -+ source1_dict = disk1_dict.get("source", {}) -+ source2_dict = disk2_dict.get("source", {}) -+ -+ io1 = disk1_dict.get("driver", {}).get("io", "native") -+ io2 = disk2_dict.get("driver", {}).get("io", "native") - - # Remove the index added by libvirt in the source for backing chain - if source1_dict: -@@ -2276,6 +2751,7 @@ def _disks_equal(disk1, disk2): - and target1.get("bus") == target2.get("bus") - and disk1.get("device", "disk") == disk2.get("device", "disk") - and target1.get("dev") == target2.get("dev") -+ and io1 == io2 - ) - - -@@ -2443,6 +2919,101 @@ def _diff_graphics_lists(old, new): - return _diff_lists(old, new, _graphics_equal) - - -+def _expand_cpuset(cpuset): -+ """ -+ Expand the libvirt cpuset and nodeset values into a list of cpu/node IDs -+ """ -+ if cpuset is None: -+ return None -+ -+ if isinstance(cpuset, int): -+ return str(cpuset) -+ -+ result = set() -+ toremove = set() -+ for part in cpuset.split(","): -+ m = re.match("([0-9]+)-([0-9]+)", part) -+ if m: -+ result |= set(range(int(m.group(1)), int(m.group(2)) + 1)) -+ elif part.startswith("^"): -+ toremove.add(int(part[1:])) -+ else: -+ result.add(int(part)) -+ cpus = list(result - toremove) -+ cpus.sort() -+ cpus = [str(cpu) for cpu in cpus] -+ return ",".join(cpus) -+ -+ -+def _normalize_cpusets(desc, data): -+ """ -+ Expand the cpusets that can't be expanded by the change_xml() function, -+ namely the ones that are used as keys and in the middle of the XPath expressions. 
-+ """ -+ # Normalize the cpusets keys in the XML -+ xpaths = ["cputune/cachetune", "cputune/cachetune/monitor", "cputune/memorytune"] -+ for xpath in xpaths: -+ nodes = desc.findall(xpath) -+ for node in nodes: -+ node.set("vcpus", _expand_cpuset(node.get("vcpus"))) -+ -+ # data paths to change: -+ # - cpu:tuning:cachetune:{id}:monitor:{sid} -+ # - cpu:tuning:memorytune:{id} -+ if not isinstance(data.get("cpu"), dict): -+ return -+ tuning = data["cpu"].get("tuning", {}) -+ for child in ["cachetune", "memorytune"]: -+ if tuning.get(child): -+ new_item = dict() -+ for cpuset, value in tuning[child].items(): -+ if child == "cachetune" and value.get("monitor"): -+ value["monitor"] = { -+ _expand_cpuset(monitor_cpus): monitor -+ for monitor_cpus, monitor in value["monitor"].items() -+ } -+ new_item[_expand_cpuset(cpuset)] = value -+ tuning[child] = new_item -+ -+ -+def _serial_or_concole_equal(old, new): -+ def _filter_serial_or_concole(item): -+ """ -+ Filter out elements to ignore when comparing items -+ """ -+ return { -+ "type": item.attrib["type"], -+ "port": item.find("source").attrib["service"] -+ if item.find("source") is not None -+ else None, -+ "protocol": item.find("protocol").attrib["type"] -+ if item.find("protocol") is not None -+ else None, -+ } -+ -+ return _filter_serial_or_concole(old) == _filter_serial_or_concole(new) -+ -+ -+def _diff_serial_list(old, new): -+ """ -+ Compare serial definitions to extract the changes -+ -+ :param old: list of ElementTree nodes representing the old serials -+ :param new: list of ElementTree nodes representing the new serials -+ """ -+ return _diff_lists(old, new, _serial_or_concole_equal) -+ -+ -+def _diff_console_list(old, new): -+ """ -+ Compare console definitions to extract the changes -+ -+ :param old: list of ElementTree nodes representing the old consoles -+ :param new: list of ElementTree nodes representing the new consoles -+ """ -+ return _diff_lists(old, new, _serial_or_concole_equal) -+ -+ - def update( - name, - cpu=0, -@@ -2454,8 +3025,13 @@ def update( - graphics=None, - live=True, - boot=None, -+ numatune=None, - test=False, - boot_dev=None, -+ hypervisor_features=None, -+ clock=None, -+ serials=None, -+ consoles=None, - stop_on_reboot=False, - **kwargs - ): -@@ -2463,13 +3039,20 @@ def update( - Update the definition of an existing domain. - - :param name: Name of the domain to update -- :param cpu: Number of virtual CPUs to assign to the virtual machine -- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to -+ :param cpu: -+ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure -+ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is -+ documented in :ref:`init-cpu-def`. -+ -+ To update any cpu parameters specify the new values to the corresponding tag. To remove any element or attribute, -+ specify ``None`` object. Please note that ``None`` object is mapped to ``null`` in yaml, use ``null`` in sls file -+ instead. -+ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to - contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, -- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The -- structure of the dictionary is documented in :ref:`init-mem-def`. 
Both decimal and binary base are supported. -- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be -- an integer. -+ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``, -+ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure -+ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit -+ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer. - - To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None`` - is mapped to ``null`` in sls file, pass ``null`` in sls file instead. -@@ -2538,6 +3121,30 @@ def update( - - .. versionadded:: Magnesium - -+ :param numatune: -+ The optional numatune element provides details of how to tune the performance of a NUMA host via controlling NUMA -+ policy for domain process. The optional ``memory`` element specifies how to allocate memory for the domain process -+ on a NUMA host. ``memnode`` elements can specify memory allocation policies per each guest NUMA node. The definition -+ used in the dictionary can be found at :ref:`init-cpu-def`. -+ -+ To update any numatune parameters, specify the new value. To remove any ``numatune`` parameters, pass a None object, -+ for instance: 'numatune': ``None``. Please note that ``None`` is mapped to ``null`` in sls file, pass ``null`` in -+ sls file instead. -+ -+ .. versionadded:: Aluminium -+ -+ :param serials: -+ Dictionary providing details on the serials connection to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ :param consoles: -+ Dictionary providing details on the consoles device to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+ - :param stop_on_reboot: - If set to ``True`` the guest will stop instead of rebooting. - This is specially useful when creating a virtual machine with an installation cdrom or -@@ -2550,6 +3157,69 @@ def update( - - .. versionadded:: sodium - -+ :param hypervisor_features: -+ Enable or disable hypervisor-specific features on the virtual machine. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: yaml -+ -+ hypervisor_features: -+ kvm-hint-dedicated: True -+ -+ :param clock: -+ Configure the guest clock. -+ The value is a dictionary with the following keys: -+ -+ adjustment -+ time adjustment in seconds or ``reset`` -+ -+ utc -+ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``. -+ -+ timezone -+ synchronize the guest to the corresponding timezone -+ -+ timers -+ a dictionary associating the timer name with its configuration. -+ This configuration is a dictionary with the properties ``track``, ``tickpolicy``, -+ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``. -+ See `libvirt time keeping documentation `_ for the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ Set the clock to local time using an offset in seconds: -+ -+ .. code-block:: yaml -+ -+ clock: -+ adjustment: 3600 -+ utc: False -+ -+ Set the clock to a specific time zone: -+ -+ .. code-block:: yaml -+ -+ clock: -+ timezone: CEST -+ -+ Tweak guest timers: -+ -+ ..
code-block:: yaml -+ -+ clock: -+ timers: -+ tsc: -+ frequency: 3504000000 -+ mode: native -+ rtc: -+ track: wall -+ tickpolicy: catchup -+ slew: 4636 -+ threshold: 123 -+ limit: 2342 -+ hpet: -+ present: False -+ - :return: - - Returns a dictionary indicating the status of what has been done. It is structured in -@@ -2595,12 +3265,11 @@ def update( - boot = _handle_remote_boot_params(boot) - if boot.get("efi", None) is not None: - need_update = _handle_efi_param(boot, desc) -- - new_desc = ElementTree.fromstring( - _gen_xml( - conn, - name, -- cpu or 0, -+ cpu, - mem or 0, - all_disks, - _get_merged_nics(hypervisor, nic_profile, interfaces), -@@ -2610,17 +3279,19 @@ def update( - graphics, - boot, - boot_dev, -- stop_on_reboot, -+ numatune, -+ serial=serials, -+ consoles=consoles, -+ stop_on_reboot=stop_on_reboot, - **kwargs - ) - ) - -- # Update the cpu -- cpu_node = desc.find("vcpu") -- if cpu and int(cpu_node.text) != cpu: -- cpu_node.text = str(cpu) -- cpu_node.set("current", str(cpu)) -- need_update = True -+ if clock: -+ offset = "utc" if clock.get("utc", True) else "localtime" -+ if "timezone" in clock: -+ offset = "timezone" -+ clock["offset"] = offset - - def _set_loader(node, value): - salt.utils.xmlutil.set_node_text(node, value) -@@ -2631,20 +3302,110 @@ def update( - def _set_nvram(node, value): - node.set("template", value) - -- def _set_with_byte_unit(node, value): -- node.text = str(value) -- node.set("unit", "bytes") -+ def _set_with_byte_unit(attr_name=None): -+ def _setter(node, value): -+ if attr_name: -+ node.set(attr_name, str(value)) -+ else: -+ node.text = str(value) -+ node.set("unit", "bytes") -+ -+ return _setter - - def _get_with_unit(node): - unit = node.get("unit", "KiB") - # _handle_unit treats bytes as invalid unit for the purpose of consistency - unit = unit if unit != "bytes" else "b" -- value = node.get("memory") or node.text -+ value = node.get("memory") or node.get("size") or node.text - return _handle_unit("{}{}".format(value, unit)) if value else None - -+ def _set_vcpu(node, value): -+ node.text = str(value) -+ node.set("current", str(value)) -+ - old_mem = int(_get_with_unit(desc.find("memory")) / 1024) -+ old_cpu = int(desc.find("./vcpu").text) -+ -+ def _almost_equal(current, new): -+ if current is None or new is None: -+ return False -+ return abs(current - new) / current < 1e-03 -+ -+ def _yesno_attribute(path, xpath, attr_name, ignored=None): -+ return xmlutil.attribute( -+ path, xpath, attr_name, ignored, lambda v: "yes" if v else "no" -+ ) -+ -+ def _memory_parameter(path, xpath, attr_name=None, ignored=None): -+ entry = { -+ "path": path, -+ "xpath": xpath, -+ "convert": _handle_unit, -+ "get": _get_with_unit, -+ "set": _set_with_byte_unit(attr_name), -+ "equals": _almost_equal, -+ } -+ if attr_name: -+ entry["del"] = salt.utils.xmlutil.del_attribute(attr_name, ignored) -+ return entry -+ -+ def _cpuset_parameter(path, xpath, attr_name=None, ignored=None): -+ def _set_cpuset(node, value): -+ if attr_name: -+ node.set(attr_name, value) -+ else: -+ node.text = value -+ -+ entry = { -+ "path": path, -+ "xpath": xpath, -+ "convert": _expand_cpuset, -+ "get": lambda n: _expand_cpuset(n.get(attr_name) if attr_name else n.text), -+ "set": _set_cpuset, -+ } -+ if attr_name: -+ entry["del"] = salt.utils.xmlutil.del_attribute(attr_name, ignored) -+ return entry - - # Update the kernel boot parameters -+ data = {k: v for k, v in locals().items() if bool(v)} -+ data["stop_on_reboot"] = stop_on_reboot -+ if boot_dev: -+ data["boot_dev"] = 
boot_dev.split() -+ -+ # Set the missing optional attributes and timers to None in timers to help cleaning up -+ timer_names = [ -+ "platform", -+ "hpet", -+ "kvmclock", -+ "pit", -+ "rtc", -+ "tsc", -+ "hypervclock", -+ "armvtimer", -+ ] -+ if data.get("clock", {}).get("timers"): -+ attributes = [ -+ "track", -+ "tickpolicy", -+ "frequency", -+ "mode", -+ "present", -+ "slew", -+ "threshold", -+ "limit", -+ ] -+ for timer in data["clock"]["timers"].values(): -+ for attribute in attributes: -+ if attribute not in timer: -+ timer[attribute] = None -+ -+ for timer_name in timer_names: -+ if timer_name not in data["clock"]["timers"]: -+ data["clock"]["timers"][timer_name] = None -+ -+ _normalize_cpusets(desc, data) -+ - params_mapping = [ - { - "path": "stop_on_reboot", -@@ -2657,89 +3418,251 @@ def update( - {"path": "boot:loader", "xpath": "os/loader", "set": _set_loader}, - {"path": "boot:nvram", "xpath": "os/nvram", "set": _set_nvram}, - # Update the memory, note that libvirt outputs all memory sizes in KiB -+ _memory_parameter("mem", "memory"), -+ _memory_parameter("mem", "currentMemory"), -+ _memory_parameter("mem:max", "maxMemory"), -+ _memory_parameter("mem:boot", "memory"), -+ _memory_parameter("mem:current", "currentMemory"), -+ xmlutil.attribute("mem:slots", "maxMemory", "slots", ["unit"]), -+ _memory_parameter("mem:hard_limit", "memtune/hard_limit"), -+ _memory_parameter("mem:soft_limit", "memtune/soft_limit"), -+ _memory_parameter("mem:swap_hard_limit", "memtune/swap_hard_limit"), -+ _memory_parameter("mem:min_guarantee", "memtune/min_guarantee"), -+ xmlutil.attribute("boot_dev:{dev}", "os/boot[$dev]", "dev"), -+ _memory_parameter( -+ "mem:hugepages:{id}:size", -+ "memoryBacking/hugepages/page[$id]", -+ "size", -+ ["unit", "nodeset"], -+ ), -+ _cpuset_parameter( -+ "mem:hugepages:{id}:nodeset", "memoryBacking/hugepages/page[$id]", "nodeset" -+ ), - { -- "path": "mem", -- "xpath": "memory", -- "convert": _handle_unit, -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem", -- "xpath": "currentMemory", -- "convert": _handle_unit, -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem:max", -- "convert": _handle_unit, -- "xpath": "maxMemory", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -+ "path": "mem:nosharepages", -+ "xpath": "memoryBacking/nosharepages", -+ "get": lambda n: n is not None, -+ "set": lambda n, v: None, - }, - { -- "path": "mem:boot", -- "convert": _handle_unit, -- "xpath": "memory", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem:current", -- "convert": _handle_unit, -- "xpath": "currentMemory", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -+ "path": "mem:locked", -+ "xpath": "memoryBacking/locked", -+ "get": lambda n: n is not None, -+ "set": lambda n, v: None, - }, -+ xmlutil.attribute("mem:source", "memoryBacking/source", "type"), -+ xmlutil.attribute("mem:access", "memoryBacking/access", "mode"), -+ xmlutil.attribute("mem:allocation", "memoryBacking/allocation", "mode"), -+ {"path": "mem:discard", "xpath": "memoryBacking/discard"}, - { -- "path": "mem:slots", -- "xpath": "maxMemory", -- "get": lambda n: n.get("slots"), -- "set": lambda n, v: n.set("slots", str(v)), -- "del": salt.utils.xmlutil.del_attribute("slots", ["unit"]), -- }, -- { -- "path": "mem:hard_limit", -- "convert": _handle_unit, -- "xpath": "memtune/hard_limit", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem:soft_limit", -- "convert": 
_handle_unit, -- "xpath": "memtune/soft_limit", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem:swap_hard_limit", -- "convert": _handle_unit, -- "xpath": "memtune/swap_hard_limit", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "mem:min_guarantee", -- "convert": _handle_unit, -- "xpath": "memtune/min_guarantee", -- "get": _get_with_unit, -- "set": _set_with_byte_unit, -- }, -- { -- "path": "boot_dev:{dev}", -- "xpath": "os/boot[$dev]", -- "get": lambda n: n.get("dev"), -- "set": lambda n, v: n.set("dev", v), -- "del": salt.utils.xmlutil.del_attribute("dev"), -+ "path": "cpu", -+ "xpath": "vcpu", -+ "get": lambda n: int(n.text), -+ "set": _set_vcpu, - }, -+ {"path": "cpu:maximum", "xpath": "vcpu", "get": lambda n: int(n.text)}, -+ xmlutil.attribute("cpu:placement", "vcpu", "placement"), -+ _cpuset_parameter("cpu:cpuset", "vcpu", "cpuset"), -+ xmlutil.attribute("cpu:current", "vcpu", "current"), -+ xmlutil.attribute("cpu:match", "cpu", "match"), -+ xmlutil.attribute("cpu:mode", "cpu", "mode"), -+ xmlutil.attribute("cpu:check", "cpu", "check"), -+ {"path": "cpu:model:name", "xpath": "cpu/model"}, -+ xmlutil.attribute("cpu:model:fallback", "cpu/model", "fallback"), -+ xmlutil.attribute("cpu:model:vendor_id", "cpu/model", "vendor_id"), -+ {"path": "cpu:vendor", "xpath": "cpu/vendor"}, -+ xmlutil.attribute("cpu:topology:sockets", "cpu/topology", "sockets"), -+ xmlutil.attribute("cpu:topology:cores", "cpu/topology", "cores"), -+ xmlutil.attribute("cpu:topology:threads", "cpu/topology", "threads"), -+ xmlutil.attribute("cpu:cache:level", "cpu/cache", "level"), -+ xmlutil.attribute("cpu:cache:mode", "cpu/cache", "mode"), -+ xmlutil.attribute( -+ "cpu:features:{id}", "cpu/feature[@name='$id']", "policy", ["name"] -+ ), -+ _yesno_attribute( -+ "cpu:vcpus:{id}:enabled", "vcpus/vcpu[@id='$id']", "enabled", ["id"] -+ ), -+ _yesno_attribute( -+ "cpu:vcpus:{id}:hotpluggable", -+ "vcpus/vcpu[@id='$id']", -+ "hotpluggable", -+ ["id"], -+ ), -+ xmlutil.int_attribute( -+ "cpu:vcpus:{id}:order", "vcpus/vcpu[@id='$id']", "order", ["id"] -+ ), -+ _cpuset_parameter( -+ "cpu:numa:{id}:cpus", "cpu/numa/cell[@id='$id']", "cpus", ["id"] -+ ), -+ _memory_parameter( -+ "cpu:numa:{id}:memory", "cpu/numa/cell[@id='$id']", "memory", ["id"] -+ ), -+ _yesno_attribute( -+ "cpu:numa:{id}:discard", "cpu/numa/cell[@id='$id']", "discard", ["id"] -+ ), -+ xmlutil.attribute( -+ "cpu:numa:{id}:memAccess", "cpu/numa/cell[@id='$id']", "memAccess", ["id"] -+ ), -+ xmlutil.attribute( -+ "cpu:numa:{id}:distances:{sid}", -+ "cpu/numa/cell[@id='$id']/distances/sibling[@id='$sid']", -+ "value", -+ ["id"], -+ ), -+ {"path": "cpu:iothreads", "xpath": "iothreads"}, -+ {"path": "cpu:tuning:shares", "xpath": "cputune/shares"}, -+ {"path": "cpu:tuning:period", "xpath": "cputune/period"}, -+ {"path": "cpu:tuning:quota", "xpath": "cputune/quota"}, -+ {"path": "cpu:tuning:global_period", "xpath": "cputune/global_period"}, -+ {"path": "cpu:tuning:global_quota", "xpath": "cputune/global_quota"}, -+ {"path": "cpu:tuning:emulator_period", "xpath": "cputune/emulator_period"}, -+ {"path": "cpu:tuning:emulator_quota", "xpath": "cputune/emulator_quota"}, -+ {"path": "cpu:tuning:iothread_period", "xpath": "cputune/iothread_period"}, -+ {"path": "cpu:tuning:iothread_quota", "xpath": "cputune/iothread_quota"}, -+ _cpuset_parameter( -+ "cpu:tuning:vcpupin:{id}", -+ "cputune/vcpupin[@vcpu='$id']", -+ "cpuset", -+ ["vcpu"], -+ ), -+ _cpuset_parameter("cpu:tuning:emulatorpin", 
"cputune/emulatorpin", "cpuset"), -+ _cpuset_parameter( -+ "cpu:tuning:iothreadpin:{id}", -+ "cputune/iothreadpin[@iothread='$id']", -+ "cpuset", -+ ["iothread"], -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:vcpusched:{id}:scheduler", -+ "cputune/vcpusched[$id]", -+ "scheduler", -+ ["priority", "vcpus"], -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:vcpusched:{id}:priority", "cputune/vcpusched[$id]", "priority" -+ ), -+ _cpuset_parameter( -+ "cpu:tuning:vcpusched:{id}:vcpus", "cputune/vcpusched[$id]", "vcpus" -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:iothreadsched:{id}:scheduler", -+ "cputune/iothreadsched[$id]", -+ "scheduler", -+ ["priority", "iothreads"], -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:iothreadsched:{id}:priority", -+ "cputune/iothreadsched[$id]", -+ "priority", -+ ), -+ _cpuset_parameter( -+ "cpu:tuning:iothreadsched:{id}:iothreads", -+ "cputune/iothreadsched[$id]", -+ "iothreads", -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:emulatorsched:scheduler", -+ "cputune/emulatorsched", -+ "scheduler", -+ ["priority"], -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:emulatorsched:priority", "cputune/emulatorsched", "priority" -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:cachetune:{id}:monitor:{sid}", -+ "cputune/cachetune[@vcpus='$id']/monitor[@vcpus='$sid']", -+ "level", -+ ["vcpus"], -+ ), -+ xmlutil.attribute( -+ "cpu:tuning:memorytune:{id}:{sid}", -+ "cputune/memorytune[@vcpus='$id']/node[@id='$sid']", -+ "bandwidth", -+ ["id", "vcpus"], -+ ), -+ xmlutil.attribute("clock:offset", "clock", "offset"), -+ xmlutil.attribute("clock:adjustment", "clock", "adjustment", convert=str), -+ xmlutil.attribute("clock:timezone", "clock", "timezone"), - ] - -- data = {k: v for k, v in locals().items() if bool(v)} -- data["stop_on_reboot"] = stop_on_reboot -- if boot_dev: -- data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} -+ for timer in timer_names: -+ params_mapping += [ -+ xmlutil.attribute( -+ "clock:timers:{}:track".format(timer), -+ "clock/timer[@name='{}']".format(timer), -+ "track", -+ ["name"], -+ ), -+ xmlutil.attribute( -+ "clock:timers:{}:tickpolicy".format(timer), -+ "clock/timer[@name='{}']".format(timer), -+ "tickpolicy", -+ ["name"], -+ ), -+ xmlutil.int_attribute( -+ "clock:timers:{}:frequency".format(timer), -+ "clock/timer[@name='{}']".format(timer), -+ "frequency", -+ ["name"], -+ ), -+ xmlutil.attribute( -+ "clock:timers:{}:mode".format(timer), -+ "clock/timer[@name='{}']".format(timer), -+ "mode", -+ ["name"], -+ ), -+ _yesno_attribute( -+ "clock:timers:{}:present".format(timer), -+ "clock/timer[@name='{}']".format(timer), -+ "present", -+ ["name"], -+ ), -+ ] -+ for attr in ["slew", "threshold", "limit"]: -+ params_mapping.append( -+ xmlutil.int_attribute( -+ "clock:timers:{}:{}".format(timer, attr), -+ "clock/timer[@name='{}']/catchup".format(timer), -+ attr, -+ ) -+ ) -+ -+ for attr in ["level", "type", "size"]: -+ params_mapping.append( -+ xmlutil.attribute( -+ "cpu:tuning:cachetune:{id}:{sid}:" + attr, -+ "cputune/cachetune[@vcpus='$id']/cache[@id='$sid']", -+ attr, -+ ["id", "unit", "vcpus"], -+ ) -+ ) -+ -+ # update NUMA host policy -+ if hypervisor in ["qemu", "kvm"]: -+ params_mapping += [ -+ xmlutil.attribute("numatune:memory:mode", "numatune/memory", "mode"), -+ _cpuset_parameter("numatune:memory:nodeset", "numatune/memory", "nodeset"), -+ xmlutil.attribute( -+ "numatune:memnodes:{id}:mode", -+ "numatune/memnode[@cellid='$id']", -+ "mode", -+ ["cellid"], -+ ), -+ _cpuset_parameter( -+ "numatune:memnodes:{id}:nodeset", -+ 
"numatune/memnode[@cellid='$id']", -+ "nodeset", -+ ["cellid"], -+ ), -+ xmlutil.attribute( -+ "hypervisor_features:kvm-hint-dedicated", -+ "features/kvm/hint-dedicated", -+ "state", -+ convert=lambda v: "on" if v else "off", -+ ), -+ ] -+ - need_update = ( - salt.utils.xmlutil.change_xml(desc, data, params_mapping) or need_update - ) -@@ -2750,6 +3673,8 @@ def update( - "disk": ["disks", "disk_profile"], - "interface": ["interfaces", "nic_profile"], - "graphics": ["graphics"], -+ "serial": ["serial"], -+ "console": ["console"], - } - changes = {} - for dev_type in parameters: -@@ -2787,7 +3712,6 @@ def update( - _qemu_image_create(all_disks[idx]) - elif item in changes["disk"]["new"] and not source_file: - _disk_volume_create(conn, all_disks[idx]) -- - if not test: - xml_desc = ElementTree.tostring(desc) - log.debug("Update virtual machine definition: %s", xml_desc) -@@ -2803,14 +3727,18 @@ def update( - commands = [] - removable_changes = [] - if domain.isActive() and live: -- if cpu: -- commands.append( -- { -- "device": "cpu", -- "cmd": "setVcpusFlags", -- "args": [cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE], -- } -- ) -+ if cpu and ( -+ isinstance(cpu, int) or isinstance(cpu, dict) and cpu.get("maximum") -+ ): -+ new_cpu = cpu.get("maximum") if isinstance(cpu, dict) else cpu -+ if old_cpu != new_cpu and new_cpu is not None: -+ commands.append( -+ { -+ "device": "cpu", -+ "cmd": "setVcpusFlags", -+ "args": [new_cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE], -+ } -+ ) - if mem: - if isinstance(mem, dict): - # setMemoryFlags takes memory amount in KiB -@@ -2822,7 +3750,7 @@ def update( - elif isinstance(mem, int): - new_mem = int(mem * 1024) - -- if old_mem != new_mem and new_mem is not None: -+ if not _almost_equal(old_mem, new_mem) and new_mem is not None: - commands.append( - { - "device": "mem", -@@ -4402,7 +5330,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs): - directories.add(os.path.dirname(disks[disk]["file"])) - else: - # We may have a volume to delete here -- matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"],) -+ matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"]) - if matcher: - pool_name = matcher.group("pool") - pool = None -diff --git a/salt/states/virt.py b/salt/states/virt.py -index 20ea1c25f1..784cdca73c 100644 ---- a/salt/states/virt.py -+++ b/salt/states/virt.py -@@ -287,8 +287,13 @@ def defined( - os_type=None, - arch=None, - boot=None, -+ numatune=None, - update=True, - boot_dev=None, -+ hypervisor_features=None, -+ clock=None, -+ serials=None, -+ consoles=None, - stop_on_reboot=False, - live=True, - ): -@@ -298,26 +303,151 @@ def defined( - .. versionadded:: sodium - - :param name: name of the virtual machine to run -- :param cpu: number of CPUs for the virtual machine to create -- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to -+ :param cpu: -+ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure -+ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is -+ documented in :ref:`init-cpu-def`. -+ -+ .. 
code-block:: yaml -+ -+ cpu: -+ placement: static -+ cpuset: 0-11 -+ current: 5 -+ maximum: 12 -+ vcpus: -+ 0: -+ enabled: 'yes' -+ hotpluggable: 'no' -+ order: 1 -+ 1: -+ enabled: 'no' -+ hotpluggable: 'yes' -+ match: minimum -+ mode: custom -+ check: full -+ vendor: Intel -+ model: -+ name: core2duo -+ fallback: allow -+ vendor_id: GenuineIntel -+ topology: -+ sockets: 1 -+ cores: 12 -+ threads: 1 -+ cache: -+ level: 3 -+ mode: emulate -+ feature: -+ policy: optional -+ name: lahf_lm -+ numa: -+ 0: -+ cpus: 0-3 -+ memory: 1g -+ discard: 'yes' -+ distances: -+ 0: 10 # sibling id : value -+ 1: 21 -+ 2: 31 -+ 3: 41 -+ 1: -+ cpus: 4-6 -+ memory: 1g -+ memAccess: shared -+ distances: -+ 0: 21 -+ 1: 10 -+ 2: 21 -+ 3: 31 -+ tuning: -+ vcpupin: -+ 0: 1-4,^2 # vcpuid : cpuset -+ 1: 0,1 -+ 2: 2,3 -+ 3: 0,4 -+ emulatorpin: 1-3 -+ iothreadpin: -+ 1: 5,6 # iothread id: cpuset -+ 2: 7,8 -+ shares: 2048 -+ period: 1000000 -+ quota: -1 -+ global_period: 1000000 -+ global_quota: -1 -+ emulator_period: 1000000 -+ emulator_quota: -1 -+ iothread_period: 1000000 -+ iothread_quota: -1 -+ vcpusched: -+ - scheduler: fifo -+ priority: 1 -+ - scheduler: fifo -+ priority: 2 -+ vcpus: 1-3 -+ - scheduler: rr -+ priority: 3 -+ vcpus: 4 -+ iothreadsched: -+ - scheduler: batch -+ iothreads: 2 -+ emulatorsched: -+ scheduler: idle -+ cachetune: -+ 0-3: # vcpus set -+ 0: # cache id -+ level: 3 -+ type: both -+ size: 4 -+ 1: -+ level: 3 -+ type: both -+ size: 6 -+ monitor: -+ 1: 3 -+ 0-3: 3 -+ 4-5: -+ monitor: -+ 4: 3 # vcpus: level -+ 5: 3 -+ memorytune: -+ 0-3: # vcpus set -+ 0: 60 # node id: bandwidth -+ 4-5: -+ 0: 60 -+ iothreads: 4 -+ -+ .. versionadded:: Aluminium -+ -+ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to - contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, -- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The -- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. -- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be -- an integer. -+ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``, -+ ``hugepages`` , ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure -+ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detail unit -+ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer. - -- .. code-block:: python -+ .. code-block:: yaml - -- { -- 'boot': 1g, -- 'current': 1g, -- 'max': 1g, -- 'slots': 10, -- 'hard_limit': '1024' -- 'soft_limit': '512m' -- 'swap_hard_limit': '1g' -- 'min_guarantee': '512mib' -- } -+ boot: 1g -+ current: 1g -+ max: 1g -+ slots: 10 -+ hard_limit: 1024 -+ soft_limit: 512m -+ swap_hard_limit: 1g -+ min_guarantee: 512mib -+ hugepages: -+ - size: 2m -+ - nodeset: 0-2 -+ size: 1g -+ - nodeset: 3 -+ size: 2g -+ nosharepages: True -+ locked: True -+ source: file -+ access: shared -+ allocation: immediate -+ discard: True - - .. versionchanged:: Magnesium - -@@ -380,6 +510,77 @@ def defined( - - .. versionadded:: Magnesium - -+ :param numatune: -+ The optional numatune element provides details of how to tune the performance of a NUMA host via controlling NUMA -+ policy for domain process. 
The optional ``memory`` element specifies how to allocate memory for the domain process -+ on a NUMA host. ``memnode`` elements can specify memory allocation policies for each guest NUMA node. The definition -+ used in the dictionary can be found at :ref:`init-cpu-def`. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: python -+ -+ { -+ 'memory': {'mode': 'strict', 'nodeset': '0-11'}, -+ 'memnodes': {0: {'mode': 'strict', 'nodeset': 1}, 1: {'mode': 'preferred', 'nodeset': 2}} -+ } -+ -+ :param hypervisor_features: -+ Enable or disable hypervisor-specific features on the virtual machine. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: yaml -+ -+ hypervisor_features: -+ kvm-hint-dedicated: True -+ -+ :param clock: -+ Configure the guest clock. -+ The value is a dictionary with the following keys: -+ -+ adjustment -+ time adjustment in seconds or ``reset`` -+ -+ utc -+ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``. -+ -+ timezone -+ synchronize the guest to the corresponding timezone -+ -+ timers -+ a dictionary associating the timer name with its configuration. -+ This configuration is a dictionary with the properties ``track``, ``tickpolicy``, -+ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``. -+ See `libvirt time keeping documentation `_ for the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ Set the clock to local time using an offset in seconds: -+ -+ .. code-block:: yaml -+ -+ clock: -+ adjustment: 3600 -+ utc: False -+ -+ Set the clock to a specific time zone: -+ -+ .. code-block:: yaml -+ -+ clock: -+ timezone: CEST -+ -+ :param serials: -+ List of dictionaries providing details on the serial connections to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+ :param consoles: -+ List of dictionaries providing details on the console devices to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+
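-+ For example, a telnet serial and a minimal ``pty`` console could be declared as follows (an illustrative sketch; see :ref:`init-chardevs-def` for all possible keys): -+ -+ .. code-block:: yaml -+ -+ serials: -+ - type: tcp -+ port: 22223 -+ protocol: telnet -+ consoles: -+ - type: pty -+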
- :param stop_on_reboot: - If set to ``True`` the guest will stop instead of rebooting. - This is specially useful when creating a virtual machine with an installation cdrom or -@@ -456,8 +657,13 @@ - username=username, - password=password, - boot=boot, -+ numatune=numatune, -+ serials=serials, -+ consoles=consoles, - test=__opts__["test"], - boot_dev=boot_dev, -+ hypervisor_features=hypervisor_features, -+ clock=clock, - stop_on_reboot=stop_on_reboot, - ) - ret["changes"][name] = status -@@ -492,8 +698,13 @@ - username=username, - password=password, - boot=boot, -+ numatune=numatune, -+ serials=serials, -+ consoles=consoles, - start=False, - boot_dev=boot_dev, -+ hypervisor_features=hypervisor_features, -+ clock=clock, - stop_on_reboot=stop_on_reboot, - ) - ret["changes"][name] = {"definition": True} -@@ -528,6 +739,11 @@ - arch=None, - boot=None, - boot_dev=None, -+ numatune=None, -+ hypervisor_features=None, -+ clock=None, -+ serials=None, -+ consoles=None, - stop_on_reboot=False, - ): - """ -@@ -536,13 +752,20 @@ - .. versionadded:: 2016.3.0 - - :param name: name of the virtual machine to run -- :param cpu: number of CPUs for the virtual machine to create -- :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to -+ :param cpu: -+ Number of virtual CPUs to assign to the virtual machine or a dictionary with detailed information to configure -+ cpu model and topology, numa node tuning, cpu tuning and iothreads allocation. The structure of the dictionary is -+ documented in :ref:`init-cpu-def`. -+ -+ To update any cpu parameters, specify the new values for the corresponding tag. To remove any element or attribute, -+ specify a ``None`` object. Please note that a ``None`` object is mapped to ``null`` in YAML; use ``null`` in sls files -+ instead. -+ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since 3002, a dictionary can be used to - contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, -- ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The -- structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. -- Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be -- an integer. -+ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit``, ``min_guarantee``, -+ ``hugepages``, ``nosharepages``, ``locked``, ``source``, ``access``, ``allocation`` and ``discard``. The structure -+ of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. Detailed unit -+ specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be an integer. - - To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None`` - is mapped to ``null`` in sls file, pass ``null`` in sls file instead. -@@ -638,6 +861,16 @@ - pass a None object, for instance: 'kernel': ``None``. - - .. versionadded:: 3000 -+ :param serials: -+ List of dictionaries providing details on the serial connections to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium -+ :param consoles: -+ List of dictionaries providing details on the console devices to create. (Default: ``None``) -+ See :ref:`init-chardevs-def` for more details on the possible values. -+ -+ .. versionadded:: Aluminium - - :param boot: - Specifies kernel for the virtual machine, as well as boot parameters -@@ -664,6 +897,18 @@ - - .. versionadded:: Magnesium - -+ :param numatune: -+ The optional numatune element provides details of how to tune the performance of a NUMA host by controlling the NUMA -+ policy for the domain process. The optional ``memory`` element specifies how to allocate memory for the domain process -+ on a NUMA host. ``memnode`` elements can specify memory allocation policies for each guest NUMA node. The definition -+ used in the dictionary can be found at :ref:`init-cpu-def`. -+ -+ To update any numatune parameters, specify the new value. To remove any ``numatune`` parameters, pass a None object, -+ for instance: 'numatune': ``None``. Please note that ``None`` is mapped to ``null`` in sls files; pass ``null`` -+ instead. -+ -+ .. versionadded:: Aluminium -+
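-+ For example, mirroring the dictionary example shown above for ``defined``: -+ -+ .. code-block:: yaml -+ -+ numatune: -+ memory: -+ mode: strict -+ nodeset: 0-11 -+ memnodes: -+ 0: -+ mode: strict -+ nodeset: 1 -+ 1: -+ mode: preferred -+ nodeset: 2 -+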
- :param stop_on_reboot: - If set to ``True`` the guest will stop instead of rebooting. - This is specially useful when creating a virtual machine with an installation cdrom or -@@ -672,6 +917,51 @@ - - .. versionadded:: Aluminium - -+ :param hypervisor_features: -+ Enable or disable hypervisor-specific features on the virtual machine. -+ -+ .. versionadded:: Aluminium -+ -+ .. code-block:: yaml -+ -+ hypervisor_features: -+ kvm-hint-dedicated: True -+ -+ :param clock: -+ Configure the guest clock. -+ The value is a dictionary with the following keys: -+ -+ adjustment -+ time adjustment in seconds or ``reset`` -+ -+ utc -+ set to ``False`` to use the host local time as the guest clock. Defaults to ``True``. -+ -+ timezone -+ synchronize the guest to the corresponding timezone -+ -+ timers -+ a dictionary associating the timer name with its configuration. -+ This configuration is a dictionary with the properties ``track``, ``tickpolicy``, -+ ``catchup``, ``frequency``, ``mode``, ``present``, ``slew``, ``threshold`` and ``limit``. -+ See `libvirt time keeping documentation `_ for the possible values. -+ -+ .. versionadded:: Aluminium -+ -+ Set the clock to local time using an offset in seconds: -+ -+ .. code-block:: yaml -+ -+ clock: -+ adjustment: 3600 -+ utc: False -+ -+ Set the clock to a specific time zone: -+ -+ .. code-block:: yaml -+ -+ clock: -+ timezone: CEST -+ - .. rubric:: Example States - - Make sure an already-defined virtual machine called ``domain_name`` is running: -@@ -740,10 +1030,15 @@ - boot=boot, - update=update, - boot_dev=boot_dev, -+ numatune=numatune, -+ hypervisor_features=hypervisor_features, -+ clock=clock, - stop_on_reboot=stop_on_reboot, - connection=connection, - username=username, - password=password, -+ serials=serials, -+ consoles=consoles, - ) - - result = True if not __opts__["test"] else None -diff --git a/salt/templates/virt/libvirt_chardevs.jinja b/salt/templates/virt/libvirt_chardevs.jinja -new file mode 100644 -index 0000000000..1795277180 ---- /dev/null -+++ b/salt/templates/virt/libvirt_chardevs.jinja -@@ -0,0 +1,16 @@ -+{% macro chardev(dev) -%} -+ {% if dev.type == "unix" -%} -+ -+ {% elif dev.type in ["udp", "tcp"] -%} -+ -+ {% elif dev.type in ["pipe", "dev", "pty", "file"] and dev.path -%} -+ -+ {%- endif %} -+ {% if dev.type == "tcp" -%} -+ -+ {%- endif %} -+ {% if "target_port" in dev or "target_type" in dev -%} -+ -+ {%- endif %} -+{%- endmacro %} -diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja -index fb4c9f40d0..6ac3e867b9 100644 ---- a/salt/templates/virt/libvirt_domain.jinja -+++ b/salt/templates/virt/libvirt_domain.jinja -@@ -1,32 +1,220 @@ - {%- import 'libvirt_disks.jinja' as libvirt_disks -%} -+{%- macro opt_attribute(obj, name, conv=none) %} -+{%- if obj.get(name) is not none %} {{ name }}='{{ obj[name] if conv is none else conv(obj[name]) }}'{% endif -%} -+{%- endmacro %} -+{%- import 'libvirt_chardevs.jinja' as libvirt_chardevs -%} - - {{ name }} -- {{ cpu }} -+ {%- if cpu %} -+ {{ cpu.get('maximum', '') }} -+ {%- endif %} -+ {%- if cpu.get('vcpus') %} -+ -+ {%- for vcpu_id in cpu["vcpus"].keys() %} -+ -+ {%- endfor %} -+ -+ {%- endif %} -+ {%- if cpu %} -+ -+ {%- if cpu.model %} -+ {{ cpu.model.get('name', '') }} -+ {%- endif %} -+ {%- if cpu.vendor %} -+ {{ cpu.get('vendor', '') }} -+ {%- endif %} -+ {%- if cpu.topology %} -+ -+ {%- endif %} -+ {%- if cpu.cache %} -+ -+ {%- endif %} -+ {%- if cpu.features %} -+ {%- for k, v in cpu.features.items() %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.numa %} -+ -+ {%- for numa_id in cpu.numa.keys() %} -+ {%- if cpu.numa.get(numa_id) %} -+ -+ {%- if cpu.numa[numa_id].distances %} -+ -+ {%- for sibling_id in
cpu.numa[numa_id].distances %} -+ -+ {%- endfor %} -+ -+ {%- endif %} -+ -+ {%- endif %} -+ {%- endfor %} -+ -+ {%- endif %} -+ -+ {%- if cpu.iothreads %} -+ {{ cpu.iothreads }} -+ {%- endif %} -+ {%- endif %} -+ {%- if cpu.tuning %} -+ -+ {%- if cpu.tuning.vcpupin %} -+ {%- for vcpu_id, cpuset in cpu.tuning.vcpupin.items() %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.tuning.emulatorpin %} -+ -+ {%- endif %} -+ {%- if cpu.tuning.iothreadpin %} -+ {%- for thread_id, cpuset in cpu.tuning.iothreadpin.items() %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.tuning.shares %} -+ {{ cpu.tuning.shares }} -+ {%- endif %} -+ {%- if cpu.tuning.period %} -+ {{ cpu.tuning.period }} -+ {%- endif %} -+ {%- if cpu.tuning.quota %} -+ {{ cpu.tuning.quota }} -+ {%- endif %} -+ {%- if cpu.tuning.global_period %} -+ {{ cpu.tuning.global_period }} -+ {%- endif %} -+ {%- if cpu.tuning.global_quota %} -+ {{ cpu.tuning.global_quota }} -+ {%- endif %} -+ {%- if cpu.tuning.emulator_period %} -+ {{ cpu.tuning.emulator_period }} -+ {%- endif %} -+ {%- if cpu.tuning.emulator_quota %} -+ {{ cpu.tuning.emulator_quota }} -+ {%- endif %} -+ {%- if cpu.tuning.iothread_period %} -+ {{ cpu.tuning.iothread_period }} -+ {%- endif %} -+ {%- if cpu.tuning.iothread_quota %} -+ {{ cpu.tuning.iothread_quota }} -+ {%- endif %} -+ {%- if cpu.tuning.vcpusched %} -+ {%- for sched in cpu.tuning.vcpusched %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.tuning.iothreadsched %} -+ {%- for sched in cpu.tuning.iothreadsched %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.tuning.emulatorsched %} -+ -+ {%- endif %} -+ {%- if cpu.tuning.cachetune %} -+ {%- for k, v in cpu.tuning.cachetune.items() %} -+ -+ {%- for e, atrs in v.items() %} -+ {%- if e is number and atrs %} -+ -+ {%- elif e is not number %} -+ {%- for atr, val in atrs.items() %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- endfor %} -+ -+ {%- endfor %} -+ {%- endif %} -+ {%- if cpu.tuning.memorytune %} -+ {%- for vcpus, nodes in cpu.tuning.memorytune.items() %} -+ -+ {%- for id, bandwidth in nodes.items() %} -+ -+ {%- endfor %} -+ -+ {%- endfor %} -+ {%- endif %} -+ -+ {%- endif %} - {%- if mem.max %} -- {{ mem.max }} -+ {{ to_kib(mem.max) }} - {%- endif %} - {%- if mem.boot %} -- {{ mem.boot }} -+ {{ to_kib(mem.boot) }} - {%- endif %} - {%- if mem.current %} -- {{ mem.current }} -+ {{ to_kib(mem.current) }} - {%- endif %} - {%- if mem %} - - {%- if 'hard_limit' in mem and mem.hard_limit %} -- {{ mem.hard_limit }} -+ {{ to_kib(mem.hard_limit) }} - {%- endif %} - {%- if 'soft_limit' in mem and mem.soft_limit %} -- {{ mem.soft_limit }} -+ {{ to_kib(mem.soft_limit) }} - {%- endif %} - {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %} -- {{ mem.swap_hard_limit }} -+ {{ to_kib(mem.swap_hard_limit) }} - {%- endif %} - {%- if 'min_guarantee' in mem and mem.min_guarantee %} -- {{ mem.min_guarantee }} -+ {{ to_kib(mem.min_guarantee) }} - {%- endif %} - - {%- endif %} -+ {%- if numatune %} -+ -+ {%- if 'memory' in numatune and numatune.memory %} -+ -+ {%- endif %} -+ {%- if 'memnodes' in numatune and numatune.memnodes %} -+ {%- for cell_id in numatune['memnodes'] %} -+ -+ {%- endfor %} -+ {%- endif %} -+ -+ {%- endif %} -+ {%- if mem %} -+ -+ {%- if mem.hugepages %} -+ -+ {%- for page in mem.hugepages %} -+ -+ {%- endfor %} -+ -+ {%- if mem.nosharepages %} -+ -+ {%- endif %} -+ {%- if mem.locked %} -+ -+ {%- endif %} -+ {%- if mem.source %} -+ -+ {%- endif %} -+ {%- if mem.access %} -+ -+ {%- endif %} -+ {%- if mem.allocation %} -+ -+ {%- endif %} -+ {%- if 
mem.discard %} -+ -+ {%- endif %} -+ {%- endif %} -+ -+ {%- endif %} - - {{ os_type }} - {% if boot %} -@@ -50,6 +238,18 @@ - - {% endfor %} - -+{%- if clock %} -+ -+ {%- for timer_name in clock.timers %} -+ {%- set timer = clock.timers[timer_name] %} -+ -+ {%- if "threshold" in timer or "slew" in timer or "limit" in timer %} -+ -+ {%- endif %} -+ -+ {%- endfor %} -+ -+{%- endif %} - {{ on_reboot }} - - {% for disk in disks %} -@@ -69,7 +269,7 @@ -
- {% endif %} - {% if disk.driver -%} -- -+ - {% endif %} - - {% endfor %} -@@ -104,35 +304,39 @@ - address='{{ graphics.listen.address }}' - {% endif %}/> - -- {% endif %} -- {% if serial_type == 'pty' %} -- -- -- -- {% if console %} -- -- -- -- {% endif %} -+ -+ {% if graphics.type == "spice" -%} -+ -+ -+ -+ {%- endif %} - {% endif %} - -- {% if serial_type == 'tcp' %} -- -- -- -- -+ {%- for serial in serials %} -+ -+ {{ libvirt_chardevs.chardev(serial) }} - -- {% if console %} -- -- -- -- -- -- {% endif %} -- {% endif %} -+ {%- endfor %} - -+ {%- for console in consoles %} -+ -+ {{ libvirt_chardevs.chardev(console) }} -+ -+ {% endfor %} -+{%- if hypervisor in ["qemu", "kvm"] %} -+ -+ -+ -+{%- endif %} - - - -+ -+ -+{%- if hypervisor_features.get("kvm-hint-dedicated") %} -+ -+ -+ -+{%- endif %} - - -diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py -index d25f5c8da5..5c187ca7e5 100644 ---- a/salt/utils/xmlutil.py -+++ b/salt/utils/xmlutil.py -@@ -157,18 +157,24 @@ def clean_node(parent_map, node, ignored=None): - :param parent_map: dictionary mapping each node to its parent - :param node: the node to clean - :param ignored: a list of ignored attributes. -+ :return: True if anything has been removed, False otherwise - """ - has_text = node.text is not None and node.text.strip() - parent = parent_map.get(node) -+ removed = False - if ( - len(set(node.attrib.keys()) - set(ignored or [])) == 0 - and not list(node) - and not has_text -+ and parent - ): - parent.remove(node) -+ removed = True - # Clean parent nodes if needed - if parent is not None: -- clean_node(parent_map, parent, ignored) -+ parent_cleaned = clean_node(parent_map, parent, ignored) -+ removed = removed or parent_cleaned -+ return removed - - - def del_text(parent_map, node): -@@ -180,6 +186,7 @@ def del_text(parent_map, node): - parent = parent_map[node] - parent.remove(node) - clean_node(parent, node) -+ return True - - - def del_attribute(attribute, ignored=None): -@@ -197,13 +204,54 @@ def del_attribute(attribute, ignored=None): - - def _do_delete(parent_map, node): - if attribute not in node.keys(): -- return -+ return False - node.attrib.pop(attribute) - clean_node(parent_map, node, ignored) -+ return True - - return _do_delete - - -+def attribute(path, xpath, attr_name, ignored=None, convert=None): -+ """ -+ Helper function creating a change_xml mapping entry for a text XML attribute. -+ -+ :param path: the path to the value in the data -+ :param xpath: the xpath to the node holding the attribute -+ :param attr_name: the attribute name -+ :param ignored: the list of attributes to ignore when cleaning up the node -+ :param convert: a function used to convert the value -+ """ -+ entry = { -+ "path": path, -+ "xpath": xpath, -+ "get": lambda n: n.get(attr_name), -+ "set": lambda n, v: n.set(attr_name, str(v)), -+ "del": salt.utils.xmlutil.del_attribute(attr_name, ignored), -+ } -+ if convert: -+ entry["convert"] = convert -+ return entry -+ -+ -+def int_attribute(path, xpath, attr_name, ignored=None): -+ """ -+ Helper function creating a change_xml mapping entry for a text XML integer attribute. 
-+ -+ :param path: the path to the value in the data -+ :param xpath: the xpath to the node holding the attribute -+ :param attr_name: the attribute name -+ :param ignored: the list of attributes to ignore when cleaning up the node -+ """ -+ return { -+ "path": path, -+ "xpath": xpath, -+ "get": lambda n: int(n.get(attr_name)) if n.get(attr_name) else None, -+ "set": lambda n, v: n.set(attr_name, str(v)), -+ "del": salt.utils.xmlutil.del_attribute(attr_name, ignored), -+ } -+ -+ - def change_xml(doc, data, mapping): - """ - Change an XML ElementTree document according. -@@ -237,6 +285,7 @@ def change_xml(doc, data, mapping): - del - function deleting the value in the XML. - Takes two parameters for the parent node and the node matched by the XPath. -+ Returns True if anything was removed, False otherwise. - Default is to remove the text value. - More cleanup may be performed, see the :py:func:`clean_node` function for details. - -@@ -281,8 +330,17 @@ def change_xml(doc, data, mapping): - continue - - if new_value is not None: -+ # We need to increment ids from arrays since xpath starts at 1 -+ converters = { -+ p: (lambda n: n + 1) -+ if "[${}]".format(p) in xpath -+ else (lambda n: n) -+ for p in placeholders -+ } - ctx = { -- placeholder: value_item.get(placeholder, "") -+ placeholder: converters[placeholder]( -+ value_item.get(placeholder, "") -+ ) - for placeholder in placeholders - } - node_xpath = string.Template(xpath).substitute(ctx) -@@ -299,7 +357,9 @@ def change_xml(doc, data, mapping): - if convert_fn: - new_value = convert_fn(new_value) - -- if str(current_value) != str(new_value): -+ # Allow custom comparison. Can be useful for almost equal numeric values -+ compare_fn = param.get("equals", lambda o, n: str(o) == str(n)) -+ if not compare_fn(current_value, new_value): - set_fn(node, new_value) - need_update = True - else: -@@ -307,17 +367,16 @@ def change_xml(doc, data, mapping): - del_fn = param.get("del", del_text) - parent_map = {c: p for p in doc.iter() for c in p} - for node in nodes: -- del_fn(parent_map, node) -- need_update = True -+ deleted = del_fn(parent_map, node) -+ need_update = need_update or deleted - - # Clean the left over XML elements if there were placeholders -- if placeholders and values[0].get("value") != []: -+ if placeholders and [v for v in values if v.get("value") != []]: - all_nodes = set(doc.findall(all_nodes_xpath)) - to_remove = all_nodes - kept_nodes - del_fn = param.get("del", del_text) - parent_map = {c: p for p in doc.iter() for c in p} - for node in to_remove: -- del_fn(parent_map, node) -- need_update = True -- -+ deleted = del_fn(parent_map, node) -+ need_update = need_update or deleted - return need_update -diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py -index 1c32ae12eb..ec56bdff24 100644 ---- a/tests/pytests/unit/modules/virt/conftest.py -+++ b/tests/pytests/unit/modules/virt/conftest.py -@@ -189,3 +189,129 @@ def make_mock_storage_pool(): - return mocked_pool - - return _make_mock_storage_pool -+ -+ -+@pytest.fixture -+def make_capabilities(): -+ def _make_capabilities(): -+ mocked_conn = virt.libvirt.openAuth.return_value -+ mocked_conn.getCapabilities.return_value = """ -+ -+ -+ 44454c4c-3400-105a-8033-b3c04f4b344a -+ -+ x86_64 -+ Nehalem -+ Intel -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ tcp -+ rdma -+ -+ -+ -+ -+ -+ 12367120 -+ 3091780 -+ 0 -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ apparmor -+ 0 -+ -+ -+ dac -+ 0 -+ +487:+486 -+ +487:+486 -+ -+ 
-+ -+ -+ hvm -+ -+ 32 -+ /usr/bin/qemu-system-i386 -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ /usr/bin/qemu-kvm -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ hvm -+ -+ 64 -+ /usr/bin/qemu-system-x86_64 -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ /usr/bin/qemu-kvm -+ pc-i440fx-2.6 -+ pc -+ pc-0.12 -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+""" -+ -+ return _make_capabilities -diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py -index 5f9b45ec9a..347c3bcd88 100644 ---- a/tests/pytests/unit/modules/virt/test_domain.py -+++ b/tests/pytests/unit/modules/virt/test_domain.py -@@ -254,3 +254,338 @@ def test_get_disk_convert_volumes(make_mock_vm, make_mock_storage_pool): - "virtual size": 214748364800, - }, - } == virt.get_disks("srv01") -+ -+ -+def test_update_approx_mem(make_mock_vm): -+ """ -+ Test virt.update with a memory parameter that is unchanged, though not exactly equal to the current value. -+ This may happen since libvirt sometimes rounds the memory value. -+ """ -+ xml_def = """ -+ -+ my_vm -+ 3177680 -+ 3177680 -+ 1 -+ -+ hvm -+ -+ restart -+ -+ """ -+ domain_mock = make_mock_vm(xml_def) -+ -+ ret = virt.update("my_vm", mem={"boot": "3253941043B", "current": "3253941043B"}) -+ assert not ret["definition"] -+ -+
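-+# virt.update deliberately compares the old and new memory sizes with a small -+# relative tolerance rather than strict equality, since libvirt may round the -+# requested amount (see the ``_almost_equal`` call in salt/modules/virt.py). -+# A sketch of such a helper, assuming a 0.1% tolerance (its body is not part -+# of this patch): -+# -+# def _almost_equal(current, new): -+# if current is None or new is None: -+# return False -+# return abs(current - new) / current < 1e-03 -+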
clock={"timezone": "CEST"}, -+ ) -+ root = ET.fromstring(xml_data) -+ assert "timezone" == root.find("clock").get("offset") -+ assert "CEST" == root.find("clock").get("timezone") -+ -+ # UTC -+ xml_data = virt._gen_xml( -+ virt.libvirt.openAuth.return_value, -+ "hello", -+ 1, -+ 512, -+ {}, -+ {}, -+ "kvm", -+ "hvm", -+ "x86_64", -+ clock={"utc": True}, -+ ) -+ root = ET.fromstring(xml_data) -+ assert "utc" == root.find("clock").get("offset") -+ -+ # Timers -+ xml_data = virt._gen_xml( -+ virt.libvirt.openAuth.return_value, -+ "hello", -+ 1, -+ 512, -+ {}, -+ {}, -+ "kvm", -+ "hvm", -+ "x86_64", -+ clock={ -+ "timers": { -+ "tsc": {"frequency": 3504000000, "mode": "native"}, -+ "rtc": { -+ "tickpolicy": "catchup", -+ "slew": 4636, -+ "threshold": 123, -+ "limit": 2342, -+ }, -+ "hpet": {"present": False}, -+ }, -+ }, -+ ) -+ root = ET.fromstring(xml_data) -+ assert "utc" == root.find("clock").get("offset") -+ assert "3504000000" == root.find("clock/timer[@name='tsc']").get("frequency") -+ assert "native" == root.find("clock/timer[@name='tsc']").get("mode") -+ assert "catchup" == root.find("clock/timer[@name='rtc']").get("tickpolicy") -+ assert {"slew": "4636", "threshold": "123", "limit": "2342"} == root.find( -+ "clock/timer[@name='rtc']/catchup" -+ ).attrib -+ assert "no" == root.find("clock/timer[@name='hpet']").get("present") -+ -+ -+def test_update_clock(make_mock_vm): -+ """ -+ test virt.update with clock parameter -+ """ -+ xml_def = """ -+ -+ my_vm -+ 524288 -+ 524288 -+ 1 -+ -+ linux -+ /usr/lib/grub2/x86_64-xen/grub.xen -+ -+ -+ -+ -+ -+ restart -+ -+ """ -+ domain_mock = make_mock_vm(xml_def) -+ -+ # Update with no change to the features -+ ret = virt.update( -+ "my_vm", -+ clock={ -+ "utc": False, -+ "adjustment": -3600, -+ "timers": { -+ "tsc": {"frequency": 3504000000, "mode": "native"}, -+ "kvmclock": {"present": False}, -+ }, -+ }, -+ ) -+ assert not ret["definition"] -+ -+ # Update -+ ret = virt.update( -+ "my_vm", -+ clock={ -+ "timezone": "CEST", -+ "timers": { -+ "rtc": { -+ "track": "wall", -+ "tickpolicy": "catchup", -+ "slew": 4636, -+ "threshold": 123, -+ "limit": 2342, -+ }, -+ "hpet": {"present": True}, -+ }, -+ }, -+ ) -+ assert ret["definition"] -+ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0]) -+ assert "timezone" == setxml.find("clock").get("offset") -+ assert "CEST" == setxml.find("clock").get("timezone") -+ assert {"rtc", "hpet"} == {t.get("name") for t in setxml.findall("clock/timer")} -+ assert "catchup" == setxml.find("clock/timer[@name='rtc']").get("tickpolicy") -+ assert "wall" == setxml.find("clock/timer[@name='rtc']").get("track") -+ assert {"slew": "4636", "threshold": "123", "limit": "2342"} == setxml.find( -+ "clock/timer[@name='rtc']/catchup" -+ ).attrib -+ assert "yes" == setxml.find("clock/timer[@name='hpet']").get("present") -+ -+ # Revert to UTC -+ ret = virt.update("my_vm", clock={"utc": True, "adjustment": None, "timers": None}) -+ assert ret["definition"] -+ setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0]) -+ assert {"offset": "utc"} == setxml.find("clock").attrib -+ assert setxml.find("clock/timer") is None -+ -+ -+def test_update_stop_on_reboot_reset(make_mock_vm): -+ """ -+ Test virt.update to remove the on_reboot=destroy flag -+ """ -+ xml_def = """ -+ -+ my_vm -+ 524288 -+ 524288 -+ 1 -+ destroy -+ -+ hvm -+ -+ """ -+ domain_mock = make_mock_vm(xml_def) -+ -+ ret = virt.update("my_vm") -+ -+ assert ret["definition"] -+ define_mock = virt.libvirt.openAuth().defineXML -+ setxml = 
ET.fromstring(define_mock.call_args[0][0]) -+ assert "restart" == setxml.find("./on_reboot").text -+ -+ -+def test_update_stop_on_reboot(make_mock_vm): -+ """ -+ Test virt.update to add the on_reboot=destroy flag -+ """ -+ xml_def = """ -+ -+ my_vm -+ 524288 -+ 524288 -+ 1 -+ -+ hvm -+ -+ """ -+ domain_mock = make_mock_vm(xml_def) -+ -+ ret = virt.update("my_vm", stop_on_reboot=True) -+ -+ assert ret["definition"] -+ define_mock = virt.libvirt.openAuth().defineXML -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ assert "destroy" == setxml.find("./on_reboot").text -+ -+ -+def test_init_no_stop_on_reboot(make_capabilities): -+ """ -+ Test virt.init to add the on_reboot=restart flag -+ """ -+ make_capabilities() -+ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}): -+ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}): -+ virt.init("test_vm", 2, 2048, start=False) -+ define_mock = virt.libvirt.openAuth().defineXML -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ assert "restart" == setxml.find("./on_reboot").text -+ -+ -+def test_init_stop_on_reboot(make_capabilities): -+ """ -+ Test virt.init to add the on_reboot=destroy flag -+ """ -+ make_capabilities() -+ with patch.dict(virt.os.__dict__, {"chmod": MagicMock(), "makedirs": MagicMock()}): -+ with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}): -+ virt.init("test_vm", 2, 2048, stop_on_reboot=True, start=False) -+ define_mock = virt.libvirt.openAuth().defineXML -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ assert "destroy" == setxml.find("./on_reboot").text -diff --git a/tests/pytests/unit/utils/test_xmlutil.py b/tests/pytests/unit/utils/test_xmlutil.py -index 081cc64193..2bcaff3a17 100644 ---- a/tests/pytests/unit/utils/test_xmlutil.py -+++ b/tests/pytests/unit/utils/test_xmlutil.py -@@ -16,6 +16,11 @@ def xml_doc(): - - - -+ -+ -+ -+ -+ - - """ - ) -@@ -36,6 +41,22 @@ def test_change_xml_text_nochange(xml_doc): - assert not ret - - -+def test_change_xml_equals_nochange(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"mem": 1023}, -+ [ -+ { -+ "path": "mem", -+ "xpath": "memory", -+ "get": lambda n: int(n.text), -+ "equals": lambda o, n: abs(o - n) <= 1, -+ } -+ ], -+ ) -+ assert not ret -+ -+ - def test_change_xml_text_notdefined(xml_doc): - ret = xml.change_xml(xml_doc, {}, [{"path": "name", "xpath": "name"}]) - assert not ret -@@ -167,3 +188,23 @@ def test_change_xml_template_remove(xml_doc): - ) - assert ret - assert xml_doc.find("vcpus") is None -+ -+ -+def test_change_xml_template_list(xml_doc): -+ ret = xml.change_xml( -+ xml_doc, -+ {"memtune": {"hugepages": [{"size": "1024"}, {"size": "512"}]}}, -+ [ -+ { -+ "path": "memtune:hugepages:{id}:size", -+ "xpath": "memtune/hugepages/page[$id]", -+ "get": lambda n: n.get("size"), -+ "set": lambda n, v: n.set("size", v), -+ "del": xml.del_attribute("size"), -+ }, -+ ], -+ ) -+ assert ret -+ assert ["1024", "512"] == [ -+ n.get("size") for n in xml_doc.findall("memtune/hugepages/page") -+ ] -diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py -index 83152eda6e..91dee2098d 100644 ---- a/tests/unit/modules/test_virt.py -+++ b/tests/unit/modules/test_virt.py -@@ -106,6 +106,10 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - mock_domain.name.return_value = name - return mock_domain - -+ def assertEqualUnit(self, actual, expected, unit="KiB"): -+ self.assertEqual(actual.get("unit"), unit) -+ self.assertEqual(actual.text, str(expected)) -+ - def test_disk_profile_merge(self): - """ - 
Test virt._disk_profile() when merging with user-defined disks -@@ -215,16 +219,14 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- serial_type="pty", -- console=True, -+ serials=[{"type": "pty"}], - ) - root = ET.fromstring(xml_data) - self.assertEqual(root.find("devices/serial").attrib["type"], "pty") -- self.assertEqual(root.find("devices/console").attrib["type"], "pty") - -- def test_gen_xml_for_serial_console(self): -+ def test_gen_xml_for_telnet_serial(self): - """ -- Test virt._gen_xml() serial console -+ Test virt._gen_xml() telnet serial - """ - diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") - nicp = virt._nic_profile("default", "kvm") -@@ -238,11 +240,134 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- serial_type="pty", -- console=True, -+ serials=[{"type": "tcp", "port": 22223, "protocol": "telnet"}], -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqual(root.find("devices/serial").attrib["type"], "tcp") -+ self.assertEqual(root.find("devices/serial/source").attrib["service"], "22223") -+ self.assertEqual(root.find("devices/serial/protocol").attrib["type"], "telnet") -+ -+ def test_gen_xml_for_telnet_serial_unspecified_port(self): -+ """ -+ Test virt._gen_xml() telnet serial without any specified port -+ """ -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ 1, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ serials=[{"type": "tcp"}], -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqual(root.find("devices/serial").attrib["type"], "tcp") -+ self.assertEqual(root.find("devices/serial/source").attrib["service"], "23023") -+ self.assertFalse("tls" in root.find("devices/serial/source").keys()) -+ self.assertEqual(root.find("devices/serial/protocol").attrib["type"], "telnet") -+ -+ def test_gen_xml_for_chardev_types(self): -+ """ -+ Test virt._gen_xml() consoles and serials of various types -+ """ -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ 1, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ consoles=[ -+ {"type": "pty", "path": "/dev/pts/2", "target_port": 2}, -+ {"type": "pty", "target_type": "usb-serial"}, -+ {"type": "stdio"}, -+ {"type": "file", "path": "/path/to/serial.log"}, -+ ], -+ serials=[ -+ {"type": "pipe", "path": "/tmp/mypipe"}, -+ {"type": "udp", "host": "127.0.0.1", "port": 1234}, -+ {"type": "tcp", "port": 22223, "protocol": "raw", "tls": True}, -+ {"type": "unix", "path": "/path/to/socket"}, -+ ], -+ ) -+ root = ET.fromstring(xml_data) -+ -+ self.assertEqual(root.find("devices/console[1]").attrib["type"], "pty") -+ self.assertEqual( -+ root.find("devices/console[1]/source").attrib["path"], "/dev/pts/2" -+ ) -+ self.assertEqual(root.find("devices/console[1]/target").attrib["port"], "2") -+ -+ self.assertEqual(root.find("devices/console[2]").attrib["type"], "pty") -+ self.assertIsNone(root.find("devices/console[2]/source")) -+ self.assertEqual( -+ root.find("devices/console[2]/target").attrib["type"], "usb-serial" -+ ) -+ -+ self.assertEqual(root.find("devices/console[3]").attrib["type"], "stdio") -+ self.assertIsNone(root.find("devices/console[3]/source")) -+ -+ self.assertEqual(root.find("devices/console[4]").attrib["type"], "file") -+ 
self.assertEqual( -+ root.find("devices/console[4]/source").attrib["path"], "/path/to/serial.log" -+ ) -+ -+ self.assertEqual(root.find("devices/serial[1]").attrib["type"], "pipe") -+ self.assertEqual( -+ root.find("devices/serial[1]/source").attrib["path"], "/tmp/mypipe" -+ ) -+ -+ self.assertEqual(root.find("devices/serial[2]").attrib["type"], "udp") -+ self.assertEqual(root.find("devices/serial[2]/source").attrib["mode"], "bind") -+ self.assertEqual( -+ root.find("devices/serial[2]/source").attrib["service"], "1234" -+ ) -+ self.assertEqual( -+ root.find("devices/serial[2]/source").attrib["host"], "127.0.0.1" -+ ) -+ -+ self.assertEqual(root.find("devices/serial[3]").attrib["type"], "tcp") -+ self.assertEqual(root.find("devices/serial[3]/source").attrib["mode"], "bind") -+ self.assertEqual( -+ root.find("devices/serial[3]/source").attrib["service"], "22223" -+ ) -+ self.assertEqual(root.find("devices/serial[3]/source").attrib["tls"], "yes") -+ self.assertEqual(root.find("devices/serial[3]/protocol").attrib["type"], "raw") -+ -+ self.assertEqual(root.find("devices/serial[4]").attrib["type"], "unix") -+ self.assertEqual( -+ root.find("devices/serial[4]/source").attrib["path"], "/path/to/socket" -+ ) -+ -+ def test_gen_xml_no_nic_console(self): -+ """ -+ Test virt._gen_xml() console -+ """ -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ 1, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ consoles=[{"type": "pty"}], - ) - root = ET.fromstring(xml_data) -- self.assertEqual(root.find("devices/serial").attrib["type"], "pty") - self.assertEqual(root.find("devices/console").attrib["type"], "pty") - - def test_gen_xml_for_telnet_console(self): -@@ -261,14 +386,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- serial_type="tcp", -- console=True, -- telnet_port=22223, -+ consoles=[{"type": "tcp", "port": 22223, "protocol": "telnet"}], - ) - root = ET.fromstring(xml_data) -- self.assertEqual(root.find("devices/serial").attrib["type"], "tcp") - self.assertEqual(root.find("devices/console").attrib["type"], "tcp") - self.assertEqual(root.find("devices/console/source").attrib["service"], "22223") -+ self.assertEqual(root.find("devices/console/protocol").attrib["type"], "telnet") - - def test_gen_xml_for_telnet_console_unspecified_port(self): - """ -@@ -286,15 +409,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- serial_type="tcp", -- console=True, -+ consoles=[{"type": "tcp"}], - ) - root = ET.fromstring(xml_data) -- self.assertEqual(root.find("devices/serial").attrib["type"], "tcp") - self.assertEqual(root.find("devices/console").attrib["type"], "tcp") -- self.assertIsInstance( -- int(root.find("devices/console/source").attrib["service"]), int -- ) -+ self.assertEqual(root.find("devices/console/source").attrib["service"], "23023") -+ self.assertEqual(root.find("devices/console/protocol").attrib["type"], "telnet") - - def test_gen_xml_for_serial_no_console(self): - """ -@@ -312,8 +432,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- serial_type="pty", -- console=False, -+ serials=[{"type": "pty"}], -+ consoles=[], - ) - root = ET.fromstring(xml_data) - self.assertEqual(root.find("devices/serial").attrib["type"], "pty") -@@ -335,8 +455,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - "kvm", - "hvm", - "x86_64", -- 
serial_type="tcp", -- console=False, -+ serials=[{"type": "tcp", "port": 22223, "protocol": "telnet"}], -+ consoles=[], - ) - root = ET.fromstring(xml_data) - self.assertEqual(root.find("devices/serial").attrib["type"], "tcp") -@@ -459,109 +579,493 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(root.find("devices/graphics/listen").attrib["type"], "none") - self.assertFalse("address" in root.find("devices/graphics/listen").attrib) - -- def test_default_disk_profile_hypervisor_esxi(self): -+ def test_gen_xml_memory(self): - """ -- Test virt._disk_profile() default ESXi profile -+ Test virt._gen_xml() with advanced memory settings - """ -- mock = MagicMock(return_value={}) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._disk_profile( -- self.mock_conn, "nonexistent", "vmware", None, "test-vm" -- ) -- self.assertTrue(len(ret) == 1) -- found = [disk for disk in ret if disk["name"] == "system"] -- self.assertTrue(bool(found)) -- system = found[0] -- self.assertEqual(system["format"], "vmdk") -- self.assertEqual(system["model"], "scsi") -- self.assertTrue(int(system["size"]) >= 1) -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ 1, -+ { -+ "boot": "512m", -+ "current": "256m", -+ "max": "1g", -+ "hard_limit": "1024", -+ "soft_limit": "512m", -+ "swap_hard_limit": "1g", -+ "min_guarantee": "256m", -+ "hugepages": [ -+ {"nodeset": "", "size": "128m"}, -+ {"nodeset": "0", "size": "256m"}, -+ {"nodeset": "1", "size": "512m"}, -+ ], -+ "nosharepages": True, -+ "locked": True, -+ "source": "file", -+ "access": "shared", -+ "allocation": "immediate", -+ "discard": True, -+ }, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqualUnit(root.find("memory"), 512 * 1024) -+ self.assertEqualUnit(root.find("currentMemory"), 256 * 1024) -+ self.assertEqualUnit(root.find("maxMemory"), 1024 * 1024) -+ self.assertFalse("slots" in root.find("maxMemory").keys()) -+ self.assertEqualUnit(root.find("memtune/hard_limit"), 1024 * 1024) -+ self.assertEqualUnit(root.find("memtune/soft_limit"), 512 * 1024) -+ self.assertEqualUnit(root.find("memtune/swap_hard_limit"), 1024 ** 2) -+ self.assertEqualUnit(root.find("memtune/min_guarantee"), 256 * 1024) -+ self.assertEqual( -+ [ -+ {"nodeset": page.get("nodeset"), "size": page.get("size")} -+ for page in root.findall("memoryBacking/hugepages/page") -+ ], -+ [ -+ {"nodeset": None, "size": str(128 * 1024)}, -+ {"nodeset": "0", "size": str(256 * 1024)}, -+ {"nodeset": "1", "size": str(512 * 1024)}, -+ ], -+ ) -+ self.assertIsNotNone(root.find("memoryBacking/nosharepages")) -+ self.assertIsNotNone(root.find("memoryBacking/locked")) -+ self.assertIsNotNone(root.find("memoryBacking/discard")) -+ self.assertEqual(root.find("memoryBacking/source").get("type"), "file") -+ self.assertEqual(root.find("memoryBacking/access").get("mode"), "shared") -+ self.assertEqual(root.find("memoryBacking/allocation").get("mode"), "immediate") - -- def test_default_disk_profile_hypervisor_kvm(self): -+ def test_gen_xml_cpu(self): - """ -- Test virt._disk_profile() default KVM profile -+ Test virt._gen_xml() with CPU advanced properties - """ -- mock = MagicMock(side_effect=[{}, "/images/dir"]) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._disk_profile( -- 
self.mock_conn, "nonexistent", "kvm", None, "test-vm" -- ) -- self.assertTrue(len(ret) == 1) -- found = [disk for disk in ret if disk["name"] == "system"] -- self.assertTrue(bool(found)) -- system = found[0] -- self.assertEqual(system["format"], "qcow2") -- self.assertEqual(system["model"], "virtio") -- self.assertTrue(int(system["size"]) >= 1) -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ { -+ "maximum": 12, -+ "placement": "static", -+ "cpuset": "0-11", -+ "current": 5, -+ "mode": "custom", -+ "match": "minimum", -+ "check": "full", -+ "vendor": "Intel", -+ "model": { -+ "name": "core2duo", -+ "fallback": "allow", -+ "vendor_id": "GenuineIntel", -+ }, -+ "cache": {"level": 3, "mode": "emulate"}, -+ "features": {"lahf": "optional", "vmx": "require"}, -+ "vcpus": { -+ 0: {"enabled": True, "hotpluggable": True}, -+ 1: {"enabled": False}, -+ }, -+ }, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqual(root.find("vcpu").get("current"), "5") -+ self.assertEqual(root.find("vcpu").get("placement"), "static") -+ self.assertEqual(root.find("vcpu").get("cpuset"), "0-11") -+ self.assertEqual(root.find("vcpu").text, "12") -+ self.assertEqual(root.find("cpu").get("match"), "minimum") -+ self.assertEqual(root.find("cpu").get("mode"), "custom") -+ self.assertEqual(root.find("cpu").get("check"), "full") -+ self.assertEqual(root.find("cpu/vendor").text, "Intel") -+ self.assertEqual(root.find("cpu/model").text, "core2duo") -+ self.assertEqual(root.find("cpu/model").get("fallback"), "allow") -+ self.assertEqual(root.find("cpu/model").get("vendor_id"), "GenuineIntel") -+ self.assertEqual(root.find("cpu/cache").get("level"), "3") -+ self.assertEqual(root.find("cpu/cache").get("mode"), "emulate") -+ self.assertEqual( -+ {f.get("name"): f.get("policy") for f in root.findall("cpu/feature")}, -+ {"lahf": "optional", "vmx": "require"}, -+ ) -+ self.assertEqual( -+ { -+ v.get("id"): { -+ "enabled": v.get("enabled"), -+ "hotpluggable": v.get("hotpluggable"), -+ } -+ for v in root.findall("vcpus/vcpu") -+ }, -+ { -+ "0": {"enabled": "yes", "hotpluggable": "yes"}, -+ "1": {"enabled": "no", "hotpluggable": None}, -+ }, -+ ) - -- def test_default_disk_profile_hypervisor_xen(self): -+ def test_gen_xml_cpu_topology(self): - """ -- Test virt._disk_profile() default XEN profile -+ Test virt._gen_xml() with CPU topology - """ -- mock = MagicMock(side_effect=[{}, "/images/dir"]) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._disk_profile( -- self.mock_conn, "nonexistent", "xen", None, "test-vm" -- ) -- self.assertTrue(len(ret) == 1) -- found = [disk for disk in ret if disk["name"] == "system"] -- self.assertTrue(bool(found)) -- system = found[0] -- self.assertEqual(system["format"], "qcow2") -- self.assertEqual(system["model"], "xen") -- self.assertTrue(int(system["size"]) >= 1) -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ {"maximum": 1, "topology": {"sockets": 4, "cores": 16, "threads": 2}}, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqual(root.find("cpu/topology").get("sockets"), "4") -+ self.assertEqual(root.find("cpu/topology").get("cores"), "16") -+ 
self.assertEqual(root.find("cpu/topology").get("threads"), "2") - -- def test_default_nic_profile_hypervisor_esxi(self): -+ def test_gen_xml_cpu_numa(self): - """ -- Test virt._nic_profile() default ESXi profile -+ Test virt._gen_xml() with CPU numa settings - """ -- mock = MagicMock(return_value={}) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._nic_profile("nonexistent", "vmware") -- self.assertTrue(len(ret) == 1) -- eth0 = ret[0] -- self.assertEqual(eth0["name"], "eth0") -- self.assertEqual(eth0["type"], "bridge") -- self.assertEqual(eth0["source"], "DEFAULT") -- self.assertEqual(eth0["model"], "e1000") -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ { -+ "maximum": 1, -+ "numa": { -+ 0: { -+ "cpus": "0-3", -+ "memory": "1g", -+ "discard": True, -+ "distances": {0: 10, 1: 20}, -+ }, -+ 1: {"cpus": "4-7", "memory": "2g", "distances": {0: 20, 1: 10}}, -+ }, -+ }, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ ) -+ root = ET.fromstring(xml_data) -+ cell0 = root.find("cpu/numa/cell[@id='0']") -+ self.assertEqual(cell0.get("cpus"), "0-3") -+ self.assertIsNone(cell0.get("unit")) -+ self.assertEqual(cell0.get("memory"), str(1024 ** 2)) -+ self.assertEqual(cell0.get("discard"), "yes") -+ self.assertEqual( -+ {d.get("id"): d.get("value") for d in cell0.findall("distances/sibling")}, -+ {"0": "10", "1": "20"}, -+ ) - -- def test_default_nic_profile_hypervisor_kvm(self): -- """ -- Test virt._nic_profile() default KVM profile -- """ -- mock = MagicMock(return_value={}) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._nic_profile("nonexistent", "kvm") -- self.assertTrue(len(ret) == 1) -- eth0 = ret[0] -- self.assertEqual(eth0["name"], "eth0") -- self.assertEqual(eth0["type"], "bridge") -- self.assertEqual(eth0["source"], "br0") -- self.assertEqual(eth0["model"], "virtio") -+ cell1 = root.find("cpu/numa/cell[@id='1']") -+ self.assertEqual(cell1.get("cpus"), "4-7") -+ self.assertIsNone(cell0.get("unit")) -+ self.assertEqual(cell1.get("memory"), str(2 * 1024 ** 2)) -+ self.assertFalse("discard" in cell1.keys()) -+ self.assertEqual( -+ {d.get("id"): d.get("value") for d in cell1.findall("distances/sibling")}, -+ {"0": "20", "1": "10"}, -+ ) - -- def test_default_nic_profile_hypervisor_xen(self): -+ def test_gen_xml_cputune(self): - """ -- Test virt._nic_profile() default XEN profile -+ Test virt._gen_xml() with CPU tuning - """ -- mock = MagicMock(return_value={}) -- with patch.dict( -- virt.__salt__, {"config.get": mock} # pylint: disable=no-member -- ): -- ret = virt._nic_profile("nonexistent", "xen") -- self.assertTrue(len(ret) == 1) -- eth0 = ret[0] -- self.assertEqual(eth0["name"], "eth0") -- self.assertEqual(eth0["type"], "bridge") -- self.assertEqual(eth0["source"], "br0") -+ diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello") -+ nicp = virt._nic_profile("default", "kvm") -+ cputune = { -+ "shares": 2048, -+ "period": 122000, -+ "quota": -1, -+ "global_period": 1000000, -+ "global_quota": -3, -+ "emulator_period": 1200000, -+ "emulator_quota": -10, -+ "iothread_period": 133000, -+ "iothread_quota": -1, -+ "vcpupin": {0: "1-4,^2", 1: "0,1", 2: "2,3", 3: "0,4"}, -+ "emulatorpin": "1-3", -+ "iothreadpin": {1: "5-6", 2: "7-8"}, -+ "vcpusched": [ -+ {"scheduler": "fifo", "priority": 1, "vcpus": "0"}, -+ 
{"scheduler": "fifo", "priority": 2, "vcpus": "1"}, -+ {"scheduler": "idle", "priority": 3, "vcpus": "2"}, -+ ], -+ "iothreadsched": [ -+ {"scheduler": "idle"}, -+ {"scheduler": "batch", "iothreads": "5-7", "priority": 1}, -+ ], -+ "emulatorsched": {"scheduler": "rr", "priority": 2}, -+ "cachetune": { -+ "0-3": { -+ 0: {"level": 3, "type": "both", "size": 3}, -+ 1: {"level": 3, "type": "both", "size": 3}, -+ "monitor": {1: 3, "0-3": 3}, -+ }, -+ "4-5": {"monitor": {4: 3, 5: 2}}, -+ }, -+ "memorytune": {"0-2": {0: 60}, "3-4": {0: 50, 1: 70}}, -+ } -+ xml_data = virt._gen_xml( -+ self.mock_conn, -+ "hello", -+ {"maximum": 1, "tuning": cputune, "iothreads": 2}, -+ 512, -+ diskp, -+ nicp, -+ "kvm", -+ "hvm", -+ "x86_64", -+ ) -+ root = ET.fromstring(xml_data) -+ self.assertEqual(root.find("cputune").find("shares").text, "2048") -+ self.assertEqual(root.find("cputune").find("period").text, "122000") -+ self.assertEqual(root.find("cputune").find("quota").text, "-1") -+ self.assertEqual(root.find("cputune").find("global_period").text, "1000000") -+ self.assertEqual(root.find("cputune").find("global_quota").text, "-3") -+ self.assertEqual(root.find("cputune").find("emulator_period").text, "1200000") -+ self.assertEqual(root.find("cputune").find("emulator_quota").text, "-10") -+ self.assertEqual(root.find("cputune").find("iothread_period").text, "133000") -+ self.assertEqual(root.find("cputune").find("iothread_quota").text, "-1") -+ self.assertEqual( -+ root.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"), -+ "1-4,^2", -+ ) -+ self.assertEqual( -+ root.find("cputune").find("vcpupin[@vcpu='1']").attrib.get("cpuset"), "0,1", -+ ) -+ self.assertEqual( -+ root.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"), "2,3", -+ ) -+ self.assertEqual( -+ root.find("cputune").find("vcpupin[@vcpu='3']").attrib.get("cpuset"), "0,4", -+ ) -+ self.assertEqual( -+ root.find("cputune").find("emulatorpin").attrib.get("cpuset"), "1-3" -+ ) -+ self.assertEqual( -+ root.find("cputune") -+ .find("iothreadpin[@iothread='1']") -+ .attrib.get("cpuset"), -+ "5-6", -+ ) -+ self.assertEqual( -+ root.find("cputune") -+ .find("iothreadpin[@iothread='2']") -+ .attrib.get("cpuset"), -+ "7-8", -+ ) -+ self.assertDictEqual( -+ { -+ s.get("vcpus"): { -+ "scheduler": s.get("scheduler"), -+ "priority": s.get("priority"), -+ } -+ for s in root.findall("cputune/vcpusched") -+ }, -+ { -+ "0": {"scheduler": "fifo", "priority": "1"}, -+ "1": {"scheduler": "fifo", "priority": "2"}, -+ "2": {"scheduler": "idle", "priority": "3"}, -+ }, -+ ) -+ self.assertDictEqual( -+ { -+ s.get("iothreads"): { -+ "scheduler": s.get("scheduler"), -+ "priority": s.get("priority"), -+ } -+ for s in root.findall("cputune/iothreadsched") -+ }, -+ { -+ None: {"scheduler": "idle", "priority": None}, -+ "5-7": {"scheduler": "batch", "priority": "1"}, -+ }, -+ ) -+ self.assertEqual(root.find("cputune/emulatorsched").get("scheduler"), "rr") -+ self.assertEqual(root.find("cputune/emulatorsched").get("priority"), "2") -+ self.assertEqual( -+ root.find("./cputune/cachetune[@vcpus='0-3']").attrib.get("vcpus"), "0-3" -+ ) -+ self.assertEqual( -+ root.find("./cputune/cachetune[@vcpus='0-3']/cache[@id='0']").attrib.get( -+ "level" -+ ), -+ "3", -+ ) -+ self.assertEqual( -+ root.find("./cputune/cachetune[@vcpus='0-3']/cache[@id='0']").attrib.get( -+ "type" -+ ), -+ "both", -+ ) -+ self.assertEqual( -+ root.find( -+ "./cputune/cachetune[@vcpus='0-3']/monitor[@vcpus='1']" -+ ).attrib.get("level"), -+ "3", -+ ) -+ self.assertNotEqual( -+ 
root.find("./cputune/cachetune[@vcpus='0-3']/monitor[@vcpus='1']"), None -+ ) -+ self.assertNotEqual( -+ root.find("./cputune/cachetune[@vcpus='4-5']").attrib.get("vcpus"), None -+ ) -+ self.assertEqual( -+ root.find("./cputune/cachetune[@vcpus='4-5']/cache[@id='0']"), None -+ ) -+ self.assertEqual( -+ root.find( -+ "./cputune/cachetune[@vcpus='4-5']/monitor[@vcpus='4']" -+ ).attrib.get("level"), -+ "3", -+ ) -+ self.assertEqual( -+ root.find( -+ "./cputune/cachetune[@vcpus='4-5']/monitor[@vcpus='5']" -+ ).attrib.get("level"), -+ "2", -+ ) -+ self.assertNotEqual(root.find("./cputune/memorytune[@vcpus='0-2']"), None) -+ self.assertEqual( -+ root.find("./cputune/memorytune[@vcpus='0-2']/node[@id='0']").attrib.get( -+ "bandwidth" -+ ), -+ "60", -+ ) -+ self.assertNotEqual(root.find("./cputune/memorytune[@vcpus='3-4']"), None) -+ self.assertEqual( -+ root.find("./cputune/memorytune[@vcpus='3-4']/node[@id='0']").attrib.get( -+ "bandwidth" -+ ), -+ "50", -+ ) -+ self.assertEqual( -+ root.find("./cputune/memorytune[@vcpus='3-4']/node[@id='1']").attrib.get( -+ "bandwidth" -+ ), -+ "70", -+ ) -+ self.assertEqual(root.find("iothreads").text, "2") -+ -+ def test_default_disk_profile_hypervisor_esxi(self): -+ """ -+ Test virt._disk_profile() default ESXi profile -+ """ -+ mock = MagicMock(return_value={}) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._disk_profile( -+ self.mock_conn, "nonexistent", "vmware", None, "test-vm" -+ ) -+ self.assertTrue(len(ret) == 1) -+ found = [disk for disk in ret if disk["name"] == "system"] -+ self.assertTrue(bool(found)) -+ system = found[0] -+ self.assertEqual(system["format"], "vmdk") -+ self.assertEqual(system["model"], "scsi") -+ self.assertTrue(int(system["size"]) >= 1) -+ -+ def test_default_disk_profile_hypervisor_kvm(self): -+ """ -+ Test virt._disk_profile() default KVM profile -+ """ -+ mock = MagicMock(side_effect=[{}, "/images/dir"]) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._disk_profile( -+ self.mock_conn, "nonexistent", "kvm", None, "test-vm" -+ ) -+ self.assertTrue(len(ret) == 1) -+ found = [disk for disk in ret if disk["name"] == "system"] -+ self.assertTrue(bool(found)) -+ system = found[0] -+ self.assertEqual(system["format"], "qcow2") -+ self.assertEqual(system["model"], "virtio") -+ self.assertTrue(int(system["size"]) >= 1) -+ -+ def test_default_disk_profile_hypervisor_xen(self): -+ """ -+ Test virt._disk_profile() default XEN profile -+ """ -+ mock = MagicMock(side_effect=[{}, "/images/dir"]) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._disk_profile( -+ self.mock_conn, "nonexistent", "xen", None, "test-vm" -+ ) -+ self.assertTrue(len(ret) == 1) -+ found = [disk for disk in ret if disk["name"] == "system"] -+ self.assertTrue(bool(found)) -+ system = found[0] -+ self.assertEqual(system["format"], "qcow2") -+ self.assertEqual(system["model"], "xen") -+ self.assertTrue(int(system["size"]) >= 1) -+ -+ def test_default_nic_profile_hypervisor_esxi(self): -+ """ -+ Test virt._nic_profile() default ESXi profile -+ """ -+ mock = MagicMock(return_value={}) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._nic_profile("nonexistent", "vmware") -+ self.assertTrue(len(ret) == 1) -+ eth0 = ret[0] -+ self.assertEqual(eth0["name"], "eth0") -+ self.assertEqual(eth0["type"], "bridge") -+ self.assertEqual(eth0["source"], 
"DEFAULT") -+ self.assertEqual(eth0["model"], "e1000") -+ -+ def test_default_nic_profile_hypervisor_kvm(self): -+ """ -+ Test virt._nic_profile() default KVM profile -+ """ -+ mock = MagicMock(return_value={}) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._nic_profile("nonexistent", "kvm") -+ self.assertTrue(len(ret) == 1) -+ eth0 = ret[0] -+ self.assertEqual(eth0["name"], "eth0") -+ self.assertEqual(eth0["type"], "bridge") -+ self.assertEqual(eth0["source"], "br0") -+ self.assertEqual(eth0["model"], "virtio") -+ -+ def test_default_nic_profile_hypervisor_xen(self): -+ """ -+ Test virt._nic_profile() default XEN profile -+ """ -+ mock = MagicMock(return_value={}) -+ with patch.dict( -+ virt.__salt__, {"config.get": mock} # pylint: disable=no-member -+ ): -+ ret = virt._nic_profile("nonexistent", "xen") -+ self.assertTrue(len(ret) == 1) -+ eth0 = ret[0] -+ self.assertEqual(eth0["name"], "eth0") -+ self.assertEqual(eth0["type"], "bridge") -+ self.assertEqual(eth0["source"], "br0") - self.assertFalse(eth0["model"]) - - def test_gen_vol_xml_esx(self): -@@ -1836,6 +2340,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - -
- -+ -+ - - - """.format( -@@ -1896,10 +2402,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - username=None, - password=None, - boot=None, -+ numatune=None, - ), - ) - -- # Update vcpus case -+ # test cpu passed as an integer case - setvcpus_mock = MagicMock(return_value=0) - domain_mock.setVcpusFlags = setvcpus_mock - self.assertEqual( -@@ -1914,142 +2421,400 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - setxml = ET.fromstring(define_mock.call_args[0][0]) - self.assertEqual(setxml.find("vcpu").text, "2") - self.assertEqual(setvcpus_mock.call_args[0][0], 2) -+ define_mock.reset_mock() - -- boot = { -- "kernel": "/root/f8-i386-vmlinuz", -- "initrd": "/root/f8-i386-initrd", -- "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/", -+ # test updating vcpu attribute -+ vcpu = { -+ "placement": "static", -+ "cpuset": "0-11", -+ "current": 5, -+ "maximum": 12, - } -- -- # Update boot devices case -- define_mock.reset_mock() - self.assertEqual( - { - "definition": True, -+ "cpu": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", boot_dev="cdrom network hd"), -+ virt.update("my_vm", cpu=vcpu), - ) - setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("vcpu").text, "12") -+ self.assertEqual(setxml.find("vcpu").attrib["placement"], "static") - self.assertEqual( -- ["cdrom", "network", "hd"], -- [node.get("dev") for node in setxml.findall("os/boot")], -+ setxml.find("vcpu").attrib["cpuset"], "0,1,2,3,4,5,6,7,8,9,10,11" - ) -+ self.assertEqual(setxml.find("vcpu").attrib["current"], "5") - -- # Update unchanged boot devices case -- define_mock.reset_mock() -+ # test adding vcpus elements -+ vcpus = { -+ "vcpus": { -+ "0": {"enabled": True, "hotpluggable": False, "order": 1}, -+ "1": {"enabled": False, "hotpluggable": True}, -+ } -+ } - self.assertEqual( - { -- "definition": False, -+ "definition": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", boot_dev="hd"), -+ virt.update("my_vm", cpu=vcpus), -+ ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["id"], "0") -+ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["enabled"], "yes") -+ self.assertEqual( -+ setxml.find("./vcpus/vcpu/[@id='0']").attrib["hotpluggable"], "no" -+ ) -+ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='0']").attrib["order"], "1") -+ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='1']").attrib["id"], "1") -+ self.assertEqual(setxml.find("./vcpus/vcpu/[@id='1']").attrib["enabled"], "no") -+ self.assertEqual( -+ setxml.find("./vcpus/vcpu/[@id='1']").attrib["hotpluggable"], "yes" -+ ) -+ self.assertEqual( -+ setxml.find("./vcpus/vcpu/[@id='1']").attrib.get("order"), None - ) -- define_mock.assert_not_called() - -- # Update with boot parameter case -- define_mock.reset_mock() -+ # test adding cpu attribute -+ cpu_atr = {"mode": "custom", "match": "exact", "check": "full"} - self.assertEqual( - { - "definition": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", boot=boot), -- ) -- setxml = ET.fromstring(define_mock.call_args[0][0]) -- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz") -- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd") -- 
self.assertEqual( -- setxml.find("os").find("cmdline").text, -- "console=ttyS0 ks=http://example.com/f8-i386/os/", -+ virt.update("my_vm", cpu=cpu_atr), - ) - setxml = ET.fromstring(define_mock.call_args[0][0]) -- self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz") -- self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd") -- self.assertEqual( -- setxml.find("os").find("cmdline").text, -- "console=ttyS0 ks=http://example.com/f8-i386/os/", -- ) -- -- boot_uefi = { -- "loader": "/usr/share/OVMF/OVMF_CODE.fd", -- "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd", -+ self.assertEqual(setxml.find("cpu").attrib["mode"], "custom") -+ self.assertEqual(setxml.find("cpu").attrib["match"], "exact") -+ self.assertEqual(setxml.find("cpu").attrib["check"], "full") -+ -+ # test adding cpu model -+ cpu_model = { -+ "model": { -+ "name": "coreduo", -+ "fallback": "allow", -+ "vendor_id": "Genuine20201", -+ } - } -- - self.assertEqual( - { - "definition": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", boot=boot_uefi), -+ virt.update("my_vm", cpu=cpu_model), - ) - setxml = ET.fromstring(define_mock.call_args[0][0]) - self.assertEqual( -- setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd" -+ setxml.find("cpu").find("model").attrib.get("vendor_id"), "Genuine20201" - ) -- self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes") -- self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash") - self.assertEqual( -- setxml.find("os").find("nvram").attrib["template"], -- "/usr/share/OVMF/OVMF_VARS.ms.fd", -+ setxml.find("cpu").find("model").attrib.get("fallback"), "allow" - ) -+ self.assertEqual(setxml.find("cpu").find("model").text, "coreduo") - -+ # test adding cpu vendor -+ cpu_vendor = {"vendor": "Intel"} - self.assertEqual( - { - "definition": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", boot={"efi": True}), -+ virt.update("my_vm", cpu=cpu_vendor), - ) - setxml = ET.fromstring(define_mock.call_args[0][0]) -- self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi") -- -- invalid_boot = { -- "loader": "/usr/share/OVMF/OVMF_CODE.fd", -- "initrd": "/root/f8-i386-initrd", -- } -- -- with self.assertRaises(SaltInvocationError): -- virt.update("my_vm", boot=invalid_boot) -- -- with self.assertRaises(SaltInvocationError): -- virt.update("my_vm", boot={"efi": "Not a boolean value"}) -- -- # Update memtune parameter case -- memtune = { -- "soft_limit": "0.5g", -- "hard_limit": "1024", -- "swap_hard_limit": "2048m", -- "min_guarantee": "1 g", -- } -+ self.assertEqual(setxml.find("cpu").find("vendor").text, "Intel") - -+ # test adding cpu topology -+ cpu_topology = {"topology": {"sockets": 1, "cores": 12, "threads": 1}} - self.assertEqual( - { - "definition": True, - "disk": {"attached": [], "detached": [], "updated": []}, - "interface": {"attached": [], "detached": []}, - }, -- virt.update("my_vm", mem=memtune), -+ virt.update("my_vm", cpu=cpu_topology), -+ ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("sockets"), "1") -+ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("cores"), "12") -+ self.assertEqual(setxml.find("cpu").find("topology").attrib.get("threads"), "1") -+ -+ # test adding cache -+ cpu_cache = {"cache": {"mode": 
"emulate", "level": 3}} -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", cpu=cpu_cache), - ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("level"), "3") -+ self.assertEqual(setxml.find("cpu").find("cache").attrib.get("mode"), "emulate") - -+ # test adding feature -+ cpu_feature = {"features": {"lahf": "optional", "pcid": "disable"}} -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", cpu=cpu_feature), -+ ) - setxml = ET.fromstring(define_mock.call_args[0][0]) - self.assertEqual( -- setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3)) -+ setxml.find("./cpu/feature[@name='pcid']").attrib.get("policy"), "disable" -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/feature[@name='lahf']").attrib.get("policy"), "optional" - ) -- self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes") -+ -+ # test adding numa cell -+ numa_cell = { -+ "numa": { -+ "0": { -+ "cpus": "0-3", -+ "memory": "1g", -+ "discard": True, -+ "distances": {0: 10, 1: 21, 2: 31, 3: 41}, -+ }, -+ "1": { -+ "cpus": "4-6", -+ "memory": "0.5g", -+ "discard": False, -+ "memAccess": "shared", -+ "distances": {0: 21, 1: 10, 2: 15, 3: 30}, -+ }, -+ } -+ } - self.assertEqual( -- setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2) -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", cpu=numa_cell), - ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) - self.assertEqual( -- setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2) -+ setxml.find("./cpu/numa/cell/[@id='0']").attrib["cpus"], "0,1,2,3" - ) - self.assertEqual( -- setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3) -+ setxml.find("./cpu/numa/cell/[@id='0']").attrib["memory"], str(1024 ** 3) -+ ) -+ self.assertEqual(setxml.find("./cpu/numa/cell/[@id='0']").get("unit"), "bytes") -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='0']").attrib["discard"], "yes" -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='0']").attrib[ -+ "value" -+ ], -+ "10", -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='1']").attrib[ -+ "value" -+ ], -+ "21", -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='2']").attrib[ -+ "value" -+ ], -+ "31", -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='0']/distances/sibling/[@id='3']").attrib[ -+ "value" -+ ], -+ "41", -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']").attrib["cpus"], "4,5,6" -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memory"], -+ str(int(1024 ** 3 / 2)), -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']").get("unit"), "bytes", -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']").attrib["discard"], "no" -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']").attrib["memAccess"], "shared" -+ ) -+ self.assertEqual( -+ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='0']").attrib[ -+ "value" -+ ], -+ "21", -+ ) -+ 
self.assertEqual(
-+ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='1']").attrib[
-+ "value"
-+ ],
-+ "10",
-+ )
-+ self.assertEqual(
-+ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='2']").attrib[
-+ "value"
-+ ],
-+ "15",
-+ )
-+ self.assertEqual(
-+ setxml.find("./cpu/numa/cell/[@id='1']/distances/sibling/[@id='3']").attrib[
-+ "value"
-+ ],
-+ "30",
-+ )
-+
-+ # Update boot parameter case
-+ boot = {
-+ "kernel": "/root/f8-i386-vmlinuz",
-+ "initrd": "/root/f8-i386-initrd",
-+ "cmdline": "console=ttyS0 ks=http://example.com/f8-i386/os/",
-+ }
-+
-+ # Update boot devices case
-+ define_mock.reset_mock()
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", boot_dev="cdrom network hd"),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(
-+ ["cdrom", "network", "hd"],
-+ [node.get("dev") for node in setxml.findall("os/boot")],
-+ )
-+
-+ # Update unchanged boot devices case
-+ define_mock.reset_mock()
-+ self.assertEqual(
-+ {
-+ "definition": False,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", boot_dev="hd"),
-+ )
-+ define_mock.assert_not_called()
-+
-+ # Update with boot parameter case
-+ define_mock.reset_mock()
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", boot=boot),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(setxml.find("os").find("kernel").text, "/root/f8-i386-vmlinuz")
-+ self.assertEqual(setxml.find("os").find("initrd").text, "/root/f8-i386-initrd")
-+ self.assertEqual(
-+ setxml.find("os").find("cmdline").text,
-+ "console=ttyS0 ks=http://example.com/f8-i386/os/",
-+ )
-+
-+ boot_uefi = {
-+ "loader": "/usr/share/OVMF/OVMF_CODE.fd",
-+ "nvram": "/usr/share/OVMF/OVMF_VARS.ms.fd",
-+ }
-+
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", boot=boot_uefi),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(
-+ setxml.find("os").find("loader").text, "/usr/share/OVMF/OVMF_CODE.fd"
-+ )
-+ self.assertEqual(setxml.find("os").find("loader").attrib.get("readonly"), "yes")
-+ self.assertEqual(setxml.find("os").find("loader").attrib["type"], "pflash")
-+ self.assertEqual(
-+ setxml.find("os").find("nvram").attrib["template"],
-+ "/usr/share/OVMF/OVMF_VARS.ms.fd",
-+ )
-+
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", boot={"efi": True}),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(setxml.find("os").attrib.get("firmware"), "efi")
-+
-+ invalid_boot = {
-+ "loader": "/usr/share/OVMF/OVMF_CODE.fd",
-+ "initrd": "/root/f8-i386-initrd",
-+ }
-+
-+ with
self.assertRaises(SaltInvocationError): -+ virt.update("my_vm", boot=invalid_boot) -+ -+ with self.assertRaises(SaltInvocationError): -+ virt.update("my_vm", boot={"efi": "Not a boolean value"}) -+ -+ # Update memtune parameter case -+ memtune = { -+ "soft_limit": "0.5g", -+ "hard_limit": "1024", -+ "swap_hard_limit": "2048m", -+ "min_guarantee": "1 g", -+ } -+ -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", mem=memtune), -+ ) -+ -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqualUnit( -+ setxml.find("memtune").find("soft_limit"), int(0.5 * 1024 ** 3), "bytes" -+ ) -+ self.assertEqualUnit( -+ setxml.find("memtune").find("hard_limit"), 1024 * 1024 ** 2, "bytes" -+ ) -+ self.assertEqualUnit( -+ setxml.find("memtune").find("swap_hard_limit"), 2048 * 1024 ** 2, "bytes" -+ ) -+ self.assertEqualUnit( -+ setxml.find("memtune").find("min_guarantee"), 1 * 1024 ** 3, "bytes" - ) - - invalid_unit = {"soft_limit": "2HB"} -@@ -2064,6 +2829,50 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - with self.assertRaises(SaltInvocationError): - virt.update("my_vm", mem=invalid_number) - -+ # Update numatune case -+ numatune = { -+ "memory": {"mode": "strict", "nodeset": 1}, -+ "memnodes": { -+ 0: {"mode": "strict", "nodeset": 1}, -+ 1: {"mode": "preferred", "nodeset": 2}, -+ }, -+ } -+ -+ self.assertEqual( -+ { -+ "definition": True, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", numatune=numatune), -+ ) -+ -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual( -+ setxml.find("numatune").find("memory").attrib.get("mode"), "strict" -+ ) -+ -+ self.assertEqual( -+ setxml.find("numatune").find("memory").attrib.get("nodeset"), "1" -+ ) -+ -+ self.assertEqual( -+ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("mode"), "strict" -+ ) -+ -+ self.assertEqual( -+ setxml.find("./numatune/memnode/[@cellid='0']").attrib.get("nodeset"), "1" -+ ) -+ -+ self.assertEqual( -+ setxml.find("./numatune/memnode/[@cellid='1']").attrib.get("mode"), -+ "preferred", -+ ) -+ -+ self.assertEqual( -+ setxml.find("./numatune/memnode/[@cellid='1']").attrib.get("nodeset"), "2" -+ ) -+ - # Update memory case - setmem_mock = MagicMock(return_value=0) - domain_mock.setMemoryFlags = setmem_mock -@@ -2115,37 +2924,250 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2)) - self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10") - -- # Update disks case -- devattach_mock = MagicMock(return_value=0) -- devdetach_mock = MagicMock(return_value=0) -- domain_mock.attachDevice = devattach_mock -- domain_mock.detachDevice = devdetach_mock -- mock_chmod = MagicMock() -- mock_run = MagicMock() -- with patch.dict( -- os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()} -- ): # pylint: disable=no-member -- with patch.dict( -- virt.__salt__, {"cmd.run": mock_run} -- ): # pylint: disable=no-member -- ret = virt.update( -- "my_vm", -- disk_profile="default", -- disks=[ -- { -- "name": "cddrive", -- "device": "cdrom", -- "source_file": None, -- "model": "ide", -- }, -- {"name": "added", "size": 2048}, -- ], -- ) -- added_disk_path = os.path.join( -- virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2" -- ) # pylint: disable=no-member -- self.assertEqual( -- 
mock_run.call_args[0][0],
-+ # Update memory backing case
-+ mem_back = {
-+ "hugepages": [
-+ {"nodeset": "1-5,^4", "size": "1g"},
-+ {"nodeset": "4", "size": "2g"},
-+ ],
-+ "nosharepages": True,
-+ "locked": True,
-+ "source": "file",
-+ "access": "shared",
-+ "allocation": "immediate",
-+ "discard": True,
-+ }
-+
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", mem=mem_back),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertDictEqual(
-+ {
-+ p.get("nodeset"): {"size": p.get("size"), "unit": p.get("unit")}
-+ for p in setxml.findall("memoryBacking/hugepages/page")
-+ },
-+ {
-+ "1,2,3,5": {"size": str(1024 ** 3), "unit": "bytes"},
-+ "4": {"size": str(2 * 1024 ** 3), "unit": "bytes"},
-+ },
-+ )
-+ self.assertNotEqual(setxml.find("./memoryBacking/nosharepages"), None)
-+ self.assertIsNone(setxml.find("./memoryBacking/nosharepages").text)
-+ self.assertEqual([], setxml.find("./memoryBacking/nosharepages").keys())
-+ self.assertNotEqual(setxml.find("./memoryBacking/locked"), None)
-+ self.assertIsNone(setxml.find("./memoryBacking/locked").text)
-+ self.assertEqual([], setxml.find("./memoryBacking/locked").keys())
-+ self.assertEqual(setxml.find("./memoryBacking/source").attrib["type"], "file")
-+ self.assertEqual(setxml.find("./memoryBacking/access").attrib["mode"], "shared")
-+ self.assertNotEqual(setxml.find("./memoryBacking/discard"), None)
-+
-+ # test adding iothreads
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", cpu={"iothreads": 5}),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(setxml.find("iothreads").text, "5")
-+
-+ # test adding cpu tune parameters
-+ cputune = {
-+ "shares": 2048,
-+ "period": 122000,
-+ "quota": -1,
-+ "global_period": 1000000,
-+ "global_quota": -3,
-+ "emulator_period": 1200000,
-+ "emulator_quota": -10,
-+ "iothread_period": 133000,
-+ "iothread_quota": -1,
-+ "vcpupin": {0: "1-4,^2", 1: "0,1", 2: "2,3", 3: "0,4"},
-+ "emulatorpin": "1-3",
-+ "iothreadpin": {1: "5-6", 2: "7-8"},
-+ "vcpusched": [
-+ {"scheduler": "fifo", "priority": 1, "vcpus": "0"},
-+ {"scheduler": "fifo", "priority": 2, "vcpus": "1"},
-+ {"scheduler": "idle", "priority": 3, "vcpus": "2"},
-+ ],
-+ "iothreadsched": [{"scheduler": "batch", "iothreads": "7"}],
-+ "cachetune": {
-+ "0-3": {
-+ 0: {"level": 3, "type": "both", "size": 3},
-+ 1: {"level": 3, "type": "both", "size": 3},
-+ "monitor": {1: 3, "0-3": 3},
-+ },
-+ "4-5": {"monitor": {4: 3, 5: 2}},
-+ },
-+ "memorytune": {"0-2": {0: 60}, "3-4": {0: 50, 1: 70}},
-+ }
-+ self.assertEqual(
-+ {
-+ "definition": True,
-+ "disk": {"attached": [], "detached": [], "updated": []},
-+ "interface": {"attached": [], "detached": []},
-+ },
-+ virt.update("my_vm", cpu={"tuning": cputune}),
-+ )
-+ setxml = ET.fromstring(define_mock.call_args[0][0])
-+ self.assertEqual(setxml.find("cputune").find("shares").text, "2048")
-+ self.assertEqual(setxml.find("cputune").find("period").text, "122000")
-+ self.assertEqual(setxml.find("cputune").find("quota").text, "-1")
-+ self.assertEqual(setxml.find("cputune").find("global_period").text, "1000000")
-+ self.assertEqual(setxml.find("cputune").find("global_quota").text, "-3")
-+ self.assertEqual(setxml.find("cputune").find("emulator_period").text, "1200000")
-+
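The ``cputune`` dictionary above maps nearly one-to-one onto libvirt's <cputune> element: scalar entries become child elements, and ``vcpupin`` entries become ``<vcpupin vcpu='...' cpuset='...'/>`` nodes with the cpuset expanded as sketched earlier. The following illustrative sketch (not Salt's actual implementation; it reuses the hypothetical ``expand_cpuset`` from the previous example) shows the shape the assertions in this hunk verify for the scalar and pinning parameters.

.. code-block:: python

    import xml.etree.ElementTree as ET

    def cputune_xml(tuning):
        """Build a minimal <cputune> element from a tuning dict."""
        cputune = ET.Element("cputune")
        # Scalar parameters become plain child elements with text content.
        for param in ("shares", "period", "quota", "global_period", "global_quota"):
            if param in tuning:
                ET.SubElement(cputune, param).text = str(tuning[param])
        # Pinning entries become attribute-only child elements.
        for vcpu, cpuset in tuning.get("vcpupin", {}).items():
            ET.SubElement(
                cputune, "vcpupin", vcpu=str(vcpu), cpuset=expand_cpuset(cpuset)
            )
        return cputune

    tune = cputune_xml({"shares": 2048, "vcpupin": {0: "1-4,^2"}})
    assert tune.find("shares").text == "2048"
    assert tune.find("vcpupin[@vcpu='0']").get("cpuset") == "1,3,4"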
self.assertEqual(setxml.find("cputune").find("emulator_quota").text, "-10") -+ self.assertEqual(setxml.find("cputune").find("iothread_period").text, "133000") -+ self.assertEqual(setxml.find("cputune").find("iothread_quota").text, "-1") -+ self.assertEqual( -+ setxml.find("cputune").find("vcpupin[@vcpu='0']").attrib.get("cpuset"), -+ "1,3,4", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("vcpupin[@vcpu='1']").attrib.get("cpuset"), -+ "0,1", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("vcpupin[@vcpu='2']").attrib.get("cpuset"), -+ "2,3", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("vcpupin[@vcpu='3']").attrib.get("cpuset"), -+ "0,4", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("emulatorpin").attrib.get("cpuset"), "1,2,3" -+ ) -+ self.assertEqual( -+ setxml.find("cputune") -+ .find("iothreadpin[@iothread='1']") -+ .attrib.get("cpuset"), -+ "5,6", -+ ) -+ self.assertEqual( -+ setxml.find("cputune") -+ .find("iothreadpin[@iothread='2']") -+ .attrib.get("cpuset"), -+ "7,8", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("vcpusched[@vcpus='0']").attrib.get("priority"), -+ "1", -+ ) -+ self.assertEqual( -+ setxml.find("cputune") -+ .find("vcpusched[@vcpus='0']") -+ .attrib.get("scheduler"), -+ "fifo", -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("iothreadsched").attrib.get("iothreads"), "7" -+ ) -+ self.assertEqual( -+ setxml.find("cputune").find("iothreadsched").attrib.get("scheduler"), -+ "batch", -+ ) -+ self.assertIsNotNone(setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']")) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']" -+ ).attrib.get("level"), -+ "3", -+ ) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/cachetune[@vcpus='0,1,2,3']/cache[@id='0']" -+ ).attrib.get("type"), -+ "both", -+ ) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']" -+ ).attrib.get("level"), -+ "3", -+ ) -+ self.assertNotEqual( -+ setxml.find("./cputune/cachetune[@vcpus='0,1,2,3']/monitor[@vcpus='1']"), -+ None, -+ ) -+ self.assertNotEqual( -+ setxml.find("./cputune/cachetune[@vcpus='4,5']").attrib.get("vcpus"), None -+ ) -+ self.assertEqual( -+ setxml.find("./cputune/cachetune[@vcpus='4,5']/cache[@id='0']"), None -+ ) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='4']" -+ ).attrib.get("level"), -+ "3", -+ ) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/cachetune[@vcpus='4,5']/monitor[@vcpus='5']" -+ ).attrib.get("level"), -+ "2", -+ ) -+ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='0,1,2']"), None) -+ self.assertEqual( -+ setxml.find( -+ "./cputune/memorytune[@vcpus='0,1,2']/node[@id='0']" -+ ).attrib.get("bandwidth"), -+ "60", -+ ) -+ self.assertNotEqual(setxml.find("./cputune/memorytune[@vcpus='3,4']"), None) -+ self.assertEqual( -+ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='0']").attrib.get( -+ "bandwidth" -+ ), -+ "50", -+ ) -+ self.assertEqual( -+ setxml.find("./cputune/memorytune[@vcpus='3,4']/node[@id='1']").attrib.get( -+ "bandwidth" -+ ), -+ "70", -+ ) -+ -+ # Update disks case -+ devattach_mock = MagicMock(return_value=0) -+ devdetach_mock = MagicMock(return_value=0) -+ domain_mock.attachDevice = devattach_mock -+ domain_mock.detachDevice = devdetach_mock -+ mock_chmod = MagicMock() -+ mock_run = MagicMock() -+ with patch.dict( -+ os.__dict__, {"chmod": mock_chmod, "makedirs": MagicMock()} -+ ): # pylint: disable=no-member -+ with patch.dict( -+ 
virt.__salt__, {"cmd.run": mock_run} -+ ): # pylint: disable=no-member -+ ret = virt.update( -+ "my_vm", -+ disk_profile="default", -+ disks=[ -+ { -+ "name": "cddrive", -+ "device": "cdrom", -+ "source_file": None, -+ "model": "ide", -+ }, -+ {"name": "added", "size": 2048, "iothreads": True}, -+ ], -+ ) -+ added_disk_path = os.path.join( -+ virt.__salt__["config.get"]("virt:images"), "my_vm_added.qcow2" -+ ) # pylint: disable=no-member -+ self.assertEqual( -+ mock_run.call_args[0][0], - 'qemu-img create -f qcow2 "{}" 2048M'.format(added_disk_path), - ) - self.assertEqual(mock_chmod.call_args[0][0], added_disk_path) -@@ -2170,6 +3192,11 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - self.assertEqual(devattach_mock.call_count, 2) - self.assertEqual(devdetach_mock.call_count, 2) - -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual( -+ "threads", setxml.find("devices/disk[3]/driver").get("io") -+ ) -+ - # Update nics case - yaml_config = """ - virt: -@@ -2244,6 +3271,19 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - setxml = ET.fromstring(define_mock.call_args[0][0]) - self.assertEqual("vnc", setxml.find("devices/graphics").get("type")) - -+ # Serial and console test case -+ self.assertEqual( -+ { -+ "definition": False, -+ "disk": {"attached": [], "detached": [], "updated": []}, -+ "interface": {"attached": [], "detached": []}, -+ }, -+ virt.update("my_vm", serials=[{"type": "tcp"}], consoles=[{"type": "tcp"}]), -+ ) -+ setxml = ET.fromstring(define_mock.call_args[0][0]) -+ self.assertEqual(setxml.find("devices/serial").attrib["type"], "pty") -+ self.assertEqual(setxml.find("devices/console").attrib["type"], "pty") -+ - # Update with no diff case - pool_mock = MagicMock() - default_pool_desc = "" -@@ -2644,48 +3684,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): - /usr/share/old/OVMF_CODE.fd - /usr/share/old/OVMF_VARS.ms.fd - -- -- -- -- -- -- -- --
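Taken together, these tests exercise a considerably richer ``virt.update`` signature (``cpu``, ``numatune``, ``mem``, ``boot_dev``, ``serials``, ``consoles``, per-disk ``iothreads``). As a rough end-to-end sketch, a caller might drive the patched module as below; this assumes a configured libvirt connection on the minion and uses values mirroring the test fixtures, and the return keys match the ones asserted throughout the tests.

.. code-block:: python

    import salt.modules.virt as virt

    # Running this for real requires a reachable libvirt hypervisor.
    ret = virt.update(
        "my_vm",
        cpu={
            "maximum": 12,
            "topology": {"sockets": 1, "cores": 12, "threads": 1},
            "tuning": {"shares": 2048, "vcpupin": {0: "1-4,^2"}},
        },
        numatune={"memory": {"mode": "strict", "nodeset": 1}},
        mem={"soft_limit": "0.5g", "hard_limit": "1024"},
        boot_dev="cdrom network hd",
    )
    # "definition" is True whenever the stored domain XML had to be redefined.
    assert ret["definition"]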