From ae1540a455fead7f80a43b6c59e8fd075bfb0800b5e408332e5b949ca0fff7f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Klaus=20K=C3=A4mpf?= Date: Fri, 4 Aug 2017 10:29:26 +0000 Subject: [PATCH] Accepting request 514025 from systemsmanagement:saltstack:testing - Bugfix: clean up `change` attribute from interface dict (upstream) Issue: https://github.com/saltstack/salt/issues/41461 PR: 1. https://github.com/saltstack/salt/pull/41487 2. https://github.com/saltstack/salt/pull/41533 Added: * clean-up-change-attribute-from-interface-dict.patch - Bugfix: orchestrate and batches return false failed information https://github.com/saltstack/salt/issues/40635 - speed-up cherrypy by removing sleep call - wrong os_family grains on SUSE - fix unittests (bsc#1038855) - fix setting the language on SUSE systems (bsc#1038855) - Bugfix: unable to use hostname for minion ID as '127' (upstream) - Bugfix: remove sleep call in CherryPy API handler (upstream) - Fix core grains constants for timezone (bsc#1032931) - Added: * bugfix-unable-to-use-127-as-hostname.patch * fix-grain-for-os_family-on-suse-series.patch * fix-os_family-case-in-unittest.patch * fix-setting-language-on-suse-systems.patch * fixed-issue-with-parsing-of-master-minion-returns-wh.patch * rest_cherrypy-remove-sleep-call.patch * use-correct-grain-constants-for-timezone.patch - Update to 2016.11.4 See https://docs.saltstack.com/en/develop/topics/releases/2016.11.4.html for full changelog - Changed: * add-options-for-dockerng.patch * fix-regression-in-file.get_managed-add-unit-tests.patch OBS-URL: https://build.opensuse.org/request/show/514025 OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=89 --- add-options-for-dockerng.patch | 36 +- ...r-skip-false-values-from-preferred_i.patch | 917 ++++++++++++++++ ...or-installing-patches-in-yum-dnf-exe.patch | 991 ++++++++++++++++++ bugfix-unable-to-use-127-as-hostname.patch | 77 ++ ...change-attribute-from-interface-dict.patch | 84 ++ fix-case-in-os_family-for-suse.patch | 26 - fix-grain-for-os_family-on-suse-series.patch | 132 +++ fix-os_family-case-in-unittest.patch | 26 + ...n-in-file.get_managed-add-unit-tests.patch | 187 +--- fix-setting-language-on-suse-systems.patch | 121 +++ ...-parsing-of-master-minion-returns-wh.patch | 41 + ...eacons.list-integration-test-failure.patch | 26 + rest_cherrypy-remove-sleep-call.patch | 33 + run-salt-master-as-dedicated-salt-user.patch | 21 +- salt-2016.11.3.tar.gz | 3 - salt-2016.11.4.tar.gz | 3 + salt.changes | 167 +++ salt.spec | 113 +- ...e-cache_dir-because-storage-paths-ch.patch | 185 ++++ ...l-salt-minion.service-file-for-rhel7.patch | 34 + ...correct-grain-constants-for-timezone.patch | 530 ++++++++++ 21 files changed, 3464 insertions(+), 289 deletions(-) create mode 100644 add-unit-test-for-skip-false-values-from-preferred_i.patch create mode 100644 adding-support-for-installing-patches-in-yum-dnf-exe.patch create mode 100644 bugfix-unable-to-use-127-as-hostname.patch create mode 100644 clean-up-change-attribute-from-interface-dict.patch delete mode 100644 fix-case-in-os_family-for-suse.patch create mode 100644 fix-grain-for-os_family-on-suse-series.patch create mode 100644 fix-os_family-case-in-unittest.patch create mode 100644 fix-setting-language-on-suse-systems.patch create mode 100644 fixed-issue-with-parsing-of-master-minion-returns-wh.patch create mode 100644 fixing-beacons.list-integration-test-failure.patch create mode 100644 rest_cherrypy-remove-sleep-call.patch delete mode 100644 salt-2016.11.3.tar.gz create 
mode 100644 salt-2016.11.4.tar.gz create mode 100644 search-the-entire-cache_dir-because-storage-paths-ch.patch create mode 100644 special-salt-minion.service-file-for-rhel7.patch create mode 100644 use-correct-grain-constants-for-timezone.patch diff --git a/add-options-for-dockerng.patch b/add-options-for-dockerng.patch index 02bd0c8..6aedd7f 100644 --- a/add-options-for-dockerng.patch +++ b/add-options-for-dockerng.patch @@ -1,4 +1,4 @@ -From 95d547d3e6c98f927eaad77346b84b2ccef95627 Mon Sep 17 00:00:00 2001 +From c1a54f79fa0c35536e420eda1e429723c532c891 Mon Sep 17 00:00:00 2001 From: Michael Calmer Date: Thu, 19 Jan 2017 15:39:10 +0100 Subject: [PATCH] add options for dockerng @@ -17,7 +17,7 @@ added unit test for dockerng.sls_build dryrun option 2 files changed, 122 insertions(+), 11 deletions(-) diff --git a/salt/modules/dockerng.py b/salt/modules/dockerng.py -index 29af703ffa..c199fede1e 100644 +index 09d80bdd3f..8366e5e8ba 100644 --- a/salt/modules/dockerng.py +++ b/salt/modules/dockerng.py @@ -28,13 +28,13 @@ to replace references to ``dockerng`` with ``docker``. @@ -38,7 +38,7 @@ index 29af703ffa..c199fede1e 100644 .. _docker-py: https://pypi.python.org/pypi/docker-py .. _Docker: https://www.docker.com/ -@@ -332,8 +332,8 @@ __func_alias__ = { +@@ -268,8 +268,8 @@ __func_alias__ = { } # Minimum supported versions @@ -49,7 +49,7 @@ index 29af703ffa..c199fede1e 100644 VERSION_RE = r'([\d.]+)' -@@ -3557,7 +3557,8 @@ def build(path=None, +@@ -3479,7 +3479,8 @@ def build(path=None, rm=True, api_response=False, fileobj=None, @@ -59,7 +59,7 @@ index 29af703ffa..c199fede1e 100644 ''' Builds a docker image from a Dockerfile or a URL -@@ -3591,6 +3592,10 @@ def build(path=None, +@@ -3513,6 +3514,10 @@ def build(path=None, .. versionadded:: develop @@ -70,7 +70,7 @@ index 29af703ffa..c199fede1e 100644 **RETURN DATA** A dictionary containing one or more of the following keys: -@@ -3637,7 +3642,8 @@ def build(path=None, +@@ -3559,7 +3564,8 @@ def build(path=None, fileobj=fileobj, rm=rm, nocache=not cache, @@ -80,7 +80,7 @@ index 29af703ffa..c199fede1e 100644 ret = {'Time_Elapsed': time.time() - time_started} _clear_context() -@@ -5755,7 +5761,9 @@ def call(name, function, *args, **kwargs): +@@ -5657,7 +5663,9 @@ def call(name, function, *args, **kwargs): raise CommandExecutionError('Missing function parameter') # move salt into the container @@ -91,18 +91,18 @@ index 29af703ffa..c199fede1e 100644 with io.open(thin_path, 'rb') as file: _client_wrapper('put_archive', name, thin_dest_path, file) try: -@@ -5865,7 +5873,7 @@ def sls(name, mods=None, saltenv='base', **kwargs): +@@ -5774,7 +5782,7 @@ def sls(name, mods=None, saltenv='base', **kwargs): def sls_build(name, base='opensuse/python', mods=None, saltenv='base', - **kwargs): + dryrun=False, **kwargs): ''' - Build a docker image using the specified sls modules and base image. + Build a Docker image using the specified SLS modules on top of base image -@@ -5873,6 +5881,24 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', - can build a docker image inside myminion that results of applying those - states by doing: +@@ -5796,6 +5804,24 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', + Specify the environment from which to retrieve the SLS indicated by the + `mods` parameter. + base + the base image @@ -125,7 +125,7 @@ index 29af703ffa..c199fede1e 100644 CLI Example: .. 
code-block:: bash -@@ -5905,9 +5931,12 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', +@@ -5822,11 +5848,14 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', # Now execute the state into the container ret = __salt__['dockerng.sls'](id_, mods, saltenv, **kwargs) # fail if the state was not successful @@ -139,11 +139,13 @@ index 29af703ffa..c199fede1e 100644 + __salt__['dockerng.rm'](id_) + return ret return __salt__['dockerng.commit'](id_, name) + + diff --git a/tests/unit/modules/dockerng_test.py b/tests/unit/modules/dockerng_test.py -index ec4901e275..546635577a 100644 +index f213ef0d1f..36478cc4c6 100644 --- a/tests/unit/modules/dockerng_test.py +++ b/tests/unit/modules/dockerng_test.py -@@ -680,6 +680,84 @@ class DockerngTestCase(TestCase): +@@ -755,6 +755,84 @@ class DockerngTestCase(TestCase): self.assertEqual( {'Id': 'ID2', 'Image': 'foo', 'Time_Elapsed': 42}, ret) @@ -228,7 +230,7 @@ index ec4901e275..546635577a 100644 def test_call_success(self): ''' test module calling inside containers -@@ -694,6 +772,9 @@ class DockerngTestCase(TestCase): +@@ -769,6 +847,9 @@ class DockerngTestCase(TestCase): return_value={ 'retcode': 0 }) @@ -238,7 +240,7 @@ index ec4901e275..546635577a 100644 client = Mock() client.put_archive = Mock() -@@ -704,6 +785,7 @@ class DockerngTestCase(TestCase): +@@ -779,6 +860,7 @@ class DockerngTestCase(TestCase): dockerng_mod.__salt__, { 'dockerng.run_all': docker_run_all_mock, 'dockerng.copy_to': docker_copy_to_mock, diff --git a/add-unit-test-for-skip-false-values-from-preferred_i.patch b/add-unit-test-for-skip-false-values-from-preferred_i.patch new file mode 100644 index 0000000..8078861 --- /dev/null +++ b/add-unit-test-for-skip-false-values-from-preferred_i.patch @@ -0,0 +1,917 @@ +From a983f9342c6917eaa1aba63cd5ceebd9271f43d5 Mon Sep 17 00:00:00 2001 +From: Bo Maryniuk +Date: Thu, 20 Apr 2017 14:03:30 +0200 +Subject: [PATCH] Add unit test for skip false values from preferred_ip + +- Add fake preferred IP function for testing +- Add initial unit test for openstack cloud module +- Move out nested function to be unit-testable +- Lintfix +- Add unit test for nova connector +- Move out nested function for testing purposes +- Fix name error exception +- Skip test, if libcloud is not around +- Add unit test for node ip filtering +- Lintfix E0602 +- Fix UT parameter changes +- Fix lint, typos and readability +- PEP8: fix unused variable +- Reformat idents, fix typos +- Describe debug information +--- + salt/cloud/clouds/dimensiondata.py | 116 +++++----- + salt/cloud/clouds/nova.py | 295 ++++++++++++-------------- + salt/cloud/clouds/openstack.py | 229 ++++++++++---------- + tests/unit/cloud/clouds/__init__.py | 17 ++ + tests/unit/cloud/clouds/dimensiondata_test.py | 28 ++- + tests/unit/cloud/clouds/nova_test.py | 43 ++++ + tests/unit/cloud/clouds/openstack_test.py | 43 ++++ + 7 files changed, 441 insertions(+), 330 deletions(-) + create mode 100644 tests/unit/cloud/clouds/nova_test.py + create mode 100644 tests/unit/cloud/clouds/openstack_test.py + +diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py +index e4af241867..d8478436b8 100644 +--- a/salt/cloud/clouds/dimensiondata.py ++++ b/salt/cloud/clouds/dimensiondata.py +@@ -131,6 +131,60 @@ def get_dependencies(): + ) + + ++def _query_node_data(vm_, data): ++ running = False ++ try: ++ node = show_instance(vm_['name'], 'action') ++ running = (node['state'] == NodeState.RUNNING) ++ log.debug('Loaded node data for %s:\nname: 
%s\nstate: %s', ++ vm_['name'], pprint.pformat(node['name']), node['state']) ++ except Exception as err: ++ log.error( ++ 'Failed to get nodes list: %s', err, ++ # Show the traceback if the debug logging level is enabled ++ exc_info_on_loglevel=logging.DEBUG ++ ) ++ # Trigger a failure in the wait for IP function ++ return running ++ ++ if not running: ++ # Still not running, trigger another iteration ++ return ++ ++ private = node['private_ips'] ++ public = node['public_ips'] ++ ++ if private and not public: ++ log.warning('Private IPs returned, but not public. Checking for misidentified IPs.') ++ for private_ip in private: ++ private_ip = preferred_ip(vm_, [private_ip]) ++ if private_ip is False: ++ continue ++ if salt.utils.cloud.is_public_ip(private_ip): ++ log.warning('%s is a public IP', private_ip) ++ data.public_ips.append(private_ip) ++ else: ++ log.warning('%s is a private IP', private_ip) ++ if private_ip not in data.private_ips: ++ data.private_ips.append(private_ip) ++ ++ if ssh_interface(vm_) == 'private_ips' and data.private_ips: ++ return data ++ ++ if private: ++ data.private_ips = private ++ if ssh_interface(vm_) == 'private_ips': ++ return data ++ ++ if public: ++ data.public_ips = public ++ if ssh_interface(vm_) != 'private_ips': ++ return data ++ ++ log.debug('Contents of the node data:') ++ log.debug(data) ++ ++ + def create(vm_): + ''' + Create a single VM from a data dict +@@ -197,69 +251,9 @@ def create(vm_): + ) + return False + +- def __query_node_data(vm_, data): +- running = False +- try: +- node = show_instance(vm_['name'], 'action') +- running = (node['state'] == NodeState.RUNNING) +- log.debug( +- 'Loaded node data for %s:\nname: %s\nstate: %s', +- vm_['name'], +- pprint.pformat(node['name']), +- node['state'] +- ) +- except Exception as err: +- log.error( +- 'Failed to get nodes list: %s', err, +- # Show the traceback if the debug logging level is enabled +- exc_info_on_loglevel=logging.DEBUG +- ) +- # Trigger a failure in the wait for IP function +- return False +- +- if not running: +- # Still not running, trigger another iteration +- return +- +- private = node['private_ips'] +- public = node['public_ips'] +- +- if private and not public: +- log.warning( +- 'Private IPs returned, but not public... 
Checking for ' +- 'misidentified IPs' +- ) +- for private_ip in private: +- private_ip = preferred_ip(vm_, [private_ip]) +- if private_ip is False: +- continue +- if salt.utils.cloud.is_public_ip(private_ip): +- log.warning('%s is a public IP', private_ip) +- data.public_ips.append(private_ip) +- else: +- log.warning('%s is a private IP', private_ip) +- if private_ip not in data.private_ips: +- data.private_ips.append(private_ip) +- +- if ssh_interface(vm_) == 'private_ips' and data.private_ips: +- return data +- +- if private: +- data.private_ips = private +- if ssh_interface(vm_) == 'private_ips': +- return data +- +- if public: +- data.public_ips = public +- if ssh_interface(vm_) != 'private_ips': +- return data +- +- log.debug('DATA') +- log.debug(data) +- + try: + data = salt.utils.cloud.wait_for_ip( +- __query_node_data, ++ _query_node_data, + update_args=(vm_, data), + timeout=config.get_cloud_config_value( + 'wait_for_ip_timeout', vm_, __opts__, default=25 * 60), +diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py +index ed9251d4b1..d2cbf7387a 100644 +--- a/salt/cloud/clouds/nova.py ++++ b/salt/cloud/clouds/nova.py +@@ -722,6 +722,145 @@ def request_instance(vm_=None, call=None): + return data, vm_ + + ++def _query_node_data(vm_, data, conn): ++ try: ++ node = show_instance(vm_['name'], 'action') ++ log.debug('Loaded node data for {0}:' ++ '\n{1}'.format(vm_['name'], pprint.pformat(node))) ++ except Exception as err: ++ # Show the traceback if the debug logging level is enabled ++ log.error('Failed to get nodes list: {0}'.format(err), ++ exc_info_on_loglevel=logging.DEBUG) ++ # Trigger a failure in the wait for IP function ++ return False ++ ++ running = node['state'] == 'ACTIVE' ++ if not running: ++ # Still not running, trigger another iteration ++ return ++ ++ if rackconnect(vm_) is True: ++ extra = node.get('extra', {}) ++ rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '') ++ if rc_status != 'DEPLOYED': ++ log.debug('Waiting for Rackconnect automation to complete') ++ return ++ ++ if managedcloud(vm_) is True: ++ extra = conn.server_show_libcloud(node['id']).extra ++ mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '') ++ ++ if mc_status != 'Complete': ++ log.debug('Waiting for managed cloud automation to complete') ++ return ++ ++ access_ip = node.get('extra', {}).get('access_ip', '') ++ ++ rcv3 = rackconnectv3(vm_) in node['addresses'] ++ sshif = ssh_interface(vm_) in node['addresses'] ++ ++ if any((rcv3, sshif)): ++ networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) ++ for network in node['addresses'].get(networkname, []): ++ if network['version'] is 4: ++ access_ip = network['addr'] ++ break ++ vm_['cloudnetwork'] = True ++ ++ # Conditions to pass this ++ # ++ # Rackconnect v2: vm_['rackconnect'] = True ++ # If this is True, then the server will not be accessible from the ipv4 address in public_ips. ++ # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the ++ # server. In this case we can use the private_ips for ssh_interface, or the access_ip. ++ # ++ # Rackconnect v3: vm['rackconnectv3'] = ++ # If this is the case, salt will need to use the cloud network to login to the server. There ++ # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud ++ # also cannot use the private_ips, because that traffic is dropped at the hypervisor. 
++ # ++ # CloudNetwork: vm['cloudnetwork'] = True ++ # If this is True, then we should have an access_ip at this point set to the ip on the cloud ++ # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will ++ # use the initial access_ip, and not overwrite anything. ++ ++ if (any((cloudnetwork(vm_), rackconnect(vm_))) ++ and (ssh_interface(vm_) != 'private_ips' or rcv3) ++ and access_ip != ''): ++ data.public_ips = [access_ip] ++ return data ++ ++ result = [] ++ ++ if ('private_ips' not in node ++ and 'public_ips' not in node ++ and 'floating_ips' not in node ++ and 'fixed_ips' not in node ++ and 'access_ip' in node.get('extra', {})): ++ result = [node['extra']['access_ip']] ++ ++ private = node.get('private_ips', []) ++ public = node.get('public_ips', []) ++ fixed = node.get('fixed_ips', []) ++ floating = node.get('floating_ips', []) ++ ++ if private and not public: ++ log.warning('Private IPs returned, but not public. ' ++ 'Checking for misidentified IPs') ++ for private_ip in private: ++ private_ip = preferred_ip(vm_, [private_ip]) ++ if private_ip is False: ++ continue ++ if salt.utils.cloud.is_public_ip(private_ip): ++ log.warning('{0} is a public IP'.format(private_ip)) ++ data.public_ips.append(private_ip) ++ log.warning('Public IP address was not ready when we last checked. ' ++ 'Appending public IP address now.') ++ public = data.public_ips ++ else: ++ log.warning('{0} is a private IP'.format(private_ip)) ++ ignore_ip = ignore_cidr(vm_, private_ip) ++ if private_ip not in data.private_ips and not ignore_ip: ++ result.append(private_ip) ++ ++ # populate return data with private_ips ++ # when ssh_interface is set to private_ips and public_ips exist ++ if not result and ssh_interface(vm_) == 'private_ips': ++ for private_ip in private: ++ ignore_ip = ignore_cidr(vm_, private_ip) ++ if private_ip not in data.private_ips and not ignore_ip: ++ result.append(private_ip) ++ ++ non_private_ips = [] ++ ++ if public: ++ data.public_ips = public ++ if ssh_interface(vm_) == 'public_ips': ++ non_private_ips.append(public) ++ ++ if floating: ++ data.floating_ips = floating ++ if ssh_interface(vm_) == 'floating_ips': ++ non_private_ips.append(floating) ++ ++ if fixed: ++ data.fixed_ips = fixed ++ if ssh_interface(vm_) == 'fixed_ips': ++ non_private_ips.append(fixed) ++ ++ if non_private_ips: ++ log.debug('result = {0}'.format(non_private_ips)) ++ data.private_ips = result ++ if ssh_interface(vm_) != 'private_ips': ++ return data ++ ++ if result: ++ log.debug('result = {0}'.format(result)) ++ data.private_ips = result ++ if ssh_interface(vm_) == 'private_ips': ++ return data ++ ++ + def create(vm_): + ''' + Create a single VM from a data dict +@@ -792,162 +931,10 @@ def create(vm_): + # Pull the instance ID, valid for both spot and normal instances + vm_['instance_id'] = data.id + +- def __query_node_data(vm_, data): +- try: +- node = show_instance(vm_['name'], 'action') +- log.debug( +- 'Loaded node data for {0}:\n{1}'.format( +- vm_['name'], +- pprint.pformat(node) +- ) +- ) +- except Exception as err: +- log.error( +- 'Failed to get nodes list: {0}'.format( +- err +- ), +- # Show the traceback if the debug logging level is enabled +- exc_info_on_loglevel=logging.DEBUG +- ) +- # Trigger a failure in the wait for IP function +- return False +- +- running = node['state'] == 'ACTIVE' +- if not running: +- # Still not running, trigger another iteration +- return +- +- if rackconnect(vm_) is True: +- extra = node.get('extra', {}) +- rc_status = extra.get('metadata', 
{}).get( +- 'rackconnect_automation_status', '') +- if rc_status != 'DEPLOYED': +- log.debug('Waiting for Rackconnect automation to complete') +- return +- +- if managedcloud(vm_) is True: +- extra = conn.server_show_libcloud( +- node['id'] +- ).extra +- mc_status = extra.get('metadata', {}).get( +- 'rax_service_level_automation', '') +- +- if mc_status != 'Complete': +- log.debug('Waiting for managed cloud automation to complete') +- return +- +- access_ip = node.get('extra', {}).get('access_ip', '') +- +- rcv3 = rackconnectv3(vm_) in node['addresses'] +- sshif = ssh_interface(vm_) in node['addresses'] +- +- if any((rcv3, sshif)): +- networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_) +- for network in node['addresses'].get(networkname, []): +- if network['version'] is 4: +- access_ip = network['addr'] +- break +- vm_['cloudnetwork'] = True +- +- # Conditions to pass this +- # +- # Rackconnect v2: vm_['rackconnect'] = True +- # If this is True, then the server will not be accessible from the ipv4 addres in public_ips. +- # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the +- # server. In this case we can use the private_ips for ssh_interface, or the access_ip. +- # +- # Rackconnect v3: vm['rackconnectv3'] = +- # If this is the case, salt will need to use the cloud network to login to the server. There +- # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud +- # also cannot use the private_ips, because that traffic is dropped at the hypervisor. +- # +- # CloudNetwork: vm['cloudnetwork'] = True +- # If this is True, then we should have an access_ip at this point set to the ip on the cloud +- # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will +- # use the initial access_ip, and not overwrite anything. +- +- if any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != '': +- data.public_ips = [access_ip, ] +- return data +- +- result = [] +- +- if 'private_ips' not in node and 'public_ips' not in node and \ +- 'floating_ips' not in node and 'fixed_ips' not in node and \ +- 'access_ip' in node.get('extra', {}): +- result = [node['extra']['access_ip']] +- +- private = node.get('private_ips', []) +- public = node.get('public_ips', []) +- fixed = node.get('fixed_ips', []) +- floating = node.get('floating_ips', []) +- +- if private and not public: +- log.warning( +- 'Private IPs returned, but not public... Checking for ' +- 'misidentified IPs' +- ) +- for private_ip in private: +- private_ip = preferred_ip(vm_, [private_ip]) +- if private_ip is False: +- continue +- if salt.utils.cloud.is_public_ip(private_ip): +- log.warning('{0} is a public IP'.format(private_ip)) +- data.public_ips.append(private_ip) +- log.warning( +- ( +- 'Public IP address was not ready when we last' +- ' checked. Appending public IP address now.' 
+- ) +- ) +- public = data.public_ips +- else: +- log.warning('{0} is a private IP'.format(private_ip)) +- ignore_ip = ignore_cidr(vm_, private_ip) +- if private_ip not in data.private_ips and not ignore_ip: +- result.append(private_ip) +- +- # populate return data with private_ips +- # when ssh_interface is set to private_ips and public_ips exist +- if not result and ssh_interface(vm_) == 'private_ips': +- for private_ip in private: +- ignore_ip = ignore_cidr(vm_, private_ip) +- if private_ip not in data.private_ips and not ignore_ip: +- result.append(private_ip) +- +- non_private_ips = [] +- +- if public: +- data.public_ips = public +- if ssh_interface(vm_) == 'public_ips': +- non_private_ips.append(public) +- +- if floating: +- data.floating_ips = floating +- if ssh_interface(vm_) == 'floating_ips': +- non_private_ips.append(floating) +- +- if fixed: +- data.fixed_ips = fixed +- if ssh_interface(vm_) == 'fixed_ips': +- non_private_ips.append(fixed) +- +- if non_private_ips: +- log.debug('result = {0}'.format(non_private_ips)) +- data.private_ips = result +- if ssh_interface(vm_) != 'private_ips': +- return data +- +- if result: +- log.debug('result = {0}'.format(result)) +- data.private_ips = result +- if ssh_interface(vm_) == 'private_ips': +- return data +- + try: + data = salt.utils.cloud.wait_for_ip( +- __query_node_data, +- update_args=(vm_, data), ++ _query_node_data, ++ update_args=(vm_, data, conn), + timeout=config.get_cloud_config_value( + 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), + interval=config.get_cloud_config_value( +diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py +index cc936509c7..c8ad91ff23 100644 +--- a/salt/cloud/clouds/openstack.py ++++ b/salt/cloud/clouds/openstack.py +@@ -585,6 +585,119 @@ def request_instance(vm_=None, call=None): + return data, vm_ + + ++def _query_node_data(vm_, data, floating, conn): ++ try: ++ node = show_instance(vm_['name'], 'action') ++ log.debug( ++ 'Loaded node data for {0}:\n{1}'.format( ++ vm_['name'], ++ pprint.pformat(node) ++ ) ++ ) ++ except Exception as err: ++ log.error( ++ 'Failed to get nodes list: {0}'.format( ++ err ++ ), ++ # Show the traceback if the debug logging level is enabled ++ exc_info_on_loglevel=logging.DEBUG ++ ) ++ # Trigger a failure in the wait for IP function ++ return False ++ ++ running = node['state'] == NodeState.RUNNING ++ if not running: ++ # Still not running, trigger another iteration ++ return ++ ++ if rackconnect(vm_) is True: ++ check_libcloud_version((0, 14, 0), why='rackconnect: True') ++ extra = node.get('extra') ++ rc_status = extra.get('metadata', {}).get( ++ 'rackconnect_automation_status', '') ++ access_ip = extra.get('access_ip', '') ++ ++ if rc_status != 'DEPLOYED': ++ log.debug('Waiting for Rackconnect automation to complete') ++ return ++ ++ if managedcloud(vm_) is True: ++ extra = node.get('extra') ++ mc_status = extra.get('metadata', {}).get( ++ 'rax_service_level_automation', '') ++ ++ if mc_status != 'Complete': ++ log.debug('Waiting for managed cloud automation to complete') ++ return ++ ++ public = node['public_ips'] ++ if floating: ++ try: ++ name = data.name ++ ip = floating[0].ip_address ++ conn.ex_attach_floating_ip_to_node(data, ip) ++ log.info( ++ 'Attaching floating IP \'{0}\' to node \'{1}\''.format( ++ ip, name ++ ) ++ ) ++ data.public_ips.append(ip) ++ public = data.public_ips ++ except Exception: ++ # Note(pabelanger): Because we loop, we only want to attach the ++ # floating IP address once. 
So, expect failures if the IP is ++ # already attached. ++ pass ++ ++ result = [] ++ private = node['private_ips'] ++ if private and not public: ++ log.warning( ++ 'Private IPs returned, but not public... Checking for ' ++ 'misidentified IPs' ++ ) ++ for private_ip in private: ++ private_ip = preferred_ip(vm_, [private_ip]) ++ if private_ip is False: ++ continue ++ if salt.utils.cloud.is_public_ip(private_ip): ++ log.warning('{0} is a public IP'.format(private_ip)) ++ data.public_ips.append(private_ip) ++ log.warning( ++ 'Public IP address was not ready when we last checked.' ++ ' Appending public IP address now.' ++ ) ++ public = data.public_ips ++ else: ++ log.warning('{0} is a private IP'.format(private_ip)) ++ ignore_ip = ignore_cidr(vm_, private_ip) ++ if private_ip not in data.private_ips and not ignore_ip: ++ result.append(private_ip) ++ ++ if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips': ++ data.public_ips = access_ip ++ return data ++ ++ # populate return data with private_ips ++ # when ssh_interface is set to private_ips and public_ips exist ++ if not result and ssh_interface(vm_) == 'private_ips': ++ for private_ip in private: ++ ignore_ip = ignore_cidr(vm_, private_ip) ++ if private_ip not in data.private_ips and not ignore_ip: ++ result.append(private_ip) ++ ++ if result: ++ log.debug('result = {0}'.format(result)) ++ data.private_ips = result ++ if ssh_interface(vm_) == 'private_ips': ++ return data ++ ++ if public: ++ data.public_ips = public ++ if ssh_interface(vm_) != 'private_ips': ++ return data ++ ++ + def create(vm_): + ''' + Create a single VM from a data dict +@@ -659,122 +772,10 @@ def create(vm_): + # Pull the instance ID, valid for both spot and normal instances + vm_['instance_id'] = data.id + +- def __query_node_data(vm_, data, floating): +- try: +- node = show_instance(vm_['name'], 'action') +- log.debug( +- 'Loaded node data for {0}:\n{1}'.format( +- vm_['name'], +- pprint.pformat(node) +- ) +- ) +- except Exception as err: +- log.error( +- 'Failed to get nodes list: {0}'.format( +- err +- ), +- # Show the traceback if the debug logging level is enabled +- exc_info_on_loglevel=logging.DEBUG +- ) +- # Trigger a failure in the wait for IP function +- return False +- +- running = node['state'] == NodeState.RUNNING +- if not running: +- # Still not running, trigger another iteration +- return +- +- if rackconnect(vm_) is True: +- check_libcloud_version((0, 14, 0), why='rackconnect: True') +- extra = node.get('extra') +- rc_status = extra.get('metadata', {}).get( +- 'rackconnect_automation_status', '') +- access_ip = extra.get('access_ip', '') +- +- if rc_status != 'DEPLOYED': +- log.debug('Waiting for Rackconnect automation to complete') +- return +- +- if managedcloud(vm_) is True: +- extra = node.get('extra') +- mc_status = extra.get('metadata', {}).get( +- 'rax_service_level_automation', '') +- +- if mc_status != 'Complete': +- log.debug('Waiting for managed cloud automation to complete') +- return +- +- public = node['public_ips'] +- if floating: +- try: +- name = data.name +- ip = floating[0].ip_address +- conn.ex_attach_floating_ip_to_node(data, ip) +- log.info( +- 'Attaching floating IP \'{0}\' to node \'{1}\''.format( +- ip, name +- ) +- ) +- data.public_ips.append(ip) +- public = data.public_ips +- except Exception: +- # Note(pabelanger): Because we loop, we only want to attach the +- # floating IP address one. So, expect failures if the IP is +- # already attached. 
+- pass +- +- result = [] +- private = node['private_ips'] +- if private and not public: +- log.warning( +- 'Private IPs returned, but not public... Checking for ' +- 'misidentified IPs' +- ) +- for private_ip in private: +- private_ip = preferred_ip(vm_, [private_ip]) +- if private_ip is False: +- continue +- if salt.utils.cloud.is_public_ip(private_ip): +- log.warning('{0} is a public IP'.format(private_ip)) +- data.public_ips.append(private_ip) +- log.warning( +- 'Public IP address was not ready when we last checked.' +- ' Appending public IP address now.' +- ) +- public = data.public_ips +- else: +- log.warning('{0} is a private IP'.format(private_ip)) +- ignore_ip = ignore_cidr(vm_, private_ip) +- if private_ip not in data.private_ips and not ignore_ip: +- result.append(private_ip) +- +- if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips': +- data.public_ips = access_ip +- return data +- +- # populate return data with private_ips +- # when ssh_interface is set to private_ips and public_ips exist +- if not result and ssh_interface(vm_) == 'private_ips': +- for private_ip in private: +- ignore_ip = ignore_cidr(vm_, private_ip) +- if private_ip not in data.private_ips and not ignore_ip: +- result.append(private_ip) +- +- if result: +- log.debug('result = {0}'.format(result)) +- data.private_ips = result +- if ssh_interface(vm_) == 'private_ips': +- return data +- +- if public: +- data.public_ips = public +- if ssh_interface(vm_) != 'private_ips': +- return data +- + try: + data = salt.utils.cloud.wait_for_ip( +- __query_node_data, +- update_args=(vm_, data, vm_['floating']), ++ _query_node_data, ++ update_args=(vm_, data, vm_['floating'], conn), + timeout=config.get_cloud_config_value( + 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), + interval=config.get_cloud_config_value( +diff --git a/tests/unit/cloud/clouds/__init__.py b/tests/unit/cloud/clouds/__init__.py +index 40a96afc6f..15d1e2c5c6 100644 +--- a/tests/unit/cloud/clouds/__init__.py ++++ b/tests/unit/cloud/clouds/__init__.py +@@ -1 +1,18 @@ + # -*- coding: utf-8 -*- ++ ++ ++def _preferred_ip(ip_set, preferred=None): ++ ''' ++ Returns a function that decides which IP is preferred ++ :param ip_set: ++ :param preferred: ++ :return: ++ ''' ++ ++ def _ip_decider(vm, ips): ++ for ip in ips: ++ if ip in preferred: ++ return ip ++ return False ++ ++ return _ip_decider +diff --git a/tests/unit/cloud/clouds/dimensiondata_test.py b/tests/unit/cloud/clouds/dimensiondata_test.py +index b4ea7f57f5..9f92fd7dbe 100644 +--- a/tests/unit/cloud/clouds/dimensiondata_test.py ++++ b/tests/unit/cloud/clouds/dimensiondata_test.py +@@ -25,6 +25,7 @@ from salt.exceptions import SaltCloudSystemExit + from salttesting import TestCase, skipIf + from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch + from salttesting.helpers import ensure_in_syspath ++from tests.unit.cloud.clouds import _preferred_ip + + ensure_in_syspath('../../../') + +@@ -48,7 +49,7 @@ VM_NAME = 'winterfell' + try: + import certifi + libcloud.security.CA_CERTS_PATH.append(certifi.where()) +-except ImportError: ++except (ImportError, NameError): + pass + + +@@ -129,6 +130,7 @@ class DimensionDataTestCase(ExtendedTestCase): + call='function' + ) + ++ @skipIf(HAS_LIBCLOUD is False, "Install 'libcloud' to be able to run this unit test.") + def test_avail_sizes(self): + ''' + Tests that avail_sizes returns an empty dictionary. 
+@@ -160,6 +162,30 @@ class DimensionDataTestCase(ExtendedTestCase): + p = dimensiondata.get_configured_provider() + self.assertNotEqual(p, None) + ++ PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2'] ++ ++ @patch('salt.cloud.clouds.dimensiondata.show_instance', ++ MagicMock(return_value={'state': True, ++ 'name': 'foo', ++ 'public_ips': [], ++ 'private_ips': PRIVATE_IPS})) ++ @patch('salt.cloud.clouds.dimensiondata.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0'])) ++ @patch('salt.cloud.clouds.dimensiondata.ssh_interface', MagicMock(return_value='private_ips')) ++ def test_query_node_data_filter_preferred_ip_addresses(self): ++ ''' ++ Test if query node data is filtering out unpreferred IP addresses. ++ ''' ++ dimensiondata.NodeState = MagicMock() ++ dimensiondata.NodeState.RUNNING = True ++ dimensiondata.__opts__ = {} ++ ++ vm = {'name': None} ++ data = MagicMock() ++ data.public_ips = [] ++ ++ assert dimensiondata._query_node_data(vm, data).public_ips == ['0.0.0.0'] ++ ++ + if __name__ == '__main__': + from integration import run_tests + run_tests(DimensionDataTestCase, needs_daemon=False) +diff --git a/tests/unit/cloud/clouds/nova_test.py b/tests/unit/cloud/clouds/nova_test.py +new file mode 100644 +index 0000000000..c44c0bd507 +--- /dev/null ++++ b/tests/unit/cloud/clouds/nova_test.py +@@ -0,0 +1,43 @@ ++# -*- coding: utf-8 -*- ++''' ++ :codeauthor: :email:`Bo Maryniuk ` ++''' ++ ++# Import Python libs ++from __future__ import absolute_import ++ ++# Import Salt Testing Libs ++from salttesting import TestCase ++from salt.cloud.clouds import nova ++from salttesting.mock import MagicMock, patch ++from tests.unit.cloud.clouds import _preferred_ip ++ ++ ++class NovaTestCase(TestCase): ++ ''' ++ Test case for nova ++ ''' ++ PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2'] ++ ++ @patch('salt.cloud.clouds.nova.show_instance', ++ MagicMock(return_value={'state': 'ACTIVE', ++ 'public_ips': [], ++ 'addresses': [], ++ 'private_ips': PRIVATE_IPS})) ++ @patch('salt.cloud.clouds.nova.rackconnect', MagicMock(return_value=False)) ++ @patch('salt.cloud.clouds.nova.rackconnectv3', MagicMock(return_value={'mynet': ['1.1.1.1']})) ++ @patch('salt.cloud.clouds.nova.cloudnetwork', MagicMock(return_value=False)) ++ @patch('salt.cloud.clouds.nova.managedcloud', MagicMock(return_value=False)) ++ @patch('salt.cloud.clouds.nova.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0'])) ++ @patch('salt.cloud.clouds.nova.ssh_interface', MagicMock(return_value='public_ips')) ++ def test_query_node_data_filter_preferred_ip_addresses(self): ++ ''' ++ Test if query node data is filtering out unpreferred IP addresses. 
++ ''' ++ nova.__opts__ = {} ++ ++ vm = {'name': None} ++ data = MagicMock() ++ data.public_ips = [] ++ ++ assert nova._query_node_data(vm, data, MagicMock()).public_ips == ['0.0.0.0'] +diff --git a/tests/unit/cloud/clouds/openstack_test.py b/tests/unit/cloud/clouds/openstack_test.py +new file mode 100644 +index 0000000000..9e70e3874a +--- /dev/null ++++ b/tests/unit/cloud/clouds/openstack_test.py +@@ -0,0 +1,43 @@ ++# -*- coding: utf-8 -*- ++''' ++ :codeauthor: :email:`Bo Maryniuk ` ++''' ++ ++# Import Python libs ++from __future__ import absolute_import ++ ++# Import Salt Testing Libs ++from salttesting import TestCase ++from salt.cloud.clouds import openstack ++from salttesting.mock import MagicMock, patch ++from tests.unit.cloud.clouds import _preferred_ip ++ ++ ++class OpenstackTestCase(TestCase): ++ ''' ++ Test case for openstack ++ ''' ++ PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2'] ++ ++ @patch('salt.cloud.clouds.openstack.show_instance', ++ MagicMock(return_value={'state': True, ++ 'public_ips': [], ++ 'private_ips': PRIVATE_IPS})) ++ @patch('salt.cloud.clouds.openstack.rackconnect', MagicMock(return_value=False)) ++ @patch('salt.cloud.clouds.openstack.managedcloud', MagicMock(return_value=False)) ++ @patch('salt.cloud.clouds.openstack.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0'])) ++ @patch('salt.cloud.clouds.openstack.ssh_interface', MagicMock(return_value=False)) ++ def test_query_node_data_filter_preferred_ip_addresses(self): ++ ''' ++ Test if query node data is filtering out unpreferred IP addresses. ++ ''' ++ openstack.NodeState = MagicMock() ++ openstack.NodeState.RUNNING = True ++ openstack.__opts__ = {} ++ ++ vm = {'name': None} ++ data = MagicMock() ++ data.public_ips = [] ++ ++ with patch('salt.utils.cloud.is_public_ip', MagicMock(return_value=True)): ++ assert openstack._query_node_data(vm, data, False, MagicMock()).public_ips == ['0.0.0.0'] +-- +2.11.0 + + diff --git a/adding-support-for-installing-patches-in-yum-dnf-exe.patch b/adding-support-for-installing-patches-in-yum-dnf-exe.patch new file mode 100644 index 0000000..9a3295e --- /dev/null +++ b/adding-support-for-installing-patches-in-yum-dnf-exe.patch @@ -0,0 +1,991 @@ +From 55ac73ad201d8a23ddc8e44b5310343e99562610 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= + +Date: Tue, 21 Mar 2017 11:10:06 +0000 +Subject: [PATCH] Adding support for installing patches in yum/dnf + execution module + +Adding support for installing patches in Zypper module + +Adding list_downloaded function to Zypper module + +Adding list_downloaded function to Yum module + +Adding new pkg.downloaded state + +Adding documentation for pkg.downloaded state + +Adding pkg.patched and pkg.patch_downloaded states + +Check targets for advisory patches installation + +Adds support for listing advisory patches with Zypper + +Adds support for listing advisory patches with Yum + +Improving function naming + +Moving advisory ids checks into pkg_resource.parse_targets + +Fixes _find_download_targets to call _preflight_check + +Fixes parse_targets when advisory id is passed as name + +Pylint fixes + +Enables pkg.downloaded to verify packages after package manager call. 
+ +Adding missing kwargs parameters to pkg.install call + +Adding versionadded flags + +Some refactoring and minor fixes + +Adding unit tests for Zypper execution module + +Adding more unit tests for Zypper module + +Pylint fix +--- + salt/modules/pkg_resource.py | 9 + + salt/modules/yumpkg.py | 108 +++++++- + salt/modules/zypper.py | 91 ++++++- + salt/states/pkg.py | 420 +++++++++++++++++++++++++++++ + tests/unit/modules/zypp/zypper-patches.xml | 10 + + tests/unit/modules/zypper_test.py | 119 ++++++++ + 6 files changed, 751 insertions(+), 6 deletions(-) + create mode 100644 tests/unit/modules/zypp/zypper-patches.xml + +diff --git a/salt/modules/pkg_resource.py b/salt/modules/pkg_resource.py +index 1df9307..ad0f0ff 100644 +--- a/salt/modules/pkg_resource.py ++++ b/salt/modules/pkg_resource.py +@@ -119,6 +119,15 @@ def parse_targets(name=None, + log.error('Only one of "pkgs" and "sources" can be used.') + return None, None + ++ elif 'advisory_ids' in kwargs: ++ if pkgs: ++ log.error('Cannot use "advisory_ids" and "pkgs" at the same time') ++ return None, None ++ elif kwargs['advisory_ids']: ++ return kwargs['advisory_ids'], 'advisory' ++ else: ++ return [name], 'advisory' ++ + elif pkgs: + pkgs = _repack_pkgs(pkgs, normalize=normalize) + if not pkgs: +diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py +index d67bbe0..f6777d7 100644 +--- a/salt/modules/yumpkg.py ++++ b/salt/modules/yumpkg.py +@@ -19,6 +19,7 @@ from __future__ import absolute_import + import contextlib + import copy + import fnmatch ++import glob + import itertools + import logging + import os +@@ -800,6 +801,27 @@ def list_upgrades(refresh=True, **kwargs): + list_updates = salt.utils.alias_function(list_upgrades, 'list_updates') + + ++def list_downloaded(): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List prefetched packages downloaded by Yum in the local disk. ++ ++ CLI example: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_downloaded ++ ''' ++ CACHE_DIR = os.path.join('/var/cache/', _yum()) ++ ++ ret = {} ++ for package_path in glob.glob(os.path.join(CACHE_DIR, '*/*/*/packages/*.rpm')): ++ pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path) ++ ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = package_path ++ return ret ++ ++ + def info_installed(*names): + ''' + .. 
versionadded:: 2015.8.1 +@@ -1082,10 +1104,10 @@ def install(name=None, + log.warning('"version" parameter will be ignored for multiple ' + 'package targets') + +- old = list_pkgs(versions_as_list=False) ++ old = list_pkgs(versions_as_list=False) if not downloadonly else list_downloaded() + # Use of __context__ means no duplicate work here, just accessing + # information already in __context__ from the previous call to list_pkgs() +- old_as_list = list_pkgs(versions_as_list=True) ++ old_as_list = list_pkgs(versions_as_list=True) if not downloadonly else list_downloaded() + + to_install = [] + to_downgrade = [] +@@ -1108,6 +1130,16 @@ def install(name=None, + + if pkg_type == 'repository': + pkg_params_items = six.iteritems(pkg_params) ++ elif pkg_type == 'advisory': ++ pkg_params_items = [] ++ cur_patches = list_patches() ++ for advisory_id in pkg_params: ++ if advisory_id not in cur_patches: ++ raise CommandExecutionError( ++ 'Advisory id "{0}" not found'.format(advisory_id) ++ ) ++ else: ++ pkg_params_items.append(advisory_id) + else: + pkg_params_items = [] + for pkg_source in pkg_params: +@@ -1131,6 +1163,9 @@ def install(name=None, + for pkg_item_list in pkg_params_items: + if pkg_type == 'repository': + pkgname, version_num = pkg_item_list ++ elif pkg_type == 'advisory': ++ pkgname = pkg_item_list ++ version_num = None + else: + try: + pkgname, pkgpath, version_num = pkg_item_list +@@ -1145,6 +1180,8 @@ def install(name=None, + to_reinstall.append((pkgname, pkgname)) + else: + to_install.append((pkgname, pkgname)) ++ elif pkg_type == 'advisory': ++ to_install.append((pkgname, pkgname)) + else: + to_install.append((pkgname, pkgpath)) + else: +@@ -1291,6 +1328,8 @@ def install(name=None, + targets = [] + with _temporarily_unhold(to_install, targets): + if targets: ++ if pkg_type == 'advisory': ++ targets = ["--advisory={0}".format(t) for t in targets] + cmd = [] + if salt.utils.systemd.has_scope(__context__) \ + and __salt__['config.get']('systemd.scope', True): +@@ -1299,7 +1338,7 @@ def install(name=None, + if _yum() == 'dnf': + cmd.extend(['--best', '--allowerasing']) + _add_common_args(cmd) +- cmd.append('install') ++ cmd.append('install' if pkg_type is not 'advisory' else 'update') + cmd.extend(targets) + out = __salt__['cmd.run_all']( + cmd, +@@ -1351,7 +1390,7 @@ def install(name=None, + errors.append(out['stdout']) + + __context__.pop('pkg.list_pkgs', None) +- new = list_pkgs(versions_as_list=False) ++ new = list_pkgs(versions_as_list=False) if not downloadonly else list_downloaded() + + ret = salt.utils.compare_dicts(old, new) + +@@ -2757,3 +2796,64 @@ def diff(*paths): + local_pkgs[pkg]['path'], path) or 'Unchanged' + + return ret ++ ++ ++def _get_patches(installed_only=False): ++ ''' ++ List all known patches in repos. ++ ''' ++ patches = {} ++ ++ cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'security', 'all'] ++ ret = __salt__['cmd.run_stdout']( ++ cmd, ++ python_shell=False ++ ) ++ for line in salt.utils.itertools.split(ret, os.linesep): ++ inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)', ++ line).groups() ++ if inst != 'i' and installed_only: ++ continue ++ patches[advisory_id] = { ++ 'installed': True if inst == 'i' else False, ++ 'summary': pkg ++ } ++ return patches ++ ++ ++def list_patches(refresh=False): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List all known advisory patches from available repos. ++ ++ refresh ++ force a refresh if set to True. ++ If set to False (default) it depends on yum if a refresh is ++ executed. 
++ ++ CLI Examples: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_patches ++ ''' ++ if refresh: ++ refresh_db() ++ ++ return _get_patches() ++ ++ ++def list_installed_patches(): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List installed advisory patches on the system. ++ ++ CLI Examples: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_installed_patches ++ ''' ++ return _get_patches(installed_only=True) +diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py +index 75e529c..28087f5 100644 +--- a/salt/modules/zypper.py ++++ b/salt/modules/zypper.py +@@ -15,6 +15,7 @@ Package support for openSUSE via the zypper package manager + # Import python libs + from __future__ import absolute_import + import copy ++import glob + import logging + import re + import os +@@ -1029,10 +1030,18 @@ def install(name=None, + for problem in problems: + log.error(problem) + return {} ++ elif pkg_type == 'advisory': ++ targets = [] ++ cur_patches = list_patches() ++ for advisory_id in pkg_params: ++ if advisory_id not in cur_patches: ++ raise CommandExecutionError('Advisory id "{0}" not found'.format(advisory_id)) ++ else: ++ targets.append(advisory_id) + else: + targets = pkg_params + +- old = list_pkgs() ++ old = list_pkgs() if not downloadonly else list_downloaded() + downgrades = [] + if fromrepo: + fromrepoopt = ['--force', '--force-resolution', '--from', fromrepo] +@@ -1050,6 +1059,8 @@ def install(name=None, + cmd_install.extend(fromrepoopt) + + errors = [] ++ if pkg_type == 'advisory': ++ targets = ["patch:{0}".format(t) for t in targets] + + # Split the targets into batches of 500 packages each, so that + # the maximal length of the command line is not broken +@@ -1068,7 +1079,7 @@ def install(name=None, + __zypper__(no_repo_failure=ignore_repo_failure).call(*cmd) + + __context__.pop('pkg.list_pkgs', None) +- new = list_pkgs() ++ new = list_pkgs() if not downloadonly else list_downloaded() + ret = salt.utils.compare_dicts(old, new) + + if errors: +@@ -1771,6 +1782,28 @@ def download(*packages, **kwargs): + ) + + ++def list_downloaded(): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List prefetched packages downloaded by Zypper in the local disk. ++ ++ CLI example: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_downloaded ++ ''' ++ CACHE_DIR = '/var/cache/zypp/packages/' ++ ++ ret = {} ++ # Zypper storage is repository_tag/arch/package-version.rpm ++ for package_path in glob.glob(os.path.join(CACHE_DIR, '*/*/*.rpm')): ++ pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path) ++ ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = package_path ++ return ret ++ ++ + def diff(*paths): + ''' + Return a formatted diff between current files and original in a package. +@@ -1808,3 +1841,57 @@ def diff(*paths): + ) or 'Unchanged' + + return ret ++ ++ ++def _get_patches(installed_only=False): ++ ''' ++ List all known patches in repos. ++ ''' ++ patches = {} ++ for element in __zypper__.nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'): ++ installed = element.getAttribute('status') == 'installed' ++ if (installed_only and installed) or not installed_only: ++ patches[element.getAttribute('name')] = { ++ 'installed': installed, ++ 'summary': element.getAttribute('summary'), ++ } ++ ++ return patches ++ ++ ++def list_patches(refresh=False): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List all known advisory patches from available repos. ++ ++ refresh ++ force a refresh if set to True. ++ If set to False (default) it depends on zypper if a refresh is ++ executed. 
++ ++ CLI Examples: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_patches ++ ''' ++ if refresh: ++ refresh_db() ++ ++ return _get_patches() ++ ++ ++def list_installed_patches(): ++ ''' ++ .. versionadded:: Oxygen ++ ++ List installed advisory patches on the system. ++ ++ CLI Examples: ++ ++ .. code-block:: bash ++ ++ salt '*' pkg.list_installed_patches ++ ''' ++ return _get_patches(installed_only=True) +diff --git a/salt/states/pkg.py b/salt/states/pkg.py +index 7c3b27b..d185002 100644 +--- a/salt/states/pkg.py ++++ b/salt/states/pkg.py +@@ -213,6 +213,171 @@ def _find_unpurge_targets(desired): + ] + + ++def _find_download_targets(name=None, ++ version=None, ++ pkgs=None, ++ normalize=True, ++ skip_suggestions=False, ++ ignore_epoch=False, ++ **kwargs): ++ ''' ++ Inspect the arguments to pkg.downloaded and discover what packages need to ++ be downloaded. Return a dict of packages to download. ++ ''' ++ cur_pkgs = __salt__['pkg.list_downloaded']() ++ if pkgs: ++ to_download = _repack_pkgs(pkgs, normalize=normalize) ++ ++ if not to_download: ++ # Badly-formatted SLS ++ return {'name': name, ++ 'changes': {}, ++ 'result': False, ++ 'comment': 'Invalidly formatted pkgs parameter. See ' ++ 'minion log.'} ++ else: ++ if normalize: ++ _normalize_name = \ ++ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) ++ to_download = {_normalize_name(name): version} ++ else: ++ to_download = {name: version} ++ ++ cver = cur_pkgs.get(name, {}) ++ if name in to_download: ++ # Package already downloaded, no need to download again ++ if cver and version in cver: ++ return {'name': name, ++ 'changes': {}, ++ 'result': True, ++ 'comment': 'Version {0} of package \'{1}\' is already ' ++ 'downloaded'.format(version, name)} ++ ++ # if cver is not an empty string, the package is already downloaded ++ elif cver and version is None: ++ # The package is downloaded ++ return {'name': name, ++ 'changes': {}, ++ 'result': True, ++ 'comment': 'Package {0} is already ' ++ 'downloaded'.format(name)} ++ ++ version_spec = False ++ if not skip_suggestions: ++ try: ++ problems = _preflight_check(to_download, **kwargs) ++ except CommandExecutionError: ++ pass ++ else: ++ comments = [] ++ if problems.get('no_suggest'): ++ comments.append( ++ 'The following package(s) were not found, and no ' ++ 'possible matches were found in the package db: ' ++ '{0}'.format( ++ ', '.join(sorted(problems['no_suggest'])) ++ ) ++ ) ++ if problems.get('suggest'): ++ for pkgname, suggestions in \ ++ six.iteritems(problems['suggest']): ++ comments.append( ++ 'Package \'{0}\' not found (possible matches: ' ++ '{1})'.format(pkgname, ', '.join(suggestions)) ++ ) ++ if comments: ++ if len(comments) > 1: ++ comments.append('') ++ return {'name': name, ++ 'changes': {}, ++ 'result': False, ++ 'comment': '. 
'.join(comments).rstrip()} ++ ++ # Find out which packages will be targeted in the call to pkg.download ++ # Check current downloaded versions against specified versions ++ targets = {} ++ problems = [] ++ for pkgname, pkgver in six.iteritems(to_download): ++ cver = cur_pkgs.get(pkgname, {}) ++ # Package not yet downloaded, so add to targets ++ if not cver: ++ targets[pkgname] = pkgver ++ continue ++ # No version specified but package is already downloaded ++ elif cver and not pkgver: ++ continue ++ ++ version_spec = True ++ try: ++ oper, verstr = _get_comparison_spec(pkgver) ++ except CommandExecutionError as exc: ++ problems.append(exc.strerror) ++ continue ++ ++ if not _fulfills_version_spec(cver.keys(), oper, verstr, ++ ignore_epoch=ignore_epoch): ++ targets[pkgname] = pkgver ++ ++ if problems: ++ return {'name': name, ++ 'changes': {}, ++ 'result': False, ++ 'comment': ' '.join(problems)} ++ ++ if not targets: ++ # All specified packages are already downloaded ++ msg = ( ++ 'All specified packages{0} are already downloaded' ++ .format(' (matching specified versions)' if version_spec else '') ++ ) ++ return {'name': name, ++ 'changes': {}, ++ 'result': True, ++ 'comment': msg} ++ ++ return targets ++ ++ ++def _find_advisory_targets(name=None, ++ advisory_ids=None, ++ **kwargs): ++ ''' ++ Inspect the arguments to pkg.patch_installed and discover what advisory ++ patches need to be installed. Return a dict of advisory patches to install. ++ ''' ++ cur_patches = __salt__['pkg.list_installed_patches']() ++ if advisory_ids: ++ to_download = advisory_ids ++ else: ++ to_download = [name] ++ if cur_patches.get(name, {}): ++ # Advisory patch already installed, no need to install it again ++ return {'name': name, ++ 'changes': {}, ++ 'result': True, ++ 'comment': 'Advisory patch {0} is already ' ++ 'installed'.format(name)} ++ ++ # Find out which advisory patches will be targeted in the call to pkg.install ++ targets = [] ++ for patch_name in to_download: ++ cver = cur_patches.get(patch_name, {}) ++ # Advisory patch not yet installed, so add to targets ++ if not cver: ++ targets.append(patch_name) ++ continue ++ ++ if not targets: ++ # All specified advisory patches are already installed ++ msg = ('All specified advisory patches are already installed') ++ return {'name': name, ++ 'changes': {}, ++ 'result': True, ++ 'comment': msg} ++ ++ return targets ++ ++ + def _find_remove_targets(name=None, + version=None, + pkgs=None, +@@ -1700,6 +1865,261 @@ def installed( + return ret + + ++def downloaded(name, ++ version=None, ++ pkgs=None, ++ fromrepo=None, ++ ignore_epoch=None, ++ **kwargs): ++ ''' ++ .. versionadded:: Oxygen ++ ++ Ensure that the package is downloaded, and that it is the correct version ++ (if specified). ++ ++ Currently supported for the following pkg providers: ++ :mod:`yumpkg ` and :mod:`zypper ` ++ ++ :param str name: ++ The name of the package to be downloaded. This parameter is ignored if ++ "pkgs" is used. Additionally, please note that this option can ++ only be used to download packages from a software repository. ++ ++ :param str version: ++ Download a specific version of a package. ++ ++ .. important:: ++ As of version 2015.8.7, for distros which use yum/dnf, packages ++ which have a version with a nonzero epoch (that is, versions which ++ start with a number followed by a colon) must have the epoch included ++ when specifying the version number. For example: ++ ++ .. 
code-block:: yaml ++ ++ vim-enhanced: ++ pkg.downloaded: ++ - version: 2:7.4.160-1.el7 ++ ++ An **ignore_epoch** argument has been added which causes the ++ epoch to be disregarded when the state checks to see if the desired ++ version was installed. ++ ++ You can install a specific version when using the ``pkgs`` argument by ++ including the version after the package: ++ ++ .. code-block:: yaml ++ ++ common_packages: ++ pkg.downloaded: ++ - pkgs: ++ - unzip ++ - dos2unix ++ - salt-minion: 2015.8.5-1.el6 ++ ++ CLI Example: ++ ++ .. code-block:: yaml ++ ++ zsh: ++ pkg.downloaded: ++ - version: 5.0.5-4.63 ++ - fromrepo: "myrepository" ++ ''' ++ ret = {'name': name, ++ 'changes': {}, ++ 'result': None, ++ 'comment': ''} ++ ++ if 'pkg.list_downloaded' not in __salt__: ++ ret['result'] = False ++ ret['comment'] = 'The pkg.downloaded state is not available on ' \ ++ 'this platform' ++ return ret ++ ++ if isinstance(pkgs, list) and len(pkgs) == 0: ++ ret['result'] = True ++ ret['comment'] = 'No packages to download provided' ++ return ret ++ ++ # It doesn't make sense here to receive 'downloadonly' as kwargs ++ # as we're explicitly passing 'downloadonly=True' to the execution module. ++ if 'downloadonly' in kwargs: ++ del kwargs['downloadonly'] ++ ++ # Only downloading not yet downloaded packages ++ targets = _find_download_targets(name, ++ version, ++ pkgs, ++ fromrepo=fromrepo, ++ ignore_epoch=ignore_epoch, ++ **kwargs) ++ if isinstance(targets, dict) and 'result' in targets: ++ return targets ++ elif not isinstance(targets, dict): ++ ret['result'] = False ++ ret['comment'] = 'An error was encountered while checking targets: ' \ ++ '{0}'.format(targets) ++ return ret ++ ++ if __opts__['test']: ++ summary = ', '.join(targets) ++ ret['comment'] = 'The following packages would be ' \ ++ 'downloaded: {0}'.format(summary) ++ return ret ++ ++ try: ++ pkg_ret = __salt__['pkg.install'](name=name, ++ pkgs=pkgs, ++ version=version, ++ downloadonly=True, ++ fromrepo=fromrepo, ++ ignore_epoch=ignore_epoch, ++ **kwargs) ++ ret['result'] = True ++ ret['changes'].update(pkg_ret) ++ except CommandExecutionError as exc: ++ ret = {'name': name, 'result': False} ++ if exc.info: ++ # Get information for state return from the exception. ++ ret['changes'] = exc.info.get('changes', {}) ++ ret['comment'] = exc.strerror_without_changes ++ else: ++ ret['changes'] = {} ++ ret['comment'] = 'An error was encountered while downloading ' \ ++ 'package(s): {0}'.format(exc) ++ return ret ++ ++ new_pkgs = __salt__['pkg.list_downloaded']() ++ ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) ++ ++ if failed: ++ summary = ', '.join([_get_desired_pkg(x, targets) ++ for x in failed]) ++ ret['result'] = False ++ ret['comment'] = 'The following packages failed to ' \ ++ 'download: {0}'.format(summary) ++ ++ if not ret['changes'] and not ret['comment']: ++ ret['result'] = True ++ ret['comment'] = 'Packages are already downloaded: ' \ ++ '{0}'.format(', '.join(targets)) ++ ++ return ret ++ ++ ++def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ++ ''' ++ .. versionadded:: Oxygen ++ ++ Ensure that packages related to certain advisory ids are installed. ++ ++ Currently supported for the following pkg providers: ++ :mod:`yumpkg ` and :mod:`zypper ` ++ ++ CLI Example: ++ ++ .. 
++    .. code-block:: yaml
++
++        issue-foo-fixed:
++          pkg.patch_installed:
++            - advisory_ids:
++              - SUSE-SLE-SERVER-12-SP2-2017-185
++              - SUSE-SLE-SERVER-12-SP2-2017-150
++              - SUSE-SLE-SERVER-12-SP2-2017-120
++    '''
++    ret = {'name': name,
++           'changes': {},
++           'result': None,
++           'comment': ''}
++
++    if 'pkg.list_patches' not in __salt__:
++        ret['result'] = False
++        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
++                         'this platform'
++        return ret
++
++    if isinstance(advisory_ids, list) and len(advisory_ids) == 0:
++        ret['result'] = True
++        ret['comment'] = 'No advisory ids provided'
++        return ret
++
++    # Only target advisory patches that are not yet installed
++    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
++    if isinstance(targets, dict) and 'result' in targets:
++        return targets
++    elif not isinstance(targets, list):
++        ret['result'] = False
++        ret['comment'] = 'An error was encountered while checking targets: ' \
++                         '{0}'.format(targets)
++        return ret
++
++    if __opts__['test']:
++        summary = ', '.join(targets)
++        ret['comment'] = 'The following advisory patches would be ' \
++                         'downloaded: {0}'.format(summary)
++        return ret
++
++    try:
++        pkg_ret = __salt__['pkg.install'](name=name,
++                                          advisory_ids=advisory_ids,
++                                          downloadonly=downloadonly,
++                                          **kwargs)
++        ret['result'] = True
++        ret['changes'].update(pkg_ret)
++    except CommandExecutionError as exc:
++        ret = {'name': name, 'result': False}
++        if exc.info:
++            # Get information for state return from the exception.
++            ret['changes'] = exc.info.get('changes', {})
++            ret['comment'] = exc.strerror_without_changes
++        else:
++            ret['changes'] = {}
++            ret['comment'] = ('An error was encountered while downloading '
++                              'package(s): {0}'.format(exc))
++        return ret
++
++    if not ret['changes'] and not ret['comment']:
++        status = 'downloaded' if downloadonly else 'installed'
++        ret['result'] = True
++        ret['comment'] = 'Related packages are already {}'.format(status)
++
++    return ret
++
++
++def patch_downloaded(name, advisory_ids=None, **kwargs):
++    '''
++    .. versionadded:: Oxygen
++
++    Ensure that packages related to certain advisory ids are downloaded.
++
++    Currently supported for the following pkg providers:
++    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`
++
++    CLI Example:
++
++    .. code-block:: yaml
++
++        preparing-to-fix-issues:
++          pkg.patch_downloaded:
++            - advisory_ids:
++              - SUSE-SLE-SERVER-12-SP2-2017-185
++              - SUSE-SLE-SERVER-12-SP2-2017-150
++              - SUSE-SLE-SERVER-12-SP2-2017-120
++    '''
++    if 'pkg.list_patches' not in __salt__:
++        return {'name': name,
++                'result': False,
++                'changes': {},
++                'comment': 'The pkg.patch_downloaded state is not available on '
++                           'this platform'}
++
++    # It doesn't make sense here to receive 'downloadonly' as a kwarg,
++    # as we're explicitly passing 'downloadonly=True' to the execution module.
++    if 'downloadonly' in kwargs:
++        del kwargs['downloadonly']
++    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)
++
++
+ def latest(
+         name,
+         refresh=None,
+diff --git a/tests/unit/modules/zypp/zypper-patches.xml b/tests/unit/modules/zypp/zypper-patches.xml
+new file mode 100644
+index 0000000..2088634
+--- /dev/null
++++ b/tests/unit/modules/zypp/zypper-patches.xml
+@@ -0,0 +1,10 @@
++<?xml version='1.0'?>
++<stream>
++<search-result version="0.0">
++<solvable-list>
++<solvable status="not-installed" name="SUSE-SLE-SERVER-12-SP2-2017-97" kind="patch" summary="Recommended update for ovmf"/>
++<solvable status="installed" name="SUSE-SLE-SERVER-12-SP2-2017-98" kind="patch" summary="Recommended update for kmod"/>
++<solvable status="not-installed" name="SUSE-SLE-SERVER-12-SP2-2017-99" kind="patch" summary="Security update for apache2"/>
++</solvable-list>
++</search-result>
++</stream>
+diff --git a/tests/unit/modules/zypper_test.py b/tests/unit/modules/zypper_test.py
+index c29d12c..39bd2e7 100644
+--- a/tests/unit/modules/zypper_test.py
++++ b/tests/unit/modules/zypper_test.py
+@@ -462,6 +462,48 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+ self.assertTrue(pkgs.get(pkg_name)) + self.assertEqual(pkgs[pkg_name], pkg_version) + ++ def test_list_patches(self): ++ ''' ++ Test advisory patches listing. ++ ++ :return: ++ ''' ++ ++ ref_out = { ++ 'stdout': get_test_data('zypper-patches.xml'), ++ 'stderr': None, ++ 'retcode': 0 ++ } ++ ++ PATCHES_RET = { ++ 'SUSE-SLE-SERVER-12-SP2-2017-97': {'installed': False, 'summary': 'Recommended update for ovmf'}, ++ 'SUSE-SLE-SERVER-12-SP2-2017-98': {'installed': True, 'summary': 'Recommended update for kmod'}, ++ 'SUSE-SLE-SERVER-12-SP2-2017-99': {'installed': False, 'summary': 'Security update for apache2'} ++ } ++ ++ with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=ref_out)}): ++ list_patches = zypper.list_patches(refresh=False) ++ self.assertEqual(len(list_patches), 3) ++ self.assertDictEqual(list_patches, PATCHES_RET) ++ ++ @patch('glob.glob', MagicMock(return_value=['/var/cache/zypper/packages/foo/bar/test_package.rpm'])) ++ def test_list_downloaded(self): ++ ''' ++ Test downloaded packages listing. ++ ++ :return: ++ ''' ++ DOWNLOADED_RET = { ++ 'test-package': { ++ '1.0': '/var/cache/zypper/packages/foo/bar/test_package.rpm' ++ } ++ } ++ ++ with patch.dict(zypper.__salt__, {'lowpkg.bin_pkg_info': MagicMock(return_value={'name': 'test-package', 'version': '1.0'})}): ++ list_downloaded = zypper.list_downloaded() ++ self.assertEqual(len(list_downloaded), 1) ++ self.assertDictEqual(list_downloaded, DOWNLOADED_RET) ++ + def test_download(self): + ''' + Test package download +@@ -487,6 +529,83 @@ Repository 'DUMMY' not found by its alias, number, or URI. + test_out['_error'] = "The following package(s) failed to download: foo" + self.assertEqual(zypper.download("nmap", "foo"), test_out) + ++ @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False)) ++ @patch('salt.modules.zypper.list_downloaded', MagicMock(side_effect=[{}, {'vim': {'1.1': '/foo/bar/test.rpm'}}])) ++ def test_install_with_downloadonly(self): ++ ''' ++ Test a package installation with downloadonly=True. ++ ++ :return: ++ ''' ++ with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None}, 'repository'))}): ++ with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock: ++ ret = zypper.install(pkgs=['vim'], downloadonly=True) ++ zypper_mock.assert_called_once_with( ++ '--no-refresh', ++ 'install', ++ '--name', ++ '--auto-agree-with-licenses', ++ '--download-only', ++ 'vim' ++ ) ++ self.assertDictEqual(ret, {'vim': {'new': {'1.1': '/foo/bar/test.rpm'}, 'old': ''}}) ++ ++ @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False)) ++ @patch('salt.modules.zypper.list_downloaded', MagicMock(return_value={'vim': {'1.1': '/foo/bar/test.rpm'}})) ++ def test_install_with_downloadonly_already_downloaded(self): ++ ''' ++ Test a package installation with downloadonly=True when package is already downloaded. 
++
++        :return:
++        '''
++        with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None}, 'repository'))}):
++            with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
++                ret = zypper.install(pkgs=['vim'], downloadonly=True)
++                zypper_mock.assert_called_once_with(
++                    '--no-refresh',
++                    'install',
++                    '--name',
++                    '--auto-agree-with-licenses',
++                    '--download-only',
++                    'vim'
++                )
++                self.assertDictEqual(ret, {})
++
++    @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
++    @patch('salt.modules.zypper._get_patches', MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
++    @patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]))
++    def test_install_advisory_patch_ok(self):
++        '''
++        Test successful advisory patch installation.
++
++        :return:
++        '''
++        with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-1234': None}, 'advisory'))}):
++            with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
++                ret = zypper.install(advisory_ids=['SUSE-PATCH-1234'])
++                zypper_mock.assert_called_once_with(
++                    '--no-refresh',
++                    'install',
++                    '--name',
++                    '--auto-agree-with-licenses',
++                    'patch:SUSE-PATCH-1234'
++                )
++                self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
++
++    @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
++    @patch('salt.modules.zypper._get_patches', MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
++    @patch('salt.modules.zypper.list_pkgs', MagicMock(return_value={"vim": "1.1"}))
++    def test_install_advisory_patch_failure(self):
++        '''
++        Test failing advisory patch installation because patch does not exist.
++
++        :return:
++        '''
++        with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-XXX': None}, 'advisory'))}):
++            with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
++                with self.assertRaisesRegex(CommandExecutionError, '^Advisory id "SUSE-PATCH-XXX" not found$'):
++                    zypper.install(advisory_ids=['SUSE-PATCH-XXX'])
++
+     def test_remove_purge(self):
+         '''
+         Test package removal
+--
+2.10.1
+
+
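The states added by the patch above are meant to compose into a prefetch-then-apply workflow. A minimal SLS sketch of that composition, assuming a minion backed by one of the two supported providers (yumpkg or zypper); the package names and the advisory ID are the illustrative values from the docstrings above, not real targets:

    # Stage packages ahead of a maintenance window; nothing is installed,
    # since pkg.downloaded drives pkg.install with downloadonly=True.
    stage_packages:
      pkg.downloaded:
        - pkgs:
          - unzip
          - dos2unix

    # Stage the advisory patches as well; pkg.patch_downloaded simply
    # delegates to pkg.patch_installed with downloadonly=True.
    stage_patches:
      pkg.patch_downloaded:
        - advisory_ids:
          - SUSE-SLE-SERVER-12-SP2-2017-185

    # During the window, the same advisory list is applied for real.
    apply_patches:
      pkg.patch_installed:
        - advisory_ids:
          - SUSE-SLE-SERVER-12-SP2-2017-185
        - require:
          - pkg: stage_patches

diff --git a/bugfix-unable-to-use-127-as-hostname.patch b/bugfix-unable-to-use-127-as-hostname.patch
new file mode 100644
index 0000000..98fcc68
--- /dev/null
+++ b/bugfix-unable-to-use-127-as-hostname.patch
@@ -0,0 +1,77 @@
+From 257e7dc14458e879844ae6dda2337b3f7fba441c Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Tue, 16 May 2017 12:06:51 +0200
+Subject: [PATCH] Bugfix: unable to use 127 as hostname
+
+Unit test for accepting hosts names as 127
+
+Harden to 127. IP part
+
+Add unit test for hostname can be started from 127
+---
+ salt/utils/network.py            |  4 ++--
+ tests/unit/utils/network_test.py | 32 ++++++++++++++++++++++++++++++++
+ 2 files changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/salt/utils/network.py b/salt/utils/network.py
+index 8d2e9f5fb2..036c00d430 100644
+--- a/salt/utils/network.py
++++ b/salt/utils/network.py
+@@ -95,8 +95,8 @@ def _generate_minion_id():
+     Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version.
+     Override 'filter()' for custom filtering.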
+ ''' +- localhost_matchers = ['localhost.*', 'ip6-.*', '127.*', r'0\.0\.0\.0', +- '::1.*', 'ipv6-.*', 'fe00::.*', 'fe02::.*', '1.0.0.*.ip6.arpa'] ++ localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', ++ r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] + + def append(self, p_object): + if p_object and p_object not in self and not self.filter(p_object): +diff --git a/tests/unit/utils/network_test.py b/tests/unit/utils/network_test.py +index a13492f8f8..b7eea54eb1 100644 +--- a/tests/unit/utils/network_test.py ++++ b/tests/unit/utils/network_test.py +@@ -266,6 +266,38 @@ class NetworkTestCase(TestCase): + self.assertEqual(network._generate_minion_id(), + ['hostname.domainname.blank', 'nodename', 'hostname', '1.2.3.4', '5.6.7.8']) + ++ @patch('platform.node', MagicMock(return_value='127')) ++ @patch('socket.gethostname', MagicMock(return_value='127')) ++ @patch('socket.getfqdn', MagicMock(return_value='127.domainname.blank')) ++ @patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'attrname', ('127.0.1.1', 0))])) ++ @patch('salt.utils.fopen', MagicMock(return_value=False)) ++ @patch('os.path.exists', MagicMock(return_value=False)) ++ @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8'])) ++ def test_generate_minion_id_127_name(self): ++ ''' ++ Test if minion IDs can be named 127.foo ++ ++ :return: ++ ''' ++ self.assertEqual(network._generate_minion_id(), ++ ['127.domainname.blank', '127', '1.2.3.4', '5.6.7.8']) ++ ++ @patch('platform.node', MagicMock(return_value='127890')) ++ @patch('socket.gethostname', MagicMock(return_value='127890')) ++ @patch('socket.getfqdn', MagicMock(return_value='127890.domainname.blank')) ++ @patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'attrname', ('127.0.1.1', 0))])) ++ @patch('salt.utils.fopen', MagicMock(return_value=False)) ++ @patch('os.path.exists', MagicMock(return_value=False)) ++ @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8'])) ++ def test_generate_minion_id_127_name_startswith(self): ++ ''' ++ Test if minion IDs can be named starting from "127" ++ ++ :return: ++ ''' ++ self.assertEqual(network._generate_minion_id(), ++ ['127890.domainname.blank', '127890', '1.2.3.4', '5.6.7.8']) ++ + @patch('platform.node', MagicMock(return_value='hostname')) + @patch('socket.gethostname', MagicMock(return_value='hostname')) + @patch('socket.getfqdn', MagicMock(return_value='hostname')) +-- +2.13.0 + + diff --git a/clean-up-change-attribute-from-interface-dict.patch b/clean-up-change-attribute-from-interface-dict.patch new file mode 100644 index 0000000..d745ea2 --- /dev/null +++ b/clean-up-change-attribute-from-interface-dict.patch @@ -0,0 +1,84 @@ +From 58468a451d7d87450fbc36beb99dd39b10f06d61 Mon Sep 17 00:00:00 2001 +From: "Peter V. Saveliev" +Date: Mon, 29 May 2017 16:30:49 +0200 +Subject: [PATCH] clean up `change` attribute from interface dict + +The attribute is hidden in IPDB from the high-level logics since +pyroute2 version 0.4.2. + +Bug-Url: https://github.com/saltstack/salt/issues/41461 + +unit tests: add pyroute2 interface dict test + +Bug-Url: https://github.com/saltstack/salt/pull/41487 +Bug-Url: https://github.com/saltstack/salt/issues/41461 + +unit tests: fix absolute imports in test_pyroute2 + +Bug-Url: https://github.com/saltstack/salt/pull/41533 + +unit tests: add encoding clause into test_pyroute2 + +Bug-Url: https://github.com/saltstack/salt/pull/41533 + +unit tests: test_pyroute2 -- add skipIf + +... 
and comments + +Bug-Url: https://github.com/saltstack/salt/pull/41533 +--- + salt/beacons/network_settings.py | 2 +- + tests/unit/modules/test_pyroute2.py | 27 +++++++++++++++++++++++++++ + 2 files changed, 28 insertions(+), 1 deletion(-) + create mode 100644 tests/unit/modules/test_pyroute2.py + +diff --git a/salt/beacons/network_settings.py b/salt/beacons/network_settings.py +index 5af71a0804..78c387b2f2 100644 +--- a/salt/beacons/network_settings.py ++++ b/salt/beacons/network_settings.py +@@ -25,7 +25,7 @@ __virtual_name__ = 'network_settings' + ATTRS = ['family', 'txqlen', 'ipdb_scope', 'index', 'operstate', 'group', + 'carrier_changes', 'ipaddr', 'neighbours', 'ifname', 'promiscuity', + 'linkmode', 'broadcast', 'address', 'num_tx_queues', 'ipdb_priority', +- 'change', 'kind', 'qdisc', 'mtu', 'num_rx_queues', 'carrier', 'flags', ++ 'kind', 'qdisc', 'mtu', 'num_rx_queues', 'carrier', 'flags', + 'ifi_type', 'ports'] + + LAST_STATS = {} +diff --git a/tests/unit/modules/test_pyroute2.py b/tests/unit/modules/test_pyroute2.py +new file mode 100644 +index 0000000000..a4ccce74e8 +--- /dev/null ++++ b/tests/unit/modules/test_pyroute2.py +@@ -0,0 +1,27 @@ ++# -*- coding: UTF-8 -*- ++ ++from __future__ import absolute_import ++ ++from tests.support.unit import TestCase ++from tests.support.unit import skipIf ++from salt.beacons.network_settings import ATTRS ++try: ++ from pyroute2 import IPDB ++ HAS_PYROUTE2 = True ++except ImportError: ++ HAS_PYROUTE2 = False ++ ++ ++@skipIf(not HAS_PYROUTE2, 'no pyroute2 installed, skipping') ++class Pyroute2TestCase(TestCase): ++ ++ def test_interface_dict_fields(self): ++ with IPDB() as ipdb: ++ for attr in ATTRS: ++ # ipdb.interfaces is a dict-like object, that ++ # contains interface definitions. Interfaces can ++ # be referenced both with indices and names. ++ # ++ # ipdb.interfaces[1] is an interface with index 1, ++ # that is the loopback interface. ++ self.assertIn(attr, ipdb.interfaces[1]) +-- +2.13.0 + + diff --git a/fix-case-in-os_family-for-suse.patch b/fix-case-in-os_family-for-suse.patch deleted file mode 100644 index bced534..0000000 --- a/fix-case-in-os_family-for-suse.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 036be5f7300bbf6c5ef3967b5cc935fd678cd1e1 Mon Sep 17 00:00:00 2001 -From: Michael Calmer -Date: Thu, 23 Feb 2017 12:01:05 +0100 -Subject: [PATCH] fix case in os_family for Suse - ---- - salt/modules/service.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/salt/modules/service.py b/salt/modules/service.py -index bb7133ee99..49186e4c9d 100644 ---- a/salt/modules/service.py -+++ b/salt/modules/service.py -@@ -53,7 +53,7 @@ def __virtual__(): - if __grains__['kernel'] != 'Linux': - return (False, 'Non Linux OSes are not supported') - # SUSE >=12.0 uses systemd -- if __grains__.get('os_family', '') == 'SUSE': -+ if __grains__.get('os_family', '') == 'Suse': - try: - # osrelease might be in decimal format (e.g. "12.1"), or for - # SLES might include service pack (e.g. 
"11 SP3"), so split on --- -2.11.0 - - diff --git a/fix-grain-for-os_family-on-suse-series.patch b/fix-grain-for-os_family-on-suse-series.patch new file mode 100644 index 0000000..58f285a --- /dev/null +++ b/fix-grain-for-os_family-on-suse-series.patch @@ -0,0 +1,132 @@ +From 067ef07513d86093fd5373ac62a4d5eb39bcc5b4 Mon Sep 17 00:00:00 2001 +From: Bo Maryniuk +Date: Tue, 16 May 2017 14:42:07 +0200 +Subject: [PATCH] Fix grain for os_family on SUSE series + +--- + doc/topics/spm/spm_formula.rst | 2 +- + salt/modules/apache.py | 2 +- + salt/modules/inspectlib/collector.py | 6 +++--- + salt/modules/iptables.py | 2 +- + salt/modules/localemod.py | 6 +++--- + tests/integration/modules/pkg.py | 2 +- + 6 files changed, 10 insertions(+), 10 deletions(-) + +diff --git a/doc/topics/spm/spm_formula.rst b/doc/topics/spm/spm_formula.rst +index 2493527a22..aa53098e2e 100644 +--- a/doc/topics/spm/spm_formula.rst ++++ b/doc/topics/spm/spm_formula.rst +@@ -11,7 +11,7 @@ describes the package. An example of this file is: + + name: apache + os: RedHat, Debian, Ubuntu, SUSE, FreeBSD +- os_family: RedHat, Debian, SUSE, FreeBSD ++ os_family: RedHat, Debian, Suse, FreeBSD + version: 201506 + release: 2 + summary: Formula for installing Apache +diff --git a/salt/modules/apache.py b/salt/modules/apache.py +index ad502df530..5d2261175a 100644 +--- a/salt/modules/apache.py ++++ b/salt/modules/apache.py +@@ -52,7 +52,7 @@ def _detect_os(): + os_family = __grains__['os_family'] + if os_family == 'RedHat': + return 'apachectl' +- elif os_family == 'Debian' or os_family == 'SUSE': ++ elif os_family == 'Debian' or os_family == 'Suse': + return 'apache2ctl' + else: + return 'apachectl' +diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py +index 332c6efdec..b87a46b82f 100644 +--- a/salt/modules/inspectlib/collector.py ++++ b/salt/modules/inspectlib/collector.py +@@ -87,7 +87,7 @@ class Inspector(EnvLoader): + ''' + if self.grains_core.os_data().get('os_family') == 'Debian': + return self.__get_cfg_pkgs_dpkg() +- elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']: ++ elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']: + return self.__get_cfg_pkgs_rpm() + else: + return dict() +@@ -163,7 +163,7 @@ class Inspector(EnvLoader): + if self.grains_core.os_data().get('os_family') == 'Debian': + cfg_data = salt.utils.to_str(self._syscall("dpkg", None, None, '--verify', + pkg_name)[0]).split(os.linesep) +- elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']: ++ elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']: + cfg_data = salt.utils.to_str(self._syscall("rpm", None, None, '-V', '--nodeps', '--nodigest', + '--nosignature', '--nomtime', '--nolinkto', + pkg_name)[0]).split(os.linesep) +@@ -240,7 +240,7 @@ class Inspector(EnvLoader): + ''' + if self.grains_core.os_data().get('os_family') == 'Debian': + return self.__get_managed_files_dpkg() +- elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']: ++ elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']: + return self.__get_managed_files_rpm() + + return list(), list(), list() +diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py +index 322553d285..b1823e891a 100644 +--- a/salt/modules/iptables.py ++++ b/salt/modules/iptables.py +@@ -80,7 +80,7 @@ def _conf(family='ipv4'): + return '/var/lib/ip6tables/rules-save' + else: + return '/var/lib/iptables/rules-save' +- elif __grains__['os_family'] == 'SUSE': ++ elif 
__grains__['os_family'] == 'Suse': + # SuSE does not seem to use separate files for IPv4 and IPv6 + return '/etc/sysconfig/scripts/SuSEfirewall2-custom' + else: +diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py +index 0bb8690fcc..b805cd429f 100644 +--- a/salt/modules/localemod.py ++++ b/salt/modules/localemod.py +@@ -132,7 +132,7 @@ def get_locale(): + return params.get('LANG', '') + elif 'RedHat' in __grains__['os_family']: + cmd = 'grep "^LANG=" /etc/sysconfig/i18n' +- elif 'SUSE' in __grains__['os_family']: ++ elif 'Suse' in __grains__['os_family']: + cmd = 'grep "^RC_LANG" /etc/sysconfig/language' + elif 'Debian' in __grains__['os_family']: + # this block only applies to Debian without systemd +@@ -172,7 +172,7 @@ def set_locale(locale): + 'LANG="{0}"'.format(locale), + append_if_not_found=True + ) +- elif 'SUSE' in __grains__['os_family']: ++ elif 'Suse' in __grains__['os_family']: + if not __salt__['file.file_exists']('/etc/sysconfig/language'): + __salt__['file.touch']('/etc/sysconfig/language') + __salt__['file.replace']( +@@ -261,7 +261,7 @@ def gen_locale(locale, **kwargs): + on_debian = __grains__.get('os') == 'Debian' + on_ubuntu = __grains__.get('os') == 'Ubuntu' + on_gentoo = __grains__.get('os_family') == 'Gentoo' +- on_suse = __grains__.get('os_family') == 'SUSE' ++ on_suse = __grains__.get('os_family') == 'Suse' + on_solaris = __grains__.get('os_family') == 'Solaris' + + if on_solaris: # all locales are pre-generated +diff --git a/tests/integration/modules/pkg.py b/tests/integration/modules/pkg.py +index d00d93bd6e..7dd7f1330c 100644 +--- a/tests/integration/modules/pkg.py ++++ b/tests/integration/modules/pkg.py +@@ -235,7 +235,7 @@ class PkgModuleTest(integration.ModuleCase, + keys = ret.keys() + self.assertIn('rpm', keys) + self.assertIn('yum', keys) +- elif os_family == 'SUSE': ++ elif os_family == 'Suse': + ret = self.run_function(func, ['less', 'zypper']) + keys = ret.keys() + self.assertIn('less', keys) +-- +2.13.0 + + diff --git a/fix-os_family-case-in-unittest.patch b/fix-os_family-case-in-unittest.patch new file mode 100644 index 0000000..24341f3 --- /dev/null +++ b/fix-os_family-case-in-unittest.patch @@ -0,0 +1,26 @@ +From 1d5e0e1c9d2ca8bb01cfe781289b4b03e0ce4c1e Mon Sep 17 00:00:00 2001 +From: Michael Calmer +Date: Fri, 19 May 2017 14:07:08 +0200 +Subject: [PATCH] Fix os_family case in unittest + +--- + tests/unit/modules/inspect_collector_test.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/unit/modules/inspect_collector_test.py b/tests/unit/modules/inspect_collector_test.py +index 9105670526..293de1ad51 100644 +--- a/tests/unit/modules/inspect_collector_test.py ++++ b/tests/unit/modules/inspect_collector_test.py +@@ -127,7 +127,7 @@ gcc-6-base:i386 + inspector.grains_core.os_data = MagicMock() + inspector.grains_core.os_data().get = MagicMock(return_value='Debian') + self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg') +- inspector.grains_core.os_data().get = MagicMock(return_value='SUSE') ++ inspector.grains_core.os_data().get = MagicMock(return_value='Suse') + self.assertEqual(inspector._get_cfg_pkgs(), 'rpm') + inspector.grains_core.os_data().get = MagicMock(return_value='redhat') + self.assertEqual(inspector._get_cfg_pkgs(), 'rpm') +-- +2.13.0 + + diff --git a/fix-regression-in-file.get_managed-add-unit-tests.patch b/fix-regression-in-file.get_managed-add-unit-tests.patch index 1764a68..9c14f34 100644 --- a/fix-regression-in-file.get_managed-add-unit-tests.patch +++ 
b/fix-regression-in-file.get_managed-add-unit-tests.patch @@ -1,4 +1,4 @@ -From 89fd1a83d282a10728077a08466627271a052733 Mon Sep 17 00:00:00 2001 +From 4b59b328de2653310f845352c099efc25c2cafdf Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 1 Mar 2017 10:19:33 -0600 Subject: [PATCH] Fix regression in file.get_managed, add unit tests @@ -10,186 +10,21 @@ and not via the state compiler. * Add integration tests for remote file sources * Remove next(iter()) extraction --- - salt/modules/file.py | 6 +-- - salt/states/archive.py | 11 ---- - tests/integration/states/file.py | 105 +++++++++++++++++++++++++++++++++++++++ - 3 files changed, 108 insertions(+), 14 deletions(-) + tests/integration/states/file.py | 1 + + 1 file changed, 1 insertion(+) -diff --git a/salt/modules/file.py b/salt/modules/file.py -index 8f0c6914b6..381800bc1a 100644 ---- a/salt/modules/file.py -+++ b/salt/modules/file.py -@@ -3745,13 +3745,13 @@ def get_managed( - if cached_dest and (source_hash or skip_verify): - htype = source_sum.get('hash_type', 'sha256') - cached_sum = get_hash(cached_dest, form=htype) -- if cached_sum != source_sum['hsum']: -- cache_refetch = True -- elif skip_verify: -+ if skip_verify: - # prev: if skip_verify or cached_sum == source_sum['hsum']: - # but `cached_sum == source_sum['hsum']` is elliptical as prev if - sfn = cached_dest - source_sum = {'hsum': cached_sum, 'hash_type': htype} -+ elif cached_sum != source_sum['hsum']: -+ cache_refetch = True - - # If we didn't have the template or remote file, let's get it - # Similarly when the file has been updated and the cache has to be refreshed -diff --git a/salt/states/archive.py b/salt/states/archive.py -index c5df213620..46146e971e 100644 ---- a/salt/states/archive.py -+++ b/salt/states/archive.py -@@ -897,17 +897,6 @@ def extracted(name, - ret['comment'] = '\n'.join([str(x) for x in file_result]) - return ret - -- # Get actual state result. The state.single return is a single-element -- # dictionary with the state's unique ID at the top level, and its value -- # being the state's return dictionary. next(iter(dict_name)) will give -- # us the value of the first key, so -- # file_result[next(iter(file_result))] will give us the results of the -- # state.single we just ran. 
-- try: -- file_result = file_result[next(iter(file_result))] -- except AttributeError: -- pass -- - try: - if not file_result['result']: - log.debug('failed to download {0}'.format(source_match)) diff --git a/tests/integration/states/file.py b/tests/integration/states/file.py -index d63f318064..faa83d00e8 100644 +index aad7fac441..54e6196c80 100644 --- a/tests/integration/states/file.py +++ b/tests/integration/states/file.py -@@ -9,15 +9,22 @@ from __future__ import absolute_import - from distutils.version import LooseVersion - import errno - import glob -+import logging - import os - import re - import sys - import shutil -+import socket - import stat - import tempfile - import textwrap -+import threading -+import tornado.ioloop -+import tornado.web - import filecmp - -+log = logging.getLogger(__name__) -+ - # Import 3rd-party libs - from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin - -@@ -2392,6 +2399,104 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): - if check_file: - self.run_function('file.remove', [file]) - -+ -+PORT = 9999 -+FILE_SOURCE = 'http://localhost:{0}/grail/scene33'.format(PORT) -+FILE_HASH = 'd2feb3beb323c79fc7a0f44f1408b4a3' +@@ -2404,6 +2404,7 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): + PORT = 9999 + FILE_SOURCE = 'http://localhost:{0}/grail/scene33'.format(PORT) + FILE_HASH = 'd2feb3beb323c79fc7a0f44f1408b4a3' +STATE_DIR = os.path.join(integration.FILES, 'file', 'base') -+ -+ -+class RemoteFileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): -+ ''' -+ Uses a local tornado webserver to test http(s) file.managed states with and -+ without skip_verify -+ ''' -+ @classmethod -+ def webserver(cls): -+ ''' -+ method to start tornado static web app -+ ''' -+ application = tornado.web.Application([ -+ (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATE_DIR}) -+ ]) -+ application.listen(PORT) -+ tornado.ioloop.IOLoop.instance().start() -+ -+ @classmethod -+ def setUpClass(cls): -+ ''' -+ start tornado app on thread and wait until it is running -+ ''' -+ cls.server_thread = threading.Thread(target=cls.webserver) -+ cls.server_thread.daemon = True -+ cls.server_thread.start() -+ # check if tornado app is up -+ port_closed = True -+ while port_closed: -+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -+ result = sock.connect_ex(('127.0.0.1', PORT)) -+ if result == 0: -+ port_closed = False -+ -+ @classmethod -+ def tearDownClass(cls): -+ tornado.ioloop.IOLoop.instance().stop() -+ cls.server_thread.join() -+ -+ def setUp(self): -+ fd_, self.name = tempfile.mkstemp(dir=integration.TMP) -+ try: -+ os.close(fd_) -+ except OSError as exc: -+ if exc.errno != errno.EBADF: -+ raise exc -+ # Remove the file that mkstemp just created so that the states can test -+ # creating a new file instead of a diff from a zero-length file. 
-+ self.tearDown() -+ -+ def tearDown(self): -+ try: -+ os.remove(self.name) -+ except OSError as exc: -+ if exc.errno != errno.ENOENT: -+ raise exc -+ -+ def test_file_managed_http_source_no_hash(self): -+ ''' -+ Test a remote file with no hash -+ ''' -+ ret = self.run_state('file.managed', -+ name=self.name, -+ source=FILE_SOURCE, -+ skip_verify=False) -+ log.debug('ret = %s', ret) -+ # This should fail because no hash was provided -+ self.assertSaltFalseReturn(ret) -+ -+ def test_file_managed_http_source(self): -+ ''' -+ Test a remote file with no hash -+ ''' -+ ret = self.run_state('file.managed', -+ name=self.name, -+ source=FILE_SOURCE, -+ source_hash=FILE_HASH, -+ skip_verify=False) -+ log.debug('ret = %s', ret) -+ self.assertSaltTrueReturn(ret) -+ -+ def test_file_managed_http_source_skip_verify(self): -+ ''' -+ Test a remote file using skip_verify -+ ''' -+ ret = self.run_state('file.managed', -+ name=self.name, -+ source=FILE_SOURCE, -+ skip_verify=True) -+ log.debug('ret = %s', ret) -+ self.assertSaltTrueReturn(ret) -+ -+ - if __name__ == '__main__': - from integration import run_tests - run_tests(FileTest) + + + class RemoteFileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): -- 2.11.0 diff --git a/fix-setting-language-on-suse-systems.patch b/fix-setting-language-on-suse-systems.patch new file mode 100644 index 0000000..26b3ce1 --- /dev/null +++ b/fix-setting-language-on-suse-systems.patch @@ -0,0 +1,121 @@ +From 2bc2078d8549c277ba40836de4e36953af9efc78 Mon Sep 17 00:00:00 2001 +From: Michael Calmer +Date: Thu, 18 May 2017 19:46:50 +0200 +Subject: [PATCH] fix setting language on SUSE systems + +--- + salt/modules/localemod.py | 28 +++++++++++++++------------- + tests/unit/modules/localemod_test.py | 32 +++++++++++++++++--------------- + 2 files changed, 32 insertions(+), 28 deletions(-) + +diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py +index b805cd429f..272aff4cc2 100644 +--- a/salt/modules/localemod.py ++++ b/salt/modules/localemod.py +@@ -127,13 +127,14 @@ def get_locale(): + salt '*' locale.get_locale + ''' + cmd = '' +- if salt.utils.systemd.booted(__context__): ++ if 'Suse' in __grains__['os_family']: ++ # this block applies to all SUSE systems - also with systemd ++ cmd = 'grep "^RC_LANG" /etc/sysconfig/language' ++ elif salt.utils.systemd.booted(__context__): + params = _parse_dbus_locale() if HAS_DBUS else _parse_localectl() + return params.get('LANG', '') + elif 'RedHat' in __grains__['os_family']: + cmd = 'grep "^LANG=" /etc/sysconfig/i18n' +- elif 'Suse' in __grains__['os_family']: +- cmd = 'grep "^RC_LANG" /etc/sysconfig/language' + elif 'Debian' in __grains__['os_family']: + # this block only applies to Debian without systemd + cmd = 'grep "^LANG=" /etc/default/locale' +@@ -161,7 +162,17 @@ def set_locale(locale): + + salt '*' locale.set_locale 'en_US.UTF-8' + ''' +- if salt.utils.systemd.booted(__context__): ++ if 'Suse' in __grains__['os_family']: ++ # this block applies to all SUSE systems - also with systemd ++ if not __salt__['file.file_exists']('/etc/sysconfig/language'): ++ __salt__['file.touch']('/etc/sysconfig/language') ++ __salt__['file.replace']( ++ '/etc/sysconfig/language', ++ '^RC_LANG=.*', ++ 'RC_LANG="{0}"'.format(locale), ++ append_if_not_found=True ++ ) ++ elif salt.utils.systemd.booted(__context__): + return _localectl_set(locale) + elif 'RedHat' in __grains__['os_family']: + if not __salt__['file.file_exists']('/etc/sysconfig/i18n'): +@@ -172,15 +183,6 @@ def set_locale(locale): + 
'LANG="{0}"'.format(locale), + append_if_not_found=True + ) +- elif 'Suse' in __grains__['os_family']: +- if not __salt__['file.file_exists']('/etc/sysconfig/language'): +- __salt__['file.touch']('/etc/sysconfig/language') +- __salt__['file.replace']( +- '/etc/sysconfig/language', +- '^RC_LANG=.*', +- 'RC_LANG="{0}"'.format(locale), +- append_if_not_found=True +- ) + elif 'Debian' in __grains__['os_family']: + # this block only applies to Debian without systemd + update_locale = salt.utils.which('update-locale') +diff --git a/tests/unit/modules/localemod_test.py b/tests/unit/modules/localemod_test.py +index b5cedfd8a6..069a3c6503 100644 +--- a/tests/unit/modules/localemod_test.py ++++ b/tests/unit/modules/localemod_test.py +@@ -44,19 +44,20 @@ class LocalemodTestCase(TestCase): + Test for Get the current system locale + ''' + with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}): +- localemod.HAS_DBUS = True +- with patch.object(localemod, +- '_parse_dbus_locale', +- return_value={'LANG': 'A'}): +- self.assertEqual('A', localemod.get_locale()) +- localemod._parse_dbus_locale.assert_called_once_with() +- +- localemod.HAS_DBUS = False +- with patch.object(localemod, +- '_parse_localectl', +- return_value={'LANG': 'A'}): +- self.assertEqual('A', localemod.get_locale()) +- localemod._parse_localectl.assert_called_once_with() ++ with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}): ++ localemod.HAS_DBUS = True ++ with patch.object(localemod, ++ '_parse_dbus_locale', ++ return_value={'LANG': 'A'}): ++ self.assertEqual('A', localemod.get_locale()) ++ localemod._parse_dbus_locale.assert_called_once_with() ++ ++ localemod.HAS_DBUS = False ++ with patch.object(localemod, ++ '_parse_localectl', ++ return_value={'LANG': 'A'}): ++ self.assertEqual('A', localemod.get_locale()) ++ localemod._parse_localectl.assert_called_once_with() + + with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}): + with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}): +@@ -82,8 +83,9 @@ class LocalemodTestCase(TestCase): + Test for Sets the current system locale + ''' + with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}): +- with patch.object(localemod, '_localectl_set', return_value=True): +- self.assertTrue(localemod.set_locale('l')) ++ with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}): ++ with patch.object(localemod, '_localectl_set', return_value=True): ++ self.assertTrue(localemod.set_locale('l')) + + with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}): + with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}): +-- +2.13.0 + + diff --git a/fixed-issue-with-parsing-of-master-minion-returns-wh.patch b/fixed-issue-with-parsing-of-master-minion-returns-wh.patch new file mode 100644 index 0000000..7c25744 --- /dev/null +++ b/fixed-issue-with-parsing-of-master-minion-returns-wh.patch @@ -0,0 +1,41 @@ +From 36ab0b6a6f8830404e4cd1a9db1918d6703ed270 Mon Sep 17 00:00:00 2001 +From: Graham Hayes +Date: Mon, 22 May 2017 09:22:36 -0400 +Subject: [PATCH] Fixed issue with parsing of master minion returns when + batching is enabled. 
(#30) + +--- + salt/states/saltmod.py | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py +index a9d1f6be93..35cd01fb4e 100644 +--- a/salt/states/saltmod.py ++++ b/salt/states/saltmod.py +@@ -300,7 +300,7 @@ def state( + except KeyError: + m_state = False + if m_state: +- m_state = salt.utils.check_state_result(m_ret) ++ m_state = salt.utils.check_state_result(m_ret, recurse=True) + + if not m_state: + if minion not in fail_minions: +@@ -309,9 +309,10 @@ def state( + continue + try: + for state_item in six.itervalues(m_ret): +- if 'changes' in state_item and state_item['changes']: +- changes[minion] = m_ret +- break ++ if isinstance(state_item, dict): ++ if 'changes' in state_item and state_item['changes']: ++ changes[minion] = m_ret ++ break + else: + no_change.add(minion) + except AttributeError: +-- +2.13.0 + + diff --git a/fixing-beacons.list-integration-test-failure.patch b/fixing-beacons.list-integration-test-failure.patch new file mode 100644 index 0000000..1e4b4ae --- /dev/null +++ b/fixing-beacons.list-integration-test-failure.patch @@ -0,0 +1,26 @@ +From 9d303be7e9f856ab41bec24e6dd83a00a1a7a04e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Piotr=20Ka=C5=BAmierczak?= +Date: Wed, 3 May 2017 18:38:15 +0200 +Subject: [PATCH] fixing beacons.list integration test failure + +--- + tests/integration/modules/beacons.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/integration/modules/beacons.py b/tests/integration/modules/beacons.py +index e04aa92dd6..ee1e81b898 100644 +--- a/tests/integration/modules/beacons.py ++++ b/tests/integration/modules/beacons.py +@@ -66,7 +66,7 @@ class BeaconsTest(integration.ModuleCase): + + @classmethod + def tearDownClass(cls): +- if os.path.isfile(cls.beacons_config_file_path): ++ if cls.beacons_config_file_path and os.path.isfile(cls.beacons_config_file_path): + os.unlink(cls.beacons_config_file_path) + + def setUp(self): +-- +2.11.0 + + diff --git a/rest_cherrypy-remove-sleep-call.patch b/rest_cherrypy-remove-sleep-call.patch new file mode 100644 index 0000000..854237b --- /dev/null +++ b/rest_cherrypy-remove-sleep-call.patch @@ -0,0 +1,33 @@ +From 2ac331ef9c2e28bb133bda04a5b3f667aff66c6c Mon Sep 17 00:00:00 2001 +From: Silvio Moioli +Date: Mon, 15 May 2017 07:44:05 +0200 +Subject: [PATCH] rest_cherrypy: remove sleep call + +--- + salt/netapi/rest_cherrypy/app.py | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py +index 3e89ff7882..221865a2ee 100644 +--- a/salt/netapi/rest_cherrypy/app.py ++++ b/salt/netapi/rest_cherrypy/app.py +@@ -465,7 +465,6 @@ import json + import os + import signal + import tarfile +-import time + from multiprocessing import Process, Pipe + + # Import third-party libs +@@ -2238,7 +2237,6 @@ class WebsocketEndpoint(object): + logger.error( + "Error: Salt event has non UTF-8 data:\n{0}" + .format(data)) +- time.sleep(0.1) + + parent_pipe, child_pipe = Pipe() + handler.pipe = parent_pipe +-- +2.13.0 + + diff --git a/run-salt-master-as-dedicated-salt-user.patch b/run-salt-master-as-dedicated-salt-user.patch index 7de91bb..68e5ec0 100644 --- a/run-salt-master-as-dedicated-salt-user.patch +++ b/run-salt-master-as-dedicated-salt-user.patch @@ -1,15 +1,16 @@ -From d1d0fec1dd0fbf6a67c313718975ceb72c10cd2f Mon Sep 17 00:00:00 2001 +From 7641133d3d95d1f13116aabe0ec7b280ad7891c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Klaus=20K=C3=A4mpf?= Date: Wed, 20 Jan 2016 11:01:06 
+0100 Subject: [PATCH] Run salt master as dedicated salt user +* Minion runs always as a root --- conf/master | 3 ++- - pkg/salt-common.logrotate | 3 +++ - 2 files changed, 5 insertions(+), 1 deletion(-) + pkg/salt-common.logrotate | 2 ++ + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/conf/master b/conf/master -index 3930c9832d..f58eb496bf 100644 +index d89da74d58..fa29ca5827 100644 --- a/conf/master +++ b/conf/master @@ -25,7 +25,8 @@ @@ -23,7 +24,7 @@ index 3930c9832d..f58eb496bf 100644 # The port used by the communication interface. The ret (return) port is the # interface used for the file server, authentication, job returns, etc. diff --git a/pkg/salt-common.logrotate b/pkg/salt-common.logrotate -index 3cd002308e..8d970c0a64 100644 +index 3cd002308e..0d99d1b801 100644 --- a/pkg/salt-common.logrotate +++ b/pkg/salt-common.logrotate @@ -1,4 +1,5 @@ @@ -32,15 +33,7 @@ index 3cd002308e..8d970c0a64 100644 weekly missingok rotate 7 -@@ -7,6 +8,7 @@ - } - - /var/log/salt/minion { -+ su salt salt - weekly - missingok - rotate 7 -@@ -15,6 +17,7 @@ +@@ -15,6 +16,7 @@ } /var/log/salt/key { diff --git a/salt-2016.11.3.tar.gz b/salt-2016.11.3.tar.gz deleted file mode 100644 index 32c9ee7..0000000 --- a/salt-2016.11.3.tar.gz +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9d5849f38a858288ebc6ef790ced86ae724e61b06e3ee27e6cecf3f6c1ecbc51 -size 9163351 diff --git a/salt-2016.11.4.tar.gz b/salt-2016.11.4.tar.gz new file mode 100644 index 0000000..4860bbc --- /dev/null +++ b/salt-2016.11.4.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb4109b28d3fd898291c162e42ef843fbd4c3e57244075670fa8f366e705765f +size 9360198 diff --git a/salt.changes b/salt.changes index 2ba91c5..232bee0 100644 --- a/salt.changes +++ b/salt.changes @@ -1,3 +1,170 @@ +------------------------------------------------------------------- +Tue Jun 6 15:47:12 UTC 2017 - bmaryniuk@suse.com + +- Bugfix: clean up `change` attribute from interface dict (upstream) + Issue: https://github.com/saltstack/salt/issues/41461 + PR: 1. https://github.com/saltstack/salt/pull/41487 + 2. 
https://github.com/saltstack/salt/pull/41533
+
+Added:
+ * clean-up-change-attribute-from-interface-dict.patch
+
+-------------------------------------------------------------------
+Mon May 22 14:23:49 UTC 2017 - bmaryniuk@suse.com
+
+- Bugfix: orchestrate and batches returns false failed information
+  https://github.com/saltstack/salt/issues/40635
+- speed-up cherrypy by removing sleep call
+- wrong os_family grains on SUSE - fix unittests (bsc#1038855)
+- fix setting the language on SUSE systems (bsc#1038855)
+- Bugfix: unable to use hostname for minion ID as '127' (upstream)
+- Bugfix: remove sleep call in CherryPy API handler (upstream)
+- Fix core grains constants for timezone (bsc#1032931)
+
+- Added:
+ * bugfix-unable-to-use-127-as-hostname.patch
+ * fix-grain-for-os_family-on-suse-series.patch
+ * fix-os_family-case-in-unittest.patch
+ * fix-setting-language-on-suse-systems.patch
+ * fixed-issue-with-parsing-of-master-minion-returns-wh.patch
+ * rest_cherrypy-remove-sleep-call.patch
+ * use-correct-grain-constants-for-timezone.patch
+
+-------------------------------------------------------------------
+Thu May 4 14:05:54 UTC 2017 - bmaryniuk@suse.com
+
+- Update to 2016.11.4
+  See https://docs.saltstack.com/en/develop/topics/releases/2016.11.4.html
+  for full changelog
+
+- Changed:
+ * add-options-for-dockerng.patch
+ * fix-regression-in-file.get_managed-add-unit-tests.patch
+
+- Added:
+ * add-unit-test-for-skip-false-values-from-preferred_i.patch
+ * fixing-beacons.list-integration-test-failure.patch
+
+- Removed:
+ * adding-downloadonly-support-to-yum-dnf-module.patch
+ * adds-custom-timeout-and-gather_job_timeout-to-local_.patch
+ * allows-to-set-timeout-and-gather_job_timeout-via-kwa.patch
+ * fix-case-in-os_family-for-suse.patch
+ * fix-race-condition-on-cache-directory-creation.patch
+ * makes-sure-gather_job_timeout-is-an-integer.patch
+ * merge-output-from-master_tops.patch
+ * openscap-module.patch
+ * use-salt-s-ordereddict-for-comparison.patch
+
+-------------------------------------------------------------------
+Tue May 2 11:41:11 UTC 2017 - pablo.suarezhernandez@suse.com
+
+- Adding "yum-plugin-security" as required for RHEL 6
+
+-------------------------------------------------------------------
+Wed Apr 26 14:24:33 UTC 2017 - pablo.suarezhernandez@suse.com
+
+- Minor fixes on new pkg.list_downloaded
+- Listing all types of advisory patches for Yum module
+- Prevents zero length error on Python 2.6
+- Fixes zypper test error after backporting
+
+- Added:
+ * search-the-entire-cache_dir-because-storage-paths-ch.patch
+
+-------------------------------------------------------------------
+Mon Apr 17 15:24:02 UTC 2017 - pablo.suarezhernandez@suse.com
+
+- Refactoring on Zypper and Yum execution and state modules to allow
+  installation of patches/errata.
+ +- Added: + * adding-support-for-installing-patches-in-yum-dnf-exe.patch +- Removed: + * allows-using-downloadonly-in-a-pkg.installed-state.patch + +------------------------------------------------------------------- +Mon Apr 10 09:56:50 UTC 2017 - pablo.suarezhernandez@suse.com + +- Fixes 'timeout' and 'gather_job_timeout' kwargs parameters + for 'local_batch' client + +- Added: + * adds-custom-timeout-and-gather_job_timeout-to-local_.patch + +------------------------------------------------------------------- +Fri Apr 7 10:53:39 UTC 2017 - bmaryniuk@suse.com + +- Add missing bootstrap script for Salt Cloud (bsc#1032452) + +------------------------------------------------------------------- +Tue Apr 4 14:58:20 UTC 2017 - bmaryniuk@suse.com + +- raet protocol is no longer supported (bsc#1020831) + +------------------------------------------------------------------- +Tue Apr 4 14:52:02 UTC 2017 - bmaryniuk@suse.com + +- Fix: add missing /var/cache/salt/cloud directory (bsc#1032213) + +------------------------------------------------------------------- +Fri Mar 31 12:27:35 UTC 2017 - pablo.suarezhernandez@suse.com + +- Adding "pkg.install downloadonly=True" support to yum/dnf + execution module +- Makes sure "gather_job_timeout" is an Integer +- Adding "pkg.downloaded" state and support for installing + patches/erratas + +- Added: + * adding-downloadonly-support-to-yum-dnf-module.patch + * allows-using-downloadonly-in-a-pkg.installed-state.patch + * makes-sure-gather_job_timeout-is-an-integer.patch + +------------------------------------------------------------------- +Fri Mar 31 12:03:18 UTC 2017 - bmaryniuk@suse.com + +- Added test case for race conditions on cache directory creation +- Modified: + * fix-race-condition-on-cache-directory-creation.patch + +------------------------------------------------------------------- +Fri Mar 24 09:41:01 UTC 2017 - bmaryniuk@suse.com + +- Cleanup salt user environment preparation (bsc#1027722) + +------------------------------------------------------------------- +Thu Mar 23 15:53:22 UTC 2017 - moio@suse.com + +- Fix: race condition on cache directory creation + +- Added: + * fix-race-condition-on-cache-directory-creation.patch + +------------------------------------------------------------------- +Mon Mar 20 10:35:36 UTC 2017 - bmaryniuk@suse.com + +- Fix: /var/log/salt/minion fails logrotate (bsc#1030009) +- Fix: Result of master_tops extension is mutually overwritten + (bsc#1030073) +- Allows to set 'timeout' and 'gather_job_timeout' via kwargs +- Allows to set custom timeouts for 'manage.up' and 'manage.status' +- Use salt's ordereddict for comparison (fixes failing tests) +- add special salt-minion.service file for RES7 +- fix scripts for salt-proxy +- openscap module + +- Changed: + * run-salt-master-as-dedicated-salt-user.patch + +- Added: + * allows-to-set-timeout-and-gather_job_timeout-via-kwa.patch + * merge-output-from-master_tops.patch + * openscap-module.patch + * special-salt-minion.service-file-for-rhel7.patch + * use-salt-s-ordereddict-for-comparison.patch + + ------------------------------------------------------------------- Fri Mar 3 09:36:17 UTC 2017 - bmaryniuk@suse.com diff --git a/salt.spec b/salt.spec index d0d9c71..3f7471f 100644 --- a/salt.spec +++ b/salt.spec @@ -32,12 +32,11 @@ %bcond_with zsh_completion %endif %bcond_with test -%bcond_with raet %bcond_without docs %bcond_with builddocs Name: salt -Version: 2016.11.3 +Version: 2016.11.4 Release: 0 Summary: A parallel remote execution system License: Apache-2.0 @@ -51,21 
+50,21 @@ Source3: html.tar.bz2 Source4: update-documentation.sh Source5: travis.yml -# PATCH-FIX-OPENSUSE use-forking-daemon.patch tserong@suse.com -- We don't have python-systemd, so notify can't work # We do not upstream this patch because this is something that we have to fix on our side +# PATCH-FIX-OPENSUSE use-forking-daemon.patch tserong@suse.com -- We don't have python-systemd, so notify can't work Patch1: tserong-suse.com-we-don-t-have-python-systemd-so-not.patch -# PATCH-FIX-OPENSUSE use-salt-user-for-master.patch -- Run salt master as dedicated salt user # We do not upstream this patch because this is suse custom configuration +# PATCH-FIX-OPENSUSE use-salt-user-for-master.patch -- Run salt master as dedicated salt user Patch2: run-salt-master-as-dedicated-salt-user.patch -# PATCH-FIX-OPENSUSE https://github.com/saltstack/salt/pull/30424 # We do not upstream this patch because it has been fixed upstream # (see: https://trello.com/c/wh96lCD4/1528-get-rid-of-0003-check-if-byte-strings-are-properly-encoded-in-utf-8-patch-in-the-salt-package) +# PATCH-FIX-OPENSUSE https://github.com/saltstack/salt/pull/30424 Patch3: check-if-byte-strings-are-properly-encoded-in-utf-8.patch -# PATCH-FIX-OPENSUSE prevent rebuilds in OBS # We do not upstream this patch because the issue is on our side +# PATCH-FIX-OPENSUSE prevent rebuilds in OBS Patch4: do-not-generate-a-date-in-a-comment-to-prevent-rebui.patch -# PATCH-FIX-OPENSUSE Generate events from the Salt minion, # We do not upstream this because this is for SUSE only (15.08.2016) if Zypper has been used outside the Salt infrastructure +# PATCH-FIX-OPENSUSE Generate events from the Salt minion, Patch5: add-zypp-notify-plugin.patch # PATCH-FIX_OPENSUSE Patch6: run-salt-api-as-user-salt-bsc-990029.patch @@ -85,12 +84,35 @@ Patch12: add-ssh-option-to-salt-ssh.patch Patch13: add-a-salt-minion-service-control-file.patch # PATCH-FIX-OPENSUSE Patch14: add-options-for-dockerng.patch -# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/39591 -Patch15: fix-case-in-os_family-for-suse.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/39762 +Patch15: fix-regression-in-file.get_managed-add-unit-tests.patch # PATCH-FIX_OPENSUSE Patch16: translate-variable-arguments-if-they-contain-hidden-.patch -# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/39762 -Patch17: fix-regression-in-file.get_managed-add-unit-tests.patch +# PATCH-FIX_OPENSUSE +Patch17: special-salt-minion.service-file-for-rhel7.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/40266 +Patch18: adding-support-for-installing-patches-in-yum-dnf-exe.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/40761 +Patch19: search-the-entire-cache_dir-because-storage-paths-ch.patch +# PATCH-FIX_OPENSUSE +Patch20: fixing-beacons.list-integration-test-failure.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/40817 +Patch21: add-unit-test-for-skip-false-values-from-preferred_i.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/40852 +Patch22: use-correct-grain-constants-for-timezone.patch +# PATCH-FIX_OPENSUSE (upstream coming soon) +Patch23: fix-grain-for-os_family-on-suse-series.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/41269 +Patch24: bugfix-unable-to-use-127-as-hostname.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/41336 +Patch25: fix-setting-language-on-suse-systems.patch +Patch26: fix-os_family-case-in-unittest.patch +# PATCH-FIX_UPSTREAM 
https://github.com/saltstack/salt/pull/41235 +Patch27: rest_cherrypy-remove-sleep-call.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/40905 +Patch28: fixed-issue-with-parsing-of-master-minion-returns-wh.patch +# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/41533 +Patch29: clean-up-change-attribute-from-interface-dict.patch BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildRequires: logrotate @@ -114,12 +136,6 @@ BuildRequires: python-tornado >= 4.2.1 # BuildRequires: python-timelib # BuildRequires: python-gnupg # BuildRequires: python-cherrypy >= 3.2.2 -%if %{with raet} -# requirements/raet.txt -BuildRequires: python-ioflo >= 1.1.7 -BuildRequires: python-libnacl >= 1.0.0 -BuildRequires: python-raet >= 0.6.0 -%endif # requirements/zeromq.txt BuildRequires: python-pycrypto >= 2.6.1 BuildRequires: python-pyzmq >= 2.2.0 @@ -164,6 +180,9 @@ Requires: python-certifi %if 0%{?rhel} Requires: python-jinja2 Requires: yum +%if 0%{?rhel} == 6 +Requires: yum-plugin-security +%endif %else Requires: python-Jinja2 %endif @@ -183,8 +202,6 @@ Requires: zypp-plugin-python Recommends: python-MySQL-python Recommends: python-timelib Recommends: python-gnupg -# requirements/raet.txt -# Recommends: salt-raet # requirements/zeromq.txt %endif Requires: python-pycrypto >= 2.6.1 @@ -272,7 +289,7 @@ This contains the documentation of salt, it is an offline version of http://docs %endif %package master -Summary: The management component of Saltstack both protocols zmq and raet supported +Summary: The management component of Saltstack with zmq protocol supported Group: System/Management Requires: %{name} = %{version}-%{release} %if 0%{?suse_version} @@ -323,28 +340,6 @@ Requires(pre): %fillup_prereq Salt minion is queried and controlled from the master. Listens to the salt master and execute the commands. -%package raet -Summary: Raet Support for Saltstack -Group: System/Management -Requires: %{name} = %{version}-%{release} -Requires: python-enum34 -Requires: python-ioflo >= 1.1.7 -Requires: python-libnacl >= 1.0.0 -Requires: python-raet >= 0.6.0 - -%description raet -The Reliable Asynchronous Event Transport, or RAET, is an alternative transport -medium developed specifically with Salt in mind. It has been developed to allow -queuing to happen up on the application layer and comes with socket layer -encryption. It also abstracts a great deal of control over the socket layer and -makes it easy to bubble up errors and exceptions. - -RAET also offers very powerful message routing capabilities, allowing for -messages to be routed between processes on a single machine all the way up to -processes on multiple machines. Messages can also be restricted, allowing -processes to be sent messages of specific types from specific sources allowing -for trust to be established. 
- %package proxy Summary: Component for salt that enables controlling arbitrary devices Group: System/Management @@ -482,6 +477,18 @@ cp %{S:5} ./.travis.yml %patch15 -p1 %patch16 -p1 %patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 +%patch29 -p1 %build %{__python} setup.py --salt-transport=both build @@ -519,6 +526,7 @@ install -Dd -m 0750 %{buildroot}%{_localstatedir}/cache/salt/master/queues install -Dd -m 0750 %{buildroot}%{_localstatedir}/cache/salt/master/roots install -Dd -m 0750 %{buildroot}%{_localstatedir}/cache/salt/master/syndics install -Dd -m 0750 %{buildroot}%{_localstatedir}/cache/salt/master/tokens +install -Dd -m 0750 %{buildroot}%{_localstatedir}/cache/salt/cloud install -Dd -m 0750 %{buildroot}/var/lib/salt install -Dd -m 0750 %{buildroot}/srv/salt install -Dd -m 0750 %{buildroot}/srv/pillar @@ -607,10 +615,6 @@ mkdir -p %{buildroot}%{fish_completions_dir} install -Dpm 0644 pkg/fish-completions/* %{buildroot}%{fish_completions_dir} %endif -# raet transport config -echo "transport: raet" > %{buildroot}%{_sysconfdir}/salt/master.d/transport-raet.conf -echo "transport: raet" > %{buildroot}%{_sysconfdir}/salt/minion.d/transport-raet.conf - %if 0%{?suse_version} > 1020 %fdupes %{buildroot}%{_docdir} %fdupes %{buildroot}%{python_sitelib} @@ -622,9 +626,14 @@ python setup.py test --runtests-opts=-u %endif %pre -getent passwd salt | grep srv\/salt >/dev/null && usermod -d /var/lib/salt salt +S_HOME="/var/lib/salt" +S_PHOME="/srv/salt" +getent passwd salt | grep $S_PHOME >/dev/null && sed -i "s:$S_PHOME:$S_HOME:g" /etc/passwd getent group salt >/dev/null || %{_sbindir}/groupadd -r salt -getent passwd salt >/dev/null || %{_sbindir}/useradd -r -g salt -d /var/lib/salt -s /bin/false -c "salt-master daemon" salt +getent passwd salt >/dev/null || %{_sbindir}/useradd -r -g salt -d $S_HOME -s /bin/false -c "salt-master daemon" salt +if [[ -d "$S_PHOME/.ssh" ]]; then + mv $S_PHOME/.ssh $S_HOME +fi %post %if %{with systemd} @@ -861,6 +870,9 @@ fi %config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/cloud %config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/cloud.profiles %config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/cloud.providers +%dir %attr(0750, root, salt) %{_localstatedir}/cache/salt/cloud +%{python_sitelib}/salt/cloud/deploy/bootstrap-salt.sh +%attr(755,root,root)%{python_sitelib}/salt/cloud/deploy/bootstrap-salt.sh %{_mandir}/man1/salt-cloud.1.* %files ssh @@ -957,11 +969,6 @@ fi %dir %attr(0750, salt, salt) %{_localstatedir}/cache/salt/master/tokens/ #%dir %ghost %attr(0750, salt, salt) %{_localstatedir}/run/salt/master/ -%files raet -%defattr(-,root,root,-) -%config(noreplace) %attr(0640, root, salt) %{_sysconfdir}/salt/master.d/transport-raet.conf -%config(noreplace) %attr(0640, root, root) %{_sysconfdir}/salt/minion.d/transport-raet.conf - %files %defattr(-,root,root,-) %{_bindir}/spm diff --git a/search-the-entire-cache_dir-because-storage-paths-ch.patch b/search-the-entire-cache_dir-because-storage-paths-ch.patch new file mode 100644 index 0000000..2d7c864 --- /dev/null +++ b/search-the-entire-cache_dir-because-storage-paths-ch.patch @@ -0,0 +1,185 @@ +From c9eb78888326d6ca6173a8d6059e1de26884030e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= + +Date: Tue, 18 Apr 2017 16:04:14 +0100 +Subject: [PATCH] Search the entire CACHE_DIR because storage paths + change across versions + +Prevents 
diff --git a/search-the-entire-cache_dir-because-storage-paths-ch.patch b/search-the-entire-cache_dir-because-storage-paths-ch.patch
new file mode 100644
index 0000000..2d7c864
--- /dev/null
+++ b/search-the-entire-cache_dir-because-storage-paths-ch.patch
@@ -0,0 +1,185 @@
+From c9eb78888326d6ca6173a8d6059e1de26884030e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
+
+Date: Tue, 18 Apr 2017 16:04:14 +0100
+Subject: [PATCH] Search the entire CACHE_DIR because storage paths
+ change across versions
+
+Prevents 'zero length field name in format' error on Python 2.6
+
+Fixes Zypper unit test
+
+Enhances pkg.list_downloaded information of a package
+
+Listing all patches instead of security ones only
+
+Adapting Zypper test to new list_downloaded output
+
+Fixes zypper test error after backporting
+
+Pylint fixes
+---
+ salt/modules/yumpkg.py            | 18 +++++++++++++-----
+ salt/modules/zypper.py            | 17 ++++++++++++-----
+ salt/states/pkg.py                |  3 ++-
+ tests/unit/modules/zypper_test.py | 20 ++++++++++++++------
+ 4 files changed, 41 insertions(+), 17 deletions(-)
+
+diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
+index f6777d770f..690d0c4e3a 100644
+--- a/salt/modules/yumpkg.py
++++ b/salt/modules/yumpkg.py
+@@ -18,8 +18,8 @@ Support for YUM/DNF
+ from __future__ import absolute_import
+ import contextlib
+ import copy
++import datetime
+ import fnmatch
+-import glob
+ import itertools
+ import logging
+ import os
+@@ -816,9 +816,17 @@ def list_downloaded():
+     CACHE_DIR = os.path.join('/var/cache/', _yum())
+
+     ret = {}
+-    for package_path in glob.glob(os.path.join(CACHE_DIR, '*/*/*/packages/*.rpm')):
+-        pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
+-        ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = package_path
++    for root, dirnames, filenames in os.walk(CACHE_DIR):
++        for filename in fnmatch.filter(filenames, '*.rpm'):
++            package_path = os.path.join(root, filename)
++            pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
++            pkg_timestamp = int(os.path.getctime(package_path))
++            ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = {
++                'path': package_path,
++                'size': os.path.getsize(package_path),
++                'creation_date_time_t': pkg_timestamp,
++                'creation_date_time': datetime.datetime.fromtimestamp(pkg_timestamp).isoformat(),
++            }
+     return ret
+
+
+@@ -2804,7 +2812,7 @@ def _get_patches(installed_only=False):
+     '''
+     patches = {}
+
+-    cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'security', 'all']
++    cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'all']
+     ret = __salt__['cmd.run_stdout'](
+         cmd,
+         python_shell=False
+diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py
+index 28087f5dbd..6055966904 100644
+--- a/salt/modules/zypper.py
++++ b/salt/modules/zypper.py
+@@ -15,7 +15,7 @@ Package support for openSUSE via the zypper package manager
+ # Import python libs
+ from __future__ import absolute_import
+ import copy
+-import glob
++import fnmatch
+ import logging
+ import re
+ import os
+@@ -1797,10 +1797,17 @@ def list_downloaded():
+     CACHE_DIR = '/var/cache/zypp/packages/'
+
+     ret = {}
+-    # Zypper storage is repository_tag/arch/package-version.rpm
+-    for package_path in glob.glob(os.path.join(CACHE_DIR, '*/*/*.rpm')):
+-        pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
+-        ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = package_path
++    for root, dirnames, filenames in os.walk(CACHE_DIR):
++        for filename in fnmatch.filter(filenames, '*.rpm'):
++            package_path = os.path.join(root, filename)
++            pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
++            pkg_timestamp = int(os.path.getctime(package_path))
++            ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = {
++                'path': package_path,
++                'size': os.path.getsize(package_path),
++                'creation_date_time_t': pkg_timestamp,
++                'creation_date_time': datetime.datetime.fromtimestamp(pkg_timestamp).isoformat(),
++            }
+     return ret
+
+
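Both list_downloaded() rewrites above share one pattern: replace a fixed-depth glob() with a recursive os.walk() scan filtered by fnmatch, and return a metadata dict instead of a bare path. A minimal standalone sketch of that pattern (list_cached_rpms and the hard-coded CACHE_DIR are illustrative names, not Salt's API):

    # Sketch of the os.walk()/fnmatch approach used in the hunks above.
    import datetime
    import fnmatch
    import os

    CACHE_DIR = '/var/cache/zypp/packages/'

    def list_cached_rpms(cache_dir=CACHE_DIR):
        ret = {}
        # os.walk() descends to any depth, so the cache layout may
        # change between versions without breaking the scan, unlike
        # the old fixed '*/*/*.rpm' glob pattern.
        for root, _dirs, filenames in os.walk(cache_dir):
            for filename in fnmatch.filter(filenames, '*.rpm'):
                path = os.path.join(root, filename)
                ctime = int(os.path.getctime(path))
                ret[path] = {
                    'size': os.path.getsize(path),
                    'creation_date_time_t': ctime,
                    'creation_date_time':
                        datetime.datetime.fromtimestamp(ctime).isoformat(),
                }
        return ret

    print(list_cached_rpms())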
+diff --git a/salt/states/pkg.py b/salt/states/pkg.py
+index d185002d41..0983712b4c 100644
+--- a/salt/states/pkg.py
++++ b/salt/states/pkg.py
+@@ -2081,7 +2081,8 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
+     if not ret['changes'] and not ret['comment']:
+         status = 'downloaded' if downloadonly else 'installed'
+         ret['result'] = True
+-        ret['comment'] = 'Related packages are already {}'.format(status)
++        ret['comment'] = ('Advisory patch is not needed or related packages '
++                          'are already {0}'.format(status))
+
+     return ret
+
+diff --git a/tests/unit/modules/zypper_test.py b/tests/unit/modules/zypper_test.py
+index 39bd2e73e8..c9d44d102c 100644
+--- a/tests/unit/modules/zypper_test.py
++++ b/tests/unit/modules/zypper_test.py
+@@ -486,7 +486,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+         self.assertEqual(len(list_patches), 3)
+         self.assertDictEqual(list_patches, PATCHES_RET)
+
+-    @patch('glob.glob', MagicMock(return_value=['/var/cache/zypper/packages/foo/bar/test_package.rpm']))
++    @patch('os.walk', MagicMock(return_value=[('test', 'test', 'test')]))
++    @patch('os.path.getsize', MagicMock(return_value=123456))
++    @patch('os.path.getctime', MagicMock(return_value=1234567890.123456))
++    @patch('fnmatch.filter', MagicMock(return_value=['/var/cache/zypper/packages/foo/bar/test_package.rpm']))
+     def test_list_downloaded(self):
+         '''
+         Test downloaded packages listing.
+@@ -495,7 +498,12 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+         DOWNLOADED_RET = {
+             'test-package': {
+-                '1.0': '/var/cache/zypper/packages/foo/bar/test_package.rpm'
++                '1.0': {
++                    'path': '/var/cache/zypper/packages/foo/bar/test_package.rpm',
++                    'size': 123456,
++                    'creation_date_time_t': 1234567890,
++                    'creation_date_time': '2009-02-13T23:31:30',
++                }
+             }
+         }
+
+@@ -530,7 +538,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+         self.assertEqual(zypper.download("nmap", "foo"), test_out)
+
+     @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
+-    @patch('salt.modules.zypper.list_downloaded', MagicMock(side_effect=[{}, {'vim': {'1.1': '/foo/bar/test.rpm'}}]))
++    @patch('salt.modules.zypper.list_downloaded', MagicMock(side_effect=[{}, {'vim': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2009-02-13T23:31:30'}}}]))
+     def test_install_with_downloadonly(self):
+         '''
+         Test a package installation with downloadonly=True.
+@@ -548,10 +556,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+             '--download-only',
+             'vim'
+         )
+-        self.assertDictEqual(ret, {'vim': {'new': {'1.1': '/foo/bar/test.rpm'}, 'old': ''}})
++        self.assertDictEqual(ret, {'vim': {'new': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2009-02-13T23:31:30'}}, 'old': ''}})
+
+     @patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
+-    @patch('salt.modules.zypper.list_downloaded', MagicMock(return_value={'vim': {'1.1': '/foo/bar/test.rpm'}}))
++    @patch('salt.modules.zypper.list_downloaded', MagicMock(return_value={'vim': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2017-01-01T11:00:00'}}}))
+     def test_install_with_downloadonly_already_downloaded(self):
+         '''
+         Test a package installation with downloadonly=True when package is already downloaded.
+@@ -603,7 +611,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
+         '''
+         with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-XXX': None}, 'advisory'))}):
+             with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
+-                with self.assertRaisesRegex(CommandExecutionError, '^Advisory id "SUSE-PATCH-XXX" not found$'):
++                with self.assertRaisesRegexp(CommandExecutionError, '^Advisory id "SUSE-PATCH-XXX" not found$'):
+                     zypper.install(advisory_ids=['SUSE-PATCH-XXX'])
+
+     def test_remove_purge(self):
+--
+2.12.2
+
+
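The pkg.py hunk above also switches the comment string from an auto-numbered {} placeholder to an explicit {0} index. Auto-numbered fields were only introduced in Python 2.7, so {} raises "ValueError: zero length field name in format" on Python 2.6, which is the error the commit message refers to. A standalone illustration (plain Python, not Salt code):

    # Explicitly indexed format fields work on Python 2.6 and later
    # alike; bare '{}' auto-numbering requires Python 2.7 or newer.
    status = 'installed'
    comment = ('Advisory patch is not needed or related packages '
               'are already {0}'.format(status))
    print(comment)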
diff --git a/special-salt-minion.service-file-for-rhel7.patch b/special-salt-minion.service-file-for-rhel7.patch
new file mode 100644
index 0000000..d9dff8b
--- /dev/null
+++ b/special-salt-minion.service-file-for-rhel7.patch
@@ -0,0 +1,34 @@
+From 8860800e7a9af54757096014f91a25be4f3fa552 Mon Sep 17 00:00:00 2001
+From: Michael Calmer
+Date: Tue, 7 Mar 2017 13:50:13 +0100
+Subject: [PATCH] special salt-minion.service file for rhel7
+
+---
+ pkg/salt-minion.service.rhel7 | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+ create mode 100644 pkg/salt-minion.service.rhel7
+
+diff --git a/pkg/salt-minion.service.rhel7 b/pkg/salt-minion.service.rhel7
+new file mode 100644
+index 0000000000..6917267714
+--- /dev/null
++++ b/pkg/salt-minion.service.rhel7
+@@ -0,0 +1,14 @@
++[Unit]
++Description=The Salt Minion
++After=network.target
++
++[Service]
++Type=simple
++LimitNOFILE=8192
++ExecStart=/usr/bin/salt-minion
++KillMode=process
++Restart=on-failure
++RestartSec=15
++
++[Install]
++WantedBy=multi-user.target
+--
+2.11.0
+
diff --git a/use-correct-grain-constants-for-timezone.patch b/use-correct-grain-constants-for-timezone.patch
new file mode 100644
index 0000000..92cc21a
--- /dev/null
+++ b/use-correct-grain-constants-for-timezone.patch
@@ -0,0 +1,530 @@
+From 48cc3497eb19059a7acf14268a722e46b12e59be Mon Sep 17 00:00:00 2001
+From: Bo Maryniuk
+Date: Fri, 21 Apr 2017 15:53:51 +0200
+Subject: [PATCH] Use correct grain constants for timezone
+
+Adjust the test so it is using the right grain for SUSE systems
+
+Bugfix: use correct grain constant for platform
+
+Refactor with setup/teardown
+
+Add UT for RedHat's set_zone
+
+Fix doc for RH UT
+
+Remove unnecessary mock patch
+
+Doc fix
+
+Add UT for set_zone on SUSE series
+
+Adjust UT to use correct grain for SUSE series
+
+Bugfix: use correct os_family grain value for SUSE series
+
+Add UT for gentoo on set_zone
+
+Add UT for Debian on set_zone
+
+Remove duplicate code
+
+Add UT for get_hwclock on UTC/localtime
+
+Remove dead code
+
+Add UT for get_hwclock on SUSE platform
+
+Bugfix: use correct grain for SUSE and RedHat platform
+
+Add UT for RedHat/SUSE platforms on get_hwclock
+
+Add UT for Debian on get_hwclock
+
+Add UT on Solaris
+
+Add UT for AIX on get_hwclock
+
+Add UT for set_hwclock on AIX
+
+Fix docstrings
+
+Add UT for set_hwclock on solaris
+
+Add UT for set_hwclock on Arch
+
+Add UT for set_hwclock on RedHat
+
+Fix UT names
+
+Add UT set_hwclock on SUSE
+
+Bugfix: use correct grain name for SUSE platform
+
+Add UT for set_hwclock on Debian
+
+Add UT on set_hw_clock on Gentoo
+
+Fix lint issues
+
+Rewrite test case for using no patch decorators
+
+Disable the test for a while
+
+Do not use multiple variables in "with" statement because of lint issues
+---
+ salt/modules/timezone.py            |  13 +-
+ tests/unit/modules/timezone_test.py | 390 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 395 insertions(+), 8 deletions(-)
+ create mode 100644 tests/unit/modules/timezone_test.py
+
+diff --git a/salt/modules/timezone.py b/salt/modules/timezone.py
+index 69fb4fb663..e0d079f50a 100644
+--- a/salt/modules/timezone.py
++++ b/salt/modules/timezone.py
+@@ -160,7 +160,7 @@ def get_zone():
+     if __grains__['os'].lower() == 'centos':
+         return _get_zone_etc_localtime()
+     os_family = __grains__['os_family']
+-    for family in ('RedHat', 'SUSE'):
++    for family in ('RedHat', 'Suse'):
+         if family in os_family:
+             return _get_zone_sysconfig()
+     for family in ('Debian', 'Gentoo'):
+@@ -273,16 +273,13 @@ def set_zone(timezone):
+     if 'RedHat' in __grains__['os_family']:
+         __salt__['file.sed'](
+             '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
+-    elif 'SUSE' in __grains__['os_family']:
++    elif 'Suse' in __grains__['os_family']:
+         __salt__['file.sed'](
+             '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
+-    elif 'Debian' in __grains__['os_family']:
++    elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
+         with salt.utils.fopen('/etc/timezone', 'w') as ofh:
+             ofh.write(timezone.strip())
+             ofh.write('\n')
+-    elif 'Gentoo' in __grains__['os_family']:
+-        with salt.utils.fopen('/etc/timezone', 'w') as ofh:
+-            ofh.write(timezone)
+
+     return True
+
+@@ -373,7 +370,7 @@ def get_hwclock():
+
+     else:
+         os_family = __grains__['os_family']
+-        for family in ('RedHat', 'SUSE'):
++        for family in ('RedHat', 'Suse'):
+             if family in os_family:
+                 cmd = ['tail', '-n', '1', '/etc/adjtime']
+                 return __salt__['cmd.run'](cmd, python_shell=False)
+@@ -505,7 +502,7 @@ def set_hwclock(clock):
+     elif 'RedHat' in __grains__['os_family']:
+         __salt__['file.sed'](
+             '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
+-    elif 'SUSE' in __grains__['os_family']:
++    elif 'Suse' in __grains__['os_family']:
+         __salt__['file.sed'](
+             '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
+     elif 'Debian' in __grains__['os_family']:
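All of the replacements in this hunk come down to one fact: Salt's os_family grain value for the SUSE family is spelled 'Suse', not 'SUSE', so the old membership tests could never match and the RedHat/SUSE branches were silently skipped. A reduced sketch of the dispatch being fixed (the grains dict is stubbed here for illustration; in Salt it is injected into the module):

    # Stub of the os_family dispatch fixed above; in Salt, __grains__
    # is provided by the loader rather than defined like this.
    __grains__ = {'os_family': 'Suse'}

    def hwclock_query_cmd():
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse'):  # 'SUSE' would never match
            if family in os_family:
                return ['tail', '-n', '1', '/etc/adjtime']
        raise NotImplementedError(os_family)

    print(hwclock_query_cmd())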
+diff --git a/tests/unit/modules/timezone_test.py b/tests/unit/modules/timezone_test.py
+new file mode 100644
+index 0000000000..ebf28e28ee
+--- /dev/null
++++ b/tests/unit/modules/timezone_test.py
+@@ -0,0 +1,390 @@
++# -*- coding: utf-8 -*-
++'''
++    :codeauthor: :email:`Bo Maryniuk `
++'''
++
++# Import Python Libs
++from __future__ import absolute_import
++
++# Import Salt Testing Libs
++from salttesting import TestCase, skipIf
++from salttesting.mock import (
++    MagicMock,
++    patch,
++    NO_MOCK,
++    NO_MOCK_REASON
++)
++
++from salttesting.helpers import ensure_in_syspath
++from salt.exceptions import SaltInvocationError
++
++ensure_in_syspath('../../')
++
++# Import Salt Libs
++from salt.modules import timezone
++
++
++@skipIf(NO_MOCK, NO_MOCK_REASON)
++class TimezoneTestCase(TestCase):
++    '''
++    Timezone test case
++    '''
++    TEST_TZ = 'UTC'
++
++    def setUp(self):
++        '''
++        Setup test
++        :return:
++        '''
++        timezone.__salt__ = {'file.sed': MagicMock(),
++                             'cmd.run': MagicMock(),
++                             'cmd.retcode': MagicMock(return_value=0)}
++        timezone.__grains__ = {'os': 'unknown'}
++
++    def tearDown(self):
++        '''
++        Teardown test
++        :return:
++        '''
++        timezone.__salt__ = timezone.__grains__ = None
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_centos(self):
++        '''
++        Test CentOS is recognized
++        :return:
++        '''
++        timezone.__grains__['os'] = 'centos'
++        with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
++            assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_os_family_rh_suse(self):
++        '''
++        Test RedHat and Suse are recognized
++        :return:
++        '''
++        for osfamily in ['RedHat', 'Suse']:
++            timezone.__grains__['os_family'] = [osfamily]
++            with patch('salt.modules.timezone._get_zone_sysconfig', MagicMock(return_value=self.TEST_TZ)):
++                assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_os_family_debian_gentoo(self):
++        '''
++        Test Debian and Gentoo are recognized
++        :return:
++        '''
++        for osfamily in ['Debian', 'Gentoo']:
++            timezone.__grains__['os_family'] = [osfamily]
++            with patch('salt.modules.timezone._get_zone_etc_timezone', MagicMock(return_value=self.TEST_TZ)):
++                assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_os_family_allbsd_nilinuxrt(self):
++        '''
++        Test *BSD and NILinuxRT are recognized
++        :return:
++        '''
++        for osfamily in ['FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT']:
++            timezone.__grains__['os_family'] = osfamily
++            with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
++                assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_os_family_slowlaris(self):
++        '''
++        Test Slowlaris is recognized
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Solaris']
++        with patch('salt.modules.timezone._get_zone_solaris', MagicMock(return_value=self.TEST_TZ)):
++            assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    def test_get_zone_os_family_aix(self):
++        '''
++        Test IBM AIX is recognized
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['AIX']
++        with patch('salt.modules.timezone._get_zone_aix', MagicMock(return_value=self.TEST_TZ)):
++            assert timezone.get_zone() == self.TEST_TZ
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_set_zone_redhat(self):
++        '''
++        Test zone set on RH series
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['RedHat']
++        assert timezone.set_zone(self.TEST_TZ)
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="UTC"')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_set_zone_suse(self):
++        '''
++        Test zone set on SUSE series
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Suse']
++        assert timezone.set_zone(self.TEST_TZ)
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_set_zone_gentoo(self):
++        '''
++        Test zone set on Gentoo series
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Gentoo']
++        _fopen = MagicMock(return_value=MagicMock(spec=file))
++        with patch('salt.utils.fopen', _fopen):
++            assert timezone.set_zone(self.TEST_TZ)
++            name, args, kwargs = _fopen.mock_calls[0]
++            assert args == ('/etc/timezone', 'w')
++            name, args, kwargs = _fopen.return_value.__enter__.return_value.write.mock_calls[0]
++            assert args == ('UTC',)
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_set_zone_debian(self):
++        '''
++        Test zone set on Debian series
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Debian']
++        _fopen = MagicMock(return_value=MagicMock(spec=file))
++        with patch('salt.utils.fopen', _fopen):
++            assert timezone.set_zone(self.TEST_TZ)
++            name, args, kwargs = _fopen.mock_calls[0]
++            assert args == ('/etc/timezone', 'w')
++            name, args, kwargs = _fopen.return_value.__enter__.return_value.write.mock_calls[0]
++            assert args == ('UTC',)
++
++    @patch('salt.utils.which', MagicMock(return_value=True))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_get_hwclock_timedate_utc(self):
++        '''
++        Test get hwclock UTC/localtime
++        :return:
++        '''
++        with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz'})):
++            assert timezone.get_hwclock() == 'UTC'
++        with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz:yes'})):
++            assert timezone.get_hwclock() == 'localtime'
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_get_hwclock_suse(self):
++        '''
++        Test get hwclock on SUSE
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Suse']
++        timezone.get_hwclock()
++        name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
++        assert args == (['tail', '-n', '1', '/etc/adjtime'],)
++        assert kwarg == {'python_shell': False}
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_get_hwclock_redhat(self):
++        '''
++        Test get hwclock on RedHat
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['RedHat']
++        timezone.get_hwclock()
++        name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
++        assert args == (['tail', '-n', '1', '/etc/adjtime'],)
++        assert kwarg == {'python_shell': False}
++
++    def _test_get_hwclock_debian(self):  # TODO: Enable this when testing environment is working properly
++        '''
++        Test get hwclock on Debian
++        :return:
++        '''
++        with patch('salt.utils.which', MagicMock(return_value=False)):
++            with patch('os.path.exists', MagicMock(return_value=True)):
++                with patch('os.unlink', MagicMock()):
++                    with patch('os.symlink', MagicMock()):
++                        timezone.__grains__['os_family'] = ['Debian']
++                        timezone.get_hwclock()
++                        name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
++                        assert args == (['tail', '-n', '1', '/etc/adjtime'],)
++                        assert kwarg == {'python_shell': False}
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_get_hwclock_solaris(self):
++        '''
++        Test get hwclock on Solaris
++        :return:
++        '''
++        # Incomplete
++        timezone.__grains__['os_family'] = ['Solaris']
++        assert timezone.get_hwclock() == 'UTC'
++        _fopen = MagicMock(return_value=MagicMock(spec=file))
++        with patch('salt.utils.fopen', _fopen):
++            assert timezone.get_hwclock() == 'localtime'
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_get_hwclock_aix(self):
++        '''
++        Test get hwclock on AIX
++        :return:
++        '''
++        # Incomplete
++        timezone.__grains__['os_family'] = ['AIX']
++        assert timezone.get_hwclock() == 'localtime'
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    def test_set_hwclock_aix(self):
++        '''
++        Test set hwclock on AIX
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['AIX']
++        with self.assertRaises(SaltInvocationError):
++            assert timezone.set_hwclock('forty two')
++        assert timezone.set_hwclock('UTC')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_solaris(self):
++        '''
++        Test set hwclock on Solaris
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Solaris']
++        timezone.__grains__['cpuarch'] = 'x86'
++
++        with self.assertRaises(SaltInvocationError):
++            assert timezone.set_hwclock('forty two')
++        assert timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
++        assert args == (['rtc', '-z', 'GMT'],)
++        assert kwargs == {'python_shell': False}
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_arch(self):
++        '''
++        Test set hwclock on arch
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Arch']
++
++        assert timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
++        assert args == (['timezonectl', 'set-local-rtc', 'false'],)
++        assert kwargs == {'python_shell': False}
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_redhat(self):
++        '''
++        Test set hwclock on RedHat
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['RedHat']
++
++        assert timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="TEST_TIMEZONE"')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_suse(self):
++        '''
++        Test set hwclock on SUSE
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Suse']
++
++        assert timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="TEST_TIMEZONE"')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_debian(self):
++        '''
++        Test set hwclock on Debian
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Debian']
++
++        assert timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
++
++        assert timezone.set_hwclock('localtime')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
++        assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=no')
++
++    @patch('salt.utils.which', MagicMock(return_value=False))
++    @patch('os.path.exists', MagicMock(return_value=True))
++    @patch('os.unlink', MagicMock())
++    @patch('os.symlink', MagicMock())
++    @patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
++    def test_set_hwclock_gentoo(self):
++        '''
++        Test set hwclock on Gentoo
++        :return:
++        '''
++        timezone.__grains__['os_family'] = ['Gentoo']
++
++        with self.assertRaises(SaltInvocationError):
++            timezone.set_hwclock('forty two')
++
++        timezone.set_hwclock('UTC')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
++        assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="UTC"')
++
++        timezone.set_hwclock('localtime')
++        name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
++        assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="local"')
+--
+2.13.0
+
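The commit message item 'Do not use multiple variables in "with" statement because of lint issues' refers to the style visible in _test_get_hwclock_debian above: one context manager per "with" block, nested, rather than stacked @patch decorators or comma-separated managers. The same pattern in isolation (unittest.mock stands in for salttesting.mock here, which re-exports the same names, so the sketch runs standalone):

    # One context manager per "with", nested: the lint-friendly style
    # the commit message refers to.
    import os.path
    from unittest.mock import MagicMock, patch

    with patch('os.path.exists', MagicMock(return_value=True)):
        with patch('os.unlink', MagicMock()):
            assert os.path.exists('/no/such/path')  # mocked: always True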