From 471a3ec29da4bb156b041593ad11ca2f09dcb347cf385694ab96e55b644405b6 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Fri, 6 Sep 2019 12:06:25 +0000 Subject: [PATCH] osc copypac from project:systemsmanagement:saltstack:testing package:salt revision:291 OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=150 --- 2019.2.0-pr-54196-backport-173.patch | 224 +++++++++++ _lastrevision | 2 +- fix-aptpkg-systemd-call-bsc-1143301.patch | 40 ++ fix-virt.full_info-176.patch | 374 ++++++++++++++++++ ...k.fqdns-module-function-bsc-1134860-.patch | 326 +++++++++++++++ ...eprecation-warning-to-reduce-log-spa.patch | 57 +++ ...default-behaviour-of-pkg-list-return.patch | 135 +++++++ salt.changes | 58 +++ salt.spec | 43 +- ...rom-repo.uri-when-comparing-repos-in.patch | 28 ++ ...s-needs-to-ignore-inactive-pools-174.patch | 83 ++++ ...os-silence-libvirt-error-message-175.patch | 37 ++ 12 files changed, 1405 insertions(+), 2 deletions(-) create mode 100644 2019.2.0-pr-54196-backport-173.patch create mode 100644 fix-aptpkg-systemd-call-bsc-1143301.patch create mode 100644 fix-virt.full_info-176.patch create mode 100644 implement-network.fqdns-module-function-bsc-1134860-.patch create mode 100644 move-server_id-deprecation-warning-to-reduce-log-spa.patch create mode 100644 restore-default-behaviour-of-pkg-list-return.patch create mode 100644 strip-trailing-from-repo.uri-when-comparing-repos-in.patch create mode 100644 virt.volume_infos-needs-to-ignore-inactive-pools-174.patch create mode 100644 virt.volume_infos-silence-libvirt-error-message-175.patch diff --git a/2019.2.0-pr-54196-backport-173.patch b/2019.2.0-pr-54196-backport-173.patch new file mode 100644 index 0000000..fcbf178 --- /dev/null +++ b/2019.2.0-pr-54196-backport-173.patch @@ -0,0 +1,224 @@ +From 3119bc27584472b0f0d440a37ec4cff2504165f2 Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Tue, 3 Sep 2019 15:16:30 +0200 +Subject: [PATCH] 2019.2.0 PR 54196 backport (#173) + 
+* virt.network_define doesn't have vport as positional argument + +virt.network_running state calls virt.network_define with vport as a +positional argument resulting in an error at runtime. Fix the state to +use the vport named argument instead. + +* Fix virt.pool_running state documentation + +virt.pool_running needs the source to be a dictionary, which the +documentation was not reflecting. Along the same lines the source hosts +need to be a list, adjust the example to show it. + +* Get virt.pool_running to start the pool after creating it + +Commit 25b96815 is wrong in assuming the pool build also starts it. The +pool needs to be stopped before building it, but we still need to start +it after the build: libvirt won't do it automagically for us. + +* Fix states to match virt.{network,pool}_infos return + +virt.network_infos and virt.pool_infos return the infos as a dictionary +with the network or pool name as a key even when there is only one +value. Adapt the network_running and pool_running states to this. + +* Fix virt.running use of virt.vm_state + +vm_state return a dictionary with the VM name as a key. Fix virt.running +state and its tests to match this. See issue #53107. 
+--- + salt/states/virt.py | 26 ++++++++++++++++---------- + tests/unit/states/test_virt.py | 27 +++++++++++++++------------ + 2 files changed, 31 insertions(+), 22 deletions(-) + +diff --git a/salt/states/virt.py b/salt/states/virt.py +index d411f864cd..32a9e31ae5 100644 +--- a/salt/states/virt.py ++++ b/salt/states/virt.py +@@ -389,8 +389,8 @@ def running(name, + + try: + try: +- __salt__['virt.vm_state'](name) +- if __salt__['virt.vm_state'](name) != 'running': ++ domain_state = __salt__['virt.vm_state'](name) ++ if domain_state.get(name, None) != 'running': + action_msg = 'started' + if update: + status = __salt__['virt.update'](name, +@@ -670,7 +670,7 @@ def network_running(name, + try: + info = __salt__['virt.network_info'](name, connection=connection, username=username, password=password) + if info: +- if info['active']: ++ if info[name]['active']: + ret['comment'] = 'Network {0} exists and is running'.format(name) + else: + __salt__['virt.network_start'](name, connection=connection, username=username, password=password) +@@ -680,7 +680,7 @@ def network_running(name, + __salt__['virt.network_define'](name, + bridge, + forward, +- vport, ++ vport=vport, + tag=tag, + autostart=autostart, + start=True, +@@ -744,11 +744,11 @@ def pool_running(name, + - owner: 1000 + - group: 100 + - source: +- - dir: samba_share +- - hosts: +- one.example.com +- two.example.com +- - format: cifs ++ dir: samba_share ++ hosts: ++ - one.example.com ++ - two.example.com ++ format: cifs + - autostart: True + + ''' +@@ -761,7 +761,7 @@ def pool_running(name, + try: + info = __salt__['virt.pool_info'](name, connection=connection, username=username, password=password) + if info: +- if info['state'] == 'running': ++ if info[name]['state'] == 'running': + ret['comment'] = 'Pool {0} exists and is running'.format(name) + else: + __salt__['virt.pool_start'](name, connection=connection, username=username, password=password) +@@ -795,6 +795,12 @@ def pool_running(name, + connection=connection, 
+ username=username, + password=password) ++ ++ __salt__['virt.pool_start'](name, ++ connection=connection, ++ username=username, ++ password=password) ++ + ret['changes'][name] = 'Pool defined and started' + ret['comment'] = 'Pool {0} defined and started'.format(name) + except libvirt.libvirtError as err: +diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py +index 8022989937..2904fa224d 100644 +--- a/tests/unit/states/test_virt.py ++++ b/tests/unit/states/test_virt.py +@@ -229,7 +229,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + 'result': True, + 'comment': 'myvm is running'} + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.vm_state': MagicMock(return_value='stopped'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}), + 'virt.start': MagicMock(return_value=0), + }): + ret.update({'changes': {'myvm': 'Domain started'}, +@@ -322,7 +322,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + password='supersecret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.vm_state': MagicMock(return_value='stopped'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}), + 'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')]) + }): + ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'}) +@@ -330,7 +330,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + + # Working update case when running + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.vm_state': MagicMock(return_value='running'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}), + 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True}) + }): + ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}}, +@@ -340,7 +340,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + + # Working update case when stopped + with patch.dict(virt.__salt__, { # pylint: 
disable=no-member +- 'virt.vm_state': MagicMock(return_value='stopped'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'stopped'}), + 'virt.start': MagicMock(return_value=0), + 'virt.update': MagicMock(return_value={'definition': True}) + }): +@@ -351,7 +351,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + + # Failed live update case + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.vm_state': MagicMock(return_value='running'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}), + 'virt.update': MagicMock(return_value={'definition': True, 'cpu': False, 'errors': ['some error']}) + }): + ret.update({'changes': {'myvm': {'definition': True, 'cpu': False, 'errors': ['some error']}}, +@@ -361,7 +361,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + + # Failed definition update case + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.vm_state': MagicMock(return_value='running'), ++ 'virt.vm_state': MagicMock(return_value={'myvm': 'running'}), + 'virt.update': MagicMock(side_effect=[self.mock_libvirt.libvirtError('error message')]) + }): + ret.update({'changes': {}, +@@ -573,7 +573,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + define_mock.assert_called_with('mynet', + 'br2', + 'bridge', +- 'openvswitch', ++ vport='openvswitch', + tag=180, + autostart=False, + start=True, +@@ -582,7 +582,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.network_info': MagicMock(return_value={'active': True}), ++ 'virt.network_info': MagicMock(return_value={'mynet': {'active': True}}), + 'virt.network_define': define_mock, + }): + ret.update({'changes': {}, 'comment': 'Network mynet exists and is running'}) +@@ -590,7 +590,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + + start_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: 
disable=no-member +- 'virt.network_info': MagicMock(return_value={'active': False}), ++ 'virt.network_info': MagicMock(return_value={'mynet': {'active': False}}), + 'virt.network_start': start_mock, + 'virt.network_define': define_mock, + }): +@@ -666,10 +666,13 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + connection='myconnection', + username='user', + password='secret') +- mocks['start'].assert_not_called() ++ mocks['start'].assert_called_with('mypool', ++ connection='myconnection', ++ username='user', ++ password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.pool_info': MagicMock(return_value={'state': 'running'}), ++ 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'running'}}), + }): + ret.update({'changes': {}, 'comment': 'Pool mypool exists and is running'}) + self.assertDictEqual(virt.pool_running('mypool', +@@ -680,7 +683,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + for mock in mocks: + mocks[mock].reset_mock() + with patch.dict(virt.__salt__, { # pylint: disable=no-member +- 'virt.pool_info': MagicMock(return_value={'state': 'stopped'}), ++ 'virt.pool_info': MagicMock(return_value={'mypool': {'state': 'stopped'}}), + 'virt.pool_build': mocks['build'], + 'virt.pool_start': mocks['start'] + }): +-- +2.20.1 + + diff --git a/_lastrevision b/_lastrevision index 1cf74dc..2092378 100644 --- a/_lastrevision +++ b/_lastrevision @@ -1 +1 @@ -3968202dca49a70046366b3807d1e6a2f762ffdf \ No newline at end of file +24e59963d380d183a48f6ddd4d66dbf6a8fa4210 \ No newline at end of file diff --git a/fix-aptpkg-systemd-call-bsc-1143301.patch b/fix-aptpkg-systemd-call-bsc-1143301.patch new file mode 100644 index 0000000..440a054 --- /dev/null +++ b/fix-aptpkg-systemd-call-bsc-1143301.patch @@ -0,0 +1,40 @@ +From f667d6f0534498e2aaa6e46242727bafc13241fd Mon Sep 17 00:00:00 2001 +From: Mihai Dinca +Date: Wed, 31 Jul 2019 15:29:03 +0200 +Subject: [PATCH] Fix aptpkg systemd call (bsc#1143301) + 
+--- + salt/modules/aptpkg.py | 2 +- + tests/unit/modules/test_aptpkg.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py +index e537f5b007..b7c1a342ef 100644 +--- a/salt/modules/aptpkg.py ++++ b/salt/modules/aptpkg.py +@@ -165,7 +165,7 @@ def _call_apt(args, scope=True, **kwargs): + ''' + cmd = [] + if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True): +- cmd.extend(['systemd-run', '--scope']) ++ cmd.extend(['systemd-run', '--scope', '--description "{0}"'.format(__name__)]) + cmd.extend(args) + + params = {'output_loglevel': 'trace', +diff --git a/tests/unit/modules/test_aptpkg.py b/tests/unit/modules/test_aptpkg.py +index 580b840197..06f3a9f6aa 100644 +--- a/tests/unit/modules/test_aptpkg.py ++++ b/tests/unit/modules/test_aptpkg.py +@@ -544,7 +544,7 @@ class AptUtilsTestCase(TestCase, LoaderModuleMockMixin): + with patch.dict(aptpkg.__salt__, {'cmd.run_all': MagicMock(), 'config.get': MagicMock(return_value=True)}): + aptpkg._call_apt(['apt-get', 'purge', 'vim']) # pylint: disable=W0106 + aptpkg.__salt__['cmd.run_all'].assert_called_once_with( +- ['systemd-run', '--scope', 'apt-get', 'purge', 'vim'], env={}, ++ ['systemd-run', '--scope', '--description "salt.modules.aptpkg"', 'apt-get', 'purge', 'vim'], env={}, + output_loglevel='trace', python_shell=False) + + def test_call_apt_with_kwargs(self): +-- +2.22.0 + + diff --git a/fix-virt.full_info-176.patch b/fix-virt.full_info-176.patch new file mode 100644 index 0000000..1b5a016 --- /dev/null +++ b/fix-virt.full_info-176.patch @@ -0,0 +1,374 @@ +From 4ce0bc544174fdb00482db4653fb4b0ef411e78b Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Tue, 3 Sep 2019 15:18:04 +0200 +Subject: [PATCH] Fix virt.full_info (#176) + +* virt.get_xml doesn't take a domain object + +In some places in the virt module, the get_xml function was called with +a domain object, leading to runtime errors like the 
following one: + +'ERROR: The VM "" is not present' + +* qemu-img info needs -U flag on running VMs + +When getting VM disks informations on a running VM, the following error +occured: + + The minion function caused an exception: Traceback (most recent call last): + File "/usr/lib/python3.6/site-packages/salt/minion.py", line 1673, in _thread_return + return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) + File "/usr/lib/python3.6/site-packages/salt/executors/direct_call.py", line 12, in execute + return func(*args, **kwargs) + File "/usr/lib/python3.6/site-packages/salt/modules/virt.py", line 2411, in full_info + 'vm_info': vm_info()} + File "/usr/lib/python3.6/site-packages/salt/modules/virt.py", line 2020, in vm_info + info[domain.name()] = _info(domain) + File "/usr/lib/python3.6/site-packages/salt/modules/virt.py", line 2004, in _info + 'disks': _get_disks(dom), + File "/usr/lib/python3.6/site-packages/salt/modules/virt.py", line 465, in _get_disks + output = _parse_qemu_img_info(qemu_output) + File "/usr/lib/python3.6/site-packages/salt/modules/virt.py", line 287, in _parse_qemu_img_info + raw_infos = salt.utils.json.loads(info) + File "/usr/lib/python3.6/site-packages/salt/utils/json.py", line 92, in loads + return json_module.loads(s, **kwargs) + File "/usr/lib64/python3.6/json/__init__.py", line 354, in loads + return _default_decoder.decode(s) + File "/usr/lib64/python3.6/json/decoder.py", line 339, in decode + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + File "/usr/lib64/python3.6/json/decoder.py", line 357, in raw_decode + raise JSONDecodeError("Expecting value", s, err.value) from None + json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0) + +This is due to the fact that qemu-img can't get infos on a disk that is +already used like by a running VM. Using the qemu-img -U flag gets it +running in all cases. 
+--- + salt/modules/virt.py | 10 +- + tests/unit/modules/test_virt.py | 242 +++++++++++++++++--------------- + 2 files changed, 132 insertions(+), 120 deletions(-) + +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index 96c17bd60b..d01b6c3f1e 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -331,7 +331,7 @@ def _get_uuid(dom): + + salt '*' virt.get_uuid + ''' +- return ElementTree.fromstring(get_xml(dom)).find('uuid').text ++ return ElementTree.fromstring(dom.XMLDesc(0)).find('uuid').text + + + def _get_on_poweroff(dom): +@@ -344,7 +344,7 @@ def _get_on_poweroff(dom): + + salt '*' virt.get_on_restart + ''' +- node = ElementTree.fromstring(get_xml(dom)).find('on_poweroff') ++ node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_poweroff') + return node.text if node is not None else '' + + +@@ -358,7 +358,7 @@ def _get_on_reboot(dom): + + salt '*' virt.get_on_reboot + ''' +- node = ElementTree.fromstring(get_xml(dom)).find('on_reboot') ++ node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_reboot') + return node.text if node is not None else '' + + +@@ -372,7 +372,7 @@ def _get_on_crash(dom): + + salt '*' virt.get_on_crash + ''' +- node = ElementTree.fromstring(get_xml(dom)).find('on_crash') ++ node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_crash') + return node.text if node is not None else '' + + +@@ -458,7 +458,7 @@ def _get_disks(dom): + if driver is not None and driver.get('type') == 'qcow2': + try: + stdout = subprocess.Popen( +- ['qemu-img', 'info', '--output', 'json', '--backing-chain', disk['file']], ++ ['qemu-img', 'info', '-U', '--output', 'json', '--backing-chain', disk['file']], + shell=False, + stdout=subprocess.PIPE).communicate()[0] + qemu_output = salt.utils.stringutils.to_str(stdout) +diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py +index e644e62452..4d20e998d8 100644 +--- a/tests/unit/modules/test_virt.py ++++ b/tests/unit/modules/test_virt.py +@@ -81,7 +81,9 @@ class 
VirtTestCase(TestCase, LoaderModuleMockMixin): + mock_domain.XMLDesc.return_value = xml # pylint: disable=no-member + + # Return state as shutdown +- mock_domain.info.return_value = [4, 0, 0, 0] # pylint: disable=no-member ++ mock_domain.info.return_value = [4, 2048 * 1024, 1024 * 1024, 2, 1234] # pylint: disable=no-member ++ mock_domain.ID.return_value = 1 ++ mock_domain.name.return_value = name + return mock_domain + + def test_disk_profile_merge(self): +@@ -1394,49 +1396,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', + interface_attrs['mac'], re.I)) + +- def test_get_graphics(self): +- ''' +- Test virt.get_graphics() +- ''' +- xml = ''' +- test-vm +- +- +- +- +- +- +- ''' +- self.set_mock_vm("test-vm", xml) +- +- graphics = virt.get_graphics('test-vm') +- self.assertEqual('vnc', graphics['type']) +- self.assertEqual('5900', graphics['port']) +- self.assertEqual('0.0.0.0', graphics['listen']) +- +- def test_get_nics(self): +- ''' +- Test virt.get_nics() +- ''' +- xml = ''' +- test-vm +- +- +- +- +- +-
+- +- +- +- ''' +- self.set_mock_vm("test-vm", xml) +- +- nics = virt.get_nics('test-vm') +- nic = nics[list(nics)[0]] +- self.assertEqual('bridge', nic['type']) +- self.assertEqual('ac:de:48:b6:8b:59', nic['mac']) +- + def test_parse_qemu_img_info(self): + ''' + Make sure that qemu-img info output is properly parsed +@@ -1558,77 +1517,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + ], + }, virt._parse_qemu_img_info(qemu_infos)) + +- def test_get_disks(self): +- ''' +- Test virt.get_disks() +- ''' +- xml = ''' +- test-vm +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- ''' +- self.set_mock_vm("test-vm", xml) +- +- qemu_infos = '''[{ +- "virtual-size": 25769803776, +- "filename": "/disks/test.qcow2", +- "cluster-size": 65536, +- "format": "qcow2", +- "actual-size": 217088, +- "format-specific": { +- "type": "qcow2", +- "data": { +- "compat": "1.1", +- "lazy-refcounts": false, +- "refcount-bits": 16, +- "corrupt": false +- } +- }, +- "full-backing-filename": "/disks/mybacking.qcow2", +- "backing-filename": "mybacking.qcow2", +- "dirty-flag": false +- }, +- { +- "virtual-size": 25769803776, +- "filename": "/disks/mybacking.qcow2", +- "cluster-size": 65536, +- "format": "qcow2", +- "actual-size": 393744384, +- "format-specific": { +- "type": "qcow2", +- "data": { +- "compat": "1.1", +- "lazy-refcounts": false, +- "refcount-bits": 16, +- "corrupt": false +- } +- }, +- "dirty-flag": false +- }]''' +- +- self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member +- disks = virt.get_disks('test-vm') +- disk = disks.get('vda') +- self.assertEqual('/disks/test.qcow2', disk['file']) +- self.assertEqual('disk', disk['type']) +- self.assertEqual('/disks/mybacking.qcow2', disk['backing file']['file']) +- cdrom = disks.get('hda') +- self.assertEqual('/disks/test-cdrom.iso', cdrom['file']) +- self.assertEqual('cdrom', cdrom['type']) +- self.assertFalse('backing file' in cdrom.keys()) +- + @patch('salt.modules.virt.stop', return_value=True) + 
@patch('salt.modules.virt.undefine') + @patch('os.remove') +@@ -2994,3 +2882,127 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + virt.volume_delete('default', 'missing') + virt.volume_delete('missing', 'test_volume') + self.assertEqual(mock_delete.call_count, 2) ++ ++ def test_full_info(self): ++ ''' ++ Test virt.full_info ++ ''' ++ xml = ''' ++ 28deee33-4859-4f23-891c-ee239cffec94 ++ test-vm ++ destroy ++ restart ++ destroy ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++ ''' ++ self.set_mock_vm("test-vm", xml) ++ ++ qemu_infos = '''[{ ++ "virtual-size": 25769803776, ++ "filename": "/disks/test.qcow2", ++ "cluster-size": 65536, ++ "format": "qcow2", ++ "actual-size": 217088, ++ "format-specific": { ++ "type": "qcow2", ++ "data": { ++ "compat": "1.1", ++ "lazy-refcounts": false, ++ "refcount-bits": 16, ++ "corrupt": false ++ } ++ }, ++ "full-backing-filename": "/disks/mybacking.qcow2", ++ "backing-filename": "mybacking.qcow2", ++ "dirty-flag": false ++ }, ++ { ++ "virtual-size": 25769803776, ++ "filename": "/disks/mybacking.qcow2", ++ "cluster-size": 65536, ++ "format": "qcow2", ++ "actual-size": 393744384, ++ "format-specific": { ++ "type": "qcow2", ++ "data": { ++ "compat": "1.1", ++ "lazy-refcounts": false, ++ "refcount-bits": 16, ++ "corrupt": false ++ } ++ }, ++ "dirty-flag": false ++ }]''' ++ ++ self.mock_popen.communicate.return_value = [qemu_infos] # pylint: disable=no-member ++ ++ self.mock_conn.getInfo = MagicMock(return_value=['x86_64', 4096, 8, 2712, 1, 2, 4, 2]) ++ ++ actual = virt.full_info() ++ ++ # Test the hypervisor infos ++ self.assertEqual(2816, actual['freemem']) ++ self.assertEqual(6, actual['freecpu']) ++ self.assertEqual(4, actual['node_info']['cpucores']) ++ self.assertEqual(2712, actual['node_info']['cpumhz']) ++ self.assertEqual('x86_64', actual['node_info']['cpumodel']) ++ self.assertEqual(8, actual['node_info']['cpus']) ++ self.assertEqual(2, actual['node_info']['cputhreads']) ++ self.assertEqual(1, actual['node_info']['numanodes']) ++ self.assertEqual(4096, actual['node_info']['phymemory']) ++ self.assertEqual(2, actual['node_info']['sockets']) ++ ++ # Test the vm_info output: ++ self.assertEqual(2, actual['vm_info']['test-vm']['cpu']) ++ self.assertEqual(1234, actual['vm_info']['test-vm']['cputime']) ++ self.assertEqual(1024 * 1024, actual['vm_info']['test-vm']['mem']) ++ self.assertEqual(2048 * 1024, actual['vm_info']['test-vm']['maxMem']) ++ self.assertEqual('shutdown', 
actual['vm_info']['test-vm']['state']) ++ self.assertEqual('28deee33-4859-4f23-891c-ee239cffec94', actual['vm_info']['test-vm']['uuid']) ++ self.assertEqual('destroy', actual['vm_info']['test-vm']['on_crash']) ++ self.assertEqual('restart', actual['vm_info']['test-vm']['on_reboot']) ++ self.assertEqual('destroy', actual['vm_info']['test-vm']['on_poweroff']) ++ ++ # Test the nics ++ nic = actual['vm_info']['test-vm']['nics']['ac:de:48:b6:8b:59'] ++ self.assertEqual('bridge', nic['type']) ++ self.assertEqual('ac:de:48:b6:8b:59', nic['mac']) ++ ++ # Test the disks ++ disks = actual['vm_info']['test-vm']['disks'] ++ disk = disks.get('vda') ++ self.assertEqual('/disks/test.qcow2', disk['file']) ++ self.assertEqual('disk', disk['type']) ++ self.assertEqual('/disks/mybacking.qcow2', disk['backing file']['file']) ++ cdrom = disks.get('hda') ++ self.assertEqual('/disks/test-cdrom.iso', cdrom['file']) ++ self.assertEqual('cdrom', cdrom['type']) ++ self.assertFalse('backing file' in cdrom.keys()) ++ ++ # Test the graphics ++ graphics = actual['vm_info']['test-vm']['graphics'] ++ self.assertEqual('vnc', graphics['type']) ++ self.assertEqual('5900', graphics['port']) ++ self.assertEqual('0.0.0.0', graphics['listen']) +-- +2.20.1 + + diff --git a/implement-network.fqdns-module-function-bsc-1134860-.patch b/implement-network.fqdns-module-function-bsc-1134860-.patch new file mode 100644 index 0000000..96fe36d --- /dev/null +++ b/implement-network.fqdns-module-function-bsc-1134860-.patch @@ -0,0 +1,326 @@ +From 76d0ec5ec0764f6c5e71ddc2dc03bd12c25045a0 Mon Sep 17 00:00:00 2001 +From: EricS <54029547+ESiebigteroth@users.noreply.github.com> +Date: Tue, 3 Sep 2019 11:22:53 +0200 +Subject: [PATCH] Implement network.fqdns module function (bsc#1134860) + (#172) + +* Duplicate fqdns logic in module.network +* Move _get_interfaces to utils.network +* Reuse network.fqdns in grains.core.fqdns +* Return empty list when fqdns grains is disabled + + +Co-authored-by: Eric Siebigteroth +--- + 
salt/grains/core.py | 66 +++++----------------------------- + salt/modules/network.py | 60 +++++++++++++++++++++++++++++++ + salt/utils/network.py | 12 +++++++ + tests/unit/grains/test_core.py | 64 ++++++++++++++++++++++++++------- + 4 files changed, 131 insertions(+), 71 deletions(-) + +diff --git a/salt/grains/core.py b/salt/grains/core.py +index e54212edfb..fa188a6ff7 100644 +--- a/salt/grains/core.py ++++ b/salt/grains/core.py +@@ -25,8 +25,9 @@ import zlib + from errno import EACCES, EPERM + import datetime + import warnings ++import salt.modules.network + +-from multiprocessing.pool import ThreadPool ++from salt.utils.network import _get_interfaces + + # pylint: disable=import-error + try: +@@ -83,6 +84,7 @@ __salt__ = { + 'cmd.run_all': salt.modules.cmdmod._run_all_quiet, + 'smbios.records': salt.modules.smbios.records, + 'smbios.get': salt.modules.smbios.get, ++ 'network.fqdns': salt.modules.network.fqdns, + } + log = logging.getLogger(__name__) + +@@ -106,7 +108,6 @@ HAS_UNAME = True + if not hasattr(os, 'uname'): + HAS_UNAME = False + +-_INTERFACES = {} + + # Possible value for h_errno defined in netdb.h + HOST_NOT_FOUND = 1 +@@ -1506,17 +1507,6 @@ def _linux_bin_exists(binary): + return False + + +-def _get_interfaces(): +- ''' +- Provide a dict of the connected interfaces and their ip addresses +- ''' +- +- global _INTERFACES +- if not _INTERFACES: +- _INTERFACES = salt.utils.network.interfaces() +- return _INTERFACES +- +- + def _parse_lsb_release(): + ret = {} + try: +@@ -2200,52 +2190,12 @@ def fqdns(): + ''' + Return all known FQDNs for the system by enumerating all interfaces and + then trying to reverse resolve them (excluding 'lo' interface). ++ To disable the fqdns grain, set enable_fqdns_grains: False in the minion configuration file. 
+ ''' +- # Provides: +- # fqdns +- +- grains = {} +- fqdns = set() +- +- def _lookup_fqdn(ip): +- try: +- name, aliaslist, addresslist = socket.gethostbyaddr(ip) +- return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] +- except socket.herror as err: +- if err.errno in (0, HOST_NOT_FOUND, NO_DATA): +- # No FQDN for this IP address, so we don't need to know this all the time. +- log.debug("Unable to resolve address %s: %s", ip, err) +- else: +- log.error(err_message, err) +- except (socket.error, socket.gaierror, socket.timeout) as err: +- log.error(err_message, err) +- +- start = time.time() +- +- addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces()) +- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces())) +- err_message = 'Exception during resolving address: %s' +- +- # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. +- # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing +- # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. 
+- +- try: +- pool = ThreadPool(8) +- results = pool.map(_lookup_fqdn, addresses) +- pool.close() +- pool.join() +- except Exception as exc: +- log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) +- +- for item in results: +- if item: +- fqdns.update(item) +- +- elapsed = time.time() - start +- log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) +- +- return {"fqdns": sorted(list(fqdns))} ++ opt = {"fqdns": []} ++ if __opts__.get('enable_fqdns_grains', True) == True: ++ opt = __salt__['network.fqdns']() ++ return opt + + + def ip_fqdn(): +diff --git a/salt/modules/network.py b/salt/modules/network.py +index 28bcff1622..5b6ac930ea 100644 +--- a/salt/modules/network.py ++++ b/salt/modules/network.py +@@ -11,6 +11,10 @@ import logging + import re + import os + import socket ++import time ++ ++from multiprocessing.pool import ThreadPool ++ + + # Import salt libs + import salt.utils.decorators.path +@@ -1881,3 +1885,59 @@ def iphexval(ip): + a = ip.split('.') + hexval = ['%02X' % int(x) for x in a] # pylint: disable=E1321 + return ''.join(hexval) ++ ++ ++def fqdns(): ++ ''' ++ Return all known FQDNs for the system by enumerating all interfaces and ++ then trying to reverse resolve them (excluding 'lo' interface). ++ ''' ++ # Provides: ++ # fqdns ++ ++ # Possible value for h_errno defined in netdb.h ++ HOST_NOT_FOUND = 1 ++ NO_DATA = 4 ++ ++ grains = {} ++ fqdns = set() ++ ++ def _lookup_fqdn(ip): ++ try: ++ name, aliaslist, addresslist = socket.gethostbyaddr(ip) ++ return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)] ++ except socket.herror as err: ++ if err.errno in (0, HOST_NOT_FOUND, NO_DATA): ++ # No FQDN for this IP address, so we don't need to know this all the time. 
++ log.debug("Unable to resolve address %s: %s", ip, err) ++ else: ++ log.error(err_message, err) ++ except (socket.error, socket.gaierror, socket.timeout) as err: ++ log.error(err_message, err) ++ ++ start = time.time() ++ ++ addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=salt.utils.network._get_interfaces()) ++ addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=salt.utils.network._get_interfaces())) ++ err_message = 'Exception during resolving address: %s' ++ ++ # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel. ++ # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing ++ # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds. ++ ++ try: ++ pool = ThreadPool(8) ++ results = pool.map(_lookup_fqdn, addresses) ++ pool.close() ++ pool.join() ++ except Exception as exc: ++ log.error("Exception while creating a ThreadPool for resolving FQDNs: %s", exc) ++ ++ for item in results: ++ if item: ++ fqdns.update(item) ++ ++ elapsed = time.time() - start ++ log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed)) ++ ++ return {"fqdns": sorted(list(fqdns))} +\ No newline at end of file +diff --git a/salt/utils/network.py b/salt/utils/network.py +index 3f0522b9a5..942adf1ca4 100644 +--- a/salt/utils/network.py ++++ b/salt/utils/network.py +@@ -55,6 +55,18 @@ except (ImportError, OSError, AttributeError, TypeError): + # pylint: disable=C0103 + + ++_INTERFACES = {} ++def _get_interfaces(): #! function ++ ''' ++ Provide a dict of the connected interfaces and their ip addresses ++ ''' ++ ++ global _INTERFACES ++ if not _INTERFACES: ++ _INTERFACES = interfaces() ++ return _INTERFACES ++ ++ + def sanitize_host(host): + ''' + Sanitize host string. 
+diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py +index 5fa0ea06f1..889fb90074 100644 +--- a/tests/unit/grains/test_core.py ++++ b/tests/unit/grains/test_core.py +@@ -33,6 +33,7 @@ import salt.utils.network + import salt.utils.platform + import salt.utils.path + import salt.grains.core as core ++import salt.modules.network + + # Import 3rd-party libs + from salt.ext import six +@@ -845,6 +846,40 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + with patch.object(salt.utils.dns, 'parse_resolv', MagicMock(return_value=resolv_mock)): + assert core.dns() == ret + ++ ++ def test_enablefqdnsFalse(self): ++ ''' ++ tests enable_fqdns_grains is set to False ++ ''' ++ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':False}): ++ assert core.fqdns() == {"fqdns": []} ++ ++ ++ def test_enablefqdnsTrue(self): ++ ''' ++ testing that grains uses network.fqdns module ++ ''' ++ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}): ++ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':True}): ++ assert core.fqdns() == 'my.fake.domain' ++ ++ ++ def test_enablefqdnsNone(self): ++ ''' ++ testing default fqdns grains is returned when enable_fqdns_grains is None ++ ''' ++ with patch.dict('salt.grains.core.__opts__', {'enable_fqdns_grains':None}): ++ assert core.fqdns() == {"fqdns": []} ++ ++ ++ def test_enablefqdnswithoutpaching(self): ++ ''' ++ testing fqdns grains is enabled by default ++ ''' ++ with patch.dict('salt.grains.core.__salt__', {'network.fqdns': MagicMock(return_value="my.fake.domain")}): ++ assert core.fqdns() == 'my.fake.domain' ++ ++ + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') + @patch.object(salt.utils, 'is_windows', MagicMock(return_value=False)) + @patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8'])) +@@ -861,11 +896,12 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + 
('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']), + ('bluesniff.foo.bar', [], ['fe80::a8b2:93ff:dead:beef'])] + ret = {'fqdns': ['bluesniff.foo.bar', 'foo.bar.baz', 'rinzler.evil-corp.com']} +- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): +- fqdns = core.fqdns() +- assert "fqdns" in fqdns +- assert len(fqdns['fqdns']) == len(ret['fqdns']) +- assert set(fqdns['fqdns']) == set(ret['fqdns']) ++ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): ++ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): ++ fqdns = core.fqdns() ++ assert "fqdns" in fqdns ++ assert len(fqdns['fqdns']) == len(ret['fqdns']) ++ assert set(fqdns['fqdns']) == set(ret['fqdns']) + + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') + @patch.object(salt.utils.platform, 'is_windows', MagicMock(return_value=False)) +@@ -881,14 +917,16 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + ('rinzler.evil-corp.com', ["false-hostname", "badaliass"], ['5.6.7.8']), + ('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']), + ('bluesniff.foo.bar', ["alias.bluesniff.foo.bar"], ['fe80::a8b2:93ff:dead:beef'])] +- with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): +- fqdns = core.fqdns() +- assert "fqdns" in fqdns +- for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: +- assert alias in fqdns["fqdns"] +- +- for alias in ["throwmeaway", "false-hostname", "badaliass"]: +- assert alias not in fqdns["fqdns"] ++ with patch.dict(core.__salt__, {'network.fqdns': salt.modules.network.fqdns}): ++ with patch.object(socket, 'gethostbyaddr', side_effect=reverse_resolv_mock): ++ fqdns = core.fqdns() ++ assert "fqdns" in fqdns ++ for alias in ["this.is.valid.alias", "alias.bluesniff.foo.bar"]: ++ assert alias in fqdns["fqdns"] ++ ++ for alias in ["throwmeaway", "false-hostname", "badaliass"]: ++ assert alias not in fqdns["fqdns"] ++ + def test_core_virtual(self): + ''' + test virtual 
grain with cmd virt-what +-- +2.22.0 + + diff --git a/move-server_id-deprecation-warning-to-reduce-log-spa.patch b/move-server_id-deprecation-warning-to-reduce-log-spa.patch new file mode 100644 index 0000000..f2ed7ec --- /dev/null +++ b/move-server_id-deprecation-warning-to-reduce-log-spa.patch @@ -0,0 +1,57 @@ +From dab9967f8e4a67e5b7ddd4e6718414d2e9b25e42 Mon Sep 17 00:00:00 2001 +From: Mihai Dinca +Date: Fri, 14 Jun 2019 15:13:12 +0200 +Subject: [PATCH] Move server_id deprecation warning to reduce log + spamming (bsc#1135567) (bsc#1135732) + +--- + salt/grains/core.py | 4 ---- + salt/minion.py | 9 +++++++++ + 2 files changed, 9 insertions(+), 4 deletions(-) + +diff --git a/salt/grains/core.py b/salt/grains/core.py +index ce64620a24..e54212edfb 100644 +--- a/salt/grains/core.py ++++ b/salt/grains/core.py +@@ -2812,10 +2812,6 @@ def get_server_id(): + if bool(use_crc): + id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff + else: +- salt.utils.versions.warn_until('Sodium', 'This server_id is computed nor by Adler32 neither by CRC32. ' +- 'Please use "server_id_use_crc" option and define algorithm you' +- 'prefer (default "Adler32"). 
The server_id will be computed with'
+-                                       'Adler32 by default.')
+         id_hash = _get_hash_by_shell()
+     server_id = {'server_id': id_hash}
+ 
+diff --git a/salt/minion.py b/salt/minion.py
+index 058b7ef6b8..97f74bf47e 100644
+--- a/salt/minion.py
++++ b/salt/minion.py
+@@ -103,6 +103,7 @@ from salt.utils.odict import OrderedDict
+ from salt.utils.process import (default_signals,
+                                 SignalHandlingMultiprocessingProcess,
+                                 ProcessManager)
++from salt.utils.versions import warn_until
+ from salt.exceptions import (
+     CommandExecutionError,
+     CommandNotFoundError,
+@@ -992,6 +993,14 @@ class MinionManager(MinionBase):
+         if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
+             masters = [masters]
+ 
++        if not self.opts.get('server_id_use_crc'):
++            warn_until(
++                'Sodium',
++                'This server_id is computed nor by Adler32 neither by CRC32. '
++                'Please use "server_id_use_crc" option and define algorithm you'
++                'prefer (default "Adler32"). The server_id will be computed with'
++                'Adler32 by default.')
++
+         for master in masters:
+             s_opts = copy.deepcopy(self.opts)
+             s_opts['master'] = master
+-- 
+2.22.0
+
+
diff --git a/restore-default-behaviour-of-pkg-list-return.patch b/restore-default-behaviour-of-pkg-list-return.patch
new file mode 100644
index 0000000..394479a
--- /dev/null
+++ b/restore-default-behaviour-of-pkg-list-return.patch
@@ -0,0 +1,135 @@
+From 56fd68474f399a36b0a74ca9a01890649d997792 Mon Sep 17 00:00:00 2001
+From: Jochen Breuer
+Date: Fri, 30 Aug 2019 14:20:06 +0200
+Subject: [PATCH] Restore default behaviour of pkg list return
+
+The default behaviour for pkg list return was to not include patches,
+even when installing patches. Only the packages were returned. There
+is now a parameter to also return patches if that is needed.
+ +Co-authored-by: Mihai Dinca +--- + salt/modules/zypperpkg.py | 32 +++++++++++++++++++++++--------- + 1 file changed, 23 insertions(+), 9 deletions(-) + +diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py +index f71d6aac9e..da1953b2a5 100644 +--- a/salt/modules/zypperpkg.py ++++ b/salt/modules/zypperpkg.py +@@ -1302,8 +1302,10 @@ def refresh_db(root=None): + return ret + + +-def _find_types(pkgs): ++def _detect_includes(pkgs, inclusion_detection): + '''Form a package names list, find prefixes of packages types.''' ++ if not inclusion_detection: ++ return None + return sorted({pkg.split(':', 1)[0] for pkg in pkgs + if len(pkg.split(':', 1)) == 2}) + +@@ -1319,6 +1321,7 @@ def install(name=None, + ignore_repo_failure=False, + no_recommends=False, + root=None, ++ inclusion_detection=False, + **kwargs): + ''' + .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 +@@ -1433,6 +1436,9 @@ def install(name=None, + + .. versionadded:: 2018.3.0 + ++ inclusion_detection: ++ Detect ``includes`` based on ``sources`` ++ By default packages are always included + + Returns a dict containing the new package names and versions:: + +@@ -1498,7 +1504,8 @@ def install(name=None, + + diff_attr = kwargs.get("diff_attr") + +- includes = _find_types(targets) ++ includes = _detect_includes(targets, inclusion_detection) ++ + old = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root) + + downgrades = [] +@@ -1688,7 +1695,7 @@ def upgrade(refresh=True, + return ret + + +-def _uninstall(name=None, pkgs=None, root=None): ++def _uninstall(inclusion_detection, name=None, pkgs=None, root=None): + ''' + Remove and purge do identical things but with different Zypper commands, + this function performs the common logic. 
+@@ -1698,7 +1705,7 @@ def _uninstall(name=None, pkgs=None, root=None): + except MinionError as exc: + raise CommandExecutionError(exc) + +- includes = _find_types(pkg_params.keys()) ++ includes = _detect_includes(pkg_params.keys(), inclusion_detection) + old = list_pkgs(root=root, includes=includes) + targets = [] + for target in pkg_params: +@@ -1757,7 +1764,7 @@ def normalize_name(name): + return name + + +-def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument ++def remove(name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs): # pylint: disable=unused-argument + ''' + .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 + On minions running systemd>=205, `systemd-run(1)`_ is now used to +@@ -1788,8 +1795,11 @@ def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused + root + Operate on a different root directory. + +- .. versionadded:: 0.16.0 ++ inclusion_detection: ++ Detect ``includes`` based on ``pkgs`` ++ By default packages are always included + ++ .. versionadded:: 0.16.0 + + Returns a dict containing the changes. + +@@ -1801,10 +1811,10 @@ def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused + salt '*' pkg.remove ,, + salt '*' pkg.remove pkgs='["foo", "bar"]' + ''' +- return _uninstall(name=name, pkgs=pkgs, root=root) ++ return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root) + + +-def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument ++def purge(name=None, pkgs=None, root=None, inclusion_detection=False, **kwargs): # pylint: disable=unused-argument + ''' + .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 + On minions running systemd>=205, `systemd-run(1)`_ is now used to +@@ -1836,6 +1846,10 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused- + root + Operate on a different root directory. 
+ ++ inclusion_detection: ++ Detect ``includes`` based on ``pkgs`` ++ By default packages are always included ++ + .. versionadded:: 0.16.0 + + +@@ -1849,7 +1863,7 @@ def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused- + salt '*' pkg.purge ,, + salt '*' pkg.purge pkgs='["foo", "bar"]' + ''' +- return _uninstall(name=name, pkgs=pkgs, root=root) ++ return _uninstall(inclusion_detection, name=name, pkgs=pkgs, root=root) + + + def list_locks(root=None): +-- +2.20.1 + + diff --git a/salt.changes b/salt.changes index c9a3232..862d38f 100644 --- a/salt.changes +++ b/salt.changes @@ -1,3 +1,61 @@ +------------------------------------------------------------------- +Thu Sep 5 17:45:50 UTC 2019 - Jochen Breuer + +- Require shadow instead of old pwdutils (bsc#1130588) + +------------------------------------------------------------------- +Wed Sep 4 18:45:56 UTC 2019 - Jochen Breuer + +- Conflict with tornado >= 5; for now we can only cope with Tornado 4.x (boo#1101780). 
+ +------------------------------------------------------------------- +Tue Sep 3 15:16:15 UTC 2019 - Mihai Dincă + +- Fix virt.full_info (bsc#1146382) +- virt.volume_infos: silence libvirt error message +- virt.volume_infos needs to ignore inactive pools +- Fix for various bugs in virt network and pool states +- Implement network.fqdns module function (bsc#1134860) + +- Added: + * 2019.2.0-pr-54196-backport-173.patch + * virt.volume_infos-silence-libvirt-error-message-175.patch + * fix-virt.full_info-176.patch + * implement-network.fqdns-module-function-bsc-1134860-.patch + * virt.volume_infos-needs-to-ignore-inactive-pools-174.patch + +------------------------------------------------------------------- +Fri Aug 30 13:36:05 UTC 2019 - Jochen Breuer + +- Restore default behaviour of pkg list return (bsc#1148714) +- Strip trailing "/" from repo.uri when comparing repos in apktpkg.mod_repo (bsc#1146192) + +- Added: + * strip-trailing-from-repo.uri-when-comparing-repos-in.patch + * restore-default-behaviour-of-pkg-list-return.patch + +------------------------------------------------------------------- +Tue Aug 13 10:43:21 UTC 2019 - Pablo Suárez Hernández + +- Use python3 to build package Salt for RHEL8 +- Make python3 default for RHEL8 + +------------------------------------------------------------------- +Fri Aug 9 09:45:31 UTC 2019 - Mihai Dincă + +- Fix aptpkg systemd call (bsc#1143301) + +- Added: + * fix-aptpkg-systemd-call-bsc-1143301.patch + +------------------------------------------------------------------- +Tue Jul 30 14:56:02 UTC 2019 - Mihai Dincă + +- Move server_id deprecation warning to reduce log spamming (bsc#1135567) (bsc#1135732) + +- Added: + * move-server_id-deprecation-warning-to-reduce-log-spa.patch + ------------------------------------------------------------------- Tue Jul 30 09:34:27 UTC 2019 - Pablo Suárez Hernández diff --git a/salt.spec b/salt.spec index 6aa0a9d..ca2afe5 100644 --- a/salt.spec +++ b/salt.spec @@ -26,8 +26,13 @@ %global 
build_py3 1 %global build_py2 1 %else +%if 0%{?rhel} == 7 # RES7 %global build_py2 1 +%else +%global build_py3 1 +%global default_py3 1 +%endif %endif %endif %define pythonX %{?default_py3: python3}%{!?default_py3: python2} @@ -215,6 +220,25 @@ Patch72: avoid-traceback-when-http.query-request-cannot-be-pe.patch # https://github.com/saltstack/salt/pull/54022 # https://github.com/saltstack/salt/pull/54024 Patch73: accumulated-changes-required-for-yomi-165.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/159 +Patch74: move-server_id-deprecation-warning-to-reduce-log-spa.patch +# PATCH_FIX_UPSTREAM: https://github.com/saltstack/salt/pull/54077 +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/166 +Patch75: fix-aptpkg-systemd-call-bsc-1143301.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/170 +Patch76: strip-trailing-from-repo.uri-when-comparing-repos-in.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/177 +Patch77: restore-default-behaviour-of-pkg-list-return.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/172 +Patch78: implement-network.fqdns-module-function-bsc-1134860-.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/173 +Patch79: 2019.2.0-pr-54196-backport-173.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/174 +Patch80: virt.volume_infos-needs-to-ignore-inactive-pools-174.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/175 +Patch81: virt.volume_infos-silence-libvirt-error-message-175.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/176 +Patch82: fix-virt.full_info-176.patch # BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildRoot: %{_tmppath}/%{name}-%{version}-build @@ -230,7 +254,7 @@ Requires(pre): %{_sbindir}/useradd %if 0%{?suse_version} Requires(pre): %fillup_prereq -Requires(pre): pwdutils +Requires(pre): shadow %endif %if 0%{?suse_version} @@ -313,6 +337,8 @@ BuildRequires: python-msgpack-python 
> 0.3 BuildRequires: python-psutil BuildRequires: python-requests >= 1.0.0 BuildRequires: python-tornado >= 4.2.1 +# We can't cope with tornado 5.x and newer (boo#1101780) +BuildConflicts: python3-tornado >= 5 # requirements/zeromq.txt BuildRequires: python-pycrypto >= 2.6.1 @@ -355,6 +381,8 @@ Requires: python-msgpack-python > 0.3 Requires: python-psutil Requires: python-requests >= 1.0.0 Requires: python-tornado >= 4.2.1 +# We can't cope with tornado 5.x and newer (boo#1101780) +Conflicts: python3-tornado >= 5 %if 0%{?suse_version} # required for zypper.py Requires: rpm-python @@ -401,6 +429,8 @@ BuildRequires: python3-msgpack-python > 0.3 BuildRequires: python3-psutil BuildRequires: python3-requests >= 1.0.0 BuildRequires: python3-tornado >= 4.2.1 +# We can't cope with tornado 5.x and newer (boo#1101780) +BuildConflicts: python3-tornado >= 5 # requirements/zeromq.txt BuildRequires: python3-pycrypto >= 2.6.1 @@ -439,6 +469,8 @@ Requires: python3-msgpack-python > 0.3 Requires: python3-psutil Requires: python3-requests >= 1.0.0 Requires: python3-tornado >= 4.2.1 +# We can't cope with tornado 5.x and newer (boo#1101780) +Conflicts: python3-tornado >= 5 %if 0%{?suse_version} # required for zypper.py Requires: python3-rpm @@ -758,6 +790,15 @@ cp %{S:5} ./.travis.yml %patch71 -p1 %patch72 -p1 %patch73 -p1 +%patch74 -p1 +%patch75 -p1 +%patch76 -p1 +%patch77 -p1 +%patch78 -p1 +%patch79 -p1 +%patch80 -p1 +%patch81 -p1 +%patch82 -p1 %build %if 0%{?build_py2} diff --git a/strip-trailing-from-repo.uri-when-comparing-repos-in.patch b/strip-trailing-from-repo.uri-when-comparing-repos-in.patch new file mode 100644 index 0000000..301642e --- /dev/null +++ b/strip-trailing-from-repo.uri-when-comparing-repos-in.patch @@ -0,0 +1,28 @@ +From 1b6f3f2e8b88ddfaebd5bfd1ae8258d417a9f098 Mon Sep 17 00:00:00 2001 +From: Matei Albu +Date: Fri, 15 Feb 2019 14:34:13 +0100 +Subject: [PATCH] Strip trailing "/" from repo.uri when comparing repos + in apktpkg.mod_repo (bsc#1146192) + +(cherry 
picked from commit af85627) +--- + salt/modules/aptpkg.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py +index b7c1a342ef..d49a48310e 100644 +--- a/salt/modules/aptpkg.py ++++ b/salt/modules/aptpkg.py +@@ -2354,7 +2354,7 @@ def mod_repo(repo, saltenv='base', **kwargs): + # and the resulting source line. The idea here is to ensure + # we are not returning bogus data because the source line + # has already been modified on a previous run. +- repo_matches = source.type == repo_type and source.uri == repo_uri and source.dist == repo_dist ++ repo_matches = source.type == repo_type and source.uri.rstrip('/') == repo_uri.rstrip('/') and source.dist == repo_dist + kw_matches = source.dist == kw_dist and source.type == kw_type + + if repo_matches or kw_matches: +-- +2.20.1 + + diff --git a/virt.volume_infos-needs-to-ignore-inactive-pools-174.patch b/virt.volume_infos-needs-to-ignore-inactive-pools-174.patch new file mode 100644 index 0000000..2050f02 --- /dev/null +++ b/virt.volume_infos-needs-to-ignore-inactive-pools-174.patch @@ -0,0 +1,83 @@ +From df1caa8fa6551f880202649a7f4133343da5da0f Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Tue, 3 Sep 2019 15:17:38 +0200 +Subject: [PATCH] virt.volume_infos needs to ignore inactive pools (#174) + +libvirt raises an error when getting the list of volumes of a pool that +is not active. Rule out those pools from virt.volume_infos since we +still need to give infos on the other pools' volumes. 
+--- + salt/modules/virt.py | 7 +++++-- + tests/unit/modules/test_virt.py | 9 +++++++++ + 2 files changed, 14 insertions(+), 2 deletions(-) + +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index 953064cc2c..0353e6a1f5 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -5021,7 +5021,9 @@ def _get_all_volumes_paths(conn): + + :param conn: libvirt connection to use + ''' +- volumes = [vol for l in [obj.listAllVolumes() for obj in conn.listAllStoragePools()] for vol in l] ++ volumes = [vol for l in ++ [obj.listAllVolumes() for obj in conn.listAllStoragePools() ++ if obj.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING] for vol in l] + return {vol.path(): [path.text for path in ElementTree.fromstring(vol.XMLDesc()).findall('.//backingStore/path')] + for vol in volumes if _is_valid_volume(vol)} + +@@ -5086,7 +5088,8 @@ def volume_infos(pool=None, volume=None, **kwargs): + 'used_by': used_by, + } + +- pools = [obj for obj in conn.listAllStoragePools() if pool is None or obj.name() == pool] ++ pools = [obj for obj in conn.listAllStoragePools() ++ if (pool is None or obj.name() == pool) and obj.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING] + vols = {pool_obj.name(): {vol.name(): _volume_extract_infos(vol) + for vol in pool_obj.listAllVolumes() + if (volume is None or vol.name() == volume) and _is_valid_volume(vol)} +diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py +index b343b9bc31..e644e62452 100644 +--- a/tests/unit/modules/test_virt.py ++++ b/tests/unit/modules/test_virt.py +@@ -2743,6 +2743,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + mock_pool_data = [ + { + 'name': 'pool0', ++ 'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING, + 'volumes': [ + { + 'key': '/key/of/vol0', +@@ -2755,6 +2756,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + }, + { + 'name': 'pool1', ++ 'state': self.mock_libvirt.VIR_STORAGE_POOL_RUNNING, + 'volumes': [ + { + 'key': '/key/of/vol0bad', +@@ -2784,6 +2786,7 @@ 
class VirtTestCase(TestCase, LoaderModuleMockMixin): + for pool_data in mock_pool_data: + mock_pool = MagicMock() + mock_pool.name.return_value = pool_data['name'] # pylint: disable=no-member ++ mock_pool.info.return_value = [pool_data['state']] + mock_volumes = [] + for vol_data in pool_data['volumes']: + mock_volume = MagicMock() +@@ -2817,6 +2820,12 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + mock_pool.listAllVolumes.return_value = mock_volumes # pylint: disable=no-member + mock_pools.append(mock_pool) + ++ inactive_pool = MagicMock() ++ inactive_pool.name.return_value = 'pool2' ++ inactive_pool.info.return_value = [self.mock_libvirt.VIR_STORAGE_POOL_INACTIVE] ++ inactive_pool.listAllVolumes.side_effect = self.mock_libvirt.libvirtError('pool is inactive') ++ mock_pools.append(inactive_pool) ++ + self.mock_conn.listAllStoragePools.return_value = mock_pools # pylint: disable=no-member + + with patch('salt.modules.virt._get_domain', MagicMock(return_value=mock_vms)): +-- +2.20.1 + + diff --git a/virt.volume_infos-silence-libvirt-error-message-175.patch b/virt.volume_infos-silence-libvirt-error-message-175.patch new file mode 100644 index 0000000..6ba2a46 --- /dev/null +++ b/virt.volume_infos-silence-libvirt-error-message-175.patch @@ -0,0 +1,37 @@ +From fa621d17371ea6c8eff75460755c0040fcbf13de Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Tue, 3 Sep 2019 15:17:46 +0200 +Subject: [PATCH] virt.volume_infos: silence libvirt error message (#175) + +Even though the volume_infos handles the libvirt exception when a volume +is missing, libvirt was still outputting the error message in the log. +Since this can add noise to the log only record the libvirt error +message in debug level. 
+--- + salt/modules/virt.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index 0353e6a1f5..96c17bd60b 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -5008,8 +5008,14 @@ def _is_valid_volume(vol): + the last pool refresh. + ''' + try: +- # Getting info on an invalid volume raises error ++ # Getting info on an invalid volume raises error and libvirt logs an error ++ def discarder(ctxt, error): # pylint: disable=unused-argument ++ log.debug("Ignore libvirt error: %s", error[2]) ++ # Disable the libvirt error logging ++ libvirt.registerErrorHandler(discarder, None) + vol.info() ++ # Reenable the libvirt error logging ++ libvirt.registerErrorHandler(None, None) + return True + except libvirt.libvirtError as err: + return False +-- +2.20.1 + +