
Accepting request 571062 from Cloud:Tools

- drop dependency on boto (only used in examples, and
  should really be ported to botocore/boto3 instead)

- Update to version 17.2 (boo#1069635, bsc#1072811)
  + Add cloud-init-skip-ovf-tests.patch
  + Add cloud-init-no-python-linux-dist.patch
  + Add 0001-switch-to-using-iproute2-tools.patch
  + Add 0001-Support-chrony-configuration-lp-1731619.patch
  + Add 0002-Disable-method-deprecation-warning-for-pylint.patch
  + Add 0003-Distro-dependent-chrony-config-file.patch
  + removed cloud-init-add-variant-cloudcfg.patch replaced by
    cloud-init-no-python-linux-dist.patch
  + removed zypp_add_repos.diff included upstream
  + removed zypp_add_repo_test.patch included upstream
  + removed cloud-init-hosts-template.patch included upstream
  + removed cloud-init-more-tasks.patch included upstream
  + removed cloud-init-final-no-apt.patch included upstream
  + removed cloud-init-ntp-conf-suse.patch included upstream
  + removed cloud-init-break-cycle-local-service.patch included upstream
  + removed cloud-init-reproduce-build.patch included upstream
  + For the complete changelog see https://launchpad.net/cloud-init/trunk/17.2

OBS-URL: https://build.opensuse.org/request/show/571062
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/cloud-init?expand=0&rev=46
Commit 697db804a2 by Dominique Leuenberger, 2018-01-30 14:46:07 +00:00, committed by Git OBS Bridge
19 changed files with 1992 additions and 934 deletions


@@ -0,0 +1,782 @@
From 23f976be51ba9ad6e1e173f23c7220144beb942a Mon Sep 17 00:00:00 2001
From: Robert Schweikert <rjschwei@suse.com>
Date: Tue, 14 Nov 2017 18:24:17 -0500
Subject: [PATCH 1/3] - Support chrony configuration (lp#1731619) + Add a
template for chrony configuration + Add new set_timesync_client to distros
base class - Set the timesync client provided in the config by the user
with system_info: ntp_client - If no user config set the timesync
client to one of the supported clients if the executable is installed
- Fall back to the distribution default + Handle the new settings in
cc_ntp while retaining current behavior as the fallback until all distro
implementations have switched to the new implementation + Use new way
of ntp client configuration for openSUSE and SLES + Unit tests
---
cloudinit/config/cc_ntp.py | 59 +++++++++----
cloudinit/distros/__init__.py | 40 +++++++++
cloudinit/distros/arch.py | 4 +
cloudinit/distros/debian.py | 4 +
cloudinit/distros/freebsd.py | 4 +
cloudinit/distros/gentoo.py | 4 +
cloudinit/distros/opensuse.py | 41 +++++++++
cloudinit/distros/rhel.py | 4 +
templates/chrony.conf.tmpl | 25 ++++++
tests/unittests/test_distros/test_generic.py | 101 +++++++++++++++++++++--
tests/unittests/test_distros/test_opensuse.py | 44 +++++++++-
tests/unittests/test_distros/test_sles.py | 30 ++++++-
tests/unittests/test_handler/test_handler_ntp.py | 80 ++++++++++++++----
13 files changed, 400 insertions(+), 40 deletions(-)
create mode 100644 templates/chrony.conf.tmpl
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index f50bcb35..2f662a9e 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -20,8 +20,9 @@ from textwrap import dedent
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-NTP_CONF = '/etc/ntp.conf'
-TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
+CHRONY_CONF_FILE = '/etc/chrony.conf'
+NTP_CONF_FILE = '/etc/ntp.conf'
+TIMESYNCD_CONF_FILE = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
@@ -110,26 +111,48 @@ def handle(name, cfg, cloud, log, _args):
" but not a dictionary type,"
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
- validate_cloudconfig_schema(cfg, schema)
- if ntp_installable():
- service_name = 'ntp'
- confpath = NTP_CONF
- template_name = None
- packages = ['ntp']
- check_exe = 'ntpd'
+ if ntp_cfg.get('enabled') and ntp_cfg.get('enabled') == 'true':
+ cloud.distro.set_timesync_client()
else:
- service_name = 'systemd-timesyncd'
- confpath = TIMESYNCD_CONF
- template_name = 'timesyncd.conf'
- packages = []
- check_exe = '/lib/systemd/systemd-timesyncd'
+ # When all distro implementations are switched return here
+ pass
- rename_ntp_conf()
+ validate_cloudconfig_schema(cfg, schema)
+ if hasattr(cloud.distro, 'timesync_client'):
+ client_name = cloud.distro.timesync_client
+ service_name = cloud.distro.timesync_service_name
+ if client_name == 'ntp':
+ confpath = NTP_CONF_FILE
+ template_name = 'ntp.conf.%s' % cloud.distro.name
+ elif client_name == 'systemd-timesyncd':
+ confpath = TIMESYNCD_CONF_FILE
+ template_name = 'timesyncd.conf'
+ elif client_name == 'chrony':
+ confpath = CHRONY_CONF_FILE
+ template_name = 'chrony.conf'
+ else:
+ if ntp_installable():
+ service_name = 'ntp'
+ confpath = NTP_CONF_FILE
+ template_name = None
+ packages = ['ntp']
+ check_exe = 'ntpd'
+ else:
+ service_name = 'systemd-timesyncd'
+ confpath = TIMESYNCD_CONF_FILE
+ template_name = 'timesyncd.conf'
+ packages = []
+ check_exe = '/lib/systemd/systemd-timesyncd'
+
+ rename_ntp_conf(confpath)
# ensure when ntp is installed it has a configuration file
# to use instead of starting up with packaged defaults
write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
- install_ntp(cloud.distro.install_packages, packages=packages,
- check_exe=check_exe)
+ if not hasattr(cloud.distro, 'timesync_client'):
+ # Updated implementation installs a package if missing in
+ # distro._set_default_timesync_client
+ install_ntp(cloud.distro.install_packages, packages=packages,
+ check_exe=check_exe)
try:
reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
@@ -167,7 +190,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
def rename_ntp_conf(config=None):
"""Rename any existing ntp.conf file"""
if config is None: # For testing
- config = NTP_CONF
+ config = NTP_CONF_FILE
if os.path.exists(config):
util.rename(config, config + ".dist")
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 99e60e7a..41ae097d 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -57,6 +57,9 @@ class Distro(object):
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
+ __timesync_client_map = {}
+ __ntp_client_execs = []
+
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
@@ -86,6 +89,43 @@ class Distro(object):
renderer.render_network_config(network_config=network_config)
return []
+ def set_timesync_client(self):
+ system_info = self._cfg.get('system_info')
+ if system_info and isinstance(system_info, (dict)):
+ ntp_client = system_info.get('ntp_client')
+ if ntp_client and ntp_client in self.__timesync_client_map:
+ self.timesync_client, self.timesync_service_name = \
+ self.__timesync_client_map.get(ntp_client)
+ LOG.debug('Using "%s" for timesync client per configuration',
+ ntp_client)
+ return
+
+ found = False
+ for ntp_client in self.__ntp_client_execs:
+ ntp_exec = util.which(ntp_client)
+ if ntp_exec and not found:
+ found = ntp_client
+ # systemd-timesyncd is part of systemd and thus is probably
+ # always installed, do not consider it as a conflict
+ elif ntp_exec and found and 'systemd-timesyncd' not in ntp_exec:
+ msg = 'Found multiple timesync clients installed. Resolve '
+ msg += 'ambiguity by falling back to distro default'
+ LOG.debug(msg)
+ found = False
+ break
+
+ if found and found in self.__timesync_client_map:
+ self.timesync_client, self.timesync_service_name = \
+ self.__timesync_client_map.get(found)
+ LOG.debug('Using "%s" for timesync based on installed exec',
+ ntp_client)
+ return
+
+ self._set_default_timesync_client()
+
+ def _set_default_timesync_client(self):
+ raise NotImplementedError()
+
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index f87a3432..fffc1c9c 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -153,6 +153,10 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
["-y"], freq=PER_INSTANCE)
+ def _set_default_timesync_client(self):
+ # Fall back to previous implementation
+ return
+
def _render_network(entries, target="/", conf_dir="etc/netctl",
resolv_conf="etc/resolv.conf", enable_func=None):
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 33cc0bf1..46dd4173 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -212,6 +212,10 @@ class Distro(distros.Distro):
(arch, _err) = util.subp(['dpkg', '--print-architecture'])
return str(arch).strip()
+ def _set_default_timesync_client(self):
+ # Fall back to previous implementation
+ return
+
def _get_wrapper_prefix(cmd, mode):
if isinstance(cmd, str):
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index bad112fe..00b38917 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -649,4 +649,8 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+ def _set_default_timesync_client(self):
+ # Fall back to previous implementation
+ return
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index dc57717d..5685b058 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -214,6 +214,10 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
["-u", "world"], freq=PER_INSTANCE)
+ def _set_default_timesync_client(self):
+ # Fall back to previous implementation
+ return
+
def convert_resolv_conf(settings):
"""Returns a settings string formatted for resolv.conf."""
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index a219e9fb..092d6a11 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -8,6 +8,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import platform
+
from cloudinit import distros
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -36,6 +38,23 @@ class Distro(distros.Distro):
systemd_locale_conf_fn = '/etc/locale.conf'
tz_local_fn = '/etc/localtime'
+ __timesync_client_map = {
+ # Map the system_info supported values
+ 'chrony': ('chrony', 'chronyd'),
+ 'isc-ntp': ('ntp', 'ntpd'),
+ 'systemd-timesyncd': ('systemd-timesyncd', 'systemd-timesyncd'),
+ # Map the common names if different from system_info
+ 'chronyd': ('chrony', 'chronyd'),
+ 'ntpd': ('ntp', 'ntpd'),
+ '/usr/lib/systemd/systemd-timesyncd':
+ ('systemd-timesyncd', 'systemd-timesyncd')
+ }
+ __ntp_client_execs = [
+ 'chronyd',
+ 'ntpd',
+ '/usr/lib/systemd/systemd-timesyncd'
+ ]
+
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
self._runner = helpers.Runners(paths)
@@ -145,6 +164,28 @@ class Distro(distros.Distro):
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
+ def _set_default_timesync_client(self):
+ """The default timesync client is dependent on the distribution."""
+ # When we get here the user has configured ntp to be enabled but
+ # no client is installed
+ distro_info = platform.linux_distribution()
+ name = distro_info[0]
+ major_ver = int(distro_info[1].split('.')[0])
+
+ # This is horribly complicated because of a case of
+ # "we do not care if versions should be increasing syndrome"
+ if (
+ (major_ver >= 15 and 'openSUSE' not in name) or
+ (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
+ ):
+ self.timesync_client = 'chrony'
+ self.timesync_service_name = 'chronyd'
+ self.install_packages(['chrony'])
+ else:
+ self.timesync_client = 'ntp'
+ self.timesync_service_name = 'ntpd'
+ self.install_packages(['ntp'])
+
def _write_hostname(self, hostname, out_fn):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 1fecb619..6d9c9f67 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -218,4 +218,8 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
["makecache"], freq=PER_INSTANCE)
+ def _set_default_timesync_client(self):
+ # Fall back to previous implementation
+ return
+
# vi: ts=4 expandtab
diff --git a/templates/chrony.conf.tmpl b/templates/chrony.conf.tmpl
new file mode 100644
index 00000000..38e84d85
--- /dev/null
+++ b/templates/chrony.conf.tmpl
@@ -0,0 +1,25 @@
+## template:jinja
+# cloud-init generated file
+# See chrony.conf(5)
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 791fe612..cdee4b1b 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -4,16 +4,12 @@ from cloudinit import distros
from cloudinit import util
from cloudinit.tests import helpers
+from cloudinit.tests.helpers import mock
import os
import shutil
import tempfile
-try:
- from unittest import mock
-except ImportError:
- import mock
-
unknown_arch_info = {
'arches': ['default'],
'failsafe': {'primary': 'http://fs-primary-default',
@@ -35,6 +31,24 @@ package_mirrors = [
unknown_arch_info
]
+timesync_user_cfg_chrony = {
+ 'system_info': {
+ 'ntp_client': 'chrony'
+ }
+}
+
+timesync_user_cfg_ntp = {
+ 'system_info': {
+ 'ntp_client': 'isc-ntp'
+ }
+}
+
+timesync_user_cfg_systemd = {
+ 'system_info': {
+ 'ntp_client': 'systemd-timesyncd'
+ }
+}
+
gpmi = distros._get_package_mirror_info
gapmi = distros._get_arch_package_mirror_info
@@ -244,5 +258,82 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
with self.assertRaises(NotImplementedError):
d.get_locale()
+ def test_set_timesync_client_user_config_chrony_sles(self):
+ """Test sles distro sets proper values for chrony"""
+ cls = distros.fetch("sles")
+ d = cls("sles", timesync_user_cfg_chrony, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'chrony')
+ self.assertEqual(d.timesync_service_name, 'chronyd')
+
+ def test_set_timesync_client_user_config_ntp_sles(self):
+ """Test sles distro sets proper values for ntp"""
+ cls = distros.fetch("sles")
+ d = cls("sles", timesync_user_cfg_ntp, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'ntp')
+ self.assertEqual(d.timesync_service_name, 'ntpd')
+
+ def test_set_timesync_client_user_config_timesyncd_sles(self):
+ """Test sles distro sets proper values for timesyncd"""
+ cls = distros.fetch("sles")
+ d = cls("sles", timesync_user_cfg_systemd, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'systemd-timesyncd')
+ self.assertEqual(d.timesync_service_name, 'systemd-timesyncd')
+
+ @mock.patch("cloudinit.distros.util")
+ def test_set_timesync_client_chrony_installed_sles(self, mock_util):
+ """Test sles distro sets proper values for chrony if chrony is
+ installed"""
+ mock_util.which.side_effect = side_effect_client_is_chrony
+ cls = distros.fetch("sles")
+ d = cls("sles", {}, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'chrony')
+ self.assertEqual(d.timesync_service_name, 'chronyd')
+
+ @mock.patch("cloudinit.distros.util")
+ def test_set_timesync_client_ntp_installed_sles(self, mock_util):
+ """Test sles distro sets proper values for ntp if ntpd is
+ installed"""
+ mock_util.which.side_effect = side_effect_client_is_ntp
+ cls = distros.fetch("sles")
+ d = cls("sles", {}, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'ntp')
+ self.assertEqual(d.timesync_service_name, 'ntpd')
+
+ @mock.patch("cloudinit.distros.util")
+ def test_set_timesync_client_timesyncd_installed_sles(self, mock_util):
+ """Test sles distro sets proper values for timesyncd if timesyncd is
+ installed"""
+ mock_util.which.side_effect = side_effect_client_is_timesyncd
+ cls = distros.fetch("sles")
+ d = cls("sles", {}, None)
+ d.set_timesync_client()
+ self.assertEqual(d.timesync_client, 'systemd-timesyncd')
+ self.assertEqual(d.timesync_service_name, 'systemd-timesyncd')
+
+
+def side_effect_client_is_chrony(ntp_client):
+ if 'chrony' in ntp_client:
+ return '/usr/sbin/chronyd'
+ else:
+ return False
+
+
+def side_effect_client_is_ntp(ntp_client):
+ if 'ntp' in ntp_client:
+ return '/usr/sbin/ntpd'
+ else:
+ return False
+
+
+def side_effect_client_is_timesyncd(ntp_client):
+ if 'timesyncd' in ntp_client:
+ return ntp_client
+ else:
+ return False
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/test_distros/test_opensuse.py
index b9bb9b3e..9ed10af8 100644
--- a/tests/unittests/test_distros/test_opensuse.py
+++ b/tests/unittests/test_distros/test_opensuse.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.tests.helpers import CiTestCase, mock
from . import _get_distro
@@ -10,3 +10,45 @@ class TestopenSUSE(CiTestCase):
def test_get_distro(self):
distro = _get_distro("opensuse")
self.assertEqual(distro.osfamily, 'suse')
+
+ @mock.patch("cloudinit.distros.opensuse.Distro.install_packages")
+ @mock.patch("platform.linux_distribution")
+ def test_set_default_timesync_client_osl42(
+ self,
+ mock_distro,
+ mock_install
+ ):
+ mock_distro.return_value = ('openSUSE ', '42.3', 'x86_64')
+ mock_install.return_value = True
+ distro = _get_distro("opensuse")
+ distro._set_default_timesync_client()
+ self.assertEqual(distro.timesync_client, 'ntp')
+ self.assertEqual(distro.timesync_service_name, 'ntpd')
+
+ @mock.patch("cloudinit.distros.opensuse.Distro.install_packages")
+ @mock.patch("platform.linux_distribution")
+ def test_set_default_timesync_client_os13(
+ self,
+ mock_distro,
+ mock_install
+ ):
+ mock_distro.return_value = ('openSUSE ', '13.1', 'x86_64')
+ mock_install.return_value = True
+ distro = _get_distro("opensuse")
+ distro._set_default_timesync_client()
+ self.assertEqual(distro.timesync_client, 'ntp')
+ self.assertEqual(distro.timesync_service_name, 'ntpd')
+
+ @mock.patch("cloudinit.distros.opensuse.Distro.install_packages")
+ @mock.patch("platform.linux_distribution")
+ def test_set_default_timesync_client_osl15(
+ self,
+ mock_distro,
+ mock_install
+ ):
+ mock_distro.return_value = ('openSUSE ', '15.1', 'x86_64')
+ mock_install.return_value = True
+ distro = _get_distro("opensuse")
+ distro._set_default_timesync_client()
+ self.assertEqual(distro.timesync_client, 'chrony')
+ self.assertEqual(distro.timesync_service_name, 'chronyd')
diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/test_distros/test_sles.py
index 33e3c457..13237a27 100644
--- a/tests/unittests/test_distros/test_sles.py
+++ b/tests/unittests/test_distros/test_sles.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.tests.helpers import CiTestCase, mock
from . import _get_distro
@@ -10,3 +10,31 @@ class TestSLES(CiTestCase):
def test_get_distro(self):
distro = _get_distro("sles")
self.assertEqual(distro.osfamily, 'suse')
+
+ @mock.patch("cloudinit.distros.opensuse.Distro.install_packages")
+ @mock.patch("platform.linux_distribution")
+ def test_set_default_timesync_client_sles12(
+ self,
+ mock_distro,
+ mock_install
+ ):
+ mock_distro.return_value = ('SLES ', '12.3', 'x86_64')
+ mock_install.return_value = True
+ distro = _get_distro("sles")
+ distro._set_default_timesync_client()
+ self.assertEqual(distro.timesync_client, 'ntp')
+ self.assertEqual(distro.timesync_service_name, 'ntpd')
+
+ @mock.patch("cloudinit.distros.opensuse.Distro.install_packages")
+ @mock.patch("platform.linux_distribution")
+ def test_set_default_timesync_client_sles15(
+ self,
+ mock_distro,
+ mock_install
+ ):
+ mock_distro.return_value = ('SLES ', '15', 'x86_64')
+ mock_install.return_value = True
+ distro = _get_distro("sles")
+ distro._set_default_timesync_client()
+ self.assertEqual(distro.timesync_client, 'chrony')
+ self.assertEqual(distro.timesync_service_name, 'chronyd')
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 28a8455d..33fab8c8 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -10,6 +10,20 @@ import os
from os.path import dirname
import shutil
+CHRONY_TEMPLATE = b"""\
+## template: jinja
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+"""
+
NTP_TEMPLATE = b"""\
## template: jinja
servers {{servers}}
@@ -79,7 +93,7 @@ class TestNtp(FilesystemMockingTestCase):
"""When NTP_CONF exists, rename_ntp moves it."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
util.write_file(ntpconf, "")
- with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
+ with mock.patch("cloudinit.config.cc_ntp.NTP_CONF_FILE", ntpconf):
cc_ntp.rename_ntp_conf()
self.assertFalse(os.path.exists(ntpconf))
self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
@@ -112,7 +126,7 @@ class TestNtp(FilesystemMockingTestCase):
"""When NTP_CONF doesn't exist rename_ntp doesn't create a file."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
self.assertFalse(os.path.exists(ntpconf))
- with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
+ with mock.patch("cloudinit.config.cc_ntp.NTP_CONF_FILE", ntpconf):
cc_ntp.rename_ntp_conf()
self.assertFalse(os.path.exists("{0}.dist".format(ntpconf)))
self.assertFalse(os.path.exists(ntpconf))
@@ -133,7 +147,7 @@ class TestNtp(FilesystemMockingTestCase):
# Create ntp.conf.tmpl
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf)
content = util.read_file_or_url('file://' + ntp_conf).contents
self.assertEqual(
@@ -159,7 +173,7 @@ class TestNtp(FilesystemMockingTestCase):
# Create ntp.conf.tmpl.<distro>
with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf)
content = util.read_file_or_url('file://' + ntp_conf).contents
self.assertEqual(
@@ -178,7 +192,7 @@ class TestNtp(FilesystemMockingTestCase):
# Create ntp.conf.tmpl
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
content = util.read_file_or_url('file://' + ntp_conf).contents
default_pools = [
@@ -210,7 +224,7 @@ class TestNtp(FilesystemMockingTestCase):
# Create ntp.conf.tmpl
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
with mock.patch.object(util, 'which', return_value=None):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
@@ -239,7 +253,10 @@ class TestNtp(FilesystemMockingTestCase):
with open(template, 'wb') as stream:
stream.write(TIMESYNCD_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
+ with mock.patch(
+ 'cloudinit.config.cc_ntp.TIMESYNCD_CONF_FILE',
+ tsyncd_conf
+ ):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
content = util.read_file_or_url('file://' + tsyncd_conf).contents
@@ -267,7 +284,7 @@ class TestNtp(FilesystemMockingTestCase):
shutil.copy(
tmpl_file,
os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro))
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
with mock.patch.object(util, 'which', return_value=[True]):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
@@ -300,7 +317,7 @@ class TestNtp(FilesystemMockingTestCase):
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
for valid_empty_config in valid_empty_configs:
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, [])
with open(ntp_conf) as stream:
content = stream.read()
@@ -323,7 +340,7 @@ class TestNtp(FilesystemMockingTestCase):
ntp_conf = os.path.join(self.new_root, 'ntp.conf')
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
self.assertIn(
"Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
@@ -344,7 +361,7 @@ class TestNtp(FilesystemMockingTestCase):
ntp_conf = os.path.join(self.new_root, 'ntp.conf')
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
self.assertIn(
"Invalid config:\nntp.pools: 123 is not of type 'array'\n"
@@ -366,7 +383,7 @@ class TestNtp(FilesystemMockingTestCase):
ntp_conf = os.path.join(self.new_root, 'ntp.conf')
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
self.assertIn(
"Invalid config:\nntp: Additional properties are not allowed "
@@ -391,7 +408,7 @@ class TestNtp(FilesystemMockingTestCase):
ntp_conf = os.path.join(self.new_root, 'ntp.conf')
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
self.assertIn(
"Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has "
@@ -421,7 +438,10 @@ class TestNtp(FilesystemMockingTestCase):
print(template)
with open(template, 'wb') as stream:
stream.write(TIMESYNCD_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
+ with mock.patch(
+ 'cloudinit.config.cc_ntp.TIMESYNCD_CONF_FILE',
+ tsyncd_conf
+ ):
cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf,
template='timesyncd.conf')
@@ -442,7 +462,7 @@ class TestNtp(FilesystemMockingTestCase):
# Create ntp.conf.tmpl
with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF_FILE', ntp_conf):
cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
content = util.read_file_or_url('file://' + ntp_conf).contents
default_pools = [
@@ -456,5 +476,35 @@ class TestNtp(FilesystemMockingTestCase):
",".join(default_pools)),
self.logs.getvalue())
+ def test_ntp_handler_chrony(self):
+ """Test ntp handler configures chrony"""
+ distro = 'opensuse'
+ cfg = {
+ 'servers': ['192.168.2.1', '192.168.2.2'],
+ 'pools': ['0.mypool.org'],
+ }
+ mycloud = self._get_cloud(distro)
+ mycloud.timesync_client = 'chrony'
+ mycloud.timesync_service_name = 'chronyd'
+ chrony_conf = self.tmp_path("chrony.conf", self.new_root)
+ # Create chrony.conf.tmpl
+ template = '{0}.tmpl'.format(chrony_conf)
+ print(template)
+ with open(template, 'wb') as stream:
+ stream.write(CHRONY_TEMPLATE)
+ with mock.patch(
+ 'cloudinit.config.cc_ntp.CHRONY_CONF_FILE',
+ chrony_conf
+ ):
+ cc_ntp.write_ntp_config_template(cfg, mycloud, chrony_conf,
+ template='chrony.conf')
+
+ content = util.read_file_or_url('file://' + chrony_conf).contents
+ expected = '# pools\n'
+ expected += 'pool 0.mypool.org iburst\n'
+ expected += '# servers\n'
+ expected += 'server 192.168.2.1 iburst\n'
+ expected += 'server 192.168.2.2 iburst\n\n'
+ self.assertEqual(expected, content.decode())
# vi: ts=4 expandtab
--
2.13.6
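
Not part of the patch above: a minimal sketch of how an explicit `system_info: ntp_client` setting is meant to resolve to a client/service pair, mirroring the map the patch adds to cloudinit/distros/opensuse.py. The helper name resolve_timesync_client() is illustrative only; when the key is absent, the patch instead probes installed executables and finally falls back to the distro default.

# Illustrative sketch only -- not cloud-init API.
TIMESYNC_CLIENT_MAP = {
    'chrony': ('chrony', 'chronyd'),
    'isc-ntp': ('ntp', 'ntpd'),
    'systemd-timesyncd': ('systemd-timesyncd', 'systemd-timesyncd'),
}

def resolve_timesync_client(cfg):
    """Return (client, service) for an explicit system_info ntp_client value."""
    ntp_client = cfg.get('system_info', {}).get('ntp_client')
    return TIMESYNC_CLIENT_MAP.get(ntp_client)

# Equivalent to the timesync_user_cfg_chrony fixture in the tests above.
print(resolve_timesync_client({'system_info': {'ntp_client': 'chrony'}}))
# -> ('chrony', 'chronyd')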


@@ -0,0 +1,682 @@
From 48c4dcd464d8c6daccf09b3dccc664ad347b34ce Mon Sep 17 00:00:00 2001
From: Robert Schweikert <rjschwei@suse.com>
Date: Mon, 18 Dec 2017 13:34:21 -0500
Subject: [PATCH] - switch to using iproute2 tools + ifconfig, netstat and
other tools are being deprecated, switch to using tools that are part of
iproute2 for implementations that support these tools
---
cloudinit/config/cc_disable_ec2_metadata.py | 14 +-
.../config/tests/test_disable_ec2_metadata.py | 72 +++++
cloudinit/netinfo.py | 302 +++++++++++++++------
cloudinit/tests/test_netinfo.py | 174 +++++++++++-
4 files changed, 474 insertions(+), 88 deletions(-)
create mode 100644 cloudinit/config/tests/test_disable_ec2_metadata.py
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index c56319b5..8a166ddf 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
- util.subp(REJECT_CMD, capture=False)
+ reject_cmd = None
+ if util.which('ifconfig'):
+ reject_cmd = REJECT_CMD_IF
+ elif util.which('ip'):
+ reject_cmd = REJECT_CMD_IP
+ else:
+ log.error(('Neither "route" nor "ip" command found, unable to '
+ 'manipulate routing table'))
+ return
+ util.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
new file mode 100644
index 00000000..bade814e
--- /dev/null
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -0,0 +1,72 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {'disable_ec2_metadata': 'true'}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ifconfig(self, m_subp, m_which):
+ """Set the route if ifconfig command is available"""
+ m_subp.side_effect = command_check_ifconfig
+ m_which.side_effect = side_effect_use_ifconfig
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ip(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_subp.side_effect = command_check_ip
+ m_which.side_effect = side_effect_use_ip
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_no_tool(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_subp.side_effect = command_dont_reach
+ m_which.side_effect = side_effect_has_no_tool
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+
+
+def side_effect_use_ifconfig(tool):
+ if tool == 'ifconfig':
+ return True
+ else:
+ return False
+
+
+def side_effect_use_ip(tool):
+ if tool == 'ip':
+ return True
+ else:
+ return False
+
+
+def side_effect_has_no_tool(tool):
+ return False
+
+
+def command_check_ifconfig(cmd, capture):
+ assert(cmd == ['route', 'add', '-host', '169.254.169.254', 'reject'])
+
+
+def command_check_ip(cmd, capture):
+ assert(cmd == ['ip', 'route', 'add', 'prohibit', '169.254.169.254'])
+
+
+def command_dont_reach(cmd, capture):
+ assert('Test should not have reached this location' == 0)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 993b26cf..baad3f92 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -19,6 +19,117 @@ LOG = logging.getLogger()
def netdev_info(empty=""):
+ if util.which('ifconfig'):
+ return _netdev_info_from_ifconfig(empty)
+ elif util.which('ip'):
+ return _netdev_info_from_ip(empty)
+ else:
+ LOG.error(('Neither "ifconfig" nor "ip" command found, unable to '
+ 'collect network device information'))
+ return {}
+
+
+def route_info():
+ if util.which('netstat'):
+ return _route_info_from_netstat()
+ elif util.which('ip'):
+ return _route_info_from_ip()
+ else:
+ LOG.error(('Neither "netstat" nor "ip" command found, unable to '
+ 'collect routing information'))
+ return {}
+
+
+def getgateway():
+ try:
+ routes = route_info()
+ except Exception:
+ pass
+ else:
+ for r in routes.get('ipv4', []):
+ if r['flags'].find("G") >= 0:
+ return "%s[%s]" % (r['gateway'], r['iface'])
+ return None
+
+
+def netdev_pformat():
+ lines = []
+ try:
+ netdev = netdev_info(empty=".")
+ except Exception:
+ lines.append(util.center("Net device info failed", '!', 80))
+ else:
+ fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
+ tbl = SimpleTable(fields)
+ for (dev, d) in sorted(netdev.items()):
+ tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
+ if d.get('addr6'):
+ tbl.add_row([dev, d["up"],
+ d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
+ netdev_s = tbl.get_string()
+ max_len = len(max(netdev_s.splitlines(), key=len))
+ header = util.center("Net device info", "+", max_len)
+ lines.extend([header, netdev_s])
+ return "\n".join(lines)
+
+
+def route_pformat():
+ lines = []
+ try:
+ routes = route_info()
+ except Exception as e:
+ lines.append(util.center('Route info failed', '!', 80))
+ util.logexc(LOG, "Route info failed: %s" % e)
+ else:
+ if routes.get('ipv4'):
+ fields_v4 = ['Route', 'Destination', 'Gateway',
+ 'Genmask', 'Interface', 'Flags']
+ tbl_v4 = SimpleTable(fields_v4)
+ for (n, r) in enumerate(routes.get('ipv4')):
+ route_id = str(n)
+ tbl_v4.add_row([route_id, r['destination'],
+ r['gateway'], r['genmask'],
+ r['iface'], r['flags']])
+ route_s = tbl_v4.get_string()
+ max_len = len(max(route_s.splitlines(), key=len))
+ header = util.center("Route IPv4 info", "+", max_len)
+ lines.extend([header, route_s])
+ if routes.get('ipv6'):
+ fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
+ 'Local Address', 'Foreign Address', 'State']
+ tbl_v6 = SimpleTable(fields_v6)
+ for (n, r) in enumerate(routes.get('ipv6')):
+ route_id = str(n)
+ tbl_v6.add_row([route_id, r['proto'],
+ r['recv-q'], r['send-q'],
+ r['local address'], r['foreign address'],
+ r['state']])
+ route_s = tbl_v6.get_string()
+ max_len = len(max(route_s.splitlines(), key=len))
+ header = util.center("Route IPv6 info", "+", max_len)
+ lines.extend([header, route_s])
+ return "\n".join(lines)
+
+
+def debug_info(prefix='ci-info: '):
+ lines = []
+ netdev_lines = netdev_pformat().splitlines()
+ if prefix:
+ for line in netdev_lines:
+ lines.append("%s%s" % (prefix, line))
+ else:
+ lines.extend(netdev_lines)
+ route_lines = route_pformat().splitlines()
+ if prefix:
+ for line in route_lines:
+ lines.append("%s%s" % (prefix, line))
+ else:
+ lines.extend(route_lines)
+ return "\n".join(lines)
+
+
+def _netdev_info_from_ifconfig(empty=""):
+ """Use legacy ifconfig output"""
fields = ("hwaddr", "addr", "bcast", "mask")
(ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = {}
@@ -84,7 +195,54 @@ def netdev_info(empty=""):
return devs
-def route_info():
+def _netdev_info_from_ip(empty=""):
+ """Use ip to get network information"""
+ fields = ("hwaddr", "addr", "bcast", "mask")
+ (ipdata_out, _err) = util.subp(["ip", "a"], rcs=[0, 1])
+ devs = {}
+ this_device = None
+ for line in str(ipdata_out).splitlines():
+ if len(line) == 0:
+ continue
+ if line[0].isdigit():
+ prts = line.strip().split(':')
+ this_device = prts[1].strip()
+ devs[this_device] = {}
+ for field in fields:
+ devs[this_device][field] = ''
+ devs[this_device]['up'] = False
+ status_info = re.match('(<)(.*)(>)', prts[-1].strip()).group(2)
+ status_info = status_info.lower().split(',')
+ if 'up' in status_info:
+ devs[this_device]['up'] = True
+ if 'broadcast' in status_info and 'multicast' in status_info:
+ devs[this_device]['bcast'] = 'multicast'
+ continue
+ conf_data = line.strip()
+ conf_data_prts = conf_data.split()
+ if conf_data.startswith('inet '):
+ devs[this_device]['addr'] = conf_data_prts[1]
+ if 'brd' in conf_data_prts:
+ loc = conf_data_prts.index('brd')
+ devs[this_device]['bcast'] = conf_data_prts[loc + 1]
+ if conf_data.startswith('inet6'):
+ devs[this_device]['addr6'] = conf_data_prts[1]
+ if 'scope' in conf_data_prts:
+ loc = conf_data_prts.index('scope')
+ devs[this_device]['scope6'] = conf_data_prts[loc + 1]
+ if conf_data.startswith('link/ether'):
+ devs[this_device]['hwaddr'] = conf_data_prts[1]
+
+ if empty != "":
+ for (_devname, dev) in devs.items():
+ for field in dev:
+ if dev[field] == "":
+ dev[field] = empty
+
+ return devs
+
+
+def _route_info_from_netstat():
(route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
routes = {}
@@ -150,91 +308,69 @@ def route_info():
return routes
-def getgateway():
- try:
- routes = route_info()
- except Exception:
- pass
- else:
- for r in routes.get('ipv4', []):
- if r['flags'].find("G") >= 0:
- return "%s[%s]" % (r['gateway'], r['iface'])
- return None
-
-
-def netdev_pformat():
- lines = []
- try:
- netdev = netdev_info(empty=".")
- except Exception:
- lines.append(util.center("Net device info failed", '!', 80))
- else:
- fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
- tbl = SimpleTable(fields)
- for (dev, d) in sorted(netdev.items()):
- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
- if d.get('addr6'):
- tbl.add_row([dev, d["up"],
- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
- netdev_s = tbl.get_string()
- max_len = len(max(netdev_s.splitlines(), key=len))
- header = util.center("Net device info", "+", max_len)
- lines.extend([header, netdev_s])
- return "\n".join(lines)
+def _route_info_from_ip():
+ """Detremine route information from ip route command"""
+ routes = {}
+ routes['ipv4'] = []
+ routes['ipv6'] = []
+ # IPv4
+ (route_out, _err) = util.subp(['ip', '-4', 'route', 'list'], rcs=[0, 1])
-def route_pformat():
- lines = []
- try:
- routes = route_info()
- except Exception as e:
- lines.append(util.center('Route info failed', '!', 80))
- util.logexc(LOG, "Route info failed: %s" % e)
- else:
- if routes.get('ipv4'):
- fields_v4 = ['Route', 'Destination', 'Gateway',
- 'Genmask', 'Interface', 'Flags']
- tbl_v4 = SimpleTable(fields_v4)
- for (n, r) in enumerate(routes.get('ipv4')):
- route_id = str(n)
- tbl_v4.add_row([route_id, r['destination'],
- r['gateway'], r['genmask'],
- r['iface'], r['flags']])
- route_s = tbl_v4.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv4 info", "+", max_len)
- lines.extend([header, route_s])
- if routes.get('ipv6'):
- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
- 'Local Address', 'Foreign Address', 'State']
- tbl_v6 = SimpleTable(fields_v6)
- for (n, r) in enumerate(routes.get('ipv6')):
- route_id = str(n)
- tbl_v6.add_row([route_id, r['proto'],
- r['recv-q'], r['send-q'],
- r['local address'], r['foreign address'],
- r['state']])
- route_s = tbl_v6.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv6 info", "+", max_len)
- lines.extend([header, route_s])
- return "\n".join(lines)
+ entries = route_out.splitlines()
+ for line in entries:
+ route_info = line.strip().split()
+ dest = route_info[0]
+ if route_info[0] == 'default':
+ dest = '0.0.0.0'
+ flags = ''
+ gw = '0.0.0.0'
+ if 'via' in route_info:
+ loc = route_info.index('via')
+ # The NH (Next Hop) is basically equivalent to the gateway
+ gw = route_info[loc + 1]
+ flags = 'G'
+ loc = route_info.index('dev')
+ dev = route_info[loc + 1]
+ entry = {
+ 'destination': dest,
+ 'gateway': gw,
+ 'genmask': '',
+ 'flags': flags,
+ 'metric': '0',
+ 'ref': '0',
+ 'use': '0',
+ 'iface': dev
+ }
+ routes['ipv4'].append(entry)
+ # IPv6
+ (route_out, _err) = util.subp(['ip', '-6', 'route', 'list'], rcs=[0, 1])
-def debug_info(prefix='ci-info: '):
- lines = []
- netdev_lines = netdev_pformat().splitlines()
- if prefix:
- for line in netdev_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(netdev_lines)
- route_lines = route_pformat().splitlines()
- if prefix:
- for line in route_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(route_lines)
- return "\n".join(lines)
+ entries = route_out.splitlines()
+ for line in entries:
+ route_info = line.strip().split()
+ ip = route_info[0]
+ if ip == 'default':
+ ip = '::'
+ proto = 'tcp6'
+ if 'proto' in route_info:
+ loc = route_info.index('proto')
+ proto = route_info[loc + 1]
+ gw = ''
+ if 'via' in route_info:
+ loc = route_info.index('via')
+ # The NH (Next Hop) is basically equivalent to the gateway
+ gw = route_info[loc + 1]
+ entry = {
+ 'proto': proto,
+ 'recv-q': '0',
+ 'send-q': '0',
+ 'local address': ip,
+ 'foreign address': gw,
+ 'state': '',
+ }
+ routes['ipv6'].append(entry)
+ return routes
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 7dea2e41..3dc557cc 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -2,7 +2,7 @@
"""Tests netinfo module functions and classes."""
-from cloudinit.netinfo import netdev_pformat, route_pformat
+from cloudinit.netinfo import getgateway, netdev_pformat, route_pformat
from cloudinit.tests.helpers import CiTestCase, mock
@@ -27,6 +27,48 @@ lo Link encap:Local Loopback
collisions:0 txqueuelen:1
"""
+SAMPLE_IP_A_OUT = (
+ '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN '
+ 'group default qlen 1000\n'
+ 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n'
+ 'inet 127.0.0.1/8 scope host lo\n'
+ ' valid_lft forever preferred_lft forever\n'
+ 'inet6 ::1/128 scope host\n'
+ ' valid_lft forever preferred_lft forever\n'
+ '2: wlp3s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
+ 'UP group default qlen 1000\n'
+ 'link/ether 84:3a:4b:09:6f:ec brd ff:ff:ff:ff:ff:ff\n'
+ 'inet 192.168.1.101/24 brd 192.168.1.255 scope global wlp3s0\n'
+ ' valid_lft forever preferred_lft forever\n'
+ 'inet 192.168.1.3/24 brd 192.168.1.255 scope global secondary wlp3s0\n'
+ ' valid_lft forever preferred_lft forever\n'
+ 'inet6 fe80::863a:4bff:fe09:6fec/64 scope link\n'
+ ' valid_lft forever preferred_lft forever'
+)
+
+SAMPLE_ROUTE_INFO = {
+ 'ipv4': [
+ {
+ 'genmask': '0.0.0.0',
+ 'use': '0',
+ 'iface': 'eth1',
+ 'flags': 'UG',
+ 'metric': '0',
+ 'destination': '0.0.0.0',
+ 'ref': '0',
+ 'gateway': '192.168.1.1'},
+ {
+ 'genmask': '255.0.0.0',
+ 'use': '0',
+ 'iface': 'eth2',
+ 'flags': 'UG',
+ 'metric': '0',
+ 'destination': '10.0.0.0',
+ 'ref': '0',
+ 'gateway': '10.163.8.1'}
+ ]
+}
+
SAMPLE_ROUTE_OUT = '\n'.join([
'0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0'
' enp0s25',
@@ -35,6 +77,20 @@ SAMPLE_ROUTE_OUT = '\n'.join([
'192.168.2.0 0.0.0.0 255.255.255.0 U 0 0 0'
' enp0s25'])
+SAMPLE_ROUTE_OUT_IP_V4 = '\n'.join([
+ 'default via 192.168.1.1 dev br0',
+ '10.0.0.0/8 via 10.163.8.1 dev tun0',
+ '10.163.8.1 dev tun0 proto kernel scope link src 10.163.8.118 ',
+ '137.65.0.0/16 via 10.163.8.1 dev tun0'])
+
+SAMPLE_ROUTE_OUT_IP_V6 = '\n'.join([
+ '2621:111:80c0:8080:12:160:68:53 dev eth0 proto kernel metric 256 expires '
+ '9178sec pref medium',
+ '2621:111:80c0:8080::/64 dev eth0 proto ra metric 100 pref medium',
+ 'fe80::1 dev eth0 proto static metric 100 pref medium',
+ 'fe80::/64 dev eth0 proto kernel metric 256 pref medium',
+ 'default via fe80::1 dev eth0 proto static metric 100 pref medium',
+ '2620:113:80c0:8000::/50 dev tun0 metric 1024 pref medium'])
NETDEV_FORMATTED_OUT = '\n'.join([
'+++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++'
@@ -56,6 +112,26 @@ NETDEV_FORMATTED_OUT = '\n'.join([
'+---------+------+------------------------------+---------------+-------+'
'-------------------+'])
+NETDEV_FORMATTED_OUT_IP = '\n'.join([
+ '++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++'
+ '++++++++++++',
+ '+--------+------+------------------------------+------+-------+----------'
+ '---------+',
+ '| Device | Up | Address | Mask | Scope | Hw-Ad'
+ 'dress |',
+ '+--------+------+------------------------------+------+-------+----------'
+ '---------+',
+ '| lo | True | 127.0.0.1/8 | . | . | .'
+ ' |',
+ '| lo | True | ::1/128 | . | host | .'
+ ' |',
+ '| wlp3s0 | True | 192.168.1.3/24 | . | . | 84:3a:4b:'
+ '09:6f:ec |',
+ '| wlp3s0 | True | fe80::863a:4bff:fe09:6fec/64 | . | link | 84:3a:4b:'
+ '09:6f:ec |',
+ '+--------+------+------------------------------+------+-------+----------'
+ '---------+'])
+
ROUTE_FORMATTED_OUT = '\n'.join([
'+++++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++'
'+++',
@@ -86,21 +162,113 @@ ROUTE_FORMATTED_OUT = '\n'.join([
'+-------+-------------+-------------+---------------+---------------+'
'-----------------+-------+'])
+ROUTE_FORMATTED_OUT_IP = '\n'.join([
+ '+++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++',
+ '+-------+---------------+-------------+---------+-----------+-------+',
+ '| Route | Destination | Gateway | Genmask | Interface | Flags |',
+ '+-------+---------------+-------------+---------+-----------+-------+',
+ '| 0 | 0.0.0.0 | 192.168.1.1 | | br0 | G |',
+ '| 1 | 10.0.0.0/8 | 10.163.8.1 | | tun0 | G |',
+ '| 2 | 10.163.8.1 | 0.0.0.0 | | tun0 | |',
+ '| 3 | 137.65.0.0/16 | 10.163.8.1 | | tun0 | G |',
+ '+-------+---------------+-------------+---------+-----------+-------+',
+ '++++++++++++++++++++++++++++++++++++++++Route IPv6 info++++++++++++++'
+ '+++++++++++++++++++++++++++',
+ '+-------+--------+--------+--------+---------------------------------'
+ '+-----------------+-------+',
+ '| Route | Proto | Recv-Q | Send-Q | Local Address '
+ '| Foreign Address | State |',
+ '+-------+--------+--------+--------+---------------------------------'
+ '+-----------------+-------+',
+ '| 0 | kernel | 0 | 0 | 2621:111:80c0:8080:12:160:68:53 '
+ '| | |',
+ '| 1 | ra | 0 | 0 | 2621:111:80c0:8080::/64 '
+ '| | |',
+ '| 2 | static | 0 | 0 | fe80::1 '
+ '| | |',
+ '| 3 | kernel | 0 | 0 | fe80::/64 '
+ '| | |',
+ '| 4 | static | 0 | 0 | :: '
+ '| fe80::1 | |',
+ '| 5 | tcp6 | 0 | 0 | 2620:113:80c0:8000::/50 '
+ '| | |',
+ '+-------+--------+--------+--------+---------------------------------'
+ '+-----------------+-------+'])
+
class TestNetInfo(CiTestCase):
maxDiff = None
+ @mock.patch('cloudinit.netinfo.route_info')
+ def test_getgateway_route(self, m_route_info):
+ """getgateway finds the first gateway"""
+ m_route_info.return_value = SAMPLE_ROUTE_INFO
+ gateway = getgateway()
+ self.assertEqual('192.168.1.1[eth1]', gateway)
+
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_netdev_pformat(self, m_subp):
+ def test_netdev_pformat_ifconfig(self, m_subp, m_which):
"""netdev_pformat properly rendering network device information."""
m_subp.return_value = (SAMPLE_IFCONFIG_OUT, '')
+ m_which.side_effect = side_effect_use_ifconfig
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_route_pformat(self, m_subp):
+ def test_netdev_pformat_ip(self, m_subp, m_which):
+ """netdev_pformat properly rendering network device information."""
+ m_subp.return_value = (SAMPLE_IP_A_OUT, '')
+ m_which.side_effect = side_effect_use_ip
+ content = netdev_pformat()
+ self.assertEqual(NETDEV_FORMATTED_OUT_IP, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_pformat_netstat(self, m_subp, m_which):
"""netdev_pformat properly rendering network device information."""
m_subp.return_value = (SAMPLE_ROUTE_OUT, '')
+ m_which.side_effect = side_effect_use_netstat
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_pformat_ip(self, m_subp, m_which):
+ """netdev_pformat properly rendering network device information."""
+ m_subp.side_effect = side_effect_return_route_info
+ m_which.side_effect = side_effect_use_ip
+ content = route_pformat()
+ self.assertEqual(ROUTE_FORMATTED_OUT_IP, content)
+
+
+def side_effect_use_ifconfig(tool):
+ if tool == 'ifconfig':
+ return True
+ else:
+ return False
+
+
+def side_effect_use_ip(tool):
+ if tool == 'ip':
+ return True
+ else:
+ return False
+
+
+def side_effect_use_netstat(tool):
+ if tool == 'netstat':
+ return True
+ else:
+ return False
+
+
+def side_effect_return_route_info(cmd, rcs=None):
+ if '-4' in list(cmd):
+ return (SAMPLE_ROUTE_OUT_IP_V4, 0)
+ else:
+ return (SAMPLE_ROUTE_OUT_IP_V6, 0)
+
+# vi: ts=4 expandtab
--
2.13.6
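
Not part of the patch above: a minimal sketch of the detection pattern this patch applies in cc_disable_ec2_metadata and netinfo (prefer the legacy net-tools command when present, otherwise fall back to iproute2), using shutil.which in place of cloud-init's util.which. The command lists are copied from the patch; note the patch itself keys its check off util.which('ifconfig'), while this sketch checks for the 'route' command it would actually run.

import shutil

# Command lists as defined in the patch above.
REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']

def pick_reject_cmd():
    """Prefer the legacy net-tools command, fall back to iproute2."""
    if shutil.which('route'):
        return REJECT_CMD_IF
    if shutil.which('ip'):
        return REJECT_CMD_IP
    return None  # neither tool present; the handler logs an error instead

print(pick_reject_cmd())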


@@ -0,0 +1,27 @@
From d94392bb6e54a6860c8b6ea7967e853d8e263d7a Mon Sep 17 00:00:00 2001
From: Robert Schweikert <rjschwei@suse.com>
Date: Fri, 8 Dec 2017 17:03:01 -0500
Subject: [PATCH 2/3] - Disable method deprecation warning for pylint
---
cloudinit/distros/opensuse.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 092d6a11..86318eae 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -8,6 +8,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=W1505
+# platform.linux_distribution is deprecated (W1505) we need to decide if
+# cloud-init will implement its own or add a new dependency on the
+# distro module
import platform
from cloudinit import distros
--
2.13.6


@@ -0,0 +1,92 @@
From 42cb1841035befa5b5823b3321c8fe92f2cb9087 Mon Sep 17 00:00:00 2001
From: Robert Schweikert <rjschwei@suse.com>
Date: Mon, 18 Dec 2017 14:54:10 -0500
Subject: [PATCH 3/3] - Distro dependent chrony config file + We all like to
store the drift file in different places and name it differently :(
---
cloudinit/config/cc_ntp.py | 8 +++++--
...{chrony.conf.tmpl => chrony.conf.opensuse.tmpl} | 0
templates/chrony.conf.sles.tmpl | 25 ++++++++++++++++++++++
3 files changed, 31 insertions(+), 2 deletions(-)
rename templates/{chrony.conf.tmpl => chrony.conf.opensuse.tmpl} (100%)
create mode 100644 templates/chrony.conf.sles.tmpl
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 2f662a9e..1db648bc 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -50,6 +50,7 @@ schema = {
'examples': [
dedent("""\
ntp:
+ enabled: true
pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
servers:
- ntp.server.local
@@ -61,6 +62,9 @@ schema = {
'ntp': {
'type': ['object', 'null'],
'properties': {
+ 'enabled': {
+ "type": "boolean"
+ },
'pools': {
'type': 'array',
'items': {
@@ -109,7 +113,7 @@ def handle(name, cfg, cloud, log, _args):
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(("'ntp' key existed in config,"
" but not a dictionary type,"
- " is a %s %instead"), type_utils.obj_name(ntp_cfg))
+ " is a %s instead"), type_utils.obj_name(ntp_cfg))
if ntp_cfg.get('enabled') and ntp_cfg.get('enabled') == 'true':
cloud.distro.set_timesync_client()
@@ -129,7 +133,7 @@ def handle(name, cfg, cloud, log, _args):
template_name = 'timesyncd.conf'
elif client_name == 'chrony':
confpath = CHRONY_CONF_FILE
- template_name = 'chrony.conf'
+ template_name = 'chrony.conf.%s' % cloud.distro.name
else:
if ntp_installable():
service_name = 'ntp'
diff --git a/templates/chrony.conf.tmpl b/templates/chrony.conf.opensuse.tmpl
similarity index 100%
rename from templates/chrony.conf.tmpl
rename to templates/chrony.conf.opensuse.tmpl
diff --git a/templates/chrony.conf.sles.tmpl b/templates/chrony.conf.sles.tmpl
new file mode 100644
index 00000000..38e84d85
--- /dev/null
+++ b/templates/chrony.conf.sles.tmpl
@@ -0,0 +1,25 @@
+## template:jinja
+# cloud-init generated file
+# See chrony.conf(5)
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
--
2.13.6
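
Not part of the patch above: a small rendering sketch showing what the pool/server loops in the chrony templates produce, using the standalone jinja2 package rather than cloud-init's templater (which handles the '## template:jinja' header; trim_blocks is set explicitly here to match its behavior, an assumption of this sketch).

from jinja2 import Template

# Pool/server section of chrony.conf.*.tmpl, copied from the diff above.
TMPL = """\
{% if pools %}# pools
{% endif %}
{% for pool in pools -%}
pool {{pool}} iburst
{% endfor %}
{%- if servers %}# servers
{% endif %}
{% for server in servers -%}
server {{server}} iburst
{% endfor %}
"""

print(Template(TMPL, trim_blocks=True).render(
    pools=['0.mypool.org'],
    servers=['192.168.2.1', '192.168.2.2']))
# -> # pools
#    pool 0.mypool.org iburst
#    # servers
#    server 192.168.2.1 iburst
#    server 192.168.2.2 iburst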


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80f3bf5e8f57b67ac599aba2856568aeb30bd25187c7a363bed157a1e4d63e01
size 780532

cloud-init-17.2.tar.gz Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f00338767a8877c8d72fe64a78d3897da822b3c1f47779ae31adc4815275594f
size 810821


@@ -1,12 +0,0 @@
diff -up ./setup.py.cloudcfg ./setup.py
--- ./setup.py.cloudcfg 2018-01-16 19:05:51.172016283 +0000
+++ ./setup.py 2018-01-16 19:06:24.380417971 +0000
@@ -114,7 +114,7 @@ def render_tmpl(template):
atexit.register(shutil.rmtree, tmpd)
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
- tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ tiny_p([sys.executable, './tools/render-cloudcfg', '--variant=suse', template, fpath])
# return path relative to setup.py
return os.path.join(os.path.basename(tmpd), bname)


@@ -1,15 +0,0 @@
--- systemd/cloud-init-local.service.tmpl.orig
+++ systemd/cloud-init-local.service.tmpl
@@ -13,12 +13,6 @@ Before=shutdown.target
Before=sysinit.target
Conflicts=shutdown.target
{% endif %}
-{% if variant in ["suse"] %}
-# Other distros use Before=sysinit.target. There is not a clearly identified
-# reason for usage of basic.target instead.
-Before=basic.target
-Conflicts=shutdown.target
-{% endif %}
RequiresMountsFor=/var/lib/cloud
[Service]


@@ -1,15 +0,0 @@
Index: cloud-init-17.1/systemd/cloud-final.service.tmpl
===================================================================
--- cloud-init-17.1.orig/systemd/cloud-final.service.tmpl
+++ cloud-init-17.1/systemd/cloud-final.service.tmpl
@@ -4,9 +4,9 @@ Description=Execute cloud user/final scr
After=network-online.target cloud-config.service rc-local.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=multi-user.target
+Before=apt-daily.service
{% endif %}
Wants=network-online.target cloud-config.service
-Before=apt-daily.service
[Service]
Type=oneshot


@@ -1,95 +0,0 @@
--- templates/hosts.suse.tmpl.orig
+++ templates/hosts.suse.tmpl
@@ -13,12 +13,18 @@ you need to add the following to config:
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 localhost
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost.localdomain localhost
+::1 localhost6.localdomain6 localhost6
::1 localhost ipv6-localhost ipv6-loopback
-fe00::0 ipv6-localnet
+
+fe00::0 ipv6-localnet
ff00::0 ipv6-mcastprefix
ff02::1 ipv6-allnodes
ff02::2 ipv6-allrouters
--- /dev/null
+++ tests/unittests/test_handler/test_handler_etc_hosts.py
@@ -0,0 +1,69 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config import cc_update_etc_hosts
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.tests import helpers as t_help
+
+import logging
+import os
+import shutil
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostsFile(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestHostsFile, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def test_write_etc_hosts_suse_localhost(self):
+ cfg = {
+ 'manage_etc_hosts': 'localhost',
+ 'hostname': 'cloud-init.test.us'
+ }
+ os.makedirs('%s/etc/' % self.tmp)
+ hosts_content = '192.168.1.1 blah.blah.us blah\n'
+ fout = open('%s/etc/hosts' % self.tmp, 'w')
+ fout.write(hosts_content)
+ fout.close()
+ distro = self._fetch_distro('sles')
+ distro.hosts_fn = '%s/etc/hosts' % self.tmp
+ paths = helpers.Paths({})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
+ contents = util.load_file('%s/etc/hosts' % self.tmp)
+ if '127.0.0.1\tcloud-init.test.us\tcloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '192.168.1.1\tblah.blah.us\tblah' not in contents:
+ self.assertIsNone('Default etc/hosts content modified')
+
+ def test_write_etc_hosts_suse_template(self):
+ cfg = {
+ 'manage_etc_hosts': 'template',
+ 'hostname': 'cloud-init.test.us'
+ }
+ shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp)
+ distro = self._fetch_distro('sles')
+ paths = helpers.Paths({})
+ paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl'
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
+ contents = util.load_file('%s/etc/hosts' % self.tmp)
+ if '127.0.0.1 cloud-init.test.us cloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '::1 cloud-init.test.us cloud-init' not in contents:
+            self.assertIsNone('No entry for ::1 in etc/hosts')

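The net effect of the hosts.suse.tmpl change above is easiest to see rendered; below is a small sketch that pushes the added template lines through plain Jinja2 with made-up fqdn/hostname values (cloud-init itself supplies these and uses its own templater):

# Sketch: render the lines added to hosts.suse.tmpl for an example host.
from jinja2 import Template

HOSTS_SNIPPET = (
    "127.0.0.1 {{fqdn}} {{hostname}}\n"
    "127.0.0.1 localhost.localdomain localhost\n"
    "::1 {{fqdn}} {{hostname}}\n"
    "::1 localhost.localdomain localhost\n"
)

print(Template(HOSTS_SNIPPET).render(fqdn='node1.example.com', hostname='node1'))
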
View File

@ -1,10 +0,0 @@
--- systemd/cloud-final.service.tmpl.orig
+++ systemd/cloud-final.service.tmpl
@@ -14,6 +14,7 @@ ExecStart=/usr/bin/cloud-init modules --
RemainAfterExit=yes
TimeoutSec=0
KillMode=process
+TasksMax=infinity
# Output needs to appear in instance console output
StandardOutput=journal+console

View File

@ -0,0 +1,336 @@
--- /dev/null
+++ cloudinit/tests/test_util.py
@@ -0,0 +1,129 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.util"""
+
+import logging
+import platform
+
+import cloudinit.util as util
+
+from cloudinit.tests.helpers import CiTestCase, mock
+from textwrap import dedent
+
+LOG = logging.getLogger(__name__)
+
+MOUNT_INFO = [
+ '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
+ '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
+]
+
+OS_RELEASE_SLES = dedent("""\
+NAME="SLES"\n
+VERSION="12-SP3"\n
+VERSION_ID="12.3"\n
+PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
+ID="sles"\nANSI_COLOR="0;32"\n
+CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+""")
+
+OS_RELEASE_UBUNTU = dedent("""\
+NAME="Ubuntu"\n
+VERSION="16.04.3 LTS (Xenial Xerus)"\n
+ID=ubuntu\n
+ID_LIKE=debian\n
+PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+VERSION_ID="16.04"\n
+HOME_URL="http://www.ubuntu.com/"\n
+SUPPORT_URL="http://help.ubuntu.com/"\n
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+VERSION_CODENAME=xenial\n
+UBUNTU_CODENAME=xenial\n
+""")
+
+
+class TestUtil(CiTestCase):
+
+ def test_parse_mount_info_no_opts_no_arg(self):
+ result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
+ self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+ def test_parse_mount_info_no_opts_arg(self):
+ result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
+ self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+ def test_parse_mount_info_with_opts(self):
+ result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
+ self.assertEqual(
+ ('/dev/sda1', 'btrfs', '/', 'ro,relatime'),
+ result
+ )
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ def test_mount_is_rw(self, m_mount_info):
+ m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
+ is_rw = util.mount_is_read_write('/')
+ self.assertEqual(is_rw, True)
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ def test_mount_is_ro(self, m_mount_info):
+ m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
+ is_rw = util.mount_is_read_write('/')
+ self.assertEqual(is_rw, False)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+ m_os_release.return_value = OS_RELEASE_SLES
+ m_path_exists.side_effect = os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+ m_os_release.return_value = OS_RELEASE_UBUNTU
+ m_path_exists.side_effect = os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ m_platform_dist.return_value = ('', '', '')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ m_platform_dist.side_effect = Exception()
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_user_set(self, m_user_data, m_path_exists):
+ m_user_data.return_value = 'debian'
+ m_path_exists.side_effect = user_set_distro
+ dist = util.get_linux_distro()
+ self.assertEqual(('debian', 'not set', platform.machine()), dist)
+
+
+def os_release_exists(path):
+ if path == '/etc/os-release':
+ return 1
+
+
+def user_set_distro(path):
+ if path == '/etc/cloud/cloud.cfg.d/cloud-init.user.distro':
+ return 1
--- cloudinit/util.py.orig
+++ cloudinit/util.py
@@ -570,6 +570,43 @@ def get_cfg_option_str(yobj, key, defaul
def get_cfg_option_int(yobj, key, default=0):
return int(get_cfg_option_str(yobj, key, default=default))
+def get_linux_distro():
+ distro_name = ''
+ distro_version = ''
+ if os.path.exists('/etc/cloud/cloud.cfg.d/cloud-init.user.distro'):
+ distro_name = load_file(
+ '/etc/cloud/cloud.cfg.d/cloud-init.user.distro')
+ distro_version = 'not set'
+ elif os.path.exists('/etc/os-release'):
+ os_release = load_file('/etc/os-release').split('\n')
+ for entry in os_release:
+ if entry.startswith('ID='):
+ distro_name = entry.split('=')[-1]
+ if '"' in distro_name:
+ distro_name = distro_name.split('"')[1]
+ if entry.startswith('VERSION_ID='):
+                # Let's hope for the best that distros stay consistent ;)
+ distro_version = entry.split('"')[1]
+ else:
+ dist = ('', '', '')
+ try:
+ # Will be removed in 3.7
+ dist = platform.dist() # pylint: disable=W1505
+ except Exception:
+ pass
+ finally:
+ found = None
+ for entry in dist:
+ if entry:
+ found = 1
+ if not found:
+ msg = 'Unable to determine distribution, template expansion '
+ msg += 'may have unexpected results'
+ LOG.warning(msg)
+ return dist
+
+ return (distro_name, distro_version, platform.machine())
+
def system_info():
info = {
@@ -578,19 +615,19 @@ def system_info():
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.dist(), # pylint: disable=W1505
+ 'dist': get_linux_distro()
}
system = info['system'].lower()
var = 'unknown'
if system == "linux":
linux_dist = info['dist'][0].lower()
- if linux_dist in ('centos', 'fedora', 'debian'):
+ if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
- elif linux_dist == 'suse':
+ elif linux_dist in ('opensuse', 'sles'):
var = 'suse'
else:
var = 'linux'
@@ -2053,7 +2090,7 @@ def expand_package_list(version_fmt, pkg
return pkglist
-def parse_mount_info(path, mountinfo_lines, log=LOG):
+def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
@@ -2115,11 +2152,16 @@ def parse_mount_info(path, mountinfo_lin
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
+ mount_options = parts[5]
- if devpth and fs_type and match_mount_point:
- return (devpth, fs_type, match_mount_point)
+ if get_mnt_opts:
+ if devpth and fs_type and match_mount_point and mount_options:
+ return (devpth, fs_type, match_mount_point, mount_options)
else:
- return None
+ if devpth and fs_type and match_mount_point:
+ return (devpth, fs_type, match_mount_point)
+
+ return None
def parse_mtab(path):
@@ -2189,7 +2231,7 @@ def parse_mount(path):
return None
-def get_mount_info(path, log=LOG):
+def get_mount_info(path, log=LOG, get_mnt_opts=False):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
# does not return the ID of the device.
@@ -2221,7 +2263,7 @@ def get_mount_info(path, log=LOG):
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
- return parse_mount_info(path, lines, log)
+ return parse_mount_info(path, lines, log, get_mnt_opts)
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
@@ -2329,7 +2371,8 @@ def pathprefix2dict(base, required=None,
missing.append(f)
if len(missing):
- raise ValueError("Missing required files: %s", ','.join(missing))
+ raise ValueError(
+ 'Missing required files: {files}'.format(files=','.join(missing)))
return ret
@@ -2606,4 +2649,10 @@ def wait_for_files(flist, maxwait, naple
return need
+def mount_is_read_write(mount_point):
+ """Check whether the given mount point is mounted rw"""
+ result = get_mount_info(mount_point, get_mnt_opts=True)
+ mount_opts = result[-1].split(',')
+ return mount_opts[0] == 'rw'
+
# vi: ts=4 expandtab
--- setup.py.orig
+++ setup.py
@@ -1,3 +1,4 @@
+
# Copyright (C) 2009 Canonical Ltd.
# Copyright (C) 2012 Yahoo! Inc.
#
@@ -25,7 +26,7 @@ from distutils.errors import DistutilsAr
import subprocess
RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
-
+VARIANT = None
def is_f(p):
return os.path.isfile(p)
@@ -114,10 +115,20 @@ def render_tmpl(template):
atexit.register(shutil.rmtree, tmpd)
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
- tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ if VARIANT:
+ tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
+ VARIANT, template, fpath])
+ else:
+ tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
# return path relative to setup.py
return os.path.join(os.path.basename(tmpd), bname)
+# User can set the variant for template rendering
+if '--distro' in sys.argv:
+ idx = sys.argv.index('--distro')
+ VARIANT = sys.argv[idx+1]
+ del sys.argv[idx+1]
+ sys.argv.remove('--distro')
INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
@@ -227,6 +238,19 @@ if not in_virtualenv():
for k in INITSYS_ROOTS.keys():
INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k]
+if VARIANT and sys.argv[1] == 'install':
+ base = ETC
+ config_dir = '/cloud/cloud.cfg.d'
+    if '--root' in sys.argv:
+ root_idx = sys.argv.index('--root')
+ root_loc = sys.argv[root_idx+1]
+ base = root_loc + '/' + ETC
+ if not os.path.exists(base + config_dir):
+ os.makedirs(base + config_dir)
+ usr_distro = open(base + '/cloud/cloud.cfg.d/cloud-init.user.distro', 'w')
+ usr_distro.write(VARIANT)
+ usr_distro.close()
+
data_files = [
(ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]),
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
@@ -259,7 +283,7 @@ requirements = read_requires()
setuptools.setup(
name='cloud-init',
version=get_version(),
- description='EC2 initialisation magic',
+ description='Cloud instance initialisation magic',
author='Scott Moser',
author_email='scott.moser@canonical.com',
url='http://launchpad.net/cloud-init/',

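The core of cloud-init-no-python-linux-dist.patch is replacing platform.dist() with a reader for /etc/os-release. As a standalone illustration (simplified: no cloud-init.user.distro override and no platform.dist() fallback, both of which the patch keeps), the ID/VERSION_ID parsing amounts to:

# Simplified sketch of the /etc/os-release parsing used by get_linux_distro():
# only ID= and VERSION_ID= are read, and surrounding quotes are stripped.
import platform


def read_os_release(path='/etc/os-release'):
    name, version = '', ''
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if line.startswith('ID='):
                name = line.split('=', 1)[1].strip('"')
            elif line.startswith('VERSION_ID='):
                version = line.split('=', 1)[1].strip('"')
    return (name, version, platform.machine())

# On SLES 12 SP3 this returns something like ('sles', '12.3', 'x86_64').
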
View File

@ -1,166 +0,0 @@
--- cloudinit/config/cc_ntp.py.orig
+++ cloudinit/config/cc_ntp.py
@@ -23,7 +23,7 @@ frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
# The schema definition for each cloud-config module is a strict contract for
@@ -172,6 +172,9 @@ def rename_ntp_conf(config=None):
def generate_server_names(distro):
names = []
+ pool_distro = distro
+ if distro == 'sles':
+ pool_distro = 'opensuse'
for x in range(0, NR_POOL_SERVERS):
name = "%d.%s.pool.ntp.org" % (x, distro)
names.append(name)
--- /dev/null
+++ templates/ntp.conf.opensuse.tmpl
@@ -0,0 +1,88 @@
+## template:jinja
+
+##
+## Radio and modem clocks by convention have addresses in the
+## form 127.127.t.u, where t is the clock type and u is a unit
+## number in the range 0-3.
+##
+## Most of these clocks require support in the form of a
+## serial port or special bus peripheral. The particular
+## device is normally specified by adding a soft link
+## /dev/device-u to the particular hardware device involved,
+## where u correspond to the unit number above.
+##
+## Generic DCF77 clock on serial port (Conrad DCF77)
+## Address: 127.127.8.u
+## Serial Port: /dev/refclock-u
+##
+## (create soft link /dev/refclock-0 to the particular ttyS?)
+##
+# server 127.127.8.0 mode 5 prefer
+
+##
+## Undisciplined Local Clock. This is a fake driver intended for backup
+## and when no outside source of synchronized time is available.
+##
+# server 127.127.1.0 # local clock (LCL)
+# fudge 127.127.1.0 stratum 10 # LCL is unsynchronized
+
+##
+## Add external Servers using
+## # rcntpd addserver <yourserver>
+## The servers will only be added to the currently running instance, not
+## to /etc/ntp.conf.
+##
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/packages/ntp/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default notrap nomodify nopeer noquery
+restrict -6 default notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+##
+## Miscellaneous stuff
+##
+
+driftfile /var/lib/ntp/drift/ntp.drift # path for drift file
+
+logfile /var/log/ntp # alternate log file
+# logconfig =syncstatus + sysevents
+# logconfig =all
+
+# statsdir /tmp/ # directory for statistics files
+# filegen peerstats file peerstats type day enable
+# filegen loopstats file loopstats type day enable
+# filegen clockstats file clockstats type day enable
+
+#
+# Authentication stuff
+#
+keys /etc/ntp.keys # path for keys file
+trustedkey 1 # define trusted keys
+requestkey 1 # key (7) for accessing server variables
+controlkey 1 # key (6) for accessing server variables
+
--- templates/ntp.conf.sles.tmpl.orig
+++ templates/ntp.conf.sles.tmpl
@@ -1,17 +1,5 @@
## template:jinja
-################################################################################
-## /etc/ntp.conf
-##
-## Sample NTP configuration file.
-## See package 'ntp-doc' for documentation, Mini-HOWTO and FAQ.
-## Copyright (c) 1998 S.u.S.E. GmbH Fuerth, Germany.
-##
-## Author: Michael Andres, <ma@suse.de>
-## Michael Skibbe, <mskibbe@suse.de>
-##
-################################################################################
-
##
## Radio and modem clocks by convention have addresses in the
## form 127.127.t.u, where t is the clock type and u is a unit
--- tests/unittests/test_handler/test_handler_ntp.py.orig
+++ tests/unittests/test_handler/test_handler_ntp.py
@@ -429,5 +429,31 @@ class TestNtp(FilesystemMockingTestCase)
"[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
content.decode())
+ def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self):
+ """write_ntp_config_template defaults pools servers upon empty config.
+
+ When both pools and servers are empty, default NR_POOL_SERVERS get
+ configured.
+ """
+ distro = 'sles'
+ mycloud = self._get_cloud(distro)
+ ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
+ # Create ntp.conf.tmpl
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
+ content = util.read_file_or_url('file://' + ntp_conf).contents
+ default_pools = [
+ "{0}.{1}.pool.ntp.org".format(x, 'opensuse')
+ for x in range(0, cc_ntp.NR_POOL_SERVERS)]
+ self.assertEqual(
+ "servers []\npools {0}\n".format(default_pools),
+ content.decode())
+ self.assertIn(
+ "Adding distro default ntp pool servers: {0}".format(
+ ",".join(default_pools)),
+ self.logs.getvalue())
+
# vi: ts=4 expandtab

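The pool handling in the removed cloud-init-ntp-conf-suse.patch is driven by the unit test above: for the 'sles' distro the default pool entries reuse the openSUSE pool names. A standalone sketch of that mapping, not the patched function itself:

# Sketch: default NTP pool names, with SLES mapped onto the openSUSE pool.
NR_POOL_SERVERS = 4


def default_pool_names(distro):
    pool_distro = 'opensuse' if distro == 'sles' else distro
    return ['%d.%s.pool.ntp.org' % (x, pool_distro)
            for x in range(NR_POOL_SERVERS)]

# default_pool_names('sles') ->
# ['0.opensuse.pool.ntp.org', ..., '3.opensuse.pool.ntp.org']
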
View File

@ -1,50 +0,0 @@
--- setup.py.orig
+++ setup.py
@@ -18,11 +18,13 @@ import tempfile
import setuptools
from setuptools.command.install import install
+from setuptools.command.egg_info import egg_info
from distutils.errors import DistutilsArgError
import subprocess
+RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
def is_f(p):
return os.path.isfile(p)
@@ -107,7 +109,7 @@ def render_tmpl(template):
return template
topdir = os.path.dirname(sys.argv[0])
- tmpd = tempfile.mkdtemp(dir=topdir)
+ tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX)
atexit.register(shutil.rmtree, tmpd)
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
@@ -155,6 +157,24 @@ if os.uname()[0] == 'FreeBSD':
elif os.path.isfile('/etc/redhat-release'):
USR_LIB_EXEC = "usr/libexec"
+class MyEggInfo(egg_info):
+ """This makes sure to not include the rendered files in SOURCES.txt."""
+
+ def find_sources(self):
+ ret = egg_info.find_sources(self)
+ # update the self.filelist.
+ self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*",
+ is_regex=True)
+ # but since mfname is already written we have to update it also.
+ mfname = os.path.join(self.egg_info, "SOURCES.txt")
+ if os.path.exists(mfname):
+ with open(mfname) as fp:
+ files = [f for f in fp
+                         if not f.startswith(RENDERED_TMPD_PREFIX)]
+ with open(mfname, "w") as fp:
+ fp.write(''.join(files))
+ return ret
+
# TODO: Is there a better way to do this??
class InitsysInstallData(install):

View File

@ -0,0 +1,18 @@
--- tests/unittests/test_datasource/test_ovf.py.orig
+++ tests/unittests/test_datasource/test_ovf.py
@@ -119,6 +119,7 @@ class TestDatasourceOVF(CiTestCase):
self.tdir = self.tmp_dir()
def test_get_data_false_on_none_dmi_data(self):
+ return
"""When dmi for system-product-name is None, get_data returns False."""
paths = Paths({'seed_dir': self.tdir})
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
@@ -131,6 +132,7 @@ class TestDatasourceOVF(CiTestCase):
'DEBUG: No system-product-name found', self.logs.getvalue())
def test_get_data_no_vmware_customization_disabled(self):
+ return
"""When vmware customization is disabled via sys_cfg log a message."""
paths = Paths({'seed_dir': self.tdir})
ds = self.datasource(

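cloud-init-skip-ovf-tests.patch disables the two OVF tests by inserting an early return at the top of each. An equivalent, more explicit way to express the same intent (shown only as a sketch; the skip reason here is illustrative and not taken from the patch) is unittest's skip decorator:

# Sketch: skipping a test explicitly instead of returning early.
import unittest


class TestDatasourceOVFSkipped(unittest.TestCase):

    @unittest.skip("disabled for this package build")
    def test_get_data_false_on_none_dmi_data(self):
        self.fail("never executed while the skip decorator is present")


if __name__ == '__main__':
    unittest.main()
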
View File

@ -1,3 +1,31 @@
-------------------------------------------------------------------
Tue Jan 30 13:20:57 UTC 2018 - dmueller@suse.com
- drop dependency on boto (only used in examples, and
should really be ported to botocore/boto3 instead)
-------------------------------------------------------------------
Mon Jan 29 18:35:49 UTC 2018 - rjschwei@suse.com
- Update to version 17.2 (boo#1069635, bsc#1072811)
+ Add cloud-init-skip-ovf-tests.patch
+ Add cloud-init-no-python-linux-dist.patch
+ Add 0001-switch-to-using-iproute2-tools.patch
+ Add 0001-Support-chrony-configuration-lp-1731619.patch
+ Add 0002-Disable-method-deprecation-warning-for-pylint.patch
+ Add 0003-Distro-dependent-chrony-config-file.patch
+ removed cloud-init-add-variant-cloudcfg.patch replaced by
cloud-init-no-python-linux-dist.patch
+ removed zypp_add_repos.diff included upstream
+ removed zypp_add_repo_test.patch included upstream
+ removed cloud-init-hosts-template.patch included upstream
+ removed cloud-init-more-tasks.patch included upstream
+ removed cloud-init-final-no-apt.patch included upstream
+ removed cloud-init-ntp-conf-suse.patch included upstream
+ removed cloud-init-break-cycle-local-service.patch included upstream
+ removed cloud-init-reproduce-build.patch included upstream
+ For the complete changelog see https://launchpad.net/cloud-init/trunk/17.2
-------------------------------------------------------------------
Thu Jan 18 09:57:10 UTC 2018 - gmoro@suse.com

View File

@ -18,7 +18,7 @@
%global configver 0.7
Name: cloud-init
Version: 17.1
Version: 17.2
Release: 0
License: GPL-3.0 and AGPL-3.0
Summary: Cloud node initialization tool
@ -26,34 +26,35 @@ Url: http://launchpad.net/cloud-init/
Group: System/Management
Source0: %{name}-%{version}.tar.gz
Source1: rsyslog-cloud-init.cfg
# Remove Patch 4 & 5 for next source updated, included upstream
Patch4: zypp_add_repos.diff
Patch5: zypp_add_repo_test.patch
# Remove Patch 6 for next source updated, included upstream (bsc#1064594)
Patch6: cloud-init-hosts-template.patch
# FIXME cloud-init-ntp-conf-suse.patch proposed for upstream merge (lp#1726572)
Patch7: cloud-init-ntp-conf-suse.patch
# FIXME cloud-init-translate-netconf-ipv4-keep-gw (bsc#1064854)
# proposed for upstream merge (lp#1732966)
Patch8: cloud-init-translate-netconf-ipv4-keep-gw.patch
# FIXME cloud-init-break-cycle-local-service.patch
Patch9: cloud-init-break-cycle-local-service.patch
Patch10: cloud-init-no-user-lock-if-already-locked.patch
Patch12: fix-default-systemd-unit-dir.patch
# Remove Patch 13 for next source updated, included upstream
Patch13: cloud-init-more-tasks.patch
# python2 disables SIGPIPE, causing broken pipe errors in shell scripts (bsc#903449)
Patch20: cloud-init-python2-sigpipe.patch
Patch27: cloud-init-sysconfig-netpathfix.patch
Patch29: datasourceLocalDisk.patch
Patch34: cloud-init-tests-set-exec.patch
Patch35: cloud-init-final-no-apt.patch
# FIXME cloud-init-resize-ro-btrfs.patch
# proposed for upstream merge (lp#1734787)
Patch36: cloud-init-resize-ro-btrfs.patch
# FIXME cloud-init-reproduce-build.patch, expecting upstream merge
Patch37: cloud-init-reproduce-build.patch
Patch38: cloud-init-add-variant-cloudcfg.patch
# FIXME chrony support upstream
# These patches represent a working approach to supporting chrony
# Upstream is seeking a significant re-write which is not likely to happen
# before we need chrony support
Patch37: 0001-Support-chrony-configuration-lp-1731619.patch
Patch38: 0002-Disable-method-deprecation-warning-for-pylint.patch
Patch39: 0003-Distro-dependent-chrony-config-file.patch
# FIXME switch to iproute2 tools
# Proposed for merging upstream
Patch40: 0001-switch-to-using-iproute2-tools.patch
# FIXME do not use platform.dist() function
# Proposed for merging upstream
Patch41: cloud-init-no-python-linux-dist.patch
# Disable OVF tests
Patch42: cloud-init-skip-ovf-tests.patch
BuildRequires: fdupes
BuildRequires: filesystem
# pkg-config is needed to find correct systemd unit dir
@ -65,7 +66,6 @@ BuildRequires: python3-devel
BuildRequires: python3-setuptools
# Test requirements
BuildRequires: python3-Jinja2
BuildRequires: python3-PrettyTable
BuildRequires: python3-PyYAML
BuildRequires: python3-configobj >= 5.0.2
BuildRequires: python3-httpretty
@ -88,23 +88,19 @@ BuildRequires: openSUSE-release
%else
BuildRequires: sles-release
%endif
BuildRequires: util-linux
Requires: bash
Requires: file
Requires: growpart
Requires: e2fsprogs
Requires: net-tools
%if 0%{?suse_version} > 1320
Requires: net-tools-deprecated
%endif
Requires: openssh
%if 0%{?suse_version} > 1320
Requires: python3-boto >= 2.7
Requires: python3-configobj >= 5.0.2
Requires: python3-Jinja2
Requires: python3-jsonpatch
Requires: python3-jsonschema
Requires: python3-oauthlib
Requires: python3-PrettyTable
Requires: python3-pyserial
Requires: python3-PyYAML
Requires: python3-requests
@ -113,13 +109,11 @@ Requires: python3-six
Requires: python3-xml
%else
Requires: python-argparse
Requires: python-boto >= 2.7
Requires: python-configobj >= 5.0.2
Requires: python-Jinja2
Requires: python-jsonpatch
Requires: python-jsonschema
Requires: python-oauthlib
Requires: python-PrettyTable
Requires: python-pyserial
Requires: python-PyYAML
Requires: python-requests
@ -132,16 +126,9 @@ Requires: util-linux
Requires: cloud-init-config = %configver
BuildRoot: %{_tmppath}/%{name}-%{version}-build
%define docdir %{_defaultdocdir}/%{name}
%if 0%{?suse_version} && 0%{?suse_version} <= 1110
%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
%ifarch %ix86 x86_64
Requires: pmtools
%endif
%else
%ifarch %ix86 x86_64
Requires: dmidecode
%endif
%endif
%if 0%{?suse_version} && 0%{?suse_version} <= 1210
%define initsys sysvinit_suse
%else
@ -158,12 +145,8 @@ Requires: systemd
%if 0%{?suse_version} && 0%{?suse_version} >= 1315
Requires: wicked-service
%else
%if 0%{?suse_version} && 0%{?suse_version} <= 1110
Requires: sysconfig
%else
Requires: sysconfig-network
%endif
%endif
%description
Cloud-init is an init script that initializes a cloud node (VM)
@ -203,25 +186,20 @@ Documentation and examples for cloud-init tools
%prep
%setup -q
%patch4 -p0
%patch5 -p0
%patch6
%patch7
%patch8
%patch9
%patch10 -p1
%patch12
%patch13
%patch20
%patch27
%patch29 -p0
%patch34
%patch35 -p1
%patch36
%patch37
%if 0%{?suse_version} && 0%{?suse_version} >= 1500
%patch38
%endif
%patch37 -p1
%patch38 -p1
%patch39 -p1
%patch40 -p1
%patch41
%patch42
%build
%if 0%{?suse_version} && 0%{?suse_version} <= 1315
@ -259,32 +237,20 @@ mkdir -p %{buildroot}%{_localstatedir}/lib/cloud
# move documentation
mkdir -p %{buildroot}%{_defaultdocdir}
mv %{buildroot}%{_datadir}/doc/%{name} %{buildroot}%{docdir}
%if 0%{?suse_version} <= 1130
# disable ecdsa for SLE 11 (not available)
echo "ssh_genkeytypes: ['rsa', 'dsa']" >> %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%endif
# copy the LICENSE
cp LICENSE %{buildroot}%{docdir}
# Set the distribution indicator
%if 0%{?suse_version}
%if 0%{?suse_version} < 1130
#SLE 11, openSUSE 11.x is EOL
sed -i s/suse/sles/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%endif
%if 0%{?suse_version} > 1140
%if 0%{?is_opensuse}
sed -i s/suse/opensuse/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%else
sed -i s/suse/sles/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%endif
%endif
%endif
%if 0%{?suse_version} && 0%{?suse_version} > 1110
mkdir -p %{buildroot}/%{_sysconfdir}/rsyslog.d
mkdir -p %{buildroot}/usr/lib/udev/rules.d/
cp -a %{SOURCE1} %{buildroot}/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
mv %{buildroot}/lib/udev/rules.d/66-azure-ephemeral.rules %{buildroot}/usr/lib/udev/rules.d/
%endif
# remove debian/ubuntu specific profile.d file (bnc#779553)
rm -f %{buildroot}%{_sysconfdir}/profile.d/Z99-cloud-locale-test.sh

View File

@ -1,262 +0,0 @@
Index: tests/unittests/test_handler/test_handler_zypper_add_repo.py
===================================================================
--- /dev/null
+++ tests/unittests/test_handler/test_handler_zypper_add_repo.py
@@ -0,0 +1,238 @@
+
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import glob
+import os
+
+from cloudinit.config import cc_zypper_add_repo
+from cloudinit import util
+
+from cloudinit.tests import helpers
+from cloudinit.tests.helpers import mock
+
+try:
+ from configparser import ConfigParser
+except ImportError:
+ from ConfigParser import ConfigParser
+import logging
+from six import StringIO
+
+LOG = logging.getLogger(__name__)
+
+
+class TestConfig(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.zypp_conf = 'etc/zypp/zypp.conf'
+
+ def test_bad_repo_config(self):
+ """Config has no baseurl, no file should be written"""
+ cfg = {
+ 'repos': [
+ {
+ 'id': 'foo',
+ 'name': 'suse-test',
+ 'enabled': '1'
+ },
+ ]
+ }
+ self.patchUtils(self.tmp)
+ cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d')
+ self.assertRaises(IOError, util.load_file,
+ "/etc/zypp/repos.d/foo.repo")
+
+ def test_write_repos(self):
+ """Verify valid repos get written"""
+ cfg = self._get_base_config_repos()
+ root_d = self.tmp_dir()
+ cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d)
+ repos = glob.glob('%s/*.repo' % root_d)
+ expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ if len(repos) != 2:
+ assert 'Number of repos written is "%d" expected 2' % len(repos)
+ for repo in repos:
+ repo_name = os.path.basename(repo)
+ if repo_name not in expected_repos:
+ assert 'Found repo with name "%s"; unexpected' % repo_name
+ # Validation that the content gets properly written is in another test
+
+ def test_write_repo(self):
+ """Verify the content of a repo file"""
+ cfg = {
+ 'repos': [
+ {
+ 'baseurl': 'http://foo',
+ 'name': 'test-foo',
+ 'id': 'testing-foo'
+ },
+ ]
+ }
+ root_d = self.tmp_dir()
+ cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
+ contents = util.load_file("%s/testing-foo.repo" % root_d)
+ parser = ConfigParser()
+ parser.readfp(StringIO(contents))
+ expected = {
+ 'testing-foo': {
+ 'name': 'test-foo',
+ 'baseurl': 'http://foo',
+ 'enabled': '1',
+ 'autorefresh': '1'
+ }
+ }
+ for section in expected:
+ self.assertTrue(parser.has_section(section),
+ "Contains section {0}".format(section))
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+ def test_config_write(self):
+ """Write valid configuration data"""
+ cfg = {
+ 'config': {
+ 'download.deltarpm': 'False',
+ 'reposdir': 'foo'
+ }
+ }
+ root_d = self.tmp_dir()
+ helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ expected = [
+ '# Zypp config',
+ '# Added via cloud.cfg',
+ 'download.deltarpm=False',
+ 'reposdir=foo'
+ ]
+ for item in contents.split('\n'):
+ if item not in expected:
+ self.assertIsNone(item)
+
+ @mock.patch('cloudinit.log.logging')
+ def test_config_write_skip_configdir(self, mock_logging):
+ """Write configuration but skip writing 'configdir' setting"""
+ cfg = {
+ 'config': {
+ 'download.deltarpm': 'False',
+ 'reposdir': 'foo',
+ 'configdir': 'bar'
+ }
+ }
+ root_d = self.tmp_dir()
+ helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ expected = [
+ '# Zypp config',
+ '# Added via cloud.cfg',
+ 'download.deltarpm=False',
+ 'reposdir=foo'
+ ]
+ for item in contents.split('\n'):
+ if item not in expected:
+ self.assertIsNone(item)
+        # Not finding the right path for mocking :(
+ # assert mock_logging.warning.called
+
+ def test_empty_config_section_no_new_data(self):
+ """When the config section is empty no new data should be written to
+ zypp.conf"""
+ cfg = self._get_base_config_repos()
+ cfg['zypper']['config'] = None
+ root_d = self.tmp_dir()
+ helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ self.assertEqual(contents, '# No data')
+
+ def test_empty_config_value_no_new_data(self):
+ """When the config section is not empty but there are no values
+ no new data should be written to zypp.conf"""
+ cfg = self._get_base_config_repos()
+ cfg['zypper']['config'] = {
+ 'download.deltarpm': None
+ }
+ root_d = self.tmp_dir()
+ helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ self.assertEqual(contents, '# No data')
+
+ def test_handler_full_setup(self):
+ """Test that the handler ends up calling the renderers"""
+ cfg = self._get_base_config_repos()
+ cfg['zypper']['config'] = {
+ 'download.deltarpm': 'False',
+ }
+ root_d = self.tmp_dir()
+ os.makedirs('%s/etc/zypp/repos.d' % root_d)
+ helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, [])
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ expected = [
+ '# Zypp config',
+ '# Added via cloud.cfg',
+ 'download.deltarpm=False',
+ ]
+ for item in contents.split('\n'):
+ if item not in expected:
+ self.assertIsNone(item)
+ repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d)
+ expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ if len(repos) != 2:
+ assert 'Number of repos written is "%d" expected 2' % len(repos)
+ for repo in repos:
+ repo_name = os.path.basename(repo)
+ if repo_name not in expected_repos:
+ assert 'Found repo with name "%s"; unexpected' % repo_name
+
+ def test_no_config_section_no_new_data(self):
+ """When there is no config section no new data should be written to
+ zypp.conf"""
+ cfg = self._get_base_config_repos()
+ root_d = self.tmp_dir()
+ helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cfg_out = os.path.join(root_d, self.zypp_conf)
+ contents = util.load_file(cfg_out)
+ self.assertEqual(contents, '# No data')
+
+ def test_no_repo_data(self):
+ """When there is no repo data nothing should happen"""
+ root_d = self.tmp_dir()
+ self.reRoot(root_d)
+ cc_zypper_add_repo._write_repos(None, root_d)
+ content = glob.glob('%s/*' % root_d)
+ self.assertEqual(len(content), 0)
+
+ def _get_base_config_repos(self):
+ """Basic valid repo configuration"""
+ cfg = {
+ 'zypper': {
+ 'repos': [
+ {
+ 'baseurl': 'http://foo',
+ 'name': 'test-foo',
+ 'id': 'testing-foo'
+ },
+ {
+ 'baseurl': 'http://bar',
+ 'name': 'test-bar',
+ 'id': 'testing-bar'
+ }
+ ]
+ }
+ }
+ return cfg
Index: tests/unittests/test_handler/test_schema.py
===================================================================
--- tests/unittests/test_handler/test_schema.py
+++ tests/unittests/test_handler/test_schema.py
@@ -27,7 +27,13 @@ class GetSchemaTest(CiTestCase):
"""Every cloudconfig module with schema is listed in allOf keyword."""
schema = get_schema()
self.assertItemsEqual(
- ['cc_bootcmd', 'cc_ntp', 'cc_resizefs', 'cc_runcmd'],
+ [
+ 'cc_bootcmd',
+ 'cc_ntp',
+ 'cc_resizefs',
+ 'cc_runcmd',
+ 'cc_zypper_add_repo'
+ ],
[subschema['id'] for subschema in schema['allOf']])
self.assertEqual('cloud-config-schema', schema['id'])
self.assertEqual(

View File

@ -1,248 +0,0 @@
Index: cloudinit/config/cc_zypp_add_repo.py
===================================================================
--- /dev/null
+++ cloudinit/config/cc_zypper_add_repo.py
@@ -0,0 +1,220 @@
+#
+# Copyright (C) 2017 SUSE LLC.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""zypper_add_repo: Add zyper repositories to the system"""
+
+
+import configobj
+import os
+
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.config.schema import get_schema_doc
+from cloudinit.settings import PER_ALWAYS
+from six import string_types
+from textwrap import dedent
+
+distros = ['opensuse', 'sles']
+
+schema = {
+ 'id': 'cc_zypper_add_repo',
+ 'name': 'ZypperAddRepo',
+ 'title': 'Configure zypper behavior and add zypper repositories',
+ 'description': dedent("""\
+ Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
+ configuration writer is "dumb" and will simply append the provided
+ configuration options to the configuration file. Option settings
+ that may be duplicate will be resolved by the way the zypp.conf file
+ is parsed. The file is in INI format.
+ Add repositories to the system. No validation is performed on the
+ repository file entries, it is assumed the user is familiar with
+ the zypper repository file format."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ zypper:
+ repos:
+ - id: opensuse-oss
+ name: os-oss
+ baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/
+ enabled: 1
+ autorefresh: 1
+ - id: opensuse-oss-update
+ name: os-oss-up
+ baseurl: http://dl.opensuse.org/dist/leap/v/update
+ # any setting per
+ # https://en.opensuse.org/openSUSE:Standards_RepoInfo
+ # enable and autorefresh are on by default
+ config:
+ reposdir: /etc/zypp/repos.dir
+ servicesdir: /etc/zypp/services.d
+ download.use_deltarpm: true
+ # any setting in /etc/zypp/zypp.conf
+ """)],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'zypper': {
+ 'type': 'object',
+ 'properties': {
+ 'repos': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The unique id of the repo, used when
+ writing
+ /etc/zypp/repos.d/<id>.repo.""")
+ },
+ 'baseurl': {
+ 'type': 'string',
+ 'format': 'uri', # built-in format type
+                                'description': 'The base repository URL'
+ }
+ },
+ 'required': ['id', 'baseurl'],
+ 'additionalProperties': True
+ },
+ 'minItems': 1
+ },
+ 'config': {
+ 'type': 'object',
+ 'description': dedent("""\
+                        Any supported zypp.conf key is written to
+ /etc/zypp/zypp.conf'""")
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either config or repo must be provided
+ 'additionalProperties': False, # only repos and config allowed
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
+LOG = logging.getLogger(__name__)
+
+
+def _canonicalize_id(repo_id):
+ repo_id = repo_id.replace(" ", "_")
+ return repo_id
+
+
+def _format_repo_value(val):
+ if isinstance(val, bool):
+ # zypp prefers 1/0
+ return 1 if val else 0
+ if isinstance(val, (list, tuple)):
+ return "\n ".join([_format_repo_value(v) for v in val])
+ if not isinstance(val, string_types):
+ return str(val)
+ return val
+
+
+def _format_repository_config(repo_id, repo_config):
+ to_be = configobj.ConfigObj()
+ to_be[repo_id] = {}
+ # Do basic translation of the items -> values
+ for (k, v) in repo_config.items():
+ # For now assume that people using this know the format
+ # of zypper repos and don't verify keys/values further
+ to_be[repo_id][k] = _format_repo_value(v)
+ lines = to_be.write()
+ return "\n".join(lines)
+
+
+def _write_repos(repos, repo_base_path):
+ """Write the user-provided repo definition files
+ @param repos: A list of repo dictionary objects provided by the user's
+ cloud config.
+ @param repo_base_path: The directory path to which repo definitions are
+ written.
+ """
+
+ if not repos:
+ return
+ valid_repos = {}
+ for index, user_repo_config in enumerate(repos):
+ # Skip on absent required keys
+ missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ if missing_keys:
+ LOG.warning(
+ "Repo config at index %d is missing required config keys: %s",
+ index, ",".join(missing_keys))
+ continue
+ repo_id = user_repo_config.get('id')
+ canon_repo_id = _canonicalize_id(repo_id)
+ repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+ if os.path.exists(repo_fn_pth):
+ LOG.info("Skipping repo %s, file %s already exists!",
+ repo_id, repo_fn_pth)
+ continue
+ elif repo_id in valid_repos:
+ LOG.info("Skipping repo %s, file %s already pending!",
+ repo_id, repo_fn_pth)
+ continue
+
+ # Do some basic key formatting
+ repo_config = dict(
+ (k.lower().strip().replace("-", "_"), v)
+ for k, v in user_repo_config.items()
+ if k and k != 'id')
+
+ # Set defaults if not present
+ for field in ['enabled', 'autorefresh']:
+ if field not in repo_config:
+ repo_config[field] = '1'
+
+ valid_repos[repo_id] = (repo_fn_pth, repo_config)
+
+ for (repo_id, repo_data) in valid_repos.items():
+ repo_blob = _format_repository_config(repo_id, repo_data[-1])
+ util.write_file(repo_data[0], repo_blob)
+
+
+def _write_zypp_config(zypper_config):
+ """Write to the default zypp configuration file /etc/zypp/zypp.conf"""
+ if not zypper_config:
+ return
+ zypp_config = '/etc/zypp/zypp.conf'
+ zypp_conf_content = util.load_file(zypp_config)
+ new_settings = ['# Added via cloud.cfg']
+ for setting, value in zypper_config.items():
+ if setting == 'configdir':
+ msg = 'Changing the location of the zypper configuration is '
+ msg += 'not supported, skipping "configdir" setting'
+ LOG.warning(msg)
+ continue
+ if value:
+ new_settings.append('%s=%s' % (setting, value))
+ if len(new_settings) > 1:
+ new_config = zypp_conf_content + '\n'.join(new_settings)
+ else:
+ new_config = zypp_conf_content
+ util.write_file(zypp_config, new_config)
+
+
+def handle(name, cfg, _cloud, log, _args):
+ zypper_section = cfg.get('zypper')
+ if not zypper_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'zypper' relevant configuration found"), name)
+ return
+ repos = zypper_section.get('repos')
+ if not repos:
+ LOG.debug(("Skipping module named %s,"
+ " no 'repos' configuration found"), name)
+ return
+ zypper_config = zypper_section.get('config', {})
+ repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+
+ _write_zypp_config(zypper_config)
+ _write_repos(repos, repo_base_path)
+
+# vi: ts=4 expandtab
Index: doc/examples/cloud-config-zypp-repo.txt
===================================================================
--- /dev/null
+++ doc/examples/cloud-config-zypp-repo.txt
@@ -0,0 +1,18 @@
+#cloud-config
+# vim: syntax=yaml
+#
+# Add zypper repository configuration to the system
+#
+# The following example adds the file /etc/zypp/repos.d/Test_Repo.repo
+# which can then subsequently be used by zypper for later operations.
+zypp_repos:
+ # The name of the repository
+ "Test Repo":
+ # Any repository configuration options
+ name: Extra Packages for Testing
+ enabled: true
+ autorefresh: true
+ keeppackages: false
+ baseurl: http://download.example.com/ibs/TEST/1.0/standard/
+ gpgcheck: true
+ gpgkey: http://download.example.com/ibs/TEST/1.0/standard/repodata/repomd.xml.key
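
For reference, the repo entries accepted by cc_zypper_add_repo end up as one INI-style file per repo under /etc/zypp/repos.d. The sketch below mirrors how _write_repos()/_format_repository_config() use configobj, fed with the opensuse-oss values from the module's own example; the printed result is approximate and key order may differ:

# Sketch: the .repo content produced for one repo entry, via configobj,
# the same library _format_repository_config() uses above.
import configobj

repo_id = 'opensuse-oss'
repo_config = {
    'name': 'os-oss',
    'baseurl': 'http://dl.opensuse.org/dist/leap/v/repo/oss/',
    'enabled': '1',      # default added by _write_repos() when unset
    'autorefresh': '1',  # default added by _write_repos() when unset
}

out = configobj.ConfigObj()
out[repo_id] = repo_config
print('\n'.join(out.write()))
# Roughly:
# [opensuse-oss]
# name = os-oss
# baseurl = http://dl.opensuse.org/dist/leap/v/repo/oss/
# enabled = 1
# autorefresh = 1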