diff --git a/cloud-init-digital-ocean-datasource-enable-by-default.patch b/cloud-init-digital-ocean-datasource-enable-by-default.patch new file mode 100644 index 0000000..45e2919 --- /dev/null +++ b/cloud-init-digital-ocean-datasource-enable-by-default.patch @@ -0,0 +1,23 @@ +From 7ae201166402fbf2e6c1632028be956a954835ef Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Tue, 18 Oct 2016 12:30:38 -0400 +Subject: DigitalOcean: enable usage of data source by default. + +Just add DigitalOcean to the list of datasources that are used +if there is no 'datasource_list' provided in config. + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 8c258ea..a968271 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -32,6 +32,7 @@ CFG_BUILTIN = { + 'NoCloud', + 'ConfigDrive', + 'OpenNebula', ++ 'DigitalOcean', + 'Azure', + 'AltCloud', + 'OVF', +-- +cgit v0.10.2 + diff --git a/cloud-init-digital-ocean-datasource.patch b/cloud-init-digital-ocean-datasource.patch new file mode 100644 index 0000000..c4611ae --- /dev/null +++ b/cloud-init-digital-ocean-datasource.patch @@ -0,0 +1,782 @@ +From 9f83bb8e80806d3dd79ba426474dc3c696e19a41 Mon Sep 17 00:00:00 2001 +From: Ben Howard +Date: Fri, 19 Aug 2016 16:28:26 -0600 +Subject: DigitalOcean: use meta-data for network configruation + +On DigitalOcean, Network information is provided via Meta-data. +It changes the datasource to be a local datasource, meaning it +will run before fallback networking is configured. + +The advantage of that is that before networking is configured it +can bring up a network device with ipv4 link-local and hit the +metadata service that lives at 169.254.169.254 to find its networking +configuration. It then takes down the link local address and lets +cloud-init configure networking. + +The configuring of a network device to go looking for a metadata +service is gated by a check of data in the smbios. This guarantees +that the code will not run on another system. 
+ +diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py +index fc596e1..c5770d5 100644 +--- a/cloudinit/sources/DataSourceDigitalOcean.py ++++ b/cloudinit/sources/DataSourceDigitalOcean.py +@@ -18,13 +18,12 @@ + # DigitalOcean Droplet API: + # https://developers.digitalocean.com/documentation/metadata/ + +-import json +- + from cloudinit import log as logging + from cloudinit import sources +-from cloudinit import url_helper + from cloudinit import util + ++import cloudinit.sources.helpers.digitalocean as do_helper ++ + LOG = logging.getLogger(__name__) + + BUILTIN_DS_CONFIG = { +@@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = { + MD_RETRIES = 30 + MD_TIMEOUT = 2 + MD_WAIT_RETRY = 2 ++MD_USE_IPV4LL = True + + + class DataSourceDigitalOcean(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) ++ self.distro = distro + self.metadata = dict() + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), +@@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource): + self.metadata_address = self.ds_cfg['metadata_url'] + self.retries = self.ds_cfg.get('retries', MD_RETRIES) + self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) ++ self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL) + self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) ++ self._network_config = None + + def _get_sysinfo(self): +- # DigitalOcean embeds vendor ID and instance/droplet_id in the +- # SMBIOS information +- +- LOG.debug("checking if instance is a DigitalOcean droplet") +- +- # Detect if we are on DigitalOcean and return the Droplet's ID +- vendor_name = util.read_dmi_data("system-manufacturer") +- if vendor_name != "DigitalOcean": +- return (False, None) ++ return do_helper.read_sysinfo() + +- LOG.info("running on DigitalOcean") +- +- droplet_id = util.read_dmi_data("system-serial-number") +- if droplet_id: 
+- LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet" +- "{}").format(droplet_id)) +- else: +- LOG.critical(("system identified via SMBIOS as a DigitalOcean " +- "Droplet, but did not provide an ID. Please file a " +- "support ticket at: " +- "https://cloud.digitalocean.com/support/tickets/" +- "new")) +- +- return (True, droplet_id) +- +- def get_data(self, apply_filter=False): ++ def get_data(self): + (is_do, droplet_id) = self._get_sysinfo() + + # only proceed if we know we are on DigitalOcean + if not is_do: + return False + +- LOG.debug("reading metadata from {}".format(self.metadata_address)) +- response = url_helper.readurl(self.metadata_address, +- timeout=self.timeout, +- sec_between=self.wait_retry, +- retries=self.retries) ++ LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id) + +- contents = util.decode_binary(response.contents) +- decoded = json.loads(contents) ++ ipv4LL_nic = None ++ if self.use_ip4LL: ++ ipv4LL_nic = do_helper.assign_ipv4_link_local() + +- self.metadata = decoded +- self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id) +- self.metadata['local-hostname'] = decoded.get('hostname', droplet_id) +- self.vendordata_raw = decoded.get("vendor_data", None) +- self.userdata_raw = decoded.get("user_data", None) +- return True ++ md = do_helper.read_metadata( ++ self.metadata_address, timeout=self.timeout, ++ sec_between=self.wait_retry, retries=self.retries) + +- def get_public_ssh_keys(self): +- public_keys = self.metadata.get('public_keys', []) +- if isinstance(public_keys, list): +- return public_keys +- else: +- return [public_keys] ++ self.metadata_full = md ++ self.metadata['instance-id'] = md.get('droplet_id', droplet_id) ++ self.metadata['local-hostname'] = md.get('hostname', droplet_id) ++ self.metadata['interfaces'] = md.get('interfaces') ++ self.metadata['public-keys'] = md.get('public_keys') ++ self.metadata['availability_zone'] = md.get('region', 'default') ++ self.vendordata_raw = 
md.get("vendor_data", None) ++ self.userdata_raw = md.get("user_data", None) + +- @property +- def availability_zone(self): +- return self.metadata.get('region', 'default') ++ if ipv4LL_nic: ++ do_helper.del_ipv4_link_local(ipv4LL_nic) + +- @property +- def launch_index(self): +- return None ++ return True + + def check_instance_id(self, sys_cfg): + return sources.instance_id_matches_system_uuid( + self.get_instance_id(), 'system-serial-number') + ++ @property ++ def network_config(self): ++ """Configure the networking. This needs to be done each boot, since ++ the IP information may have changed due to snapshot and/or ++ migration. ++ """ ++ ++ if self._network_config: ++ return self._network_config ++ ++ interfaces = self.metadata.get('interfaces') ++ LOG.debug(interfaces) ++ if not interfaces: ++ raise Exception("Unable to get meta-data from server....") ++ ++ nameservers = self.metadata_full['dns']['nameservers'] ++ self._network_config = do_helper.convert_network_configuration( ++ interfaces, nameservers) ++ return self._network_config ++ + + # Used to match classes to dependencies + datasources = [ +- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ++ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )), + ] + + +diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py +new file mode 100644 +index 0000000..b0a721c +--- /dev/null ++++ b/cloudinit/sources/helpers/digitalocean.py +@@ -0,0 +1,218 @@ ++# vi: ts=4 expandtab ++# ++# Author: Ben Howard ++ ++# This program is free software: you can redistribute it and/or modify ++# it under the terms of the GNU General Public License version 3, as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. 
++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++ ++import json ++import random ++ ++from cloudinit import log as logging ++from cloudinit import net as cloudnet ++from cloudinit import url_helper ++from cloudinit import util ++ ++NIC_MAP = {'public': 'eth0', 'private': 'eth1'} ++ ++LOG = logging.getLogger(__name__) ++ ++ ++def assign_ipv4_link_local(nic=None): ++ """Bring up NIC using an address using link-local (ip4LL) IPs. On ++ DigitalOcean, the link-local domain is per-droplet routed, so there ++ is no risk of collisions. However, to be more safe, the ip4LL ++ address is random. ++ """ ++ ++ if not nic: ++ for cdev in sorted(cloudnet.get_devicelist()): ++ if cloudnet.is_physical(cdev): ++ nic = cdev ++ LOG.debug("assigned nic '%s' for link-local discovery", nic) ++ break ++ ++ if not nic: ++ raise RuntimeError("unable to find interfaces to access the " ++ "meta-data server. This droplet is broken.") ++ ++ addr = "169.254.{0}.{1}/16".format(random.randint(1, 168), ++ random.randint(0, 255)) ++ ++ ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic] ++ ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up'] ++ ++ if not util.which('ip'): ++ raise RuntimeError("No 'ip' command available to configure ip4LL " ++ "address") ++ ++ try: ++ (result, _err) = util.subp(ip_addr_cmd) ++ LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) ++ ++ (result, _err) = util.subp(ip_link_cmd) ++ LOG.debug("brought device '%s' up", nic) ++ except Exception: ++ util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." ++ " Droplet networking will be broken", addr, nic) ++ raise ++ ++ return nic ++ ++ ++def del_ipv4_link_local(nic=None): ++ """Remove the ip4LL address. While this is not necessary, the ip4LL ++ address is extraneous and confusing to users. 
++ """ ++ if not nic: ++ LOG.debug("no link_local address interface defined, skipping link " ++ "local address cleanup") ++ return ++ ++ LOG.debug("cleaning up ipv4LL address") ++ ++ ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] ++ ++ try: ++ (result, _err) = util.subp(ip_addr_cmd) ++ LOG.debug("removed ip4LL addresses from %s", nic) ++ ++ except Exception as e: ++ util.logexc(LOG, "failed to remove ip4LL address from '%s'.", nic, e) ++ ++ ++def convert_network_configuration(config, dns_servers): ++ """Convert the DigitalOcean Network description into Cloud-init's netconfig ++ format. ++ ++ Example JSON: ++ {'public': [ ++ {'mac': '04:01:58:27:7f:01', ++ 'ipv4': {'gateway': '45.55.32.1', ++ 'netmask': '255.255.224.0', ++ 'ip_address': '45.55.50.93'}, ++ 'anchor_ipv4': { ++ 'gateway': '10.17.0.1', ++ 'netmask': '255.255.0.0', ++ 'ip_address': '10.17.0.9'}, ++ 'type': 'public', ++ 'ipv6': {'gateway': '....', ++ 'ip_address': '....', ++ 'cidr': 64}} ++ ], ++ 'private': [ ++ {'mac': '04:01:58:27:7f:02', ++ 'ipv4': {'gateway': '10.132.0.1', ++ 'netmask': '255.255.0.0', ++ 'ip_address': '10.132.75.35'}, ++ 'type': 'private'} ++ ] ++ } ++ """ ++ ++ def _get_subnet_part(pcfg, nameservers=None): ++ subpart = {'type': 'static', ++ 'control': 'auto', ++ 'address': pcfg.get('ip_address'), ++ 'gateway': pcfg.get('gateway')} ++ ++ if nameservers: ++ subpart['dns_nameservers'] = nameservers ++ ++ if ":" in pcfg.get('ip_address'): ++ subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'), ++ pcfg.get('cidr')) ++ else: ++ subpart['netmask'] = pcfg.get('netmask') ++ ++ return subpart ++ ++ all_nics = [] ++ for k in ('public', 'private'): ++ if k in config: ++ all_nics.extend(config[k]) ++ ++ macs_to_nics = cloudnet.get_interfaces_by_mac() ++ nic_configs = [] ++ ++ for nic in all_nics: ++ ++ mac_address = nic.get('mac') ++ sysfs_name = macs_to_nics.get(mac_address) ++ nic_type = nic.get('type', 'unknown') ++ # Note: the entry 'public' above contains a list, but ++ # the 
list will only ever have one nic inside it per digital ocean. ++ # If it ever had more than one nic, then this code would ++ # assign all 'public' the same name. ++ if_name = NIC_MAP.get(nic_type, sysfs_name) ++ ++ LOG.debug("mapped %s interface to %s, assigning name of %s", ++ mac_address, sysfs_name, if_name) ++ ++ ncfg = {'type': 'physical', ++ 'mac_address': mac_address, ++ 'name': if_name} ++ ++ subnets = [] ++ for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'): ++ raw_subnet = nic.get(netdef, None) ++ if not raw_subnet: ++ continue ++ ++ sub_part = _get_subnet_part(raw_subnet) ++ if nic_type == 'public' and 'anchor' not in netdef: ++ # add DNS resolvers to the public interfaces only ++ sub_part = _get_subnet_part(raw_subnet, dns_servers) ++ else: ++ # remove the gateway from any non-public interfaces ++ if 'gateway' in sub_part: ++ del sub_part['gateway'] ++ ++ subnets.append(sub_part) ++ ++ ncfg['subnets'] = subnets ++ nic_configs.append(ncfg) ++ LOG.debug("nic '%s' configuration: %s", if_name, ncfg) ++ ++ return {'version': 1, 'config': nic_configs} ++ ++ ++def read_metadata(url, timeout=2, sec_between=2, retries=30): ++ response = url_helper.readurl(url, timeout=timeout, ++ sec_between=sec_between, retries=retries) ++ if not response.ok(): ++ raise RuntimeError("unable to read metadata at %s" % url) ++ return json.loads(response.contents.decode()) ++ ++ ++def read_sysinfo(): ++ # DigitalOcean embeds vendor ID and instance/droplet_id in the ++ # SMBIOS information ++ ++ # Detect if we are on DigitalOcean and return the Droplet's ID ++ vendor_name = util.read_dmi_data("system-manufacturer") ++ if vendor_name != "DigitalOcean": ++ return (False, None) ++ ++ droplet_id = util.read_dmi_data("system-serial-number") ++ if droplet_id: ++ LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s", ++ droplet_id) ++ else: ++ msg = ("system identified via SMBIOS as a DigitalOcean " ++ "Droplet, but did not provide an ID. 
Please file a " ++ "support ticket at: " ++ "https://cloud.digitalocean.com/support/tickets/new") ++ LOG.critical(msg) ++ raise RuntimeError(msg) ++ ++ return (True, droplet_id) +diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py +index f5d2ef3..bdfe0ba 100644 +--- a/tests/unittests/test_datasource/test_digitalocean.py ++++ b/tests/unittests/test_datasource/test_digitalocean.py +@@ -20,25 +20,123 @@ import json + from cloudinit import helpers + from cloudinit import settings + from cloudinit.sources import DataSourceDigitalOcean ++from cloudinit.sources.helpers import digitalocean + +-from .. import helpers as test_helpers +-from ..helpers import HttprettyTestCase +- +-httpretty = test_helpers.import_httpretty() ++from ..helpers import mock, TestCase + + DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] + DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" + +-DO_META = { +- 'user_data': 'user_data_here', +- 'vendor_data': 'vendor_data_here', +- 'public_keys': DO_SINGLE_KEY, +- 'region': 'nyc3', +- 'id': '2000000', +- 'hostname': 'cloudinit-test', ++# the following JSON was taken from droplet (that's why its a string) ++DO_META = json.loads(""" ++{ ++ "droplet_id": "22532410", ++ "hostname": "utl-96268", ++ "vendor_data": "vendordata goes here", ++ "user_data": "userdata goes here", ++ "public_keys": "", ++ "auth_key": "authorization_key", ++ "region": "nyc3", ++ "interfaces": { ++ "private": [ ++ { ++ "ipv4": { ++ "ip_address": "10.132.6.205", ++ "netmask": "255.255.0.0", ++ "gateway": "10.132.0.1" ++ }, ++ "mac": "04:01:57:d1:9e:02", ++ "type": "private" ++ } ++ ], ++ "public": [ ++ { ++ "ipv4": { ++ "ip_address": "192.0.0.20", ++ "netmask": "255.255.255.0", ++ "gateway": "104.236.0.1" ++ }, ++ "ipv6": { ++ "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", ++ "cidr": 64, ++ "gateway": 
"2604:A880:0800:0000:0000:0000:0000:0001" ++ }, ++ "anchor_ipv4": { ++ "ip_address": "10.0.0.5", ++ "netmask": "255.255.0.0", ++ "gateway": "10.0.0.1" ++ }, ++ "mac": "04:01:57:d1:9e:01", ++ "type": "public" ++ } ++ ] ++ }, ++ "floating_ip": { ++ "ipv4": { ++ "active": false ++ } ++ }, ++ "dns": { ++ "nameservers": [ ++ "2001:4860:4860::8844", ++ "2001:4860:4860::8888", ++ "8.8.8.8" ++ ] ++ } ++} ++""") ++ ++# This has no private interface ++DO_META_2 = { ++ "droplet_id": 27223699, ++ "hostname": "smtest1", ++ "vendor_data": "\n".join([ ++ ('"Content-Type: multipart/mixed; ' ++ 'boundary=\"===============8645434374073493512==\"'), ++ 'MIME-Version: 1.0', ++ '', ++ '--===============8645434374073493512==', ++ 'MIME-Version: 1.0' ++ 'Content-Type: text/cloud-config; charset="us-ascii"' ++ 'Content-Transfer-Encoding: 7bit' ++ 'Content-Disposition: attachment; filename="cloud-config"' ++ '', ++ '#cloud-config', ++ 'disable_root: false', ++ 'manage_etc_hosts: true', ++ '', ++ '', ++ '--===============8645434374073493512==' ++ ]), ++ "public_keys": [ ++ "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" ++ ], ++ "auth_key": "88888888888888888888888888888888", ++ "region": "nyc3", ++ "interfaces": { ++ "public": [{ ++ "ipv4": { ++ "ip_address": "45.55.249.133", ++ "netmask": "255.255.192.0", ++ "gateway": "45.55.192.1" ++ }, ++ "anchor_ipv4": { ++ "ip_address": "10.17.0.5", ++ "netmask": "255.255.0.0", ++ "gateway": "10.17.0.1" ++ }, ++ "mac": "ae:cc:08:7c:88:00", ++ "type": "public" ++ }] ++ }, ++ "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, ++ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, ++ "tags": None, + } + ++DO_META['public_keys'] = DO_SINGLE_KEY ++ + MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +@@ -46,69 +144,189 @@ def _mock_dmi(): + return (True, DO_META.get('id')) + + +-def _request_callback(method, uri, headers): +- return (200, headers, json.dumps(DO_META)) +- +- +-class TestDataSourceDigitalOcean(HttprettyTestCase): 
++class TestDataSourceDigitalOcean(TestCase): + """ + Test reading the meta-data + """ + +- def setUp(self): +- self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean( +- settings.CFG_BUILTIN, None, +- helpers.Paths({})) +- self.ds._get_sysinfo = _mock_dmi +- super(TestDataSourceDigitalOcean, self).setUp() +- +- @httpretty.activate +- def test_connection(self): +- httpretty.register_uri( +- httpretty.GET, MD_URL, +- body=json.dumps(DO_META)) +- +- success = self.ds.get_data() +- self.assertTrue(success) +- +- @httpretty.activate +- def test_metadata(self): +- httpretty.register_uri( +- httpretty.GET, MD_URL, +- body=_request_callback) +- self.ds.get_data() ++ def get_ds(self, get_sysinfo=_mock_dmi): ++ ds = DataSourceDigitalOcean.DataSourceDigitalOcean( ++ settings.CFG_BUILTIN, None, helpers.Paths({})) ++ ds.use_ip4LL = False ++ if get_sysinfo is not None: ++ ds._get_sysinfo = get_sysinfo ++ return ds + +- self.assertEqual(DO_META.get('user_data'), +- self.ds.get_userdata_raw()) ++ @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') ++ def test_returns_false_not_on_docean(self, m_read_sysinfo): ++ m_read_sysinfo.return_value = (False, None) ++ ds = self.get_ds(get_sysinfo=None) ++ self.assertEqual(False, ds.get_data()) ++ m_read_sysinfo.assert_called() + +- self.assertEqual(DO_META.get('vendor_data'), +- self.ds.get_vendordata_raw()) ++ @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') ++ def test_metadata(self, mock_readmd): ++ mock_readmd.return_value = DO_META.copy() + +- self.assertEqual(DO_META.get('region'), +- self.ds.availability_zone) ++ ds = self.get_ds() ++ ret = ds.get_data() ++ self.assertTrue(ret) + +- self.assertEqual(DO_META.get('id'), +- self.ds.get_instance_id()) ++ mock_readmd.assert_called() + +- self.assertEqual(DO_META.get('hostname'), +- self.ds.get_hostname()) ++ self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) ++ self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) ++ 
self.assertEqual(DO_META.get('region'), ds.availability_zone) ++ self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) ++ self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) + + # Single key + self.assertEqual([DO_META.get('public_keys')], +- self.ds.get_public_ssh_keys()) ++ ds.get_public_ssh_keys()) + +- self.assertIsInstance(self.ds.get_public_ssh_keys(), list) ++ self.assertIsInstance(ds.get_public_ssh_keys(), list) + +- @httpretty.activate +- def test_multiple_ssh_keys(self): +- DO_META['public_keys'] = DO_MULTIPLE_KEYS +- httpretty.register_uri( +- httpretty.GET, MD_URL, +- body=_request_callback) +- self.ds.get_data() ++ @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') ++ def test_multiple_ssh_keys(self, mock_readmd): ++ metadata = DO_META.copy() ++ metadata['public_keys'] = DO_MULTIPLE_KEYS ++ mock_readmd.return_value = metadata.copy() ++ ++ ds = self.get_ds() ++ ret = ds.get_data() ++ self.assertTrue(ret) ++ ++ mock_readmd.assert_called() + + # Multiple keys +- self.assertEqual(DO_META.get('public_keys'), +- self.ds.get_public_ssh_keys()) ++ self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) ++ self.assertIsInstance(ds.get_public_ssh_keys(), list) ++ ++ ++class TestNetworkConvert(TestCase): ++ ++ def _get_networking(self): ++ netcfg = digitalocean.convert_network_configuration( ++ DO_META['interfaces'], DO_META['dns']['nameservers']) ++ self.assertIn('config', netcfg) ++ return netcfg ++ ++ def test_networking_defined(self): ++ netcfg = self._get_networking() ++ self.assertIsNotNone(netcfg) ++ ++ for nic_def in netcfg.get('config'): ++ print(json.dumps(nic_def, indent=3)) ++ n_type = nic_def.get('type') ++ n_subnets = nic_def.get('type') ++ n_name = nic_def.get('name') ++ n_mac = nic_def.get('mac_address') ++ ++ self.assertIsNotNone(n_type) ++ self.assertIsNotNone(n_subnets) ++ self.assertIsNotNone(n_name) ++ self.assertIsNotNone(n_mac) ++ ++ def _get_nic_definition(self, int_type, expected_name): ++ 
"""helper function to return if_type (i.e. public) and the expected ++ name used by cloud-init (i.e eth0)""" ++ netcfg = self._get_networking() ++ meta_def = (DO_META.get('interfaces')).get(int_type)[0] ++ ++ self.assertEqual(int_type, meta_def.get('type')) ++ ++ for nic_def in netcfg.get('config'): ++ print(nic_def) ++ if nic_def.get('name') == expected_name: ++ return nic_def, meta_def ++ ++ def _get_match_subn(self, subnets, ip_addr): ++ """get the matching subnet definition based on ip address""" ++ for subn in subnets: ++ address = subn.get('address') ++ self.assertIsNotNone(address) ++ ++ # equals won't work because of ipv6 addressing being in ++ # cidr notation, i.e fe00::1/64 ++ if ip_addr in address: ++ print(json.dumps(subn, indent=3)) ++ return subn ++ ++ def test_public_interface_defined(self): ++ """test that the public interface is defined as eth0""" ++ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') ++ self.assertEqual('eth0', nic_def.get('name')) ++ self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) ++ self.assertEqual('physical', nic_def.get('type')) ++ ++ def test_private_interface_defined(self): ++ """test that the private interface is defined as eth1""" ++ (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') ++ self.assertEqual('eth1', nic_def.get('name')) ++ self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) ++ self.assertEqual('physical', nic_def.get('type')) ++ ++ def _check_dns_nameservers(self, subn_def): ++ self.assertIn('dns_nameservers', subn_def) ++ expected_nameservers = DO_META['dns']['nameservers'] ++ nic_nameservers = subn_def.get('dns_nameservers') ++ self.assertEqual(expected_nameservers, nic_nameservers) ++ ++ def test_public_interface_ipv6(self): ++ """test public ipv6 addressing""" ++ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') ++ ipv6_def = meta_def.get('ipv6') ++ self.assertIsNotNone(ipv6_def) ++ ++ subn_def = 
self._get_match_subn(nic_def.get('subnets'), ++ ipv6_def.get('ip_address')) ++ ++ cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), ++ ipv6_def.get('cidr')) ++ ++ self.assertEqual(cidr_notated_address, subn_def.get('address')) ++ self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) ++ self._check_dns_nameservers(subn_def) ++ ++ def test_public_interface_ipv4(self): ++ """test public ipv4 addressing""" ++ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') ++ ipv4_def = meta_def.get('ipv4') ++ self.assertIsNotNone(ipv4_def) ++ ++ subn_def = self._get_match_subn(nic_def.get('subnets'), ++ ipv4_def.get('ip_address')) ++ ++ self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) ++ self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) ++ self._check_dns_nameservers(subn_def) ++ ++ def test_public_interface_anchor_ipv4(self): ++ """test public ipv4 addressing""" ++ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') ++ ipv4_def = meta_def.get('anchor_ipv4') ++ self.assertIsNotNone(ipv4_def) ++ ++ subn_def = self._get_match_subn(nic_def.get('subnets'), ++ ipv4_def.get('ip_address')) ++ ++ self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) ++ self.assertNotIn('gateway', subn_def) ++ ++ def test_convert_without_private(self): ++ netcfg = digitalocean.convert_network_configuration( ++ DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) + +- self.assertIsInstance(self.ds.get_public_ssh_keys(), list) ++ byname = {} ++ for i in netcfg['config']: ++ if 'name' in i: ++ if i['name'] in byname: ++ raise ValueError("name '%s' in config twice: %s" % ++ (i['name'], netcfg)) ++ byname[i['name']] = i ++ self.assertTrue('eth0' in byname) ++ self.assertTrue('subnets' in byname['eth0']) ++ eth0 = byname['eth0'] ++ self.assertEqual( ++ sorted(['45.55.249.133', '10.17.0.5']), ++ sorted([i['address'] for i in eth0['subnets']])) +-- +cgit v0.10.2 + diff --git 
a/cloud-init-handle-no-carrier.patch b/cloud-init-handle-no-carrier.patch new file mode 100644 index 0000000..7727a72 --- /dev/null +++ b/cloud-init-handle-no-carrier.patch @@ -0,0 +1,210 @@ +--- cloudinit/net/__init__.py.orig ++++ cloudinit/net/__init__.py +@@ -33,10 +33,12 @@ def sys_dev_path(devname, path=""): + + + def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): ++ dev_path = sys_dev_path(devname, path) + try: +- contents = util.load_file(sys_dev_path(devname, path)) ++ contents = util.load_file(dev_path) + except (OSError, IOError) as e: +- if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR): ++ e_errno = getattr(e, 'errno', None) ++ if e_errno in (errno.ENOENT, errno.ENOTDIR): + if enoent is not None: + return enoent + raise +@@ -109,24 +111,9 @@ def is_disabled_cfg(cfg): + return cfg.get('config') == "disabled" + + +-def sys_netdev_info(name, field): +- if not os.path.exists(os.path.join(SYS_CLASS_NET, name)): +- raise OSError("%s: interface does not exist in %s" % +- (name, SYS_CLASS_NET)) +- fname = os.path.join(SYS_CLASS_NET, name, field) +- if not os.path.exists(fname): +- raise OSError("%s: could not find sysfs entry: %s" % (name, fname)) +- data = util.load_file(fname) +- if data[-1] == '\n': +- data = data[:-1] +- return data +- +- + def generate_fallback_config(): + """Determine which attached net dev is most likely to have a connection and + generate network state to run dhcp on that interface""" +- # by default use eth0 as primary interface +- nconf = {'config': [], 'version': 1} + + # get list of interfaces that could have connections + invalid_interfaces = set(['lo']) +@@ -143,28 +130,30 @@ def generate_fallback_config(): + # skip any bridges + continue + try: +- carrier = int(sys_netdev_info(interface, 'carrier')) ++ carrier = read_sys_net(interface, 'carrier', enoent=False) + if carrier: ++ carrier = int(carrier) + connected.append(interface) + continue +- except OSError: ++ except (IOError, OSError, 
TypeError): + pass + # check if nic is dormant or down, as this may make a nick appear to + # not have a carrier even though it could acquire one when brought + # online by dhclient + try: +- dormant = int(sys_netdev_info(interface, 'dormant')) ++ dormant = read_sys_net(interface, 'dormant', enoent=False) + if dormant: ++ dormant = int(dormant) + possibly_connected.append(interface) + continue +- except OSError: ++ except (IOError, OSError, TypeError): + pass + try: +- operstate = sys_netdev_info(interface, 'operstate') ++ operstate = read_sys_net(interface, 'operstate', enoent=False) + if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']: + possibly_connected.append(interface) + continue +- except OSError: ++ except (IOError, OSError): + pass + + # don't bother with interfaces that might not be connected if there are +@@ -173,23 +162,29 @@ def generate_fallback_config(): + potential_interfaces = connected + else: + potential_interfaces = possibly_connected +- # if there are no interfaces, give up +- if not potential_interfaces: +- return ++ + # if eth0 exists use it above anything else, otherwise get the interface +- # that looks 'first' +- if DEFAULT_PRIMARY_INTERFACE in potential_interfaces: +- name = DEFAULT_PRIMARY_INTERFACE ++ # that we can read 'first' (using the sorted definition of first). 
++ names = [DEFAULT_PRIMARY_INTERFACE] ++ names.extend(sorted(potential_interfaces)) ++ target_name = None ++ target_mac = None ++ for name in names: ++ if name not in potential_interfaces: ++ continue ++ mac = read_sys_net(name, 'address', enoent=False) ++ if mac: ++ target_name = name ++ target_mac = mac ++ break ++ if target_mac and target_name: ++ nconf = {'config': [], 'version': 1} ++ nconf['config'].append( ++ {'type': 'physical', 'name': target_name, ++ 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) ++ return nconf + else: +- name = sorted(potential_interfaces)[0] +- +- mac = sys_netdev_info(name, 'address') +- target_name = name +- +- nconf['config'].append( +- {'type': 'physical', 'name': target_name, +- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) +- return nconf ++ return None + + + def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): +--- cloudinit/net/cmdline.py.orig ++++ cloudinit/net/cmdline.py +@@ -26,7 +26,7 @@ import sys + import six + + from . import get_devicelist +-from . import sys_netdev_info ++from . 
import read_sys_net + + from cloudinit import util + +@@ -197,7 +197,10 @@ def read_kernel_cmdline_config(files=Non + return None + + if mac_addrs is None: +- mac_addrs = dict((k, sys_netdev_info(k, 'address')) +- for k in get_devicelist()) ++ mac_addrs = {} ++ for k in get_devicelist(): ++ mac_addr = read_sys_net(k, 'address', enoent=False) ++ if mac_addr: ++ mac_addrs[k] = mac_addr + + return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) +--- tests/unittests/test_net.py.orig ++++ tests/unittests/test_net.py +@@ -422,7 +422,7 @@ pre-down route del -net 10.0.0.0 netmask + } + + +-def _setup_test(tmp_dir, mock_get_devicelist, mock_sys_netdev_info, ++def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + mock_get_devicelist.return_value = ['eth1000'] + dev_characteristics = { +@@ -435,10 +435,10 @@ def _setup_test(tmp_dir, mock_get_device + } + } + +- def netdev_info(name, field): ++ def fake_read(devname, path, translate=None, enoent=None, keyerror=None): + return dev_characteristics[name][field] + +- mock_sys_netdev_info.side_effect = netdev_info ++ mock_read_sys_net.side_effect = fake_read + + def sys_dev_path(devname, path=""): + return tmp_dir + devname + "/" + path +@@ -454,15 +454,15 @@ def _setup_test(tmp_dir, mock_get_device + class TestSysConfigRendering(TestCase): + + @mock.patch("cloudinit.net.sys_dev_path") +- @mock.patch("cloudinit.net.sys_netdev_info") ++ @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, +- mock_sys_netdev_info, ++ mock_read_sys_net, + mock_sys_dev_path): + tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, tmp_dir) + _setup_test(tmp_dir, mock_get_devicelist, +- mock_sys_netdev_info, mock_sys_dev_path) ++ mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, +@@ -511,15 +511,15 @@ USERCTL=no + 
class TestEniNetRendering(TestCase): + + @mock.patch("cloudinit.net.sys_dev_path") +- @mock.patch("cloudinit.net.sys_netdev_info") ++ @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, +- mock_sys_netdev_info, ++ mock_read_sys_net, + mock_sys_dev_path): + tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, tmp_dir) + _setup_test(tmp_dir, mock_get_devicelist, +- mock_sys_netdev_info, mock_sys_dev_path) ++ mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, diff --git a/cloud-init-no-dmidecode-on-ppc64.patch b/cloud-init-no-dmidecode-on-ppc64.patch index 72e1827..f1d8893 100644 --- a/cloud-init-no-dmidecode-on-ppc64.patch +++ b/cloud-init-no-dmidecode-on-ppc64.patch @@ -5,7 +5,18 @@ # running dmidecode can be problematic on some arches (LP: #1243287) uname_arch = os.uname()[4] - if uname_arch.startswith("arm") or uname_arch == "aarch64": -+ if uname_arch.startswith("arm") or uname_arch == "aarch64" or uname_arch.startswith("ppc"): ++ if uname_arch.startswith("arm") or uname_arch.startswith("ppc"): LOG.debug("dmidata is not supported on %s", uname_arch) return None +--- tests/unittests/test_util.py.orig ++++ tests/unittests/test_util.py +@@ -384,7 +384,7 @@ class TestReadDMIData(helpers.Filesystem + dmi_name = 'use-dmidecode' + self._configure_dmidecode_return(dmi_name, dmi_val) + +- expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val} ++ expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val} + found = {} + # we do not run the 'dmi-decode' binary on some arches + # verify that anything requested that is not in the sysfs dir diff --git a/cloud-init-service.patch b/cloud-init-service.patch index 381f84a..f9b60d7 100644 --- a/cloud-init-service.patch +++ b/cloud-init-service.patch @@ -1,13 +1,51 @@ --- systemd/cloud-init.service.orig +++ 
systemd/cloud-init.service -@@ -1,8 +1,8 @@ +@@ -1,9 +1,18 @@ [Unit] Description=Initial cloud-init job (metadata service crawler) -After=cloud-init-local.service networking.service -+After=cloud-init-local.service network.service - Before=network-online.target sshd.service sshd-keygen.service systemd-user-sessions.service +-Before=network-online.target sshd.service sshd-keygen.service systemd-user-sessions.service -Requires=networking.service -+Requires=network.service - Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service +-Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service ++DefaultDependencies=no ++Wants=cloud-init-local.service ++Wants=local-fs.target ++Wants=sshd-keygen.service ++Wants=sshd.service ++After=cloud-init-local.service ++After=wicked.service ++Requires=wicked.service ++Before=network-online.target ++Before=sshd-keygen.service ++Before=sshd.service ++Before=systemd-user-sessions.service ++Conflicts=shutdown.target [Service] + Type=oneshot +--- systemd/cloud-init-local.service.orig ++++ systemd/cloud-init-local.service +@@ -4,9 +4,10 @@ DefaultDependencies=no + Wants=local-fs.target + Wants=network-pre.target + After=local-fs.target +-Conflicts=shutdown.target ++Before=basic.target + Before=network-pre.target + Before=shutdown.target ++Conflicts=shutdown.target + + [Service] + Type=oneshot +--- systemd/cloud-final.service.orig ++++ systemd/cloud-final.service +@@ -1,6 +1,8 @@ + [Unit] + Description=Execute cloud user/final scripts +-After=network-online.target cloud-config.service rc-local.service multi-user.target ++After=cloud-config.service ++After=network-online.target ++After=rc-local.service + Before=systemd-logind.service + Wants=network-online.target cloud-config.service + diff --git a/cloud-init-sysconfig-netpathfix.patch b/cloud-init-sysconfig-netpathfix.patch new file mode 100644 index 0000000..8c9c716 --- /dev/null +++ b/cloud-init-sysconfig-netpathfix.patch @@ -0,0 +1,20 @@ +--- 
cloudinit/net/sysconfig.py.orig ++++ cloudinit/net/sysconfig.py +@@ -94,7 +94,7 @@ class ConfigMap(object): + class Route(ConfigMap): + """Represents a route configuration.""" + +- route_fn_tpl = '%(base)s/network-scripts/route-%(name)s' ++ route_fn_tpl = '%(base)s/network/route-%(name)s' + + def __init__(self, route_name, base_sysconf_dir): + super(Route, self).__init__() +@@ -119,7 +119,7 @@ class Route(ConfigMap): + class NetInterface(ConfigMap): + """Represents a sysconfig/networking-script (and its config + children).""" + +- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s' ++ iface_fn_tpl = '%(base)s/network/ifcfg-%(name)s' + + iface_types = { + 'ethernet': 'Ethernet', diff --git a/cloud-init.changes b/cloud-init.changes index 2eb16ef..674d2e4 100644 --- a/cloud-init.changes +++ b/cloud-init.changes @@ -1,3 +1,104 @@ +------------------------------------------------------------------- +Tue Nov 1 15:56:50 UTC 2016 - rjschwei@suse.com + +- Modify suseIntegratedHandler.patch (bsc#1007529) + + Fall back to the previous method of writing network information + We have to work out upstream how to have distro specific renderer + for sysconfig + +------------------------------------------------------------------- +Mon Oct 31 12:44:21 UTC 2016 - rjschwei@suse.com + +- Add cloud-init-sysconfig-netpathfix.patch (bsc#1007529) + + Fix the default path for network scripts +- Cosmetic changes to suseIntegratedHandler.patch + +------------------------------------------------------------------- +Sat Oct 29 13:24:13 UTC 2016 - rjschwei@suse.com + +- Update cloud-init-no-dmidecode-on-ppc64.patch (bsc#1005616) + + aarch64 does support dmidecode + +------------------------------------------------------------------- +Sat Oct 29 12:40:38 UTC 2016 - rjschwei@suse.com + +- Update cloud-init-service.patch + + Break another cycle this one in -final + +------------------------------------------------------------------- +Fri Oct 28 20:14:48 UTC 2016 - rjschwei@suse.com + +- Update 
cloud-init-service.patch
+  + Better match upstream intent: Ubuntu networking.service is equivalent
+    to SUSE wicked, thus we cannot translate networking to network, but need
+    to translate it to wicked
+
+-------------------------------------------------------------------
+Fri Oct 28 19:37:24 UTC 2016 - rjschwei@suse.com
+
+- Update cloud-init-service.patch
+  + We need the following order:
+    - something brings networking fully up (in our case wicked)
+    - cloud-init.service runs
+    - network-online.target is reached
+
+-------------------------------------------------------------------
+Fri Oct 28 19:20:32 UTC 2016 - rjschwei@suse.com
+
+- Update cloud-init-service.patch
+  + The network must be up and running in order to get ssh key injected
+
+-------------------------------------------------------------------
+Fri Oct 28 15:21:39 UTC 2016 - rjschwei@suse.com
+
+- Update cloud-init-service.patch
+  + Had self reference and thus cloud-init.service was never executed
+    which caused ssh key loading failure
+
+-------------------------------------------------------------------
+Fri Oct 28 04:56:29 UTC 2016 - bwiedemann@suse.com
+
+- Do not own /lib/udev to not conflict with udev rpm
+
+-------------------------------------------------------------------
+Thu Oct 27 19:40:31 UTC 2016 - rjschwei@suse.com
+
+- Forward port suseIntegratedHandler.patch
+  + Implement new abstract interfaces
+  + Some minor implementation fixes
+
+-------------------------------------------------------------------
+Wed Oct 26 14:27:55 UTC 2016 - rjschwei@suse.com
+
+- Appease the build service, differences between OBS and IBS,
+  and own the directories
+
+-------------------------------------------------------------------
+Wed Oct 26 11:31:34 UTC 2016 - rjschwei@suse.com
+
+- Fix package, udev rules should be in /usr for distros after
+  SLES 11
+
+-------------------------------------------------------------------
+Wed Oct 19 13:23:35 UTC 2016 - dmueller@suse.com
+
+- add
cloud-init-digital-ocean-datasource-enable-by-default.patch, + cloud-init-digital-ocean-datasource.patch: add DigitalOcean support +- run tests on build + +------------------------------------------------------------------- +Tue Oct 11 15:42:35 UTC 2016 - rjschwei@suse.com + +- Add cloud-init-handle-no-carrier.patch (boo#1003977) + - Handle the exception when attempting to detect if the network + device is up when it is not + +------------------------------------------------------------------- +Mon Oct 10 20:28:47 UTC 2016 - rjschwei@suse.com + +- Update cloud-init-service.patch (boo#999942) + - Backport upstream commits 3705bb5964a and 6e45ffb21e96 + ------------------------------------------------------------------- Thu Sep 15 13:01:35 UTC 2016 - rjschwei@suse.com diff --git a/cloud-init.spec b/cloud-init.spec index 3f562c7..310ff02 100644 --- a/cloud-init.spec +++ b/cloud-init.spec @@ -45,12 +45,35 @@ Patch20: cloud-init-python2-sigpipe.patch Patch21: cloud-init-net-eni.patch Patch22: cloud-init-service.patch Patch23: cloud-init-fix-unicode-handling-binarydecode.patch +# From upstream patch +Patch24: cloud-init-handle-no-carrier.patch +Patch25: cloud-init-digital-ocean-datasource.patch +Patch26: cloud-init-digital-ocean-datasource-enable-by-default.patch +Patch27: cloud-init-sysconfig-netpathfix.patch BuildRequires: fdupes BuildRequires: filesystem BuildRequires: python-devel BuildRequires: python-setuptools # pkg-config is needed to find correct systemd unit dir BuildRequires: pkg-config +# needed for /lib/udev +BuildRequires: udev +%if 0%{?suse_version} > 1320 +# Test requirements +BuildRequires: python-Cheetah +BuildRequires: python-Jinja2 +BuildRequires: python-PrettyTable +BuildRequires: python-PyYAML +BuildRequires: python-argparse +BuildRequires: python-configobj +BuildRequires: python-contextlib2 +BuildRequires: python-httpretty +BuildRequires: python-jsonpatch +BuildRequires: python-mock +BuildRequires: python-oauthlib +BuildRequires: python-requests 
+BuildRequires: python-testtools
+%endif
 Requires: bash
 Requires: file
 Requires: growpart
@@ -149,6 +172,10 @@ Unit tests for the cloud-init tools
 %patch21
 %patch22
 %patch23
+%patch24
+%patch25 -p1
+%patch26 -p1
+%patch27
 
 %if 0%{?suse_version} <= 1130
 # disable ecdsa for SLE 11 (not available)
@@ -158,6 +185,18 @@ echo "ssh_genkeytypes: ['rsa', 'dsa']" >> %{SOURCE1}
 
 %build
 python setup.py build
+
+%if 0%{?suse_version} > 1320
+%check
+# these tests are currently failing due to suse patches
+rm -v tests/unittests/test_distros/test_netconfig.py
+rm -v tests/unittests/test_net.py
+rm -v tests/unittests/test_datasource/test_opennebula.py
+rm -v tests/unittests/test_datasource/test_cloudstack.py
+python -m testtools.run
+%endif
+
+
 %install
 python setup.py install --root=%{buildroot} --prefix=%{_prefix} --install-lib=%{python_sitelib} --init-system=%{initsys}
 find %{buildroot} \( -name .gitignore -o -name .placeholder \) -delete
@@ -188,8 +227,10 @@ sed -i s/INSERT_SUSE_DISTRO/opensuse/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cf
 %endif
 %endif
 %if 0%{?suse_version} && 0%{?suse_version} > 1110
-mkdir %{buildroot}/%{_sysconfdir}/rsyslog.d
+mkdir -p %{buildroot}/%{_sysconfdir}/rsyslog.d
+mkdir -p %{buildroot}/usr/lib/udev/rules.d/
 cp -a %{SOURCE2} %{buildroot}/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+mv %{buildroot}/lib/udev/rules.d/66-azure-ephemeral.rules %{buildroot}/usr/lib/udev/rules.d/
 %endif
 
 # remove debian/ubuntu specific profile.d file (bnc#779553)
@@ -252,16 +293,20 @@ popd
 %if 0%{?suse_version} && 0%{?suse_version} > 1110
 %dir %{_sysconfdir}/rsyslog.d
 %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
-%endif
+/usr/lib/udev/rules.d/66-azure-ephemeral.rules
+# This if condition really distinguishes between OBS and IBS.
+# For SLE 12 builds in OBS owning the directories is not required, while +# SLE 12 builds in IBS require owning the directories +%else /lib/udev/rules.d/66-azure-ephemeral.rules +%endif %dir %attr(0755, root, root) %{_localstatedir}/lib/cloud %dir %{docdir} %dir /etc/NetworkManager %dir /etc/NetworkManager/dispatcher.d %dir /etc/dhcp %dir /etc/dhcp/dhclient-exit-hooks.d -%dir /lib/udev -%dir /lib/udev/rules.d + %files doc diff --git a/suseIntegratedHandler.patch b/suseIntegratedHandler.patch index 3f6112d..c411035 100644 --- a/suseIntegratedHandler.patch +++ b/suseIntegratedHandler.patch @@ -1,9 +1,9 @@ --- /dev/null +++ cloudinit/distros/opensuse.py -@@ -0,0 +1,212 @@ +@@ -0,0 +1,226 @@ +# vi: ts=4 expandtab +# -+# Copyright (C) 2014 SUSE LLC ++# Copyright (C) 2016 SUSE LLC +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Robert Schweikert @@ -33,30 +33,34 @@ + +from cloudinit.distros import net_util +from cloudinit.distros import rhel_util as rhutil ++#from cloudinit.net import sysconfig ++#from cloudinit.net.network_state import parse_net_config_data +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + +class Distro(distros.Distro): + clock_conf_fn = '/etc/sysconfig/clock' -+ locale_conf_fn = '/etc/sysconfig/language' -+ network_conf_fn = '/etc/sysconfig/network' + hostname_conf_fn = '/etc/HOSTNAME' + init_cmd = ['service'] ++ locale_conf_fn = '/etc/sysconfig/language' ++ network_conf_fn = '/etc/sysconfig/network' + network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' + resolve_conf_fn = '/etc/resolv.conf' + route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' -+ tz_local_fn = '/etc/localtime' ++ systemd_hostname_conf_fn = '/etc/hostname' + systemd_locale_conf_fn = '/etc/locale.conf' ++ tz_local_fn = '/etc/localtime' + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) ++# self._net_renderer = sysconfig.Renderer() + # This will be used to restrict certain + # 
calls from repeatly happening (when they + # should only happen say once per instance...) + self._runner = helpers.Runners(paths) -+ cfg['ssh_svcname'] = 'sshd' + self.osfamily = 'suse' ++ cfg['ssh_svcname'] = 'sshd' + self.systemdDist = util.which('systemctl') + if self.systemdDist: + self.init_cmd = ['systemctl'] @@ -149,7 +153,10 @@ + return conf + + def _read_system_hostname(self): -+ host_fn = self.hostname_conf_fn ++ if self.systemdDist: ++ host_fn = self.systemd_hostname_conf_fn ++ else: ++ host_fn = self.hostname_conf_fn + return (host_fn, self._read_hostname(host_fn)) + + def _write_hostname(self, hostname, out_fn): @@ -213,6 +220,13 @@ + rhutil.update_resolve_conf_file(self.resolve_conf_fn, + nameservers, searchservers) + return dev_names ++ ++# New interface cannot yet be implemented/used as we have to figure out ++# how to have a distro specific renderer ++# def _write_network_config(self, netconfig): ++# ns = parse_net_config_data(netconfig) ++# self._net_renderer.render_network_state("/", ns) ++# return [] --- cloudinit/distros/sles.py.orig +++ cloudinit/distros/sles.py @@ -1,10 +1,9 @@