Sync from SUSE:ALP:Source:Standard:1.0 cloud-init revision e64ca0b7929167b6abe9eb0e050ec357
Commit e0b4dfe2be
.gitattributes (vendored): Normal file, 23 lines added
@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text

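Note (not part of the commit): these are standard Git LFS tracking rules; any file whose name matches one of the globs is stored as an LFS pointer, which is why cloud-init-23.3.tar.gz below is shown as "Stored with Git LFS". A small Python sketch of how such globs match; the helper and the truncated pattern list are illustrative only:

import fnmatch

# Subset of the globs tracked above, for illustration.
LFS_PATTERNS = ["*.7z", "*.bz2", "*.gz", "*.rpm", "*.tgz", "*.xz", "*.zip", "*.zst"]

def stored_with_lfs(path: str) -> bool:
    """Return True if the path matches one of the tracked LFS globs."""
    return any(fnmatch.fnmatch(path, pat) for pat in LFS_PATTERNS)

for name in ("cloud-init-23.3.tar.gz", "cloud-init.spec", "hidesensitivedata"):
    print(name, "->", "LFS" if stored_with_lfs(name) else "plain git")
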
cloud-init-23.3.tar.gz (Stored with Git LFS): Normal file, binary
Binary file not shown.

cloud-init-break-resolv-symlink.patch: Normal file, 14 lines added
@@ -0,0 +1,14 @@
--- cloudinit/net/sysconfig.py.orig
+++ cloudinit/net/sysconfig.py
@@ -998,6 +998,11 @@ class Renderer(renderer.Renderer):
network_state, existing_dns_path=dns_path
)
if resolv_content:
+ # netconfig checks if /etc/resolv.conf is a symlink and if
+ # that is true will write it's version of the file clobbering
+ # our changes.
+ if os.path.islink(dns_path):
+ os.unlink(dns_path)
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
nm_conf_path = subp.target_path(

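Note (not part of the commit): the hunk above makes cloud-init remove /etc/resolv.conf when it is a symlink before writing its own resolver configuration, so SUSE's netconfig no longer recognizes the link as its own and overwrites the file. A standalone Python sketch of that behaviour; the helper name and the test path are hypothetical:

import os

def write_resolv_conf(path: str, content: str, mode: int = 0o644) -> None:
    if os.path.islink(path):
        # Break the symlink first so a regular file is created below.
        os.unlink(path)
    with open(path, "w") as fh:
        fh.write(content)
    os.chmod(path, mode)

if __name__ == "__main__":
    # Use a scratch path when trying this out; /etc/resolv.conf needs root.
    write_resolv_conf("./resolv.conf.example", "nameserver 192.168.1.1\n")
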
cloud-init-keep-flake.patch: Normal file, 11 lines added
@@ -0,0 +1,11 @@
--- tools/run-lint.orig
+++ tools/run-lint
@@ -11,7 +11,7 @@ else
files=( "$@" )
fi

-cmd=( "python3" -m "ruff" "${files[@]}" )
+cmd=( "python3" -m "flake8" "${files[@]}" )

echo "Running: " "${cmd[@]}" 1>&2
exec "${cmd[@]}"

cloud-init-lint-fixes.patch: Normal file, 412 lines added
@@ -0,0 +1,412 @@
--- cloudinit/cmd/main.py.orig
+++ cloudinit/cmd/main.py
@@ -28,26 +28,27 @@ from cloudinit.config.modules import Mod

patcher.patch_logging()

-from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import log as logging
-from cloudinit import netinfo
-from cloudinit import signal_handler
-from cloudinit import sources
-from cloudinit import stages
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import warnings
-
-from cloudinit import reporting
-from cloudinit.reporting import events
+from cloudinit.config.schema import validate_cloudconfig_schema # noqa: E402
+from cloudinit import log as logging # noqa: E402
+from cloudinit import netinfo # noqa: E402
+from cloudinit import signal_handler # noqa: E402
+from cloudinit import sources # noqa: E402
+from cloudinit import stages # noqa: E402
+from cloudinit import url_helper # noqa: E402
+from cloudinit import util # noqa: E402
+from cloudinit import version # noqa: E402
+from cloudinit import warnings # noqa: E402
+
+from cloudinit import reporting # noqa: E402
+from cloudinit.reporting import events # noqa: E402

-from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
+from cloudinit.settings import ( # noqa: E402
+ PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG) # noqa: E402

-from cloudinit import atomic_helper
+from cloudinit import atomic_helper # noqa: E402

-from cloudinit.config import cc_set_hostname
-from cloudinit.cmd.devel import read_cfg_paths
+from cloudinit.config import cc_set_hostname # noqa: E402
+from cloudinit.cmd.devel import read_cfg_paths # noqa: E402


# Welcome message template
@@ -538,7 +539,7 @@ def di_report_warn(datasource, cfg):
# where Name is the thing that shows up in datasource_list.
modname = datasource.__module__.rpartition(".")[2]
if modname.startswith(sources.DS_PREFIX):
- modname = modname[len(sources.DS_PREFIX) :]
+ modname = modname[len(sources.DS_PREFIX):]
else:
LOG.warning(
"Datasource '%s' came from unexpected module '%s'.",
--- cloudinit/config/cc_apt_configure.py.orig
+++ cloudinit/config/cc_apt_configure.py
@@ -354,7 +354,7 @@ def mirrorurl_to_apt_fileprefix(mirror):
string = string[0:-1]
pos = string.find("://")
if pos >= 0:
- string = string[pos + 3 :]
+ string = string[pos + 3:]
string = string.replace("/", "_")
return string

--- cloudinit/config/cc_ssh_authkey_fingerprints.py.orig
+++ cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -44,7 +44,7 @@ LOG = logging.getLogger(__name__)
def _split_hash(bin_hash):
split_up = []
for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i : i + 2])
+ split_up.append(bin_hash[i: i + 2])
return split_up


--- cloudinit/config/modules.py.orig
+++ cloudinit/config/modules.py
@@ -39,7 +39,7 @@ class ModuleDetails(NamedTuple):
def form_module_name(name):
canon_name = name.replace("-", "_")
if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0 : (len(canon_name) - 3)]
+ canon_name = canon_name[0: (len(canon_name) - 3)]
canon_name = canon_name.strip()
if not canon_name:
return None
--- cloudinit/distros/parsers/ifconfig.py.orig
+++ cloudinit/distros/parsers/ifconfig.py
@@ -140,7 +140,7 @@ class Ifconfig:
dev.index = int(toks[1])

if toks[0] == "description:":
- dev.description = line[line.index(":") + 2 :]
+ dev.description = line[line.index(":") + 2:]

if (
toks[0].startswith("options=")
@@ -165,7 +165,7 @@ class Ifconfig:
dev.groups += toks[1:]

if toks[0] == "media:":
- dev.media = line[line.index(": ") + 2 :]
+ dev.media = line[line.index(": ") + 2:]

if toks[0] == "nd6":
nd6_opts = re.split(r"<|>", toks[0])
--- cloudinit/net/dhcp.py.orig
+++ cloudinit/net/dhcp.py
@@ -415,24 +415,24 @@ class IscDhclient(DhcpClient):
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx + 1 : idx + 5])
- gateway = ".".join(tokens[idx + 5 : idx + req_toks])
+ net_address = ".".join(tokens[idx + 1: idx + 5])
+ gateway = ".".join(tokens[idx + 5: idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(17, 25):
req_toks = 8
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx + 1 : idx + 4] + ["0"])
- gateway = ".".join(tokens[idx + 4 : idx + req_toks])
+ net_address = ".".join(tokens[idx + 1: idx + 4] + ["0"])
+ gateway = ".".join(tokens[idx + 4: idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(9, 17):
req_toks = 7
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx + 1 : idx + 3] + ["0", "0"])
- gateway = ".".join(tokens[idx + 3 : idx + req_toks])
+ net_address = ".".join(tokens[idx + 1: idx + 3] + ["0", "0"])
+ gateway = ".".join(tokens[idx + 3: idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(1, 9):
req_toks = 6
@@ -440,9 +440,9 @@ class IscDhclient(DhcpClient):
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
net_address = ".".join(
- tokens[idx + 1 : idx + 2] + ["0", "0", "0"]
+ tokens[idx + 1: idx + 2] + ["0", "0", "0"]
)
- gateway = ".".join(tokens[idx + 2 : idx + req_toks])
+ gateway = ".".join(tokens[idx + 2: idx + req_toks])
current_idx = idx + req_toks
elif net_length == 0:
req_toks = 5
@@ -450,7 +450,7 @@ class IscDhclient(DhcpClient):
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
net_address = "0.0.0.0"
- gateway = ".".join(tokens[idx + 1 : idx + req_toks])
+ gateway = ".".join(tokens[idx + 1: idx + req_toks])
current_idx = idx + req_toks
else:
LOG.error(
--- cloudinit/net/network_state.py.orig
+++ cloudinit/net/network_state.py
@@ -135,7 +135,7 @@ class CommandHandlerMeta(type):
command_handlers = {}
for attr_name, attr in dct.items():
if callable(attr) and attr_name.startswith("handle_"):
- handles_what = attr_name[len("handle_") :]
+ handles_what = attr_name[len("handle_"):]
if handles_what:
command_handlers[handles_what] = attr
dct["command_handlers"] = command_handlers
--- cloudinit/reporting/handlers.py.orig
+++ cloudinit/reporting/handlers.py
@@ -295,13 +295,13 @@ class HyperVKvpReportingHandler(Reportin
)
)
k = (
- record_data[0 : self.HV_KVP_EXCHANGE_MAX_KEY_SIZE]
+ record_data[0: self.HV_KVP_EXCHANGE_MAX_KEY_SIZE]
.decode("utf-8")
.strip("\x00")
)
v = (
record_data[
- self.HV_KVP_EXCHANGE_MAX_KEY_SIZE : self.HV_KVP_RECORD_SIZE
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE: self.HV_KVP_RECORD_SIZE
]
.decode("utf-8")
.strip("\x00")
@@ -320,7 +320,7 @@ class HyperVKvpReportingHandler(Reportin
def _break_down(self, key, meta_data, description):
del meta_data[self.MSG_KEY]
des_in_json = json.dumps(description)
- des_in_json = des_in_json[1 : (len(des_in_json) - 1)]
+ des_in_json = des_in_json[1: (len(des_in_json) - 1)]
i = 0
result_array = []
message_place_holder = '"' + self.MSG_KEY + '":""'
@@ -353,7 +353,7 @@ class HyperVKvpReportingHandler(Reportin
Values will be truncated as needed.
"""
if len(value) >= self.HV_KVP_AZURE_MAX_VALUE_SIZE:
- value = value[0 : self.HV_KVP_AZURE_MAX_VALUE_SIZE - 1]
+ value = value[0: self.HV_KVP_AZURE_MAX_VALUE_SIZE - 1]

data = [self._encode_kvp_item(key, value)]

--- cloudinit/sources/__init__.py.orig
+++ cloudinit/sources/__init__.py
@@ -747,7 +747,7 @@ class DataSource(CloudInitPickleMixin, m
if not short_name.startswith(nfrom):
continue
for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom) :])
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
if os.path.exists(cand):
return cand
return None
--- cloudinit/sources/helpers/azure.py.orig
+++ cloudinit/sources/helpers/azure.py
@@ -566,7 +566,7 @@ class OpenSSLManager:
"""
raw_fp = self._run_x509_action("-fingerprint", certificate)
eq = raw_fp.find("=")
- octets = raw_fp[eq + 1 : -1].split(":")
+ octets = raw_fp[eq + 1: -1].split(":")
return "".join(octets)

@azure_ds_telemetry_reporter
--- cloudinit/sources/helpers/netlink.py.orig
+++ cloudinit/sources/helpers/netlink.py
@@ -150,7 +150,7 @@ def unpack_rta_attr(data, offset):
return None # Should mean our offset is >= remaining data

# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET: offset + length]
return RTAAttr(length, rta_type, attr_data)


--- cloudinit/ssh_util.py.orig
+++ cloudinit/ssh_util.py
@@ -659,7 +659,7 @@ def get_opensshd_version():
prefix = "OpenSSH_"
for line in err.split("\n"):
if line.startswith(prefix):
- return line[len(prefix) : line.find(",")]
+ return line[len(prefix): line.find(",")]
return None


--- cloudinit/url_helper.py.orig
+++ cloudinit/url_helper.py
@@ -73,7 +73,7 @@ def read_file_or_url(url, **kwargs) -> U
if url.lower().startswith("file://"):
if kwargs.get("data"):
LOG.warning("Unable to post data to file resource %s", url)
- file_path = url[len("file://") :]
+ file_path = url[len("file://"):]
try:
with open(file_path, "rb") as fp:
contents = fp.read()
--- cloudinit/user_data.py.orig
+++ cloudinit/user_data.py
@@ -211,13 +211,13 @@ class UserDataProcessor:
for line in content.splitlines():
lc_line = line.lower()
if lc_line.startswith("#include-once"):
- line = line[len("#include-once") :].lstrip()
+ line = line[len("#include-once"):].lstrip()
# Every following include will now
# not be refetched.... but will be
# re-read from a local urlcache (if it worked)
include_once_on = True
elif lc_line.startswith("#include"):
- line = line[len("#include") :].lstrip()
+ line = line[len("#include"):].lstrip()
# Disable the include once if it was on
# if it wasn't, then this has no effect.
include_once_on = False
--- cloudinit/util.py.orig
+++ cloudinit/util.py
@@ -1177,7 +1177,7 @@ def read_cc_from_cmdline(cmdline=None):
if end < 0:
end = clen
tokens.append(
- parse.unquote(cmdline[begin + begin_l : end].lstrip()).replace(
+ parse.unquote(cmdline[begin + begin_l: end].lstrip()).replace(
"\\n", "\n"
)
)
@@ -1724,7 +1724,7 @@ def get_output_cfg(cfg, mode):
found = False
for s in swlist:
if val.startswith(s):
- val = "%s %s" % (s, val[len(s) :].strip())
+ val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
@@ -2362,7 +2362,7 @@ def shellify(cmdlist, add_header=True):

def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
- line = line[len(prefix) :]
+ line = line[len(prefix):]
if suffix and line.endswith(suffix):
line = line[: -len(suffix)]
return line
@@ -2942,7 +2942,7 @@ def human2bytes(size):
for m in mpliers:
if size.endswith(m):
mplier = m
- num = size[0 : -len(m)]
+ num = size[0: -len(m)]

try:
num = float(num)
@@ -3022,12 +3022,12 @@ def rootdev_from_cmdline(cmdline):
if found.startswith("/dev/"):
return found
if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL=") :]
+ return "/dev/disk/by-label/" + found[len("LABEL="):]
if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID=") :].lower()
+ return "/dev/disk/by-uuid/" + found[len("UUID="):].lower()
if found.startswith("PARTUUID="):
disks_path = (
- "/dev/disk/by-partuuid/" + found[len("PARTUUID=") :].lower()
+ "/dev/disk/by-partuuid/" + found[len("PARTUUID="):].lower()
)
if os.path.exists(disks_path):
return disks_path
--- setup.py.orig
+++ setup.py
@@ -187,7 +187,7 @@ elif os.path.isfile("/etc/system-release
else:
# String formatted CPE
inc = 1
- (cpe_vendor, cpe_product, cpe_version) = cpe_data[2 + inc : 5 + inc]
+ (cpe_vendor, cpe_product, cpe_version) = cpe_data[2 + inc: 5 + inc]
if cpe_vendor == "amazon":
USR_LIB_EXEC = "usr/libexec"

--- tests/unittests/helpers.py.orig
+++ tests/unittests/helpers.py
@@ -265,7 +265,7 @@ class FilesystemMockingTestCase(Resource
real_root = os.path.join(real_root, "roots", example_root)
for (dir_path, _dirnames, filenames) in os.walk(real_root):
real_path = dir_path
- make_path = rebase_path(real_path[len(real_root) :], target_root)
+ make_path = rebase_path(real_path[len(real_root):], target_root)
util.ensure_dir(make_path)
for f in filenames:
real_path = util.abs_join(real_path, f)
@@ -469,7 +469,7 @@ def dir2dict(startdir, prefix=None):
for root, _dirs, files in os.walk(startdir):
for fname in files:
fpath = os.path.join(root, fname)
- key = fpath[len(prefix) :]
+ key = fpath[len(prefix):]
flist[key] = util.load_file(fpath)
return flist

--- tests/unittests/reporting/test_reporting_hyperv.py.orig
+++ tests/unittests/reporting/test_reporting_hyperv.py
@@ -293,7 +293,7 @@ class TextKvpReporter(CiTestCase):
reporter,
2,
[
- log_content[-azure.MAX_LOG_TO_KVP_LENGTH :].encode(),
+ log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode(),
extra_content.encode(),
],
)
--- tests/unittests/sources/test_configdrive.py.orig
+++ tests/unittests/sources/test_configdrive.py
@@ -412,7 +412,7 @@ class TestConfigDriveDataSource(CiTestCa
}
for name, dev_name in name_tests.items():
with ExitStack() as mocks:
- provided_name = dev_name[len("/dev/") :]
+ provided_name = dev_name[len("/dev/"):]
provided_name = "s" + provided_name[1:]
find_mock = mocks.enter_context(
mock.patch.object(
--- tests/unittests/sources/test_maas.py.orig
+++ tests/unittests/sources/test_maas.py
@@ -131,7 +131,7 @@ class TestMAASDataSource(CiTestCase):
if not url.startswith(prefix):
raise ValueError("unexpected call %s" % url)

- short = url[len(prefix) :]
+ short = url[len(prefix):]
if short not in data:
raise url_helper.UrlError("not found", code=404, url=url)
return url_helper.StringResponse(data[short])
--- tests/unittests/sources/test_smartos.py.orig
+++ tests/unittests/sources/test_smartos.py
@@ -766,7 +766,7 @@ class ShortReader:
rsize = next_null - self.index + 1
i = self.index
self.index += rsize
- ret = self.data[i : i + rsize]
+ ret = self.data[i: i + rsize]
if len(ret) and ret[-1:] == self.endbyte:
ret = ret[:-1]
return ret

cloud-init-no-tempnet-oci.patch: Normal file, 21 lines added
@@ -0,0 +1,21 @@
--- cloudinit/sources/DataSourceOracle.py.orig
+++ cloudinit/sources/DataSourceOracle.py
@@ -204,6 +204,8 @@ class DataSourceOracle(sources.DataSourc

def _is_iscsi_root(self) -> bool:
"""Return whether we are on a iscsi machine."""
+ # SUSE images are built with iSCSI setup.
+ return True
return self._network_config_source.is_applicable()

def _get_iscsi_config(self) -> dict:
--- tests/unittests/sources/test_oracle.py.orig
+++ tests/unittests/sources/test_oracle.py
@@ -996,6 +996,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
def test_read_opc_metadata_called_with_ephemeral_dhcp(
self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
):
+ return
in_context_manager = False

def enter_context_manager():

cloud-init-sysconf-path.patch: Normal file, 12 lines added
@@ -0,0 +1,12 @@
--- cloudinit/net/sysconfig.py.orig
+++ cloudinit/net/sysconfig.py
@@ -1056,8 +1056,7 @@ def available(target=None):
return False

expected_paths = [
- "etc/sysconfig/network-scripts/network-functions",
- "etc/sysconfig/config",
+ 'etc/sysconfig/network/scripts/functions.netconfig'
]
for p in expected_paths:
if os.path.isfile(subp.target_path(target, p)):

cloud-init-write-routes.patch: Normal file, 177 lines added
@@ -0,0 +1,177 @@
--- cloudinit/distros/__init__.py.orig
+++ cloudinit/distros/__init__.py
@@ -287,6 +287,15 @@ class Distro(persistence.CloudInitPickle

network_state = parse_net_config_data(netconfig, renderer=renderer)
self._write_network_state(network_state, renderer)
+ # The sysconfig renderer has no route writing implementation
+ # for SUSE yet use the old code for now that depends on the
+ # raw config.
+ try:
+ # Only exists for SUSE distro via this patch all other
+ # implementations throw which breaks testing
+ self._write_routes(netconfig)
+ except AttributeError:
+ pass

# Now try to bring them up
if bring_up:
--- cloudinit/distros/opensuse.py.orig
+++ cloudinit/distros/opensuse.py
@@ -10,7 +10,7 @@

import os

-from cloudinit import distros, helpers
+from cloudinit import distros, helpers, net
from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.distros import rhel_util as rhutil
@@ -238,6 +238,147 @@ class Distro(distros.Distro):
conf.set_hostname(hostname)
util.write_file(filename, str(conf), 0o644)

+ def _write_routes_v1(self, netconfig):
+ """Write route files, not part of the standard distro interface"""
+ # Due to the implementation of the sysconfig renderer default routes
+ # are setup in ifcfg-* files. But this does not work on SLES or
+ # openSUSE https://bugs.launchpad.net/cloud-init/+bug/1812117
+ # this is a very hacky way to get around the problem until a real
+ # solution is found in the sysconfig renderer
+ device_configs = netconfig.get('config', [])
+ default_nets = ('::', '0.0.0.0')
+ for config in device_configs:
+ if_name = config.get('name')
+ subnets = config.get('subnets', [])
+ config_routes = ''
+ has_default_route = False
+ seen_default_gateway = None
+ for subnet in subnets:
+ # Render the default gateway if it is present
+ gateway = subnet.get('gateway')
+ if gateway:
+ config_routes += ' '.join(
+ ['default', gateway, '-', '-\n']
+ )
+ has_default_route = True
+ if not seen_default_gateway:
+ seen_default_gateway = gateway
+ # Render subnet routes
+ routes = subnet.get('routes', [])
+ for route in routes:
+ dest = route.get('destination') or route.get('network')
+ if not dest or dest in default_nets:
+ dest = 'default'
+ if not has_default_route:
+ has_default_route = True
+ if dest != 'default':
+ netmask = route.get('netmask')
+ if netmask:
+ if net.is_ipv4_network(netmask):
+ prefix = net.ipv4_mask_to_net_prefix(netmask)
+ if net.is_ipv6_network(netmask):
+ prefix = net.ipv6_mask_to_net_prefix(netmask)
+ dest += '/' + str(prefix)
+ if '/' not in dest:
+ LOG.warning(
+ 'Skipping route; has no prefix "%s"', dest
+ )
+ continue
+ gateway = route.get('gateway')
+ if not gateway:
+ LOG.warning(
+ 'Missing gateway for "%s", skipping', dest
+ )
+ continue
+ if (
+ dest == 'default'
+ and has_default_route
+ and gateway == seen_default_gateway
+ ):
+ dest_info = dest
+ if gateway:
+ dest_info = ' '.join([dest, gateway, '-', '-'])
+ LOG.warning(
+ '%s already has default route, skipping "%s"',
+ if_name, dest_info
+ )
+ continue
+ config_routes += ' '.join(
+ [dest, gateway, '-', '-\n']
+ )
+ if config_routes:
+ route_file = '/etc/sysconfig/network/ifroute-%s' % if_name
+ util.write_file(route_file, config_routes)
+
+ def _render_route_string(self, netconfig_route):
+ route_to = netconfig_route.get('to', None)
+ route_via = netconfig_route.get('via', None)
+ route_metric = netconfig_route.get('metric', None)
+ route_string = ''
+
+ if route_to and route_via:
+ route_string = ' '.join([route_to, route_via, '-', '-'])
+ if route_metric:
+ route_string += ' metric {}\n'.format(route_metric)
+ else:
+ route_string += '\n'
+ else:
+ LOG.warning('invalid route definition, skipping route')
+
+ return route_string
+
+ def _write_routes_v2(self, netconfig):
+ for device_type in netconfig:
+ if device_type == 'version':
+ continue
+
+ if device_type == 'routes':
+ # global static routes
+ config_routes = ''
+ for route in netconfig['routes']:
+ config_routes += self._render_route_string(route)
+ if config_routes:
+ route_file = '/etc/sysconfig/network/routes'
+ util.write_file(route_file, config_routes)
+ else:
+ devices = netconfig[device_type]
+ for device_name in devices:
+ config_routes = ''
+ device_config = devices[device_name]
+ try:
+ gateways = [
+ v for k, v in device_config.items()
+ if 'gateway' in k
+ ]
+ for gateway in gateways:
+ config_routes += ' '.join(
+ ['default', gateway, '-', '-\n']
+ )
+ for route in device_config.get('routes', []):
+ config_routes += self._render_route_string(route)
+ if config_routes:
+ route_file = \
+ '/etc/sysconfig/network/ifroute-{}'.format(
+ device_name
+ )
+ util.write_file(route_file, config_routes)
+ except Exception:
+ # the parser above epxects another level of nesting
+ # which should be there in case it's properly
+ # formatted; if not we may get an exception on items()
+ pass
+
+ def _write_routes(self, netconfig):
+ netconfig_ver = netconfig.get('version')
+ if netconfig_ver == 1:
+ self._write_routes_v1(netconfig)
+ elif netconfig_ver == 2:
+ self._write_routes_v2(netconfig)
+ else:
+ LOG.warning(
+ 'unsupported or missing netconfig version, not writing routes'
+ )
+
@property
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""

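Note (not part of the commit): the patch above writes SUSE-style route files because the upstream sysconfig renderer does not handle them for SLES/openSUSE (lp#1812117). Each route becomes a "destination gateway - -" line in /etc/sysconfig/network/ifroute-<interface>, or in /etc/sysconfig/network/routes for global routes. A minimal standalone Python sketch of that line format with an invented v2-style input; it is an illustration, not the patched code:

def render_ifroute(ethernets: dict) -> dict:
    """Map interface name -> ifroute-<name> content ("dest gateway - -" lines)."""
    files = {}
    for name, cfg in ethernets.items():
        lines = []
        if cfg.get("gateway4"):
            # Default routes are written as "default <gateway> - -".
            lines.append(" ".join(["default", cfg["gateway4"], "-", "-"]))
        for route in cfg.get("routes", []):
            if route.get("to") and route.get("via"):
                lines.append(" ".join([route["to"], route["via"], "-", "-"]))
        if lines:
            files["ifroute-%s" % name] = "\n".join(lines) + "\n"
    return files


sample = {"eth0": {"gateway4": "10.0.0.1",
                   "routes": [{"to": "192.168.100.0/24", "via": "10.0.0.254"}]}}
for fname, text in render_ifroute(sample).items():
    print(fname)
    print(text, end="")
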
cloud-init.changes: Normal file, 4920 lines added
(File diff suppressed because it is too large.)

cloud-init.spec: Normal file, 260 lines added
@@ -0,0 +1,260 @@
#
# spec file for package cloud-init
#
# Copyright (c) 2023 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via http://bugs.opensuse.org/
#
# change this whenever config changes incompatible
%global configver 0.7

Name: cloud-init
Version: 23.3
Release: 0
License: GPL-3.0
Summary: Cloud node initialization tool
Url: https://github.com/canonical/cloud-init
Group: System/Management
Source0: %{name}-%{version}.tar.gz
Source1: rsyslog-cloud-init.cfg
Source2: hidesensitivedata
Patch1: datasourceLocalDisk.patch
# FIXME (lp#1849296)
Patch2: cloud-init-break-resolv-symlink.patch
# FIXME no proposed solution
Patch3: cloud-init-sysconf-path.patch
# FIXME (lp#1860164)
Patch4: cloud-init-no-tempnet-oci.patch
# FIXME (lp#1812117)
Patch6: cloud-init-write-routes.patch
# FIXME (https://github.com/canonical/cloud-init/issues/4339)
Patch7: cloud-init-keep-flake.patch
Patch8: cloud-init-lint-fixes.patch
BuildRequires: fdupes
BuildRequires: filesystem
# pkg-config is needed to find correct systemd unit dir
BuildRequires: pkg-config
# needed for /lib/udev
BuildRequires: pkgconfig(udev)
BuildRequires: procps
BuildRequires: python-rpm-macros
BuildRequires: python3-devel
BuildRequires: python3-setuptools
# Test requirements
BuildRequires: python3-Jinja2
BuildRequires: python3-PyYAML
BuildRequires: python3-configobj >= 5.0.2
BuildRequires: python3-flake8
BuildRequires: python3-httpretty
BuildRequires: python3-jsonpatch
BuildRequires: python3-jsonschema
BuildRequires: python3-netifaces
BuildRequires: python3-oauthlib
BuildRequires: python3-passlib
BuildRequires: python3-pytest
BuildRequires: python3-pytest-cov
BuildRequires: python3-pytest-mock
BuildRequires: python3-requests
BuildRequires: python3-responses
BuildRequires: python3-serial
BuildRequires: system-user-nobody
BuildRequires: distribution-release
BuildRequires: util-linux
Requires: bash
Requires: dhcp-client
Requires: file
Requires: growpart
Requires: e2fsprogs
Requires: net-tools
Requires: openssh
Requires: procps
Requires: python3-configobj >= 5.0.2
Requires: python3-Jinja2
Requires: python3-jsonpatch
Requires: python3-jsonschema
Requires: python3-netifaces
Requires: python3-oauthlib
Requires: python3-passlib
Requires: python3-pyserial
Requires: python3-PyYAML
Requires: python3-requests
Requires: python3-serial
Requires: python3-setuptools
Requires: python3-xml
Requires: sudo
Requires: util-linux
Requires: wget
%if 0%{?suse_version} && 0%{?suse_version} <= 1500
Requires: wicked-service
%endif
Requires: cloud-init-config = %configver
BuildRoot: %{_tmppath}/%{name}-%{version}-build
%define docdir %{_defaultdocdir}/%{name}
%ifarch %ix86 x86_64
Requires: dmidecode
%endif
%define initsys systemd
BuildRequires: pkgconfig(systemd)
%{?systemd_requires}
%if 0%{?suse_version} && 0%{?suse_version} == 1220
%define systemd_prefix /lib
%else
%define systemd_prefix /usr/lib
%endif

%description
Cloud-init is an init script that initializes a cloud node (VM)
according to the fetched configuration data from the admin node.

%package config-suse
Summary: Configuration file for Cloud node initialization tool
Provides: cloud-init-config = %configver
Group: System/Management
Conflicts: otherproviders(cloud-init-config)

%description config-suse
This package contains the product specific configuration file
for cloud-init.

%package doc
Summary: Cloud node initialization tool - Documentation
Group: System/Management
Recommends: cloud-init = %{version}

%description doc
Cloud-init is an init script that initializes a cloud node (VM)
according to the fetched configuration data from the admin node.

Documentation and examples for cloud-init tools

%prep
%setup -q
%patch1 -p0
%patch2
%patch3
%patch4
%patch6
%patch7
%patch8

# patch in the full version to version.py
version_pys=$(find . -name version.py -type f)
[ -n "$version_pys" ] ||
{ echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; }
sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys

%build
python3 setup.py build

%check
make unittest
make lint

%install
python3 setup.py install --root=%{buildroot} --prefix=%{_prefix} --install-lib=%{python3_sitelib} --init-system=%{initsys}
find %{buildroot} \( -name .gitignore -o -name .placeholder \) -delete
# from debian install script
for x in "%{buildroot}%{_bindir}/"*.py; do
[ -f "${x}" ] && mv "${x}" "${x%.py}"
done
mkdir -p %{buildroot}%{_localstatedir}/lib/cloud
# move documentation
mkdir -p %{buildroot}%{_defaultdocdir}
mv %{buildroot}%{_datadir}/doc/%{name} %{buildroot}%{_defaultdocdir}
# man pages
mkdir -p %{buildroot}%{_mandir}/man1
mv doc/man/* %{buildroot}%{_mandir}/man1
# copy the LICENSE
mkdir -p %{buildroot}%{_defaultlicensedir}/%{name}
cp LICENSE %{buildroot}%{_defaultlicensedir}/%{name}
cp LICENSE-GPLv3 %{buildroot}%{_defaultlicensedir}/%{name}
# Set the distribution indicator
%if 0%{?suse_version}
%if 0%{?is_opensuse}
sed -i s/suse/opensuse/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%else
sed -i s/suse/sles/ %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg
%endif
%endif
mkdir -p %{buildroot}/%{_sysconfdir}/rsyslog.d
mkdir -p %{buildroot}/usr/lib/udev/rules.d/
cp -a %{SOURCE1} %{buildroot}/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
mkdir -p %{buildroot}%{_sbindir}
install -m 755 %{SOURCE2} %{buildroot}%{_sbindir}

# remove debian/ubuntu specific profile.d file (bnc#779553)
rm -f %{buildroot}%{_sysconfdir}/profile.d/Z99-cloud-locale-test.sh

# Remove non-SUSE templates
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*

# remove duplicate files
%if 0%{?suse_version}
%fdupes %{buildroot}%{python3_sitelib}
%endif

%post
/usr/sbin/hidesensitivedata

%files
%defattr(-,root,root)
%license LICENSE LICENSE-GPLv3
%{_bindir}/cloud-id
%{_bindir}/cloud-init
%{_bindir}/cloud-init-per
%{_sbindir}/hidesensitivedata
%dir %{_sysconfdir}/cloud
%dir %{_sysconfdir}/cloud/clean.d
%{_sysconfdir}/cloud/clean.d/README
%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d
%config(noreplace) %{_sysconfdir}/cloud/templates
%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
%{_mandir}/man*/*
%if 0%{?suse_version} && 0%{?suse_version} < 1500
%dir %{_datadir}/bash-completion
%dir %{_datadir}/bash-completion/completions
%endif
%{_datadir}/bash-completion/completions/cloud-init
%{python3_sitelib}/cloudinit
%{python3_sitelib}/cloud_init-%{version}*.egg-info
%{_prefix}/lib/cloud-init
%{systemd_prefix}/systemd/system-generators/cloud-init-generator
%{systemd_prefix}/systemd/system/cloud-config.service
%{systemd_prefix}/systemd/system/cloud-config.target
%{systemd_prefix}/systemd/system/cloud-init-local.service
%{systemd_prefix}/systemd/system/cloud-init.service
%{systemd_prefix}/systemd/system/cloud-init.target
%{systemd_prefix}/systemd/system/cloud-final.service
%dir %{_sysconfdir}/rsyslog.d
%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
/usr/lib/udev/rules.d/66-azure-ephemeral.rules
# We use cloud-netconfig to handle new interfaces added to the instance
%exclude %{systemd_prefix}/systemd/system/cloud-init-hotplugd.service
%exclude %{systemd_prefix}/systemd/system/cloud-init-hotplugd.socket
%dir %attr(0755, root, root) %{_localstatedir}/lib/cloud
%dir %{docdir}
%dir /etc/systemd/system/sshd-keygen@.service.d


%files config-suse
%defattr(-,root,root)
%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg

%files doc
%defattr(-,root,root)
%{docdir}/examples/*
%{docdir}/*.txt
%dir %{docdir}/examples

%changelog

datasourceLocalDisk.patch: Normal file, 110 lines added
@@ -0,0 +1,110 @@
--- /dev/null
+++ cloudinit/sources/DataSourceLocalDisk.py
@@ -0,0 +1,107 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 SUSE Linux GmbH
+#
+# Author: Thorsten Kukuk <kukuk@suse.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_IID = "iid-localdisk"
+
+
+class DataSourceLocalDisk(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'localdisk')
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
+
+ def get_data(self):
+ if not os.path.isdir('/cloud-init-config'):
+ return False
+
+ defaults = {"instance-id": DEFAULT_IID}
+
+ found = []
+ mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
+
+ # Check to see if the seed dir has data.
+ try:
+ seeded = util.pathprefix2dict(
+ self.seed_dir, ['user-data', 'meta-data'], ['vendor-data']
+ )
+ found.append(self.seed_dir)
+ mydata = _merge_new_seed(mydata, seeded)
+ except ValueError:
+ pass
+
+ try:
+ seeded = util.pathprefix2dict(
+ '/cloud-init-config', ['user-data', 'meta-data'],
+ ['vendor-data']
+ )
+ found.append('/cloud-init-config')
+ mydata = _merge_new_seed(mydata, seeded)
+ except ValueError:
+ return False
+
+ # Merge in the defaults
+ mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
+ defaults])
+
+ self.seed = ",".join(found)
+ self.metadata = mydata['meta-data']
+ self.userdata_raw = mydata['user-data']
+ self.vendordata_raw = mydata['vendor-data']
+ return True
+
+ def check_instance_id(self, sys_cfg):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+
+def _merge_new_seed(cur, seeded):
+ ret = cur.copy()
+
+ newmd = seeded.get('meta-data', {})
+ if not isinstance(seeded['meta-data'], dict):
+ newmd = util.load_yaml(seeded['meta-data'])
+ ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+
+ if 'user-data' in seeded:
+ ret['user-data'] = seeded['user-data']
+ if 'vendor-data' in seeded:
+ ret['vendor-data'] = seeded['vendor-data']
+ return ret
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceLocalDisk, (sources.DEP_FILESYSTEM, )),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)

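Note (not part of the commit): DataSourceLocalDisk.get_data() only activates when /cloud-init-config exists and contains user-data and meta-data files (vendor-data is optional), similar to a NoCloud-style seed. A hypothetical helper that prepares such a seed directory for experimentation; the paths and file contents are examples only:

from pathlib import Path

def write_localdisk_seed(root="/cloud-init-config", instance_id="iid-localdisk"):
    """Create the user-data/meta-data layout the datasource looks for."""
    seed = Path(root)
    seed.mkdir(parents=True, exist_ok=True)
    (seed / "meta-data").write_text(
        "instance-id: %s\nlocal-hostname: testnode\n" % instance_id
    )
    (seed / "user-data").write_text("#cloud-config\npackages:\n  - vim\n")
    return seed

if __name__ == "__main__":
    # Use a scratch path when trying this out; writing to / needs root.
    print(write_localdisk_seed(root="./cloud-init-config"))
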
hidesensitivedata: Normal file, 36 lines added
@@ -0,0 +1,36 @@
#!/usr/bin/python3

import json
import os
import sys

from pathlib import Path

from cloudinit.atomic_helper import write_json
from cloudinit.sources import (
DataSource,
process_instance_metadata,
redact_sensitive_keys,
)

from cloudinit.stages import Init

init = Init()
log_file = init.cfg["def_log_file"]
if os.path.exists(log_file):
os.chmod(log_file, 0o640)

rundir = init.paths.run_dir
instance_data_path = Path(rundir, "instance-data.json")
if not os.path.exists(str(instance_data_path)):
sys.exit(0)
instance_json = json.load(instance_data_path.open(encoding="utf-8"))

sensitive_keys = DataSource.sensitive_metadata_keys

processed_json = process_instance_metadata(
instance_json, sensitive_keys=sensitive_keys
)
redacted_json = redact_sensitive_keys(processed_json)

write_json(str(instance_data_path), redacted_json)

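Note (not part of the commit): the script above is installed as /usr/sbin/hidesensitivedata and invoked from the spec's %post; it tightens the cloud-init log file mode and rewrites the run-dir instance-data.json with sensitive keys redacted via cloud-init's own process_instance_metadata()/redact_sensitive_keys(). A simplified, self-contained sketch of the redaction idea; the key names and placeholder text below are invented for illustration:

import json

SENSITIVE = {"security-credentials", "userdata", "vendordata"}  # example keys

def redact(node):
    """Recursively replace values of sensitive keys with a placeholder."""
    if isinstance(node, dict):
        return {k: "REDACTED" if k in SENSITIVE else redact(v)
                for k, v in node.items()}
    if isinstance(node, list):
        return [redact(v) for v in node]
    return node

if __name__ == "__main__":
    sample = {"ds": {"meta_data": {"security-credentials": "secret",
                                   "hostname": "node1"}}}
    print(json.dumps(redact(sample), indent=2))
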
rsyslog-cloud-init.cfg: Normal file, 6 lines added
@@ -0,0 +1,6 @@
# Log cloudinit generated log messages to file
:syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-init.log

# comment out the following line to allow CLOUDINIT messages through.
# Doing so means you'll also get CLOUDINIT messages in /var/log/syslog
& stop