33f565eb59
OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=146
13326 lines
451 KiB
Diff
13326 lines
451 KiB
Diff
From 121e37f185057f1135ccf89738e3a59c581d9efb Mon Sep 17 00:00:00 2001
|
|
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
|
|
<psuarezhernandez@suse.com>
|
|
Date: Thu, 20 Jun 2019 12:52:45 +0100
|
|
Subject: [PATCH] Provide the missing features required for Yomi (Yet one
|
|
more installer)
|
|
|
|
---
|
|
doc/ref/modules/all/index.rst | 1 +
|
|
doc/ref/modules/all/salt.modules.kubeadm.rst | 5 +
|
|
salt/grains/core.py | 30 +-
|
|
salt/loader.py | 49 +-
|
|
salt/modules/aixpkg.py | 2 +-
|
|
salt/modules/apkpkg.py | 12 +-
|
|
salt/modules/aptpkg.py | 14 +-
|
|
salt/modules/btrfs.py | 496 ++++++-
|
|
salt/modules/chroot.py | 165 +++
|
|
salt/modules/cmdmod.py | 44 +-
|
|
salt/modules/disk.py | 26 +-
|
|
salt/modules/dpkg_lowpkg.py | 6 +-
|
|
salt/modules/ebuildpkg.py | 8 +-
|
|
salt/modules/freebsdpkg.py | 6 +-
|
|
salt/modules/freezer.py | 294 ++++
|
|
salt/modules/groupadd.py | 177 ++-
|
|
salt/modules/kubeadm.py | 1265 ++++++++++++++++++
|
|
salt/modules/linux_lvm.py | 36 +-
|
|
salt/modules/mac_brew_pkg.py | 8 +-
|
|
salt/modules/mac_portspkg.py | 6 +-
|
|
salt/modules/mdadm_raid.py | 14 +-
|
|
salt/modules/mount.py | 56 +-
|
|
salt/modules/openbsdpkg.py | 2 +-
|
|
salt/modules/pacmanpkg.py | 10 +-
|
|
salt/modules/parted_partition.py | 91 +-
|
|
salt/modules/pkgin.py | 8 +-
|
|
salt/modules/pkgng.py | 4 +-
|
|
salt/modules/rpm_lowpkg.py | 101 +-
|
|
salt/modules/shadow.py | 300 ++++-
|
|
salt/modules/solarisipspkg.py | 4 +-
|
|
salt/modules/solarispkg.py | 2 +-
|
|
salt/modules/systemd_service.py | 264 +++-
|
|
salt/modules/useradd.py | 390 ++++--
|
|
salt/modules/xbpspkg.py | 12 +-
|
|
salt/modules/yumpkg.py | 34 +-
|
|
salt/modules/zypperpkg.py | 503 +++++--
|
|
salt/states/blockdev.py | 3 +-
|
|
salt/states/btrfs.py | 385 ++++++
|
|
salt/states/cmd.py | 18 +-
|
|
salt/states/file.py | 6 +-
|
|
salt/states/loop.py | 4 +
|
|
salt/states/lvm.py | 16 +-
|
|
salt/states/mdadm_raid.py | 2 +-
|
|
salt/states/mount.py | 305 +++++
|
|
salt/states/pkg.py | 28 +-
|
|
salt/states/pkgrepo.py | 14 +-
|
|
salt/utils/oset.py | 7 +-
|
|
salt/utils/path.py | 7 +-
|
|
salt/utils/systemd.py | 5 +-
|
|
tests/conftest.py | 10 +-
|
|
tests/unit/grains/test_core.py | 39 +
|
|
tests/unit/modules/test_btrfs.py | 370 ++++-
|
|
tests/unit/modules/test_chroot.py | 184 +++
|
|
tests/unit/modules/test_freezer.py | 274 ++++
|
|
tests/unit/modules/test_groupadd.py | 16 +-
|
|
tests/unit/modules/test_kubeadm.py | 1144 ++++++++++++++++
|
|
tests/unit/modules/test_mount.py | 154 ++-
|
|
tests/unit/modules/test_parted_partition.py | 17 +
|
|
tests/unit/modules/test_rpm_lowpkg.py | 92 +-
|
|
tests/unit/modules/test_systemd_service.py | 57 +-
|
|
tests/unit/modules/test_useradd.py | 5 +-
|
|
tests/unit/modules/test_zypperpkg.py | 100 +-
|
|
tests/unit/states/test_btrfs.py | 782 +++++++++++
|
|
tests/unit/states/test_mount.py | 605 +++++++++
|
|
tests/unit/states/test_pkg.py | 7 +-
|
|
tests/unit/test_loader.py | 96 +-
|
|
tests/unit/utils/test_systemd.py | 21 +
|
|
67 files changed, 8570 insertions(+), 648 deletions(-)
|
|
create mode 100644 doc/ref/modules/all/salt.modules.kubeadm.rst
|
|
create mode 100644 salt/modules/chroot.py
|
|
create mode 100644 salt/modules/freezer.py
|
|
create mode 100644 salt/modules/kubeadm.py
|
|
create mode 100644 salt/states/btrfs.py
|
|
create mode 100644 tests/unit/modules/test_chroot.py
|
|
create mode 100644 tests/unit/modules/test_freezer.py
|
|
create mode 100644 tests/unit/modules/test_kubeadm.py
|
|
create mode 100644 tests/unit/states/test_btrfs.py
|
|
|
|
diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
|
|
index 9762347faf..eaf7e25450 100644
|
|
--- a/doc/ref/modules/all/index.rst
|
|
+++ b/doc/ref/modules/all/index.rst
|
|
@@ -208,6 +208,7 @@ execution modules
|
|
keystone
|
|
keystoneng
|
|
kmod
|
|
+ kubeadm
|
|
kubernetes
|
|
launchctl_service
|
|
layman
|
|
diff --git a/doc/ref/modules/all/salt.modules.kubeadm.rst b/doc/ref/modules/all/salt.modules.kubeadm.rst
|
|
new file mode 100644
|
|
index 0000000000..137c779da2
|
|
--- /dev/null
|
|
+++ b/doc/ref/modules/all/salt.modules.kubeadm.rst
|
|
@@ -0,0 +1,5 @@
|
|
+salt.modules.kubeadm module
|
|
+===========================
|
|
+
|
|
+.. automodule:: salt.modules.kubeadm
|
|
+ :members:
|
|
diff --git a/salt/grains/core.py b/salt/grains/core.py
|
|
index fec7b204bc..ce64620a24 100644
|
|
--- a/salt/grains/core.py
|
|
+++ b/salt/grains/core.py
|
|
@@ -108,6 +108,10 @@ if not hasattr(os, 'uname'):
|
|
|
|
_INTERFACES = {}
|
|
|
|
+# Possible value for h_errno defined in netdb.h
|
|
+HOST_NOT_FOUND = 1
|
|
+NO_DATA = 4
|
|
+
|
|
|
|
def _windows_cpudata():
|
|
'''
|
|
@@ -2208,7 +2212,7 @@ def fqdns():
|
|
name, aliaslist, addresslist = socket.gethostbyaddr(ip)
|
|
return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
|
|
except socket.herror as err:
|
|
- if err.errno == 0:
|
|
+ if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
|
|
# No FQDN for this IP address, so we don't need to know this all the time.
|
|
log.debug("Unable to resolve address %s: %s", ip, err)
|
|
else:
|
|
@@ -2529,6 +2533,10 @@ def _hw_data(osdata):
|
|
grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
|
|
if key == 'uuid':
|
|
grains['uuid'] = grains['uuid'].lower()
|
|
+ except UnicodeDecodeError:
|
|
+ # Some firmwares provide non-valid 'product_name'
|
|
+ # files, ignore them
|
|
+ pass
|
|
except (IOError, OSError) as err:
|
|
# PermissionError is new to Python 3, but corresponds to the EACESS and
|
|
# EPERM error numbers. Use those instead here for PY2 compatibility.
|
|
@@ -2760,26 +2768,6 @@ def _hw_data(osdata):
|
|
else:
|
|
log.error('The \'prtconf\' binary was not found in $PATH.')
|
|
|
|
- elif osdata['kernel'] == 'AIX':
|
|
- cmd = salt.utils.path.which('prtconf')
|
|
- if data:
|
|
- data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
|
|
- for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
|
|
- ('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
|
|
- for regex in [re.compile(r) for r in [regstring]]:
|
|
- res = regex.search(data)
|
|
- if res and len(res.groups()) >= 1:
|
|
- grains[dest] = res.group(1).strip().replace("'", '')
|
|
-
|
|
- product_regexes = [re.compile(r'(?im)^\s*System\s+Model:\s+(\S+)')]
|
|
- for regex in product_regexes:
|
|
- res = regex.search(data)
|
|
- if res and len(res.groups()) >= 1:
|
|
- grains['manufacturer'], grains['productname'] = res.group(1).strip().replace("'", "").split(",")
|
|
- break
|
|
- else:
|
|
- log.error('The \'prtconf\' binary was not found in $PATH.')
|
|
-
|
|
return grains
|
|
|
|
|
|
diff --git a/salt/loader.py b/salt/loader.py
|
|
index 6760d9e720..1d8a4b90fd 100644
|
|
--- a/salt/loader.py
|
|
+++ b/salt/loader.py
|
|
@@ -253,6 +253,7 @@ def minion_mods(
|
|
whitelist=whitelist,
|
|
loaded_base_name=loaded_base_name,
|
|
static_modules=static_modules,
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
|
|
ret.pack['__salt__'] = ret
|
|
@@ -334,6 +335,7 @@ def engines(opts, functions, runners, utils, proxy=None):
|
|
opts,
|
|
tag='engines',
|
|
pack=pack,
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
|
|
|
|
@@ -346,6 +348,7 @@ def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
|
|
opts,
|
|
tag='proxy',
|
|
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
|
|
ret.pack['__proxy__'] = ret
|
|
@@ -383,12 +386,14 @@ def pillars(opts, functions, context=None):
|
|
'''
|
|
Returns the pillars modules
|
|
'''
|
|
+ _utils = utils(opts)
|
|
ret = LazyLoader(_module_dirs(opts, 'pillar'),
|
|
opts,
|
|
tag='pillar',
|
|
pack={'__salt__': functions,
|
|
'__context__': context,
|
|
- '__utils__': utils(opts)})
|
|
+ '__utils__': _utils},
|
|
+ extra_module_dirs=_utils.module_dirs)
|
|
ret.pack['__ext_pillar__'] = ret
|
|
return FilterDictWrapper(ret, '.ext_pillar')
|
|
|
|
@@ -488,11 +493,13 @@ def fileserver(opts, backends):
|
|
'''
|
|
Returns the file server modules
|
|
'''
|
|
+ _utils = utils(opts)
|
|
return LazyLoader(_module_dirs(opts, 'fileserver'),
|
|
opts,
|
|
tag='fileserver',
|
|
whitelist=backends,
|
|
- pack={'__utils__': utils(opts)})
|
|
+ pack={'__utils__': _utils},
|
|
+ extra_module_dirs=_utils.module_dirs)
|
|
|
|
|
|
def roster(opts, runner=None, utils=None, whitelist=None):
|
|
@@ -508,6 +515,7 @@ def roster(opts, runner=None, utils=None, whitelist=None):
|
|
'__runner__': runner,
|
|
'__utils__': utils,
|
|
},
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
|
|
|
|
@@ -546,6 +554,7 @@ def states(opts, functions, utils, serializers, whitelist=None, proxy=None):
|
|
tag='states',
|
|
pack={'__salt__': functions, '__proxy__': proxy or {}},
|
|
whitelist=whitelist,
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
ret.pack['__states__'] = ret
|
|
ret.pack['__utils__'] = utils
|
|
@@ -652,7 +661,8 @@ def grain_funcs(opts, proxy=None):
|
|
__opts__ = salt.config.minion_config('/etc/salt/minion')
|
|
grainfuncs = salt.loader.grain_funcs(__opts__)
|
|
'''
|
|
- return LazyLoader(
|
|
+ _utils = utils(opts, proxy=proxy)
|
|
+ ret = LazyLoader(
|
|
_module_dirs(
|
|
opts,
|
|
'grains',
|
|
@@ -661,7 +671,10 @@ def grain_funcs(opts, proxy=None):
|
|
),
|
|
opts,
|
|
tag='grains',
|
|
+ extra_module_dirs=_utils.module_dirs,
|
|
)
|
|
+ ret.pack['__utils__'] = _utils
|
|
+ return ret
|
|
|
|
|
|
def _load_cached_grains(opts, cfn):
|
|
@@ -895,6 +908,7 @@ def runner(opts, utils=None, context=None, whitelist=None):
|
|
tag='runners',
|
|
pack={'__utils__': utils, '__context__': context},
|
|
whitelist=whitelist,
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
# TODO: change from __salt__ to something else, we overload __salt__ too much
|
|
ret.pack['__salt__'] = ret
|
|
@@ -930,6 +944,7 @@ def sdb(opts, functions=None, whitelist=None, utils=None):
|
|
'__salt__': minion_mods(opts, utils),
|
|
},
|
|
whitelist=whitelist,
|
|
+ extra_module_dirs=utils.module_dirs if utils else None,
|
|
)
|
|
|
|
|
|
@@ -971,6 +986,7 @@ def clouds(opts):
|
|
'''
|
|
Return the cloud functions
|
|
'''
|
|
+ _utils = salt.loader.utils(opts)
|
|
# Let's bring __active_provider_name__, defaulting to None, to all cloud
|
|
# drivers. This will get temporarily updated/overridden with a context
|
|
# manager when needed.
|
|
@@ -982,8 +998,9 @@ def clouds(opts):
|
|
int_type='clouds'),
|
|
opts,
|
|
tag='clouds',
|
|
- pack={'__utils__': salt.loader.utils(opts),
|
|
+ pack={'__utils__': _utils,
|
|
'__active_provider_name__': None},
|
|
+ extra_module_dirs=_utils.module_dirs,
|
|
)
|
|
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
|
|
log.trace(
|
|
@@ -1097,6 +1114,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|
:param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
|
|
:param str virtual_funcs: The name of additional functions in the module to call to verify its functionality.
|
|
If not true, the module will not load.
|
|
+ :param list extra_module_dirs: A list of directories that will be able to import from
|
|
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
|
|
are function references themselves which are loaded on-demand.
|
|
# TODO:
|
|
@@ -1118,6 +1136,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|
static_modules=None,
|
|
proxy=None,
|
|
virtual_funcs=None,
|
|
+ extra_module_dirs=None,
|
|
): # pylint: disable=W0231
|
|
'''
|
|
In pack, if any of the values are None they will be replaced with an
|
|
@@ -1159,6 +1178,9 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|
virtual_funcs = []
|
|
self.virtual_funcs = virtual_funcs
|
|
|
|
+ self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
|
|
+ self._clean_module_dirs = []
|
|
+
|
|
self.disabled = set(
|
|
self.opts.get(
|
|
'disable_{0}{1}'.format(
|
|
@@ -1465,12 +1487,30 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|
reload_module(submodule)
|
|
self._reload_submodules(submodule)
|
|
|
|
+ def __populate_sys_path(self):
|
|
+ for directory in self.extra_module_dirs:
|
|
+ if directory not in sys.path:
|
|
+ sys.path.append(directory)
|
|
+ self._clean_module_dirs.append(directory)
|
|
+
|
|
+ def __clean_sys_path(self):
|
|
+ for directory in self._clean_module_dirs:
|
|
+ if directory in sys.path:
|
|
+ sys.path.remove(directory)
|
|
+ self._clean_module_dirs = []
|
|
+
|
|
+ # Be sure that sys.path_importer_cache do not contains any
|
|
+ # invalid FileFinder references
|
|
+ if USE_IMPORTLIB:
|
|
+ importlib.invalidate_caches()
|
|
+
|
|
def _load_module(self, name):
|
|
mod = None
|
|
fpath, suffix = self.file_mapping[name][:2]
|
|
self.loaded_files.add(name)
|
|
fpath_dirname = os.path.dirname(fpath)
|
|
try:
|
|
+ self.__populate_sys_path()
|
|
sys.path.append(fpath_dirname)
|
|
if suffix == '.pyx':
|
|
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
|
|
@@ -1593,6 +1633,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|
return False
|
|
finally:
|
|
sys.path.remove(fpath_dirname)
|
|
+ self.__clean_sys_path()
|
|
|
|
if hasattr(mod, '__opts__'):
|
|
mod.__opts__.update(self.opts)
|
|
diff --git a/salt/modules/aixpkg.py b/salt/modules/aixpkg.py
|
|
index 4f9852b504..d35946f397 100644
|
|
--- a/salt/modules/aixpkg.py
|
|
+++ b/salt/modules/aixpkg.py
|
|
@@ -400,7 +400,7 @@ def latest_version(*names, **kwargs):
|
|
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
diff --git a/salt/modules/apkpkg.py b/salt/modules/apkpkg.py
|
|
index 2e9a2a952e..4f84642e02 100644
|
|
--- a/salt/modules/apkpkg.py
|
|
+++ b/salt/modules/apkpkg.py
|
|
@@ -83,7 +83,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs)
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
Updates the package list
|
|
|
|
@@ -425,7 +425,7 @@ def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unus
|
|
return ret
|
|
|
|
|
|
-def upgrade(name=None, pkgs=None, refresh=True):
|
|
+def upgrade(name=None, pkgs=None, refresh=True, **kwargs):
|
|
'''
|
|
Upgrades all packages via ``apk upgrade`` or a specific package if name or
|
|
pkgs is specified. Name is ignored if pkgs is specified
|
|
@@ -485,7 +485,7 @@ def upgrade(name=None, pkgs=None, refresh=True):
|
|
return ret
|
|
|
|
|
|
-def list_upgrades(refresh=True):
|
|
+def list_upgrades(refresh=True, **kwargs):
|
|
'''
|
|
List all available package upgrades.
|
|
|
|
@@ -524,7 +524,7 @@ def list_upgrades(refresh=True):
|
|
return ret
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's package database (not
|
|
@@ -541,7 +541,7 @@ def file_list(*packages):
|
|
return file_dict(*packages)
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of _every_ file on the system's
|
|
@@ -580,7 +580,7 @@ def file_dict(*packages):
|
|
return {'errors': errors, 'packages': ret}
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
Return the name of the package that owns the file. Multiple file paths can
|
|
be passed. Like :mod:`pkg.version <salt.modules.apk.version`, if a single
|
|
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
|
|
index f51b6958e5..e537f5b007 100644
|
|
--- a/salt/modules/aptpkg.py
|
|
+++ b/salt/modules/aptpkg.py
|
|
@@ -332,7 +332,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs)
|
|
|
|
|
|
-def refresh_db(cache_valid_time=0, failhard=False):
|
|
+def refresh_db(cache_valid_time=0, failhard=False, **kwargs):
|
|
'''
|
|
Updates the APT database to latest packages based upon repositories
|
|
|
|
@@ -1430,7 +1430,7 @@ def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
|
|
return _get_upgradable(dist_upgrade, **kwargs)
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -1443,7 +1443,7 @@ def upgrade_available(name):
|
|
return latest_version(name) != ''
|
|
|
|
|
|
-def version_cmp(pkg1, pkg2, ignore_epoch=False):
|
|
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
|
|
'''
|
|
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
|
|
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
|
|
@@ -1633,7 +1633,7 @@ def _skip_source(source):
|
|
return False
|
|
|
|
|
|
-def list_repos():
|
|
+def list_repos(**kwargs):
|
|
'''
|
|
Lists all repos in the sources.list (and sources.lists.d) files
|
|
|
|
@@ -2400,7 +2400,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
|
|
}
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's package database (not
|
|
@@ -2417,7 +2417,7 @@ def file_list(*packages):
|
|
return __salt__['lowpkg.file_list'](*packages)
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of _every_ file on the system's
|
|
@@ -2701,7 +2701,7 @@ def _resolve_deps(name, pkgs, **kwargs):
|
|
return
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
diff --git a/salt/modules/btrfs.py b/salt/modules/btrfs.py
|
|
index 36bfaeb12e..6fd2ac58a4 100644
|
|
--- a/salt/modules/btrfs.py
|
|
+++ b/salt/modules/btrfs.py
|
|
@@ -20,11 +20,11 @@ Module for managing BTRFS file systems.
|
|
|
|
# Import Python libs
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
+import itertools
|
|
import os
|
|
import re
|
|
import uuid
|
|
|
|
-
|
|
# Import Salt libs
|
|
import salt.utils.fsutils
|
|
import salt.utils.platform
|
|
@@ -673,3 +673,497 @@ def properties(obj, type=None, set=None):
|
|
ret[prop]['value'] = value and value.split("=")[-1] or "N/A"
|
|
|
|
return ret
|
|
+
|
|
+
|
|
+def subvolume_exists(path):
|
|
+ '''
|
|
+ Check if a subvolume is present in the filesystem.
|
|
+
|
|
+ path
|
|
+ Mount point for the subvolume (full path)
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_exists /mnt/var
|
|
+
|
|
+ '''
|
|
+ cmd = ['btrfs', 'subvolume', 'show', path]
|
|
+ return __salt__['cmd.retcode'](cmd, ignore_retcode=True) == 0
|
|
+
|
|
+
|
|
+def subvolume_create(name, dest=None, qgroupids=None):
|
|
+ '''
|
|
+ Create subvolume `name` in `dest`.
|
|
+
|
|
+ Return True if the subvolume is created, False is the subvolume is
|
|
+ already there.
|
|
+
|
|
+ name
|
|
+ Name of the new subvolume
|
|
+
|
|
+ dest
|
|
+ If not given, the subvolume will be created in the current
|
|
+ directory, if given will be in /dest/name
|
|
+
|
|
+ qgroupids
|
|
+ Add the newly created subcolume to a qgroup. This parameter
|
|
+ is a list
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_create var
|
|
+ salt '*' btrfs.subvolume_create var dest=/mnt
|
|
+ salt '*' btrfs.subvolume_create var qgroupids='[200]'
|
|
+
|
|
+ '''
|
|
+ if qgroupids and type(qgroupids) is not list:
|
|
+ raise CommandExecutionError('Qgroupids parameter must be a list')
|
|
+
|
|
+ if dest:
|
|
+ name = os.path.join(dest, name)
|
|
+
|
|
+ # If the subvolume is there, we are done
|
|
+ if subvolume_exists(name):
|
|
+ return False
|
|
+
|
|
+ cmd = ['btrfs', 'subvolume', 'create']
|
|
+ if type(qgroupids) is list:
|
|
+ cmd.append('-i')
|
|
+ cmd.extend(qgroupids)
|
|
+ cmd.append(name)
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+ return True
|
|
+
|
|
+
|
|
+def subvolume_delete(name=None, names=None, commit=None):
|
|
+ '''
|
|
+ Delete the subvolume(s) from the filesystem
|
|
+
|
|
+ The user can remove one single subvolume (name) or multiple of
|
|
+ then at the same time (names). One of the two parameters needs to
|
|
+ specified.
|
|
+
|
|
+ Please, refer to the documentation to understand the implication
|
|
+ on the transactions, and when the subvolume is really deleted.
|
|
+
|
|
+ Return True if the subvolume is deleted, False is the subvolume
|
|
+ was already missing.
|
|
+
|
|
+ name
|
|
+ Name of the subvolume to remove
|
|
+
|
|
+ names
|
|
+ List of names of subvolumes to remove
|
|
+
|
|
+ commit
|
|
+ * 'after': Wait for transaction commit at the end
|
|
+ * 'each': Wait for transaction commit after each delete
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_delete /var/volumes/tmp
|
|
+ salt '*' btrfs.subvolume_delete /var/volumes/tmp commit=after
|
|
+
|
|
+ '''
|
|
+ if not name and not (names and type(names) is list):
|
|
+ raise CommandExecutionError('Provide a value for the name parameter')
|
|
+
|
|
+ if commit and commit not in ('after', 'each'):
|
|
+ raise CommandExecutionError('Value for commit not recognized')
|
|
+
|
|
+ # Filter the names and take the ones that are still there
|
|
+ names = [n for n in itertools.chain([name], names or [])
|
|
+ if n and subvolume_exists(n)]
|
|
+
|
|
+ # If the subvolumes are gone, we are done
|
|
+ if not names:
|
|
+ return False
|
|
+
|
|
+ cmd = ['btrfs', 'subvolume', 'delete']
|
|
+ if commit == 'after':
|
|
+ cmd.append('--commit-after')
|
|
+ elif commit == 'each':
|
|
+ cmd.append('--commit-each')
|
|
+ cmd.extend(names)
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+ return True
|
|
+
|
|
+
|
|
+def subvolume_find_new(name, last_gen):
|
|
+ '''
|
|
+ List the recently modified files in a subvolume
|
|
+
|
|
+ name
|
|
+ Name of the subvolume
|
|
+
|
|
+ last_gen
|
|
+ Last transid marker from where to compare
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
|
|
+
|
|
+ '''
|
|
+ cmd = ['btrfs', 'subvolume', 'find-new', name, last_gen]
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+
|
|
+ lines = res['stdout'].splitlines()
|
|
+ # Filenames are at the end of each inode line
|
|
+ files = [l.split()[-1] for l in lines if l.startswith('inode')]
|
|
+ # The last transid is in the last line
|
|
+ transid = lines[-1].split()[-1]
|
|
+ return {
|
|
+ 'files': files,
|
|
+ 'transid': transid,
|
|
+ }
|
|
+
|
|
+
|
|
+def subvolume_get_default(path):
|
|
+ '''
|
|
+ Get the default subvolume of the filesystem path
|
|
+
|
|
+ path
|
|
+ Mount point for the subvolume
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_get_default /var/volumes/tmp
|
|
+
|
|
+ '''
|
|
+ cmd = ['btrfs', 'subvolume', 'get-default', path]
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+
|
|
+ line = res['stdout'].strip()
|
|
+ # The ID is the second parameter, and the name the last one, or
|
|
+ # '(FS_TREE)'
|
|
+ #
|
|
+ # When the default one is set:
|
|
+ # ID 5 (FS_TREE)
|
|
+ #
|
|
+ # When we manually set a different one (var):
|
|
+ # ID 257 gen 8 top level 5 path var
|
|
+ #
|
|
+ id_ = line.split()[1]
|
|
+ name = line.split()[-1]
|
|
+ return {
|
|
+ 'id': id_,
|
|
+ 'name': name,
|
|
+ }
|
|
+
|
|
+
|
|
+def _pop(line, key, use_rest):
|
|
+ '''
|
|
+ Helper for the line parser.
|
|
+
|
|
+ If key is a prefix of line, will remove ir from the line and will
|
|
+ extract the value (space separation), and the rest of the line.
|
|
+
|
|
+ If use_rest is True, the value will be the rest of the line.
|
|
+
|
|
+ Return a tuple with the value and the rest of the line.
|
|
+ '''
|
|
+ value = None
|
|
+ if line.startswith(key):
|
|
+ line = line[len(key):].strip()
|
|
+ if use_rest:
|
|
+ value = line
|
|
+ line = ''
|
|
+ else:
|
|
+ value, line = line.split(' ', 1)
|
|
+ return value, line.strip()
|
|
+
|
|
+
|
|
+def subvolume_list(path, parent_id=False, absolute=False,
|
|
+ ogeneration=False, generation=False,
|
|
+ subvolumes=False, uuid=False, parent_uuid=False,
|
|
+ sent_subvolume_uuid=False, snapshots=False,
|
|
+ readonly=False, deleted=False, generation_cmp=None,
|
|
+ ogeneration_cmp=None, sort=None):
|
|
+ '''
|
|
+ List the subvolumes present in the filesystem.
|
|
+
|
|
+ path
|
|
+ Mount point for the subvolume
|
|
+
|
|
+ parent_id
|
|
+ Print parent ID
|
|
+
|
|
+ absolute
|
|
+ Print all the subvolumes in the filesystem and distinguish
|
|
+ between absolute and relative path with respect to the given
|
|
+ <path>
|
|
+
|
|
+ ogeneration
|
|
+ Print the ogeneration of the subvolume
|
|
+
|
|
+ generation
|
|
+ Print the generation of the subvolume
|
|
+
|
|
+ subvolumes
|
|
+ Print only subvolumes below specified <path>
|
|
+
|
|
+ uuid
|
|
+ Print the UUID of the subvolume
|
|
+
|
|
+ parent_uuid
|
|
+ Print the parent uuid of subvolumes (and snapshots)
|
|
+
|
|
+ sent_subvolume_uuid
|
|
+ Print the UUID of the sent subvolume, where the subvolume is
|
|
+ the result of a receive operation
|
|
+
|
|
+ snapshots
|
|
+ Only snapshot subvolumes in the filesystem will be listed
|
|
+
|
|
+ readonly
|
|
+ Only readonly subvolumes in the filesystem will be listed
|
|
+
|
|
+ deleted
|
|
+ Only deleted subvolumens that are ye not cleaned
|
|
+
|
|
+ generation_cmp
|
|
+ List subvolumes in the filesystem that its generation is >=,
|
|
+ <= or = value. '+' means >= value, '-' means <= value, If
|
|
+ there is neither '+' nor '-', it means = value
|
|
+
|
|
+ ogeneration_cmp
|
|
+ List subvolumes in the filesystem that its ogeneration is >=,
|
|
+ <= or = value
|
|
+
|
|
+ sort
|
|
+ List subvolumes in order by specified items. Possible values:
|
|
+ * rootid
|
|
+ * gen
|
|
+ * ogen
|
|
+ * path
|
|
+ You can add '+' or '-' in front of each items, '+' means
|
|
+ ascending, '-' means descending. The default is ascending. You
|
|
+ can combite it in a list.
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_list /var/volumes/tmp
|
|
+ salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
|
|
+ salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'
|
|
+
|
|
+ '''
|
|
+ if sort and type(sort) is not list:
|
|
+ raise CommandExecutionError('Sort parameter must be a list')
|
|
+
|
|
+ valid_sorts = [
|
|
+ ''.join((order, attrib)) for order, attrib in itertools.product(
|
|
+ ('-', '', '+'), ('rootid', 'gen', 'ogen', 'path'))
|
|
+ ]
|
|
+ if sort and not all(s in valid_sorts for s in sort):
|
|
+ raise CommandExecutionError('Value for sort not recognized')
|
|
+
|
|
+ cmd = ['btrfs', 'subvolume', 'list']
|
|
+
|
|
+ params = ((parent_id, '-p'),
|
|
+ (absolute, '-a'),
|
|
+ (ogeneration, '-c'),
|
|
+ (generation, '-g'),
|
|
+ (subvolumes, '-o'),
|
|
+ (uuid, '-u'),
|
|
+ (parent_uuid, '-q'),
|
|
+ (sent_subvolume_uuid, '-R'),
|
|
+ (snapshots, '-s'),
|
|
+ (readonly, '-r'),
|
|
+ (deleted, '-d'))
|
|
+ cmd.extend(p[1] for p in params if p[0])
|
|
+
|
|
+ if generation_cmp:
|
|
+ cmd.extend(['-G', generation_cmp])
|
|
+
|
|
+ if ogeneration_cmp:
|
|
+ cmd.extend(['-C', ogeneration_cmp])
|
|
+
|
|
+ # We already validated the content of the list
|
|
+ if sort:
|
|
+ cmd.append('--sort={}'.format(','.join(sort)))
|
|
+
|
|
+ cmd.append(path)
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+
|
|
+ # Parse the output. ID and gen are always at the begining, and
|
|
+ # path is always at the end. There is only one column that
|
|
+ # contains space (top level), and the path value can also have
|
|
+ # spaces. The issue is that we do not know how many spaces do we
|
|
+ # have in the path name, so any classic solution based on split
|
|
+ # will fail.
|
|
+ #
|
|
+ # This list is in order.
|
|
+ columns = ('ID', 'gen', 'cgen', 'parent', 'top level', 'otime',
|
|
+ 'parent_uuid', 'received_uuid', 'uuid', 'path')
|
|
+ result = []
|
|
+ for line in res['stdout'].splitlines():
|
|
+ table = {}
|
|
+ for key in columns:
|
|
+ value, line = _pop(line, key, key == 'path')
|
|
+ if value:
|
|
+ table[key.lower()] = value
|
|
+ # If line is not empty here, we are not able to parse it
|
|
+ if not line:
|
|
+ result.append(table)
|
|
+
|
|
+ return result
|
|
+
|
|
+
|
|
+def subvolume_set_default(subvolid, path):
|
|
+ '''
|
|
+ Set the subvolume as default
|
|
+
|
|
+ subvolid
|
|
+ ID of the new default subvolume
|
|
+
|
|
+ path
|
|
+ Mount point for the filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_set_default 257 /var/volumes/tmp
|
|
+
|
|
+ '''
|
|
+ cmd = ['btrfs', 'subvolume', 'set-default', subvolid, path]
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+ return True
|
|
+
|
|
+
|
|
+def subvolume_show(path):
|
|
+ '''
|
|
+ Show information of a given subvolume
|
|
+
|
|
+ path
|
|
+ Mount point for the filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_show /var/volumes/tmp
|
|
+
|
|
+ '''
|
|
+ cmd = ['btrfs', 'subvolume', 'show', path]
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+
|
|
+ result = {}
|
|
+ table = {}
|
|
+ # The real name is the first line, later there is a table of
|
|
+ # values separated with colon.
|
|
+ stdout = res['stdout'].splitlines()
|
|
+ key = stdout.pop(0)
|
|
+ result[key.strip()] = table
|
|
+
|
|
+ for line in stdout:
|
|
+ key, value = line.split(':', 1)
|
|
+ table[key.lower().strip()] = value.strip()
|
|
+ return result
|
|
+
|
|
+
|
|
+def subvolume_snapshot(source, dest=None, name=None, read_only=False):
|
|
+ '''
|
|
+ Create a snapshot of a source subvolume
|
|
+
|
|
+ source
|
|
+ Source subvolume from where to create the snapshot
|
|
+
|
|
+ dest
|
|
+ If only dest is given, the subvolume will be named as the
|
|
+ basename of the source
|
|
+
|
|
+ name
|
|
+ Name of the snapshot
|
|
+
|
|
+ read_only
|
|
+ Create a read only snapshot
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
|
|
+ salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup
|
|
+
|
|
+ '''
|
|
+ if not dest and not name:
|
|
+ raise CommandExecutionError('Provide parameter dest, name, or both')
|
|
+
|
|
+ cmd = ['btrfs', 'subvolume', 'snapshot']
|
|
+ if read_only:
|
|
+ cmd.append('-r')
|
|
+ if dest and not name:
|
|
+ cmd.append(dest)
|
|
+ if dest and name:
|
|
+ name = os.path.join(dest, name)
|
|
+ if name:
|
|
+ cmd.append(name)
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+ return True
|
|
+
|
|
+
|
|
+def subvolume_sync(path, subvolids=None, sleep=None):
|
|
+ '''
|
|
+ Wait until given subvolume are completely removed from the
|
|
+ filesystem after deletion.
|
|
+
|
|
+ path
|
|
+ Mount point for the filesystem
|
|
+
|
|
+ subvolids
|
|
+ List of IDs of subvolumes to wait for
|
|
+
|
|
+ sleep
|
|
+ Sleep N seconds betwenn checks (default: 1)
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' btrfs.subvolume_sync /var/volumes/tmp
|
|
+ salt '*' btrfs.subvolume_sync /var/volumes/tmp subvolids='[257]'
|
|
+
|
|
+ '''
|
|
+ if subvolids and type(subvolids) is not list:
|
|
+ raise CommandExecutionError('Subvolids parameter must be a list')
|
|
+
|
|
+ cmd = ['btrfs', 'subvolume', 'sync']
|
|
+ if sleep:
|
|
+ cmd.extend(['-s', sleep])
|
|
+
|
|
+ cmd.append(path)
|
|
+ if subvolids:
|
|
+ cmd.extend(subvolids)
|
|
+
|
|
+ res = __salt__['cmd.run_all'](cmd)
|
|
+ salt.utils.fsutils._verify_run(res)
|
|
+ return True
|
|
diff --git a/salt/modules/chroot.py b/salt/modules/chroot.py
|
|
new file mode 100644
|
|
index 0000000000..6e4705b67e
|
|
--- /dev/null
|
|
+++ b/salt/modules/chroot.py
|
|
@@ -0,0 +1,165 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:maturity: new
|
|
+:depends: None
|
|
+:platform: Linux
|
|
+'''
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import logging
|
|
+import os
|
|
+import sys
|
|
+import tempfile
|
|
+
|
|
+from salt.defaults.exitcodes import EX_OK
|
|
+from salt.exceptions import CommandExecutionError
|
|
+from salt.utils.args import clean_kwargs
|
|
+
|
|
+log = logging.getLogger(__name__)
|
|
+
|
|
+
|
|
def __virtual__():
    '''
    Only load the module when the chroot command is available.
    '''
    if __utils__['path.which']('chroot') is None:
        return (False, 'Module chroot requires the command chroot')
    return True
|
|
+
|
|
+
|
|
def exist(name):
    '''
    Return True if the chroot environment is present.
    '''
    # A usable chroot needs the root directory itself plus dev/ and proc/
    required = (name, os.path.join(name, 'dev'), os.path.join(name, 'proc'))
    return all(os.path.isdir(path) for path in required)
|
|
+
|
|
+
|
|
def create(name):
    '''
    Create a basic chroot environment.

    Note that this environment is not functional. The caller needs to
    install the minimal required binaries, including Python if
    chroot.call is called.

    name
        Path to the chroot environment

    CLI Example:

    .. code-block:: bash

        salt myminion chroot.create /chroot

    '''
    # Nothing to do if the skeleton is already in place
    if exist(name):
        return True

    try:
        # Minimal skeleton: the mount points for /dev and /proc
        os.makedirs(os.path.join(name, 'dev'), mode=0o755)
        os.makedirs(os.path.join(name, 'proc'), mode=0o555)
    except OSError as e:
        log.error('Error when trying to create chroot directories: %s', e)
        return False
    return True
|
|
+
|
|
+
|
|
def call(name, function, *args, **kwargs):
    '''
    Executes a Salt function inside a chroot environment.

    The chroot does not need to have Salt installed, but Python is
    required.

    name
        Path to the chroot environment

    function
        Salt execution module function

    CLI Example:

    .. code-block:: bash

        salt myminion chroot.call /chroot test.ping

    '''
    if not function:
        raise CommandExecutionError('Missing function parameter')

    if not exist(name):
        raise CommandExecutionError('Chroot environment not found')

    # Create a temporary directory inside the chroot where we can
    # untar salt-thin
    thin_dest_path = tempfile.mkdtemp(dir=name)
    thin_path = __utils__['thin.gen_thin'](
        __opts__['cachedir'],
        extra_mods=__salt__['config.option']('thin_extra_mods', ''),
        so_mods=__salt__['config.option']('thin_so_mods', '')
    )
    stdout = __salt__['archive.tar']('xzf', thin_path, dest=thin_dest_path)
    if stdout:
        # tar reported a problem; clean up and bail out
        __utils__['files.rm_rf'](thin_dest_path)
        return {'result': False, 'comment': stdout}

    # Path of the thin directory as seen from inside the chroot
    chroot_path = os.path.join(os.path.sep,
                               os.path.relpath(thin_dest_path, name))
    try:
        safe_kwargs = clean_kwargs(**kwargs)
        salt_argv = [
            'python{}'.format(sys.version_info[0]),
            os.path.join(chroot_path, 'salt-call'),
            '--metadata',
            '--local',
            '--log-file', os.path.join(chroot_path, 'log'),
            '--cachedir', os.path.join(chroot_path, 'cache'),
            '--out', 'json',
            '-l', 'quiet',
            '--',
            function
        ] + list(args) + [
            # BUG FIX: must iterate items(); iterating the dict
            # directly yields only the keys, and unpacking a key
            # string into (k, v) raises ValueError for any kwarg.
            '{}={}'.format(k, v) for (k, v) in safe_kwargs.items()
        ]
        ret = __salt__['cmd.run_chroot'](name, [str(x) for x in salt_argv])
        if ret['retcode'] != EX_OK:
            raise CommandExecutionError(ret['stderr'])

        # Process "real" result in stdout
        try:
            data = __utils__['json.find_json'](ret['stdout'])
            local = data.get('local', data)
            if isinstance(local, dict) and 'retcode' in local:
                __context__['retcode'] = local['retcode']
            return local.get('return', data)
        except ValueError:
            return {
                'result': False,
                'comment': "Can't parse container command output"
            }
    finally:
        # Always remove the salt-thin directory from the chroot
        __utils__['files.rm_rf'](thin_dest_path)
|
|
diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
|
|
index 81c4d3f811..d0819f2f79 100644
|
|
--- a/salt/modules/cmdmod.py
|
|
+++ b/salt/modules/cmdmod.py
|
|
@@ -2889,6 +2889,7 @@ def run_chroot(root,
|
|
group=None,
|
|
shell=DEFAULT_SHELL,
|
|
python_shell=True,
|
|
+ binds=None,
|
|
env=None,
|
|
clean_env=False,
|
|
template=None,
|
|
@@ -2914,19 +2915,17 @@ def run_chroot(root,
|
|
|
|
:param str root: Path to the root of the jail to use.
|
|
|
|
- stdin
|
|
- A string of standard input can be specified for the command to be run using
|
|
- the ``stdin`` parameter. This can be useful in cases where sensitive
|
|
- information must be read from standard input.:
|
|
+ :param str stdin: A string of standard input can be specified for
|
|
+ the command to be run using the ``stdin`` parameter. This can
|
|
+ be useful in cases where sensitive information must be read
|
|
+ from standard input.:
|
|
|
|
- runas
|
|
- User to run script as.
|
|
+ :param str runas: User to run script as.
|
|
|
|
- group
|
|
- Group to run script as.
|
|
+ :param str group: Group to run script as.
|
|
|
|
- shell
|
|
- Shell to execute under. Defaults to the system default shell.
|
|
+ :param str shell: Shell to execute under. Defaults to the system
|
|
+ default shell.
|
|
|
|
:param str cmd: The command to run. ex: ``ls -lart /home``
|
|
|
|
@@ -2950,6 +2949,9 @@ def run_chroot(root,
|
|
arguments. Set to True to use shell features, such as pipes or
|
|
redirection.
|
|
|
|
+ :param list binds: List of directories that will be exported inside
|
|
+ the chroot with the bind option.
|
|
+
|
|
:param dict env: Environment variables to be set prior to execution.
|
|
|
|
.. note::
|
|
@@ -2968,11 +2970,11 @@ def run_chroot(root,
|
|
engine will be used to render the downloaded file. Currently jinja,
|
|
mako, and wempy are supported.
|
|
|
|
- :param bool rstrip:
|
|
- Strip all whitespace off the end of output before it is returned.
|
|
+ :param bool rstrip: Strip all whitespace off the end of output
|
|
+ before it is returned.
|
|
|
|
- :param str umask:
|
|
- The umask (in octal) to use when running the command.
|
|
+ :param str umask: The umask (in octal) to use when running the
|
|
+ command.
|
|
|
|
:param str output_encoding: Control the encoding used to decode the
|
|
command's output.
|
|
@@ -3046,6 +3048,15 @@ def run_chroot(root,
|
|
'sysfs',
|
|
fstype='sysfs')
|
|
|
|
+ binds = binds if binds else []
|
|
+ for bind_exported in binds:
|
|
+ bind_exported_to = os.path.relpath(bind_exported, os.path.sep)
|
|
+ bind_exported_to = os.path.join(root, bind_exported_to)
|
|
+ __salt__['mount.mount'](
|
|
+ bind_exported_to,
|
|
+ bind_exported,
|
|
+ opts='default,bind')
|
|
+
|
|
# Execute chroot routine
|
|
sh_ = '/bin/sh'
|
|
if os.path.isfile(os.path.join(root, 'bin/bash')):
|
|
@@ -3096,6 +3107,11 @@ def run_chroot(root,
|
|
log.error('Processes running in chroot could not be killed, '
|
|
'filesystem will remain mounted')
|
|
|
|
+ for bind_exported in binds:
|
|
+ bind_exported_to = os.path.relpath(bind_exported, os.path.sep)
|
|
+ bind_exported_to = os.path.join(root, bind_exported_to)
|
|
+ __salt__['mount.umount'](bind_exported_to)
|
|
+
|
|
__salt__['mount.umount'](os.path.join(root, 'sys'))
|
|
__salt__['mount.umount'](os.path.join(root, 'proc'))
|
|
__salt__['mount.umount'](os.path.join(root, 'dev'))
|
|
diff --git a/salt/modules/disk.py b/salt/modules/disk.py
|
|
index 0e0f6eef55..9b0c001e35 100644
|
|
--- a/salt/modules/disk.py
|
|
+++ b/salt/modules/disk.py
|
|
@@ -268,24 +268,34 @@ def percent(args=None):
|
|
|
|
|
|
@salt.utils.decorators.path.which('blkid')
|
|
-def blkid(device=None):
|
|
+def blkid(device=None, token=None):
|
|
'''
|
|
Return block device attributes: UUID, LABEL, etc. This function only works
|
|
on systems where blkid is available.
|
|
|
|
+ device
|
|
+ Device name from the system
|
|
+
|
|
+ token
|
|
+ Any valid token used for the search
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' disk.blkid
|
|
salt '*' disk.blkid /dev/sda
|
|
+ salt '*' disk.blkid token='UUID=6a38ee5-7235-44e7-8b22-816a403bad5d'
|
|
+ salt '*' disk.blkid token='TYPE=ext4'
|
|
'''
|
|
- args = ""
|
|
+ cmd = ['blkid']
|
|
if device:
|
|
- args = " " + device
|
|
+ cmd.append(device)
|
|
+ elif token:
|
|
+ cmd.extend(['-t', token])
|
|
|
|
ret = {}
|
|
- blkid_result = __salt__['cmd.run_all']('blkid' + args, python_shell=False)
|
|
+ blkid_result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
|
|
if blkid_result['retcode'] > 0:
|
|
return ret
|
|
@@ -422,6 +432,7 @@ def format_(device,
|
|
fs_type='ext4',
|
|
inode_size=None,
|
|
lazy_itable_init=None,
|
|
+ fat=None,
|
|
force=False):
|
|
'''
|
|
Format a filesystem onto a device
|
|
@@ -449,6 +460,10 @@ def format_(device,
|
|
|
|
This option is only enabled for ext filesystems
|
|
|
|
+ fat
|
|
+ FAT size option. Can be 12, 16 or 32, and can only be used on
|
|
+ fat or vfat filesystems.
|
|
+
|
|
force
|
|
Force mke2fs to create a filesystem, even if the specified device is
|
|
not a partition on a block special device. This option is only enabled
|
|
@@ -471,6 +486,9 @@ def format_(device,
|
|
if lazy_itable_init is not None:
|
|
if fs_type[:3] == 'ext':
|
|
cmd.extend(['-E', 'lazy_itable_init={0}'.format(lazy_itable_init)])
|
|
+ if fat is not None and fat in (12, 16, 32):
|
|
+ if fs_type[-3:] == 'fat':
|
|
+ cmd.extend(['-F', fat])
|
|
if force:
|
|
if fs_type[:3] == 'ext':
|
|
cmd.append('-F')
|
|
diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
|
|
index 26ca5dcf5a..d5c89cb195 100644
|
|
--- a/salt/modules/dpkg_lowpkg.py
|
|
+++ b/salt/modules/dpkg_lowpkg.py
|
|
@@ -135,7 +135,7 @@ def unpurge(*packages):
|
|
return salt.utils.data.compare_dicts(old, new)
|
|
|
|
|
|
-def list_pkgs(*packages):
|
|
+def list_pkgs(*packages, **kwargs):
|
|
'''
|
|
List the packages currently installed in a dict::
|
|
|
|
@@ -169,7 +169,7 @@ def list_pkgs(*packages):
|
|
return pkgs
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's package database (not
|
|
@@ -211,7 +211,7 @@ def file_list(*packages):
|
|
return {'errors': errors, 'files': list(ret)}
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of _every_ file on the system's
|
|
diff --git a/salt/modules/ebuildpkg.py b/salt/modules/ebuildpkg.py
|
|
index cb77ff7852..205318f579 100644
|
|
--- a/salt/modules/ebuildpkg.py
|
|
+++ b/salt/modules/ebuildpkg.py
|
|
@@ -358,7 +358,7 @@ def list_upgrades(refresh=True, backtrack=3, **kwargs): # pylint: disable=W0613
|
|
return _get_upgradable(backtrack)
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -440,7 +440,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|
return ret
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
Update the portage tree using the first available method from the following
|
|
list:
|
|
@@ -765,7 +765,7 @@ def install(name=None,
|
|
return changes
|
|
|
|
|
|
-def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
|
|
+def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
@@ -858,7 +858,7 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
|
|
return ret
|
|
|
|
|
|
-def upgrade(refresh=True, binhost=None, backtrack=3):
|
|
+def upgrade(refresh=True, binhost=None, backtrack=3, **kwargs):
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py
|
|
index 43f127ef35..0bae7a3bab 100644
|
|
--- a/salt/modules/freebsdpkg.py
|
|
+++ b/salt/modules/freebsdpkg.py
|
|
@@ -238,7 +238,7 @@ def version(*names, **kwargs):
|
|
])
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
``pkg_add(1)`` does not use a local database of available packages, so this
|
|
function simply returns ``True``. it exists merely for API compatibility.
|
|
@@ -503,7 +503,7 @@ def _rehash():
|
|
__salt__['cmd.shell']('rehash', output_loglevel='trace')
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's package database (not
|
|
@@ -525,7 +525,7 @@ def file_list(*packages):
|
|
return ret
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of _every_ file on the
|
|
diff --git a/salt/modules/freezer.py b/salt/modules/freezer.py
|
|
new file mode 100644
|
|
index 0000000000..786dfe4515
|
|
--- /dev/null
|
|
+++ b/salt/modules/freezer.py
|
|
@@ -0,0 +1,294 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:maturity: new
|
|
+:depends: None
|
|
+:platform: Linux
|
|
+'''
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import logging
|
|
+import os
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+from salt.utils.args import clean_kwargs
|
|
+from salt.utils.files import fopen
|
|
+import salt.utils.json as json
|
|
+from salt.ext.six.moves import zip
|
|
+
|
|
+log = logging.getLogger(__name__)
|
|
+
|
|
+__func_alias__ = {
|
|
+ 'list_': 'list',
|
|
+}
|
|
+
|
|
+
|
|
+def __virtual__():
|
|
+ '''
|
|
+ Freezer is based on top of the pkg module.
|
|
+
|
|
+ Return True as pkg is going to be there, so we can avoid of
|
|
+ loading all modules.
|
|
+
|
|
+ '''
|
|
+ return True
|
|
+
|
|
+
|
|
def _states_path():
    '''
    Return the directory where the freezer state files are stored.
    '''
    cachedir = __opts__['cachedir']
    return os.path.join(cachedir, 'freezer')
|
|
+
|
|
+
|
|
def _paths(name=None):
    '''
    Return the full path for the packages and repository freezer
    files, as a (packages-file, repos-file) tuple.
    '''
    name = name or 'freezer'
    base = _states_path()
    return tuple(os.path.join(base, '{}{}'.format(name, suffix))
                 for suffix in ('-pkgs.yml', '-reps.yml'))
|
|
+
|
|
+
|
|
def status(name=None):
    '''
    Return True if there is already a frozen state.

    A frozen state is merely a list of packages (including the
    version) in a specific time. This information can be used to
    compare with the current list of packages, and revert the
    installation of some extra packages that are in the system.

    name
        Name of the frozen state. Optional.

    CLI Example:

    .. code-block:: bash

        salt '*' freezer.status
        salt '*' freezer.status pre_install

    '''
    name = name or 'freezer'
    # Both the packages file and the repos file must be present
    return all(os.path.isfile(path) for path in _paths(name))
|
|
+
|
|
+
|
|
def list_():
    '''
    Return the list of frozen states.

    CLI Example:

    .. code-block:: bash

        salt '*' freezer.list

    '''
    states_path = _states_path()
    if not os.path.isdir(states_path):
        return []

    # Both suffixes have the same length, so stripping the last nine
    # characters recovers the state name in either case
    names = {entry[:-9] for entry in os.listdir(states_path)
             if entry.endswith(('-pkgs.yml', '-reps.yml'))}
    return sorted(names)
|
|
+
|
|
+
|
|
def freeze(name=None, force=False, **kwargs):
    '''
    Save the list of package and repos in a freeze file.

    As this module is built on top of the pkg module, the user can
    send extra attributes to the underlying pkg module via kwargs.
    This function will call ``pkg.list_pkgs`` and ``pkg.list_repos``,
    and any additional arguments will be passed through to those
    functions.

    name
        Name of the frozen state. Optional.

    force
        If true, overwrite the state. Optional.

    CLI Example:

    .. code-block:: bash

        salt '*' freezer.freeze
        salt '*' freezer.freeze pre_install
        salt '*' freezer.freeze force=True root=/chroot

    '''
    states_path = _states_path()

    # BUG FIX: os.makedirs raises OSError when the directory already
    # exists, which made every freeze after the first one fail with a
    # spurious CommandExecutionError. Only create it when missing.
    if not os.path.isdir(states_path):
        try:
            os.makedirs(states_path)
        except OSError as e:
            msg = 'Error when trying to create the freezer storage %s: %s'
            log.error(msg, states_path, e)
            raise CommandExecutionError(msg % (states_path, e))

    if status(name) and not force:
        raise CommandExecutionError('The state is already present. Use '
                                    'force parameter to overwrite.')
    safe_kwargs = clean_kwargs(**kwargs)
    pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
    repos = __salt__['pkg.list_repos'](**safe_kwargs)
    # Do not reuse `name` as the loop variable: it holds the state
    # name used by _paths() above, and shadowing it is error-prone.
    for path, content in zip(_paths(name), (pkgs, repos)):
        with fopen(path, 'w') as fp:
            json.dump(content, fp)
    return True
|
|
+
|
|
+
|
|
def restore(name=None, **kwargs):
    '''
    Make sure that the system contains the packages and repos from a
    frozen state.

    Read the list of packages and repositories from the freeze file,
    and compare it with the current list of packages and repos. If
    there is any difference, all the missing packages and repos will
    be installed, and all the extra packages and repos will be
    removed.

    As this module is built on top of the pkg module, the user can
    send extra attributes to the underlying pkg module via kwargs.
    This function will call ``pkg.list_repos``, ``pkg.mod_repo``,
    ``pkg.list_pkgs``, ``pkg.install``, ``pkg.remove`` and
    ``pkg.del_repo``, and any additional arguments will be passed
    through to those functions.

    name
        Name of the frozen state. Optional.

    CLI Example:

    .. code-block:: bash

        salt '*' freezer.restore
        salt '*' freezer.restore root=/chroot

    '''
    if not status(name):
        raise CommandExecutionError('Frozen state not found.')

    frozen_pkgs = {}
    frozen_repos = {}
    # NOTE(review): this loop rebinds ``name`` to a file path. It is
    # harmless today because ``name`` is not read again below, but it
    # is fragile — a distinct loop variable would be safer.
    for name, content in zip(_paths(name), (frozen_pkgs, frozen_repos)):
        with fopen(name) as fp:
            content.update(json.load(fp))

    # The ordering of removing or adding packages and repos can be
    # relevant, as maybe some missing package comes from a repo that
    # is also missing, so it cannot be installed. But it can also
    # happen that a missing package comes from a repo that is
    # present, but will be removed.
    #
    # So the proposed order is:
    # - Add missing repos
    # - Add missing packages
    # - Remove extra packages
    # - Remove extra repos

    safe_kwargs = clean_kwargs(**kwargs)

    # Note that we expect that the information stored in list_XXX
    # match with the mod_XXX counterpart. If this is not the case the
    # recovery will be partial.

    # Report of everything that was added/removed, plus any errors
    res = {
        'pkgs': {'add': [], 'remove': []},
        'repos': {'add': [], 'remove': []},
        'comment': [],
    }

    # Add missing repositories
    repos = __salt__['pkg.list_repos'](**safe_kwargs)
    missing_repos = set(frozen_repos) - set(repos)
    for repo in missing_repos:
        try:
            # In Python 2 we cannot do advance destructuring, so we
            # need to create a temporary dictionary that will merge
            # all the parameters
            _tmp_kwargs = frozen_repos[repo].copy()
            _tmp_kwargs.update(safe_kwargs)
            __salt__['pkg.mod_repo'](repo, **_tmp_kwargs)
            res['repos']['add'].append(repo)
            log.info('Added missing repository %s', repo)
        except Exception as e:
            # Best-effort: record the failure and keep restoring
            msg = 'Error adding %s repository: %s'
            log.error(msg, repo, e)
            res['comment'].append(msg % (repo, e))

    # Add missing packages
    # NOTE: we can remove the `for` using `pkgs`. This will improve
    # performance, but I want to have a more detailed report of what
    # packages are installed or failed.
    pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
    missing_pkgs = set(frozen_pkgs) - set(pkgs)
    for pkg in missing_pkgs:
        try:
            __salt__['pkg.install'](name=pkg, **safe_kwargs)
            res['pkgs']['add'].append(pkg)
            log.info('Added missing package %s', pkg)
        except Exception as e:
            msg = 'Error adding %s package: %s'
            log.error(msg, pkg, e)
            res['comment'].append(msg % (pkg, e))

    # Remove extra packages
    pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
    extra_pkgs = set(pkgs) - set(frozen_pkgs)
    for pkg in extra_pkgs:
        try:
            __salt__['pkg.remove'](name=pkg, **safe_kwargs)
            res['pkgs']['remove'].append(pkg)
            log.info('Removed extra package %s', pkg)
        except Exception as e:
            msg = 'Error removing %s package: %s'
            log.error(msg, pkg, e)
            res['comment'].append(msg % (pkg, e))

    # Remove extra repositories
    repos = __salt__['pkg.list_repos'](**safe_kwargs)
    extra_repos = set(repos) - set(frozen_repos)
    for repo in extra_repos:
        try:
            __salt__['pkg.del_repo'](repo, **safe_kwargs)
            res['repos']['remove'].append(repo)
            log.info('Removed extra repository %s', repo)
        except Exception as e:
            msg = 'Error removing %s repository: %s'
            log.error(msg, repo, e)
            res['comment'].append(msg % (repo, e))

    return res
|
|
diff --git a/salt/modules/groupadd.py b/salt/modules/groupadd.py
|
|
index e2e1560ab0..15dec6e898 100644
|
|
--- a/salt/modules/groupadd.py
|
|
+++ b/salt/modules/groupadd.py
|
|
@@ -12,8 +12,12 @@ Manage groups on Linux, OpenBSD and NetBSD
|
|
# Import python libs
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
import logging
|
|
+import functools
|
|
+import os
|
|
|
|
from salt.ext import six
|
|
+import salt.utils.files
|
|
+import salt.utils.stringutils
|
|
try:
|
|
import grp
|
|
except ImportError:
|
|
@@ -40,6 +44,18 @@ def add(name, gid=None, system=False, root=None):
|
|
'''
|
|
Add the specified group
|
|
|
|
+ name
|
|
+ Name of the new group
|
|
+
|
|
+ gid
|
|
+ Use GID for the new group
|
|
+
|
|
+ system
|
|
+ Create a system account
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -51,11 +67,12 @@ def add(name, gid=None, system=False, root=None):
|
|
cmd.append('-g {0}'.format(gid))
|
|
if system and __grains__['kernel'] != 'OpenBSD':
|
|
cmd.append('-r')
|
|
- cmd.append(name)
|
|
|
|
if root is not None:
|
|
cmd.extend(('-R', root))
|
|
|
|
+ cmd.append(name)
|
|
+
|
|
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
|
|
return not ret['retcode']
|
|
@@ -65,34 +82,53 @@ def delete(name, root=None):
|
|
'''
|
|
Remove the named group
|
|
|
|
+ name
|
|
+ Name of the group to delete
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' group.delete foo
|
|
'''
|
|
- cmd = ['groupdel', name]
|
|
+ cmd = ['groupdel']
|
|
|
|
if root is not None:
|
|
cmd.extend(('-R', root))
|
|
|
|
+ cmd.append(name)
|
|
+
|
|
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
|
|
return not ret['retcode']
|
|
|
|
|
|
-def info(name):
|
|
+def info(name, root=None):
|
|
'''
|
|
Return information about a group
|
|
|
|
+ name
|
|
+ Name of the group
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' group.info foo
|
|
'''
|
|
+ if root is not None:
|
|
+ getgrnam = functools.partial(_getgrnam, root=root)
|
|
+ else:
|
|
+ getgrnam = functools.partial(grp.getgrnam)
|
|
+
|
|
try:
|
|
- grinfo = grp.getgrnam(name)
|
|
+ grinfo = getgrnam(name)
|
|
except KeyError:
|
|
return {}
|
|
else:
|
|
@@ -109,10 +145,16 @@ def _format_info(data):
|
|
'members': data.gr_mem}
|
|
|
|
|
|
-def getent(refresh=False):
|
|
+def getent(refresh=False, root=None):
|
|
'''
|
|
Return info on all groups
|
|
|
|
+ refresh
|
|
+ Force a refresh of group information
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -123,41 +165,74 @@ def getent(refresh=False):
|
|
return __context__['group.getent']
|
|
|
|
ret = []
|
|
- for grinfo in grp.getgrall():
|
|
+ if root is not None:
|
|
+ getgrall = functools.partial(_getgrall, root=root)
|
|
+ else:
|
|
+ getgrall = functools.partial(grp.getgrall)
|
|
+
|
|
+ for grinfo in getgrall():
|
|
ret.append(_format_info(grinfo))
|
|
__context__['group.getent'] = ret
|
|
return ret
|
|
|
|
|
|
def _chattrib(name, key, value, param, root=None):
    '''
    Change an attribute for a named group via groupmod.
    '''
    pre_info = info(name, root=root)
    if not pre_info:
        # Unknown group: nothing to change
        return False

    if value == pre_info[key]:
        # Already set: report success without running groupmod
        return True

    cmd = ['groupmod']
    if root is not None:
        cmd.extend(('-R', root))
    cmd.extend((param, value, name))

    __salt__['cmd.run'](cmd, python_shell=False)
    # Re-read the group entry to confirm the change took effect
    return info(name, root=root).get(key) == value
|
|
+
|
|
+
|
|
def chgid(name, gid, root=None):
|
|
'''
|
|
Change the gid for a named group
|
|
|
|
+ name
|
|
+ Name of the group to modify
|
|
+
|
|
+ gid
|
|
+ Change the group ID to GID
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' group.chgid foo 4376
|
|
'''
|
|
- pre_gid = __salt__['file.group_to_gid'](name)
|
|
- if gid == pre_gid:
|
|
- return True
|
|
- cmd = ['groupmod', '-g', gid, name]
|
|
-
|
|
- if root is not None:
|
|
- cmd.extend(('-R', root))
|
|
-
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_gid = __salt__['file.group_to_gid'](name)
|
|
- if post_gid != pre_gid:
|
|
- return post_gid == gid
|
|
- return False
|
|
+ return _chattrib(name, 'gid', gid, '-g', root=root)
|
|
|
|
|
|
def adduser(name, username, root=None):
|
|
'''
|
|
Add a user in the group.
|
|
|
|
+ name
|
|
+ Name of the group to modify
|
|
+
|
|
+ username
|
|
+ Username to add to the group
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -178,7 +253,7 @@ def adduser(name, username, root=None):
|
|
else:
|
|
cmd = ['gpasswd', '--add', username, name]
|
|
if root is not None:
|
|
- cmd.extend(('-Q', root))
|
|
+ cmd.extend(('--root', root))
|
|
else:
|
|
cmd = ['usermod', '-G', name, username]
|
|
if root is not None:
|
|
@@ -193,6 +268,15 @@ def deluser(name, username, root=None):
|
|
'''
|
|
Remove a user from the group.
|
|
|
|
+ name
|
|
+ Name of the group to modify
|
|
+
|
|
+ username
|
|
+ Username to delete from the group
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -216,7 +300,7 @@ def deluser(name, username, root=None):
|
|
else:
|
|
cmd = ['gpasswd', '--del', username, name]
|
|
if root is not None:
|
|
- cmd.extend(('-R', root))
|
|
+ cmd.extend(('--root', root))
|
|
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
|
|
elif __grains__['kernel'] == 'OpenBSD':
|
|
out = __salt__['cmd.run_stdout']('id -Gn {0}'.format(username),
|
|
@@ -239,6 +323,15 @@ def members(name, members_list, root=None):
|
|
'''
|
|
Replaces members of the group with a provided list.
|
|
|
|
+ name
|
|
+ Name of the group to modify
|
|
+
|
|
+ members_list
|
|
+ Username list to set into the group
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
salt '*' group.members foo 'user1,user2,user3,...'
|
|
@@ -259,7 +352,7 @@ def members(name, members_list, root=None):
|
|
else:
|
|
cmd = ['gpasswd', '--members', members_list, name]
|
|
if root is not None:
|
|
- cmd.extend(('-R', root))
|
|
+ cmd.extend(('--root', root))
|
|
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
|
|
elif __grains__['kernel'] == 'OpenBSD':
|
|
retcode = 1
|
|
@@ -284,3 +377,43 @@ def members(name, members_list, root=None):
|
|
return False
|
|
|
|
return not retcode
|
|
+
|
|
+
|
|
def _getgrnam(name, root=None):
    '''
    Alternative implementation of getgrnam that reads only
    <root>/etc/group, bypassing NSS.
    '''
    root = root or '/'
    group_file = os.path.join(root, 'etc/group')
    with salt.utils.files.fopen(group_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            fields = line.strip().split(':')
            if len(fields) < 4:
                log.debug('Ignoring group line: %s', line)
                continue
            if fields[0] != name:
                continue
            # Generate a getpwnam compatible output
            fields[2] = int(fields[2])
            fields[3] = fields[3].split(',') if fields[3] else []
            return grp.struct_group(fields)
    raise KeyError('getgrnam(): name not found: {}'.format(name))
|
|
+
|
|
+
|
|
def _getgrall(root=None):
    '''
    Alternative implementation of getgrall that reads only
    <root>/etc/group, bypassing NSS. Yields one entry per group.
    '''
    root = root or '/'
    group_file = os.path.join(root, 'etc/group')
    with salt.utils.files.fopen(group_file) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            fields = line.strip().split(':')
            if len(fields) < 4:
                log.debug('Ignoring group line: %s', line)
                continue
            # Generate a getgrall compatible output
            fields[2] = int(fields[2])
            fields[3] = fields[3].split(',') if fields[3] else []
            yield grp.struct_group(fields)
|
|
diff --git a/salt/modules/kubeadm.py b/salt/modules/kubeadm.py
|
|
new file mode 100644
|
|
index 0000000000..2b1e7906a1
|
|
--- /dev/null
|
|
+++ b/salt/modules/kubeadm.py
|
|
@@ -0,0 +1,1265 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2019 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:maturity: new
|
|
+:depends: None
|
|
+:platform: Linux
|
|
+'''
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import json
|
|
+import logging
|
|
+import re
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+from salt.ext.six.moves import zip
|
|
+import salt.utils.files
|
|
+
|
|
+ADMIN_CFG = '/etc/kubernetes/admin.conf'
|
|
+
|
|
+log = logging.getLogger(__name__)
|
|
+
|
|
+__virtualname__ = 'kubeadm'
|
|
+
|
|
+# Define not exported variables from Salt, so this can be imported as
|
|
+# a normal module
|
|
+try:
|
|
+ __salt__
|
|
+except NameError:
|
|
+ __salt__ = {}
|
|
+
|
|
+
|
|
+def _api_server_endpoint(config=None):
|
|
+ '''
|
|
+ Return the API server endpoint
|
|
+ '''
|
|
+ config = config if config else ADMIN_CFG
|
|
+ endpoint = None
|
|
+ try:
|
|
+ with salt.utils.files.fopen(config, 'r') as fp_:
|
|
+ endpoint = re.search(r'^\s*server: https?://(.*)$',
|
|
+ fp_.read(),
|
|
+ re.MULTILINE).group(1)
|
|
+ except Exception:
|
|
+ # Any error or exception is mapped to None
|
|
+ pass
|
|
+ return endpoint
|
|
+
|
|
+
|
|
+def _token(create_if_needed=True):
|
|
+ '''
|
|
+ Return a valid bootstrap token
|
|
+ '''
|
|
+ tokens = token_list()
|
|
+ if not tokens:
|
|
+ token_create(description='Token created by kubeadm salt module')
|
|
+ tokens = token_list()
|
|
+ # We expect that the token is valid for authentication and signing
|
|
+ return tokens[0]['token']
|
|
+
|
|
+
|
|
+def _discovery_token_ca_cert_hash():
|
|
+ cmd = ['openssl', 'x509', '-pubkey', '-in', '/etc/kubernetes/pki/ca.crt',
|
|
+ '|', 'openssl', 'rsa', '-pubin', '-outform', 'der', '2>/dev/null',
|
|
+ '|', 'openssl', 'dgst', '-sha256', '-hex',
|
|
+ '|', 'sed', "'s/^.* //'"]
|
|
+ result = __salt__['cmd.run_all'](' '.join(cmd), python_shell=True)
|
|
+ if result['retcode']:
|
|
+ raise CommandExecutionError(result['stderr'])
|
|
+
|
|
+ return 'sha256:{}'.format(result['stdout'])
|
|
+
|
|
+
|
|
+def join_params(create_if_needed=False):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Return the parameters required for joining into the cluster
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.join_params
|
|
+ salt '*' kubeadm.join_params create_if_needed=True
|
|
+
|
|
+ '''
|
|
+
|
|
+ params = {
|
|
+ 'api-server-endpoint': _api_server_endpoint(),
|
|
+ 'token': _token(create_if_needed),
|
|
+ 'discovery-token-ca-cert-hash': _discovery_token_ca_cert_hash(),
|
|
+ }
|
|
+ return params
|
|
+
|
|
+
|
|
+def version(kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Return the version of kubeadm
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.version
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'version']
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ cmd.extend(['--output', 'json'])
|
|
+
|
|
+ return json.loads(__salt__['cmd.run_stdout'](cmd))
|
|
+
|
|
+
|
|
+def _cmd(cmd):
|
|
+ '''Utility function to run commands.'''
|
|
+ result = __salt__['cmd.run_all'](cmd)
|
|
+ if result['retcode']:
|
|
+ raise CommandExecutionError(result['stderr'])
|
|
+ return result['stdout']
|
|
+
|
|
+
|
|
+def token_create(token=None, config=None, description=None,
|
|
+ groups=None, ttl=None, usages=None, kubeconfig=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Create bootstrap tokens on the server
|
|
+
|
|
+ token
|
|
+ Token to write, if None one will be generated. The token must
|
|
+ match a regular expression, that by default is
|
|
+ [a-z0-9]{6}.[a-z0-9]{16}
|
|
+
|
|
+ config
|
|
+ Path to kubeadm configuration file
|
|
+
|
|
+ description
|
|
+ A human friendly description of how this token is used
|
|
+
|
|
+ groups
|
|
+ List of extra groups that this token will authenticate, default
|
|
+ to ['system:bootstrappers:kubeadm:default-node-token']
|
|
+
|
|
+ ttl
|
|
+ The duration before the token is automatically deleted (1s, 2m,
|
|
+ 3h). If set to '0' the token will never expire. Default value
|
|
+ is 24h0m0s
|
|
+
|
|
+ usages
|
|
+ Describes the ways in which this token can be used. The default
|
|
+ value is ['signing', 'authentication']
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.token_create
|
|
+ salt '*' kubeadm.token_create a1b2c.0123456789abcdef
|
|
+ salt '*' kubeadm.token_create ttl='6h'
|
|
+ salt '*' kubeadm.token_create usages="['signing']"
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'token', 'create']
|
|
+ if token:
|
|
+ cmd.append(token)
|
|
+
|
|
+ parameters = [('config', config), ('description', description),
|
|
+ ('groups', groups), ('ttl', ttl), ('usages', usages),
|
|
+ ('kubeconfig', kubeconfig), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ if parameter in ('groups', 'usages'):
|
|
+ cmd.extend(['--{}'.format(parameter), json.dumps(value)])
|
|
+ else:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def token_delete(token, kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Delete bootstrap tokens on the server
|
|
+
|
|
+ token
|
|
+ Token to delete. The token must
|
|
+ match a regular expression, that by default is
|
|
+ [a-z0-9]{6}.[a-z0-9]{16}
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.token_delete a1b2c
|
|
+ salt '*' kubeadm.token_create a1b2c.0123456789abcdef
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'token', 'delete', token]
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return bool(_cmd(cmd))
|
|
+
|
|
+
|
|
+def token_generate(kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Generate and return a bootstrap token, but do not create it on the
|
|
+ server
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.token_generate
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'token', 'generate']
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def token_list(kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ List bootstrap tokens on the server
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.token_list
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'token', 'list']
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ lines = _cmd(cmd).splitlines()
|
|
+
|
|
+ # Find the header and parse it. We do not need to validate the
|
|
+ # content, as the regex will take care of future changes.
|
|
+ header = lines.pop(0)
|
|
+ header = [i.lower() for i in re.findall(r'(\w+(?:\s\w+)*)', header)]
|
|
+
|
|
+ tokens = []
|
|
+ for line in lines:
|
|
+ # TODO(aplanas): descriptions with multiple spaces can break
|
|
+ # the parser.
|
|
+ values = re.findall(r'(\S+(?:\s\S+)*)', line)
|
|
+ if len(header) != len(values):
|
|
+ log.error('Error parsing line: {}'.format(line))
|
|
+ continue
|
|
+ tokens.append({key: value for key, value in zip(header, values)})
|
|
+ return tokens
|
|
+
|
|
+
|
|
+def alpha_certs_renew(rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Renews certificates for a Kubernetes cluster
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.alpha_certs_renew
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'alpha', 'certs', 'renew']
|
|
+
|
|
+ parameters = [('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def alpha_kubeconfig_user(client_name,
|
|
+ apiserver_advertise_address=None,
|
|
+ apiserver_bind_port=None, cert_dir=None,
|
|
+ org=None, token=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Outputs a kubeconfig file for an additional user
|
|
+
|
|
+ client_name
|
|
+ The name of the user. It will be used as the CN if client
|
|
+ certificates are created
|
|
+
|
|
+ apiserver_advertise_address
|
|
+ The IP address the API server is accessible on
|
|
+
|
|
+ apiserver_bind_port
|
|
+ The port the API server is accessible on (default 6443)
|
|
+
|
|
+ cert_dir
|
|
+ The path where certificates are stored (default
|
|
+ "/etc/kubernetes/pki")
|
|
+
|
|
+ org
|
|
+ The organization of the client certificate
|
|
+
|
|
+ token
|
|
+ The token that should be used as the authentication mechanism for
|
|
+ this kubeconfig, instead of client certificates
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.alpha_kubeconfig_user client_name=user
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'alpha', 'kubeconfig', 'user', '--client-name',
|
|
+ client_name]
|
|
+
|
|
+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
|
|
+ ('apiserver-bind-port', apiserver_bind_port),
|
|
+ ('cert-dir', cert_dir), ('org', org),
|
|
+ ('token', token), ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def alpha_kubelet_config_download(kubeconfig=None, kubelet_version=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Downloads the kubelet configuration from the cluster ConfigMap
|
|
+ kubelet-config-1.X
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ kubelet_version
|
|
+ The desired version for the kubelet
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.alpha_kubelet_config_download
|
|
+ salt '*' kubeadm.alpha_kubelet_config_download kubelet_version='1.14.0'
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig),
|
|
+ ('kubelet-version', kubelet_version),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def alpha_kubelet_config_enable_dynamic(node_name, kubeconfig=None,
|
|
+ kubelet_version=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Enables or updates dynamic kubelet configuration for a node
|
|
+
|
|
+ node_name
|
|
+ Name of the node that should enable the dynamic kubelet
|
|
+ configuration
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ kubelet_version
|
|
+ The desired version for the kubelet
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.alpha_kubelet_config_enable_dynamic node-1
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
|
|
+ '--node-name', node_name]
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig),
|
|
+ ('kubelet-version', kubelet_version),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def alpha_selfhosting_pivot(cert_dir=None, config=None,
|
|
+ kubeconfig=None,
|
|
+ store_certs_in_secrets=False, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Converts a static Pod-hosted control plane into a self-hosted one
|
|
+
|
|
+ cert_dir
|
|
+ The path where certificates are stored (default
|
|
+ "/etc/kubernetes/pki")
|
|
+
|
|
+ config
|
|
+ Path to kubeadm configuration file
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ store_certs_in_secrets
|
|
+ Enable storing certs in secrets
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.alpha_selfhost_pivot
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
|
|
+
|
|
+ if store_certs_in_secrets:
|
|
+ cmd.append('--store-certs-in-secrets')
|
|
+
|
|
+ parameters = [('cert-dir', cert_dir),
|
|
+ ('config', config),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_images_list(config=None, feature_gates=None,
|
|
+ kubernetes_version=None, kubeconfig=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Print a list of images kubeadm will use
|
|
+
|
|
+ config
|
|
+ Path to kubeadm configuration file
|
|
+
|
|
+ feature_gates
|
|
+ A set of key=value pairs that describe feature gates for
|
|
+ various features
|
|
+
|
|
+ kubernetes_version
|
|
+ Choose a specific Kubernetes version for the control plane
|
|
+ (default "stable-1")
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_images_list
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'images', 'list']
|
|
+
|
|
+ parameters = [('config', config),
|
|
+ ('feature-gates', feature_gates),
|
|
+ ('kubernetes-version', kubernetes_version),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd).splitlines()
|
|
+
|
|
+
|
|
+def config_images_pull(config=None, cri_socket=None,
|
|
+ feature_gates=None, kubernetes_version=None,
|
|
+ kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Pull images used by kubeadm
|
|
+
|
|
+ config
|
|
+ Path to kubeadm configuration file
|
|
+
|
|
+ cri_socket
|
|
+ Path to the CRI socket to connect
|
|
+
|
|
+ feature_gates
|
|
+ A set of key=value pairs that describe feature gates for
|
|
+ various features
|
|
+
|
|
+ kubernetes_version
|
|
+ Choose a specific Kubernetes version for the control plane
|
|
+ (default "stable-1")
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_images_pull
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'images', 'pull']
|
|
+
|
|
+ parameters = [('config', config),
|
|
+ ('cri-socket', cri_socket),
|
|
+ ('feature-gates', feature_gates),
|
|
+ ('kubernetes-version', kubernetes_version),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ prefix = '[config/images] Pulled '
|
|
+ return [(line.replace(prefix, '')) for line in _cmd(cmd).splitlines()]
|
|
+
|
|
+
|
|
+def config_migrate(old_config, new_config=None, kubeconfig=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Read an older version of the kubeadm configuration API types from
|
|
+ a file, and output the similar config object for the newer version
|
|
+
|
|
+ old_config
|
|
+ Path to the kubeadm config file that is using the old API
|
|
+ version and should be converted
|
|
+
|
|
+ new_config
|
|
+ Path to the resulting equivalent kubeadm config file using the
|
|
+ new API version. If not specified the output will be returned
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_migrate /oldconfig.cfg
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'migrate', '--old-config', old_config]
|
|
+
|
|
+ parameters = [('new-config', new_config),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_print_init_defaults(component_configs=None,
|
|
+ kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Return default init configuration, that can be used for 'kubeadm
|
|
+ init'
|
|
+
|
|
+ component_config
|
|
+ A comma-separated list for component config API object to print
|
|
+ the default values for (valid values: KubeProxyConfiguration,
|
|
+ KubeletConfiguration)
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_print_init_defaults
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'print', 'init-defaults']
|
|
+
|
|
+ parameters = [('component-configs', component_configs),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_print_join_defaults(component_configs=None,
|
|
+ kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Return default join configuration, that can be used for 'kubeadm
|
|
+ join'
|
|
+
|
|
+ component_config
|
|
+ A comma-separated list for component config API object to print
|
|
+ the default values for (valid values: KubeProxyConfiguration,
|
|
+ KubeletConfiguration)
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_print_join_defaults
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'print', 'join-defaults']
|
|
+
|
|
+ parameters = [('component-configs', component_configs),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_upload_from_file(config, kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Upload a configuration file to the in-cluster ConfigMap for
|
|
+ kubeadm configuration
|
|
+
|
|
+ config
|
|
+ Path to a kubeadm configuration file
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_upload_from_file /config.cfg
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'upload', 'from-file', '--config', config]
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_upload_from_flags(apiserver_advertise_address=None,
|
|
+ apiserver_bind_port=None,
|
|
+ apiserver_cert_extra_sans=None,
|
|
+ cert_dir=None, cri_socket=None,
|
|
+ feature_gates=None,
|
|
+ kubernetes_version=None, node_name=None,
|
|
+ pod_network_cidr=None, service_cidr=None,
|
|
+ service_dns_domain=None, kubeconfig=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Create the in-cluster configuration file for the first time using
|
|
+ flags
|
|
+
|
|
+ apiserver_advertise_address
|
|
+ The IP address the API server will advertise it's listening on
|
|
+
|
|
+ apiserver_bind_port
|
|
+ The port the API server is accessible on (default 6443)
|
|
+
|
|
+ apiserver_cert_extra_sans
|
|
+ Optional extra Subject Alternative Names (SANs) to use for the
|
|
+ API Server serving certificate
|
|
+
|
|
+ cert_dir
|
|
+ The path where to save and store the certificates (default
|
|
+ "/etc/kubernetes/pki")
|
|
+
|
|
+ cri_socket
|
|
+ Path to the CRI socket to connect
|
|
+
|
|
+ feature_gates
|
|
+ A set of key=value pairs that describe feature gates for
|
|
+ various features
|
|
+
|
|
+ kubernetes_version
|
|
+ Choose a specific Kubernetes version for the control plane
|
|
+ (default "stable-1")
|
|
+
|
|
+ node_name
|
|
+ Specify the node name
|
|
+
|
|
+ pod_network_cidr
|
|
+ Specify range of IP addresses for the pod network
|
|
+
|
|
+ service_cidr
|
|
+ Use alternative range of IP address for service VIPs (default
|
|
+ "10.96.0.0/12")
|
|
+
|
|
+ service_dns_domain
|
|
+ Use alternative domain for services (default "cluster.local")
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_upload_from_flags
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'upload', 'from-flags']
|
|
+
|
|
+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
|
|
+ ('apiserver-bind-port', apiserver_bind_port),
|
|
+ ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
|
|
+ ('cert-dir', cert_dir),
|
|
+ ('cri-socket', cri_socket),
|
|
+ ('feature-gates', feature_gates),
|
|
+ ('kubernetes-version', kubernetes_version),
|
|
+ ('node-name', node_name),
|
|
+ ('pod-network-cidr', pod_network_cidr),
|
|
+ ('service-cidr', service_cidr),
|
|
+ ('service-dns-domain', service_dns_domain),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def config_view(kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ View the kubeadm configuration stored inside the cluster
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.config_view
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'config', 'view']
|
|
+
|
|
+ parameters = [('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+def init(apiserver_advertise_address=None, apiserver_bind_port=None,
|
|
+ apiserver_cert_extra_sans=None, cert_dir=None,
|
|
+ certificate_key=None, config=None, cri_socket=None,
|
|
+ experimental_upload_certs=False, feature_gates=None,
|
|
+ ignore_preflight_errors=None, image_repository=None,
|
|
+ kubernetes_version=None, node_name=None,
|
|
+ pod_network_cidr=None, service_cidr=None,
|
|
+ service_dns_domain=None, skip_certificate_key_print=False,
|
|
+ skip_phases=None, skip_token_print=False, token=None,
|
|
+ token_ttl=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Command to set up the Kubernetes control plane
|
|
+
|
|
+ apiserver_advertise_address
|
|
+ The IP address the API server will advertise it's listening on
|
|
+
|
|
+ apiserver_bind_port
|
|
+ The port the API server is accessible on (default 6443)
|
|
+
|
|
+ apiserver_cert_extra_sans
|
|
+ Optional extra Subject Alternative Names (SANs) to use for the
|
|
+ API Server serving certificate
|
|
+
|
|
+ cert_dir
|
|
+ The path where to save and store the certificates (default
|
|
+ "/etc/kubernetes/pki")
|
|
+
|
|
+ certificate_key
|
|
+ Key used to encrypt the control-plane certificates in the
|
|
+ kubeadm-certs Secret
|
|
+
|
|
+ config
|
|
+ Path to a kubeadm configuration file
|
|
+
|
|
+ cri_socket
|
|
+ Path to the CRI socket to connect
|
|
+
|
|
+ experimental_upload_certs
|
|
+ Upload control-plane certificate to the kubeadm-certs Secret
|
|
+
|
|
+ feature_gates
|
|
+ A set of key=value pairs that describe feature gates for
|
|
+ various features
|
|
+
|
|
+ ignore_preflight_errors
|
|
+ A list of checks whose errors will be shown as warnings
|
|
+
|
|
+ image_repository
|
|
+ Choose a container registry to pull control plane images from
|
|
+
|
|
+ kubernetes_version
|
|
+ Choose a specific Kubernetes version for the control plane
|
|
+ (default "stable-1")
|
|
+
|
|
+ node_name
|
|
+ Specify the node name
|
|
+
|
|
+ pod_network_cidr
|
|
+ Specify range of IP addresses for the pod network
|
|
+
|
|
+ service_cidr
|
|
+ Use alternative range of IP address for service VIPs (default
|
|
+ "10.96.0.0/12")
|
|
+
|
|
+ service_dns_domain
|
|
+ Use alternative domain for services (default "cluster.local")
|
|
+
|
|
+ skip_certificate_key_print
|
|
+ Don't print the key used to encrypt the control-plane
|
|
+ certificates
|
|
+
|
|
+ skip_phases
|
|
+ List of phases to be skipped
|
|
+
|
|
+ skip_token_print
|
|
+ Skip printing of the default bootstrap token generated by
|
|
+ 'kubeadm init'
|
|
+
|
|
+ token
|
|
+ The token to use for establishing bidirectional trust between
|
|
+ nodes and control-plane nodes. The token must match a regular
|
|
+ expression, that by default is [a-z0-9]{6}.[a-z0-9]{16}
|
|
+
|
|
+ token_ttl
|
|
+ The duration before the token is automatically deleted (1s, 2m,
|
|
+ 3h). If set to '0' the token will never expire. Default value
|
|
+ is 24h0m0s
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.init pod_network_cidr='10.244.0.0/16'
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'init']
|
|
+
|
|
+ if experimental_upload_certs:
|
|
+ cmd.append('--experimental-upload-certs')
|
|
+ if skip_certificate_key_print:
|
|
+ cmd.append('--skip-certificate-key-print')
|
|
+ if skip_token_print:
|
|
+ cmd.append('--skip-token-print')
|
|
+
|
|
+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
|
|
+ ('apiserver-bind-port', apiserver_bind_port),
|
|
+ ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
|
|
+ ('cert-dir', cert_dir),
|
|
+ ('certificate-key', certificate_key),
|
|
+ ('config', config),
|
|
+ ('cri-socket', cri_socket),
|
|
+ ('feature-gates', feature_gates),
|
|
+ ('ignore-preflight-errors', ignore_preflight_errors),
|
|
+ ('image-repository', image_repository),
|
|
+ ('kubernetes-version', kubernetes_version),
|
|
+ ('node-name', node_name),
|
|
+ ('pod-network-cidr', pod_network_cidr),
|
|
+ ('service-cidr', service_cidr),
|
|
+ ('service-dns-domain', service_dns_domain),
|
|
+ ('skip-phases', skip_phases),
|
|
+ ('token', token),
|
|
+ ('token-ttl', token_ttl),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+# TODO(aplanas):
|
|
+# * init_phase_addon_all
|
|
+# * init_phase_addon_coredns
|
|
+# * init_phase_addon_kube_proxy
|
|
+# * init_phase_bootstrap_token
|
|
+# * init_phase_certs_all
|
|
+# * init_phase_certs_apiserver
|
|
+# * init_phase_certs_apiserver_etcd_client
|
|
+# * init_phase_certs_apiserver_kubelet_client
|
|
+# * init_phase_certs_ca
|
|
+# * init_phase_certs_etcd_ca
|
|
+# * init_phase_certs_etcd_healthcheck_client
|
|
+# * init_phase_certs_etcd_peer
|
|
+# * init_phase_certs_etcd_server
|
|
+# * init_phase_certs_front_proxy_ca
|
|
+# * init_phase_certs_front_proxy_client
|
|
+# * init_phase_certs_sa
|
|
+# * init_phase_control_plane_all
|
|
+# * init_phase_control_plane_apiserver
|
|
+# * init_phase_control_plane_controller_manager
|
|
+# * init_phase_control_plane_scheduler
|
|
+# * init_phase_etcd_local
|
|
+# * init_phase_kubeconfig_admin
|
|
+# * init_phase_kubeconfig_all
|
|
+# * init_phase_kubeconfig_controller_manager
|
|
+# * init_phase_kubeconfig_kubelet
|
|
+# * init_phase_kubeconfig_scheduler
|
|
+# * init_phase_kubelet_start
|
|
+# * init_phase_mark_control_plane
|
|
+# * init_phase_preflight
|
|
+# * init_phase_upload_certs
|
|
+# * init_phase_upload_config_all
|
|
+# * init_phase_upload_config_kubeadm
|
|
+# * init_phase_upload_config_kubelet
|
|
+
|
|
+
|
|
+def join(api_server_endpoint=None,
|
|
+ apiserver_advertise_address=None, apiserver_bind_port=None,
|
|
+ certificate_key=None, config=None, cri_socket=None,
|
|
+ discovery_file=None, discovery_token=None,
|
|
+ discovery_token_ca_cert_hash=None,
|
|
+ discovery_token_unsafe_skip_ca_verification=False,
|
|
+ experimental_control_plane=False,
|
|
+ ignore_preflight_errors=None, node_name=None,
|
|
+ skip_phases=None, tls_bootstrap_token=None, token=None,
|
|
+ rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Command to join to an existing cluster
|
|
+
|
|
+ api_server_endpoint
|
|
+ IP address or domain name and port of the API Server
|
|
+
|
|
+ apiserver_advertise_address
|
|
+ If the node should host a new control plane instance, the IP
|
|
+ address the API Server will advertise it's listening on
|
|
+
|
|
+ apiserver_bind_port
|
|
+ If the node should host a new control plane instance, the port
|
|
+ the API Server to bind to (default 6443)
|
|
+
|
|
+ certificate_key
|
|
+ Use this key to decrypt the certificate secrets uploaded by
|
|
+ init
|
|
+
|
|
+ config
|
|
+ Path to a kubeadm configuration file
|
|
+
|
|
+ cri_socket
|
|
+ Path to the CRI socket to connect
|
|
+
|
|
+ discovery_file
|
|
+ For file-based discovery, a file or URL from which to load
|
|
+ cluster information
|
|
+
|
|
+ discovery_token
|
|
+ For token-based discovery, the token used to validate cluster
|
|
+ information fetched from the API Server
|
|
+
|
|
+ discovery_token_ca_cert_hash
|
|
+ For token-based discovery, validate that the root CA public key
|
|
+ matches this hash (format: "<type>:<value>")
|
|
+
|
|
+ discovery_token_unsafe_skip_ca_verification
|
|
+ For token-based discovery, allow joining without
|
|
+ 'discovery-token-ca-cert-hash' pinning
|
|
+
|
|
+ experimental_control_plane
|
|
+ Create a new control plane instance on this node
|
|
+
|
|
+ ignore_preflight_errors
|
|
+ A list of checks whose errors will be shown as warnings
|
|
+
|
|
+ node_name
|
|
+ Specify the node name
|
|
+
|
|
+ skip_phases
|
|
+ List of phases to be skipped
|
|
+
|
|
+ tls_bootstrap_token
|
|
+ Specify the token used to temporarily authenticate with the
|
|
+ Kubernetes Control Plane while joining the node
|
|
+
|
|
+ token
|
|
+ Use this token for both discovery-token and tls-bootstrap-token
|
|
+ when those values are not provided
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.join 10.160.65.165:6443 token='token'
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'join']
|
|
+
|
|
+ if api_server_endpoint:
|
|
+ cmd.append(api_server_endpoint)
|
|
+ if discovery_token_unsafe_skip_ca_verification:
|
|
+ cmd.append('--discovery-token-unsafe-skip-ca-verification')
|
|
+ if experimental_control_plane:
|
|
+ cmd.append('--experimental-control-plane')
|
|
+
|
|
+ parameters = [('apiserver-advertise-address', apiserver_advertise_address),
|
|
+ ('apiserver-bind-port', apiserver_bind_port),
|
|
+ ('certificate-key', certificate_key),
|
|
+ ('config', config),
|
|
+ ('cri-socket', cri_socket),
|
|
+ ('discovery-file', discovery_file),
|
|
+ ('discovery-token', discovery_token),
|
|
+ ('discovery-token-ca-cert-hash',
|
|
+ discovery_token_ca_cert_hash),
|
|
+ ('ignore-preflight-errors', ignore_preflight_errors),
|
|
+ ('node-name', node_name),
|
|
+ ('skip-phases', skip_phases),
|
|
+ ('tls-bootstrap-token', tls_bootstrap_token),
|
|
+ ('token', token),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+# TODO(aplanas):
|
|
+# * join_phase_control_plane_join_all
|
|
+# * join_phase_control_plane_join_etcd
|
|
+# * join_phase_control_plane_join_mark_control_plane
|
|
+# * join_phase_control_plane_join_update_status
|
|
+# * join_phase_control_plane_prepare_all
|
|
+# * join_phase_control_plane_prepare_certs
|
|
+# * join_phase_control_plane_prepare_control_plane
|
|
+# * join_phase_control_plane_prepare_download_certs
|
|
+# * join_phase_control_plane_prepare_kubeconfig
|
|
+# * join_phase_kubelet_start
|
|
+# * join_phase_preflight
|
|
+
|
|
+
|
|
+def reset(cert_dir=None, cri_socket=None,
|
|
+ ignore_preflight_errors=None, kubeconfig=None, rootfs=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Revert any changes made to this host by 'kubeadm init' or 'kubeadm
|
|
+ join'
|
|
+
|
|
+ cert_dir
|
|
+ The path to the directory where the certificates are stored
|
|
+ (default "/etc/kubernetes/pki")
|
|
+
|
|
+ cri_socket
|
|
+ Path to the CRI socket to connect
|
|
+
|
|
+ ignore_preflight_errors
|
|
+ A list of checks whose errors will be shown as warnings
|
|
+
|
|
+ kubeconfig
|
|
+ The kubeconfig file to use when talking to the cluster. The
|
|
+ default values in /etc/kubernetes/admin.conf
|
|
+
|
|
+ rootfs
|
|
+ The path to the real host root filesystem
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' kubeadm.reset
|
|
+
|
|
+ '''
|
|
+ cmd = ['kubeadm', 'reset', '--force']
|
|
+
|
|
+ parameters = [('cert-dir', cert_dir),
|
|
+ ('cri-socket', cri_socket),
|
|
+ ('ignore-preflight-errors', ignore_preflight_errors),
|
|
+ ('kubeconfig', kubeconfig),
|
|
+ ('rootfs', rootfs)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ return _cmd(cmd)
|
|
+
|
|
+
|
|
+# TODO(aplanas):
|
|
+# * upgrade_apply
|
|
+# * upgrade_diff
|
|
+# * upgrade_node
|
|
+# * upgrade_plan
|
|
diff --git a/salt/modules/linux_lvm.py b/salt/modules/linux_lvm.py
|
|
index 003d6c0b06..0a975324af 100644
|
|
--- a/salt/modules/linux_lvm.py
|
|
+++ b/salt/modules/linux_lvm.py
|
|
@@ -64,17 +64,21 @@ def fullversion():
|
|
return ret
|
|
|
|
|
|
-def pvdisplay(pvname='', real=False):
|
|
+def pvdisplay(pvname='', real=False, quiet=False):
|
|
'''
|
|
Return information about the physical volume(s)
|
|
|
|
pvname
|
|
physical device name
|
|
+
|
|
real
|
|
dereference any symlinks and report the real device
|
|
|
|
.. versionadded:: 2015.8.7
|
|
|
|
+ quiet
|
|
+ if the physical volume is not present, do not show any error
|
|
+
|
|
|
|
CLI Examples:
|
|
|
|
@@ -87,7 +91,8 @@ def pvdisplay(pvname='', real=False):
|
|
cmd = ['pvdisplay', '-c']
|
|
if pvname:
|
|
cmd.append(pvname)
|
|
- cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
+ cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False,
|
|
+ ignore_retcode=quiet)
|
|
|
|
if cmd_ret['retcode'] != 0:
|
|
return {}
|
|
@@ -118,10 +123,16 @@ def pvdisplay(pvname='', real=False):
|
|
return ret
|
|
|
|
|
|
-def vgdisplay(vgname=''):
|
|
+def vgdisplay(vgname='', quiet=False):
|
|
'''
|
|
Return information about the volume group(s)
|
|
|
|
+ vgname
|
|
+ volume group name
|
|
+
|
|
+ quiet
|
|
+ if the volume group is not present, do not show any error
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -133,7 +144,8 @@ def vgdisplay(vgname=''):
|
|
cmd = ['vgdisplay', '-c']
|
|
if vgname:
|
|
cmd.append(vgname)
|
|
- cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
+ cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False,
|
|
+ ignore_retcode=quiet)
|
|
|
|
if cmd_ret['retcode'] != 0:
|
|
return {}
|
|
@@ -167,6 +179,12 @@ def lvdisplay(lvname='', quiet=False):
|
|
'''
|
|
Return information about the logical volume(s)
|
|
|
|
+ lvname
|
|
+ logical device name
|
|
+
|
|
+ quiet
|
|
+ if the logical volume is not present, do not show any error
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -178,10 +196,8 @@ def lvdisplay(lvname='', quiet=False):
|
|
cmd = ['lvdisplay', '-c']
|
|
if lvname:
|
|
cmd.append(lvname)
|
|
- if quiet:
|
|
- cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet')
|
|
- else:
|
|
- cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
|
+ cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False,
|
|
+ ignore_retcode=quiet)
|
|
|
|
if cmd_ret['retcode'] != 0:
|
|
return {}
|
|
@@ -230,7 +246,7 @@ def pvcreate(devices, override=True, **kwargs):
|
|
for device in devices:
|
|
if not os.path.exists(device):
|
|
raise CommandExecutionError('{0} does not exist'.format(device))
|
|
- if not pvdisplay(device):
|
|
+ if not pvdisplay(device, quiet=True):
|
|
cmd.append(device)
|
|
elif not override:
|
|
raise CommandExecutionError('Device "{0}" is already an LVM physical volume.'.format(device))
|
|
@@ -295,7 +311,7 @@ def pvremove(devices, override=True):
|
|
|
|
# Verify pvcremove was successful
|
|
for device in devices:
|
|
- if pvdisplay(device):
|
|
+ if pvdisplay(device, quiet=True):
|
|
raise CommandExecutionError('Device "{0}" was not affected.'.format(device))
|
|
|
|
return True
|
|
diff --git a/salt/modules/mac_brew_pkg.py b/salt/modules/mac_brew_pkg.py
|
|
index c4507d8267..0152908fb6 100644
|
|
--- a/salt/modules/mac_brew_pkg.py
|
|
+++ b/salt/modules/mac_brew_pkg.py
|
|
@@ -276,7 +276,7 @@ def remove(name=None, pkgs=None, **kwargs):
|
|
return ret
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
Update the homebrew package repository.
|
|
|
|
@@ -459,7 +459,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
|
|
return ret
|
|
|
|
|
|
-def upgrade_available(pkg):
|
|
+def upgrade_available(pkg, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -472,7 +472,7 @@ def upgrade_available(pkg):
|
|
return pkg in list_upgrades()
|
|
|
|
|
|
-def upgrade(refresh=True):
|
|
+def upgrade(refresh=True, **kwargs):
|
|
'''
|
|
Upgrade outdated, unpinned brews.
|
|
|
|
@@ -517,7 +517,7 @@ def upgrade(refresh=True):
|
|
return ret
|
|
|
|
|
|
-def info_installed(*names):
|
|
+def info_installed(*names, **kwargs):
|
|
'''
|
|
Return the information of the named package(s) installed on the system.
|
|
|
|
diff --git a/salt/modules/mac_portspkg.py b/salt/modules/mac_portspkg.py
|
|
index 78a38d54a9..d403d0e29b 100644
|
|
--- a/salt/modules/mac_portspkg.py
|
|
+++ b/salt/modules/mac_portspkg.py
|
|
@@ -376,7 +376,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
|
|
return _list('outdated')
|
|
|
|
|
|
-def upgrade_available(pkg, refresh=True):
|
|
+def upgrade_available(pkg, refresh=True, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -389,7 +389,7 @@ def upgrade_available(pkg, refresh=True):
|
|
return pkg in list_upgrades(refresh=refresh)
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
Update ports with ``port selfupdate``
|
|
|
|
@@ -405,7 +405,7 @@ def refresh_db():
|
|
return salt.utils.mac_utils.execute_return_success(cmd)
|
|
|
|
|
|
-def upgrade(refresh=True): # pylint: disable=W0613
|
|
+def upgrade(refresh=True, **kwargs): # pylint: disable=W0613
|
|
'''
|
|
Run a full upgrade using MacPorts 'port upgrade outdated'
|
|
|
|
diff --git a/salt/modules/mdadm_raid.py b/salt/modules/mdadm_raid.py
|
|
index 829f4cdd24..1581a558b6 100644
|
|
--- a/salt/modules/mdadm_raid.py
|
|
+++ b/salt/modules/mdadm_raid.py
|
|
@@ -247,7 +247,7 @@ def create(name,
|
|
'-v',
|
|
'-l', six.text_type(level),
|
|
] + opts + [
|
|
- '-e', metadata,
|
|
+ '-e', six.text_type(metadata),
|
|
'-n', six.text_type(raid_devices),
|
|
] + devices
|
|
|
|
@@ -360,17 +360,25 @@ def assemble(name,
|
|
return __salt__['cmd.run'](cmd, python_shell=False)
|
|
|
|
|
|
-def examine(device):
|
|
+def examine(device, quiet=False):
|
|
'''
|
|
Show detail for a specified RAID component device
|
|
|
|
+ device
|
|
+ Device to examine, that is part of the RAID
|
|
+
|
|
+ quiet
|
|
+ If the device is not part of the RAID, do not show any error
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' raid.examine '/dev/sda1'
|
|
'''
|
|
- res = __salt__['cmd.run_stdout']('mdadm -Y -E {0}'.format(device), output_loglevel='trace', python_shell=False)
|
|
+ res = __salt__['cmd.run_stdout']('mdadm -Y -E {0}'.format(device),
|
|
+ python_shell=False,
|
|
+ ignore_retcode=quiet)
|
|
ret = {}
|
|
|
|
for line in res.splitlines():
|
|
diff --git a/salt/modules/mount.py b/salt/modules/mount.py
|
|
index e807b1729e..f2737b9a5c 100644
|
|
--- a/salt/modules/mount.py
|
|
+++ b/salt/modules/mount.py
|
|
@@ -711,11 +711,15 @@ def set_fstab(
|
|
config='/etc/fstab',
|
|
test=False,
|
|
match_on='auto',
|
|
+ not_change=False,
|
|
**kwargs):
|
|
'''
|
|
Verify that this mount is represented in the fstab, change the mount
|
|
to match the data passed, or add the mount if it is not present.
|
|
|
|
+ If the entry is found via `match_on` and `not_change` is True, the
|
|
+ current line will be preserved.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -793,7 +797,7 @@ def set_fstab(
|
|
# Note: If ret isn't None here,
|
|
# we've matched multiple lines
|
|
ret = 'present'
|
|
- if entry.match(line):
|
|
+ if entry.match(line) or not_change:
|
|
lines.append(line)
|
|
else:
|
|
ret = 'change'
|
|
@@ -837,12 +841,16 @@ def set_vfstab(
|
|
config='/etc/vfstab',
|
|
test=False,
|
|
match_on='auto',
|
|
+ not_change=False,
|
|
**kwargs):
|
|
'''
|
|
..verionadded:: 2016.3.2
|
|
Verify that this mount is represented in the fstab, change the mount
|
|
to match the data passed, or add the mount if it is not present.
|
|
|
|
+ If the entry is found via `match_on` and `not_change` is True, the
|
|
+ current line will be preserved.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -922,7 +930,7 @@ def set_vfstab(
|
|
# Note: If ret isn't None here,
|
|
# we've matched multiple lines
|
|
ret = 'present'
|
|
- if entry.match(line):
|
|
+ if entry.match(line) or not_change:
|
|
lines.append(line)
|
|
else:
|
|
ret = 'change'
|
|
@@ -1023,6 +1031,7 @@ def set_automaster(
|
|
opts='',
|
|
config='/etc/auto_salt',
|
|
test=False,
|
|
+ not_change=False,
|
|
**kwargs):
|
|
'''
|
|
Verify that this mount is represented in the auto_salt, change the mount
|
|
@@ -1071,9 +1080,11 @@ def set_automaster(
|
|
lines.append(line)
|
|
continue
|
|
if comps[0] == name or comps[2] == device_fmt:
|
|
+ present = True
|
|
+ if not_change:
|
|
+ continue
|
|
# check to see if there are changes
|
|
# and fix them if there are any
|
|
- present = True
|
|
if comps[0] != name:
|
|
change = True
|
|
comps[0] = name
|
|
@@ -1212,14 +1223,17 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None, util
|
|
lopts = ','.join(opts)
|
|
args = '-o {0}'.format(lopts)
|
|
|
|
- # use of fstype on AIX differs from typical Linux use of -t functionality
|
|
- # AIX uses -v vfsname, -t fstype mounts all with fstype in /etc/filesystems
|
|
- if 'AIX' in __grains__['os']:
|
|
- if fstype:
|
|
+ if fstype:
|
|
+ # use of fstype on AIX differs from typical Linux use of -t
|
|
+ # functionality. AIX uses -v vfsname, -t fstype mounts all with
|
|
+ # fstype in /etc/filesystems
|
|
+ if 'AIX' in __grains__['os']:
|
|
args += ' -v {0}'.format(fstype)
|
|
- else:
|
|
- if fstype:
|
|
+ elif 'solaris' in __grains__['os'].lower():
|
|
+ args += ' -F {0}'.format(fstype)
|
|
+ else:
|
|
args += ' -t {0}'.format(fstype)
|
|
+
|
|
cmd = 'mount {0} {1} {2} '.format(args, device, name)
|
|
out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
|
|
if out['retcode']:
|
|
@@ -1247,7 +1261,7 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
|
|
|
|
if 'AIX' in __grains__['os']:
|
|
if opts == 'defaults':
|
|
- opts = ''
|
|
+ opts = []
|
|
|
|
if isinstance(opts, six.string_types):
|
|
opts = opts.split(',')
|
|
@@ -1262,14 +1276,16 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
|
|
lopts = ','.join(opts)
|
|
args = '-o {0}'.format(lopts)
|
|
|
|
- # use of fstype on AIX differs from typical Linux use of -t functionality
|
|
- # AIX uses -v vfsname, -t fstype mounts all with fstype in /etc/filesystems
|
|
- if 'AIX' in __grains__['os']:
|
|
- if fstype:
|
|
+ if fstype:
|
|
+ # use of fstype on AIX differs from typical Linux use of
|
|
+ # -t functionality AIX uses -v vfsname, -t fstype mounts
|
|
+ # all with fstype in /etc/filesystems
|
|
+ if 'AIX' in __grains__['os']:
|
|
args += ' -v {0}'.format(fstype)
|
|
- args += ' -o remount'
|
|
- else:
|
|
- args += ' -t {0}'.format(fstype)
|
|
+ elif 'solaris' in __grains__['os'].lower():
|
|
+ args += ' -F {0}'.format(fstype)
|
|
+ else:
|
|
+ args += ' -t {0}'.format(fstype)
|
|
|
|
if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount:
|
|
cmd = 'mount {0} {1} {2} '.format(args, device, name)
|
|
@@ -1667,6 +1683,7 @@ def set_filesystems(
|
|
config='/etc/filesystems',
|
|
test=False,
|
|
match_on='auto',
|
|
+ not_change=False,
|
|
**kwargs):
|
|
'''
|
|
.. versionadded:: 2018.3.3
|
|
@@ -1674,6 +1691,9 @@ def set_filesystems(
|
|
Verify that this mount is represented in the filesystems, change the mount
|
|
to match the data passed, or add the mount if it is not present on AIX
|
|
|
|
+ If the entry is found via `match_on` and `not_change` is True, the
|
|
+ current line will be preserved.
|
|
+
|
|
Provide information if the path is mounted
|
|
|
|
:param name: The name of the mount point where the device is mounted.
|
|
@@ -1773,7 +1793,7 @@ def set_filesystems(
|
|
for fsys_view in six.viewitems(fsys_filedict):
|
|
if criteria.match(fsys_view):
|
|
ret = 'present'
|
|
- if entry_ip.match(fsys_view):
|
|
+ if entry_ip.match(fsys_view) or not_change:
|
|
view_lines.append(fsys_view)
|
|
else:
|
|
ret = 'change'
|
|
diff --git a/salt/modules/openbsdpkg.py b/salt/modules/openbsdpkg.py
|
|
index b3b6bab912..819a24afb1 100644
|
|
--- a/salt/modules/openbsdpkg.py
|
|
+++ b/salt/modules/openbsdpkg.py
|
|
@@ -344,7 +344,7 @@ def purge(name=None, pkgs=None, **kwargs):
|
|
return remove(name=name, pkgs=pkgs, purge=True)
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
diff --git a/salt/modules/pacmanpkg.py b/salt/modules/pacmanpkg.py
|
|
index e30296e8c8..35007e27f5 100644
|
|
--- a/salt/modules/pacmanpkg.py
|
|
+++ b/salt/modules/pacmanpkg.py
|
|
@@ -111,7 +111,7 @@ def latest_version(*names, **kwargs):
|
|
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -393,7 +393,7 @@ def group_diff(name):
|
|
return ret
|
|
|
|
|
|
-def refresh_db(root=None):
|
|
+def refresh_db(root=None, **kwargs):
|
|
'''
|
|
Just run a ``pacman -Sy``, return a dict::
|
|
|
|
@@ -843,7 +843,7 @@ def purge(name=None, pkgs=None, **kwargs):
|
|
return _uninstall(action='purge', name=name, pkgs=pkgs)
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's package database (not
|
|
@@ -877,7 +877,7 @@ def file_list(*packages):
|
|
return {'errors': errors, 'files': ret}
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of _every_ file on the system's
|
|
@@ -913,7 +913,7 @@ def file_dict(*packages):
|
|
return {'errors': errors, 'packages': ret}
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
diff --git a/salt/modules/parted_partition.py b/salt/modules/parted_partition.py
|
|
index 1757e7118f..c2e0ebb882 100644
|
|
--- a/salt/modules/parted_partition.py
|
|
+++ b/salt/modules/parted_partition.py
|
|
@@ -44,6 +44,15 @@ __func_alias__ = {
|
|
VALID_UNITS = set(['s', 'B', 'kB', 'MB', 'MiB', 'GB', 'GiB', 'TB', 'TiB', '%',
|
|
'cyl', 'chs', 'compact'])
|
|
|
|
+VALID_DISK_FLAGS = set(['cylinder_alignment', 'pmbr_boot',
|
|
+ 'implicit_partition_table'])
|
|
+
|
|
+VALID_PARTITION_FLAGS = set(['boot', 'root', 'swap', 'hidden', 'raid',
|
|
+ 'lvm', 'lba', 'hp-service', 'palo',
|
|
+ 'prep', 'msftres', 'bios_grub', 'atvrecv',
|
|
+ 'diag', 'legacy_boot', 'msftdata', 'irst',
|
|
+ 'esp', 'type'])
|
|
+
|
|
|
|
def __virtual__():
|
|
'''
|
|
@@ -641,8 +650,26 @@ def set_(device, minor, flag, state):
|
|
:ref:`YAML Idiosyncrasies <yaml-idiosyncrasies>`). Some or all of these
|
|
flags will be available, depending on what disk label you are using.
|
|
|
|
- Valid flags are: bios_grub, legacy_boot, boot, lba, root, swap, hidden, raid,
|
|
- LVM, PALO, PREP, DIAG
|
|
+ Valid flags are:
|
|
+ * boot
|
|
+ * root
|
|
+ * swap
|
|
+ * hidden
|
|
+ * raid
|
|
+ * lvm
|
|
+ * lba
|
|
+ * hp-service
|
|
+ * palo
|
|
+ * prep
|
|
+ * msftres
|
|
+ * bios_grub
|
|
+ * atvrecv
|
|
+ * diag
|
|
+ * legacy_boot
|
|
+ * msftdata
|
|
+ * irst
|
|
+ * esp
|
|
+ * type
|
|
|
|
CLI Example:
|
|
|
|
@@ -659,8 +686,7 @@ def set_(device, minor, flag, state):
|
|
'Invalid minor number passed to partition.set'
|
|
)
|
|
|
|
- if flag not in set(['bios_grub', 'legacy_boot', 'boot', 'lba', 'root',
|
|
- 'swap', 'hidden', 'raid', 'LVM', 'PALO', 'PREP', 'DIAG']):
|
|
+ if flag not in VALID_PARTITION_FLAGS:
|
|
raise CommandExecutionError('Invalid flag passed to partition.set')
|
|
|
|
if state not in set(['on', 'off']):
|
|
@@ -691,8 +717,7 @@ def toggle(device, partition, flag):
|
|
'Invalid partition number passed to partition.toggle'
|
|
)
|
|
|
|
- if flag not in set(['bios_grub', 'legacy_boot', 'boot', 'lba', 'root',
|
|
- 'swap', 'hidden', 'raid', 'LVM', 'PALO', 'PREP', 'DIAG']):
|
|
+ if flag not in VALID_PARTITION_FLAGS:
|
|
raise CommandExecutionError('Invalid flag passed to partition.toggle')
|
|
|
|
cmd = 'parted -m -s {0} toggle {1} {2}'.format(device, partition, flag)
|
|
@@ -700,6 +725,60 @@ def toggle(device, partition, flag):
|
|
return out
|
|
|
|
|
|
+def disk_set(device, flag, state):
|
|
+ '''
|
|
+ Changes a flag on selected device.
|
|
+
|
|
+ A flag can be either "on" or "off" (make sure to use proper
|
|
+ quoting, see :ref:`YAML Idiosyncrasies
|
|
+ <yaml-idiosyncrasies>`). Some or all of these flags will be
|
|
+ available, depending on what disk label you are using.
|
|
+
|
|
+ Valid flags are:
|
|
+ * cylinder_alignment
|
|
+ * pmbr_boot
|
|
+ * implicit_partition_table
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' partition.disk_set /dev/sda pmbr_boot '"on"'
|
|
+ '''
|
|
+ _validate_device(device)
|
|
+
|
|
+ if flag not in VALID_DISK_FLAGS:
|
|
+ raise CommandExecutionError('Invalid flag passed to partition.disk_set')
|
|
+
|
|
+ if state not in set(['on', 'off']):
|
|
+ raise CommandExecutionError('Invalid state passed to partition.disk_set')
|
|
+
|
|
+ cmd = ['parted', '-m', '-s', device, 'disk_set', flag, state]
|
|
+ out = __salt__['cmd.run'](cmd).splitlines()
|
|
+ return out
|
|
+
|
|
+
|
|
+def disk_toggle(device, flag):
|
|
+ '''
|
|
+ Toggle the state of <flag> on <device>. Valid flags are the same
|
|
+ as the disk_set command.
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ .. code-block:: bash
|
|
+
|
|
+ salt '*' partition.disk_toggle /dev/sda pmbr_boot
|
|
+ '''
|
|
+ _validate_device(device)
|
|
+
|
|
+ if flag not in VALID_DISK_FLAGS:
|
|
+ raise CommandExecutionError('Invalid flag passed to partition.disk_toggle')
|
|
+
|
|
+ cmd = ['parted', '-m', '-s', device, 'disk_toggle', flag]
|
|
+ out = __salt__['cmd.run'](cmd).splitlines()
|
|
+ return out
|
|
+
|
|
+
|
|
def exists(device=''):
|
|
'''
|
|
Check to see if the partition exists
|
|
diff --git a/salt/modules/pkgin.py b/salt/modules/pkgin.py
|
|
index 240f79ca26..dd5257c80d 100644
|
|
--- a/salt/modules/pkgin.py
|
|
+++ b/salt/modules/pkgin.py
|
|
@@ -112,7 +112,7 @@ def _splitpkg(name):
|
|
return name.split(';', 1)[0].rsplit('-', 1)
|
|
|
|
|
|
-def search(pkg_name):
|
|
+def search(pkg_name, **kwargs):
|
|
'''
|
|
Searches for an exact match using pkgin ^package$
|
|
|
|
@@ -225,7 +225,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs)
|
|
|
|
|
|
-def refresh_db(force=False):
|
|
+def refresh_db(force=False, **kwargs):
|
|
'''
|
|
Use pkg update to get latest pkg_summary
|
|
|
|
@@ -637,7 +637,7 @@ def _rehash():
|
|
__salt__['cmd.run']('rehash', output_loglevel='trace')
|
|
|
|
|
|
-def file_list(package):
|
|
+def file_list(package, **kwargs):
|
|
'''
|
|
List the files that belong to a package.
|
|
|
|
@@ -655,7 +655,7 @@ def file_list(package):
|
|
return ret
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
.. versionchanged: 2016.3.0
|
|
|
|
diff --git a/salt/modules/pkgng.py b/salt/modules/pkgng.py
|
|
index dabd817fbf..ab20d05ef2 100644
|
|
--- a/salt/modules/pkgng.py
|
|
+++ b/salt/modules/pkgng.py
|
|
@@ -224,7 +224,7 @@ def version(*names, **kwargs):
|
|
info = salt.utils.functools.alias_function(version, 'info')
|
|
|
|
|
|
-def refresh_db(jail=None, chroot=None, root=None, force=False):
|
|
+def refresh_db(jail=None, chroot=None, root=None, force=False, **kwargs):
|
|
'''
|
|
Refresh PACKAGESITE contents
|
|
|
|
@@ -2441,7 +2441,7 @@ def _parse_upgrade(stdout):
|
|
return result
|
|
|
|
|
|
-def version_cmp(pkg1, pkg2, ignore_epoch=False):
|
|
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
|
|
'''
|
|
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
|
|
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
|
|
diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py
|
|
index 893ae4f817..e577c4391a 100644
|
|
--- a/salt/modules/rpm_lowpkg.py
|
|
+++ b/salt/modules/rpm_lowpkg.py
|
|
@@ -76,7 +76,7 @@ def bin_pkg_info(path, saltenv='base'):
|
|
minion so that it can be examined.
|
|
|
|
saltenv : base
|
|
- Salt fileserver envrionment from which to retrieve the package. Ignored
|
|
+ Salt fileserver environment from which to retrieve the package. Ignored
|
|
if ``path`` is a local file path on the minion.
|
|
|
|
CLI Example:
|
|
@@ -128,12 +128,15 @@ def bin_pkg_info(path, saltenv='base'):
|
|
return ret
|
|
|
|
|
|
-def list_pkgs(*packages):
|
|
+def list_pkgs(*packages, **kwargs):
|
|
'''
|
|
List the packages currently installed in a dict::
|
|
|
|
{'<package_name>': '<version>'}
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -141,8 +144,11 @@ def list_pkgs(*packages):
|
|
salt '*' lowpkg.list_pkgs
|
|
'''
|
|
pkgs = {}
|
|
- cmd = ['rpm', '-q' if packages else '-qa',
|
|
- '--queryformat', r'%{NAME} %{VERSION}\n']
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ cmd.extend(['-q' if packages else '-qa',
|
|
+ '--queryformat', r'%{NAME} %{VERSION}\n'])
|
|
if packages:
|
|
cmd.extend(packages)
|
|
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
|
|
@@ -158,6 +164,9 @@ def verify(*packages, **kwargs):
|
|
'''
|
|
Runs an rpm -Va on a system, and returns the results in a dict
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
Files with an attribute of config, doc, ghost, license or readme in the
|
|
package header can be ignored using the ``ignore_types`` keyword argument
|
|
|
|
@@ -199,6 +208,8 @@ def verify(*packages, **kwargs):
|
|
verify_options = [x.strip() for x in six.text_type(verify_options).split(',')]
|
|
|
|
cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
cmd.extend(['--' + x for x in verify_options])
|
|
if packages:
|
|
cmd.append('-V')
|
|
@@ -258,6 +269,9 @@ def modified(*packages, **flags):
|
|
|
|
.. versionadded:: 2015.5.0
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -266,10 +280,12 @@ def modified(*packages, **flags):
|
|
salt '*' lowpkg.modified httpd postfix
|
|
salt '*' lowpkg.modified
|
|
'''
|
|
- ret = __salt__['cmd.run_all'](
|
|
- ['rpm', '-Va'] + list(packages),
|
|
- output_loglevel='trace',
|
|
- python_shell=False)
|
|
+ cmd = ['rpm']
|
|
+ if flags.get('root'):
|
|
+ cmd.extend(['--root', flags.pop('root')])
|
|
+ cmd.append('-Va')
|
|
+ cmd.extend(packages)
|
|
+ ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
|
|
|
|
data = {}
|
|
|
|
@@ -324,12 +340,15 @@ def modified(*packages, **flags):
|
|
return filtered_data
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of _every_ file on the system's rpm database (not generally
|
|
recommended).
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -338,12 +357,15 @@ def file_list(*packages):
|
|
salt '*' lowpkg.file_list httpd postfix
|
|
salt '*' lowpkg.file_list
|
|
'''
|
|
- if not packages:
|
|
- cmd = ['rpm', '-qla']
|
|
- else:
|
|
- cmd = ['rpm', '-ql']
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+
|
|
+ cmd.append('-ql' if packages else '-qla')
|
|
+ if packages:
|
|
# Can't concatenate a tuple, must do a list.extend()
|
|
cmd.extend(packages)
|
|
+
|
|
ret = __salt__['cmd.run'](
|
|
cmd,
|
|
output_loglevel='trace',
|
|
@@ -351,12 +373,15 @@ def file_list(*packages):
|
|
return {'errors': [], 'files': ret}
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, sorted by group. Not specifying
|
|
any packages will return a list of _every_ file on the system's rpm
|
|
database (not generally recommended).
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -368,8 +393,11 @@ def file_dict(*packages):
|
|
errors = []
|
|
ret = {}
|
|
pkgs = {}
|
|
- cmd = ['rpm', '-q' if packages else '-qa',
|
|
- '--queryformat', r'%{NAME} %{VERSION}\n']
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ cmd.extend(['-q' if packages else '-qa',
|
|
+ '--queryformat', r'%{NAME} %{VERSION}\n'])
|
|
if packages:
|
|
cmd.extend(packages)
|
|
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
|
|
@@ -380,8 +408,10 @@ def file_dict(*packages):
|
|
comps = line.split()
|
|
pkgs[comps[0]] = {'version': comps[1]}
|
|
for pkg in pkgs:
|
|
- files = []
|
|
- cmd = ['rpm', '-ql', pkg]
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ cmd.extend(['-ql', pkg])
|
|
out = __salt__['cmd.run'](
|
|
['rpm', '-ql', pkg],
|
|
output_loglevel='trace',
|
|
@@ -390,7 +420,7 @@ def file_dict(*packages):
|
|
return {'errors': errors, 'packages': ret}
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
Return the name of the package that owns the file. Multiple file paths can
|
|
be passed. If a single path is passed, a string will be returned,
|
|
@@ -400,6 +430,9 @@ def owner(*paths):
|
|
If the file is not owned by a package, or is not present on the minion,
|
|
then an empty string will be returned for that path.
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -411,7 +444,10 @@ def owner(*paths):
|
|
return ''
|
|
ret = {}
|
|
for path in paths:
|
|
- cmd = ['rpm', '-qf', '--queryformat', '%{name}', path]
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ cmd.extend(['-qf', '--queryformat', '%{name}', path])
|
|
ret[path] = __salt__['cmd.run_stdout'](cmd,
|
|
output_loglevel='trace',
|
|
python_shell=False)
|
|
@@ -471,6 +507,9 @@ def info(*packages, **kwargs):
|
|
:param all_versions:
|
|
Return information for all installed versions of the packages
|
|
|
|
+ :param root:
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
:return:
|
|
|
|
CLI example:
|
|
@@ -493,7 +532,14 @@ def info(*packages, **kwargs):
|
|
else:
|
|
size_tag = '%{SIZE}'
|
|
|
|
- cmd = packages and "rpm -q {0}".format(' '.join(packages)) or "rpm -qa"
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ if packages:
|
|
+ cmd.append('-q')
|
|
+ cmd.extend(packages)
|
|
+ else:
|
|
+ cmd.append('-qa')
|
|
|
|
# Construct query format
|
|
attr_map = {
|
|
@@ -544,6 +590,7 @@ def info(*packages, **kwargs):
|
|
query.append(attr_map['description'])
|
|
query.append("-----\\n")
|
|
|
|
+ cmd = ' '.join(cmd)
|
|
call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))),
|
|
output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True)
|
|
if call['retcode'] != 0:
|
|
@@ -744,10 +791,13 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
|
|
return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False)
|
|
|
|
|
|
-def checksum(*paths):
|
|
+def checksum(*paths, **kwargs):
|
|
'''
|
|
Return if the signature of a RPM file is valid.
|
|
|
|
+ root
|
|
+ use root as top level directory (default: "/")
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -760,9 +810,14 @@ def checksum(*paths):
|
|
if not paths:
|
|
raise CommandExecutionError("No package files has been specified.")
|
|
|
|
+ cmd = ['rpm']
|
|
+ if kwargs.get('root'):
|
|
+ cmd.extend(['--root', kwargs['root']])
|
|
+ cmd.extend(['-K', '--quiet'])
|
|
for package_file in paths:
|
|
+ cmd_ = cmd + [package_file]
|
|
ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and
|
|
- not __salt__['cmd.retcode'](["rpm", "-K", "--quiet", package_file],
|
|
+ not __salt__['cmd.retcode'](cmd_,
|
|
ignore_retcode=True,
|
|
output_loglevel='trace',
|
|
python_shell=False))
|
|
diff --git a/salt/modules/shadow.py b/salt/modules/shadow.py
|
|
index 9659867f05..98c7369c5e 100644
|
|
--- a/salt/modules/shadow.py
|
|
+++ b/salt/modules/shadow.py
|
|
@@ -13,6 +13,7 @@ from __future__ import absolute_import, unicode_literals, print_function
|
|
# Import python libs
|
|
import os
|
|
import datetime
|
|
+import functools
|
|
try:
|
|
import spwd
|
|
except ImportError:
|
|
@@ -24,6 +25,7 @@ import salt.utils.files
|
|
import salt.utils.stringutils
|
|
from salt.exceptions import CommandExecutionError
|
|
from salt.ext import six
|
|
+from salt.ext.six.moves import range
|
|
try:
|
|
import salt.utils.pycrypto
|
|
HAS_CRYPT = True
|
|
@@ -48,21 +50,32 @@ def default_hash():
|
|
return '!'
|
|
|
|
|
|
-def info(name):
|
|
+def info(name, root=None):
|
|
'''
|
|
Return information for the specified user
|
|
|
|
+ name
|
|
+ User to get the information for
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.info root
|
|
'''
|
|
+ if root is not None:
|
|
+ getspnam = functools.partial(_getspnam, root=root)
|
|
+ else:
|
|
+ getspnam = functools.partial(spwd.getspnam)
|
|
+
|
|
try:
|
|
- data = spwd.getspnam(name)
|
|
+ data = getspnam(name)
|
|
ret = {
|
|
- 'name': data.sp_nam,
|
|
- 'passwd': data.sp_pwd,
|
|
+ 'name': data.sp_namp if hasattr(data, 'sp_namp') else data.sp_nam,
|
|
+ 'passwd': data.sp_pwdp if hasattr(data, 'sp_pwdp') else data.sp_pwd,
|
|
'lstchg': data.sp_lstchg,
|
|
'min': data.sp_min,
|
|
'max': data.sp_max,
|
|
@@ -82,69 +95,99 @@ def info(name):
|
|
return ret
|
|
|
|
|
|
-def set_inactdays(name, inactdays):
|
|
+def _set_attrib(name, key, value, param, root=None, validate=True):
|
|
+ '''
|
|
+ Set a parameter in /etc/shadow
|
|
+ '''
|
|
+ pre_info = info(name, root=root)
|
|
+
|
|
+ # If the user is not present or the attribute is already present,
|
|
+ # we return early
|
|
+ if not pre_info['name']:
|
|
+ return False
|
|
+
|
|
+ if value == pre_info[key]:
|
|
+ return True
|
|
+
|
|
+ cmd = ['chage']
|
|
+
|
|
+ if root is not None:
|
|
+ cmd.extend(('-R', root))
|
|
+
|
|
+ cmd.extend((param, value, name))
|
|
+
|
|
+ ret = not __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ if validate:
|
|
+ ret = info(name, root=root).get(key) == value
|
|
+ return ret
|
|
+
|
|
+
|
|
+def set_inactdays(name, inactdays, root=None):
|
|
'''
|
|
Set the number of days of inactivity after a password has expired before
|
|
the account is locked. See man chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ inactdays
|
|
+ Set password inactive after this number of days
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_inactdays username 7
|
|
'''
|
|
- pre_info = info(name)
|
|
- if inactdays == pre_info['inact']:
|
|
- return True
|
|
- cmd = 'chage -I {0} {1}'.format(inactdays, name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_info = info(name)
|
|
- if post_info['inact'] != pre_info['inact']:
|
|
- return post_info['inact'] == inactdays
|
|
- return False
|
|
+ return _set_attrib(name, 'inact', inactdays, '-I', root=root)
|
|
|
|
|
|
-def set_maxdays(name, maxdays):
|
|
+def set_maxdays(name, maxdays, root=None):
|
|
'''
|
|
Set the maximum number of days during which a password is valid.
|
|
See man chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ maxdays
|
|
+ Maximum number of days during which a password is valid
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_maxdays username 90
|
|
'''
|
|
- pre_info = info(name)
|
|
- if maxdays == pre_info['max']:
|
|
- return True
|
|
- cmd = 'chage -M {0} {1}'.format(maxdays, name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_info = info(name)
|
|
- if post_info['max'] != pre_info['max']:
|
|
- return post_info['max'] == maxdays
|
|
- return False
|
|
+ return _set_attrib(name, 'max', maxdays, '-M', root=root)
|
|
|
|
|
|
-def set_mindays(name, mindays):
|
|
+def set_mindays(name, mindays, root=None):
|
|
'''
|
|
Set the minimum number of days between password changes. See man chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ mindays
|
|
+ Minimum number of days between password changes
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_mindays username 7
|
|
'''
|
|
- pre_info = info(name)
|
|
- if mindays == pre_info['min']:
|
|
- return True
|
|
- cmd = 'chage -m {0} {1}'.format(mindays, name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_info = info(name)
|
|
- if post_info['min'] != pre_info['min']:
|
|
- return post_info['min'] == mindays
|
|
- return False
|
|
+ return _set_attrib(name, 'min', mindays, '-m', root=root)
|
|
|
|
|
|
def gen_password(password, crypt_salt=None, algorithm='sha512'):
|
|
@@ -189,77 +232,107 @@ def gen_password(password, crypt_salt=None, algorithm='sha512'):
|
|
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
|
|
|
|
|
|
-def del_password(name):
|
|
+def del_password(name, root=None):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
Delete the password from name user
|
|
|
|
+ name
|
|
+ User to delete
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.del_password username
|
|
'''
|
|
- cmd = 'passwd -d {0}'.format(name)
|
|
+ cmd = ['passwd']
|
|
+ if root is not None:
|
|
+ cmd.extend(('-R', root))
|
|
+ cmd.extend(('-d', name))
|
|
+
|
|
__salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')
|
|
- uinfo = info(name)
|
|
+ uinfo = info(name, root=root)
|
|
return not uinfo['passwd'] and uinfo['name'] == name
|
|
|
|
|
|
-def lock_password(name):
|
|
+def lock_password(name, root=None):
|
|
'''
|
|
.. versionadded:: 2016.11.0
|
|
|
|
Lock the password from specified user
|
|
|
|
+ name
|
|
+ User to lock
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.lock_password username
|
|
'''
|
|
- pre_info = info(name)
|
|
- if pre_info['name'] == '':
|
|
+ pre_info = info(name, root=root)
|
|
+ if not pre_info['name']:
|
|
return False
|
|
+
|
|
if pre_info['passwd'].startswith('!'):
|
|
return True
|
|
|
|
- cmd = 'passwd -l {0}'.format(name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ cmd = ['passwd']
|
|
|
|
- post_info = info(name)
|
|
+ if root is not None:
|
|
+ cmd.extend(('-R', root))
|
|
|
|
- return post_info['passwd'].startswith('!')
|
|
+ cmd.extend(('-l', name))
|
|
|
|
+ __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ return info(name, root=root)['passwd'].startswith('!')
|
|
|
|
-def unlock_password(name):
|
|
+
|
|
+def unlock_password(name, root=None):
|
|
'''
|
|
.. versionadded:: 2016.11.0
|
|
|
|
Unlock the password from name user
|
|
|
|
+ name
|
|
+ User to unlock
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.unlock_password username
|
|
'''
|
|
- pre_info = info(name)
|
|
- if pre_info['name'] == '':
|
|
+ pre_info = info(name, root=root)
|
|
+ if not pre_info['name']:
|
|
return False
|
|
- if pre_info['passwd'][0] != '!':
|
|
+
|
|
+ if not pre_info['passwd'].startswith('!'):
|
|
return True
|
|
|
|
- cmd = 'passwd -u {0}'.format(name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ cmd = ['passwd']
|
|
|
|
- post_info = info(name)
|
|
+ if root is not None:
|
|
+ cmd.extend(('-R', root))
|
|
|
|
- return post_info['passwd'][0] != '!'
|
|
+ cmd.extend(('-u', name))
|
|
+
|
|
+ __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ return not info(name, root=root)['passwd'].startswith('!')
|
|
|
|
|
|
-def set_password(name, password, use_usermod=False):
|
|
+def set_password(name, password, use_usermod=False, root=None):
|
|
'''
|
|
Set the password for a named user. The password must be a properly defined
|
|
hash. The password hash can be generated with this command:
|
|
@@ -273,6 +346,18 @@ def set_password(name, password, use_usermod=False):
|
|
Keep in mind that the $6 represents a sha512 hash, if your OS is using a
|
|
different hashing algorithm this needs to be changed accordingly
|
|
|
|
+ name
|
|
+ User to set the password
|
|
+
|
|
+ password
|
|
+ Password already hashed
|
|
+
|
|
+ use_usermod
|
|
+ Use usermod command to better compatibility
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -287,6 +372,9 @@ def set_password(name, password, use_usermod=False):
|
|
s_file = '/etc/tcb/{0}/shadow'.format(name)
|
|
else:
|
|
s_file = '/etc/shadow'
|
|
+ if root:
|
|
+ s_file = os.path.join(root, os.path.relpath(s_file, os.path.sep))
|
|
+
|
|
ret = {}
|
|
if not os.path.isfile(s_file):
|
|
return ret
|
|
@@ -306,54 +394,67 @@ def set_password(name, password, use_usermod=False):
|
|
with salt.utils.files.fopen(s_file, 'w+') as fp_:
|
|
lines = [salt.utils.stringutils.to_str(_l) for _l in lines]
|
|
fp_.writelines(lines)
|
|
- uinfo = info(name)
|
|
+ uinfo = info(name, root=root)
|
|
return uinfo['passwd'] == password
|
|
else:
|
|
# Use usermod -p (less secure, but more feature-complete)
|
|
- cmd = 'usermod -p {0} {1}'.format(password, name)
|
|
+ cmd = ['usermod']
|
|
+ if root is not None:
|
|
+ cmd.extend(('-R', root))
|
|
+ cmd.extend(('-p', password, name))
|
|
+
|
|
__salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')
|
|
- uinfo = info(name)
|
|
+ uinfo = info(name, root=root)
|
|
return uinfo['passwd'] == password
|
|
|
|
|
|
-def set_warndays(name, warndays):
|
|
+def set_warndays(name, warndays, root=None):
|
|
'''
|
|
Set the number of days of warning before a password change is required.
|
|
See man chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ warndays
|
|
+ Number of days of warning before a password change is required
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_warndays username 7
|
|
'''
|
|
- pre_info = info(name)
|
|
- if warndays == pre_info['warn']:
|
|
- return True
|
|
- cmd = 'chage -W {0} {1}'.format(warndays, name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_info = info(name)
|
|
- if post_info['warn'] != pre_info['warn']:
|
|
- return post_info['warn'] == warndays
|
|
- return False
|
|
+ return _set_attrib(name, 'warn', warndays, '-W', root=root)
|
|
|
|
|
|
-def set_date(name, date):
|
|
+def set_date(name, date, root=None):
|
|
'''
|
|
Sets the value for the date the password was last changed to days since the
|
|
epoch (January 1, 1970). See man chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ date
|
|
+ Date the password was last changed
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_date username 0
|
|
'''
|
|
- cmd = ['chage', '-d', date, name]
|
|
- return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
|
|
+ return _set_attrib(name, 'lstchg', date, '-d', root=root, validate=False)
|
|
|
|
|
|
-def set_expire(name, expire):
|
|
+def set_expire(name, expire, root=None):
|
|
'''
|
|
.. versionchanged:: 2014.7.0
|
|
|
|
@@ -361,26 +462,77 @@ def set_expire(name, expire):
|
|
(January 1, 1970). Using a value of -1 will clear expiration. See man
|
|
chage.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ expire
|
|
+ Date the account expires
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.set_expire username -1
|
|
'''
|
|
- cmd = ['chage', '-E', expire, name]
|
|
- return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
|
|
+ return _set_attrib(name, 'expire', expire, '-E', root=root, validate=False)
|
|
|
|
|
|
-def list_users():
|
|
+def list_users(root=None):
|
|
'''
|
|
.. versionadded:: 2018.3.0
|
|
|
|
Return a list of all shadow users
|
|
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' shadow.list_users
|
|
'''
|
|
- return sorted([user.sp_nam for user in spwd.getspall()])
|
|
+ if root is not None:
|
|
+ getspall = functools.partial(_getspall, root=root)
|
|
+ else:
|
|
+ getspall = functools.partial(spwd.getspall)
|
|
+
|
|
+ return sorted([user.sp_namp if hasattr(user, 'sp_namp') else user.sp_nam
|
|
+ for user in getspall()])
|
|
+
|
|
+
|
|
+def _getspnam(name, root=None):
|
|
+ '''
|
|
+ Alternative implementation of getspnam that uses only /etc/shadow
|
|
+ '''
|
|
+ root = '/' if not root else root
|
|
+ passwd = os.path.join(root, 'etc/shadow')
|
|
+ with salt.utils.files.fopen(passwd) as fp_:
|
|
+ for line in fp_:
|
|
+ line = salt.utils.stringutils.to_unicode(line)
|
|
+ comps = line.strip().split(':')
|
|
+ if comps[0] == name:
|
|
+ # Generate a getspnam compatible output
|
|
+ for i in range(2, 9):
|
|
+ comps[i] = int(comps[i]) if comps[i] else -1
|
|
+ return spwd.struct_spwd(comps)
|
|
+ raise KeyError
|
|
+
|
|
+
|
|
+def _getspall(root=None):
|
|
+ '''
|
|
+ Alternative implementation of getspall that uses only /etc/shadow
|
|
+ '''
|
|
+ root = '/' if not root else root
|
|
+ passwd = os.path.join(root, 'etc/shadow')
|
|
+ with salt.utils.files.fopen(passwd) as fp_:
|
|
+ for line in fp_:
|
|
+ line = salt.utils.stringutils.to_unicode(line)
|
|
+ comps = line.strip().split(':')
|
|
+ # Generate a getspall compatible output
|
|
+ for i in range(2, 9):
|
|
+ comps[i] = int(comps[i]) if comps[i] else -1
|
|
+ yield spwd.struct_spwd(comps)
|
|
diff --git a/salt/modules/solarisipspkg.py b/salt/modules/solarisipspkg.py
|
|
index b249bd1b3d..cb6f754a59 100644
|
|
--- a/salt/modules/solarisipspkg.py
|
|
+++ b/salt/modules/solarisipspkg.py
|
|
@@ -109,7 +109,7 @@ def _ips_get_pkgversion(line):
|
|
return line.split()[0].split('@')[1].strip()
|
|
|
|
|
|
-def refresh_db(full=False):
|
|
+def refresh_db(full=False, **kwargs):
|
|
'''
|
|
Updates the remote repos database.
|
|
|
|
@@ -133,7 +133,7 @@ def refresh_db(full=False):
|
|
return __salt__['cmd.retcode']('/bin/pkg refresh') == 0
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check if there is an upgrade available for a certain package
|
|
Accepts full or partial FMRI. Returns all matches found.
|
|
diff --git a/salt/modules/solarispkg.py b/salt/modules/solarispkg.py
|
|
index 2a828f6e9c..b28349a7d8 100644
|
|
--- a/salt/modules/solarispkg.py
|
|
+++ b/salt/modules/solarispkg.py
|
|
@@ -169,7 +169,7 @@ def latest_version(*names, **kwargs):
|
|
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
|
|
index fb349d30e6..c530d7c3b7 100644
|
|
--- a/salt/modules/systemd_service.py
|
|
+++ b/salt/modules/systemd_service.py
|
|
@@ -56,7 +56,7 @@ def __virtual__():
|
|
Only work on systems that have been booted with systemd
|
|
'''
|
|
if __grains__['kernel'] == 'Linux' \
|
|
- and salt.utils.systemd.booted(__context__):
|
|
+ and salt.utils.systemd.booted(__context__):
|
|
return __virtualname__
|
|
return (
|
|
False,
|
|
@@ -65,6 +65,16 @@ def __virtual__():
|
|
)
|
|
|
|
|
|
+def _root(path, root):
|
|
+ '''
|
|
+ Relocate an absolute path to a new root directory.
|
|
+ '''
|
|
+ if root:
|
|
+ return os.path.join(root, os.path.relpath(path, os.path.sep))
|
|
+ else:
|
|
+ return path
|
|
+
|
|
+
|
|
def _canonical_unit_name(name):
|
|
'''
|
|
Build a canonical unit name treating unit names without one
|
|
@@ -123,15 +133,15 @@ def _check_for_unit_changes(name):
|
|
__context__[contextkey] = True
|
|
|
|
|
|
-def _check_unmask(name, unmask, unmask_runtime):
|
|
+def _check_unmask(name, unmask, unmask_runtime, root=None):
|
|
'''
|
|
Common code for conditionally removing masks before making changes to a
|
|
service's state.
|
|
'''
|
|
if unmask:
|
|
- unmask_(name, runtime=False)
|
|
+ unmask_(name, runtime=False, root=root)
|
|
if unmask_runtime:
|
|
- unmask_(name, runtime=True)
|
|
+ unmask_(name, runtime=True, root=root)
|
|
|
|
|
|
def _clear_context():
|
|
@@ -193,15 +203,16 @@ def _default_runlevel():
|
|
return runlevel
|
|
|
|
|
|
-def _get_systemd_services():
|
|
+def _get_systemd_services(root):
|
|
'''
|
|
Use os.listdir() to get all the unit files
|
|
'''
|
|
ret = set()
|
|
for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,):
|
|
- # Make sure user has access to the path, and if the path is a link
|
|
- # it's likely that another entry in SYSTEM_CONFIG_PATHS or LOCAL_CONFIG_PATH
|
|
- # points to it, so we can ignore it.
|
|
+ # Make sure user has access to the path, and if the path is a
|
|
+ # link it's likely that another entry in SYSTEM_CONFIG_PATHS
|
|
+ # or LOCAL_CONFIG_PATH points to it, so we can ignore it.
|
|
+ path = _root(path, root)
|
|
if os.access(path, os.R_OK) and not os.path.islink(path):
|
|
for fullname in os.listdir(path):
|
|
try:
|
|
@@ -213,19 +224,20 @@ def _get_systemd_services():
|
|
return ret
|
|
|
|
|
|
-def _get_sysv_services(systemd_services=None):
|
|
+def _get_sysv_services(root, systemd_services=None):
|
|
'''
|
|
Use os.listdir() and os.access() to get all the initscripts
|
|
'''
|
|
+ initscript_path = _root(INITSCRIPT_PATH, root)
|
|
try:
|
|
- sysv_services = os.listdir(INITSCRIPT_PATH)
|
|
+ sysv_services = os.listdir(initscript_path)
|
|
except OSError as exc:
|
|
if exc.errno == errno.ENOENT:
|
|
pass
|
|
elif exc.errno == errno.EACCES:
|
|
log.error(
|
|
'Unable to check sysvinit scripts, permission denied to %s',
|
|
- INITSCRIPT_PATH
|
|
+ initscript_path
|
|
)
|
|
else:
|
|
log.error(
|
|
@@ -236,11 +248,11 @@ def _get_sysv_services(systemd_services=None):
|
|
return []
|
|
|
|
if systemd_services is None:
|
|
- systemd_services = _get_systemd_services()
|
|
+ systemd_services = _get_systemd_services(root)
|
|
|
|
ret = []
|
|
for sysv_service in sysv_services:
|
|
- if os.access(os.path.join(INITSCRIPT_PATH, sysv_service), os.X_OK):
|
|
+ if os.access(os.path.join(initscript_path, sysv_service), os.X_OK):
|
|
if sysv_service in systemd_services:
|
|
log.debug(
|
|
'sysvinit script \'%s\' found, but systemd unit '
|
|
@@ -303,7 +315,8 @@ def _strip_scope(msg):
|
|
return '\n'.join(ret).strip()
|
|
|
|
|
|
-def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False):
|
|
+def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False,
|
|
+ root=None):
|
|
'''
|
|
Build a systemctl command line. Treat unit names without one
|
|
of the valid suffixes as a service.
|
|
@@ -316,6 +329,8 @@ def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False):
|
|
ret.append('systemctl')
|
|
if no_block:
|
|
ret.append('--no-block')
|
|
+ if root:
|
|
+ ret.extend(['--root', root])
|
|
if isinstance(action, six.string_types):
|
|
action = shlex.split(action)
|
|
ret.extend(action)
|
|
@@ -343,26 +358,27 @@ def _systemctl_status(name):
|
|
return __context__[contextkey]
|
|
|
|
|
|
-def _sysv_enabled(name):
|
|
+def _sysv_enabled(name, root):
|
|
'''
|
|
A System-V style service is assumed disabled if the "startup" symlink
|
|
(starts with "S") to its script is found in /etc/init.d in the current
|
|
runlevel.
|
|
'''
|
|
# Find exact match (disambiguate matches like "S01anacron" for cron)
|
|
- for match in glob.glob('/etc/rc%s.d/S*%s' % (_runlevel(), name)):
|
|
+ rc = _root('/etc/rc{}.d/S*{}'.format(_runlevel(), name), root)
|
|
+ for match in glob.glob(rc):
|
|
if re.match(r'S\d{,2}%s' % name, os.path.basename(match)):
|
|
return True
|
|
return False
|
|
|
|
|
|
-def _untracked_custom_unit_found(name):
|
|
+def _untracked_custom_unit_found(name, root=None):
|
|
'''
|
|
If the passed service name is not available, but a unit file exist in
|
|
/etc/systemd/system, return True. Otherwise, return False.
|
|
'''
|
|
- unit_path = os.path.join('/etc/systemd/system',
|
|
- _canonical_unit_name(name))
|
|
+ system = _root('/etc/systemd/system', root)
|
|
+ unit_path = os.path.join(system, _canonical_unit_name(name))
|
|
return os.access(unit_path, os.R_OK) and not _check_available(name)
|
|
|
|
|
|
@@ -371,7 +387,8 @@ def _unit_file_changed(name):
|
|
Returns True if systemctl reports that the unit file has changed, otherwise
|
|
returns False.
|
|
'''
|
|
- return "'systemctl daemon-reload'" in _systemctl_status(name)['stdout'].lower()
|
|
+ status = _systemctl_status(name)['stdout'].lower()
|
|
+ return "'systemctl daemon-reload'" in status
|
|
|
|
|
|
def systemctl_reload():
|
|
@@ -389,8 +406,7 @@ def systemctl_reload():
|
|
out = __salt__['cmd.run_all'](
|
|
_systemctl_cmd('--system daemon-reload'),
|
|
python_shell=False,
|
|
- redirect_stderr=True
|
|
- )
|
|
+ redirect_stderr=True)
|
|
if out['retcode'] != 0:
|
|
raise CommandExecutionError(
|
|
'Problem performing systemctl daemon-reload: %s' % out['stdout']
|
|
@@ -414,8 +430,7 @@ def get_running():
|
|
out = __salt__['cmd.run'](
|
|
_systemctl_cmd('--full --no-legend --no-pager'),
|
|
python_shell=False,
|
|
- ignore_retcode=True,
|
|
- )
|
|
+ ignore_retcode=True)
|
|
for line in salt.utils.itertools.split(out, '\n'):
|
|
try:
|
|
comps = line.strip().split()
|
|
@@ -438,10 +453,13 @@ def get_running():
|
|
return sorted(ret)
|
|
|
|
|
|
-def get_enabled():
|
|
+def get_enabled(root=None):
|
|
'''
|
|
Return a list of all enabled services
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -452,10 +470,10 @@ def get_enabled():
|
|
# Get enabled systemd units. Can't use --state=enabled here because it's
|
|
# not present until systemd 216.
|
|
out = __salt__['cmd.run'](
|
|
- _systemctl_cmd('--full --no-legend --no-pager list-unit-files'),
|
|
+ _systemctl_cmd('--full --no-legend --no-pager list-unit-files',
|
|
+ root=root),
|
|
python_shell=False,
|
|
- ignore_retcode=True,
|
|
- )
|
|
+ ignore_retcode=True)
|
|
for line in salt.utils.itertools.split(out, '\n'):
|
|
try:
|
|
fullname, unit_state = line.strip().split(None, 1)
|
|
@@ -473,15 +491,18 @@ def get_enabled():
|
|
|
|
# Add in any sysvinit services that are enabled
|
|
ret.update(set(
|
|
- [x for x in _get_sysv_services() if _sysv_enabled(x)]
|
|
+ [x for x in _get_sysv_services(root) if _sysv_enabled(x, root)]
|
|
))
|
|
return sorted(ret)
|
|
|
|
|
|
-def get_disabled():
|
|
+def get_disabled(root=None):
|
|
'''
|
|
Return a list of all disabled services
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -492,10 +513,10 @@ def get_disabled():
|
|
# Get disabled systemd units. Can't use --state=disabled here because it's
|
|
# not present until systemd 216.
|
|
out = __salt__['cmd.run'](
|
|
- _systemctl_cmd('--full --no-legend --no-pager list-unit-files'),
|
|
+ _systemctl_cmd('--full --no-legend --no-pager list-unit-files',
|
|
+ root=root),
|
|
python_shell=False,
|
|
- ignore_retcode=True,
|
|
- )
|
|
+ ignore_retcode=True)
|
|
for line in salt.utils.itertools.split(out, '\n'):
|
|
try:
|
|
fullname, unit_state = line.strip().split(None, 1)
|
|
@@ -513,17 +534,20 @@ def get_disabled():
|
|
|
|
# Add in any sysvinit services that are disabled
|
|
ret.update(set(
|
|
- [x for x in _get_sysv_services() if not _sysv_enabled(x)]
|
|
+ [x for x in _get_sysv_services(root) if not _sysv_enabled(x, root)]
|
|
))
|
|
return sorted(ret)
|
|
|
|
|
|
-def get_static():
|
|
+def get_static(root=None):
|
|
'''
|
|
.. versionadded:: 2015.8.5
|
|
|
|
Return a list of all static services
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -534,10 +558,10 @@ def get_static():
|
|
# Get static systemd units. Can't use --state=static here because it's
|
|
# not present until systemd 216.
|
|
out = __salt__['cmd.run'](
|
|
- _systemctl_cmd('--full --no-legend --no-pager list-unit-files'),
|
|
+ _systemctl_cmd('--full --no-legend --no-pager list-unit-files',
|
|
+ root=root),
|
|
python_shell=False,
|
|
- ignore_retcode=True,
|
|
- )
|
|
+ ignore_retcode=True)
|
|
for line in salt.utils.itertools.split(out, '\n'):
|
|
try:
|
|
fullname, unit_state = line.strip().split(None, 1)
|
|
@@ -557,18 +581,21 @@ def get_static():
|
|
return sorted(ret)
|
|
|
|
|
|
-def get_all():
|
|
+def get_all(root=None):
|
|
'''
|
|
Return a list of all available services
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' service.get_all
|
|
'''
|
|
- ret = _get_systemd_services()
|
|
- ret.update(set(_get_sysv_services(systemd_services=ret)))
|
|
+ ret = _get_systemd_services(root)
|
|
+ ret.update(set(_get_sysv_services(root, systemd_services=ret)))
|
|
return sorted(ret)
|
|
|
|
|
|
@@ -606,7 +633,7 @@ def missing(name):
|
|
return not available(name)
|
|
|
|
|
|
-def unmask_(name, runtime=False):
|
|
+def unmask_(name, runtime=False, root=None):
|
|
'''
|
|
.. versionadded:: 2015.5.0
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
@@ -633,6 +660,9 @@ def unmask_(name, runtime=False):
|
|
removes a runtime mask only when this argument is set to ``True``,
|
|
and otherwise removes an indefinite mask.
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -641,15 +671,16 @@ def unmask_(name, runtime=False):
|
|
salt '*' service.unmask foo runtime=True
|
|
'''
|
|
_check_for_unit_changes(name)
|
|
- if not masked(name, runtime):
|
|
+ if not masked(name, runtime, root=root):
|
|
log.debug('Service \'%s\' is not %smasked',
|
|
name, 'runtime-' if runtime else '')
|
|
return True
|
|
|
|
cmd = 'unmask --runtime' if runtime else 'unmask'
|
|
- out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name, systemd_scope=True),
|
|
- python_shell=False,
|
|
- redirect_stderr=True)
|
|
+ out = __salt__['cmd.run_all'](
|
|
+ _systemctl_cmd(cmd, name, systemd_scope=True, root=root),
|
|
+ python_shell=False,
|
|
+ redirect_stderr=True)
|
|
|
|
if out['retcode'] != 0:
|
|
raise CommandExecutionError('Failed to unmask service \'%s\'' % name)
|
|
@@ -657,7 +688,7 @@ def unmask_(name, runtime=False):
|
|
return True
|
|
|
|
|
|
-def mask(name, runtime=False):
|
|
+def mask(name, runtime=False, root=None):
|
|
'''
|
|
.. versionadded:: 2015.5.0
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
@@ -678,6 +709,9 @@ def mask(name, runtime=False):
|
|
|
|
.. versionadded:: 2015.8.5
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -688,9 +722,10 @@ def mask(name, runtime=False):
|
|
_check_for_unit_changes(name)
|
|
|
|
cmd = 'mask --runtime' if runtime else 'mask'
|
|
- out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name, systemd_scope=True),
|
|
- python_shell=False,
|
|
- redirect_stderr=True)
|
|
+ out = __salt__['cmd.run_all'](
|
|
+ _systemctl_cmd(cmd, name, systemd_scope=True, root=root),
|
|
+ python_shell=False,
|
|
+ redirect_stderr=True)
|
|
|
|
if out['retcode'] != 0:
|
|
raise CommandExecutionError(
|
|
@@ -701,7 +736,7 @@ def mask(name, runtime=False):
|
|
return True
|
|
|
|
|
|
-def masked(name, runtime=False):
|
|
+def masked(name, runtime=False, root=None):
|
|
'''
|
|
.. versionadded:: 2015.8.0
|
|
.. versionchanged:: 2015.8.5
|
|
@@ -731,6 +766,9 @@ def masked(name, runtime=False):
|
|
only checks for runtime masks if this argument is set to ``True``.
|
|
Otherwise, it will check for an indefinite mask.
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -739,7 +777,7 @@ def masked(name, runtime=False):
|
|
salt '*' service.masked foo runtime=True
|
|
'''
|
|
_check_for_unit_changes(name)
|
|
- root_dir = '/run' if runtime else '/etc'
|
|
+ root_dir = _root('/run' if runtime else '/etc', root)
|
|
link_path = os.path.join(root_dir,
|
|
'systemd',
|
|
'system',
|
|
@@ -1055,9 +1093,10 @@ def status(name, sig=None): # pylint: disable=unused-argument
|
|
results = {}
|
|
for service in services:
|
|
_check_for_unit_changes(service)
|
|
- results[service] = __salt__['cmd.retcode'](_systemctl_cmd('is-active', service),
|
|
- python_shell=False,
|
|
- ignore_retcode=True) == 0
|
|
+ results[service] = __salt__['cmd.retcode'](
|
|
+ _systemctl_cmd('is-active', service),
|
|
+ python_shell=False,
|
|
+ ignore_retcode=True) == 0
|
|
if contains_globbing:
|
|
return results
|
|
return results[name]
|
|
@@ -1065,7 +1104,8 @@ def status(name, sig=None): # pylint: disable=unused-argument
|
|
|
|
# **kwargs is required to maintain consistency with the API established by
|
|
# Salt's service management states.
|
|
-def enable(name, no_block=False, unmask=False, unmask_runtime=False, **kwargs): # pylint: disable=unused-argument
|
|
+def enable(name, no_block=False, unmask=False, unmask_runtime=False,
|
|
+ root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
@@ -1101,6 +1141,9 @@ def enable(name, no_block=False, unmask=False, unmask_runtime=False, **kwargs):
|
|
In previous releases, Salt would simply unmask a service before
|
|
enabling. This behavior is no longer the default.
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1108,8 +1151,8 @@ def enable(name, no_block=False, unmask=False, unmask_runtime=False, **kwargs):
|
|
salt '*' service.enable <service name>
|
|
'''
|
|
_check_for_unit_changes(name)
|
|
- _check_unmask(name, unmask, unmask_runtime)
|
|
- if name in _get_sysv_services():
|
|
+ _check_unmask(name, unmask, unmask_runtime, root)
|
|
+ if name in _get_sysv_services(root):
|
|
cmd = []
|
|
if salt.utils.systemd.has_scope(__context__) \
|
|
and __salt__['config.get']('systemd.scope', True):
|
|
@@ -1123,7 +1166,8 @@ def enable(name, no_block=False, unmask=False, unmask_runtime=False, **kwargs):
|
|
python_shell=False,
|
|
ignore_retcode=True) == 0
|
|
ret = __salt__['cmd.run_all'](
|
|
- _systemctl_cmd('enable', name, systemd_scope=True, no_block=no_block),
|
|
+ _systemctl_cmd('enable', name, systemd_scope=True, no_block=no_block,
|
|
+ root=root),
|
|
python_shell=False,
|
|
ignore_retcode=True)
|
|
|
|
@@ -1137,7 +1181,7 @@ def enable(name, no_block=False, unmask=False, unmask_runtime=False, **kwargs):
|
|
|
|
# The unused kwargs argument is required to maintain consistency with the API
|
|
# established by Salt's service management states.
|
|
-def disable(name, no_block=False, **kwargs): # pylint: disable=unused-argument
|
|
+def disable(name, no_block=False, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
@@ -1157,6 +1201,9 @@ def disable(name, no_block=False, **kwargs): # pylint: disable=unused-argument
|
|
|
|
.. versionadded:: 2017.7.0
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1164,7 +1211,7 @@ def disable(name, no_block=False, **kwargs): # pylint: disable=unused-argument
|
|
salt '*' service.disable <service name>
|
|
'''
|
|
_check_for_unit_changes(name)
|
|
- if name in _get_sysv_services():
|
|
+ if name in _get_sysv_services(root):
|
|
cmd = []
|
|
if salt.utils.systemd.has_scope(__context__) \
|
|
and __salt__['config.get']('systemd.scope', True):
|
|
@@ -1179,17 +1226,21 @@ def disable(name, no_block=False, **kwargs): # pylint: disable=unused-argument
|
|
ignore_retcode=True) == 0
|
|
# Using cmd.run_all instead of cmd.retcode here to make unit tests easier
|
|
return __salt__['cmd.run_all'](
|
|
- _systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block),
|
|
+ _systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block,
|
|
+ root=root),
|
|
python_shell=False,
|
|
ignore_retcode=True)['retcode'] == 0
|
|
|
|
|
|
# The unused kwargs argument is required to maintain consistency with the API
|
|
# established by Salt's service management states.
|
|
-def enabled(name, **kwargs): # pylint: disable=unused-argument
|
|
+def enabled(name, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
Return if the named service is enabled to start on boot
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1199,7 +1250,7 @@ def enabled(name, **kwargs): # pylint: disable=unused-argument
|
|
# Try 'systemctl is-enabled' first, then look for a symlink created by
|
|
# systemctl (older systemd releases did not support using is-enabled to
|
|
# check templated services), and lastly check for a sysvinit service.
|
|
- if __salt__['cmd.retcode'](_systemctl_cmd('is-enabled', name),
|
|
+ if __salt__['cmd.retcode'](_systemctl_cmd('is-enabled', name, root=root),
|
|
python_shell=False,
|
|
ignore_retcode=True) == 0:
|
|
return True
|
|
@@ -1207,43 +1258,50 @@ def enabled(name, **kwargs): # pylint: disable=unused-argument
|
|
# On older systemd releases, templated services could not be checked
|
|
# with ``systemctl is-enabled``. As a fallback, look for the symlinks
|
|
# created by systemctl when enabling templated services.
|
|
- cmd = ['find', LOCAL_CONFIG_PATH, '-name', name,
|
|
+ local_config_path = _root(LOCAL_CONFIG_PATH, '/')
|
|
+ cmd = ['find', local_config_path, '-name', name,
|
|
'-type', 'l', '-print', '-quit']
|
|
# If the find command returns any matches, there will be output and the
|
|
# string will be non-empty.
|
|
if bool(__salt__['cmd.run'](cmd, python_shell=False)):
|
|
return True
|
|
- elif name in _get_sysv_services():
|
|
- return _sysv_enabled(name)
|
|
+ elif name in _get_sysv_services(root):
|
|
+ return _sysv_enabled(name, root)
|
|
|
|
return False
|
|
|
|
|
|
-def disabled(name):
|
|
+def disabled(name, root=None):
|
|
'''
|
|
Return if the named service is disabled from starting on boot
|
|
|
|
+ root
|
|
+ Enable/disable/mask unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' service.disabled <service name>
|
|
'''
|
|
- return not enabled(name)
|
|
+ return not enabled(name, root=root)
|
|
|
|
|
|
-def show(name):
|
|
+def show(name, root=None):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
Show properties of one or more units/jobs or the manager
|
|
|
|
+ root
|
|
+ Show unit properties from the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
salt '*' service.show <service name>
|
|
'''
|
|
ret = {}
|
|
- out = __salt__['cmd.run'](_systemctl_cmd('show', name),
|
|
+ out = __salt__['cmd.run'](_systemctl_cmd('show', name, root=root),
|
|
python_shell=False)
|
|
for line in salt.utils.itertools.split(out, '\n'):
|
|
comps = line.split('=')
|
|
@@ -1263,20 +1321,78 @@ def show(name):
|
|
return ret
|
|
|
|
|
|
-def execs():
|
|
+def execs(root=None):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
Return a list of all files specified as ``ExecStart`` for all services.
|
|
|
|
+ root
|
|
+ List services from unit files in the specified root directory
|
|
+
|
|
CLI Example:
|
|
|
|
salt '*' service.execs
|
|
'''
|
|
ret = {}
|
|
- for service in get_all():
|
|
- data = show(service)
|
|
+ for service in get_all(root=root):
|
|
+ data = show(service, root=root)
|
|
if 'ExecStart' not in data:
|
|
continue
|
|
ret[service] = data['ExecStart']['path']
|
|
return ret
|
|
+
|
|
+
|
|
+def firstboot(locale=None, locale_message=None, keymap=None,
|
|
+ timezone=None, hostname=None, machine_id=None,
|
|
+ root=None):
|
|
+ '''
|
|
+ .. versionadded:: TBD
|
|
+
|
|
+ Call systemd-firstboot to configure basic settings of the system
|
|
+
|
|
+ locale
|
|
+ Set primary locale (LANG=)
|
|
+
|
|
+ locale_message
|
|
+ Set message locale (LC_MESSAGES=)
|
|
+
|
|
+ keymap
|
|
+ Set keymap
|
|
+
|
|
+ timezone
|
|
+ Set timezone
|
|
+
|
|
+ hostname
|
|
+ Set host name
|
|
+
|
|
+ machine_id
|
|
+ Set machine ID
|
|
+
|
|
+ root
|
|
+ Operate on an alternative filesystem root
|
|
+
|
|
+ CLI Example:
|
|
+
|
|
+ salt '*' service.firstboot keymap=jp locale=en_US.UTF-8
|
|
+
|
|
+ '''
|
|
+ cmd = ['systemd-firstboot']
|
|
+ parameters = [('locale', locale),
|
|
+ ('locale-message', locale_message),
|
|
+ ('keymap', keymap),
|
|
+ ('timezone', timezone),
|
|
+ ('hostname', hostname),
|
|
+ ('machine-ID', machine_id),
|
|
+ ('root', root)]
|
|
+ for parameter, value in parameters:
|
|
+ if value:
|
|
+ cmd.extend(['--{}'.format(parameter), str(value)])
|
|
+
|
|
+ out = __salt__['cmd.run_all'](cmd)
|
|
+
|
|
+ if out['retcode'] != 0:
|
|
+ raise CommandExecutionError(
|
|
+ 'systemd-firstboot error: {}'.format(out['stderr']))
|
|
+
|
|
+ return True
|
|
diff --git a/salt/modules/useradd.py b/salt/modules/useradd.py
|
|
index e370dd4bb3..e38a094ed2 100644
|
|
--- a/salt/modules/useradd.py
|
|
+++ b/salt/modules/useradd.py
|
|
@@ -17,6 +17,8 @@ except ImportError:
|
|
HAS_PWD = False
|
|
import logging
|
|
import copy
|
|
+import functools
|
|
+import os
|
|
|
|
# Import salt libs
|
|
import salt.utils.data
|
|
@@ -55,12 +57,17 @@ def _quote_username(name):
|
|
return salt.utils.stringutils.to_str(name)
|
|
|
|
|
|
-def _get_gecos(name):
|
|
+def _get_gecos(name, root=None):
|
|
'''
|
|
Retrieve GECOS field info and return it in dictionary form
|
|
'''
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
+ getpwnam = functools.partial(_getpwnam, root=root)
|
|
+ else:
|
|
+ getpwnam = functools.partial(pwd.getpwnam)
|
|
gecos_field = salt.utils.stringutils.to_unicode(
|
|
- pwd.getpwnam(_quote_username(name)).pw_gecos).split(',', 4)
|
|
+ getpwnam(_quote_username(name)).pw_gecos).split(',', 4)
|
|
+
|
|
if not gecos_field:
|
|
return {}
|
|
else:
|
|
@@ -96,7 +103,7 @@ def _update_gecos(name, key, value, root=None):
|
|
value = six.text_type(value)
|
|
else:
|
|
value = salt.utils.stringutils.to_unicode(value)
|
|
- pre_info = _get_gecos(name)
|
|
+ pre_info = _get_gecos(name, root=root)
|
|
if not pre_info:
|
|
return False
|
|
if value == pre_info[key]:
|
|
@@ -104,14 +111,13 @@ def _update_gecos(name, key, value, root=None):
|
|
gecos_data = copy.deepcopy(pre_info)
|
|
gecos_data[key] = value
|
|
|
|
- cmd = ['usermod', '-c', _build_gecos(gecos_data), name]
|
|
-
|
|
+ cmd = ['usermod']
|
|
if root is not None and __grains__['kernel'] != 'AIX':
|
|
cmd.extend(('-R', root))
|
|
+ cmd.extend(('-c', _build_gecos(gecos_data), name))
|
|
|
|
__salt__['cmd.run'](cmd, python_shell=False)
|
|
- post_info = info(name)
|
|
- return _get_gecos(name).get(key) == value
|
|
+ return _get_gecos(name, root=root).get(key) == value
|
|
|
|
|
|
def add(name,
|
|
@@ -129,11 +135,62 @@ def add(name,
|
|
other='',
|
|
createhome=True,
|
|
loginclass=None,
|
|
- root=None,
|
|
- nologinit=False):
|
|
+ nologinit=False,
|
|
+ root=None):
|
|
'''
|
|
Add a user to the minion
|
|
|
|
+ name
|
|
+ Username LOGIN to add
|
|
+
|
|
+ uid
|
|
+ User ID of the new account
|
|
+
|
|
+ gid
|
|
+ Name or ID of the primary group of the new account
|
|
+
|
|
+ groups
|
|
+ List of supplementary groups of the new account
|
|
+
|
|
+ home
|
|
+ Home directory of the new account
|
|
+
|
|
+ shell
|
|
+ Login shell of the new account
|
|
+
|
|
+ unique
|
|
+ Allow the creation of a user with a duplicate (non-unique) UID
|
|
+
|
|
+ system
|
|
+ Create a system account
|
|
+
|
|
+ fullname
|
|
+ GECOS field for the full name
|
|
+
|
|
+ roomnumber
|
|
+ GECOS field for the room number
|
|
+
|
|
+ workphone
|
|
+ GECOS field for the work phone
|
|
+
|
|
+ homephone
|
|
+ GECOS field for the home phone
|
|
+
|
|
+ other
|
|
+ GECOS field for other information
|
|
+
|
|
+ createhome
|
|
+ Create the user's home directory
|
|
+
|
|
+ loginclass
|
|
+ Login class for the new account (OpenBSD)
|
|
+
|
|
+ nologinit
|
|
+ Do not add the user to the lastlog and faillog databases
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -231,17 +288,17 @@ def add(name,
|
|
# user does exist, and B) running useradd again would result in a
|
|
# nonzero exit status and be interpreted as a False result.
|
|
if groups:
|
|
- chgroups(name, groups)
|
|
+ chgroups(name, groups, root=root)
|
|
if fullname:
|
|
- chfullname(name, fullname)
|
|
+ chfullname(name, fullname, root=root)
|
|
if roomnumber:
|
|
- chroomnumber(name, roomnumber)
|
|
+ chroomnumber(name, roomnumber, root=root)
|
|
if workphone:
|
|
- chworkphone(name, workphone)
|
|
+ chworkphone(name, workphone, root=root)
|
|
if homephone:
|
|
- chhomephone(name, homephone)
|
|
+ chhomephone(name, homephone, root=root)
|
|
if other:
|
|
- chother(name, other)
|
|
+ chother(name, other, root=root)
|
|
return True
|
|
|
|
|
|
@@ -249,6 +306,18 @@ def delete(name, remove=False, force=False, root=None):
|
|
'''
|
|
Remove a user from the minion
|
|
|
|
+ name
|
|
+ Username to delete
|
|
+
|
|
+ remove
|
|
+ Remove home directory and mail spool
|
|
+
|
|
+ force
|
|
+ Force some actions that would fail otherwise
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -292,10 +361,16 @@ def delete(name, remove=False, force=False, root=None):
|
|
return False
|
|
|
|
|
|
-def getent(refresh=False):
|
|
+def getent(refresh=False, root=None):
|
|
'''
|
|
Return the list of all info for all users
|
|
|
|
+ refresh
|
|
+ Force a refresh of user information
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -306,72 +381,106 @@ def getent(refresh=False):
|
|
return __context__['user.getent']
|
|
|
|
ret = []
|
|
- for data in pwd.getpwall():
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
+ getpwall = functools.partial(_getpwall, root=root)
|
|
+ else:
|
|
+ getpwall = functools.partial(pwd.getpwall)
|
|
+
|
|
+ for data in getpwall():
|
|
ret.append(_format_info(data))
|
|
__context__['user.getent'] = ret
|
|
return ret
|
|
|
|
|
|
-def chuid(name, uid):
|
|
+def _chattrib(name, key, value, param, persist=False, root=None):
|
|
+ '''
|
|
+ Change an attribute for a named user
|
|
+ '''
|
|
+ pre_info = info(name, root=root)
|
|
+ if not pre_info:
|
|
+ raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
|
|
+
|
|
+ if value == pre_info[key]:
|
|
+ return True
|
|
+
|
|
+ cmd = ['usermod']
|
|
+
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
+ cmd.extend(('-R', root))
|
|
+
|
|
+ if persist and __grains__['kernel'] != 'OpenBSD':
|
|
+ cmd.append('-m')
|
|
+
|
|
+ cmd.extend((param, value, name))
|
|
+
|
|
+ __salt__['cmd.run'](cmd, python_shell=False)
|
|
+ return info(name, root=root).get(key) == value
|
|
+
|
|
+
|
|
+def chuid(name, uid, root=None):
|
|
'''
|
|
Change the uid for a named user
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ uid
|
|
+ New UID for the user account
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chuid foo 4376
|
|
'''
|
|
- pre_info = info(name)
|
|
- if uid == pre_info['uid']:
|
|
- return True
|
|
- cmd = ['usermod', '-u', uid, name]
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- return info(name).get('uid') == uid
|
|
+ return _chattrib(name, 'uid', uid, '-u', root=root)
|
|
|
|
|
|
def chgid(name, gid, root=None):
|
|
'''
|
|
Change the default group of the user
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ gid
|
|
+ Force use GID as new primary group
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chgid foo 4376
|
|
'''
|
|
- pre_info = info(name)
|
|
- if gid == pre_info['gid']:
|
|
- return True
|
|
- cmd = ['usermod', '-g', gid, name]
|
|
-
|
|
- if root is not None and __grains__['kernel'] != 'AIX':
|
|
- cmd.extend(('-R', root))
|
|
-
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- return info(name).get('gid') == gid
|
|
+ return _chattrib(name, 'gid', gid, '-g', root=root)
|
|
|
|
|
|
def chshell(name, shell, root=None):
|
|
'''
|
|
Change the default shell of the user
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ shell
|
|
+ New login shell for the user account
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chshell foo /bin/zsh
|
|
'''
|
|
- pre_info = info(name)
|
|
- if shell == pre_info['shell']:
|
|
- return True
|
|
- cmd = ['usermod', '-s', shell, name]
|
|
-
|
|
- if root is not None and __grains__['kernel'] != 'AIX':
|
|
- cmd.extend(('-R', root))
|
|
-
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- return info(name).get('shell') == shell
|
|
+ return _chattrib(name, 'shell', shell, '-s', root=root)
|
|
|
|
|
|
def chhome(name, home, persist=False, root=None):
|
|
@@ -379,25 +488,25 @@ def chhome(name, home, persist=False, root=None):
|
|
Change the home directory of the user, pass True for persist to move files
|
|
to the new home directory if the old home directory exist.
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ home
|
|
+ New home directory for the user account
|
|
+
|
|
+ persist
|
|
+ Move contents of the home directory to the new location
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chhome foo /home/users/foo True
|
|
'''
|
|
- pre_info = info(name)
|
|
- if home == pre_info['home']:
|
|
- return True
|
|
- cmd = ['usermod', '-d', home]
|
|
-
|
|
- if root is not None and __grains__['kernel'] != 'AIX':
|
|
- cmd.extend(('-R', root))
|
|
-
|
|
- if persist and __grains__['kernel'] != 'OpenBSD':
|
|
- cmd.append('-m')
|
|
- cmd.append(name)
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- return info(name).get('home') == home
|
|
+ return _chattrib(name, 'home', home, '-d', persist=persist, root=root)
|
|
|
|
|
|
def chgroups(name, groups, append=False, root=None):
|
|
@@ -414,6 +523,9 @@ def chgroups(name, groups, append=False, root=None):
|
|
If ``True``, append the specified group(s). Otherwise, this function
|
|
will replace the user's groups with the specified group(s).
|
|
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -460,20 +572,29 @@ def chgroups(name, groups, append=False, root=None):
|
|
return result['retcode'] == 0
|
|
|
|
|
|
-def chfullname(name, fullname):
|
|
+def chfullname(name, fullname, root=None):
|
|
'''
|
|
Change the user's Full Name
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ fullname
|
|
+ GECOS field for the full name
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chfullname foo "Foo Bar"
|
|
'''
|
|
- return _update_gecos(name, 'fullname', fullname)
|
|
+ return _update_gecos(name, 'fullname', fullname, root=root)
|
|
|
|
|
|
-def chroomnumber(name, roomnumber):
|
|
+def chroomnumber(name, roomnumber, root=None):
|
|
'''
|
|
Change the user's Room Number
|
|
|
|
@@ -483,52 +604,88 @@ def chroomnumber(name, roomnumber):
|
|
|
|
salt '*' user.chroomnumber foo 123
|
|
'''
|
|
- return _update_gecos(name, 'roomnumber', roomnumber)
|
|
+ return _update_gecos(name, 'roomnumber', roomnumber, root=root)
|
|
|
|
|
|
-def chworkphone(name, workphone):
|
|
+def chworkphone(name, workphone, root=None):
|
|
'''
|
|
Change the user's Work Phone
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ workphone
|
|
+ GECOS field for the work phone
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chworkphone foo 7735550123
|
|
'''
|
|
- return _update_gecos(name, 'workphone', workphone)
|
|
+ return _update_gecos(name, 'workphone', workphone, root=root)
|
|
|
|
|
|
-def chhomephone(name, homephone):
|
|
+def chhomephone(name, homephone, root=None):
|
|
'''
|
|
Change the user's Home Phone
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ homephone
|
|
+ GECOS field for the home phone
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chhomephone foo 7735551234
|
|
'''
|
|
- return _update_gecos(name, 'homephone', homephone)
|
|
+ return _update_gecos(name, 'homephone', homephone, root=root)
|
|
|
|
|
|
-def chother(name, other):
|
|
+def chother(name, other, root=None):
|
|
'''
|
|
Change the user's other GECOS attribute
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ other
|
|
+ GECOS field for other information
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.chother foobar
|
|
'''
|
|
- return _update_gecos(name, 'other', other)
|
|
+ return _update_gecos(name, 'other', other, root=root)
|
|
|
|
|
|
def chloginclass(name, loginclass, root=None):
|
|
'''
|
|
Change the default login class of the user
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ loginclass
|
|
+ Login class for the new account
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
.. note::
|
|
This function only applies to OpenBSD systems.
|
|
|
|
@@ -546,25 +703,43 @@ def chloginclass(name, loginclass, root=None):
|
|
|
|
cmd = ['usermod', '-L', loginclass, name]
|
|
|
|
- if root is not None:
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
cmd.extend(('-R', root))
|
|
|
|
__salt__['cmd.run'](cmd, python_shell=False)
|
|
return get_loginclass(name) == loginclass
|
|
|
|
|
|
-def info(name):
|
|
+def info(name, root=None):
|
|
'''
|
|
Return user information
|
|
|
|
+ name
|
|
+ User to get the information
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.info root
|
|
'''
|
|
+ # If root is provided, we use a less portable solution that
|
|
+ # depends on analyzing /etc/passwd manually. Of course we cannot
|
|
+ # find users from NIS nor LDAP, but in those cases do not makes
|
|
+ # sense to provide a root parameter.
|
|
+ #
|
|
+ # Please, note that if the non-root /etc/passwd file is long the
|
|
+ # iteration can be slow.
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
+ getpwnam = functools.partial(_getpwnam, root=root)
|
|
+ else:
|
|
+ getpwnam = functools.partial(pwd.getpwnam)
|
|
+
|
|
try:
|
|
- data = pwd.getpwnam(_quote_username(name))
|
|
+ data = getpwnam(_quote_username(name))
|
|
except KeyError:
|
|
return {}
|
|
else:
|
|
@@ -575,6 +750,9 @@ def get_loginclass(name):
|
|
'''
|
|
Get the login class of the user
|
|
|
|
+ name
|
|
+ User to get the information
|
|
+
|
|
.. note::
|
|
This function only applies to OpenBSD systems.
|
|
|
|
@@ -632,6 +810,9 @@ def primary_group(name):
|
|
|
|
.. versionadded:: 2016.3.0
|
|
|
|
+ name
|
|
+ User to get the information
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -645,6 +826,9 @@ def list_groups(name):
|
|
'''
|
|
Return a list of groups the named user belongs to
|
|
|
|
+ name
|
|
+ User to get the information
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -654,43 +838,79 @@ def list_groups(name):
|
|
return salt.utils.user.get_group_list(name)
|
|
|
|
|
|
-def list_users():
|
|
+def list_users(root=None):
|
|
'''
|
|
Return a list of all users
|
|
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.list_users
|
|
'''
|
|
- return sorted([user.pw_name for user in pwd.getpwall()])
|
|
+ if root is not None and __grains__['kernel'] != 'AIX':
|
|
+ getpwall = functools.partial(_getpwall, root=root)
|
|
+ else:
|
|
+ getpwall = functools.partial(pwd.getpwall)
|
|
+
|
|
+ return sorted([user.pw_name for user in getpwall()])
|
|
|
|
|
|
def rename(name, new_name, root=None):
|
|
'''
|
|
Change the username for a named user
|
|
|
|
+ name
|
|
+ User to modify
|
|
+
|
|
+ new_name
|
|
+ New value of the login name
|
|
+
|
|
+ root
|
|
+ Directory to chroot into
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' user.rename name new_name
|
|
'''
|
|
- current_info = info(name)
|
|
- if not current_info:
|
|
- raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
|
|
+ if info(new_name, root=root):
|
|
+ raise CommandExecutionError('User \'{0}\' already exists'.format(new_name))
|
|
|
|
- new_info = info(new_name)
|
|
- if new_info:
|
|
- raise CommandExecutionError(
|
|
- 'User \'{0}\' already exists'.format(new_name)
|
|
- )
|
|
+ return _chattrib(name, 'name', new_name, '-l', root=root)
|
|
|
|
- cmd = ['usermod', '-l', new_name, name]
|
|
|
|
- if root is not None and __grains__['kernel'] != 'AIX':
|
|
- cmd.extend(('-R', root))
|
|
+def _getpwnam(name, root=None):
|
|
+ '''
|
|
+ Alternative implementation for getpwnam, that uses only /etc/passwd
|
|
+ '''
|
|
+ root = '/' if not root else root
|
|
+ passwd = os.path.join(root, 'etc/passwd')
|
|
+ with salt.utils.files.fopen(passwd) as fp_:
|
|
+ for line in fp_:
|
|
+ line = salt.utils.stringutils.to_unicode(line)
|
|
+ comps = line.strip().split(':')
|
|
+ if comps[0] == name:
|
|
+ # Generate a getpwnam compatible output
|
|
+ comps[2], comps[3] = int(comps[2]), int(comps[3])
|
|
+ return pwd.struct_passwd(comps)
|
|
+ raise KeyError
|
|
|
|
- __salt__['cmd.run'](cmd, python_shell=False)
|
|
- return info(name).get('name') == new_name
|
|
+
|
|
+def _getpwall(root=None):
|
|
+ '''
|
|
+ Alternative implementation for getpwall, that uses only /etc/passwd
|
|
+ '''
|
|
+ root = '/' if not root else root
|
|
+ passwd = os.path.join(root, 'etc/passwd')
|
|
+ with salt.utils.files.fopen(passwd) as fp_:
|
|
+ for line in fp_:
|
|
+ line = salt.utils.stringutils.to_unicode(line)
|
|
+ comps = line.strip().split(':')
|
|
+ # Generate a getpwall compatible output
|
|
+ comps[2], comps[3] = int(comps[2]), int(comps[3])
|
|
+ yield pwd.struct_passwd(comps)
|
|
diff --git a/salt/modules/xbpspkg.py b/salt/modules/xbpspkg.py
|
|
index e493f8c80f..b5d7d8a477 100644
|
|
--- a/salt/modules/xbpspkg.py
|
|
+++ b/salt/modules/xbpspkg.py
|
|
@@ -121,7 +121,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|
return ret
|
|
|
|
|
|
-def list_upgrades(refresh=True):
|
|
+def list_upgrades(refresh=True, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for all packages
|
|
|
|
@@ -247,7 +247,7 @@ def latest_version(*names, **kwargs):
|
|
available_version = latest_version
|
|
|
|
|
|
-def upgrade_available(name):
|
|
+def upgrade_available(name, **kwargs):
|
|
'''
|
|
Check whether or not an upgrade is available for a given package
|
|
|
|
@@ -260,7 +260,7 @@ def upgrade_available(name):
|
|
return latest_version(name) != ''
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(**kwargs):
|
|
'''
|
|
Update list of available packages from installed repos
|
|
|
|
@@ -300,7 +300,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs)
|
|
|
|
|
|
-def upgrade(refresh=True):
|
|
+def upgrade(refresh=True, **kwargs):
|
|
'''
|
|
Run a full system upgrade
|
|
|
|
@@ -484,7 +484,7 @@ def remove(name=None, pkgs=None, recursive=True, **kwargs):
|
|
return salt.utils.data.compare_dicts(old, new)
|
|
|
|
|
|
-def list_repos():
|
|
+def list_repos(**kwargs):
|
|
'''
|
|
List all repos known by XBPS
|
|
|
|
@@ -607,7 +607,7 @@ def add_repo(repo, conffile='/usr/share/xbps.d/15-saltstack.conf'):
|
|
return True
|
|
|
|
|
|
-def del_repo(repo):
|
|
+def del_repo(repo, **kwargs):
|
|
'''
|
|
Remove an XBPS repository from the system.
|
|
|
|
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
|
|
index 3a4fe47a45..1c5327c700 100644
|
|
--- a/salt/modules/yumpkg.py
|
|
+++ b/salt/modules/yumpkg.py
|
|
@@ -619,7 +619,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs)
|
|
|
|
|
|
-def version_cmp(pkg1, pkg2, ignore_epoch=False):
|
|
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
|
|
'''
|
|
.. versionadded:: 2015.5.4
|
|
|
|
@@ -1012,7 +1012,7 @@ def list_upgrades(refresh=True, **kwargs):
|
|
list_updates = salt.utils.functools.alias_function(list_upgrades, 'list_updates')
|
|
|
|
|
|
-def list_downloaded():
|
|
+def list_downloaded(**kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
@@ -1948,13 +1948,13 @@ def upgrade(name=None,
|
|
|
|
|
|
def update(name=None,
|
|
- pkgs=None,
|
|
- refresh=True,
|
|
- skip_verify=False,
|
|
- normalize=True,
|
|
- minimal=False,
|
|
- obsoletes=False,
|
|
- **kwargs):
|
|
+ pkgs=None,
|
|
+ refresh=True,
|
|
+ skip_verify=False,
|
|
+ normalize=True,
|
|
+ minimal=False,
|
|
+ obsoletes=False,
|
|
+ **kwargs):
|
|
'''
|
|
.. versionadded:: 2019.2.0
|
|
|
|
@@ -2647,7 +2647,7 @@ def group_install(name,
|
|
groupinstall = salt.utils.functools.alias_function(group_install, 'groupinstall')
|
|
|
|
|
|
-def list_repos(basedir=None):
|
|
+def list_repos(basedir=None, **kwargs):
|
|
'''
|
|
Lists all repos in <basedir> (default: all dirs in `reposdir` yum option).
|
|
|
|
@@ -2969,7 +2969,7 @@ def _parse_repo_file(filename):
|
|
return (headers, salt.utils.data.decode(config))
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
.. versionadded:: 2014.1.0
|
|
|
|
@@ -2988,7 +2988,7 @@ def file_list(*packages):
|
|
return __salt__['lowpkg.file_list'](*packages)
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
.. versionadded:: 2014.1.0
|
|
|
|
@@ -3007,7 +3007,7 @@ def file_dict(*packages):
|
|
return __salt__['lowpkg.file_dict'](*packages)
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
.. versionadded:: 2014.7.0
|
|
|
|
@@ -3095,7 +3095,7 @@ def modified(*packages, **flags):
|
|
|
|
|
|
@salt.utils.decorators.path.which('yumdownloader')
|
|
-def download(*packages):
|
|
+def download(*packages, **kwargs):
|
|
'''
|
|
.. versionadded:: 2015.5.0
|
|
|
|
@@ -3168,7 +3168,7 @@ def download(*packages):
|
|
return ret
|
|
|
|
|
|
-def diff(*paths):
|
|
+def diff(*paths, **kwargs):
|
|
'''
|
|
Return a formatted diff between current files and original in a package.
|
|
NOTE: this function includes all files (configuration and not), but does
|
|
@@ -3235,7 +3235,7 @@ def _get_patches(installed_only=False):
|
|
return patches
|
|
|
|
|
|
-def list_patches(refresh=False):
|
|
+def list_patches(refresh=False, **kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
@@ -3258,7 +3258,7 @@ def list_patches(refresh=False):
|
|
return _get_patches()
|
|
|
|
|
|
-def list_installed_patches():
|
|
+def list_installed_patches(**kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
|
|
index 7ac0df26c6..9d0407e674 100644
|
|
--- a/salt/modules/zypperpkg.py
|
|
+++ b/salt/modules/zypperpkg.py
|
|
@@ -99,6 +99,7 @@ class _Zypper(object):
|
|
|
|
LOCK_EXIT_CODE = 7
|
|
XML_DIRECTIVES = ['-x', '--xmlout']
|
|
+ # ZYPPER_LOCK is not affected by --root
|
|
ZYPPER_LOCK = '/var/run/zypp.pid'
|
|
TAG_RELEASED = 'zypper/released'
|
|
TAG_BLOCKED = 'zypper/blocked'
|
|
@@ -107,7 +108,6 @@ class _Zypper(object):
|
|
'''
|
|
Constructor
|
|
'''
|
|
- self.__called = False
|
|
self._reset()
|
|
|
|
def _reset(self):
|
|
@@ -129,6 +129,10 @@ class _Zypper(object):
|
|
self.__refresh = False
|
|
self.__ignore_repo_failure = False
|
|
self.__systemd_scope = False
|
|
+ self.__root = None
|
|
+
|
|
+ # Call status
|
|
+ self.__called = False
|
|
|
|
def __call__(self, *args, **kwargs):
|
|
'''
|
|
@@ -136,11 +140,17 @@ class _Zypper(object):
|
|
:param kwargs:
|
|
:return:
|
|
'''
|
|
+ # Reset after the call
|
|
+ if self.__called:
|
|
+ self._reset()
|
|
+
|
|
# Ignore exit code for 106 (repo is not available)
|
|
if 'no_repo_failure' in kwargs:
|
|
self.__ignore_repo_failure = kwargs['no_repo_failure']
|
|
if 'systemd_scope' in kwargs:
|
|
self.__systemd_scope = kwargs['systemd_scope']
|
|
+ if 'root' in kwargs:
|
|
+ self.__root = kwargs['root']
|
|
return self
|
|
|
|
def __getattr__(self, item):
|
|
@@ -153,7 +163,6 @@ class _Zypper(object):
|
|
# Reset after the call
|
|
if self.__called:
|
|
self._reset()
|
|
- self.__called = False
|
|
|
|
if item == 'xml':
|
|
self.__xml = True
|
|
@@ -284,6 +293,8 @@ class _Zypper(object):
|
|
self.__cmd.append('--xmlout')
|
|
if not self.__refresh and '--no-refresh' not in args:
|
|
self.__cmd.append('--no-refresh')
|
|
+ if self.__root:
|
|
+ self.__cmd.extend(['--root', self.__root])
|
|
|
|
self.__cmd.extend(args)
|
|
kwargs['output_loglevel'] = 'trace'
|
|
@@ -442,7 +453,7 @@ def _clean_cache():
|
|
__context__.pop(cache_name, None)
|
|
|
|
|
|
-def list_upgrades(refresh=True, **kwargs):
|
|
+def list_upgrades(refresh=True, root=None, **kwargs):
|
|
'''
|
|
List all available package upgrades on this system
|
|
|
|
@@ -451,6 +462,9 @@ def list_upgrades(refresh=True, **kwargs):
|
|
If set to False it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -458,7 +472,7 @@ def list_upgrades(refresh=True, **kwargs):
|
|
salt '*' pkg.list_upgrades
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
ret = dict()
|
|
cmd = ['list-updates']
|
|
@@ -467,7 +481,7 @@ def list_upgrades(refresh=True, **kwargs):
|
|
if not isinstance(repo_name, six.string_types):
|
|
repo_name = six.text_type(repo_name)
|
|
cmd.extend(['--repo', repo_name])
|
|
- for update_node in __zypper__.nolock.xml.call(*cmd).getElementsByTagName('update'):
|
|
+ for update_node in __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('update'):
|
|
if update_node.getAttribute('kind') == 'package':
|
|
ret[update_node.getAttribute('name')] = update_node.getAttribute('edition')
|
|
|
|
@@ -504,6 +518,9 @@ def info_installed(*names, **kwargs):
|
|
:param all_versions:
|
|
Include information for all versions of the packages installed on the minion.
|
|
|
|
+ :param root:
|
|
+ Operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -544,6 +561,9 @@ def info_available(*names, **kwargs):
|
|
If set to False it depends on zypper if a refresh is
|
|
executed or not.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -558,9 +578,11 @@ def info_available(*names, **kwargs):
|
|
else:
|
|
names = sorted(list(set(names)))
|
|
|
|
+ root = kwargs.get('root', None)
|
|
+
|
|
# Refresh db before extracting the latest package
|
|
if kwargs.get('refresh', True):
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
pkg_info = []
|
|
batch = names[:]
|
|
@@ -569,7 +591,8 @@ def info_available(*names, **kwargs):
|
|
# Run in batches
|
|
while batch:
|
|
pkg_info.extend(re.split(r"Information for package*",
|
|
- __zypper__.nolock.call('info', '-t', 'package', *batch[:batch_size])))
|
|
+ __zypper__(root=root).nolock.call('info', '-t', 'package',
|
|
+ *batch[:batch_size])))
|
|
batch = batch[batch_size:]
|
|
|
|
for pkg_data in pkg_info:
|
|
@@ -629,6 +652,9 @@ def latest_version(*names, **kwargs):
|
|
If set to False it depends on zypper if a refresh is
|
|
executed or not.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -671,6 +697,9 @@ def upgrade_available(name, **kwargs):
|
|
If set to False it depends on zypper if a refresh is
|
|
executed or not.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -687,6 +716,9 @@ def version(*names, **kwargs):
|
|
installed. If more than one package name is specified, a dict of
|
|
name/version pairs is returned.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -697,7 +729,7 @@ def version(*names, **kwargs):
|
|
return __salt__['pkg_resource.version'](*names, **kwargs) or {}
|
|
|
|
|
|
-def version_cmp(ver1, ver2, ignore_epoch=False):
|
|
+def version_cmp(ver1, ver2, ignore_epoch=False, **kwargs):
|
|
'''
|
|
.. versionadded:: 2015.5.4
|
|
|
|
@@ -719,7 +751,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
|
|
return __salt__['lowpkg.version_cmp'](ver1, ver2, ignore_epoch=ignore_epoch)
|
|
|
|
|
|
-def list_pkgs(versions_as_list=False, **kwargs):
|
|
+def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
|
|
'''
|
|
List the packages currently installed as a dict. By default, the dict
|
|
contains versions as a comma separated string::
|
|
@@ -731,6 +763,13 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|
|
|
{'<package_name>': ['<version>', '<version>']}
|
|
|
|
+ root:
|
|
+ operate on a different root directory.
|
|
+
|
|
+ includes:
|
|
+ List of types of packages to include (package, patch, pattern, product)
|
|
+ By default packages are always included
|
|
+
|
|
attr:
|
|
If a list of package attributes is specified, returned value will
|
|
contain them in addition to version, eg.::
|
|
@@ -768,12 +807,18 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|
if attr is not None:
|
|
attr = salt.utils.args.split_input(attr)
|
|
|
|
+ includes = includes if includes else []
|
|
+
|
|
contextkey = 'pkg.list_pkgs'
|
|
|
|
+ # TODO(aplanas): this cached value depends on the parameters
|
|
if contextkey not in __context__:
|
|
ret = {}
|
|
- cmd = ['rpm', '-qa', '--queryformat',
|
|
- salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n']
|
|
+ cmd = ['rpm']
|
|
+ if root:
|
|
+ cmd.extend(['--root', root])
|
|
+ cmd.extend(['-qa', '--queryformat',
|
|
+ salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'])
|
|
output = __salt__['cmd.run'](cmd,
|
|
python_shell=False,
|
|
output_loglevel='trace')
|
|
@@ -808,6 +853,28 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|
continue
|
|
_ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
|
|
|
|
+ for include in includes:
|
|
+ if include in ('pattern', 'patch'):
|
|
+ if include == 'pattern':
|
|
+ pkgs = list_installed_patterns(root=root)
|
|
+ elif include == 'patch':
|
|
+ pkgs = list_installed_patches(root=root)
|
|
+ else:
|
|
+ pkgs = []
|
|
+ for pkg in pkgs:
|
|
+ pkg_extended_name = '{}:{}'.format(include, pkg)
|
|
+ info = info_available(pkg_extended_name,
|
|
+ refresh=False,
|
|
+ root=root)
|
|
+ _ret[pkg_extended_name] = [{
|
|
+ 'epoch': None,
|
|
+ 'version': info[pkg]['version'],
|
|
+ 'release': None,
|
|
+ 'arch': info[pkg]['arch'],
|
|
+ 'install_date': None,
|
|
+ 'install_date_time_t': None,
|
|
+ }]
|
|
+
|
|
__context__[contextkey] = _ret
|
|
|
|
return __salt__['pkg_resource.format_pkg_list'](
|
|
@@ -859,6 +926,9 @@ def list_repo_pkgs(*args, **kwargs):
|
|
When ``True``, the return data for each package will be organized by
|
|
repository.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -891,7 +961,8 @@ def list_repo_pkgs(*args, **kwargs):
|
|
return True
|
|
return False
|
|
|
|
- for node in __zypper__.xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
|
|
+ root = kwargs.get('root') or None
|
|
+ for node in __zypper__(root=root).xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
|
|
pkginfo = dict(node.attributes.items())
|
|
try:
|
|
if pkginfo['kind'] != 'package':
|
|
@@ -933,23 +1004,27 @@ def list_repo_pkgs(*args, **kwargs):
|
|
return byrepo_ret
|
|
|
|
|
|
-def _get_configured_repos():
|
|
+def _get_configured_repos(root=None):
|
|
'''
|
|
Get all the info about repositories from the configurations.
|
|
'''
|
|
|
|
+ repos = os.path.join(root, os.path.relpath(REPOS, os.path.sep)) if root else REPOS
|
|
repos_cfg = configparser.ConfigParser()
|
|
- repos_cfg.read([REPOS + '/' + fname for fname in os.listdir(REPOS) if fname.endswith(".repo")])
|
|
+ if os.path.exists(repos):
|
|
+ repos_cfg.read([repos + '/' + fname for fname in os.listdir(repos) if fname.endswith(".repo")])
|
|
+ else:
|
|
+ log.warning('Repositories not found in {}'.format(repos))
|
|
|
|
return repos_cfg
|
|
|
|
|
|
-def _get_repo_info(alias, repos_cfg=None):
|
|
+def _get_repo_info(alias, repos_cfg=None, root=None):
|
|
'''
|
|
Get one repo meta-data.
|
|
'''
|
|
try:
|
|
- meta = dict((repos_cfg or _get_configured_repos()).items(alias))
|
|
+ meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
|
|
meta['alias'] = alias
|
|
for key, val in six.iteritems(meta):
|
|
if val in ['0', '1']:
|
|
@@ -961,51 +1036,60 @@ def _get_repo_info(alias, repos_cfg=None):
|
|
return {}
|
|
|
|
|
|
-def get_repo(repo, **kwargs): # pylint: disable=unused-argument
|
|
+def get_repo(repo, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
Display a repo.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.get_repo alias
|
|
'''
|
|
- return _get_repo_info(repo)
|
|
+ return _get_repo_info(repo, root=root)
|
|
|
|
|
|
-def list_repos():
|
|
+def list_repos(root=None, **kwargs):
|
|
'''
|
|
Lists all repos.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.list_repos
|
|
'''
|
|
- repos_cfg = _get_configured_repos()
|
|
+ repos_cfg = _get_configured_repos(root=root)
|
|
all_repos = {}
|
|
for alias in repos_cfg.sections():
|
|
- all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)
|
|
+ all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)
|
|
|
|
return all_repos
|
|
|
|
|
|
-def del_repo(repo):
|
|
+def del_repo(repo, root=None):
|
|
'''
|
|
Delete a repo.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.del_repo alias
|
|
'''
|
|
- repos_cfg = _get_configured_repos()
|
|
+ repos_cfg = _get_configured_repos(root=root)
|
|
for alias in repos_cfg.sections():
|
|
if alias == repo:
|
|
- doc = __zypper__.xml.call('rr', '--loose-auth', '--loose-query', alias)
|
|
+ doc = __zypper__(root=root).xml.call('rr', '--loose-auth', '--loose-query', alias)
|
|
msg = doc.getElementsByTagName('message')
|
|
if doc.getElementsByTagName('progress') and msg:
|
|
return {
|
|
@@ -1044,6 +1128,9 @@ def mod_repo(repo, **kwargs):
|
|
If set to True, automatically trust and import public GPG key for
|
|
the repository.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
Key/Value pairs may also be removed from a repo's configuration by setting
|
|
a key to a blank value. Bear in mind that a name cannot be deleted, and a
|
|
URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).
|
|
@@ -1056,7 +1143,8 @@ def mod_repo(repo, **kwargs):
|
|
salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
|
|
'''
|
|
|
|
- repos_cfg = _get_configured_repos()
|
|
+ root = kwargs.get('root') or None
|
|
+ repos_cfg = _get_configured_repos(root=root)
|
|
added = False
|
|
|
|
# An attempt to add new one?
|
|
@@ -1076,7 +1164,7 @@ def mod_repo(repo, **kwargs):
|
|
|
|
# Is there already such repo under different alias?
|
|
for alias in repos_cfg.sections():
|
|
- repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)
|
|
+ repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)
|
|
|
|
# Complete user URL, in case it is not
|
|
new_url = _urlparse(url)
|
|
@@ -1098,17 +1186,17 @@ def mod_repo(repo, **kwargs):
|
|
)
|
|
|
|
# Add new repo
|
|
- __zypper__.xml.call('ar', url, repo)
|
|
+ __zypper__(root=root).xml.call('ar', url, repo)
|
|
|
|
# Verify the repository has been added
|
|
- repos_cfg = _get_configured_repos()
|
|
+ repos_cfg = _get_configured_repos(root=root)
|
|
if repo not in repos_cfg.sections():
|
|
raise CommandExecutionError(
|
|
'Failed add new repository \'{0}\' for unspecified reason. '
|
|
'Please check zypper logs.'.format(repo))
|
|
added = True
|
|
|
|
- repo_info = _get_repo_info(repo)
|
|
+ repo_info = _get_repo_info(repo, root=root)
|
|
if (
|
|
not added and 'baseurl' in kwargs and
|
|
not (kwargs['baseurl'] == repo_info['baseurl'])
|
|
@@ -1117,8 +1205,8 @@ def mod_repo(repo, **kwargs):
|
|
# we need to remove the repository and add it again with the new baseurl
|
|
repo_info.update(kwargs)
|
|
repo_info.setdefault('cache', False)
|
|
- del_repo(repo)
|
|
- return mod_repo(repo, **repo_info)
|
|
+ del_repo(repo, root=root)
|
|
+ return mod_repo(repo, root=root, **repo_info)
|
|
|
|
# Modify added or existing repo according to the options
|
|
cmd_opt = []
|
|
@@ -1151,7 +1239,7 @@ def mod_repo(repo, **kwargs):
|
|
|
|
if cmd_opt:
|
|
cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
|
|
- __zypper__.refreshable.xml.call(*cmd_opt)
|
|
+ __zypper__(root=root).refreshable.xml.call(*cmd_opt)
|
|
|
|
comment = None
|
|
if call_refresh:
|
|
@@ -1159,23 +1247,26 @@ def mod_repo(repo, **kwargs):
|
|
# --gpg-auto-import-keys is not doing anything
|
|
# so we need to specifically refresh here with --gpg-auto-import-keys
|
|
refresh_opts = global_cmd_opt + ['refresh'] + [repo]
|
|
- __zypper__.xml.call(*refresh_opts)
|
|
+ __zypper__(root=root).xml.call(*refresh_opts)
|
|
elif not added and not cmd_opt:
|
|
comment = 'Specified arguments did not result in modification of repo'
|
|
|
|
- repo = get_repo(repo)
|
|
+ repo = get_repo(repo, root=root)
|
|
if comment:
|
|
repo['comment'] = comment
|
|
|
|
return repo
|
|
|
|
|
|
-def refresh_db():
|
|
+def refresh_db(root=None):
|
|
'''
|
|
Force a repository refresh by calling ``zypper refresh --force``, return a dict::
|
|
|
|
{'<database name>': Bool}
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1185,7 +1276,7 @@ def refresh_db():
|
|
# Remove rtag file to keep multiple refreshes from happening in pkg states
|
|
salt.utils.pkg.clear_rtag(__opts__)
|
|
ret = {}
|
|
- out = __zypper__.refreshable.call('refresh', '--force')
|
|
+ out = __zypper__(root=root).refreshable.call('refresh', '--force')
|
|
|
|
for line in out.splitlines():
|
|
if not line:
|
|
@@ -1204,6 +1295,12 @@ def refresh_db():
|
|
return ret
|
|
|
|
|
|
+def _find_types(pkgs):
|
|
+ '''Form a package names list, find prefixes of packages types.'''
|
|
+ return sorted({pkg.split(':', 1)[0] for pkg in pkgs
|
|
+ if len(pkg.split(':', 1)) == 2})
|
|
+
|
|
+
|
|
def install(name=None,
|
|
refresh=False,
|
|
fromrepo=None,
|
|
@@ -1213,6 +1310,8 @@ def install(name=None,
|
|
skip_verify=False,
|
|
version=None,
|
|
ignore_repo_failure=False,
|
|
+ no_recommends=False,
|
|
+ root=None,
|
|
**kwargs):
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
@@ -1301,6 +1400,12 @@ def install(name=None,
|
|
Zypper returns error code 106 if one of the repositories are not available for various reasons.
|
|
In case to set strict check, this parameter needs to be set to True. Default: False.
|
|
|
|
+ no_recommends
|
|
+ Do not install recommended packages, only required ones.
|
|
+
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
diff_attr:
|
|
If a list of package attributes is specified, returned value will
|
|
contain them, eg.::
|
|
@@ -1340,7 +1445,7 @@ def install(name=None,
|
|
'arch': '<new-arch>'}}}
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
try:
|
|
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs, sources, **kwargs)
|
|
@@ -1350,7 +1455,7 @@ def install(name=None,
|
|
if pkg_params is None or len(pkg_params) == 0:
|
|
return {}
|
|
|
|
- version_num = Wildcard(__zypper__)(name, version)
|
|
+ version_num = Wildcard(__zypper__(root=root))(name, version)
|
|
|
|
if version_num:
|
|
if pkgs is None and sources is None:
|
|
@@ -1375,17 +1480,20 @@ def install(name=None,
|
|
targets.append(target)
|
|
elif pkg_type == 'advisory':
|
|
targets = []
|
|
- cur_patches = list_patches()
|
|
+ cur_patches = list_patches(root=root)
|
|
for advisory_id in pkg_params:
|
|
if advisory_id not in cur_patches:
|
|
raise CommandExecutionError('Advisory id "{0}" not found'.format(advisory_id))
|
|
else:
|
|
- targets.append(advisory_id)
|
|
+ targets.append('patch:{}'.format(advisory_id))
|
|
else:
|
|
targets = pkg_params
|
|
|
|
diff_attr = kwargs.get("diff_attr")
|
|
- old = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
|
|
+
|
|
+ includes = _find_types(targets)
|
|
+ old = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
|
|
+
|
|
downgrades = []
|
|
if fromrepo:
|
|
fromrepoopt = ['--force', '--force-resolution', '--from', fromrepo]
|
|
@@ -1404,10 +1512,10 @@ def install(name=None,
|
|
cmd_install.append('--download-only')
|
|
if fromrepo:
|
|
cmd_install.extend(fromrepoopt)
|
|
+ if no_recommends:
|
|
+ cmd_install.append('--no-recommends')
|
|
|
|
errors = []
|
|
- if pkg_type == 'advisory':
|
|
- targets = ["patch:{0}".format(t) for t in targets]
|
|
|
|
# Split the targets into batches of 500 packages each, so that
|
|
# the maximal length of the command line is not broken
|
|
@@ -1415,7 +1523,7 @@ def install(name=None,
|
|
while targets:
|
|
cmd = cmd_install + targets[:500]
|
|
targets = targets[500:]
|
|
- for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope).call(*cmd).splitlines():
|
|
+ for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope, root=root).call(*cmd).splitlines():
|
|
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
|
|
if match:
|
|
downgrades.append(match.group(1))
|
|
@@ -1423,12 +1531,17 @@ def install(name=None,
|
|
while downgrades:
|
|
cmd = cmd_install + ['--force'] + downgrades[:500]
|
|
downgrades = downgrades[500:]
|
|
- __zypper__(no_repo_failure=ignore_repo_failure).call(*cmd)
|
|
+ __zypper__(no_repo_failure=ignore_repo_failure, root=root).call(*cmd)
|
|
|
|
_clean_cache()
|
|
- new = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
|
|
+ new = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
|
|
ret = salt.utils.data.compare_dicts(old, new)
|
|
|
|
+ # If something else from packages are included in the search,
|
|
+ # better clean the cache.
|
|
+ if includes:
|
|
+ _clean_cache()
|
|
+
|
|
if errors:
|
|
raise CommandExecutionError(
|
|
'Problem encountered {0} package(s)'.format(
|
|
@@ -1446,6 +1559,8 @@ def upgrade(refresh=True,
|
|
fromrepo=None,
|
|
novendorchange=False,
|
|
skip_verify=False,
|
|
+ no_recommends=False,
|
|
+ root=None,
|
|
**kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
@@ -1485,6 +1600,12 @@ def upgrade(refresh=True,
|
|
skip_verify
|
|
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
|
|
|
|
+ no_recommends
|
|
+ Do not install recommended packages, only required ones.
|
|
+
|
|
+ root
|
|
+ Operate on a different root directory.
|
|
+
|
|
Returns a dictionary containing the changes:
|
|
|
|
.. code-block:: python
|
|
@@ -1507,7 +1628,7 @@ def upgrade(refresh=True,
|
|
cmd_update.insert(0, '--no-gpg-checks')
|
|
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
if dryrun:
|
|
cmd_update.append('--dry-run')
|
|
@@ -1526,16 +1647,20 @@ def upgrade(refresh=True,
|
|
else:
|
|
log.warning('Disabling vendor changes is not supported on this Zypper version')
|
|
|
|
+ if no_recommends:
|
|
+ cmd_update.append('--no-recommends')
|
|
+ log.info('Disabling recommendations')
|
|
+
|
|
if dryrun:
|
|
# Creates a solver test case for debugging.
|
|
log.info('Executing debugsolver and performing a dry-run dist-upgrade')
|
|
- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update + ['--debug-solver'])
|
|
+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update + ['--debug-solver'])
|
|
|
|
- old = list_pkgs()
|
|
+ old = list_pkgs(root=root)
|
|
|
|
- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update)
|
|
+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update)
|
|
_clean_cache()
|
|
- new = list_pkgs()
|
|
+ new = list_pkgs(root=root)
|
|
ret = salt.utils.data.compare_dicts(old, new)
|
|
|
|
if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:
|
|
@@ -1556,7 +1681,7 @@ def upgrade(refresh=True,
|
|
return ret
|
|
|
|
|
|
-def _uninstall(name=None, pkgs=None):
|
|
+def _uninstall(name=None, pkgs=None, root=None):
|
|
'''
|
|
Remove and purge do identical things but with different Zypper commands,
|
|
this function performs the common logic.
|
|
@@ -1566,7 +1691,8 @@ def _uninstall(name=None, pkgs=None):
|
|
except MinionError as exc:
|
|
raise CommandExecutionError(exc)
|
|
|
|
- old = list_pkgs()
|
|
+ includes = _find_types(pkg_params.keys())
|
|
+ old = list_pkgs(root=root, includes=includes)
|
|
targets = []
|
|
for target in pkg_params:
|
|
# Check if package version set to be removed is actually installed:
|
|
@@ -1582,11 +1708,12 @@ def _uninstall(name=None, pkgs=None):
|
|
|
|
errors = []
|
|
while targets:
|
|
- __zypper__(systemd_scope=systemd_scope).call('remove', *targets[:500])
|
|
+ __zypper__(systemd_scope=systemd_scope, root=root).call('remove', *targets[:500])
|
|
targets = targets[500:]
|
|
|
|
_clean_cache()
|
|
- ret = salt.utils.data.compare_dicts(old, list_pkgs())
|
|
+ new = list_pkgs(root=root, includes=includes)
|
|
+ ret = salt.utils.data.compare_dicts(old, new)
|
|
|
|
if errors:
|
|
raise CommandExecutionError(
|
|
@@ -1623,7 +1750,7 @@ def normalize_name(name):
|
|
return name
|
|
|
|
|
|
-def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
+def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
@@ -1651,6 +1778,9 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
A list of packages to delete. Must be passed as a python list. The
|
|
``name`` parameter will be ignored if this option is passed.
|
|
|
|
+ root
|
|
+ Operate on a different root directory.
|
|
+
|
|
.. versionadded:: 0.16.0
|
|
|
|
|
|
@@ -1664,10 +1794,10 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
salt '*' pkg.remove <package1>,<package2>,<package3>
|
|
salt '*' pkg.remove pkgs='["foo", "bar"]'
|
|
'''
|
|
- return _uninstall(name=name, pkgs=pkgs)
|
|
+ return _uninstall(name=name, pkgs=pkgs, root=root)
|
|
|
|
|
|
-def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
+def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
|
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
|
@@ -1696,6 +1826,9 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
A list of packages to delete. Must be passed as a python list. The
|
|
``name`` parameter will be ignored if this option is passed.
|
|
|
|
+ root
|
|
+ Operate on a different root directory.
|
|
+
|
|
.. versionadded:: 0.16.0
|
|
|
|
|
|
@@ -1709,13 +1842,16 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
|
|
salt '*' pkg.purge <package1>,<package2>,<package3>
|
|
salt '*' pkg.purge pkgs='["foo", "bar"]'
|
|
'''
|
|
- return _uninstall(name=name, pkgs=pkgs)
|
|
+ return _uninstall(name=name, pkgs=pkgs, root=root)
|
|
|
|
|
|
-def list_locks():
|
|
+def list_locks(root=None):
|
|
'''
|
|
List current package locks.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
Return a dict containing the locked package with attributes::
|
|
|
|
{'<package>': {'case_sensitive': '<case_sensitive>',
|
|
@@ -1729,8 +1865,9 @@ def list_locks():
|
|
salt '*' pkg.list_locks
|
|
'''
|
|
locks = {}
|
|
- if os.path.exists(LOCKS):
|
|
- with salt.utils.files.fopen(LOCKS) as fhr:
|
|
+ _locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
|
|
+ try:
|
|
+ with salt.utils.files.fopen(_locks) as fhr:
|
|
items = salt.utils.stringutils.to_unicode(fhr.read()).split('\n\n')
|
|
for meta in [item.split('\n') for item in items]:
|
|
lock = {}
|
|
@@ -1739,15 +1876,22 @@ def list_locks():
|
|
lock.update(dict([tuple([i.strip() for i in element.split(':', 1)]), ]))
|
|
if lock.get('solvable_name'):
|
|
locks[lock.pop('solvable_name')] = lock
|
|
+ except IOError:
|
|
+ pass
|
|
+ except Exception:
|
|
+ log.warning('Detected a problem when accessing {}'.format(_locks))
|
|
|
|
return locks
|
|
|
|
|
|
-def clean_locks():
|
|
+def clean_locks(root=None):
|
|
'''
|
|
Remove unused locks that do not currently (with regard to repositories
|
|
used) lock any package.
|
|
|
|
+ root
|
|
+ Operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1756,10 +1900,11 @@ def clean_locks():
|
|
'''
|
|
LCK = "removed"
|
|
out = {LCK: 0}
|
|
- if not os.path.exists("/etc/zypp/locks"):
|
|
+ locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
|
|
+ if not os.path.exists(locks):
|
|
return out
|
|
|
|
- for node in __zypper__.xml.call('cl').getElementsByTagName("message"):
|
|
+ for node in __zypper__(root=root).xml.call('cl').getElementsByTagName("message"):
|
|
text = node.childNodes[0].nodeValue.lower()
|
|
if text.startswith(LCK):
|
|
out[LCK] = text.split(" ")[1]
|
|
@@ -1772,6 +1917,9 @@ def unhold(name=None, pkgs=None, **kwargs):
|
|
'''
|
|
Remove specified package lock.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1781,12 +1929,13 @@ def unhold(name=None, pkgs=None, **kwargs):
|
|
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
|
|
'''
|
|
ret = {}
|
|
+ root = kwargs.get('root')
|
|
if (not name and not pkgs) or (name and pkgs):
|
|
raise CommandExecutionError('Name or packages must be specified.')
|
|
elif name:
|
|
pkgs = [name]
|
|
|
|
- locks = list_locks()
|
|
+ locks = list_locks(root)
|
|
try:
|
|
pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
|
|
except MinionError as exc:
|
|
@@ -1803,15 +1952,18 @@ def unhold(name=None, pkgs=None, **kwargs):
|
|
ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg)
|
|
|
|
if removed:
|
|
- __zypper__.call('rl', *removed)
|
|
+ __zypper__(root=root).call('rl', *removed)
|
|
|
|
return ret
|
|
|
|
|
|
-def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
+def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
Remove specified package lock.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1821,7 +1973,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
|
|
'''
|
|
salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use unhold() instead.')
|
|
- locks = list_locks()
|
|
+ locks = list_locks(root)
|
|
try:
|
|
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
|
|
except MinionError as exc:
|
|
@@ -1836,7 +1988,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
missing.append(pkg)
|
|
|
|
if removed:
|
|
- __zypper__.call('rl', *removed)
|
|
+ __zypper__(root=root).call('rl', *removed)
|
|
|
|
return {'removed': len(removed), 'not_found': missing}
|
|
|
|
@@ -1845,6 +1997,9 @@ def hold(name=None, pkgs=None, **kwargs):
|
|
'''
|
|
Add a package lock. Specify packages to lock by exact name.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1859,12 +2014,13 @@ def hold(name=None, pkgs=None, **kwargs):
|
|
:return:
|
|
'''
|
|
ret = {}
|
|
+ root = kwargs.get('root')
|
|
if (not name and not pkgs) or (name and pkgs):
|
|
raise CommandExecutionError('Name or packages must be specified.')
|
|
elif name:
|
|
pkgs = [name]
|
|
|
|
- locks = list_locks()
|
|
+ locks = list_locks(root=root)
|
|
added = []
|
|
try:
|
|
pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
|
|
@@ -1880,15 +2036,18 @@ def hold(name=None, pkgs=None, **kwargs):
|
|
ret[pkg]['comment'] = 'Package {0} is already set to be held.'.format(pkg)
|
|
|
|
if added:
|
|
- __zypper__.call('al', *added)
|
|
+ __zypper__(root=root).call('al', *added)
|
|
|
|
return ret
|
|
|
|
|
|
-def add_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
+def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
|
|
'''
|
|
Add a package lock. Specify packages to lock by exact name.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Example:
|
|
|
|
.. code-block:: bash
|
|
@@ -1898,7 +2057,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
salt '*' pkg.add_lock pkgs='["foo", "bar"]'
|
|
'''
|
|
salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.')
|
|
- locks = list_locks()
|
|
+ locks = list_locks(root)
|
|
added = []
|
|
try:
|
|
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
|
|
@@ -1910,7 +2069,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|
added.append(pkg)
|
|
|
|
if added:
|
|
- __zypper__.call('al', *added)
|
|
+ __zypper__(root=root).call('al', *added)
|
|
|
|
return {'added': len(added), 'packages': added}
|
|
|
|
@@ -1920,7 +2079,9 @@ def verify(*names, **kwargs):
|
|
Runs an rpm -Va on a system, and returns the results in a dict
|
|
|
|
Files with an attribute of config, doc, ghost, license or readme in the
|
|
- package header can be ignored using the ``ignore_types`` keyword argument
|
|
+ package header can be ignored using the ``ignore_types`` keyword argument.
|
|
+
|
|
+ The root parameter can also be passed via the keyword argument.
|
|
|
|
CLI Example:
|
|
|
|
@@ -1934,12 +2095,14 @@ def verify(*names, **kwargs):
|
|
return __salt__['lowpkg.verify'](*names, **kwargs)
|
|
|
|
|
|
-def file_list(*packages):
|
|
+def file_list(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package. Not specifying any packages will
|
|
return a list of *every* file on the system's rpm database (not generally
|
|
recommended).
|
|
|
|
+ The root parameter can also be passed via the keyword argument.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -1948,15 +2111,17 @@ def file_list(*packages):
|
|
salt '*' pkg.file_list httpd postfix
|
|
salt '*' pkg.file_list
|
|
'''
|
|
- return __salt__['lowpkg.file_list'](*packages)
|
|
+ return __salt__['lowpkg.file_list'](*packages, **kwargs)
|
|
|
|
|
|
-def file_dict(*packages):
|
|
+def file_dict(*packages, **kwargs):
|
|
'''
|
|
List the files that belong to a package, grouped by package. Not
|
|
specifying any packages will return a list of *every* file on the system's
|
|
rpm database (not generally recommended).
|
|
|
|
+ The root parameter can also be passed via the keyword argument.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -1965,7 +2130,7 @@ def file_dict(*packages):
|
|
salt '*' pkg.file_list httpd postfix
|
|
salt '*' pkg.file_list
|
|
'''
|
|
- return __salt__['lowpkg.file_dict'](*packages)
|
|
+ return __salt__['lowpkg.file_dict'](*packages, **kwargs)
|
|
|
|
|
|
def modified(*packages, **flags):
|
|
@@ -2004,6 +2169,9 @@ def modified(*packages, **flags):
|
|
capabilities
|
|
Include only files where capabilities differ or not. Note: supported only on newer RPM versions.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2017,7 +2185,7 @@ def modified(*packages, **flags):
|
|
return __salt__['lowpkg.modified'](*packages, **flags)
|
|
|
|
|
|
-def owner(*paths):
|
|
+def owner(*paths, **kwargs):
|
|
'''
|
|
Return the name of the package that owns the file. Multiple file paths can
|
|
be passed. If a single path is passed, a string will be returned,
|
|
@@ -2027,6 +2195,8 @@ def owner(*paths):
|
|
If the file is not owned by a package, or is not present on the minion,
|
|
then an empty string will be returned for that path.
|
|
|
|
+ The root parameter can also be passed via the keyword argument.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2034,26 +2204,69 @@ def owner(*paths):
|
|
salt '*' pkg.owner /usr/bin/apachectl
|
|
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
|
|
'''
|
|
- return __salt__['lowpkg.owner'](*paths)
|
|
+ return __salt__['lowpkg.owner'](*paths, **kwargs)
|
|
|
|
|
|
-def _get_patterns(installed_only=None):
|
|
- '''
|
|
- List all known patterns in repos.
|
|
- '''
|
|
+def _get_visible_patterns(root=None):
|
|
+ '''Get all available patterns in the repo that are visible.'''
|
|
patterns = {}
|
|
- for element in __zypper__.nolock.xml.call('se', '-t', 'pattern').getElementsByTagName('solvable'):
|
|
+ search_patterns = __zypper__(root=root).nolock.xml.call('se', '-t', 'pattern')
|
|
+ for element in search_patterns.getElementsByTagName('solvable'):
|
|
installed = element.getAttribute('status') == 'installed'
|
|
- if (installed_only and installed) or not installed_only:
|
|
- patterns[element.getAttribute('name')] = {
|
|
- 'installed': installed,
|
|
- 'summary': element.getAttribute('summary'),
|
|
+ patterns[element.getAttribute('name')] = {
|
|
+ 'installed': installed,
|
|
+ 'summary': element.getAttribute('summary'),
|
|
+ }
|
|
+ return patterns
|
|
+
|
|
+
|
|
+def _get_installed_patterns(root=None):
|
|
+ '''
|
|
+ List all installed patterns.
|
|
+ '''
|
|
+ # Some patterns are non visible (`pattern-visible()` capability is
|
|
+ # not set), so they cannot be found via a normal `zypper se -t
|
|
+ # pattern`.
|
|
+ #
|
|
+ # Also patterns are not directly searchable in the local rpmdb.
|
|
+ #
|
|
+ # The proposed solution is, first search all the packages that
|
|
+    # contains the 'pattern()' capability, and deduce the name of the
|
|
+ # pattern from this capability.
|
|
+ #
|
|
+ # For example:
|
|
+ #
|
|
+ # 'pattern() = base' -> 'base'
|
|
+ # 'pattern() = microos_defaults' -> 'microos_defaults'
|
|
+
|
|
+ def _pattern_name(capability):
|
|
+ '''Return from a suitable capability the pattern name.'''
|
|
+ return capability.split('=')[-1].strip()
|
|
+
|
|
+ cmd = ['rpm']
|
|
+ if root:
|
|
+ cmd.extend(['--root', root])
|
|
+ cmd.extend(['-q', '--provides', '--whatprovides', 'pattern()'])
|
|
+ # If no `pattern()`s are found, RPM returns `1`, but for us is not
|
|
+ # a real error.
|
|
+ output = __salt__['cmd.run'](cmd, ignore_retcode=True)
|
|
+
|
|
+ installed_patterns = [_pattern_name(line) for line in output.splitlines()
|
|
+ if line.startswith('pattern() = ')]
|
|
+
|
|
+ patterns = {k: v for k, v in _get_visible_patterns(root=root).items() if v['installed']}
|
|
+
|
|
+ for pattern in installed_patterns:
|
|
+ if pattern not in patterns:
|
|
+ patterns[pattern] = {
|
|
+ 'installed': True,
|
|
+ 'summary': 'Non-visible pattern',
|
|
}
|
|
|
|
return patterns
|
|
|
|
|
|
-def list_patterns(refresh=False):
|
|
+def list_patterns(refresh=False, root=None):
|
|
'''
|
|
List all known patterns from available repos.
|
|
|
|
@@ -2062,6 +2275,9 @@ def list_patterns(refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2069,27 +2285,30 @@ def list_patterns(refresh=False):
|
|
salt '*' pkg.list_patterns
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
- return _get_patterns()
|
|
+ return _get_visible_patterns(root=root)
|
|
|
|
|
|
-def list_installed_patterns():
|
|
+def list_installed_patterns(root=None):
|
|
'''
|
|
List installed patterns on the system.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.list_installed_patterns
|
|
'''
|
|
- return _get_patterns(installed_only=True)
|
|
+ return _get_installed_patterns(root=root)
|
|
|
|
|
|
def search(criteria, refresh=False, **kwargs):
|
|
'''
|
|
- List known packags, available to the system.
|
|
+ List known packages, available to the system.
|
|
|
|
refresh
|
|
force a refresh if set to True.
|
|
@@ -2137,6 +2356,9 @@ def search(criteria, refresh=False, **kwargs):
|
|
details (bool)
|
|
Show version and repository
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2157,8 +2379,11 @@ def search(criteria, refresh=False, **kwargs):
|
|
'not_installed_only': '-u',
|
|
'details': '--details'
|
|
}
|
|
+
|
|
+ root = kwargs.get('root', None)
|
|
+
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
cmd = ['search']
|
|
if kwargs.get('match') == 'exact':
|
|
@@ -2173,7 +2398,7 @@ def search(criteria, refresh=False, **kwargs):
|
|
cmd.append(ALLOWED_SEARCH_OPTIONS.get(opt))
|
|
|
|
cmd.append(criteria)
|
|
- solvables = __zypper__.nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
|
|
+ solvables = __zypper__(root=root).nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
|
|
if not solvables:
|
|
raise CommandExecutionError(
|
|
'No packages found matching \'{0}\''.format(criteria)
|
|
@@ -2202,7 +2427,7 @@ def _get_first_aggregate_text(node_list):
|
|
return '\n'.join(out)
|
|
|
|
|
|
-def list_products(all=False, refresh=False):
|
|
+def list_products(all=False, refresh=False, root=None):
|
|
'''
|
|
List all available or installed SUSE products.
|
|
|
|
@@ -2214,6 +2439,9 @@ def list_products(all=False, refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
Includes handling for OEM products, which read the OEM productline file
|
|
and overwrite the release value.
|
|
|
|
@@ -2225,10 +2453,12 @@ def list_products(all=False, refresh=False):
|
|
salt '*' pkg.list_products all=True
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
ret = list()
|
|
- OEM_PATH = "/var/lib/suseRegister/OEM"
|
|
+ OEM_PATH = '/var/lib/suseRegister/OEM'
|
|
+ if root:
|
|
+ OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep))
|
|
cmd = list()
|
|
if not all:
|
|
cmd.append('--disable-repos')
|
|
@@ -2236,7 +2466,7 @@ def list_products(all=False, refresh=False):
|
|
if not all:
|
|
cmd.append('-i')
|
|
|
|
- product_list = __zypper__.nolock.xml.call(*cmd).getElementsByTagName('product-list')
|
|
+ product_list = __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('product-list')
|
|
if not product_list:
|
|
return ret # No products found
|
|
|
|
@@ -2278,6 +2508,9 @@ def download(*packages, **kwargs):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -2288,12 +2521,14 @@ def download(*packages, **kwargs):
|
|
if not packages:
|
|
raise SaltInvocationError('No packages specified')
|
|
|
|
+ root = kwargs.get('root', None)
|
|
+
|
|
refresh = kwargs.get('refresh', False)
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
pkg_ret = {}
|
|
- for dld_result in __zypper__.xml.call('download', *packages).getElementsByTagName("download-result"):
|
|
+ for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName("download-result"):
|
|
repo = dld_result.getElementsByTagName("repository")[0]
|
|
path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
|
|
pkg_info = {
|
|
@@ -2304,7 +2539,7 @@ def download(*packages, **kwargs):
|
|
key = _get_first_aggregate_text(
|
|
dld_result.getElementsByTagName('name')
|
|
)
|
|
- if __salt__['lowpkg.checksum'](pkg_info['path']):
|
|
+ if __salt__['lowpkg.checksum'](pkg_info['path'], root=root):
|
|
pkg_ret[key] = pkg_info
|
|
|
|
if pkg_ret:
|
|
@@ -2318,12 +2553,15 @@ def download(*packages, **kwargs):
|
|
)
|
|
|
|
|
|
-def list_downloaded():
|
|
+def list_downloaded(root=None):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
List prefetched packages downloaded by Zypper in the local disk.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -2331,6 +2569,8 @@ def list_downloaded():
|
|
salt '*' pkg.list_downloaded
|
|
'''
|
|
CACHE_DIR = '/var/cache/zypp/packages/'
|
|
+ if root:
|
|
+ CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
|
|
|
|
ret = {}
|
|
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
|
|
@@ -2347,12 +2587,14 @@ def list_downloaded():
|
|
return ret
|
|
|
|
|
|
-def diff(*paths):
|
|
+def diff(*paths, **kwargs):
|
|
'''
|
|
Return a formatted diff between current files and original in a package.
|
|
NOTE: this function includes all files (configuration and not), but does
|
|
not work on binary content.
|
|
|
|
+ The root parameter can also be passed via the keyword argument.
|
|
+
|
|
:param path: Full path to the installed file
|
|
:return: Difference string or raises and exception if examined file is binary.
|
|
|
|
@@ -2366,7 +2608,7 @@ def diff(*paths):
|
|
|
|
pkg_to_paths = {}
|
|
for pth in paths:
|
|
- pth_pkg = __salt__['lowpkg.owner'](pth)
|
|
+ pth_pkg = __salt__['lowpkg.owner'](pth, **kwargs)
|
|
if not pth_pkg:
|
|
ret[pth] = os.path.exists(pth) and 'Not managed' or 'N/A'
|
|
else:
|
|
@@ -2375,7 +2617,7 @@ def diff(*paths):
|
|
pkg_to_paths[pth_pkg].append(pth)
|
|
|
|
if pkg_to_paths:
|
|
- local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys())
|
|
+ local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys(), **kwargs)
|
|
for pkg, files in six.iteritems(pkg_to_paths):
|
|
for path in files:
|
|
ret[path] = __salt__['lowpkg.diff'](
|
|
@@ -2386,12 +2628,12 @@ def diff(*paths):
|
|
return ret
|
|
|
|
|
|
-def _get_patches(installed_only=False):
|
|
+def _get_patches(installed_only=False, root=None):
|
|
'''
|
|
List all known patches in repos.
|
|
'''
|
|
patches = {}
|
|
- for element in __zypper__.nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
|
|
+ for element in __zypper__(root=root).nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
|
|
installed = element.getAttribute('status') == 'installed'
|
|
if (installed_only and installed) or not installed_only:
|
|
patches[element.getAttribute('name')] = {
|
|
@@ -2402,7 +2644,7 @@ def _get_patches(installed_only=False):
|
|
return patches
|
|
|
|
|
|
-def list_patches(refresh=False):
|
|
+def list_patches(refresh=False, root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
@@ -2413,6 +2655,9 @@ def list_patches(refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2420,33 +2665,39 @@ def list_patches(refresh=False):
|
|
salt '*' pkg.list_patches
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
- return _get_patches()
|
|
+ return _get_patches(root=root)
|
|
|
|
|
|
-def list_installed_patches():
|
|
+def list_installed_patches(root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
List installed advisory patches on the system.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.list_installed_patches
|
|
'''
|
|
- return _get_patches(installed_only=True)
|
|
+ return _get_patches(installed_only=True, root=root)
|
|
|
|
|
|
-def list_provides(**kwargs):
|
|
+def list_provides(root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2018.3.0
|
|
|
|
List package provides of installed packages as a dict.
|
|
{'<provided_name>': ['<package_name>', '<package_name>', ...]}
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2455,7 +2706,10 @@ def list_provides(**kwargs):
|
|
'''
|
|
ret = __context__.get('pkg.list_provides')
|
|
if not ret:
|
|
- cmd = ['rpm', '-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n']
|
|
+ cmd = ['rpm']
|
|
+ if root:
|
|
+ cmd.extend(['--root', root])
|
|
+ cmd.extend(['-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n'])
|
|
ret = dict()
|
|
for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines():
|
|
provide, realname = line.split('_|-')
|
|
@@ -2471,7 +2725,7 @@ def list_provides(**kwargs):
|
|
return ret
|
|
|
|
|
|
-def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
+def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2018.3.0
|
|
|
|
@@ -2485,6 +2739,9 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
resolve_capabilities
|
|
If this option is set to True the input will be checked if
|
|
a package with this name exists. If not, this function will
|
|
@@ -2500,7 +2757,7 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
ret = list()
|
|
for pkg in pkgs:
|
|
@@ -2513,12 +2770,12 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
|
|
if kwargs.get('resolve_capabilities', False):
|
|
try:
|
|
- search(name, match='exact')
|
|
+ search(name, root=root, match='exact')
|
|
except CommandExecutionError:
|
|
# no package this such a name found
|
|
# search for a package which provides this name
|
|
try:
|
|
- result = search(name, provides=True, match='exact')
|
|
+ result = search(name, root=root, provides=True, match='exact')
|
|
if len(result) == 1:
|
|
name = next(iter(result.keys()))
|
|
elif len(result) > 1:
|
|
diff --git a/salt/states/blockdev.py b/salt/states/blockdev.py
|
|
index 38543ac8a0..2db5d805c3 100644
|
|
--- a/salt/states/blockdev.py
|
|
+++ b/salt/states/blockdev.py
|
|
@@ -193,5 +193,6 @@ def _checkblk(name):
|
|
Check if the blk exists and return its fstype if ok
|
|
'''
|
|
|
|
- blk = __salt__['cmd.run']('blkid -o value -s TYPE {0}'.format(name))
|
|
+ blk = __salt__['cmd.run']('blkid -o value -s TYPE {0}'.format(name),
|
|
+ ignore_retcode=True)
|
|
return '' if not blk else blk
|
|
diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py
|
|
new file mode 100644
|
|
index 0000000000..af78c8ae00
|
|
--- /dev/null
|
|
+++ b/salt/states/btrfs.py
|
|
@@ -0,0 +1,385 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:maturity: new
|
|
+:depends: None
|
|
+:platform: Linux
|
|
+'''
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import functools
|
|
+import logging
|
|
+import os.path
|
|
+import tempfile
|
|
+import traceback
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+
|
|
+log = logging.getLogger(__name__)
|
|
+
|
|
+__virtualname__ = 'btrfs'
|
|
+
|
|
+
|
|
+def _mount(device, use_default):
|
|
+ '''
|
|
+ Mount the device in a temporary place.
|
|
+ '''
|
|
+ opts = 'subvol=/' if not use_default else 'defaults'
|
|
+ dest = tempfile.mkdtemp()
|
|
+ res = __states__['mount.mounted'](dest, device=device, fstype='btrfs',
|
|
+ opts=opts, persist=False)
|
|
+ if not res['result']:
|
|
+ log.error('Cannot mount device %s in %s', device, dest)
|
|
+ _umount(dest)
|
|
+ return None
|
|
+ return dest
|
|
+
|
|
+
|
|
+def _umount(path):
|
|
+ '''
|
|
+ Umount and clean the temporary place.
|
|
+ '''
|
|
+ __states__['mount.unmounted'](path)
|
|
+ __utils__['files.rm_rf'](path)
|
|
+
|
|
+
|
|
+def _is_default(path, dest, name):
|
|
+ '''
|
|
+ Check if the subvolume is the current default.
|
|
+ '''
|
|
+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
|
|
+ def_id = __salt__['btrfs.subvolume_get_default'](dest)['id']
|
|
+ return subvol_id == def_id
|
|
+
|
|
+
|
|
+def _set_default(path, dest, name):
|
|
+ '''
|
|
+ Set the subvolume as the current default.
|
|
+ '''
|
|
+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
|
|
+ return __salt__['btrfs.subvolume_set_default'](subvol_id, dest)
|
|
+
|
|
+
|
|
+def _is_cow(path):
|
|
+ '''
|
|
+ Check if the subvolume is copy on write
|
|
+ '''
|
|
+ dirname = os.path.dirname(path)
|
|
+ return 'C' not in __salt__['file.lsattr'](dirname)[path]
|
|
+
|
|
+
|
|
+def _unset_cow(path):
|
|
+ '''
|
|
+ Disable the copy on write in a subvolume
|
|
+ '''
|
|
+ return __salt__['file.chattr'](path, operator='add', attributes='C')
|
|
+
|
|
+
|
|
+def __mount_device(action):
|
|
+ '''
|
|
+    Small decorator to make sure that the mount and umount happen in
|
|
+ a transactional way.
|
|
+ '''
|
|
+ @functools.wraps(action)
|
|
+ def wrapper(*args, **kwargs):
|
|
+ name = kwargs['name']
|
|
+ device = kwargs['device']
|
|
+ use_default = kwargs.get('use_default', False)
|
|
+
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Some error happends during the operation.'],
|
|
+ }
|
|
+ try:
|
|
+ if device:
|
|
+ dest = _mount(device, use_default)
|
|
+ if not dest:
|
|
+ msg = 'Device {} cannot be mounted'.format(device)
|
|
+ ret['comment'].append(msg)
|
|
+ kwargs['__dest'] = dest
|
|
+ ret = action(*args, **kwargs)
|
|
+ except Exception:
|
|
+ tb = six.text_type(traceback.format_exc())
|
|
+ log.exception('Exception captured in wrapper %s', tb)
|
|
+ ret['comment'].append(tb)
|
|
+ finally:
|
|
+ if device:
|
|
+ _umount(dest)
|
|
+ return ret
|
|
+ return wrapper
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def subvolume_created(name, device, qgroupids=None, set_default=False,
|
|
+ copy_on_write=True, force_set_default=True,
|
|
+ __dest=None):
|
|
+ '''
|
|
+ Makes sure that a btrfs subvolume is present.
|
|
+
|
|
+ name
|
|
+ Name of the subvolume to add
|
|
+
|
|
+ device
|
|
+ Device where to create the subvolume
|
|
+
|
|
+ qgroupids
|
|
+        Add the newly created subvolume to a qgroup. This parameter
|
|
+ is a list
|
|
+
|
|
+ set_default
|
|
+ If True, this new subvolume will be set as default when
|
|
+ mounted, unless subvol option in mount is used
|
|
+
|
|
+ copy_on_write
|
|
+ If false, set the subvolume with chattr +C
|
|
+
|
|
+ force_set_default
|
|
+ If false and the subvolume is already present, it will not
|
|
+ force it as default if ``set_default`` is True
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+ path = os.path.join(__dest, name)
|
|
+
|
|
+ exists = __salt__['btrfs.subvolume_exists'](path)
|
|
+ if exists:
|
|
+ ret['comment'].append('Subvolume {} already present'.format(name))
|
|
+
|
|
+ # Resolve first the test case. The check is not complete, but at
|
|
+ # least we will report if a subvolume needs to be created. Can
|
|
+    # happen that the subvolume is there, but we also need to set it
|
|
+ # as default, or persist in fstab.
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if not exists:
|
|
+ ret['comment'].append('Subvolume {} will be created'.format(name))
|
|
+ return ret
|
|
+
|
|
+ if not exists:
|
|
+ # Create the directories where the subvolume lives
|
|
+ _path = os.path.dirname(path)
|
|
+ res = __states__['file.directory'](_path, makedirs=True)
|
|
+ if not res['result']:
|
|
+ ret['comment'].append('Error creating {} directory'.format(_path))
|
|
+ return ret
|
|
+
|
|
+ try:
|
|
+ __salt__['btrfs.subvolume_create'](name, dest=__dest,
|
|
+ qgroupids=qgroupids)
|
|
+ except CommandExecutionError:
|
|
+ ret['comment'].append('Error creating subvolume {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ ret['changes'][name] = 'Created subvolume {}'.format(name)
|
|
+
|
|
+ # If the volume was already present, we can opt-out the check for
|
|
+ # default subvolume.
|
|
+ if (not exists or (exists and force_set_default)) and \
|
|
+ set_default and not _is_default(path, __dest, name):
|
|
+ ret['changes'][name + '_default'] = _set_default(path, __dest, name)
|
|
+
|
|
+ if not copy_on_write and _is_cow(path):
|
|
+ ret['changes'][name + '_no_cow'] = _unset_cow(path)
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def subvolume_deleted(name, device, commit=False, __dest=None):
|
|
+ '''
|
|
+ Makes sure that a btrfs subvolume is removed.
|
|
+
|
|
+ name
|
|
+ Name of the subvolume to remove
|
|
+
|
|
+ device
|
|
+ Device where to remove the subvolume
|
|
+
|
|
+ commit
|
|
+ Wait until the transaction is over
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ path = os.path.join(__dest, name)
|
|
+
|
|
+ exists = __salt__['btrfs.subvolume_exists'](path)
|
|
+ if not exists:
|
|
+ ret['comment'].append('Subvolume {} already missing'.format(name))
|
|
+
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if exists:
|
|
+ ret['comment'].append('Subvolume {} will be removed'.format(name))
|
|
+ return ret
|
|
+
|
|
+ # If commit is set, we wait until all is over
|
|
+ commit = 'after' if commit else None
|
|
+
|
|
+ if not exists:
|
|
+ try:
|
|
+ __salt__['btrfs.subvolume_delete'](path, commit=commit)
|
|
+ except CommandExecutionError:
|
|
+ ret['comment'].append('Error removing subvolume {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ ret['changes'][name] = 'Removed subvolume {}'.format(name)
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
+
|
|
+
|
|
+def _diff_properties(expected, current):
|
|
+ '''Calculate the difference between the current and the expected
|
|
+ properties
|
|
+
|
|
+ * 'expected' is expressed in a dictionary like: {'property': value}
|
|
+
|
|
+    * 'current' contains the same format returned by 'btrfs.properties'
|
|
+
|
|
+ If the property is not available, will throw an exception.
|
|
+
|
|
+ '''
|
|
+ difference = {}
|
|
+ for _property, value in expected.items():
|
|
+ current_value = current[_property]['value']
|
|
+ if value is False and current_value == 'N/A':
|
|
+ needs_update = False
|
|
+ elif value != current_value:
|
|
+ needs_update = True
|
|
+ else:
|
|
+ needs_update = False
|
|
+ if needs_update:
|
|
+ difference[_property] = value
|
|
+ return difference
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def properties(name, device, use_default=False, __dest=None, **properties):
|
|
+ '''
|
|
+ Makes sure that a list of properties are set in a subvolume, file
|
|
+ or device.
|
|
+
|
|
+ name
|
|
+ Name of the object to change
|
|
+
|
|
+ device
|
|
+ Device where the object lives, if None, the device will be in
|
|
+ name
|
|
+
|
|
+ use_default
|
|
+ If True, this subvolume will be resolved to the default
|
|
+ subvolume assigned during the create operation
|
|
+
|
|
+ properties
|
|
+ Dictionary of properties
|
|
+
|
|
+ Valid properties are 'ro', 'label' or 'compression'. Check the
|
|
+ documentation to see where those properties are valid for each
|
|
+ object.
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ # 'name' will have always the name of the object that we want to
|
|
+ # change, but if the object is a device, we do not repeat it again
|
|
+ # in 'device'. This makes device sometimes optional.
|
|
+ if device:
|
|
+ if os.path.isabs(name):
|
|
+ path = os.path.join(__dest, os.path.relpath(name, os.path.sep))
|
|
+ else:
|
|
+ path = os.path.join(__dest, name)
|
|
+ else:
|
|
+ path = name
|
|
+
|
|
+ if not os.path.exists(path):
|
|
+ ret['comment'].append('Object {} not found'.format(name))
|
|
+ return ret
|
|
+
|
|
+ # Convert the booleans to lowercase
|
|
+ properties = {k: v if type(v) is not bool else str(v).lower()
|
|
+ for k, v in properties.items()}
|
|
+
|
|
+ current_properties = {}
|
|
+ try:
|
|
+ current_properties = __salt__['btrfs.properties'](path)
|
|
+ except CommandExecutionError as e:
|
|
+ ret['comment'].append('Error reading properties from {}'.format(name))
|
|
+ ret['comment'].append('Current error {}'.format(e))
|
|
+ return ret
|
|
+
|
|
+ try:
|
|
+ properties_to_set = _diff_properties(properties, current_properties)
|
|
+ except KeyError:
|
|
+ ret['comment'].append('Some property not found in {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if properties_to_set:
|
|
+ msg = 'Properties {} will be changed in {}'.format(
|
|
+ properties_to_set, name)
|
|
+ else:
|
|
+ msg = 'No properties will be changed in {}'.format(name)
|
|
+ ret['comment'].append(msg)
|
|
+ return ret
|
|
+
|
|
+ if properties_to_set:
|
|
+ _properties = ','.join(
|
|
+ '{}={}'.format(k, v) for k, v in properties_to_set.items())
|
|
+ __salt__['btrfs.properties'](path, set=_properties)
|
|
+
|
|
+ current_properties = __salt__['btrfs.properties'](path)
|
|
+ properties_failed = _diff_properties(properties, current_properties)
|
|
+ if properties_failed:
|
|
+ msg = 'Properties {} failed to be changed in {}'.format(
|
|
+ properties_failed, name)
|
|
+ ret['comment'].append(msg)
|
|
+ return ret
|
|
+
|
|
+ ret['comment'].append('Properties changed in {}'.format(name))
|
|
+ ret['changes'] = properties_to_set
|
|
+ else:
|
|
+ ret['comment'].append('Properties not changed in {}'.format(name))
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
diff --git a/salt/states/cmd.py b/salt/states/cmd.py
|
|
index 4d20b51381..86934f9ffc 100644
|
|
--- a/salt/states/cmd.py
|
|
+++ b/salt/states/cmd.py
|
|
@@ -402,6 +402,7 @@ def wait(name,
|
|
unless=None,
|
|
creates=None,
|
|
cwd=None,
|
|
+ root=None,
|
|
runas=None,
|
|
shell=None,
|
|
env=(),
|
|
@@ -436,6 +437,10 @@ def wait(name,
|
|
The current working directory to execute the command in, defaults to
|
|
/root
|
|
|
|
+ root
|
|
+ Path to the root of the jail to use. If this parameter is set, the command
|
|
+ will run inside a chroot
|
|
+
|
|
runas
|
|
The user name to run the command as
|
|
|
|
@@ -674,6 +679,7 @@ def run(name,
|
|
unless=None,
|
|
creates=None,
|
|
cwd=None,
|
|
+ root=None,
|
|
runas=None,
|
|
shell=None,
|
|
env=None,
|
|
@@ -707,6 +713,10 @@ def run(name,
|
|
The current working directory to execute the command in, defaults to
|
|
/root
|
|
|
|
+ root
|
|
+ Path to the root of the jail to use. If this parameter is set, the command
|
|
+ will run inside a chroot
|
|
+
|
|
runas
|
|
The user name to run the command as
|
|
|
|
@@ -882,6 +892,7 @@ def run(name,
|
|
|
|
cmd_kwargs = copy.deepcopy(kwargs)
|
|
cmd_kwargs.update({'cwd': cwd,
|
|
+ 'root': root,
|
|
'runas': runas,
|
|
'use_vt': use_vt,
|
|
'shell': shell or __grains__['shell'],
|
|
@@ -912,10 +923,11 @@ def run(name,
|
|
|
|
# Wow, we passed the test, run this sucker!
|
|
try:
|
|
- cmd_all = __salt__['cmd.run_all'](
|
|
- name, timeout=timeout, python_shell=True, **cmd_kwargs
|
|
+ run_cmd = 'cmd.run_all' if not root else 'cmd.run_chroot'
|
|
+ cmd_all = __salt__[run_cmd](
|
|
+ cmd=name, timeout=timeout, python_shell=True, **cmd_kwargs
|
|
)
|
|
- except CommandExecutionError as err:
|
|
+ except Exception as err:
|
|
ret['comment'] = six.text_type(err)
|
|
return ret
|
|
|
|
diff --git a/salt/states/file.py b/salt/states/file.py
|
|
index 15bb93e5ec..8823a7c8cf 100644
|
|
--- a/salt/states/file.py
|
|
+++ b/salt/states/file.py
|
|
@@ -276,7 +276,11 @@ import shutil
|
|
import sys
|
|
import time
|
|
import traceback
|
|
-from collections import Iterable, Mapping, defaultdict
|
|
+try:
|
|
+ from collections.abc import Iterable, Mapping
|
|
+except ImportError:
|
|
+ from collections import Iterable, Mapping
|
|
+from collections import defaultdict
|
|
from datetime import datetime, date # python3 problem in the making?
|
|
|
|
# Import salt libs
|
|
diff --git a/salt/states/loop.py b/salt/states/loop.py
|
|
index edaf8c3063..db4971ceb0 100644
|
|
--- a/salt/states/loop.py
|
|
+++ b/salt/states/loop.py
|
|
@@ -94,6 +94,10 @@ def until(name,
|
|
ret['comment'] = 'The execution module {0} will be run'.format(name)
|
|
ret['result'] = None
|
|
return ret
|
|
+ if not m_args:
|
|
+ m_args = []
|
|
+ if not m_kwargs:
|
|
+ m_kwargs = {}
|
|
|
|
def timed_out():
|
|
if time.time() >= timeout:
|
|
diff --git a/salt/states/lvm.py b/salt/states/lvm.py
|
|
index bc937a33ab..5cb15d0ed6 100644
|
|
--- a/salt/states/lvm.py
|
|
+++ b/salt/states/lvm.py
|
|
@@ -56,7 +56,7 @@ def pv_present(name, **kwargs):
|
|
'name': name,
|
|
'result': True}
|
|
|
|
- if __salt__['lvm.pvdisplay'](name):
|
|
+ if __salt__['lvm.pvdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Physical Volume {0} already present'.format(name)
|
|
elif __opts__['test']:
|
|
ret['comment'] = 'Physical Volume {0} is set to be created'.format(name)
|
|
@@ -86,7 +86,7 @@ def pv_absent(name):
|
|
'name': name,
|
|
'result': True}
|
|
|
|
- if not __salt__['lvm.pvdisplay'](name):
|
|
+ if not __salt__['lvm.pvdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Physical Volume {0} does not exist'.format(name)
|
|
elif __opts__['test']:
|
|
ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name)
|
|
@@ -95,7 +95,7 @@ def pv_absent(name):
|
|
else:
|
|
changes = __salt__['lvm.pvremove'](name)
|
|
|
|
- if __salt__['lvm.pvdisplay'](name):
|
|
+ if __salt__['lvm.pvdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name)
|
|
ret['result'] = False
|
|
else:
|
|
@@ -125,7 +125,7 @@ def vg_present(name, devices=None, **kwargs):
|
|
if isinstance(devices, six.string_types):
|
|
devices = devices.split(',')
|
|
|
|
- if __salt__['lvm.vgdisplay'](name):
|
|
+ if __salt__['lvm.vgdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Volume Group {0} already present'.format(name)
|
|
for device in devices:
|
|
realdev = os.path.realpath(device)
|
|
@@ -185,7 +185,7 @@ def vg_absent(name):
|
|
'name': name,
|
|
'result': True}
|
|
|
|
- if not __salt__['lvm.vgdisplay'](name):
|
|
+ if not __salt__['lvm.vgdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Volume Group {0} already absent'.format(name)
|
|
elif __opts__['test']:
|
|
ret['comment'] = 'Volume Group {0} is set to be removed'.format(name)
|
|
@@ -194,7 +194,7 @@ def vg_absent(name):
|
|
else:
|
|
changes = __salt__['lvm.vgremove'](name)
|
|
|
|
- if not __salt__['lvm.vgdisplay'](name):
|
|
+ if not __salt__['lvm.vgdisplay'](name, quiet=True):
|
|
ret['comment'] = 'Removed Volume Group {0}'.format(name)
|
|
ret['changes']['removed'] = changes
|
|
else:
|
|
@@ -311,7 +311,7 @@ def lv_absent(name, vgname=None):
|
|
'result': True}
|
|
|
|
lvpath = '/dev/{0}/{1}'.format(vgname, name)
|
|
- if not __salt__['lvm.lvdisplay'](lvpath):
|
|
+ if not __salt__['lvm.lvdisplay'](lvpath, quiet=True):
|
|
ret['comment'] = 'Logical Volume {0} already absent'.format(name)
|
|
elif __opts__['test']:
|
|
ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name)
|
|
@@ -320,7 +320,7 @@ def lv_absent(name, vgname=None):
|
|
else:
|
|
changes = __salt__['lvm.lvremove'](name, vgname)
|
|
|
|
- if not __salt__['lvm.lvdisplay'](lvpath):
|
|
+ if not __salt__['lvm.lvdisplay'](lvpath, quiet=True):
|
|
ret['comment'] = 'Removed Logical Volume {0}'.format(name)
|
|
ret['changes']['removed'] = changes
|
|
else:
|
|
diff --git a/salt/states/mdadm_raid.py b/salt/states/mdadm_raid.py
|
|
index fd285b6ace..d634522c33 100644
|
|
--- a/salt/states/mdadm_raid.py
|
|
+++ b/salt/states/mdadm_raid.py
|
|
@@ -98,7 +98,7 @@ def present(name,
|
|
if dev == 'missing' or not __salt__['file.access'](dev, 'f'):
|
|
missing.append(dev)
|
|
continue
|
|
- superblock = __salt__['raid.examine'](dev)
|
|
+ superblock = __salt__['raid.examine'](dev, quiet=True)
|
|
|
|
if 'MD_UUID' in superblock:
|
|
uuid = superblock['MD_UUID']
|
|
diff --git a/salt/states/mount.py b/salt/states/mount.py
|
|
index 162da1ca62..0802bf4388 100644
|
|
--- a/salt/states/mount.py
|
|
+++ b/salt/states/mount.py
|
|
@@ -956,3 +956,308 @@ def mod_watch(name, user=None, **kwargs):
|
|
else:
|
|
ret['comment'] = 'Watch not supported in {0} at this time'.format(kwargs['sfun'])
|
|
return ret
|
|
+
|
|
+
|
|
+def _convert_to(maybe_device, convert_to):
|
|
+ '''
|
|
+ Convert a device name, UUID or LABEL to a device name, UUID or
|
|
+ LABEL.
|
|
+
|
|
+ Return the fs_spec required for fstab.
|
|
+
|
|
+ '''
|
|
+
|
|
+ # Fast path. If we already have the information required, we can
|
|
+ # save one blkid call
|
|
+ if not convert_to or \
|
|
+ (convert_to == 'device' and maybe_device.startswith('/')) or \
|
|
+ maybe_device.startswith('{}='.format(convert_to.upper())):
|
|
+ return maybe_device
|
|
+
|
|
+ # Get the device information
|
|
+ if maybe_device.startswith('/'):
|
|
+ blkid = __salt__['disk.blkid'](maybe_device)
|
|
+ else:
|
|
+ blkid = __salt__['disk.blkid'](token=maybe_device)
|
|
+
|
|
+ result = None
|
|
+ if len(blkid) == 1:
|
|
+ if convert_to == 'device':
|
|
+ result = list(blkid.keys())[0]
|
|
+ else:
|
|
+ key = convert_to.upper()
|
|
+ result = '{}={}'.format(key, list(blkid.values())[0][key])
|
|
+
|
|
+ return result
|
|
+
|
|
+
|
|
+def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
|
|
+ fs_freq=0, fs_passno=0, mount_by=None,
|
|
+ config='/etc/fstab', mount=True, match_on='auto',
|
|
+ not_change=False):
|
|
+ '''Makes sure that a fstab mount point is present.
|
|
+
|
|
+ name
|
|
+ The name of block device. Can be any valid fs_spec value.
|
|
+
|
|
+ fs_file
|
|
+ Mount point (target) for the filesystem.
|
|
+
|
|
+ fs_vfstype
|
|
+ The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
|
|
+
|
|
+ fs_mntops
|
|
+ The mount options associated with the filesystem. Default is
|
|
+ ``defaults``.
|
|
+
|
|
+ fs_freq
|
|
+ Field is used by dump to determine which fs need to be
|
|
+ dumped. Default is ``0``
|
|
+
|
|
+ fs_passno
|
|
+ Field is used by fsck to determine the order in which
|
|
+ filesystem checks are done at boot time. Default is ``0``
|
|
+
|
|
+ mount_by
|
|
+ Select the final value for fs_spec. Can be [``None``,
|
|
+ ``device``, ``label``, ``uuid``, ``partlabel``,
|
|
+ ``partuuid``]. If ``None``, the value for fs_spect will be the
|
|
+ parameter ``name``, in other case will search the correct
|
|
+ value based on the device name. For example, for ``uuid``, the
|
|
+ value for fs_spec will be of type 'UUID=xxx' instead of the
|
|
+ device name set in ``name``.
|
|
+
|
|
+ config
|
|
+ Place where the fstab file lives. Default is ``/etc/fstab``
|
|
+
|
|
+ mount
|
|
+ Set if the mount should be mounted immediately. Default is
|
|
+ ``True``
|
|
+
|
|
+ match_on
|
|
+ A name or list of fstab properties on which this state should
|
|
+ be applied. Default is ``auto``, a special value indicating
|
|
+ to guess based on fstype. In general, ``auto`` matches on
|
|
+ name for recognized special devices and device otherwise.
|
|
+
|
|
+ not_change
|
|
+ By default, if the entry is found in the fstab file but is
|
|
+ different from the expected content (like different options),
|
|
+ the entry will be replaced with the correct content. If this
|
|
+ parameter is set to ``True`` and the line is found, the
|
|
+ original content will be preserved.
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ # Adjust fs_mntops based on the OS
|
|
+ if fs_mntops == 'defaults':
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ fs_mntops = 'noowners'
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ fs_mntops = ''
|
|
+
|
|
+ # Adjust the config file based on the OS
|
|
+ if config == '/etc/fstab':
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ config = '/etc/auto_salt'
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ config = '/etc/filesystems'
|
|
+
|
|
+ if not fs_file == '/':
|
|
+ fs_file = fs_file.rstrip('/')
|
|
+
|
|
+ fs_spec = _convert_to(name, mount_by)
|
|
+
|
|
+ # Validate that the device is valid after the conversion
|
|
+ if not fs_spec:
|
|
+ msg = 'Device {} cannot be converted to {}'
|
|
+ ret['comment'].append(msg.format(name, mount_by))
|
|
+ return ret
|
|
+
|
|
+ if __opts__['test']:
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ out = __salt__['mount.set_automaster'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ config=config,
|
|
+ test=True,
|
|
+ not_change=not_change)
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ out = __salt__['mount.set_filesystems'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ mount=mount,
|
|
+ config=config,
|
|
+ test=True,
|
|
+ match_on=match_on,
|
|
+ not_change=not_change)
|
|
+ else:
|
|
+ out = __salt__['mount.set_fstab'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ dump=fs_freq,
|
|
+ pass_num=fs_passno,
|
|
+ config=config,
|
|
+ test=True,
|
|
+ match_on=match_on,
|
|
+ not_change=not_change)
|
|
+ ret['result'] = None
|
|
+ if out == 'present':
|
|
+ msg = '{} entry is already in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ elif out == 'new':
|
|
+ msg = '{} entry will be written in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ elif out == 'change':
|
|
+ msg = '{} entry will be updated in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ else:
|
|
+ ret['result'] = False
|
|
+ msg = '{} entry cannot be created in {}: {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config, out))
|
|
+ return ret
|
|
+
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ out = __salt__['mount.set_automaster'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ config=config,
|
|
+ not_change=not_change)
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ out = __salt__['mount.set_filesystems'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ mount=mount,
|
|
+ config=config,
|
|
+ match_on=match_on,
|
|
+ not_change=not_change)
|
|
+ else:
|
|
+ out = __salt__['mount.set_fstab'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ fstype=fs_vfstype,
|
|
+ opts=fs_mntops,
|
|
+ dump=fs_freq,
|
|
+ pass_num=fs_passno,
|
|
+ config=config,
|
|
+ match_on=match_on,
|
|
+ not_change=not_change)
|
|
+
|
|
+ ret['result'] = True
|
|
+ if out == 'present':
|
|
+ msg = '{} entry was already in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ elif out == 'new':
|
|
+ ret['changes']['persist'] = out
|
|
+ msg = '{} entry added in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ elif out == 'change':
|
|
+ ret['changes']['persist'] = out
|
|
+ msg = '{} entry updated in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ else:
|
|
+ ret['result'] = False
|
|
+ msg = '{} entry cannot be changed in {}: {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config, out))
|
|
+
|
|
+ return ret
|
|
+
|
|
+
|
|
+def fstab_absent(name, fs_file, mount_by=None, config='/etc/fstab'):
|
|
+ '''
|
|
+ Makes sure that a fstab mount point is absent.
|
|
+
|
|
+ name
|
|
+ The name of block device. Can be any valid fs_spec value.
|
|
+
|
|
+ fs_file
|
|
+ Mount point (target) for the filesystem.
|
|
+
|
|
+ mount_by
|
|
+ Select the final value for fs_spec. Can be [``None``,
|
|
+ ``device``, ``label``, ``uuid``, ``partlabel``,
|
|
+ ``partuuid``]. If ``None``, the value for fs_spec will be the
|
|
+ parameter ``name``, in other case will search the correct
|
|
+ value based on the device name. For example, for ``uuid``, the
|
|
+ value for fs_spec will be of type 'UUID=xxx' instead of the
|
|
+ device name set in ``name``.
|
|
+
|
|
+ config
|
|
+ Place where the fstab file lives
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ # Adjust the config file based on the OS
|
|
+ if config == '/etc/fstab':
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ config = '/etc/auto_salt'
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ config = '/etc/filesystems'
|
|
+
|
|
+ if not fs_file == '/':
|
|
+ fs_file = fs_file.rstrip('/')
|
|
+
|
|
+ fs_spec = _convert_to(name, mount_by)
|
|
+
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ fstab_data = __salt__['mount.automaster'](config)
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ fstab_data = __salt__['mount.filesystems'](config)
|
|
+ else:
|
|
+ fstab_data = __salt__['mount.fstab'](config)
|
|
+
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if fs_file not in fstab_data:
|
|
+ msg = '{} entry is already missing in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ else:
|
|
+ msg = '{} entry will be removed from {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ return ret
|
|
+
|
|
+ if fs_file in fstab_data:
|
|
+ if __grains__['os'] in ['MacOS', 'Darwin']:
|
|
+ out = __salt__['mount.rm_automaster'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ config=config)
|
|
+ elif __grains__['os'] == 'AIX':
|
|
+ out = __salt__['mount.rm_filesystems'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ config=config)
|
|
+ else:
|
|
+ out = __salt__['mount.rm_fstab'](name=fs_file,
|
|
+ device=fs_spec,
|
|
+ config=config)
|
|
+
|
|
+ if out is not True:
|
|
+ ret['result'] = False
|
|
+ msg = '{} entry failed when removing from {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ else:
|
|
+ ret['result'] = True
|
|
+ ret['changes']['persist'] = 'removed'
|
|
+ msg = '{} entry removed from {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+ else:
|
|
+ ret['result'] = True
|
|
+ msg = '{} entry is already missing in {}.'
|
|
+ ret['comment'].append(msg.format(fs_file, config))
|
|
+
|
|
+ return ret
|
|
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
|
|
index 0aca1e0af8..22a97fe98c 100644
|
|
--- a/salt/states/pkg.py
|
|
+++ b/salt/states/pkg.py
|
|
@@ -241,7 +241,7 @@ def _fulfills_version_spec(versions, oper, desired_version,
|
|
return False
|
|
|
|
|
|
-def _find_unpurge_targets(desired):
|
|
+def _find_unpurge_targets(desired, **kwargs):
|
|
'''
|
|
Find packages which are marked to be purged but can't yet be removed
|
|
because they are dependencies for other installed packages. These are the
|
|
@@ -250,7 +250,7 @@ def _find_unpurge_targets(desired):
|
|
'''
|
|
return [
|
|
x for x in desired
|
|
- if x in __salt__['pkg.list_pkgs'](purge_desired=True)
|
|
+ if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
|
|
]
|
|
|
|
|
|
@@ -265,7 +265,7 @@ def _find_download_targets(name=None,
|
|
Inspect the arguments to pkg.downloaded and discover what packages need to
|
|
be downloaded. Return a dict of packages to download.
|
|
'''
|
|
- cur_pkgs = __salt__['pkg.list_downloaded']()
|
|
+ cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
|
|
if pkgs:
|
|
to_download = _repack_pkgs(pkgs, normalize=normalize)
|
|
|
|
@@ -383,7 +383,7 @@ def _find_advisory_targets(name=None,
|
|
Inspect the arguments to pkg.patch_installed and discover what advisory
|
|
patches need to be installed. Return a dict of advisory patches to install.
|
|
'''
|
|
- cur_patches = __salt__['pkg.list_installed_patches']()
|
|
+ cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
|
|
if advisory_ids:
|
|
to_download = advisory_ids
|
|
else:
|
|
@@ -587,7 +587,7 @@ def _find_install_targets(name=None,
|
|
'minion log.'.format('pkgs' if pkgs
|
|
else 'sources')}
|
|
|
|
- to_unpurge = _find_unpurge_targets(desired)
|
|
+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
|
|
else:
|
|
if salt.utils.platform.is_windows():
|
|
pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
|
|
@@ -607,7 +607,7 @@ def _find_install_targets(name=None,
|
|
else:
|
|
desired = {name: version}
|
|
|
|
- to_unpurge = _find_unpurge_targets(desired)
|
|
+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
|
|
|
|
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
|
|
origin = bool(re.search('/', name))
|
|
@@ -766,7 +766,8 @@ def _find_install_targets(name=None,
|
|
verify_result = __salt__['pkg.verify'](
|
|
package_name,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options
|
|
+ verify_options=verify_options,
|
|
+ **kwargs
|
|
)
|
|
except (CommandExecutionError, SaltInvocationError) as exc:
|
|
failed_verify = exc.strerror
|
|
@@ -795,7 +796,9 @@ def _find_install_targets(name=None,
|
|
verify_result = __salt__['pkg.verify'](
|
|
package_name,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options)
|
|
+ verify_options=verify_options,
|
|
+ **kwargs
|
|
+ )
|
|
except (CommandExecutionError, SaltInvocationError) as exc:
|
|
failed_verify = exc.strerror
|
|
continue
|
|
@@ -1910,7 +1913,8 @@ def installed(
|
|
# have caught invalid arguments earlier.
|
|
verify_result = __salt__['pkg.verify'](reinstall_pkg,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options)
|
|
+ verify_options=verify_options,
|
|
+ **kwargs)
|
|
if verify_result:
|
|
failed.append(reinstall_pkg)
|
|
altered_files[reinstall_pkg] = verify_result
|
|
@@ -2098,7 +2102,7 @@ def downloaded(name,
|
|
'package(s): {0}'.format(exc)
|
|
return ret
|
|
|
|
- new_pkgs = __salt__['pkg.list_downloaded']()
|
|
+ new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
|
|
ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)
|
|
|
|
if failed:
|
|
@@ -2974,7 +2978,7 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
|
|
pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
|
|
try:
|
|
packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
|
|
- expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname)}
|
|
+ expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)}
|
|
for pkgname, pkgver in six.iteritems(packages)}
|
|
if isinstance(pkgs, list):
|
|
packages = [pkg for pkg in packages if pkg in pkgs]
|
|
@@ -3156,7 +3160,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
|
|
.format(name, exc))
|
|
return ret
|
|
|
|
- failed = [x for x in targets if x not in __salt__['pkg.list_pkgs']()]
|
|
+ failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)]
|
|
if failed:
|
|
ret['comment'] = (
|
|
'Failed to install the following packages: {0}'
|
|
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
|
|
index 4d5e9eea92..6d8e94aa18 100644
|
|
--- a/salt/states/pkgrepo.py
|
|
+++ b/salt/states/pkgrepo.py
|
|
@@ -393,10 +393,7 @@ def managed(name, ppa=None, **kwargs):
|
|
kwargs.pop(kwarg, None)
|
|
|
|
try:
|
|
- pre = __salt__['pkg.get_repo'](
|
|
- repo,
|
|
- ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ pre = __salt__['pkg.get_repo'](repo=repo, **kwargs)
|
|
except CommandExecutionError as exc:
|
|
ret['result'] = False
|
|
ret['comment'] = \
|
|
@@ -512,10 +509,7 @@ def managed(name, ppa=None, **kwargs):
|
|
return ret
|
|
|
|
try:
|
|
- post = __salt__['pkg.get_repo'](
|
|
- repo,
|
|
- ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ post = __salt__['pkg.get_repo'](repo=repo, **kwargs)
|
|
if pre:
|
|
for kwarg in sanitizedkwargs:
|
|
if post.get(kwarg) != pre.get(kwarg):
|
|
@@ -608,9 +602,7 @@ def absent(name, **kwargs):
|
|
return ret
|
|
|
|
try:
|
|
- repo = __salt__['pkg.get_repo'](
|
|
- name, ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ repo = __salt__['pkg.get_repo'](name, **kwargs)
|
|
except CommandExecutionError as exc:
|
|
ret['result'] = False
|
|
ret['comment'] = \
|
|
diff --git a/salt/utils/oset.py b/salt/utils/oset.py
|
|
index f5ab3c1e94..aae5965b86 100644
|
|
--- a/salt/utils/oset.py
|
|
+++ b/salt/utils/oset.py
|
|
@@ -22,7 +22,10 @@ Rob Speer's changes are as follows:
|
|
- added __getitem__
|
|
'''
|
|
from __future__ import absolute_import, unicode_literals, print_function
|
|
-import collections
|
|
+try:
|
|
+ from collections.abc import MutableSet
|
|
+except ImportError:
|
|
+ from collections import MutableSet
|
|
|
|
SLICE_ALL = slice(None)
|
|
__version__ = '2.0.1'
|
|
@@ -44,7 +47,7 @@ def is_iterable(obj):
|
|
return hasattr(obj, '__iter__') and not isinstance(obj, str) and not isinstance(obj, tuple)
|
|
|
|
|
|
-class OrderedSet(collections.MutableSet):
|
|
+class OrderedSet(MutableSet):
|
|
"""
|
|
An OrderedSet is a custom MutableSet that remembers its order, so that
|
|
every entry has an index that can be looked up.
|
|
diff --git a/salt/utils/path.py b/salt/utils/path.py
|
|
index b1d601e464..132190b271 100644
|
|
--- a/salt/utils/path.py
|
|
+++ b/salt/utils/path.py
|
|
@@ -6,7 +6,10 @@ lack of support for reading NTFS links.
|
|
|
|
# Import python libs
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
-import collections
|
|
+try:
|
|
+ from collections.abc import Iterable
|
|
+except ImportError:
|
|
+ from collections import Iterable
|
|
import errno
|
|
import logging
|
|
import os
|
|
@@ -262,7 +265,7 @@ def which_bin(exes):
|
|
'''
|
|
Scan over some possible executables and return the first one that is found
|
|
'''
|
|
- if not isinstance(exes, collections.Iterable):
|
|
+ if not isinstance(exes, Iterable):
|
|
return None
|
|
for exe in exes:
|
|
path = which(exe)
|
|
diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py
|
|
index 7790b3567d..060bc1e3fb 100644
|
|
--- a/salt/utils/systemd.py
|
|
+++ b/salt/utils/systemd.py
|
|
@@ -6,6 +6,7 @@ Contains systemd related help files
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
import logging
|
|
import os
|
|
+import re
|
|
import subprocess
|
|
|
|
# Import Salt libs
|
|
@@ -65,8 +66,8 @@ def version(context=None):
|
|
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
|
|
outstr = salt.utils.stringutils.to_str(stdout)
|
|
try:
|
|
- ret = int(outstr.splitlines()[0].split()[-1])
|
|
- except (IndexError, ValueError):
|
|
+ ret = int(re.search(r'\w+ ([0-9]+)', outstr.splitlines()[0]).group(1))
|
|
+ except (AttributeError, IndexError, ValueError):
|
|
log.error(
|
|
'Unable to determine systemd version from systemctl '
|
|
'--version, output follows:\n%s', outstr
|
|
diff --git a/tests/conftest.py b/tests/conftest.py
|
|
index 906cca6dd5..1dbe2176b6 100644
|
|
--- a/tests/conftest.py
|
|
+++ b/tests/conftest.py
|
|
@@ -244,24 +244,24 @@ def pytest_runtest_setup(item):
|
|
'''
|
|
Fixtures injection based on markers or test skips based on CLI arguments
|
|
'''
|
|
- destructive_tests_marker = item.get_marker('destructive_test')
|
|
+ destructive_tests_marker = item.get_closest_marker('destructive_test')
|
|
if destructive_tests_marker is not None:
|
|
if item.config.getoption('--run-destructive') is False:
|
|
pytest.skip('Destructive tests are disabled')
|
|
os.environ['DESTRUCTIVE_TESTS'] = six.text_type(item.config.getoption('--run-destructive'))
|
|
|
|
- expensive_tests_marker = item.get_marker('expensive_test')
|
|
+ expensive_tests_marker = item.get_closest_marker('expensive_test')
|
|
if expensive_tests_marker is not None:
|
|
if item.config.getoption('--run-expensive') is False:
|
|
pytest.skip('Expensive tests are disabled')
|
|
os.environ['EXPENSIVE_TESTS'] = six.text_type(item.config.getoption('--run-expensive'))
|
|
|
|
- skip_if_not_root_marker = item.get_marker('skip_if_not_root')
|
|
+ skip_if_not_root_marker = item.get_closest_marker('skip_if_not_root')
|
|
if skip_if_not_root_marker is not None:
|
|
if os.getuid() != 0:
|
|
pytest.skip('You must be logged in as root to run this test')
|
|
|
|
- skip_if_binaries_missing_marker = item.get_marker('skip_if_binaries_missing')
|
|
+ skip_if_binaries_missing_marker = item.get_closest_marker('skip_if_binaries_missing')
|
|
if skip_if_binaries_missing_marker is not None:
|
|
binaries = skip_if_binaries_missing_marker.args
|
|
if len(binaries) == 1:
|
|
@@ -286,7 +286,7 @@ def pytest_runtest_setup(item):
|
|
)
|
|
)
|
|
|
|
- requires_network_marker = item.get_marker('requires_network')
|
|
+ requires_network_marker = item.get_closest_marker('requires_network')
|
|
if requires_network_marker is not None:
|
|
only_local_network = requires_network_marker.kwargs.get('only_local_network', False)
|
|
has_local_network = False
|
|
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
|
|
index 117e02c39f..5fa0ea06f1 100644
|
|
--- a/tests/unit/grains/test_core.py
|
|
+++ b/tests/unit/grains/test_core.py
|
|
@@ -1062,3 +1062,42 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
|
|
ret = core._osx_memdata()
|
|
assert ret['swap_total'] == 0
|
|
assert ret['mem_total'] == 4096
|
|
+
|
|
+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
|
|
+ @patch('os.path.exists')
|
|
+ @patch('salt.utils.platform.is_proxy')
|
|
+ def test__hw_data_linux_empty(self, is_proxy, exists):
|
|
+ is_proxy.return_value = False
|
|
+ exists.return_value = True
|
|
+ with patch('salt.utils.files.fopen', mock_open(read_data='')):
|
|
+ self.assertEqual(core._hw_data({'kernel': 'Linux'}), {
|
|
+ 'biosreleasedate': '',
|
|
+ 'biosversion': '',
|
|
+ 'manufacturer': '',
|
|
+ 'productname': '',
|
|
+ 'serialnumber': '',
|
|
+ 'uuid': ''
|
|
+ })
|
|
+
|
|
+ @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
|
|
+ @skipIf(six.PY2, 'UnicodeDecodeError is throw in Python 3')
|
|
+ @patch('os.path.exists')
|
|
+ @patch('salt.utils.platform.is_proxy')
|
|
+ def test__hw_data_linux_unicode_error(self, is_proxy, exists):
|
|
+ def _fopen(*args):
|
|
+ class _File(object):
|
|
+ def __enter__(self):
|
|
+ return self
|
|
+
|
|
+ def __exit__(self, *args):
|
|
+ pass
|
|
+
|
|
+ def read(self):
|
|
+ raise UnicodeDecodeError('enconding', b'', 1, 2, 'reason')
|
|
+
|
|
+ return _File()
|
|
+
|
|
+ is_proxy.return_value = False
|
|
+ exists.return_value = True
|
|
+ with patch('salt.utils.files.fopen', _fopen):
|
|
+ self.assertEqual(core._hw_data({'kernel': 'Linux'}), {})
|
|
diff --git a/tests/unit/modules/test_btrfs.py b/tests/unit/modules/test_btrfs.py
|
|
index ebd28a6451..b5f934034d 100644
|
|
--- a/tests/unit/modules/test_btrfs.py
|
|
+++ b/tests/unit/modules/test_btrfs.py
|
|
@@ -5,6 +5,8 @@
|
|
# Import python libs
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
|
|
+import pytest
|
|
+
|
|
# Import Salt Testing Libs
|
|
from tests.support.mixins import LoaderModuleMockMixin
|
|
from tests.support.unit import TestCase, skipIf
|
|
@@ -29,7 +31,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
|
|
Test cases for salt.modules.btrfs
|
|
'''
|
|
def setup_loader_modules(self):
|
|
- return {btrfs: {}}
|
|
+ return {btrfs: {'__salt__': {}}}
|
|
|
|
# 'version' function tests: 1
|
|
def test_version(self):
|
|
@@ -362,3 +364,369 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
|
|
'''
|
|
self.assertRaises(CommandExecutionError, btrfs.properties,
|
|
'/dev/sda1', 'subvol', True)
|
|
+
|
|
+ def test_subvolume_exists(self):
|
|
+ '''
|
|
+ Test subvolume_exists
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.retcode': MagicMock(return_value=0),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_exists('/mnt/one')
|
|
+
|
|
+ def test_subvolume_not_exists(self):
|
|
+ '''
|
|
+ Test subvolume_exists
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.retcode': MagicMock(return_value=1),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert not btrfs.subvolume_exists('/mnt/nowhere')
|
|
+
|
|
+ def test_subvolume_create_fails_parameters(self):
|
|
+ '''
|
|
+ Test btrfs subvolume create
|
|
+ '''
|
|
+ # Fails when qgroupids is not a list
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_create('var', qgroupids='1')
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_create_already_exists(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume create
|
|
+ '''
|
|
+ subvolume_exists.return_value = True
|
|
+ assert not btrfs.subvolume_create('var', dest='/mnt')
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_create(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume create
|
|
+ '''
|
|
+ subvolume_exists.return_value = False
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={'recode': 0}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_create('var', dest='/mnt')
|
|
+ subvolume_exists.assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'create', '/mnt/var'])
|
|
+
|
|
+ def test_subvolume_delete_fails_parameters(self):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ # We need to provide name or names
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_delete()
|
|
+
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_delete(names='var')
|
|
+
|
|
+ def test_subvolume_delete_fails_parameter_commit(self):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ # Parameter commit can be 'after' or 'each'
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_delete(name='var', commit='maybe')
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_delete_already_missing(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ subvolume_exists.return_value = False
|
|
+ assert not btrfs.subvolume_delete(name='var', names=['tmp'])
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_delete_already_missing_name(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ subvolume_exists.return_value = False
|
|
+ assert not btrfs.subvolume_delete(name='var')
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_delete_already_missing_names(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ subvolume_exists.return_value = False
|
|
+ assert not btrfs.subvolume_delete(names=['tmp'])
|
|
+
|
|
+ @patch('salt.modules.btrfs.subvolume_exists')
|
|
+ def test_subvolume_delete(self, subvolume_exists):
|
|
+ '''
|
|
+ Test btrfs subvolume delete
|
|
+ '''
|
|
+ subvolume_exists.return_value = True
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={'recode': 0}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_delete('var', names=['tmp'])
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'delete', 'var', 'tmp'])
|
|
+
|
|
+ def test_subvolume_find_new_empty(self):
|
|
+ '''
|
|
+ Test btrfs subvolume find-new
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': 'transid marker was 1024'
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_find_new('var', '2000') == {
|
|
+ 'files': [],
|
|
+ 'transid': '1024'
|
|
+ }
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'find-new', 'var', '2000'])
|
|
+
|
|
+ def test_subvolume_find_new(self):
|
|
+ '''
|
|
+ Test btrfs subvolume find-new
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': '''inode 185148 ... gen 2108 flags NONE var/log/audit/audit.log
|
|
+inode 187390 ... INLINE etc/openvpn/openvpn-status.log
|
|
+transid marker was 1024'''
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_find_new('var', '1023') == {
|
|
+ 'files': ['var/log/audit/audit.log',
|
|
+ 'etc/openvpn/openvpn-status.log'],
|
|
+ 'transid': '1024'
|
|
+ }
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'find-new', 'var', '1023'])
|
|
+
|
|
+ def test_subvolume_get_default_free(self):
|
|
+ '''
|
|
+ Test btrfs subvolume get-default
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': 'ID 5 (FS_TREE)',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_get_default('/mnt') == {
|
|
+ 'id': '5',
|
|
+ 'name': '(FS_TREE)',
|
|
+ }
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'get-default', '/mnt'])
|
|
+
|
|
+ def test_subvolume_get_default(self):
|
|
+ '''
|
|
+ Test btrfs subvolume get-default
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': 'ID 257 gen 8 top level 5 path var',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_get_default('/mnt') == {
|
|
+ 'id': '257',
|
|
+ 'name': 'var',
|
|
+ }
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'get-default', '/mnt'])
|
|
+
|
|
+ def test_subvolume_list_fails_parameters(self):
|
|
+ '''
|
|
+ Test btrfs subvolume list
|
|
+ '''
|
|
+ # Fails when sort is not a list
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_list('/mnt', sort='-rootid')
|
|
+
|
|
+ # Fails when sort is not recognized
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_list('/mnt', sort=['-root'])
|
|
+
|
|
+ def test_subvolume_list_simple(self):
|
|
+ '''
|
|
+ Test btrfs subvolume list
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': '''ID 257 gen 8 top level 5 path one
|
|
+ID 258 gen 10 top level 5 path another one
|
|
+''',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_list('/mnt') == [
|
|
+ {
|
|
+ 'id': '257',
|
|
+ 'gen': '8',
|
|
+ 'top level': '5',
|
|
+ 'path': 'one',
|
|
+ },
|
|
+ {
|
|
+ 'id': '258',
|
|
+ 'gen': '10',
|
|
+ 'top level': '5',
|
|
+ 'path': 'another one',
|
|
+ },
|
|
+ ]
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'list', '/mnt'])
|
|
+
|
|
+ def test_subvolume_list(self):
|
|
+ '''
|
|
+ Test btrfs subvolume list
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': '''\
|
|
+ID 257 gen 8 cgen 8 parent 5 top level 5 parent_uuid - received_uuid - \
|
|
+ uuid 777...-..05 path one
|
|
+ID 258 gen 10 cgen 10 parent 5 top level 5 parent_uuid - received_uuid - \
|
|
+ uuid a90...-..01 path another one
|
|
+''',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_list('/mnt', parent_id=True,
|
|
+ absolute=True,
|
|
+ ogeneration=True,
|
|
+ generation=True,
|
|
+ subvolumes=True, uuid=True,
|
|
+ parent_uuid=True,
|
|
+ sent_subvolume_uuid=True,
|
|
+ generation_cmp='-100',
|
|
+ ogeneration_cmp='+5',
|
|
+ sort=['-rootid', 'gen']) == [
|
|
+ {
|
|
+ 'id': '257',
|
|
+ 'gen': '8',
|
|
+ 'cgen': '8',
|
|
+ 'parent': '5',
|
|
+ 'top level': '5',
|
|
+ 'parent_uuid': '-',
|
|
+ 'received_uuid': '-',
|
|
+ 'uuid': '777...-..05',
|
|
+ 'path': 'one',
|
|
+ },
|
|
+ {
|
|
+ 'id': '258',
|
|
+ 'gen': '10',
|
|
+ 'cgen': '10',
|
|
+ 'parent': '5',
|
|
+ 'top level': '5',
|
|
+ 'parent_uuid': '-',
|
|
+ 'received_uuid': '-',
|
|
+ 'uuid': 'a90...-..01',
|
|
+ 'path': 'another one',
|
|
+ },
|
|
+ ]
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'list', '-p', '-a', '-c', '-g',
|
|
+ '-o', '-u', '-q', '-R', '-G', '-100', '-C', '+5',
|
|
+ '--sort=-rootid,gen', '/mnt'])
|
|
+
|
|
+ def test_subvolume_set_default(self):
|
|
+ '''
|
|
+ Test btrfs subvolume set-default
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={'recode': 0}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_set_default('257', '/mnt')
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'set-default', '257', '/mnt'])
|
|
+
|
|
+ def test_subvolume_show(self):
|
|
+ '''
|
|
+ Test btrfs subvolume show
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={
|
|
+ 'recode': 0,
|
|
+ 'stdout': '''@/var
|
|
+ Name: var
|
|
+ UUID: 7a14...-...04
|
|
+ Parent UUID: -
|
|
+ Received UUID: -
|
|
+ Creation time: 2018-10-01 14:33:12 +0200
|
|
+ Subvolume ID: 258
|
|
+ Generation: 82479
|
|
+ Gen at creation: 10
|
|
+ Parent ID: 256
|
|
+ Top level ID: 256
|
|
+ Flags: -
|
|
+ Snapshot(s):
|
|
+''',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_show('/var') == {
|
|
+ '@/var': {
|
|
+ 'name': 'var',
|
|
+ 'uuid': '7a14...-...04',
|
|
+ 'parent uuid': '-',
|
|
+ 'received uuid': '-',
|
|
+ 'creation time': '2018-10-01 14:33:12 +0200',
|
|
+ 'subvolume id': '258',
|
|
+ 'generation': '82479',
|
|
+ 'gen at creation': '10',
|
|
+ 'parent id': '256',
|
|
+ 'top level id': '256',
|
|
+ 'flags': '-',
|
|
+ 'snapshot(s)': '',
|
|
+ },
|
|
+ }
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'show', '/var'])
|
|
+
|
|
+ def test_subvolume_sync_fail_parameters(self):
|
|
+ '''
|
|
+ Test btrfs subvolume sync
|
|
+ '''
|
|
+ # Fails when subvolids is not a list
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ btrfs.subvolume_sync('/mnt', subvolids='257')
|
|
+
|
|
+ def test_subvolume_sync(self):
|
|
+ '''
|
|
+ Test btrfs subvolume sync
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value={'recode': 0}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs.subvolume_sync('/mnt', subvolids=['257'],
|
|
+ sleep='1')
|
|
+ salt_mock['cmd.run_all'].assert_called_once()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['btrfs', 'subvolume', 'sync', '-s', '1', '/mnt', '257'])
|
|
diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py
|
|
new file mode 100644
|
|
index 0000000000..7181dd7e50
|
|
--- /dev/null
|
|
+++ b/tests/unit/modules/test_chroot.py
|
|
@@ -0,0 +1,184 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:platform: Linux
|
|
+'''
|
|
+
|
|
+# Import Python Libs
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+
|
|
+# Import Salt Testing Libs
|
|
+from tests.support.mixins import LoaderModuleMockMixin
|
|
+from tests.support.unit import skipIf, TestCase
|
|
+from tests.support.mock import (
|
|
+ MagicMock,
|
|
+ NO_MOCK,
|
|
+ NO_MOCK_REASON,
|
|
+ patch,
|
|
+)
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+import salt.modules.chroot as chroot
|
|
+
|
|
+
|
|
+@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
+class ChrootTestCase(TestCase, LoaderModuleMockMixin):
|
|
+ '''
|
|
+ Test cases for salt.modules.chroot
|
|
+ '''
|
|
+
|
|
+ def setup_loader_modules(self):
|
|
+ return {
|
|
+ chroot: {
|
|
+ '__salt__': {},
|
|
+ '__utils__': {},
|
|
+ '__opts__': {'cachedir': ''},
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @patch('os.path.isdir')
|
|
+ def test_exist(self, isdir):
|
|
+ '''
|
|
+ Test if the chroot environment exist.
|
|
+ '''
|
|
+ isdir.side_effect = (True, True, True)
|
|
+ self.assertTrue(chroot.exist('/chroot'))
|
|
+
|
|
+ isdir.side_effect = (True, True, False)
|
|
+ self.assertFalse(chroot.exist('/chroot'))
|
|
+
|
|
+ @patch('os.makedirs')
|
|
+ @patch('salt.modules.chroot.exist')
|
|
+ def test_create(self, exist, makedirs):
|
|
+ '''
|
|
+ Test the creation of an empty chroot environment.
|
|
+ '''
|
|
+ exist.return_value = True
|
|
+ self.assertTrue(chroot.create('/chroot'))
|
|
+ makedirs.assert_not_called()
|
|
+
|
|
+ exist.return_value = False
|
|
+ self.assertTrue(chroot.create('/chroot'))
|
|
+ makedirs.assert_called()
|
|
+
|
|
+ @patch('salt.modules.chroot.exist')
|
|
+ def test_call_fails_input_validation(self, exist):
|
|
+ '''
|
|
+ Test execution of Salt functions in chroot.
|
|
+ '''
|
|
+ # Basic input validation
|
|
+ exist.return_value = False
|
|
+ self.assertRaises(CommandExecutionError, chroot.call, '/chroot', '')
|
|
+ self.assertRaises(CommandExecutionError, chroot.call, '/chroot', 'test.ping')
|
|
+
|
|
+ @patch('salt.modules.chroot.exist')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test_call_fails_untar(self, mkdtemp, exist):
|
|
+ '''
|
|
+ Test execution of Salt functions in chroot.
|
|
+ '''
|
|
+ # Fail the tar command
|
|
+ exist.return_value = True
|
|
+ mkdtemp.return_value = '/chroot/tmp01'
|
|
+ utils_mock = {
|
|
+ 'thin.gen_thin': MagicMock(return_value='/salt-thin.tgz'),
|
|
+ 'files.rm_rf': MagicMock(),
|
|
+ }
|
|
+ salt_mock = {
|
|
+ 'archive.tar': MagicMock(return_value='Error'),
|
|
+ 'config.option': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(chroot.__utils__, utils_mock), \
|
|
+ patch.dict(chroot.__salt__, salt_mock):
|
|
+ self.assertEqual(chroot.call('/chroot', 'test.ping'), {
|
|
+ 'result': False,
|
|
+ 'comment': 'Error'
|
|
+ })
|
|
+ utils_mock['thin.gen_thin'].assert_called_once()
|
|
+ salt_mock['config.option'].assert_called()
|
|
+ salt_mock['archive.tar'].assert_called_once()
|
|
+ utils_mock['files.rm_rf'].assert_called_once()
|
|
+
|
|
+ @patch('salt.modules.chroot.exist')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test_call_fails_salt_thin(self, mkdtemp, exist):
|
|
+ '''
|
|
+ Test execution of Salt functions in chroot.
|
|
+ '''
|
|
+ # Fail the inner command
|
|
+ exist.return_value = True
|
|
+ mkdtemp.return_value = '/chroot/tmp01'
|
|
+ utils_mock = {
|
|
+ 'thin.gen_thin': MagicMock(return_value='/salt-thin.tgz'),
|
|
+ 'files.rm_rf': MagicMock(),
|
|
+ }
|
|
+ salt_mock = {
|
|
+ 'archive.tar': MagicMock(return_value=''),
|
|
+ 'config.option': MagicMock(),
|
|
+ 'cmd.run_chroot': MagicMock(return_value={
|
|
+ 'retcode': 1,
|
|
+ 'stderr': 'Error',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(chroot.__utils__, utils_mock), \
|
|
+ patch.dict(chroot.__salt__, salt_mock):
|
|
+ self.assertRaises(CommandExecutionError, chroot.call, '/chroot',
|
|
+ 'test.ping')
|
|
+ utils_mock['thin.gen_thin'].assert_called_once()
|
|
+ salt_mock['config.option'].assert_called()
|
|
+ salt_mock['archive.tar'].assert_called_once()
|
|
+ salt_mock['cmd.run_chroot'].assert_called_once()
|
|
+ utils_mock['files.rm_rf'].assert_called_once()
|
|
+
|
|
+ @patch('salt.modules.chroot.exist')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test_call_success(self, mkdtemp, exist):
|
|
+ '''
|
|
+ Test execution of Salt functions in chroot.
|
|
+ '''
|
|
+ # Success test
|
|
+ exist.return_value = True
|
|
+ mkdtemp.return_value = '/chroot/tmp01'
|
|
+ utils_mock = {
|
|
+ 'thin.gen_thin': MagicMock(return_value='/salt-thin.tgz'),
|
|
+ 'files.rm_rf': MagicMock(),
|
|
+ 'json.find_json': MagicMock(return_value={'return': 'result'})
|
|
+ }
|
|
+ salt_mock = {
|
|
+ 'archive.tar': MagicMock(return_value=''),
|
|
+ 'config.option': MagicMock(),
|
|
+ 'cmd.run_chroot': MagicMock(return_value={
|
|
+ 'retcode': 0,
|
|
+ 'stdout': '',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(chroot.__utils__, utils_mock), \
|
|
+ patch.dict(chroot.__salt__, salt_mock):
|
|
+ self.assertEqual(chroot.call('/chroot', 'test.ping'), 'result')
|
|
+ utils_mock['thin.gen_thin'].assert_called_once()
|
|
+ salt_mock['config.option'].assert_called()
|
|
+ salt_mock['archive.tar'].assert_called_once()
|
|
+ salt_mock['cmd.run_chroot'].assert_called_once()
|
|
+ utils_mock['files.rm_rf'].assert_called_once()
|
|
diff --git a/tests/unit/modules/test_freezer.py b/tests/unit/modules/test_freezer.py
|
|
new file mode 100644
|
|
index 0000000000..f6cf2f374f
|
|
--- /dev/null
|
|
+++ b/tests/unit/modules/test_freezer.py
|
|
@@ -0,0 +1,274 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:platform: Linux
|
|
+'''
|
|
+
|
|
+# Import Python Libs
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+
|
|
+# Import Salt Testing Libs
|
|
+from tests.support.mixins import LoaderModuleMockMixin
|
|
+from tests.support.unit import skipIf, TestCase
|
|
+from tests.support.mock import (
|
|
+ MagicMock,
|
|
+ NO_MOCK,
|
|
+ NO_MOCK_REASON,
|
|
+ patch,
|
|
+)
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+import salt.modules.freezer as freezer
|
|
+
|
|
+
|
|
+@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
+class FreezerTestCase(TestCase, LoaderModuleMockMixin):
|
|
+ '''
|
|
+ Test cases for salt.modules.freezer
|
|
+ '''
|
|
+
|
|
+ def setup_loader_modules(self):
|
|
+ return {
|
|
+ freezer: {
|
|
+ '__salt__': {},
|
|
+ '__opts__': {'cachedir': ''},
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @patch('os.path.isfile')
|
|
+ def test_status(self, isfile):
|
|
+ '''
|
|
+ Test if a frozen state exist.
|
|
+ '''
|
|
+ isfile.side_effect = (True, True)
|
|
+ self.assertTrue(freezer.status())
|
|
+
|
|
+ isfile.side_effect = (True, False)
|
|
+ self.assertFalse(freezer.status())
|
|
+
|
|
+ @patch('os.listdir')
|
|
+ @patch('os.path.isdir')
|
|
+ def test_list(self, isdir, listdir):
|
|
+ '''
|
|
+ Test the listing of all frozen states.
|
|
+ '''
|
|
+ # There is no freezer directory
|
|
+ isdir.return_value = False
|
|
+ self.assertEqual(freezer.list_(), [])
|
|
+
|
|
+ # There is freezer directory, but is empty
|
|
+ isdir.return_value = True
|
|
+ listdir.return_value = []
|
|
+ self.assertEqual(freezer.list_(), [])
|
|
+
|
|
+ # There is freezer directory with states
|
|
+ isdir.return_value = True
|
|
+ listdir.return_value = [
|
|
+ 'freezer-pkgs.yml', 'freezer-reps.yml',
|
|
+ 'state-pkgs.yml', 'state-reps.yml',
|
|
+ 'random-file'
|
|
+ ]
|
|
+ self.assertEqual(freezer.list_(), ['freezer', 'state'])
|
|
+
|
|
+ @patch('os.makedirs')
|
|
+ def test_freeze_fails_cache(self, makedirs):
|
|
+ '''
|
|
+ Test to freeze a current installation
|
|
+ '''
|
|
+ # Fails when creating the freeze cache directory
|
|
+ makedirs.side_effect = OSError()
|
|
+ self.assertRaises(CommandExecutionError, freezer.freeze)
|
|
+
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ @patch('os.makedirs')
|
|
+ def test_freeze_fails_already_frozen(self, makedirs, status):
|
|
+ '''
|
|
+ Test to freeze a current installation
|
|
+ '''
|
|
+ # Fails when there is already a frozen state
|
|
+ status.return_value = True
|
|
+ self.assertRaises(CommandExecutionError, freezer.freeze)
|
|
+ makedirs.assert_called_once()
|
|
+
|
|
+ @patch('salt.utils.json.dump')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ @patch('os.makedirs')
|
|
+ def test_freeze_success_new_state(self, makedirs, status, fopen, dump):
|
|
+ '''
|
|
+ Test to freeze a current installation
|
|
+ '''
|
|
+ # Freeze the current new state
|
|
+ status.return_value = False
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={}),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertTrue(freezer.freeze())
|
|
+ makedirs.assert_called_once()
|
|
+ salt_mock['pkg.list_pkgs'].assert_called_once()
|
|
+ salt_mock['pkg.list_repos'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ dump.asster_called()
|
|
+
|
|
+ @patch('salt.utils.json.dump')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ @patch('os.makedirs')
|
|
+ def test_freeze_success_force(self, makedirs, status, fopen, dump):
|
|
+ '''
|
|
+ Test to freeze a current installation
|
|
+ '''
|
|
+ # Freeze the current old state
|
|
+ status.return_value = True
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={}),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertTrue(freezer.freeze(force=True))
|
|
+ makedirs.assert_called_once()
|
|
+ salt_mock['pkg.list_pkgs'].assert_called_once()
|
|
+ salt_mock['pkg.list_repos'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ dump.asster_called()
|
|
+
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ def test_restore_fails_missing_state(self, status):
|
|
+ '''
|
|
+ Test to restore an old state
|
|
+ '''
|
|
+ # Fails if the state is not found
|
|
+ status.return_value = False
|
|
+ self.assertRaises(CommandExecutionError, freezer.restore)
|
|
+
|
|
+ @patch('salt.utils.json.load')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ def test_restore_add_missing_repo(self, status, fopen, load):
|
|
+ '''
|
|
+ Test to restore an old state
|
|
+ '''
|
|
+ # Only a missing repo is installed
|
|
+ status.return_value = True
|
|
+ load.side_effect = ({}, {'missing-repo': {}})
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={}),
|
|
+ 'pkg.mod_repo': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertEqual(freezer.restore(), {
|
|
+ 'pkgs': {'add': [], 'remove': []},
|
|
+ 'repos': {'add': ['missing-repo'], 'remove': []},
|
|
+ 'comment': [],
|
|
+ })
|
|
+ salt_mock['pkg.list_pkgs'].assert_called()
|
|
+ salt_mock['pkg.list_repos'].assert_called()
|
|
+ salt_mock['pkg.mod_repo'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ load.asster_called()
|
|
+
|
|
+ @patch('salt.utils.json.load')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ def test_restore_add_missing_package(self, status, fopen, load):
|
|
+ '''
|
|
+ Test to restore an old state
|
|
+ '''
|
|
+ # Only a missing package is installed
|
|
+ status.return_value = True
|
|
+ load.side_effect = ({'missing-package': {}}, {})
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={}),
|
|
+ 'pkg.install': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertEqual(freezer.restore(), {
|
|
+ 'pkgs': {'add': ['missing-package'], 'remove': []},
|
|
+ 'repos': {'add': [], 'remove': []},
|
|
+ 'comment': [],
|
|
+ })
|
|
+ salt_mock['pkg.list_pkgs'].assert_called()
|
|
+ salt_mock['pkg.list_repos'].assert_called()
|
|
+ salt_mock['pkg.install'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ load.asster_called()
|
|
+
|
|
+ @patch('salt.utils.json.load')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ def test_restore_remove_extra_package(self, status, fopen, load):
|
|
+ '''
|
|
+ Test to restore an old state
|
|
+ '''
|
|
+ # Only an extra package is removed
|
|
+ status.return_value = True
|
|
+ load.side_effect = ({}, {})
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={'extra-package': {}}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={}),
|
|
+ 'pkg.remove': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertEqual(freezer.restore(), {
|
|
+ 'pkgs': {'add': [], 'remove': ['extra-package']},
|
|
+ 'repos': {'add': [], 'remove': []},
|
|
+ 'comment': [],
|
|
+ })
|
|
+ salt_mock['pkg.list_pkgs'].assert_called()
|
|
+ salt_mock['pkg.list_repos'].assert_called()
|
|
+ salt_mock['pkg.remove'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ load.asster_called()
|
|
+
|
|
+ @patch('salt.utils.json.load')
|
|
+ @patch('salt.modules.freezer.fopen')
|
|
+ @patch('salt.modules.freezer.status')
|
|
+ def test_restore_remove_extra_repo(self, status, fopen, load):
|
|
+ '''
|
|
+ Test to restore an old state
|
|
+ '''
|
|
+ # Only an extra repository is removed
|
|
+ status.return_value = True
|
|
+ load.side_effect = ({}, {})
|
|
+ salt_mock = {
|
|
+ 'pkg.list_pkgs': MagicMock(return_value={}),
|
|
+ 'pkg.list_repos': MagicMock(return_value={'extra-repo': {}}),
|
|
+ 'pkg.del_repo': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(freezer.__salt__, salt_mock):
|
|
+ self.assertEqual(freezer.restore(), {
|
|
+ 'pkgs': {'add': [], 'remove': []},
|
|
+ 'repos': {'add': [], 'remove': ['extra-repo']},
|
|
+ 'comment': [],
|
|
+ })
|
|
+ salt_mock['pkg.list_pkgs'].assert_called()
|
|
+ salt_mock['pkg.list_repos'].assert_called()
|
|
+ salt_mock['pkg.del_repo'].assert_called_once()
|
|
+ fopen.assert_called()
|
|
+ load.asster_called()
|
|
diff --git a/tests/unit/modules/test_groupadd.py b/tests/unit/modules/test_groupadd.py
|
|
index 8e0e64749a..2ce7897a06 100644
|
|
--- a/tests/unit/modules/test_groupadd.py
|
|
+++ b/tests/unit/modules/test_groupadd.py
|
|
@@ -84,21 +84,19 @@ class GroupAddTestCase(TestCase, LoaderModuleMockMixin):
|
|
'''
|
|
Tests if the group id is the same as argument
|
|
'''
|
|
- mock_pre_gid = MagicMock(return_value=10)
|
|
- with patch.dict(groupadd.__salt__,
|
|
- {'file.group_to_gid': mock_pre_gid}):
|
|
+ mock = MagicMock(return_value={'gid': 10})
|
|
+ with patch.object(groupadd, 'info', mock):
|
|
self.assertTrue(groupadd.chgid('test', 10))
|
|
|
|
def test_chgid(self):
|
|
'''
|
|
Tests the gid for a named group was changed
|
|
'''
|
|
- mock_pre_gid = MagicMock(return_value=0)
|
|
- mock_cmdrun = MagicMock(return_value=0)
|
|
- with patch.dict(groupadd.__salt__,
|
|
- {'file.group_to_gid': mock_pre_gid}):
|
|
- with patch.dict(groupadd.__salt__, {'cmd.run': mock_cmdrun}):
|
|
- self.assertFalse(groupadd.chgid('test', 500))
|
|
+ mock = MagicMock(return_value=None)
|
|
+ with patch.dict(groupadd.__salt__, {'cmd.run': mock}):
|
|
+ mock = MagicMock(side_effect=[{'gid': 10}, {'gid': 500}])
|
|
+ with patch.object(groupadd, 'info', mock):
|
|
+ self.assertTrue(groupadd.chgid('test', 500))
|
|
|
|
# 'delete' function tests: 1
|
|
|
|
diff --git a/tests/unit/modules/test_kubeadm.py b/tests/unit/modules/test_kubeadm.py
|
|
new file mode 100644
|
|
index 0000000000..a58f54f118
|
|
--- /dev/null
|
|
+++ b/tests/unit/modules/test_kubeadm.py
|
|
@@ -0,0 +1,1144 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2019 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import pytest
|
|
+
|
|
+# Import Salt Testing Libs
|
|
+from tests.support.mixins import LoaderModuleMockMixin
|
|
+from tests.support.unit import TestCase, skipIf
|
|
+from tests.support.mock import (
|
|
+ MagicMock,
|
|
+ patch,
|
|
+ NO_MOCK,
|
|
+ NO_MOCK_REASON
|
|
+)
|
|
+
|
|
+import salt.modules.kubeadm as kubeadm
|
|
+from salt.exceptions import CommandExecutionError
|
|
+
|
|
+
|
|
+@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
+class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
|
|
+ '''
|
|
+ Test cases for salt.modules.kubeadm
|
|
+ '''
|
|
+
|
|
+ def setup_loader_modules(self):
|
|
+ return {
|
|
+ kubeadm: {
|
|
+ '__salt__': {},
|
|
+ '__utils__': {},
|
|
+ }
|
|
+ }
|
|
+
|
|
+ def test_version(self):
|
|
+ '''
|
|
+ Test kuebadm.version without parameters
|
|
+ '''
|
|
+ version = '{"clientVersion":{"major":"1"}}'
|
|
+ salt_mock = {
|
|
+ 'cmd.run_stdout': MagicMock(return_value=version),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.version() == {
|
|
+ 'clientVersion': {'major': '1'}
|
|
+ }
|
|
+ salt_mock['cmd.run_stdout'].assert_called_with(
|
|
+ ['kubeadm', 'version', '--output', 'json']
|
|
+ )
|
|
+
|
|
+ def test_version_params(self):
|
|
+ '''
|
|
+ Test kuebadm.version with parameters
|
|
+ '''
|
|
+ version = '{"clientVersion":{"major":"1"}}'
|
|
+ salt_mock = {
|
|
+ 'cmd.run_stdout': MagicMock(return_value=version),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.version(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == {
|
|
+ 'clientVersion': {'major': '1'}
|
|
+ }
|
|
+ salt_mock['cmd.run_stdout'].assert_called_with(
|
|
+ ['kubeadm', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt',
|
|
+ '--output', 'json']
|
|
+ )
|
|
+
|
|
+ def test_token_create(self):
|
|
+ '''
|
|
+ Test kuebadm.token_create without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_create() == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'create']
|
|
+ )
|
|
+
|
|
+ def test_token_create_params(self):
|
|
+ '''
|
|
+ Test kuebadm.token_create with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_create(token='token',
|
|
+ config='/kubeadm.cfg',
|
|
+ description='a description',
|
|
+ groups=['g:1', 'g:2'],
|
|
+ ttl='1h1m1s',
|
|
+ usages=['u1', 'u2'],
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'create', 'token',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--description', 'a description',
|
|
+ '--groups', '["g:1", "g:2"]',
|
|
+ '--ttl', '1h1m1s',
|
|
+ '--usages', '["u1", "u2"]',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_create_error(self):
|
|
+ '''
|
|
+ Test kuebadm.token_create error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_create()
|
|
+
|
|
+ def test_token_delete(self):
|
|
+ '''
|
|
+ Test kuebadm.token_delete without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'deleted'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_delete('token')
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'delete', 'token']
|
|
+ )
|
|
+
|
|
+ def test_token_delete_params(self):
|
|
+ '''
|
|
+ Test kuebadm.token_delete with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'deleted'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_delete('token',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt')
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'delete', 'token',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_delete_error(self):
|
|
+ '''
|
|
+ Test kuebadm.token_delete error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_delete('token')
|
|
+
|
|
+ def test_token_generate(self):
|
|
+ '''
|
|
+ Test kuebadm.token_generate without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_generate() == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'generate']
|
|
+ )
|
|
+
|
|
+ def test_token_generate_params(self):
|
|
+ '''
|
|
+ Test kuebadm.token_generate with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_generate(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'generate',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_generate_error(self):
|
|
+ '''
|
|
+ Test kuebadm.token_generate error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_generate()
|
|
+
|
|
+ def test_token_list(self):
|
|
+ '''
|
|
+ Test kuebadm.token_list without parameters
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'list']
|
|
+ )
|
|
+
|
|
+ def test_token_list_multiple_lines(self):
|
|
+ '''
|
|
+ Test kuebadm.token_list with multiple tokens
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }, {
|
|
+ 'h1': 'a', 'h2': 'b', 'h31 h32': 'c d', 'h4': 'e'
|
|
+ }]
|
|
+
|
|
+ def test_token_list_broken_lines(self):
|
|
+ '''
|
|
+ Test kuebadm.token_list with multiple tokens, one broken
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+
|
|
+ def test_token_list_params(self):
|
|
+ '''
|
|
+ Test kuebadm.token_list with parameters
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ result = kubeadm.token_list(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt')
|
|
+ assert result == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'list',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_list_error(self):
|
|
+ '''
|
|
+ Test kuebadm.token_generate error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_list()
|
|
+
|
|
+ def test_alpha_certs_renew(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_certs_renew without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_certs_renew() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'certs', 'renew']
|
|
+ )
|
|
+
|
|
+ def test_alpha_certs_renew_params(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_certs_renew with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_certs_renew(rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'certs', 'renew',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_certs_renew_error(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_certs_renew error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_certs_renew()
|
|
+
|
|
+ def test_alpha_kubeconfig_user(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubeconfig_user without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubeconfig_user('user') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
|
|
+ '--client-name', 'user']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubeconfig_user_params(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubeconfig_user with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubeconfig_user(
|
|
+ 'user',
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ cert_dir='/pki',
|
|
+ org='org',
|
|
+ token='token',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
|
|
+ '--client-name', 'user',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--org', 'org',
|
|
+ '--token', 'token',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubeconfig_user_error(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubeconfig_user error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubeconfig_user('user')
|
|
+
|
|
+ def test_alpha_kubelet_config_download(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_download without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_download() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubelet_config_download_params(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_download with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_download(
|
|
+ kubeconfig='/kube.cfg',
|
|
+ kubelet_version='version',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--kubelet-version', 'version',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubelet_config_download_error(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_download error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubelet_config_download()
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_enable_dynamic without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ result = kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
|
|
+ assert result == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
|
|
+ '--node-name', 'node-1']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic_params(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_enable_dynamic with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_enable_dynamic(
|
|
+ 'node-1',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ kubelet_version='version',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
|
|
+ '--node-name', 'node-1',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--kubelet-version', 'version',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic_error(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_kubelet_config_enable_dynamic error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
|
|
+
|
|
+ def test_alpha_selfhosting_pivot(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_selfhosting_pivot without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_selfhosting_pivot() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
|
|
+ )
|
|
+
|
|
+ def test_alpha_selfhosting_pivot_params(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_selfhosting_pivot with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_selfhosting_pivot(
|
|
+ cert_dir='/pki',
|
|
+ config='/kubeadm.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ store_certs_in_secrets=True,
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force',
|
|
+ '--store-certs-in-secrets',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_selfhosting_pivot_error(self):
|
|
+ '''
|
|
+ Test kuebadm.alpha_selfhosting_pivot error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_selfhosting_pivot()
|
|
+
|
|
+ def test_config_images_list(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_list without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_list() == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'list']
|
|
+ )
|
|
+
|
|
+ def test_config_images_list_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_list with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_list(
|
|
+ config='/kubeadm.cfg',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'list',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_images_list_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_list error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_images_list()
|
|
+
|
|
+ def test_config_images_pull(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_pull without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0,
|
|
+ 'stdout': '[config/images] Pulled image1\n'
|
|
+ '[config/images] Pulled image2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_pull() == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'pull']
|
|
+ )
|
|
+
|
|
+ def test_config_images_pull_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_pull with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0,
|
|
+ 'stdout': '[config/images] Pulled image1\n'
|
|
+ '[config/images] Pulled image2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_pull(
|
|
+ config='/kubeadm.cfg',
|
|
+ cri_socket='socket',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'pull',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_images_pull_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_images_pull error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_images_pull()
|
|
+
|
|
+ def test_config_migrate(self):
|
|
+ '''
|
|
+ Test kuebadm.config_migrate without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_migrate('/oldconfig.cfg') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'migrate',
|
|
+ '--old-config', '/oldconfig.cfg']
|
|
+ )
|
|
+
|
|
+ def test_config_migrate_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_migrate with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_migrate(
|
|
+ '/oldconfig.cfg',
|
|
+ new_config='/newconfig.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'migrate',
|
|
+ '--old-config', '/oldconfig.cfg',
|
|
+ '--new-config', '/newconfig.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_migrate_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_migrate error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_migrate('/oldconfig.cfg')
|
|
+
|
|
+ def test_config_print_init_defaults(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_init_defaults without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_init_defaults() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'init-defaults']
|
|
+ )
|
|
+
|
|
+ def test_config_print_init_defaults_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_init_defaults with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_init_defaults(
|
|
+ component_configs='component',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'init-defaults',
|
|
+ '--component-configs', 'component',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_print_init_defaults_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_init_defaults error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_print_init_defaults()
|
|
+
|
|
+ def test_config_print_join_defaults(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_join_defaults without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_join_defaults() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'join-defaults']
|
|
+ )
|
|
+
|
|
+ def test_config_print_join_defaults_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_join_defaults with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_join_defaults(
|
|
+ component_configs='component',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'join-defaults',
|
|
+ '--component-configs', 'component',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_print_join_defaults_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_join_defaults error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_print_join_defaults()
|
|
+
|
|
+ def test_config_upload_from_file(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_file('/config.cfg') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-file',
|
|
+ '--config', '/config.cfg']
|
|
+ )
|
|
+
|
|
+ def test_config_upload_from_file_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_file(
|
|
+ '/config.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-file',
|
|
+ '--config', '/config.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_upload_from_file_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_upload_from_file('/config.cfg')
|
|
+
|
|
+ def test_config_upload_from_flags(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_flags() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-flags']
|
|
+ )
|
|
+
|
|
+ def test_config_upload_from_flags_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_flags(
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ apiserver_cert_extra_sans='sans',
|
|
+ cert_dir='/pki',
|
|
+ cri_socket='socket',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ node_name='node-1',
|
|
+ pod_network_cidr='10.1.0.0/12',
|
|
+ service_cidr='10.2.0.0/12',
|
|
+ service_dns_domain='example.org',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-flags',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--apiserver-cert-extra-sans', 'sans',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--node-name', 'node-1',
|
|
+ '--pod-network-cidr', '10.1.0.0/12',
|
|
+ '--service-cidr', '10.2.0.0/12',
|
|
+ '--service-dns-domain', 'example.org',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_upload_from_flags_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_upload_from_flags()
|
|
+
|
|
+ def test_config_view(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_view() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'view']
|
|
+ )
|
|
+
|
|
+ def test_config_view_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_view(
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'view',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_view_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_view()
|
|
+
|
|
+ def test_init(self):
|
|
+ '''
|
|
+ Test kuebadm.init without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.init() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'init']
|
|
+ )
|
|
+
|
|
+ def test_init_params(self):
|
|
+ '''
|
|
+ Test kuebadm.init with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.init(
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ apiserver_cert_extra_sans='sans',
|
|
+ cert_dir='/pki',
|
|
+ certificate_key='secret',
|
|
+ config='/config.cfg',
|
|
+ cri_socket='socket',
|
|
+ experimental_upload_certs=True,
|
|
+ feature_gates='k=v',
|
|
+ ignore_preflight_errors='all',
|
|
+ image_repository='example.org',
|
|
+ kubernetes_version='version',
|
|
+ node_name='node-1',
|
|
+ pod_network_cidr='10.1.0.0/12',
|
|
+ service_cidr='10.2.0.0/12',
|
|
+ service_dns_domain='example.org',
|
|
+ skip_certificate_key_print=True,
|
|
+ skip_phases='all',
|
|
+ skip_token_print=True,
|
|
+ token='token',
|
|
+ token_ttl='1h1m1s',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'init',
|
|
+ '--experimental-upload-certs',
|
|
+ '--skip-certificate-key-print',
|
|
+ '--skip-token-print',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--apiserver-cert-extra-sans', 'sans',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--certificate-key', 'secret',
|
|
+ '--config', '/config.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--image-repository', 'example.org',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--node-name', 'node-1',
|
|
+ '--pod-network-cidr', '10.1.0.0/12',
|
|
+ '--service-cidr', '10.2.0.0/12',
|
|
+ '--service-dns-domain', 'example.org',
|
|
+ '--skip-phases', 'all',
|
|
+ '--token', 'token',
|
|
+ '--token-ttl', '1h1m1s',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_init_error(self):
|
|
+ '''
|
|
+ Test kuebadm.init error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.init()
|
|
+
|
|
+ def test_join(self):
|
|
+ '''
|
|
+ Test kuebadm.join without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.join() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'join']
|
|
+ )
|
|
+
|
|
+ def test_join_params(self):
|
|
+ '''
|
|
+ Test kuebadm.join with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.join(
|
|
+ api_server_endpoint='10.160.65.165:6443',
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ certificate_key='secret',
|
|
+ config='/config.cfg',
|
|
+ cri_socket='socket',
|
|
+ discovery_file='/discovery.cfg',
|
|
+ discovery_token='token',
|
|
+ discovery_token_ca_cert_hash='type:value',
|
|
+ discovery_token_unsafe_skip_ca_verification=True,
|
|
+ experimental_control_plane=True,
|
|
+ ignore_preflight_errors='all',
|
|
+ node_name='node-1',
|
|
+ skip_phases='all',
|
|
+ tls_bootstrap_token='token',
|
|
+ token='token',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'join',
|
|
+ '10.160.65.165:6443',
|
|
+ '--discovery-token-unsafe-skip-ca-verification',
|
|
+ '--experimental-control-plane',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--certificate-key', 'secret',
|
|
+ '--config', '/config.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--discovery-file', '/discovery.cfg',
|
|
+ '--discovery-token', 'token',
|
|
+ '--discovery-token-ca-cert-hash', 'type:value',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--node-name', 'node-1',
|
|
+ '--skip-phases', 'all',
|
|
+ '--tls-bootstrap-token', 'token',
|
|
+ '--token', 'token',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_join_error(self):
|
|
+ '''
|
|
+ Test kuebadm.join error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.join()
|
|
+
|
|
+ def test_reset(self):
|
|
+ '''
|
|
+ Test kuebadm.reset without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.reset() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'reset', '--force']
|
|
+ )
|
|
+
|
|
+ def test_reset_params(self):
|
|
+ '''
|
|
+ Test kuebadm.reset with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.reset(
|
|
+ cert_dir='/pki',
|
|
+ cri_socket='socket',
|
|
+ ignore_preflight_errors='all',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'reset', '--force',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_reset_error(self):
|
|
+ '''
|
|
+ Test kuebadm.reset error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.reset()
|
|
diff --git a/tests/unit/modules/test_mount.py b/tests/unit/modules/test_mount.py
|
|
index a9df52f1d8..fe9b067665 100644
|
|
--- a/tests/unit/modules/test_mount.py
|
|
+++ b/tests/unit/modules/test_mount.py
|
|
@@ -216,6 +216,21 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
mock_open(read_data=MOCK_SHELL_FILE)):
|
|
self.assertEqual(mount.set_fstab('A', 'B', 'C'), 'new')
|
|
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch('salt.utils.files.fopen',
|
|
+ mock_open(read_data=MOCK_SHELL_FILE)):
|
|
+ self.assertEqual(mount.set_fstab('B', 'A', 'C', 'D', 'F', 'G'),
|
|
+ 'present')
|
|
+
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch('salt.utils.files.fopen',
|
|
+ mock_open(read_data=MOCK_SHELL_FILE)):
|
|
+ self.assertEqual(mount.set_fstab('B', 'A', 'C',
|
|
+ not_change=True),
|
|
+ 'present')
|
|
+
|
|
def test_rm_automaster(self):
|
|
'''
|
|
Remove the mount point from the auto_master
|
|
@@ -239,6 +254,34 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
mount.set_automaster,
|
|
'A', 'B', 'C')
|
|
|
|
+ mock = MagicMock(return_value=True)
|
|
+ mock_read = MagicMock(side_effect=OSError)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch.object(salt.utils.files, 'fopen', mock_read):
|
|
+ self.assertRaises(CommandExecutionError,
|
|
+ mount.set_automaster, 'A', 'B', 'C')
|
|
+
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch('salt.utils.files.fopen',
|
|
+ mock_open(read_data=MOCK_SHELL_FILE)):
|
|
+ self.assertEqual(mount.set_automaster('A', 'B', 'C'), 'new')
|
|
+
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch('salt.utils.files.fopen',
|
|
+ mock_open(read_data='/..A -fstype=C,D C:B')):
|
|
+ self.assertEqual(mount.set_automaster('A', 'B', 'C', 'D'),
|
|
+ 'present')
|
|
+
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'isfile', mock):
|
|
+ with patch('salt.utils.files.fopen',
|
|
+ mock_open(read_data='/..A -fstype=XX C:B')):
|
|
+ self.assertEqual(mount.set_automaster('A', 'B', 'C', 'D',
|
|
+ not_change=True),
|
|
+ 'present')
|
|
+
|
|
def test_automaster(self):
|
|
'''
|
|
Test the list the contents of the fstab
|
|
@@ -284,7 +327,7 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
with patch.dict(mount.__grains__, {'os': 'AIX', 'kernel': 'AIX'}):
|
|
with patch.object(os.path, 'isfile', mock):
|
|
self.assertRaises(CommandExecutionError,
|
|
- mount.set_filesystems, 'A', 'B', 'C')
|
|
+ mount.set_filesystems, 'A', 'B', 'C')
|
|
|
|
mock_read = MagicMock(side_effect=OSError)
|
|
with patch.object(os.path, 'isfile', mock):
|
|
@@ -305,6 +348,13 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
'stderr': True})
|
|
with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
self.assertTrue(mount.mount('name', 'device'))
|
|
+ mock.assert_called_with('mount device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.mount('name', 'device', fstype='fstype'))
|
|
+ mock.assert_called_with('mount -t fstype device name ',
|
|
+ python_shell=False, runas=None)
|
|
|
|
mock = MagicMock(return_value={'retcode': False,
|
|
'stderr': False})
|
|
@@ -320,13 +370,42 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
'stderr': True})
|
|
with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
self.assertTrue(mount.mount('name', 'device'))
|
|
+ mock.assert_called_with('mount device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.mount('name', 'device', fstype='fstype'))
|
|
+ mock.assert_called_with('mount -v fstype device name ',
|
|
+ python_shell=False, runas=None)
|
|
|
|
mock = MagicMock(return_value={'retcode': False,
|
|
'stderr': False})
|
|
with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
self.assertTrue(mount.mount('name', 'device'))
|
|
|
|
- def test_remount(self):
|
|
+ with patch.dict(mount.__grains__, {'os': 'Linux'}):
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(os.path, 'exists', mock):
|
|
+ mock = MagicMock(return_value=None)
|
|
+ with patch.dict(mount.__salt__, {'file.mkdir': None}):
|
|
+ mock = MagicMock(return_value={'retcode': True,
|
|
+ 'stderr': True})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.mount('name', 'device'))
|
|
+ mock.assert_called_with('mount -o defaults device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.mount('name', 'device', fstype='fstype'))
|
|
+ mock.assert_called_with('mount -o defaults -t fstype device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ mock = MagicMock(return_value={'retcode': False,
|
|
+ 'stderr': False})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.mount('name', 'device'))
|
|
+
|
|
+ def test_remount_non_mounted(self):
|
|
'''
|
|
Attempt to remount a device, if the device is not already mounted, mount
|
|
is called
|
|
@@ -345,6 +424,77 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
with patch.object(mount, 'mount', mock):
|
|
self.assertTrue(mount.remount('name', 'device'))
|
|
|
|
+ with patch.dict(mount.__grains__, {'os': 'Linux'}):
|
|
+ mock = MagicMock(return_value=[])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value=True)
|
|
+ with patch.object(mount, 'mount', mock):
|
|
+ self.assertTrue(mount.remount('name', 'device'))
|
|
+
|
|
+ def test_remount_already_mounted_no_fstype(self):
|
|
+ '''
|
|
+ Attempt to remount a device already mounted that do not provides
|
|
+ fstype
|
|
+ '''
|
|
+ with patch.dict(mount.__grains__, {'os': 'MacOS'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device'))
|
|
+ mock.assert_called_with('mount -u -o noowners device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__grains__, {'os': 'AIX'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device'))
|
|
+ mock.assert_called_with('mount -o remount device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__grains__, {'os': 'Linux'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device'))
|
|
+ mock.assert_called_with('mount -o defaults,remount device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ def test_remount_already_mounted_with_fstype(self):
|
|
+ '''
|
|
+ Attempt to remount a device already mounted that do not provides
|
|
+ fstype
|
|
+ '''
|
|
+ with patch.dict(mount.__grains__, {'os': 'MacOS'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device', fstype='type'))
|
|
+ mock.assert_called_with('mount -u -o noowners -t type device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__grains__, {'os': 'AIX'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device', fstype='type'))
|
|
+ mock.assert_called_with('mount -o remount -v type device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
+ with patch.dict(mount.__grains__, {'os': 'Linux'}):
|
|
+ mock = MagicMock(return_value=['name'])
|
|
+ with patch.object(mount, 'active', mock):
|
|
+ mock = MagicMock(return_value={'retcode': 0})
|
|
+ with patch.dict(mount.__salt__, {'cmd.run_all': mock}):
|
|
+ self.assertTrue(mount.remount('name', 'device', fstype='type'))
|
|
+ mock.assert_called_with('mount -o defaults,remount -t type device name ',
|
|
+ python_shell=False, runas=None)
|
|
+
|
|
def test_umount(self):
|
|
'''
|
|
Attempt to unmount a device by specifying the directory it is
|
|
diff --git a/tests/unit/modules/test_parted_partition.py b/tests/unit/modules/test_parted_partition.py
|
|
index 8f381d55a6..1959e5978e 100644
|
|
--- a/tests/unit/modules/test_parted_partition.py
|
|
+++ b/tests/unit/modules/test_parted_partition.py
|
|
@@ -376,3 +376,20 @@ class PartedTestCase(TestCase, LoaderModuleMockMixin):
|
|
}
|
|
}
|
|
self.assertEqual(output, expected)
|
|
+
|
|
+ def test_disk_set(self):
|
|
+ with patch('salt.modules.parted_partition._validate_device', MagicMock()):
|
|
+ self.cmdrun.return_value = ''
|
|
+ output = parted.disk_set('/dev/sda', 'pmbr_boot', 'on')
|
|
+ self.cmdrun.assert_called_once_with(
|
|
+ ['parted', '-m', '-s', '/dev/sda', 'disk_set',
|
|
+ 'pmbr_boot', 'on'])
|
|
+ assert output == []
|
|
+
|
|
+ def test_disk_toggle(self):
|
|
+ with patch('salt.modules.parted_partition._validate_device', MagicMock()):
|
|
+ self.cmdrun.return_value = ''
|
|
+ output = parted.disk_toggle('/dev/sda', 'pmbr_boot')
|
|
+ self.cmdrun.assert_called_once_with(
|
|
+ ['parted', '-m', '-s', '/dev/sda', 'disk_toggle', 'pmbr_boot'])
|
|
+ assert output == []
|
|
diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py
|
|
index 0a2359ccb2..dc9f52c572 100644
|
|
--- a/tests/unit/modules/test_rpm_lowpkg.py
|
|
+++ b/tests/unit/modules/test_rpm_lowpkg.py
|
|
@@ -20,6 +20,11 @@ from tests.support.mock import (
|
|
import salt.modules.rpm_lowpkg as rpm
|
|
|
|
|
|
+def _called_with_root(mock):
|
|
+ cmd = ' '.join(mock.call_args[0][0])
|
|
+ return cmd.startswith('rpm --root /')
|
|
+
|
|
+
|
|
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
'''
|
|
@@ -28,7 +33,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
def setup_loader_modules(self):
|
|
return {rpm: {'rpm': MagicMock(return_value=MagicMock)}}
|
|
|
|
- # 'list_pkgs' function tests: 1
|
|
+ # 'list_pkgs' function tests: 2
|
|
|
|
def test_list_pkgs(self):
|
|
'''
|
|
@@ -37,13 +42,24 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
mock = MagicMock(return_value='')
|
|
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
self.assertDictEqual(rpm.list_pkgs(), {})
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
+
|
|
+ def test_list_pkgs_root(self):
|
|
+ '''
|
|
+ Test if it list the packages currently installed in a dict,
|
|
+ called with root parameter
|
|
+ '''
|
|
+ mock = MagicMock(return_value='')
|
|
+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
+ rpm.list_pkgs(root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
|
|
- # 'verify' function tests: 1
|
|
+ # 'verify' function tests: 2
|
|
|
|
def test_verify(self):
|
|
'''
|
|
- Test if it runs an rpm -Va on a system,
|
|
- and returns the results in a dict
|
|
+ Test if it runs an rpm -Va on a system, and returns the
|
|
+ results in a dict
|
|
'''
|
|
mock = MagicMock(return_value={'stdout': '',
|
|
'stderr': '',
|
|
@@ -51,8 +67,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
'pid': 12345})
|
|
with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
|
|
self.assertDictEqual(rpm.verify('httpd'), {})
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
+
|
|
+ def test_verify_root(self):
|
|
+ '''
|
|
+ Test if it runs an rpm -Va on a system, and returns the
|
|
+ results in a dict, called with root parameter
|
|
+ '''
|
|
+ mock = MagicMock(return_value={'stdout': '',
|
|
+ 'stderr': '',
|
|
+ 'retcode': 0,
|
|
+ 'pid': 12345})
|
|
+ with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
|
|
+ rpm.verify('httpd', root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
|
|
- # 'file_list' function tests: 1
|
|
+ # 'file_list' function tests: 2
|
|
|
|
def test_file_list(self):
|
|
'''
|
|
@@ -62,8 +92,20 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
self.assertDictEqual(rpm.file_list('httpd'),
|
|
{'errors': [], 'files': []})
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
|
|
- # 'file_dict' function tests: 1
|
|
+ def test_file_list_root(self):
|
|
+ '''
|
|
+ Test if it list the files that belong to a package, using the
|
|
+ root parameter.
|
|
+ '''
|
|
+
|
|
+ mock = MagicMock(return_value='')
|
|
+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
+ rpm.file_list('httpd', root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
+
|
|
+ # 'file_dict' function tests: 2
|
|
|
|
def test_file_dict(self):
|
|
'''
|
|
@@ -73,6 +115,16 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
self.assertDictEqual(rpm.file_dict('httpd'),
|
|
{'errors': [], 'packages': {}})
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
+
|
|
+ def test_file_dict_root(self):
|
|
+ '''
|
|
+ Test if it list the files that belong to a package
|
|
+ '''
|
|
+ mock = MagicMock(return_value='')
|
|
+ with patch.dict(rpm.__salt__, {'cmd.run': mock}):
|
|
+ rpm.file_dict('httpd', root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
|
|
# 'owner' function tests: 1
|
|
|
|
@@ -86,6 +138,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
mock = MagicMock(return_value=ret)
|
|
with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
|
|
self.assertEqual(rpm.owner('/usr/bin/salt-jenkins-build'), '')
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
|
|
ret = {'/usr/bin/vim': 'vim-enhanced-7.4.160-1.e17.x86_64',
|
|
'/usr/bin/python': 'python-2.7.5-16.e17.x86_64'}
|
|
@@ -94,8 +147,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
|
|
self.assertDictEqual(rpm.owner('/usr/bin/python', '/usr/bin/vim'),
|
|
ret)
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
|
|
- # 'checksum' function tests: 1
|
|
+ def test_owner_root(self):
|
|
+ '''
|
|
+ Test if it return the name of the package that owns the file,
|
|
+ using the parameter root.
|
|
+ '''
|
|
+ self.assertEqual(rpm.owner(), '')
|
|
+
|
|
+ ret = 'file /usr/bin/salt-jenkins-build is not owned by any package'
|
|
+ mock = MagicMock(return_value=ret)
|
|
+ with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
|
|
+ rpm.owner('/usr/bin/salt-jenkins-build', root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
+
|
|
+ # 'checksum' function tests: 2
|
|
|
|
def test_checksum(self):
|
|
'''
|
|
@@ -110,6 +177,17 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
|
|
mock = MagicMock(side_effect=[True, 0, True, 1, False, 0])
|
|
with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
|
|
self.assertDictEqual(rpm.checksum("file1.rpm", "file2.rpm", "file3.rpm"), ret)
|
|
+ self.assertFalse(_called_with_root(mock))
|
|
+
|
|
+ def test_checksum_root(self):
|
|
+ '''
|
|
+ Test if checksum validate as expected, using the parameter
|
|
+ root
|
|
+ '''
|
|
+ mock = MagicMock(side_effect=[True, 0])
|
|
+ with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
|
|
+ rpm.checksum("file1.rpm", root='/')
|
|
+ self.assertTrue(_called_with_root(mock))
|
|
|
|
def test_version_cmp_rpm(self):
|
|
'''
|
|
diff --git a/tests/unit/modules/test_systemd_service.py b/tests/unit/modules/test_systemd_service.py
|
|
index 1d3a760c13..ed44459f93 100644
|
|
--- a/tests/unit/modules/test_systemd_service.py
|
|
+++ b/tests/unit/modules/test_systemd_service.py
|
|
@@ -7,6 +7,8 @@
|
|
from __future__ import absolute_import, unicode_literals, print_function
|
|
import os
|
|
|
|
+import pytest
|
|
+
|
|
# Import Salt Testing Libs
|
|
from tests.support.mixins import LoaderModuleMockMixin
|
|
from tests.support.unit import TestCase, skipIf
|
|
@@ -110,7 +112,7 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
|
|
'README'
|
|
)
|
|
)
|
|
- sysv_enabled_mock = MagicMock(side_effect=lambda x: x == 'baz')
|
|
+ sysv_enabled_mock = MagicMock(side_effect=lambda x, _: x == 'baz')
|
|
|
|
with patch.dict(systemd.__salt__, {'cmd.run': cmd_mock}):
|
|
with patch.object(os, 'listdir', listdir_mock):
|
|
@@ -146,7 +148,7 @@ class SystemdTestCase(TestCase, LoaderModuleMockMixin):
|
|
'README'
|
|
)
|
|
)
|
|
- sysv_enabled_mock = MagicMock(side_effect=lambda x: x == 'baz')
|
|
+ sysv_enabled_mock = MagicMock(side_effect=lambda x, _: x == 'baz')
|
|
|
|
with patch.dict(systemd.__salt__, {'cmd.run': cmd_mock}):
|
|
with patch.object(os, 'listdir', listdir_mock):
|
|
@@ -571,3 +573,54 @@ class SystemdScopeTestCase(TestCase, LoaderModuleMockMixin):
|
|
|
|
def test_unmask_runtime(self):
|
|
self._mask_unmask('unmask_', True)
|
|
+
|
|
+ def test_firstboot(self):
|
|
+ '''
|
|
+ Test service.firstboot without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(systemd.__salt__, salt_mock):
|
|
+ assert systemd.firstboot()
|
|
+ salt_mock['cmd.run_all'].assert_called_with(['systemd-firstboot'])
|
|
+
|
|
+ def test_firstboot_params(self):
|
|
+ '''
|
|
+ Test service.firstboot with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(systemd.__salt__, salt_mock):
|
|
+ assert systemd.firstboot(
|
|
+ locale='en_US.UTF-8',
|
|
+ locale_message='en_US.UTF-8',
|
|
+ keymap='jp',
|
|
+ timezone='Europe/Berlin',
|
|
+ hostname='node-001',
|
|
+ machine_id='1234567890abcdef',
|
|
+ root='/mnt')
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['systemd-firstboot',
|
|
+ '--locale', 'en_US.UTF-8',
|
|
+ '--locale-message', 'en_US.UTF-8',
|
|
+ '--keymap', 'jp',
|
|
+ '--timezone', 'Europe/Berlin',
|
|
+ '--hostname', 'node-001',
|
|
+ '--machine-ID', '1234567890abcdef',
|
|
+ '--root', '/mnt'])
|
|
+
|
|
+ def test_firstboot_error(self):
|
|
+ '''
|
|
+ Test service.firstboot error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(systemd.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert systemd.firstboot()
|
|
diff --git a/tests/unit/modules/test_useradd.py b/tests/unit/modules/test_useradd.py
|
|
index 18da8d8ce8..74cafc6440 100644
|
|
--- a/tests/unit/modules/test_useradd.py
|
|
+++ b/tests/unit/modules/test_useradd.py
|
|
@@ -415,14 +415,15 @@ class UserAddTestCase(TestCase, LoaderModuleMockMixin):
|
|
|
|
mock = MagicMock(return_value=None)
|
|
with patch.dict(useradd.__salt__, {'cmd.run': mock}):
|
|
- mock = MagicMock(side_effect=[{'name': ''}, False,
|
|
+ mock = MagicMock(side_effect=[False, {'name': ''},
|
|
{'name': 'salt'}])
|
|
with patch.object(useradd, 'info', mock):
|
|
self.assertTrue(useradd.rename('name', 'salt'))
|
|
|
|
mock = MagicMock(return_value=None)
|
|
with patch.dict(useradd.__salt__, {'cmd.run': mock}):
|
|
- mock = MagicMock(side_effect=[{'name': ''}, False, {'name': ''}])
|
|
+ mock = MagicMock(side_effect=[False, {'name': ''},
|
|
+ {'name': ''}])
|
|
with patch.object(useradd, 'info', mock):
|
|
self.assertFalse(useradd.rename('salt', 'salt'))
|
|
|
|
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
|
|
index a7063e47c6..d2ae06a98e 100644
|
|
--- a/tests/unit/modules/test_zypperpkg.py
|
|
+++ b/tests/unit/modules/test_zypperpkg.py
|
|
@@ -40,6 +40,9 @@ class ZyppCallMock(object):
|
|
return self
|
|
|
|
def __call__(self, *args, **kwargs):
|
|
+ # If the call is for a configuration modifier, we return self
|
|
+ if any(i in kwargs for i in ('no_repo_failure', 'systemd_scope', 'root')):
|
|
+ return self
|
|
return MagicMock(return_value=self.__return_value)()
|
|
|
|
|
|
@@ -925,7 +928,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
'pico': '0.1.1',
|
|
}
|
|
|
|
- def __call__(self):
|
|
+ def __call__(self, root=None, includes=None):
|
|
pkgs = self._pkgs.copy()
|
|
for target in self._packages:
|
|
if self._pkgs.get(target):
|
|
@@ -991,10 +994,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
with zypper_patcher:
|
|
zypper.mod_repo(name, **{'url': url})
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[call('ar', url, name)]
|
|
)
|
|
- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
|
|
+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
|
|
|
|
def test_repo_noadd_nomod_noref(self):
|
|
'''
|
|
@@ -1016,8 +1019,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
self.assertEqual(
|
|
out['comment'],
|
|
'Specified arguments did not result in modification of repo')
|
|
- self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
|
|
- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
|
|
+ self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
|
|
+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
|
|
|
|
def test_repo_noadd_modbaseurl_ref(self):
|
|
'''
|
|
@@ -1045,9 +1048,11 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
'priority': 1,
|
|
'cache': False,
|
|
'keeppackages': False,
|
|
- 'type': 'rpm-md'}
|
|
- self.assertTrue(zypper.mod_repo.call_count == 2)
|
|
- self.assertTrue(zypper.mod_repo.mock_calls[1] == call(name, **expected_params))
|
|
+ 'type': 'rpm-md',
|
|
+ 'root': None,
|
|
+ }
|
|
+ self.assertEqual(zypper.mod_repo.call_count, 2)
|
|
+ self.assertEqual(zypper.mod_repo.mock_calls[1], call(name, **expected_params))
|
|
|
|
def test_repo_add_mod_noref(self):
|
|
'''
|
|
@@ -1063,10 +1068,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
with zypper_patcher:
|
|
zypper.mod_repo(name, **{'url': url, 'refresh': True})
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[call('ar', url, name)]
|
|
)
|
|
- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
|
|
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
|
|
'mr', '--refresh', name
|
|
)
|
|
|
|
@@ -1085,8 +1090,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
'salt.modules.zypperpkg', **self.zypper_patcher_config)
|
|
with zypper_patcher:
|
|
zypper.mod_repo(name, **{'url': url, 'refresh': True})
|
|
- self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
|
|
- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
|
|
+ self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
|
|
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
|
|
'mr', '--refresh', name
|
|
)
|
|
|
|
@@ -1105,13 +1110,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
with zypper_patcher:
|
|
zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[
|
|
call('ar', url, name),
|
|
call('--gpg-auto-import-keys', 'refresh', name)
|
|
]
|
|
)
|
|
- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
|
|
+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
|
|
|
|
def test_repo_noadd_nomod_ref(self):
|
|
'''
|
|
@@ -1132,10 +1137,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
with zypper_patcher:
|
|
zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[call('--gpg-auto-import-keys', 'refresh', name)]
|
|
)
|
|
- self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
|
|
+ self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
|
|
|
|
def test_repo_add_mod_ref(self):
|
|
'''
|
|
@@ -1156,13 +1161,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
**{'url': url, 'refresh': True, 'gpgautoimport': True}
|
|
)
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[
|
|
call('ar', url, name),
|
|
call('--gpg-auto-import-keys', 'refresh', name)
|
|
]
|
|
)
|
|
- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
|
|
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
|
|
'--gpg-auto-import-keys', 'mr', '--refresh', name
|
|
)
|
|
|
|
@@ -1188,10 +1193,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
**{'url': url, 'refresh': True, 'gpgautoimport': True}
|
|
)
|
|
self.assertEqual(
|
|
- zypper.__zypper__.xml.call.call_args_list,
|
|
+ zypper.__zypper__(root=None).xml.call.call_args_list,
|
|
[call('--gpg-auto-import-keys', 'refresh', name)]
|
|
)
|
|
- zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
|
|
+ zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
|
|
'--gpg-auto-import-keys', 'mr', '--refresh', name
|
|
)
|
|
|
|
@@ -1368,3 +1373,58 @@ Repository 'DUMMY' not found by its alias, number, or URI.
|
|
with self.assertRaises(CommandExecutionError):
|
|
for op in ['>>', '==', '<<', '+']:
|
|
zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op))
|
|
+
|
|
+ @patch('salt.modules.zypperpkg._get_visible_patterns')
|
|
+ def test__get_installed_patterns(self, get_visible_patterns):
|
|
+ '''Test installed patterns in the system'''
|
|
+ get_visible_patterns.return_value = {
|
|
+ 'package-a': {
|
|
+ 'installed': True,
|
|
+ 'summary': 'description a',
|
|
+ },
|
|
+ 'package-b': {
|
|
+ 'installed': False,
|
|
+ 'summary': 'description b',
|
|
+ },
|
|
+ }
|
|
+
|
|
+ salt_mock = {
|
|
+ 'cmd.run': MagicMock(return_value='''pattern() = package-a
|
|
+pattern-visible()
|
|
+pattern() = package-c'''),
|
|
+ }
|
|
+ with patch.dict('salt.modules.zypperpkg.__salt__', salt_mock):
|
|
+ assert zypper._get_installed_patterns() == {
|
|
+ 'package-a': {
|
|
+ 'installed': True,
|
|
+ 'summary': 'description a',
|
|
+ },
|
|
+ 'package-c': {
|
|
+ 'installed': True,
|
|
+ 'summary': 'Non-visible pattern',
|
|
+ },
|
|
+ }
|
|
+
|
|
+ @patch('salt.modules.zypperpkg._get_visible_patterns')
|
|
+ def test_list_patterns(self, get_visible_patterns):
|
|
+ '''Test available patterns in the repo'''
|
|
+ get_visible_patterns.return_value = {
|
|
+ 'package-a': {
|
|
+ 'installed': True,
|
|
+ 'summary': 'description a',
|
|
+ },
|
|
+ 'package-b': {
|
|
+ 'installed': False,
|
|
+ 'summary': 'description b',
|
|
+ },
|
|
+ }
|
|
+ assert zypper.list_patterns() == {
|
|
+ 'package-a': {
|
|
+ 'installed': True,
|
|
+ 'summary': 'description a',
|
|
+ },
|
|
+ 'package-b': {
|
|
+ 'installed': False,
|
|
+ 'summary': 'description b',
|
|
+ },
|
|
+ }
|
|
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
|
|
new file mode 100644
|
|
index 0000000000..3f45ed94f9
|
|
--- /dev/null
|
|
+++ b/tests/unit/states/test_btrfs.py
|
|
@@ -0,0 +1,782 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:platform: Linux
|
|
+'''
|
|
+# Import Python Libs
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+# Import Salt Testing Libs
|
|
+from tests.support.mixins import LoaderModuleMockMixin
|
|
+from tests.support.unit import skipIf, TestCase
|
|
+from tests.support.mock import (
|
|
+ MagicMock,
|
|
+ NO_MOCK,
|
|
+ NO_MOCK_REASON,
|
|
+ patch,
|
|
+)
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
|
|
+import salt.states.btrfs as btrfs
|
|
+
|
|
+import pytest
|
|
+
|
|
+
|
|
+@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
+class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
|
|
+ '''
|
|
+ Test cases for salt.states.btrfs
|
|
+ '''
|
|
+
|
|
+ def setup_loader_modules(self):
|
|
+ return {
|
|
+ btrfs: {
|
|
+ '__salt__': {},
|
|
+ '__states__': {},
|
|
+ '__utils__': {},
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test__mount_fails(self, mkdtemp, umount):
|
|
+ '''
|
|
+ Test mounting a device in a temporary place.
|
|
+ '''
|
|
+ mkdtemp.return_value = '/tmp/xxx'
|
|
+ states_mock = {
|
|
+ 'mount.mounted': MagicMock(return_value={'result': False}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__states__, states_mock):
|
|
+ assert btrfs._mount('/dev/sda1', use_default=False) is None
|
|
+ mkdtemp.assert_called_once()
|
|
+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
|
|
+ device='/dev/sda1',
|
|
+ fstype='btrfs',
|
|
+ opts='subvol=/',
|
|
+ persist=False)
|
|
+ umount.assert_called_with('/tmp/xxx')
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test__mount(self, mkdtemp, umount):
|
|
+ '''
|
|
+ Test mounting a device in a temporary place.
|
|
+ '''
|
|
+ mkdtemp.return_value = '/tmp/xxx'
|
|
+ states_mock = {
|
|
+ 'mount.mounted': MagicMock(return_value={'result': True}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__states__, states_mock):
|
|
+ assert btrfs._mount('/dev/sda1', use_default=False) == '/tmp/xxx'
|
|
+ mkdtemp.assert_called_once()
|
|
+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
|
|
+ device='/dev/sda1',
|
|
+ fstype='btrfs',
|
|
+ opts='subvol=/',
|
|
+ persist=False)
|
|
+ umount.assert_not_called()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('tempfile.mkdtemp')
|
|
+ def test__mount_use_default(self, mkdtemp, umount):
|
|
+ '''
|
|
+ Test mounting a device in a temporary place.
|
|
+ '''
|
|
+ mkdtemp.return_value = '/tmp/xxx'
|
|
+ states_mock = {
|
|
+ 'mount.mounted': MagicMock(return_value={'result': True}),
|
|
+ }
|
|
+ with patch.dict(btrfs.__states__, states_mock):
|
|
+ assert btrfs._mount('/dev/sda1', use_default=True) == '/tmp/xxx'
|
|
+ mkdtemp.assert_called_once()
|
|
+ states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
|
|
+ device='/dev/sda1',
|
|
+ fstype='btrfs',
|
|
+ opts='defaults',
|
|
+ persist=False)
|
|
+ umount.assert_not_called()
|
|
+
|
|
+ def test__umount(self):
|
|
+ '''
|
|
+ Test umounting and cleanning temporary place.
|
|
+ '''
|
|
+ states_mock = {
|
|
+ 'mount.unmounted': MagicMock(),
|
|
+ }
|
|
+ utils_mock = {
|
|
+ 'files.rm_rf': MagicMock(),
|
|
+ }
|
|
+ with patch.dict(btrfs.__states__, states_mock), \
|
|
+ patch.dict(btrfs.__utils__, utils_mock):
|
|
+ btrfs._umount('/tmp/xxx')
|
|
+ states_mock['mount.unmounted'].assert_called_with('/tmp/xxx')
|
|
+ utils_mock['files.rm_rf'].assert_called_with('/tmp/xxx')
|
|
+
|
|
+ def test__is_default_not_default(self):
|
|
+ '''
|
|
+ Test if the subvolume is the current default.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_show': MagicMock(return_value={
|
|
+ '@/var': {'subvolume id': '256'},
|
|
+ }),
|
|
+ 'btrfs.subvolume_get_default': MagicMock(return_value={
|
|
+ 'id': '5',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert not btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
|
|
+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
|
|
+
|
|
+ def test__is_default(self):
|
|
+ '''
|
|
+ Test if the subvolume is the current default.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_show': MagicMock(return_value={
|
|
+ '@/var': {'subvolume id': '256'},
|
|
+ }),
|
|
+ 'btrfs.subvolume_get_default': MagicMock(return_value={
|
|
+ 'id': '256',
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
|
|
+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
|
|
+
|
|
+ def test__set_default(self):
|
|
+ '''
|
|
+ Test setting a subvolume as the current default.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_show': MagicMock(return_value={
|
|
+ '@/var': {'subvolume id': '256'},
|
|
+ }),
|
|
+ 'btrfs.subvolume_set_default': MagicMock(return_value=True),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs._set_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
|
|
+ salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.subvolume_set_default'].assert_called_with('256', '/tmp/xxx')
|
|
+
|
|
+ def test__is_cow_not_cow(self):
|
|
+ '''
|
|
+ Test if the subvolume is copy on write.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'file.lsattr': MagicMock(return_value={
|
|
+ '/tmp/xxx/@/var': ['C'],
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert not btrfs._is_cow('/tmp/xxx/@/var')
|
|
+ salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
|
|
+
|
|
+ def test__is_cow(self):
|
|
+ '''
|
|
+ Test if the subvolume is copy on write.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'file.lsattr': MagicMock(return_value={
|
|
+ '/tmp/xxx/@/var': [],
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs._is_cow('/tmp/xxx/@/var')
|
|
+ salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
|
|
+
|
|
+ def test__unset_cow(self):
|
|
+ '''
|
|
+ Test disabling the subvolume as copy on write.
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'file.chattr': MagicMock(return_value=True),
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock):
|
|
+ assert btrfs._unset_cow('/tmp/xxx/@/var')
|
|
+ salt_mock['file.chattr'].assert_called_with('/tmp/xxx/@/var',
|
|
+ operator='add',
|
|
+ attributes='C')
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists(self, mount, umount):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_test(self, mount, umount):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': True,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._is_default')
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_was_default(self, mount, umount,
|
|
+ is_default):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ is_default.return_value = True
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ set_default=True) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._set_default')
|
|
+ @patch('salt.states.btrfs._is_default')
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_set_default(self, mount, umount,
|
|
+ is_default, set_default):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ is_default.return_value = False
|
|
+ set_default.return_value = True
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ set_default=True) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {
|
|
+ '@/var_default': True
|
|
+ },
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._set_default')
|
|
+ @patch('salt.states.btrfs._is_default')
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_set_default_no_force(self,
|
|
+ mount,
|
|
+ umount,
|
|
+ is_default,
|
|
+ set_default):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ is_default.return_value = False
|
|
+ set_default.return_value = True
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ set_default=True,
|
|
+ force_set_default=False) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._is_cow')
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_no_cow(self, mount, umount, is_cow):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ is_cow.return_value = False
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ copy_on_write=False) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._unset_cow')
|
|
+ @patch('salt.states.btrfs._is_cow')
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_exists_unset_cow(self, mount, umount,
|
|
+ is_cow, unset_cow):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ is_cow.return_value = True
|
|
+ unset_cow.return_value = True
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=True),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ copy_on_write=False) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {
|
|
+ '@/var_no_cow': True
|
|
+ },
|
|
+ 'comment': ['Subvolume @/var already present'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created(self, mount, umount):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
|
|
+ 'btrfs.subvolume_create': MagicMock(),
|
|
+ }
|
|
+ states_mock = {
|
|
+ 'file.directory': MagicMock(return_value={'result': True}),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__states__, states_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {
|
|
+ '@/var': 'Created subvolume @/var'
|
|
+ },
|
|
+ 'comment': [],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.subvolume_create'].assert_called_once()
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_fails_directory(self, mount, umount):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
|
|
+ }
|
|
+ states_mock = {
|
|
+ 'file.directory': MagicMock(return_value={'result': False}),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__states__, states_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Error creating /tmp/xxx/@ directory'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ def test_subvolume_created_fails(self, mount, umount):
|
|
+ '''
|
|
+ Test creating a subvolume.
|
|
+ '''
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.subvolume_exists': MagicMock(return_value=False),
|
|
+ 'btrfs.subvolume_create': MagicMock(side_effect=CommandExecutionError),
|
|
+ }
|
|
+ states_mock = {
|
|
+ 'file.directory': MagicMock(return_value={'result': True}),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__states__, states_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.subvolume_created(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Error creating subvolume @/var'],
|
|
+ }
|
|
+ salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.subvolume_create'].assert_called_once()
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ def test_diff_properties_fails(self):
|
|
+ '''
|
|
+ Test when diff_properties do not found a property
|
|
+ '''
|
|
+ expected = {
|
|
+ 'wrong_property': True
|
|
+ }
|
|
+ current = {
|
|
+ 'compression': {
|
|
+ 'description': 'Set/get compression for a file or directory',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'label': {
|
|
+ 'description': 'Set/get label of device.',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ }
|
|
+ with pytest.raises(Exception):
|
|
+ btrfs._diff_properties(expected, current)
|
|
+
|
|
+ def test_diff_properties_enable_ro(self):
|
|
+ '''
|
|
+ Test when diff_properties enable one single property
|
|
+ '''
|
|
+ expected = {
|
|
+ 'ro': True
|
|
+ }
|
|
+ current = {
|
|
+ 'compression': {
|
|
+ 'description': 'Set/get compression for a file or directory',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'label': {
|
|
+ 'description': 'Set/get label of device.',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ }
|
|
+ assert btrfs._diff_properties(expected, current) == {'ro': True}
|
|
+
|
|
+ def test_diff_properties_only_enable_ro(self):
|
|
+ '''
|
|
+ Test when diff_properties is half ready
|
|
+ '''
|
|
+ expected = {
|
|
+ 'ro': True,
|
|
+ 'label': 'mylabel'
|
|
+ }
|
|
+ current = {
|
|
+ 'compression': {
|
|
+ 'description': 'Set/get compression for a file or directory',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'label': {
|
|
+ 'description': 'Set/get label of device.',
|
|
+ 'value': 'mylabel',
|
|
+ },
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ }
|
|
+ assert btrfs._diff_properties(expected, current) == {'ro': True}
|
|
+
|
|
+ def test_diff_properties_disable_ro(self):
|
|
+ '''
|
|
+ Test when diff_properties enable one single property
|
|
+ '''
|
|
+ expected = {
|
|
+ 'ro': False
|
|
+ }
|
|
+ current = {
|
|
+ 'compression': {
|
|
+ 'description': 'Set/get compression for a file or directory',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'label': {
|
|
+ 'description': 'Set/get label of device.',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': True,
|
|
+ },
|
|
+ }
|
|
+ assert btrfs._diff_properties(expected, current) == {'ro': False}
|
|
+
|
|
+ def test_diff_properties_emty_na(self):
|
|
+ '''
|
|
+ Test when diff_properties is already disabled as N/A
|
|
+ '''
|
|
+ expected = {
|
|
+ 'ro': False
|
|
+ }
|
|
+ current = {
|
|
+ 'compression': {
|
|
+ 'description': 'Set/get compression for a file or directory',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'label': {
|
|
+ 'description': 'Set/get label of device.',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ }
|
|
+ assert btrfs._diff_properties(expected, current) == {}
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ @patch('os.path.exists')
|
|
+ def test_properties_subvolume_not_exists(self, exists, mount, umount):
|
|
+ '''
|
|
+ Test when subvolume is not present
|
|
+ '''
|
|
+ exists.return_value = False
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ assert btrfs.properties(name='@/var',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '@/var',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Object @/var not found'],
|
|
+ }
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ @patch('os.path.exists')
|
|
+ def test_properties_default_root_subvolume(self, exists, mount, umount):
|
|
+ '''
|
|
+ Test when root subvolume resolves to another subvolume
|
|
+ '''
|
|
+ exists.return_value = False
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ assert btrfs.properties(name='/',
|
|
+ device='/dev/sda1') == {
|
|
+ 'name': '/',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Object / not found'],
|
|
+ }
|
|
+ exists.assert_called_with('/tmp/xxx/.')
|
|
+
|
|
+ @patch('os.path.exists')
|
|
+ def test_properties_device_fail(self, exists):
|
|
+ '''
|
|
+ Test when we try to set a device that is not pressent
|
|
+ '''
|
|
+ exists.return_value = False
|
|
+ assert btrfs.properties(name='/dev/sda1',
|
|
+ device=None) == {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Object /dev/sda1 not found'],
|
|
+ }
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ @patch('os.path.exists')
|
|
+ def test_properties_subvolume_fail(self, exists, mount, umount):
|
|
+ '''
|
|
+ Test setting a wrong property in a subvolume
|
|
+ '''
|
|
+ exists.return_value = True
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.properties': MagicMock(side_effect=[
|
|
+ {
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ }
|
|
+ ]),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.properties(name='@/var',
|
|
+ device='/dev/sda1',
|
|
+ wrond_property=True) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Some property not found in @/var'],
|
|
+ }
|
|
+ salt_mock['btrfs.properties'].assert_called_with('/tmp/xxx/@/var')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
+
|
|
+ @patch('salt.states.btrfs._umount')
|
|
+ @patch('salt.states.btrfs._mount')
|
|
+ @patch('os.path.exists')
|
|
+ def test_properties_enable_ro_subvolume(self, exists, mount, umount):
|
|
+ '''
|
|
+ Test setting a ro property in a subvolume
|
|
+ '''
|
|
+ exists.return_value = True
|
|
+ mount.return_value = '/tmp/xxx'
|
|
+ salt_mock = {
|
|
+ 'btrfs.properties': MagicMock(side_effect=[
|
|
+ {
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'N/A',
|
|
+ },
|
|
+ },
|
|
+ None,
|
|
+ {
|
|
+ 'ro': {
|
|
+ 'description': 'Set/get read-only flag or subvolume',
|
|
+ 'value': 'true',
|
|
+ },
|
|
+ }
|
|
+ ]),
|
|
+ }
|
|
+ opts_mock = {
|
|
+ 'test': False,
|
|
+ }
|
|
+ with patch.dict(btrfs.__salt__, salt_mock), \
|
|
+ patch.dict(btrfs.__opts__, opts_mock):
|
|
+ assert btrfs.properties(name='@/var',
|
|
+ device='/dev/sda1', ro=True) == {
|
|
+ 'name': '@/var',
|
|
+ 'result': True,
|
|
+ 'changes': {'ro': 'true'},
|
|
+ 'comment': ['Properties changed in @/var'],
|
|
+ }
|
|
+ salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var')
|
|
+ salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var',
|
|
+ set='ro=true')
|
|
+ mount.assert_called_once()
|
|
+ umount.assert_called_once()
|
|
diff --git a/tests/unit/states/test_mount.py b/tests/unit/states/test_mount.py
|
|
index 3e3a75d3cd..8f21db0cbb 100644
|
|
--- a/tests/unit/states/test_mount.py
|
|
+++ b/tests/unit/states/test_mount.py
|
|
@@ -449,3 +449,608 @@ class MountTestCase(TestCase, LoaderModuleMockMixin):
|
|
'changes': {}}
|
|
|
|
self.assertDictEqual(mount.mod_watch(name, sfun='unmount'), ret)
|
|
+
|
|
+ def test__convert_to_fast_none(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ assert mount._convert_to('/dev/sda1', None) == '/dev/sda1'
|
|
+
|
|
+ def test__convert_to_fast_device(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ assert mount._convert_to('/dev/sda1', 'device') == '/dev/sda1'
|
|
+
|
|
+ def test__convert_to_fast_token(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ assert mount._convert_to('LABEL=home', 'label') == 'LABEL=home'
|
|
+
|
|
+ def test__convert_to_device_none(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ salt_mock = {
|
|
+ 'disk.blkid': MagicMock(return_value={}),
|
|
+ }
|
|
+ with patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount._convert_to('/dev/sda1', 'uuid') is None
|
|
+ salt_mock['disk.blkid'].assert_called_with('/dev/sda1')
|
|
+
|
|
+ def test__convert_to_device_token(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ uuid = '988c663d-74a2-432b-ba52-3eea34015f22'
|
|
+ salt_mock = {
|
|
+ 'disk.blkid': MagicMock(return_value={
|
|
+ '/dev/sda1': {'UUID': uuid}
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(mount.__salt__, salt_mock):
|
|
+ uuid = 'UUID={}'.format(uuid)
|
|
+ assert mount._convert_to('/dev/sda1', 'uuid') == uuid
|
|
+ salt_mock['disk.blkid'].assert_called_with('/dev/sda1')
|
|
+
|
|
+ def test__convert_to_token_device(self):
|
|
+ '''
|
|
+ Test the device name conversor
|
|
+ '''
|
|
+ uuid = '988c663d-74a2-432b-ba52-3eea34015f22'
|
|
+ salt_mock = {
|
|
+ 'disk.blkid': MagicMock(return_value={
|
|
+ '/dev/sda1': {'UUID': uuid}
|
|
+ }),
|
|
+ }
|
|
+ with patch.dict(mount.__salt__, salt_mock):
|
|
+ uuid = 'UUID={}'.format(uuid)
|
|
+ assert mount._convert_to(uuid, 'device') == '/dev/sda1'
|
|
+ salt_mock['disk.blkid'].assert_called_with(token=uuid)
|
|
+
|
|
+ def test_fstab_present_macos_test_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already in /etc/auto_salt.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'MacOS'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_automaster': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_automaster'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='noowners',
|
|
+ config='/etc/auto_salt',
|
|
+ test=True,
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_aix_test_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already in /etc/filesystems.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'AIX'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_filesystems': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_filesystems'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ mount=True,
|
|
+ opts='',
|
|
+ config='/etc/filesystems',
|
|
+ test=True,
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_test_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ test=True,
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_test_new(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry will be written in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='new')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ test=True,
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_test_change(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry will be updated in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='change')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ test=True,
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_test_error(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry cannot be created in /etc/fstab: error.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='error')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ test=True,
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_macos_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry was already in /etc/auto_salt.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'MacOS'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_automaster': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_automaster'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='noowners',
|
|
+ config='/etc/auto_salt',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_aix_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry was already in /etc/filesystems.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'AIX'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_filesystems': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_filesystems'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ mount=True,
|
|
+ opts='',
|
|
+ config='/etc/filesystems',
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_present(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry was already in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='present')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_new(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {'persist': 'new'},
|
|
+ 'comment': ['/home entry added in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='new')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_change(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {'persist': 'change'},
|
|
+ 'comment': ['/home entry updated in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='change')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_present_fail(self):
|
|
+ '''
|
|
+ Test fstab_present
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry cannot be changed in /etc/fstab: error.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.set_fstab': MagicMock(return_value='error')
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_present('/dev/sda1', '/home', 'ext2') == ret
|
|
+ salt_mock['mount.set_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ fstype='ext2',
|
|
+ opts='defaults',
|
|
+ dump=0,
|
|
+ pass_num=0,
|
|
+ config='/etc/fstab',
|
|
+ match_on='auto',
|
|
+ not_change=False)
|
|
+
|
|
+ def test_fstab_absent_macos_test_absent(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already missing in /etc/auto_salt.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'MacOS'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.automaster': MagicMock(return_value={})
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.automaster'].assert_called_with('/etc/auto_salt')
|
|
+
|
|
+ def test_fstab_absent_aix_test_absent(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already missing in /etc/filesystems.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'AIX'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.filesystems': MagicMock(return_value={})
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.filesystems'].assert_called_with('/etc/filesystems')
|
|
+
|
|
+ def test_fstab_absent_test_absent(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already missing in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.fstab': MagicMock(return_value={})
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.fstab'].assert_called_with('/etc/fstab')
|
|
+
|
|
+ def test_fstab_absent_test_present(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': None,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry will be removed from /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': True}
|
|
+ salt_mock = {
|
|
+ 'mount.fstab': MagicMock(return_value={'/home': {}})
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.fstab'].assert_called_with('/etc/fstab')
|
|
+
|
|
+ def test_fstab_absent_macos_present(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {'persist': 'removed'},
|
|
+ 'comment': ['/home entry removed from /etc/auto_salt.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'MacOS'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.automaster': MagicMock(return_value={'/home': {}}),
|
|
+ 'mount.rm_automaster': MagicMock(return_value=True)
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.automaster'].assert_called_with('/etc/auto_salt')
|
|
+ salt_mock['mount.rm_automaster'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ config='/etc/auto_salt')
|
|
+
|
|
+ def test_fstab_absent_aix_present(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {'persist': 'removed'},
|
|
+ 'comment': ['/home entry removed from /etc/filesystems.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'AIX'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.filesystems': MagicMock(return_value={'/home': {}}),
|
|
+ 'mount.rm_filesystems': MagicMock(return_value=True)
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.filesystems'].assert_called_with('/etc/filesystems')
|
|
+ salt_mock['mount.rm_filesystems'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ config='/etc/filesystems')
|
|
+
|
|
+ def test_fstab_absent_present(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {'persist': 'removed'},
|
|
+ 'comment': ['/home entry removed from /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.fstab': MagicMock(return_value={'/home': {}}),
|
|
+ 'mount.rm_fstab': MagicMock(return_value=True)
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.fstab'].assert_called_with('/etc/fstab')
|
|
+ salt_mock['mount.rm_fstab'].assert_called_with(name='/home',
|
|
+ device='/dev/sda1',
|
|
+ config='/etc/fstab')
|
|
+
|
|
+ def test_fstab_absent_absent(self):
|
|
+ '''
|
|
+ Test fstab_absent
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': '/dev/sda1',
|
|
+ 'result': True,
|
|
+ 'changes': {},
|
|
+ 'comment': ['/home entry is already missing in /etc/fstab.'],
|
|
+ }
|
|
+
|
|
+ grains_mock = {'os': 'Linux'}
|
|
+ opts_mock = {'test': False}
|
|
+ salt_mock = {
|
|
+ 'mount.fstab': MagicMock(return_value={})
|
|
+ }
|
|
+ with patch.dict(mount.__grains__, grains_mock), \
|
|
+ patch.dict(mount.__opts__, opts_mock), \
|
|
+ patch.dict(mount.__salt__, salt_mock):
|
|
+ assert mount.fstab_absent('/dev/sda1', '/home') == ret
|
|
+ salt_mock['mount.fstab'].assert_called_with('/etc/fstab')
|
|
diff --git a/tests/unit/states/test_pkg.py b/tests/unit/states/test_pkg.py
|
|
index 42fe6c6867..d30e064167 100644
|
|
--- a/tests/unit/states/test_pkg.py
|
|
+++ b/tests/unit/states/test_pkg.py
|
|
@@ -46,7 +46,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
|
|
pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
|
|
})
|
|
upgrade = MagicMock(return_value=self.pkgs)
|
|
- version = MagicMock(side_effect=lambda pkgname: self.pkgs[pkgname]['old'])
|
|
+ version = MagicMock(side_effect=lambda pkgname, **_: self.pkgs[pkgname]['old'])
|
|
|
|
with patch.dict(pkg.__salt__,
|
|
{'pkg.list_upgrades': list_upgrades,
|
|
@@ -55,7 +55,6 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
|
|
|
|
# Run state with test=false
|
|
with patch.dict(pkg.__opts__, {'test': False}):
|
|
-
|
|
ret = pkg.uptodate('dummy', test=True)
|
|
self.assertTrue(ret['result'])
|
|
self.assertDictEqual(ret['changes'], self.pkgs)
|
|
@@ -81,7 +80,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
|
|
pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
|
|
})
|
|
upgrade = MagicMock(return_value=self.pkgs)
|
|
- version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
|
|
+ version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
|
|
|
|
with patch.dict(pkg.__salt__,
|
|
{'pkg.list_upgrades': list_upgrades,
|
|
@@ -160,7 +159,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
|
|
pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
|
|
})
|
|
upgrade = MagicMock(return_value={})
|
|
- version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
|
|
+ version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
|
|
|
|
with patch.dict(pkg.__salt__,
|
|
{'pkg.list_upgrades': list_upgrades,
|
|
diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
|
|
index 0f22334559..ba8739a6b2 100644
|
|
--- a/tests/unit/test_loader.py
|
|
+++ b/tests/unit/test_loader.py
|
|
@@ -119,6 +119,97 @@ class LazyLoaderTest(TestCase):
|
|
self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
|
|
|
|
|
|
+loader_template_module = '''
|
|
+import my_utils
|
|
+
|
|
+def run():
|
|
+ return my_utils.run()
|
|
+'''
|
|
+
|
|
+loader_template_utils = '''
|
|
+def run():
|
|
+ return True
|
|
+'''
|
|
+
|
|
+
|
|
+class LazyLoaderUtilsTest(TestCase):
|
|
+ '''
|
|
+ Test the loader
|
|
+ '''
|
|
+ module_name = 'lazyloaderutilstest'
|
|
+ utils_name = 'my_utils'
|
|
+
|
|
+ @classmethod
|
|
+ def setUpClass(cls):
|
|
+ cls.opts = salt.config.minion_config(None)
|
|
+ cls.opts['grains'] = salt.loader.grains(cls.opts)
|
|
+ if not os.path.isdir(TMP):
|
|
+ os.makedirs(TMP)
|
|
+
|
|
+ def setUp(self):
|
|
+ # Setup the module
|
|
+ self.module_dir = tempfile.mkdtemp(dir=TMP)
|
|
+ self.module_file = os.path.join(self.module_dir,
|
|
+ '{}.py'.format(self.module_name))
|
|
+ with salt.utils.files.fopen(self.module_file, 'w') as fh:
|
|
+ fh.write(salt.utils.stringutils.to_str(loader_template_module))
|
|
+ fh.flush()
|
|
+ os.fsync(fh.fileno())
|
|
+
|
|
+ self.utils_dir = tempfile.mkdtemp(dir=TMP)
|
|
+ self.utils_file = os.path.join(self.utils_dir,
|
|
+ '{}.py'.format(self.utils_name))
|
|
+ with salt.utils.files.fopen(self.utils_file, 'w') as fh:
|
|
+ fh.write(salt.utils.stringutils.to_str(loader_template_utils))
|
|
+ fh.flush()
|
|
+ os.fsync(fh.fileno())
|
|
+
|
|
+ def tearDown(self):
|
|
+ shutil.rmtree(self.module_dir)
|
|
+ if os.path.isdir(self.module_dir):
|
|
+ shutil.rmtree(self.module_dir)
|
|
+ shutil.rmtree(self.utils_dir)
|
|
+ if os.path.isdir(self.utils_dir):
|
|
+ shutil.rmtree(self.utils_dir)
|
|
+ del self.module_dir
|
|
+ del self.module_file
|
|
+ del self.utils_dir
|
|
+ del self.utils_file
|
|
+
|
|
+ if self.module_name in sys.modules:
|
|
+ del sys.modules[self.module_name]
|
|
+ if self.utils_name in sys.modules:
|
|
+ del sys.modules[self.utils_name]
|
|
+
|
|
+ @classmethod
|
|
+ def tearDownClass(cls):
|
|
+ del cls.opts
|
|
+
|
|
+ def test_utils_found(self):
|
|
+ '''
|
|
+ Test that the extra module directory is available for imports
|
|
+ '''
|
|
+ loader = salt.loader.LazyLoader(
|
|
+ [self.module_dir],
|
|
+ copy.deepcopy(self.opts),
|
|
+ tag='module',
|
|
+ extra_module_dirs=[self.utils_dir])
|
|
+ self.assertTrue(
|
|
+ inspect.isfunction(
|
|
+ loader[self.module_name + '.run']))
|
|
+ self.assertTrue(loader[self.module_name + '.run']())
|
|
+
|
|
+ def test_utils_not_found(self):
|
|
+ '''
|
|
+ Test that the extra module directory is not available for imports
|
|
+ '''
|
|
+ loader = salt.loader.LazyLoader(
|
|
+ [self.module_dir],
|
|
+ copy.deepcopy(self.opts),
|
|
+ tag='module')
|
|
+ self.assertTrue(self.module_name + '.run' not in loader)
|
|
+
|
|
+
|
|
class LazyLoaderVirtualEnabledTest(TestCase):
|
|
'''
|
|
Test the base loader of salt.
|
|
@@ -995,8 +1086,9 @@ class LoaderGlobalsTest(ModuleCase):
|
|
|
|
# Now, test each module!
|
|
for item in global_vars:
|
|
- for name in names:
|
|
- self.assertIn(name, list(item.keys()))
|
|
+ if item['__name__'].startswith('salt.loaded'):
|
|
+ for name in names:
|
|
+ self.assertIn(name, list(item.keys()))
|
|
|
|
def test_auth(self):
|
|
'''
|
|
diff --git a/tests/unit/utils/test_systemd.py b/tests/unit/utils/test_systemd.py
|
|
index 248ff44579..bddfee5e54 100644
|
|
--- a/tests/unit/utils/test_systemd.py
|
|
+++ b/tests/unit/utils/test_systemd.py
|
|
@@ -100,6 +100,27 @@ class SystemdTestCase(TestCase):
|
|
self.assertTrue(_systemd.version(context))
|
|
self.assertEqual(context, {'salt.utils.systemd.version': _version})
|
|
|
|
+ def test_version_generated_from_git_describe(self):
|
|
+ '''
|
|
+ Test with version string matching versions generated by git describe
|
|
+ in systemd. This feature is used in systemd>=241.
|
|
+ '''
|
|
+ with patch('subprocess.Popen') as popen_mock:
|
|
+ _version = 241
|
|
+ output = 'systemd {0} ({0}.0-0-dist)\n-SYSVINIT'.format(_version)
|
|
+ popen_mock.return_value = Mock(
|
|
+ communicate=lambda *args, **kwargs: (output, None),
|
|
+ pid=lambda: 12345,
|
|
+ retcode=0
|
|
+ )
|
|
+
|
|
+ # Test without context dict passed
|
|
+ self.assertEqual(_systemd.version(), _version)
|
|
+ # Test that context key is set when context dict is passed
|
|
+ context = {}
|
|
+ self.assertTrue(_systemd.version(context))
|
|
+ self.assertEqual(context, {'salt.utils.systemd.version': _version})
|
|
+
|
|
def test_version_return_from_context(self):
|
|
'''
|
|
Test that the context data is returned when present. To ensure we're
|
|
--
|
|
2.21.0
|
|
|
|
|