dede5bd1d4
OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=166
From d8e0602b36fcfc8b6a446ef56726eae08726e5ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
 <psuarezhernandez@suse.com>
Date: Thu, 20 Jun 2019 12:52:45 +0100
Subject: [PATCH] Provide the missing features required for Yomi (Yet one
 more installer)

---
 doc/ref/modules/all/index.rst                |    1 +
 doc/ref/modules/all/salt.modules.kubeadm.rst |    5 +
 salt/grains/core.py                          |    4 +
 salt/loader.py                               |   44 +-
 salt/modules/aixpkg.py                       |    2 +-
 salt/modules/apkpkg.py                       |   12 +-
 salt/modules/aptpkg.py                       |   14 +-
 salt/modules/dpkg_lowpkg.py                  |    6 +-
 salt/modules/ebuildpkg.py                    |    8 +-
 salt/modules/freebsdpkg.py                   |    6 +-
 salt/modules/kubeadm.py                      | 1265 ++++++++++++++++++++++++++
 salt/modules/mac_brew_pkg.py                 |    8 +-
 salt/modules/mac_portspkg.py                 |    6 +-
 salt/modules/openbsdpkg.py                   |    2 +-
 salt/modules/pacmanpkg.py                    |   10 +-
 salt/modules/pkgin.py                        |    8 +-
 salt/modules/pkgng.py                        |    4 +-
 salt/modules/rpm_lowpkg.py                   |  101 +-
 salt/modules/solarisipspkg.py                |    4 +-
 salt/modules/solarispkg.py                   |    2 +-
 salt/modules/systemd_service.py              |   55 ++
 salt/modules/xbpspkg.py                      |   12 +-
 salt/modules/yumpkg.py                       |   34 +-
 salt/modules/zypperpkg.py                    |  501 +++++++---
 salt/states/btrfs.py                         |  385 ++++++++
 salt/states/file.py                          |    6 +-
 salt/states/loop.py                          |    4 +
 salt/states/pkg.py                           |   26 +-
 salt/states/pkgrepo.py                       |   14 +-
 salt/utils/oset.py                           |    7 +-
 tests/unit/modules/test_kubeadm.py           | 1144 +++++++++++++++++++++++
 tests/unit/modules/test_rpm_lowpkg.py        |   87 +-
 tests/unit/modules/test_systemd_service.py   |   53 ++
 tests/unit/modules/test_zypperpkg.py         |  100 +-
 tests/unit/states/test_btrfs.py              |  782 ++++++++++++++++
 tests/unit/states/test_pkg.py                |    7 +-
 tests/unit/test_loader.py                    |   96 +-
 37 files changed, 4550 insertions(+), 275 deletions(-)
 create mode 100644 doc/ref/modules/all/salt.modules.kubeadm.rst
 create mode 100644 salt/modules/kubeadm.py
 create mode 100644 salt/states/btrfs.py
 create mode 100644 tests/unit/modules/test_kubeadm.py
 create mode 100644 tests/unit/states/test_btrfs.py

diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
index 359af7e1e0..8e1bf2ecf1 100644
--- a/doc/ref/modules/all/index.rst
+++ b/doc/ref/modules/all/index.rst
@@ -214,6 +214,7 @@ execution modules
     keystoneng
     keystore
     kmod
+    kubeadm
     kubernetesmod
     launchctl_service
     layman
diff --git a/doc/ref/modules/all/salt.modules.kubeadm.rst b/doc/ref/modules/all/salt.modules.kubeadm.rst
new file mode 100644
index 0000000000..137c779da2
--- /dev/null
+++ b/doc/ref/modules/all/salt.modules.kubeadm.rst
@@ -0,0 +1,5 @@
+salt.modules.kubeadm module
+===========================
+
+.. automodule:: salt.modules.kubeadm
+    :members:
diff --git a/salt/grains/core.py b/salt/grains/core.py
index f1e3ebe9d2..b58c29dbc3 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -2611,6 +2611,10 @@ def _hw_data(osdata):
                 grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
                 if key == 'uuid':
                     grains['uuid'] = grains['uuid'].lower()
+        except UnicodeDecodeError:
+            # Some firmware provides invalid 'product_name'
+            # files; ignore them
+            pass
         except (IOError, OSError) as err:
             # PermissionError is new to Python 3, but corresponds to the EACESS and
             # EPERM error numbers. Use those instead here for PY2 compatibility.
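The added handler follows the defensive pattern already used in _hw_data: a DMI entry that cannot be decoded is simply skipped instead of aborting grain collection. A standalone sketch of that read-and-ignore pattern (the helper name and path handling are illustrative, not part of the patch):

    import salt.utils.files
    import salt.utils.stringutils

    def _read_dmi_entry(path):
        # Return the decoded contents of a /sys/class/dmi/id entry, or
        # None when the firmware exposes unreadable or invalid data.
        try:
            with salt.utils.files.fopen(path, 'r') as ifile:
                return salt.utils.stringutils.to_unicode(
                    ifile.read().strip(), errors='replace')
        except UnicodeDecodeError:
            # Broken firmware can expose non-UTF-8 bytes here.
            return None
        except (IOError, OSError):
            # Missing entry or insufficient permissions.
            return None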
diff --git a/salt/loader.py b/salt/loader.py
index 860162b791..c68562988d 100644
--- a/salt/loader.py
+++ b/salt/loader.py
@@ -254,6 +254,7 @@ def minion_mods(
         whitelist=whitelist,
         loaded_base_name=loaded_base_name,
         static_modules=static_modules,
+        extra_module_dirs=utils.module_dirs if utils else None,
     )

     ret.pack['__salt__'] = ret
@@ -347,6 +348,7 @@ def engines(opts, functions, runners, utils, proxy=None):
         opts,
         tag='engines',
         pack=pack,
+        extra_module_dirs=utils.module_dirs if utils else None,
     )


@@ -359,6 +361,7 @@ def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
         opts,
         tag='proxy',
         pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
+        extra_module_dirs=utils.module_dirs if utils else None,
     )

     ret.pack['__proxy__'] = ret
@@ -396,12 +399,14 @@ def pillars(opts, functions, context=None):
     '''
     Returns the pillars modules
     '''
+    _utils = utils(opts)
     ret = LazyLoader(_module_dirs(opts, 'pillar'),
                      opts,
                      tag='pillar',
                      pack={'__salt__': functions,
                            '__context__': context,
-                           '__utils__': utils(opts)})
+                           '__utils__': _utils},
+                     extra_module_dirs=_utils.module_dirs)
     ret.pack['__ext_pillar__'] = ret
     return FilterDictWrapper(ret, '.ext_pillar')

@@ -501,11 +506,13 @@ def fileserver(opts, backends):
     '''
     Returns the file server modules
     '''
+    _utils = utils(opts)
     return LazyLoader(_module_dirs(opts, 'fileserver'),
                       opts,
                       tag='fileserver',
                       whitelist=backends,
-                      pack={'__utils__': utils(opts)})
+                      pack={'__utils__': _utils},
+                      extra_module_dirs=_utils.module_dirs)


 def roster(opts, runner=None, utils=None, whitelist=None):
@@ -521,6 +528,7 @@ def roster(opts, runner=None, utils=None, whitelist=None):
             '__runner__': runner,
             '__utils__': utils,
         },
+        extra_module_dirs=utils.module_dirs if utils else None,
     )


@@ -562,6 +570,7 @@ def states(opts, functions, utils, serializers, whitelist=None, proxy=None, cont
         tag='states',
         pack={'__salt__': functions, '__proxy__': proxy or {}},
         whitelist=whitelist,
+        extra_module_dirs=utils.module_dirs if utils else None,
     )
     ret.pack['__states__'] = ret
     ret.pack['__utils__'] = utils
@@ -683,6 +692,7 @@ def grain_funcs(opts, proxy=None):
         ),
         opts,
         tag='grains',
+        extra_module_dirs=_utils.module_dirs,
     )
     ret.pack['__utils__'] = utils(opts, proxy=proxy)
     return ret
@@ -947,6 +957,7 @@ def runner(opts, utils=None, context=None, whitelist=None):
         tag='runners',
         pack={'__utils__': utils, '__context__': context},
         whitelist=whitelist,
+        extra_module_dirs=utils.module_dirs if utils else None,
     )
     # TODO: change from __salt__ to something else, we overload __salt__ too much
     ret.pack['__salt__'] = ret
@@ -982,6 +993,7 @@ def sdb(opts, functions=None, whitelist=None, utils=None):
             '__salt__': minion_mods(opts, utils=utils),
         },
         whitelist=whitelist,
+        extra_module_dirs=utils.module_dirs if utils else None,
     )


@@ -1023,6 +1035,7 @@ def clouds(opts):
     '''
     Return the cloud functions
     '''
+    _utils = salt.loader.utils(opts)
     # Let's bring __active_provider_name__, defaulting to None, to all cloud
     # drivers. This will get temporarily updated/overridden with a context
     # manager when needed.
@@ -1034,8 +1047,9 @@ def clouds(opts):
                                       int_type='clouds'),
                          opts,
                          tag='clouds',
-                         pack={'__utils__': salt.loader.utils(opts),
+                         pack={'__utils__': _utils,
                                '__active_provider_name__': None},
+                         extra_module_dirs=_utils.module_dirs,
                          )
     for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
         log.trace(
@@ -1149,6 +1163,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
     :param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
     :param str virtual_funcs: The name of additional functions in the module to call to verify its functionality.
                               If not true, the module will not load.
+    :param list extra_module_dirs: A list of additional directories that the loaded modules will be able to import from
     :returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
               are function references themselves which are loaded on-demand.
     # TODO:
@@ -1170,6 +1185,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
                  static_modules=None,
                  proxy=None,
                  virtual_funcs=None,
+                 extra_module_dirs=None,
                  ):  # pylint: disable=W0231
         '''
         In pack, if any of the values are None they will be replaced with an
@@ -1211,6 +1227,9 @@ class LazyLoader(salt.utils.lazy.LazyDict):
             virtual_funcs = []
         self.virtual_funcs = virtual_funcs

+        self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
+        self._clean_module_dirs = []
+
         self.disabled = set(
             self.opts.get(
                 'disable_{0}{1}'.format(
@@ -1517,12 +1536,30 @@ class LazyLoader(salt.utils.lazy.LazyDict):
                 reload_module(submodule)
             self._reload_submodules(submodule)

+    def __populate_sys_path(self):
+        for directory in self.extra_module_dirs:
+            if directory not in sys.path:
+                sys.path.append(directory)
+                self._clean_module_dirs.append(directory)
+
+    def __clean_sys_path(self):
+        for directory in self._clean_module_dirs:
+            if directory in sys.path:
+                sys.path.remove(directory)
+        self._clean_module_dirs = []
+
+        # Be sure that sys.path_importer_cache does not contain any
+        # invalid FileFinder references
+        if USE_IMPORTLIB:
+            importlib.invalidate_caches()
+
     def _load_module(self, name):
         mod = None
         fpath, suffix = self.file_mapping[name][:2]
         self.loaded_files.add(name)
         fpath_dirname = os.path.dirname(fpath)
         try:
+            self.__populate_sys_path()
             sys.path.append(fpath_dirname)
             if suffix == '.pyx':
                 mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
@@ -1645,6 +1682,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
             return False
         finally:
             sys.path.remove(fpath_dirname)
+            self.__clean_sys_path()

         if hasattr(mod, '__opts__'):
             mod.__opts__.update(self.opts)
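The effect of extra_module_dirs is that anything shipped in a utils directory is temporarily importable by plain module name while another module is being loaded. A minimal sketch of what this enables (the file and function names are illustrative, not from the patch):

    # _utils/yomi_helper.py -- synced to the minion with saltutil.sync_utils
    def base_device(device):
        # Shared helper: strip the partition number from a device name.
        return device.rstrip('0123456789')

    # _modules/yomi_disk.py -- because the loader put the utils directory
    # on sys.path while loading this module, a direct import works here
    # instead of going through __utils__.
    import yomi_helper

    def base_device(device):
        return yomi_helper.base_device(device)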
diff --git a/salt/modules/aixpkg.py b/salt/modules/aixpkg.py
index 4f9852b504..d35946f397 100644
--- a/salt/modules/aixpkg.py
+++ b/salt/modules/aixpkg.py
@@ -400,7 +400,7 @@ def latest_version(*names, **kwargs):
 available_version = salt.utils.functools.alias_function(latest_version, 'available_version')


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
     '''
     Check whether or not an upgrade is available for a given package

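The same one-line signature change recurs throughout the pkg modules below: each public function gains a tolerant **kwargs so that a caller such as the pkg state (or an installer like Yomi driving several backends) can pass one uniform set of keyword arguments without raising a TypeError on platforms that ignore them. A minimal sketch of the pattern (function and argument names are illustrative):

    # Before: an unexpected keyword argument raises a TypeError.
    def refresh_db():
        return True

    # After: unknown keywords (e.g. root='/mnt') are accepted and
    # silently ignored by backends that have no use for them.
    def refresh_db(**kwargs):
        return True

    # Every backend can now be called the same way:
    # __salt__['pkg.refresh_db'](root='/mnt', failhard=False)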
diff --git a/salt/modules/apkpkg.py b/salt/modules/apkpkg.py
index 2e9a2a952e..4f84642e02 100644
--- a/salt/modules/apkpkg.py
+++ b/salt/modules/apkpkg.py
@@ -83,7 +83,7 @@ def version(*names, **kwargs):
     return __salt__['pkg_resource.version'](*names, **kwargs)


-def refresh_db():
+def refresh_db(**kwargs):
     '''
     Updates the package list

@@ -425,7 +425,7 @@ def remove(name=None, pkgs=None, purge=False, **kwargs):  # pylint: disable=unus
     return ret


-def upgrade(name=None, pkgs=None, refresh=True):
+def upgrade(name=None, pkgs=None, refresh=True, **kwargs):
     '''
     Upgrades all packages via ``apk upgrade`` or a specific package if name or
     pkgs is specified. Name is ignored if pkgs is specified
@@ -485,7 +485,7 @@ def upgrade(name=None, pkgs=None, refresh=True):
     return ret


-def list_upgrades(refresh=True):
+def list_upgrades(refresh=True, **kwargs):
     '''
     List all available package upgrades.

@@ -524,7 +524,7 @@ def list_upgrades(refresh=True):
     return ret


-def file_list(*packages):
+def file_list(*packages, **kwargs):
     '''
     List the files that belong to a package. Not specifying any packages will
     return a list of _every_ file on the system's package database (not

@@ -541,7 +541,7 @@ def file_list(*packages):
     return file_dict(*packages)


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
     '''
     List the files that belong to a package, grouped by package. Not
     specifying any packages will return a list of _every_ file on the system's

@@ -580,7 +580,7 @@ def file_dict(*packages):
     return {'errors': errors, 'packages': ret}


-def owner(*paths):
+def owner(*paths, **kwargs):
     '''
     Return the name of the package that owns the file. Multiple file paths can
     be passed. Like :mod:`pkg.version <salt.modules.apk.version>`, if a single
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index 345b8422d9..13484c96bc 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -335,7 +335,7 @@ def version(*names, **kwargs):
     return __salt__['pkg_resource.version'](*names, **kwargs)


-def refresh_db(cache_valid_time=0, failhard=False):
+def refresh_db(cache_valid_time=0, failhard=False, **kwargs):
     '''
     Updates the APT database to latest packages based upon repositories

@@ -1438,7 +1438,7 @@ def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
     return _get_upgradable(dist_upgrade, **kwargs)


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
     '''
     Check whether or not an upgrade is available for a given package

@@ -1451,7 +1451,7 @@ def upgrade_available(name):
     return latest_version(name) != ''


-def version_cmp(pkg1, pkg2, ignore_epoch=False):
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
     '''
     Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
     pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
@@ -1641,7 +1641,7 @@ def _skip_source(source):
     return False


-def list_repos():
+def list_repos(**kwargs):
     '''
     Lists all repos in the sources.list (and sources.lists.d) files

@@ -2411,7 +2411,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
     }


-def file_list(*packages):
+def file_list(*packages, **kwargs):
     '''
     List the files that belong to a package. Not specifying any packages will
     return a list of _every_ file on the system's package database (not

@@ -2428,7 +2428,7 @@ def file_list(*packages):
     return __salt__['lowpkg.file_list'](*packages)


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
     '''
     List the files that belong to a package, grouped by package. Not
     specifying any packages will return a list of _every_ file on the system's

@@ -2712,7 +2712,7 @@ def _resolve_deps(name, pkgs, **kwargs):
     return


-def owner(*paths):
+def owner(*paths, **kwargs):
     '''
     .. versionadded:: 2014.7.0

diff --git a/salt/modules/dpkg_lowpkg.py b/salt/modules/dpkg_lowpkg.py
index b78e844830..a64e6d57da 100644
--- a/salt/modules/dpkg_lowpkg.py
+++ b/salt/modules/dpkg_lowpkg.py
@@ -135,7 +135,7 @@ def unpurge(*packages):
     return salt.utils.data.compare_dicts(old, new)


-def list_pkgs(*packages):
+def list_pkgs(*packages, **kwargs):
     '''
     List the packages currently installed in a dict::

@@ -169,7 +169,7 @@ def list_pkgs(*packages):
     return pkgs


-def file_list(*packages):
+def file_list(*packages, **kwargs):
     '''
     List the files that belong to a package. Not specifying any packages will
     return a list of _every_ file on the system's package database (not

@@ -211,7 +211,7 @@ def file_list(*packages):
     return {'errors': errors, 'files': list(ret)}


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
     '''
     List the files that belong to a package, grouped by package. Not
     specifying any packages will return a list of _every_ file on the system's
diff --git a/salt/modules/ebuildpkg.py b/salt/modules/ebuildpkg.py
index cb77ff7852..205318f579 100644
--- a/salt/modules/ebuildpkg.py
+++ b/salt/modules/ebuildpkg.py
@@ -358,7 +358,7 @@ def list_upgrades(refresh=True, backtrack=3, **kwargs):  # pylint: disable=W0613
     return _get_upgradable(backtrack)


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
     '''
     Check whether or not an upgrade is available for a given package

@@ -440,7 +440,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
     return ret


-def refresh_db():
+def refresh_db(**kwargs):
     '''
     Update the portage tree using the first available method from the following
     list:

@@ -765,7 +765,7 @@ def install(name=None,
     return changes


-def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
+def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
     '''
     .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
         On minions running systemd>=205, `systemd-run(1)`_ is now used to

@@ -858,7 +858,7 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
     return ret


-def upgrade(refresh=True, binhost=None, backtrack=3):
+def upgrade(refresh=True, binhost=None, backtrack=3, **kwargs):
     '''
     .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
         On minions running systemd>=205, `systemd-run(1)`_ is now used to
diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py
index 43f127ef35..0bae7a3bab 100644
--- a/salt/modules/freebsdpkg.py
+++ b/salt/modules/freebsdpkg.py
@@ -238,7 +238,7 @@ def version(*names, **kwargs):
     ])


-def refresh_db():
+def refresh_db(**kwargs):
     '''
     ``pkg_add(1)`` does not use a local database of available packages, so this
     function simply returns ``True``. it exists merely for API compatibility.

@@ -503,7 +503,7 @@ def _rehash():
     __salt__['cmd.shell']('rehash', output_loglevel='trace')


-def file_list(*packages):
+def file_list(*packages, **kwargs):
     '''
     List the files that belong to a package. Not specifying any packages will
     return a list of _every_ file on the system's package database (not

@@ -525,7 +525,7 @@ def file_list(*packages):
     return ret


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
     '''
     List the files that belong to a package, grouped by package. Not
     specifying any packages will return a list of _every_ file on the
diff --git a/salt/modules/kubeadm.py b/salt/modules/kubeadm.py
new file mode 100644
index 0000000000..2b1e7906a1
--- /dev/null
+++ b/salt/modules/kubeadm.py
@@ -0,0 +1,1265 @@
+# -*- coding: utf-8 -*-
+#
+# Author: Alberto Planas <aplanas@suse.com>
+#
+# Copyright 2019 SUSE LINUX GmbH, Nuernberg, Germany.
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+'''
+:maintainer:    Alberto Planas <aplanas@suse.com>
+:maturity:      new
+:depends:       None
+:platform:      Linux
+'''
+from __future__ import absolute_import, print_function, unicode_literals
+import json
+import logging
+import re
+
+from salt.exceptions import CommandExecutionError
+from salt.ext.six.moves import zip
+import salt.utils.files
+
+ADMIN_CFG = '/etc/kubernetes/admin.conf'
+
+log = logging.getLogger(__name__)
+
+__virtualname__ = 'kubeadm'
+
+# Define variables that are not exported by Salt, so this file can be
+# imported as a normal module
+try:
+    __salt__
+except NameError:
+    __salt__ = {}
+
+
+def _api_server_endpoint(config=None):
+    '''
+    Return the API server endpoint
+    '''
+    config = config if config else ADMIN_CFG
+    endpoint = None
+    try:
+        with salt.utils.files.fopen(config, 'r') as fp_:
+            endpoint = re.search(r'^\s*server: https?://(.*)$',
+                                 fp_.read(),
+                                 re.MULTILINE).group(1)
+    except Exception:
+        # Any error or exception is mapped to None
+        pass
+    return endpoint
+
+
+def _token(create_if_needed=True):
+    '''
+    Return a valid bootstrap token
+    '''
+    tokens = token_list()
+    if not tokens:
+        token_create(description='Token created by kubeadm salt module')
+        tokens = token_list()
+    # We expect that the token is valid for authentication and signing
+    return tokens[0]['token']
+
+
+def _discovery_token_ca_cert_hash():
+    cmd = ['openssl', 'x509', '-pubkey', '-in', '/etc/kubernetes/pki/ca.crt',
+           '|', 'openssl', 'rsa', '-pubin', '-outform', 'der', '2>/dev/null',
+           '|', 'openssl', 'dgst', '-sha256', '-hex',
+           '|', 'sed', "'s/^.* //'"]
+    result = __salt__['cmd.run_all'](' '.join(cmd), python_shell=True)
+    if result['retcode']:
+        raise CommandExecutionError(result['stderr'])
+
+    return 'sha256:{}'.format(result['stdout'])
+
+
+def join_params(create_if_needed=False):
+    '''
+    .. versionadded:: TBD
+
+    Return the parameters required for joining into the cluster
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.join_params
+       salt '*' kubeadm.join_params create_if_needed=True
+
+    '''
+
+    params = {
+        'api-server-endpoint': _api_server_endpoint(),
+        'token': _token(create_if_needed),
+        'discovery-token-ca-cert-hash': _discovery_token_ca_cert_hash(),
+    }
+    return params
+
+
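Together, the three private helpers give join_params everything a new node needs to authenticate against the control plane. A sketch of how an orchestrator might consume it (hypothetical runner-side code; the minion IDs are illustrative):

    import salt.client

    local = salt.client.LocalClient()

    # Collect the join parameters on the control-plane node.
    params = local.cmd('master', 'kubeadm.join_params',
                       kwarg={'create_if_needed': True})['master']

    # Replay them on the worker that should join the cluster.
    local.cmd('worker1', 'kubeadm.join',
              arg=[params['api-server-endpoint']],
              kwarg={'token': params['token'],
                     'discovery_token_ca_cert_hash':
                         params['discovery-token-ca-cert-hash']})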
+def version(kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Return the version of kubeadm
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.version
+
+    '''
+    cmd = ['kubeadm', 'version']
+
+    parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    cmd.extend(['--output', 'json'])
+
+    return json.loads(__salt__['cmd.run_stdout'](cmd))
+
+
+def _cmd(cmd):
+    '''Utility function to run commands.'''
+    result = __salt__['cmd.run_all'](cmd)
+    if result['retcode']:
+        raise CommandExecutionError(result['stderr'])
+    return result['stdout']
+
+
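The public wrappers that follow all funnel through _cmd, so a kubeadm invocation that exits non-zero surfaces as a CommandExecutionError rather than an error string. A short sketch of the resulting calling convention on the consumer side (illustrative):

    from salt.exceptions import CommandExecutionError

    try:
        new_token = token_generate()
    except CommandExecutionError as err:
        # kubeadm wrote to stderr and exited with a non-zero status.
        log.error('Could not generate a bootstrap token: %s', err)
        new_token = None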
+def token_create(token=None, config=None, description=None,
+                 groups=None, ttl=None, usages=None, kubeconfig=None,
+                 rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Create bootstrap tokens on the server
+
+    token
+        Token to write, if None one will be generated. The token must
+        match a regular expression, that by default is
+        [a-z0-9]{6}.[a-z0-9]{16}
+
+    config
+        Path to kubeadm configuration file
+
+    description
+        A human friendly description of how this token is used
+
+    groups
+        List of extra groups that this token will authenticate, defaults
+        to ['system:bootstrappers:kubeadm:default-node-token']
+
+    ttl
+        The duration before the token is automatically deleted (1s, 2m,
+        3h). If set to '0' the token will never expire. Default value
+        is 24h0m0s
+
+    usages
+        Describes the ways in which this token can be used. The default
+        value is ['signing', 'authentication']
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.token_create
+       salt '*' kubeadm.token_create a1b2c.0123456789abcdef
+       salt '*' kubeadm.token_create ttl='6h'
+       salt '*' kubeadm.token_create usages="['signing']"
+
+    '''
+    cmd = ['kubeadm', 'token', 'create']
+    if token:
+        cmd.append(token)
+
+    parameters = [('config', config), ('description', description),
+                  ('groups', groups), ('ttl', ttl), ('usages', usages),
+                  ('kubeconfig', kubeconfig), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            if parameter in ('groups', 'usages'):
+                cmd.extend(['--{}'.format(parameter), json.dumps(value)])
+            else:
+                cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def token_delete(token, kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Delete bootstrap tokens on the server
+
+    token
+        Token to delete. The token must match a regular expression,
+        that by default is [a-z0-9]{6}.[a-z0-9]{16}
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.token_delete a1b2c
+       salt '*' kubeadm.token_delete a1b2c.0123456789abcdef
+
+    '''
+    cmd = ['kubeadm', 'token', 'delete', token]
+
+    parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return bool(_cmd(cmd))
+
+
+def token_generate(kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Generate and return a bootstrap token, but do not create it on the
+    server
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.token_generate
+
+    '''
+    cmd = ['kubeadm', 'token', 'generate']
+
+    parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def token_list(kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    List bootstrap tokens on the server
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.token_list
+
+    '''
+    cmd = ['kubeadm', 'token', 'list']
+
+    parameters = [('kubeconfig', kubeconfig), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    lines = _cmd(cmd).splitlines()
+
+    # Find the header and parse it. We do not need to validate the
+    # content, as the regex will take care of future changes.
+    header = lines.pop(0)
+    header = [i.lower() for i in re.findall(r'(\w+(?:\s\w+)*)', header)]
+
+    tokens = []
+    for line in lines:
+        # TODO(aplanas): descriptions with multiple spaces can break
+        # the parser.
+        values = re.findall(r'(\S+(?:\s\S+)*)', line)
+        if len(header) != len(values):
+            log.error('Error parsing line: {}'.format(line))
+            continue
+        tokens.append({key: value for key, value in zip(header, values)})
+    return tokens
+
+
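token_list turns the column-aligned output of `kubeadm token list` into a list of dicts keyed by the lower-cased header fields; both regexes break fields on runs of two or more spaces, so single-spaced phrases such as "EXTRA GROUPS" stay together as one key. An abridged, hypothetical illustration of the transformation:

    raw = ('TOKEN                     TTL   EXPIRES                USAGES\n'
           'a1b2c.0123456789abcdef    23h   2019-06-21T12:52:45Z   authentication,signing')

    # header -> ['token', 'ttl', 'expires', 'usages']
    # tokens -> [{'token': 'a1b2c.0123456789abcdef',
    #             'ttl': '23h',
    #             'expires': '2019-06-21T12:52:45Z',
    #             'usages': 'authentication,signing'}]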
+def alpha_certs_renew(rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Renews certificates for a Kubernetes cluster
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.alpha_certs_renew
+
+    '''
+    cmd = ['kubeadm', 'alpha', 'certs', 'renew']
+
+    parameters = [('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def alpha_kubeconfig_user(client_name,
+                          apiserver_advertise_address=None,
+                          apiserver_bind_port=None, cert_dir=None,
+                          org=None, token=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Outputs a kubeconfig file for an additional user
+
+    client_name
+        The name of the user. It will be used as the CN if client
+        certificates are created
+
+    apiserver_advertise_address
+        The IP address the API server is accessible on
+
+    apiserver_bind_port
+        The port the API server is accessible on (default 6443)
+
+    cert_dir
+        The path where certificates are stored (default
+        "/etc/kubernetes/pki")
+
+    org
+        The organization of the client certificate
+
+    token
+        The token that should be used as the authentication mechanism for
+        this kubeconfig, instead of client certificates
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.alpha_kubeconfig_user client_name=user
+
+    '''
+    cmd = ['kubeadm', 'alpha', 'kubeconfig', 'user', '--client-name',
+           client_name]
+
+    parameters = [('apiserver-advertise-address', apiserver_advertise_address),
+                  ('apiserver-bind-port', apiserver_bind_port),
+                  ('cert-dir', cert_dir), ('org', org),
+                  ('token', token), ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def alpha_kubelet_config_download(kubeconfig=None, kubelet_version=None,
+                                  rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Downloads the kubelet configuration from the cluster ConfigMap
+    kubelet-config-1.X
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    kubelet_version
+        The desired version for the kubelet
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.alpha_kubelet_config_download
+       salt '*' kubeadm.alpha_kubelet_config_download kubelet_version='1.14.0'
+
+    '''
+    cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
+
+    parameters = [('kubeconfig', kubeconfig),
+                  ('kubelet-version', kubelet_version),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def alpha_kubelet_config_enable_dynamic(node_name, kubeconfig=None,
+                                        kubelet_version=None,
+                                        rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Enables or updates dynamic kubelet configuration for a node
+
+    node_name
+        Name of the node that should enable the dynamic kubelet
+        configuration
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    kubelet_version
+        The desired version for the kubelet
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.alpha_kubelet_config_enable_dynamic node-1
+
+    '''
+    cmd = ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
+           '--node-name', node_name]
+
+    parameters = [('kubeconfig', kubeconfig),
+                  ('kubelet-version', kubelet_version),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def alpha_selfhosting_pivot(cert_dir=None, config=None,
+                            kubeconfig=None,
+                            store_certs_in_secrets=False, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Converts a static Pod-hosted control plane into a self-hosted one
+
+    cert_dir
+        The path where certificates are stored (default
+        "/etc/kubernetes/pki")
+
+    config
+        Path to kubeadm configuration file
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    store_certs_in_secrets
+        Enable storing certs in secrets
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.alpha_selfhosting_pivot
+
+    '''
+    cmd = ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
+
+    if store_certs_in_secrets:
+        cmd.append('--store-certs-in-secrets')
+
+    parameters = [('cert-dir', cert_dir),
+                  ('config', config),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_images_list(config=None, feature_gates=None,
+                       kubernetes_version=None, kubeconfig=None,
+                       rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Print a list of images kubeadm will use
+
+    config
+        Path to kubeadm configuration file
+
+    feature_gates
+        A set of key=value pairs that describe feature gates for
+        various features
+
+    kubernetes_version
+        Choose a specific Kubernetes version for the control plane
+        (default "stable-1")
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_images_list
+
+    '''
+    cmd = ['kubeadm', 'config', 'images', 'list']
+
+    parameters = [('config', config),
+                  ('feature-gates', feature_gates),
+                  ('kubernetes-version', kubernetes_version),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd).splitlines()
+
+
+def config_images_pull(config=None, cri_socket=None,
+                       feature_gates=None, kubernetes_version=None,
+                       kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Pull images used by kubeadm
+
+    config
+        Path to kubeadm configuration file
+
+    cri_socket
+        Path to the CRI socket to connect
+
+    feature_gates
+        A set of key=value pairs that describe feature gates for
+        various features
+
+    kubernetes_version
+        Choose a specific Kubernetes version for the control plane
+        (default "stable-1")
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_images_pull
+
+    '''
+    cmd = ['kubeadm', 'config', 'images', 'pull']
+
+    parameters = [('config', config),
+                  ('cri-socket', cri_socket),
+                  ('feature-gates', feature_gates),
+                  ('kubernetes-version', kubernetes_version),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    prefix = '[config/images] Pulled '
+    return [(line.replace(prefix, '')) for line in _cmd(cmd).splitlines()]
+
+
+def config_migrate(old_config, new_config=None, kubeconfig=None,
+                   rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Read an older version of the kubeadm configuration API types from
+    a file, and output the similar config object for the newer version
+
+    old_config
+        Path to the kubeadm config file that is using the old API
+        version and should be converted
+
+    new_config
+        Path to the resulting equivalent kubeadm config file using the
+        new API version. If not specified the output will be returned
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_migrate /oldconfig.cfg
+
+    '''
+    cmd = ['kubeadm', 'config', 'migrate', '--old-config', old_config]
+
+    parameters = [('new-config', new_config),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_print_init_defaults(component_configs=None,
+                               kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Return default init configuration that can be used for 'kubeadm
+    init'
+
+    component_configs
+        A comma-separated list of component config API objects to print
+        the default values for (valid values: KubeProxyConfiguration,
+        KubeletConfiguration)
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_print_init_defaults
+
+    '''
+    cmd = ['kubeadm', 'config', 'print', 'init-defaults']
+
+    parameters = [('component-configs', component_configs),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_print_join_defaults(component_configs=None,
+                               kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Return default join configuration that can be used for 'kubeadm
+    join'
+
+    component_configs
+        A comma-separated list of component config API objects to print
+        the default values for (valid values: KubeProxyConfiguration,
+        KubeletConfiguration)
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_print_join_defaults
+
+    '''
+    cmd = ['kubeadm', 'config', 'print', 'join-defaults']
+
+    parameters = [('component-configs', component_configs),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_upload_from_file(config, kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Upload a configuration file to the in-cluster ConfigMap for
+    kubeadm configuration
+
+    config
+        Path to a kubeadm configuration file
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_upload_from_file /config.cfg
+
+    '''
+    cmd = ['kubeadm', 'config', 'upload', 'from-file', '--config', config]
+
+    parameters = [('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_upload_from_flags(apiserver_advertise_address=None,
+                             apiserver_bind_port=None,
+                             apiserver_cert_extra_sans=None,
+                             cert_dir=None, cri_socket=None,
+                             feature_gates=None,
+                             kubernetes_version=None, node_name=None,
+                             pod_network_cidr=None, service_cidr=None,
+                             service_dns_domain=None, kubeconfig=None,
+                             rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Create the in-cluster configuration file for the first time using
+    flags
+
+    apiserver_advertise_address
+        The IP address the API server will advertise it is listening on
+
+    apiserver_bind_port
+        The port the API server is accessible on (default 6443)
+
+    apiserver_cert_extra_sans
+        Optional extra Subject Alternative Names (SANs) to use for the
+        API Server serving certificate
+
+    cert_dir
+        The path where to save and store the certificates (default
+        "/etc/kubernetes/pki")
+
+    cri_socket
+        Path to the CRI socket to connect
+
+    feature_gates
+        A set of key=value pairs that describe feature gates for
+        various features
+
+    kubernetes_version
+        Choose a specific Kubernetes version for the control plane
+        (default "stable-1")
+
+    node_name
+        Specify the node name
+
+    pod_network_cidr
+        Specify range of IP addresses for the pod network
+
+    service_cidr
+        Use alternative range of IP addresses for service VIPs (default
+        "10.96.0.0/12")
+
+    service_dns_domain
+        Use alternative domain for services (default "cluster.local")
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_upload_from_flags
+
+    '''
+    cmd = ['kubeadm', 'config', 'upload', 'from-flags']
+
+    parameters = [('apiserver-advertise-address', apiserver_advertise_address),
+                  ('apiserver-bind-port', apiserver_bind_port),
+                  ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
+                  ('cert-dir', cert_dir),
+                  ('cri-socket', cri_socket),
+                  ('feature-gates', feature_gates),
+                  ('kubernetes-version', kubernetes_version),
+                  ('node-name', node_name),
+                  ('pod-network-cidr', pod_network_cidr),
+                  ('service-cidr', service_cidr),
+                  ('service-dns-domain', service_dns_domain),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def config_view(kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    View the kubeadm configuration stored inside the cluster
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.config_view
+
+    '''
+    cmd = ['kubeadm', 'config', 'view']
+
+    parameters = [('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+def init(apiserver_advertise_address=None, apiserver_bind_port=None,
+         apiserver_cert_extra_sans=None, cert_dir=None,
+         certificate_key=None, config=None, cri_socket=None,
+         experimental_upload_certs=False, feature_gates=None,
+         ignore_preflight_errors=None, image_repository=None,
+         kubernetes_version=None, node_name=None,
+         pod_network_cidr=None, service_cidr=None,
+         service_dns_domain=None, skip_certificate_key_print=False,
+         skip_phases=None, skip_token_print=False, token=None,
+         token_ttl=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Command to set up the Kubernetes control plane
+
+    apiserver_advertise_address
+        The IP address the API server will advertise it is listening on
+
+    apiserver_bind_port
+        The port the API server is accessible on (default 6443)
+
+    apiserver_cert_extra_sans
+        Optional extra Subject Alternative Names (SANs) to use for the
+        API Server serving certificate
+
+    cert_dir
+        The path where to save and store the certificates (default
+        "/etc/kubernetes/pki")
+
+    certificate_key
+        Key used to encrypt the control-plane certificates in the
+        kubeadm-certs Secret
+
+    config
+        Path to a kubeadm configuration file
+
+    cri_socket
+        Path to the CRI socket to connect
+
+    experimental_upload_certs
+        Upload control-plane certificate to the kubeadm-certs Secret
+
+    feature_gates
+        A set of key=value pairs that describe feature gates for
+        various features
+
+    ignore_preflight_errors
+        A list of checks whose errors will be shown as warnings
+
+    image_repository
+        Choose a container registry to pull control plane images from
+
+    kubernetes_version
+        Choose a specific Kubernetes version for the control plane
+        (default "stable-1")
+
+    node_name
+        Specify the node name
+
+    pod_network_cidr
+        Specify range of IP addresses for the pod network
+
+    service_cidr
+        Use alternative range of IP addresses for service VIPs (default
+        "10.96.0.0/12")
+
+    service_dns_domain
+        Use alternative domain for services (default "cluster.local")
+
+    skip_certificate_key_print
+        Don't print the key used to encrypt the control-plane
+        certificates
+
+    skip_phases
+        List of phases to be skipped
+
+    skip_token_print
+        Skip printing of the default bootstrap token generated by
+        'kubeadm init'
+
+    token
+        The token to use for establishing bidirectional trust between
+        nodes and control-plane nodes. The token must match a regular
+        expression, that by default is [a-z0-9]{6}.[a-z0-9]{16}
+
+    token_ttl
+        The duration before the token is automatically deleted (1s, 2m,
+        3h). If set to '0' the token will never expire. Default value
+        is 24h0m0s
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.init pod_network_cidr='10.244.0.0/16'
+
+    '''
+    cmd = ['kubeadm', 'init']
+
+    if experimental_upload_certs:
+        cmd.append('--experimental-upload-certs')
+    if skip_certificate_key_print:
+        cmd.append('--skip-certificate-key-print')
+    if skip_token_print:
+        cmd.append('--skip-token-print')
+
+    parameters = [('apiserver-advertise-address', apiserver_advertise_address),
+                  ('apiserver-bind-port', apiserver_bind_port),
+                  ('apiserver-cert-extra-sans', apiserver_cert_extra_sans),
+                  ('cert-dir', cert_dir),
+                  ('certificate-key', certificate_key),
+                  ('config', config),
+                  ('cri-socket', cri_socket),
+                  ('feature-gates', feature_gates),
+                  ('ignore-preflight-errors', ignore_preflight_errors),
+                  ('image-repository', image_repository),
+                  ('kubernetes-version', kubernetes_version),
+                  ('node-name', node_name),
+                  ('pod-network-cidr', pod_network_cidr),
+                  ('service-cidr', service_cidr),
+                  ('service-dns-domain', service_dns_domain),
+                  ('skip-phases', skip_phases),
+                  ('token', token),
+                  ('token-ttl', token_ttl),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+# TODO(aplanas):
+# * init_phase_addon_all
+# * init_phase_addon_coredns
+# * init_phase_addon_kube_proxy
+# * init_phase_bootstrap_token
+# * init_phase_certs_all
+# * init_phase_certs_apiserver
+# * init_phase_certs_apiserver_etcd_client
+# * init_phase_certs_apiserver_kubelet_client
+# * init_phase_certs_ca
+# * init_phase_certs_etcd_ca
+# * init_phase_certs_etcd_healthcheck_client
+# * init_phase_certs_etcd_peer
+# * init_phase_certs_etcd_server
+# * init_phase_certs_front_proxy_ca
+# * init_phase_certs_front_proxy_client
+# * init_phase_certs_sa
+# * init_phase_control_plane_all
+# * init_phase_control_plane_apiserver
+# * init_phase_control_plane_controller_manager
+# * init_phase_control_plane_scheduler
+# * init_phase_etcd_local
+# * init_phase_kubeconfig_admin
+# * init_phase_kubeconfig_all
+# * init_phase_kubeconfig_controller_manager
+# * init_phase_kubeconfig_kubelet
+# * init_phase_kubeconfig_scheduler
+# * init_phase_kubelet_start
+# * init_phase_mark_control_plane
+# * init_phase_preflight
+# * init_phase_upload_certs
+# * init_phase_upload_config_all
+# * init_phase_upload_config_kubeadm
+# * init_phase_upload_config_kubelet
+
+
+def join(api_server_endpoint=None,
+         apiserver_advertise_address=None, apiserver_bind_port=None,
+         certificate_key=None, config=None, cri_socket=None,
+         discovery_file=None, discovery_token=None,
+         discovery_token_ca_cert_hash=None,
+         discovery_token_unsafe_skip_ca_verification=False,
+         experimental_control_plane=False,
+         ignore_preflight_errors=None, node_name=None,
+         skip_phases=None, tls_bootstrap_token=None, token=None,
+         rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Command to join to an existing cluster
+
+    api_server_endpoint
+        IP address or domain name and port of the API Server
+
+    apiserver_advertise_address
+        If the node should host a new control plane instance, the IP
+        address the API Server will advertise it is listening on
+
+    apiserver_bind_port
+        If the node should host a new control plane instance, the port
+        for the API Server to bind to (default 6443)
+
+    certificate_key
+        Use this key to decrypt the certificate secrets uploaded by
+        init
+
+    config
+        Path to a kubeadm configuration file
+
+    cri_socket
+        Path to the CRI socket to connect
+
+    discovery_file
+        For file-based discovery, a file or URL from which to load
+        cluster information
+
+    discovery_token
+        For token-based discovery, the token used to validate cluster
+        information fetched from the API Server
+
+    discovery_token_ca_cert_hash
+        For token-based discovery, validate that the root CA public key
+        matches this hash (format: "<type>:<value>")
+
+    discovery_token_unsafe_skip_ca_verification
+        For token-based discovery, allow joining without
+        'discovery-token-ca-cert-hash' pinning
+
+    experimental_control_plane
+        Create a new control plane instance on this node
+
+    ignore_preflight_errors
+        A list of checks whose errors will be shown as warnings
+
+    node_name
+        Specify the node name
+
+    skip_phases
+        List of phases to be skipped
+
+    tls_bootstrap_token
+        Specify the token used to temporarily authenticate with the
+        Kubernetes Control Plane while joining the node
+
+    token
+        Use this token for both discovery-token and tls-bootstrap-token
+        when those values are not provided
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.join 10.160.65.165:6443 token='token'
+
+    '''
+    cmd = ['kubeadm', 'join']
+
+    if api_server_endpoint:
+        cmd.append(api_server_endpoint)
+    if discovery_token_unsafe_skip_ca_verification:
+        cmd.append('--discovery-token-unsafe-skip-ca-verification')
+    if experimental_control_plane:
+        cmd.append('--experimental-control-plane')
+
+    parameters = [('apiserver-advertise-address', apiserver_advertise_address),
+                  ('apiserver-bind-port', apiserver_bind_port),
+                  ('certificate-key', certificate_key),
+                  ('config', config),
+                  ('cri-socket', cri_socket),
+                  ('discovery-file', discovery_file),
+                  ('discovery-token', discovery_token),
+                  ('discovery-token-ca-cert-hash',
+                   discovery_token_ca_cert_hash),
+                  ('ignore-preflight-errors', ignore_preflight_errors),
+                  ('node-name', node_name),
+                  ('skip-phases', skip_phases),
+                  ('tls-bootstrap-token', tls_bootstrap_token),
+                  ('token', token),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+# TODO(aplanas):
+# * join_phase_control_plane_join_all
+# * join_phase_control_plane_join_etcd
+# * join_phase_control_plane_join_mark_control_plane
+# * join_phase_control_plane_join_update_status
+# * join_phase_control_plane_prepare_all
+# * join_phase_control_plane_prepare_certs
+# * join_phase_control_plane_prepare_control_plane
+# * join_phase_control_plane_prepare_download_certs
+# * join_phase_control_plane_prepare_kubeconfig
+# * join_phase_kubelet_start
+# * join_phase_preflight
+
+
+def reset(cert_dir=None, cri_socket=None,
+          ignore_preflight_errors=None, kubeconfig=None, rootfs=None):
+    '''
+    .. versionadded:: TBD
+
+    Revert any changes made to this host by 'kubeadm init' or 'kubeadm
+    join'
+
+    cert_dir
+        The path to the directory where the certificates are stored
+        (default "/etc/kubernetes/pki")
+
+    cri_socket
+        Path to the CRI socket to connect
+
+    ignore_preflight_errors
+        A list of checks whose errors will be shown as warnings
+
+    kubeconfig
+        The kubeconfig file to use when talking to the cluster. The
+        default value is /etc/kubernetes/admin.conf
+
+    rootfs
+        The path to the real host root filesystem
+
+    CLI Example:
+
+    .. code-block:: bash
+
+       salt '*' kubeadm.reset
+
+    '''
+    cmd = ['kubeadm', 'reset', '--force']
+
+    parameters = [('cert-dir', cert_dir),
+                  ('cri-socket', cri_socket),
+                  ('ignore-preflight-errors', ignore_preflight_errors),
+                  ('kubeconfig', kubeconfig),
+                  ('rootfs', rootfs)]
+    for parameter, value in parameters:
+        if value:
+            cmd.extend(['--{}'.format(parameter), str(value)])
+
+    return _cmd(cmd)
+
+
+# TODO(aplanas):
+# * upgrade_apply
+# * upgrade_diff
+# * upgrade_node
+# * upgrade_plan
diff --git a/salt/modules/mac_brew_pkg.py b/salt/modules/mac_brew_pkg.py
index 5484955edc..ee13fc2102 100644
--- a/salt/modules/mac_brew_pkg.py
+++ b/salt/modules/mac_brew_pkg.py
@@ -290,7 +290,7 @@ def remove(name=None, pkgs=None, **kwargs):
return ret


-def refresh_db():
+def refresh_db(**kwargs):
'''
Update the homebrew package repository.

@@ -474,7 +474,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
return ret


-def upgrade_available(pkg):
+def upgrade_available(pkg, **kwargs):
'''
Check whether or not an upgrade is available for a given package

@@ -487,7 +487,7 @@ def upgrade_available(pkg):
return pkg in list_upgrades()


-def upgrade(refresh=True):
+def upgrade(refresh=True, **kwargs):
'''
Upgrade outdated, unpinned brews.

@@ -532,7 +532,7 @@ def upgrade(refresh=True):
return ret


-def info_installed(*names):
+def info_installed(*names, **kwargs):
'''
Return the information of the named package(s) installed on the system.

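The signature changes above (and in the platform modules that follow) all widen functions with **kwargs, so that generic callers such as the pkg state can pass extra keyword arguments (for instance root=...) without raising a TypeError on providers that do not support them. A minimal illustration of the difference:

def refresh_db_strict():
    return True

def refresh_db_tolerant(**kwargs):  # extra keywords are accepted and ignored
    return True

# refresh_db_strict(root='/mnt')   -> TypeError: unexpected keyword argument
# refresh_db_tolerant(root='/mnt') -> True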
diff --git a/salt/modules/mac_portspkg.py b/salt/modules/mac_portspkg.py
index 78a38d54a9..d403d0e29b 100644
--- a/salt/modules/mac_portspkg.py
+++ b/salt/modules/mac_portspkg.py
@@ -376,7 +376,7 @@ def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
return _list('outdated')


-def upgrade_available(pkg, refresh=True):
+def upgrade_available(pkg, refresh=True, **kwargs):
'''
Check whether or not an upgrade is available for a given package

@@ -389,7 +389,7 @@ def upgrade_available(pkg, refresh=True):
return pkg in list_upgrades(refresh=refresh)


-def refresh_db():
+def refresh_db(**kwargs):
'''
Update ports with ``port selfupdate``

@@ -405,7 +405,7 @@ def refresh_db():
return salt.utils.mac_utils.execute_return_success(cmd)


-def upgrade(refresh=True): # pylint: disable=W0613
+def upgrade(refresh=True, **kwargs): # pylint: disable=W0613
'''
Run a full upgrade using MacPorts 'port upgrade outdated'

diff --git a/salt/modules/openbsdpkg.py b/salt/modules/openbsdpkg.py
index b3b6bab912..819a24afb1 100644
--- a/salt/modules/openbsdpkg.py
+++ b/salt/modules/openbsdpkg.py
@@ -344,7 +344,7 @@ def purge(name=None, pkgs=None, **kwargs):
return remove(name=name, pkgs=pkgs, purge=True)


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package

diff --git a/salt/modules/pacmanpkg.py b/salt/modules/pacmanpkg.py
index e30296e8c8..35007e27f5 100644
--- a/salt/modules/pacmanpkg.py
+++ b/salt/modules/pacmanpkg.py
@@ -111,7 +111,7 @@ def latest_version(*names, **kwargs):
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package

@@ -393,7 +393,7 @@ def group_diff(name):
return ret


-def refresh_db(root=None):
+def refresh_db(root=None, **kwargs):
'''
Just run a ``pacman -Sy``, return a dict::

@@ -843,7 +843,7 @@ def purge(name=None, pkgs=None, **kwargs):
return _uninstall(action='purge', name=name, pkgs=pkgs)


-def file_list(*packages):
+def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
@@ -877,7 +877,7 @@ def file_list(*packages):
return {'errors': errors, 'files': ret}


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
@@ -913,7 +913,7 @@ def file_dict(*packages):
return {'errors': errors, 'packages': ret}


-def owner(*paths):
+def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0

diff --git a/salt/modules/pkgin.py b/salt/modules/pkgin.py
index 240f79ca26..dd5257c80d 100644
--- a/salt/modules/pkgin.py
+++ b/salt/modules/pkgin.py
@@ -112,7 +112,7 @@ def _splitpkg(name):
return name.split(';', 1)[0].rsplit('-', 1)


-def search(pkg_name):
+def search(pkg_name, **kwargs):
'''
Searches for an exact match using pkgin ^package$

@@ -225,7 +225,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs)


-def refresh_db(force=False):
+def refresh_db(force=False, **kwargs):
'''
Use pkg update to get latest pkg_summary

@@ -637,7 +637,7 @@ def _rehash():
__salt__['cmd.run']('rehash', output_loglevel='trace')


-def file_list(package):
+def file_list(package, **kwargs):
'''
List the files that belong to a package.

@@ -655,7 +655,7 @@ def file_list(package):
return ret


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
'''
.. versionchanged: 2016.3.0

diff --git a/salt/modules/pkgng.py b/salt/modules/pkgng.py
index 4a908084ea..7435628112 100644
--- a/salt/modules/pkgng.py
+++ b/salt/modules/pkgng.py
@@ -224,7 +224,7 @@ def version(*names, **kwargs):
info = salt.utils.functools.alias_function(version, 'info')


-def refresh_db(jail=None, chroot=None, root=None, force=False):
+def refresh_db(jail=None, chroot=None, root=None, force=False, **kwargs):
'''
Refresh PACKAGESITE contents

@@ -2441,7 +2441,7 @@ def _parse_upgrade(stdout):
return result


-def version_cmp(pkg1, pkg2, ignore_epoch=False):
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
diff --git a/salt/modules/rpm_lowpkg.py b/salt/modules/rpm_lowpkg.py
index 439404ae90..c8a87276b2 100644
--- a/salt/modules/rpm_lowpkg.py
+++ b/salt/modules/rpm_lowpkg.py
@@ -76,7 +76,7 @@ def bin_pkg_info(path, saltenv='base'):
minion so that it can be examined.

saltenv : base
- Salt fileserver envrionment from which to retrieve the package. Ignored
+ Salt fileserver environment from which to retrieve the package. Ignored
if ``path`` is a local file path on the minion.

CLI Example:
@@ -128,12 +128,15 @@ def bin_pkg_info(path, saltenv='base'):
return ret


-def list_pkgs(*packages):
+def list_pkgs(*packages, **kwargs):
'''
List the packages currently installed in a dict::

{'<package_name>': '<version>'}

+ root
+ use root as top level directory (default: "/")
+
CLI Example:

.. code-block:: bash
@@ -141,8 +144,11 @@ def list_pkgs(*packages):
salt '*' lowpkg.list_pkgs
'''
pkgs = {}
- cmd = ['rpm', '-q' if packages else '-qa',
- '--queryformat', r'%{NAME} %{VERSION}\n']
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ cmd.extend(['-q' if packages else '-qa',
+ '--queryformat', r'%{NAME} %{VERSION}\n'])
if packages:
cmd.extend(packages)
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
@@ -158,6 +164,9 @@ def verify(*packages, **kwargs):
'''
Runs an rpm -Va on a system, and returns the results in a dict

+ root
+ use root as top level directory (default: "/")
+
Files with an attribute of config, doc, ghost, license or readme in the
package header can be ignored using the ``ignore_types`` keyword argument

@@ -199,6 +208,8 @@ def verify(*packages, **kwargs):
verify_options = [x.strip() for x in six.text_type(verify_options).split(',')]

cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
cmd.extend(['--' + x for x in verify_options])
if packages:
cmd.append('-V')
@@ -258,6 +269,9 @@ def modified(*packages, **flags):

.. versionadded:: 2015.5.0

+ root
+ use root as top level directory (default: "/")
+
CLI examples:

.. code-block:: bash
@@ -266,10 +280,12 @@ def modified(*packages, **flags):
salt '*' lowpkg.modified httpd postfix
salt '*' lowpkg.modified
'''
- ret = __salt__['cmd.run_all'](
- ['rpm', '-Va'] + list(packages),
- output_loglevel='trace',
- python_shell=False)
+ cmd = ['rpm']
+ if flags.get('root'):
+ cmd.extend(['--root', flags.pop('root')])
+ cmd.append('-Va')
+ cmd.extend(packages)
+ ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)

data = {}

@@ -324,12 +340,15 @@ def modified(*packages, **flags):
return filtered_data


-def file_list(*packages):
+def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's rpm database (not generally
recommended).

+ root
+ use root as top level directory (default: "/")
+
CLI Examples:

.. code-block:: bash
@@ -338,12 +357,15 @@ def file_list(*packages):
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
'''
- if not packages:
- cmd = ['rpm', '-qla']
- else:
- cmd = ['rpm', '-ql']
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+
+ cmd.append('-ql' if packages else '-qla')
+ if packages:
# Can't concatenate a tuple, must do a list.extend()
cmd.extend(packages)
+
ret = __salt__['cmd.run'](
cmd,
output_loglevel='trace',
@@ -351,12 +373,15 @@ def file_list(*packages):
return {'errors': [], 'files': ret}


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
'''
List the files that belong to a package, sorted by group. Not specifying
any packages will return a list of _every_ file on the system's rpm
database (not generally recommended).

+ root
+ use root as top level directory (default: "/")
+
CLI Examples:

.. code-block:: bash
@@ -368,8 +393,11 @@ def file_dict(*packages):
errors = []
ret = {}
pkgs = {}
- cmd = ['rpm', '-q' if packages else '-qa',
- '--queryformat', r'%{NAME} %{VERSION}\n']
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ cmd.extend(['-q' if packages else '-qa',
+ '--queryformat', r'%{NAME} %{VERSION}\n'])
if packages:
cmd.extend(packages)
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
@@ -380,8 +408,10 @@ def file_dict(*packages):
comps = line.split()
pkgs[comps[0]] = {'version': comps[1]}
for pkg in pkgs:
- files = []
- cmd = ['rpm', '-ql', pkg]
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ cmd.extend(['-ql', pkg])
out = __salt__['cmd.run'](
- ['rpm', '-ql', pkg],
+ cmd,
output_loglevel='trace',
@@ -390,7 +420,7 @@ def file_dict(*packages):
return {'errors': errors, 'packages': ret}


-def owner(*paths):
+def owner(*paths, **kwargs):
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. If a single path is passed, a string will be returned,
@@ -400,6 +430,9 @@ def owner(*paths):
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.

+ root
+ use root as top level directory (default: "/")
+
CLI Examples:

.. code-block:: bash
@@ -411,7 +444,10 @@ def owner(*paths):
return ''
ret = {}
for path in paths:
- cmd = ['rpm', '-qf', '--queryformat', '%{name}', path]
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ cmd.extend(['-qf', '--queryformat', '%{name}', path])
ret[path] = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
@@ -471,6 +507,9 @@ def info(*packages, **kwargs):
:param all_versions:
Return information for all installed versions of the packages

+ :param root:
+ use root as top level directory (default: "/")
+
:return:

CLI example:
@@ -493,7 +532,14 @@ def info(*packages, **kwargs):
else:
size_tag = '%{SIZE}'

- cmd = packages and "rpm -q {0}".format(' '.join(packages)) or "rpm -qa"
+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ if packages:
+ cmd.append('-q')
+ cmd.extend(packages)
+ else:
+ cmd.append('-qa')

# Construct query format
attr_map = {
@@ -544,6 +590,7 @@ def info(*packages, **kwargs):
query.append(attr_map['description'])
query.append("-----\\n")

+ cmd = ' '.join(cmd)
call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))),
output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True)
if call['retcode'] != 0:
@@ -744,10 +791,13 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False)


-def checksum(*paths):
+def checksum(*paths, **kwargs):
'''
Return if the signature of a RPM file is valid.

+ root
+ use root as top level directory (default: "/")
+
CLI Example:

.. code-block:: bash
@@ -760,9 +810,14 @@ def checksum(*paths):
if not paths:
raise CommandExecutionError("No package files has been specified.")

+ cmd = ['rpm']
+ if kwargs.get('root'):
+ cmd.extend(['--root', kwargs['root']])
+ cmd.extend(['-K', '--quiet'])
for package_file in paths:
+ cmd_ = cmd + [package_file]
ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and
- not __salt__['cmd.retcode'](["rpm", "-K", "--quiet", package_file],
+ not __salt__['cmd.retcode'](cmd_,
ignore_retcode=True,
output_loglevel='trace',
python_shell=False))
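The rpm_lowpkg changes above repeatedly apply one pattern: when a root keyword is given, the rpm invocation is prefixed with --root <dir> before the query flags, so the query runs against the rpm database under that directory instead of '/'. A minimal sketch of the pattern (rpm_cmd is an illustrative helper, not part of the module):

def rpm_cmd(*args, root=None):
    # 'rpm --root <dir> ...' queries the rpmdb below <dir> instead of '/'.
    cmd = ['rpm']
    if root:
        cmd.extend(['--root', root])
    cmd.extend(args)
    return cmd

# rpm_cmd('-qa', root='/mnt') -> ['rpm', '--root', '/mnt', '-qa']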
diff --git a/salt/modules/solarisipspkg.py b/salt/modules/solarisipspkg.py
index 3da1dbe5a2..43fd213726 100644
--- a/salt/modules/solarisipspkg.py
+++ b/salt/modules/solarisipspkg.py
@@ -105,7 +105,7 @@ def _ips_get_pkgversion(line):
return line.split()[0].split('@')[1].strip()


-def refresh_db(full=False):
+def refresh_db(full=False, **kwargs):
'''
Updates the remote repos database.

@@ -129,7 +129,7 @@ def refresh_db(full=False):
return __salt__['cmd.retcode']('/bin/pkg refresh') == 0


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
'''
Check if there is an upgrade available for a certain package
Accepts full or partial FMRI. Returns all matches found.
diff --git a/salt/modules/solarispkg.py b/salt/modules/solarispkg.py
index 2a828f6e9c..b28349a7d8 100644
--- a/salt/modules/solarispkg.py
+++ b/salt/modules/solarispkg.py
@@ -169,7 +169,7 @@ def latest_version(*names, **kwargs):
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package

diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py
index 743758bf9c..e39962f9ac 100644
--- a/salt/modules/systemd_service.py
+++ b/salt/modules/systemd_service.py
@@ -1364,3 +1364,58 @@ def execs(root=None):
continue
ret[service] = data['ExecStart']['path']
return ret
+
+
+def firstboot(locale=None, locale_message=None, keymap=None,
+ timezone=None, hostname=None, machine_id=None,
+ root=None):
+ '''
+ .. versionadded:: TBD
+
+ Call systemd-firstboot to configure basic settings of the system
+
+ locale
+ Set primary locale (LANG=)
+
+ locale_message
+ Set message locale (LC_MESSAGES=)
+
+ keymap
+ Set keymap
+
+ timezone
+ Set timezone
+
+ hostname
+ Set host name
+
+ machine_id
+ Set machine ID
+
+ root
+ Operate on an alternative filesystem root
+
+ CLI Example:
+
+ salt '*' service.firstboot keymap=jp locale=en_US.UTF-8
+
+ '''
+ cmd = ['systemd-firstboot']
+ parameters = [('locale', locale),
+ ('locale-message', locale_message),
+ ('keymap', keymap),
+ ('timezone', timezone),
+ ('hostname', hostname),
+ ('machine-id', machine_id),
+ ('root', root)]
+ for parameter, value in parameters:
+ if value:
+ cmd.extend(['--{}'.format(parameter), str(value)])
+
+ out = __salt__['cmd.run_all'](cmd)
+
+ if out['retcode'] != 0:
+ raise CommandExecutionError(
+ 'systemd-firstboot error: {}'.format(out['stderr']))
+
+ return True
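Example invocation of the new function and, given the flag mapping above, the command it would run on the minion:

salt '*' service.firstboot locale=en_US.UTF-8 timezone=Europe/Berlin root=/mnt
# runs: systemd-firstboot --locale en_US.UTF-8 --timezone Europe/Berlin --root /mnt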
diff --git a/salt/modules/xbpspkg.py b/salt/modules/xbpspkg.py
index e493f8c80f..b5d7d8a477 100644
--- a/salt/modules/xbpspkg.py
+++ b/salt/modules/xbpspkg.py
@@ -121,7 +121,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret


-def list_upgrades(refresh=True):
+def list_upgrades(refresh=True, **kwargs):
'''
Check whether or not an upgrade is available for all packages

@@ -247,7 +247,7 @@ def latest_version(*names, **kwargs):
available_version = latest_version


-def upgrade_available(name):
+def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package

@@ -260,7 +260,7 @@ def upgrade_available(name):
return latest_version(name) != ''


-def refresh_db():
+def refresh_db(**kwargs):
'''
Update list of available packages from installed repos

@@ -300,7 +300,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs)


-def upgrade(refresh=True):
+def upgrade(refresh=True, **kwargs):
'''
Run a full system upgrade

@@ -484,7 +484,7 @@ def remove(name=None, pkgs=None, recursive=True, **kwargs):
return salt.utils.data.compare_dicts(old, new)


-def list_repos():
+def list_repos(**kwargs):
'''
List all repos known by XBPS

@@ -607,7 +607,7 @@ def add_repo(repo, conffile='/usr/share/xbps.d/15-saltstack.conf'):
return True


-def del_repo(repo):
+def del_repo(repo, **kwargs):
'''
Remove an XBPS repository from the system.

diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index 3ddf989511..88d74020b3 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -619,7 +619,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs)


-def version_cmp(pkg1, pkg2, ignore_epoch=False):
+def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
'''
.. versionadded:: 2015.5.4

@@ -1012,7 +1012,7 @@ def list_upgrades(refresh=True, **kwargs):
list_updates = salt.utils.functools.alias_function(list_upgrades, 'list_updates')


-def list_downloaded():
+def list_downloaded(**kwargs):
'''
.. versionadded:: 2017.7.0

@@ -1948,13 +1948,13 @@ def upgrade(name=None,


def update(name=None,
- pkgs=None,
- refresh=True,
- skip_verify=False,
- normalize=True,
- minimal=False,
- obsoletes=False,
- **kwargs):
+ pkgs=None,
+ refresh=True,
+ skip_verify=False,
+ normalize=True,
+ minimal=False,
+ obsoletes=False,
+ **kwargs):
'''
.. versionadded:: 2019.2.0

@@ -2647,7 +2647,7 @@ def group_install(name,
groupinstall = salt.utils.functools.alias_function(group_install, 'groupinstall')


-def list_repos(basedir=None):
+def list_repos(basedir=None, **kwargs):
'''
Lists all repos in <basedir> (default: all dirs in `reposdir` yum option).

@@ -2969,7 +2969,7 @@ def _parse_repo_file(filename):
return (headers, salt.utils.data.decode(config))


-def file_list(*packages):
+def file_list(*packages, **kwargs):
'''
.. versionadded:: 2014.1.0

@@ -2988,7 +2988,7 @@ def file_list(*packages):
return __salt__['lowpkg.file_list'](*packages)


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
'''
.. versionadded:: 2014.1.0

@@ -3007,7 +3007,7 @@ def file_dict(*packages):
return __salt__['lowpkg.file_dict'](*packages)


-def owner(*paths):
+def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0

@@ -3095,7 +3095,7 @@ def modified(*packages, **flags):


@salt.utils.decorators.path.which('yumdownloader')
-def download(*packages):
+def download(*packages, **kwargs):
'''
.. versionadded:: 2015.5.0

@@ -3168,7 +3168,7 @@ def download(*packages):
return ret


-def diff(*paths):
+def diff(*paths, **kwargs):
'''
Return a formatted diff between current files and original in a package.
NOTE: this function includes all files (configuration and not), but does
@@ -3239,7 +3239,7 @@ def _get_patches(installed_only=False):
return patches


-def list_patches(refresh=False):
+def list_patches(refresh=False, **kwargs):
'''
.. versionadded:: 2017.7.0

@@ -3262,7 +3262,7 @@ def list_patches(refresh=False):
return _get_patches()


-def list_installed_patches():
+def list_installed_patches(**kwargs):
'''
.. versionadded:: 2017.7.0

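The zypperpkg changes below thread the optional root through a single call-state object: __zypper__(root=...) records per-call options that become a --root argument when the command line is finally built, and the recorded state is reset after each call. A reduced sketch of that call-state pattern (Caller is an illustrative stand-in for the _Zypper class):

class Caller(object):
    def __init__(self):
        self._root = None

    def __call__(self, root=None):
        # Record per-call state and return self so calls can be chained.
        self._root = root
        return self

    def call(self, *args):
        cmd = ['zypper']
        if self._root:
            cmd.extend(['--root', self._root])
        cmd.extend(args)
        self._root = None  # reset the per-call state, as _Zypper does
        return cmd

# Caller()(root='/mnt').call('refresh') -> ['zypper', '--root', '/mnt', 'refresh']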
diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
index 37428cf67c..582caffb59 100644
--- a/salt/modules/zypperpkg.py
+++ b/salt/modules/zypperpkg.py
@@ -99,6 +99,7 @@ class _Zypper(object):

LOCK_EXIT_CODE = 7
XML_DIRECTIVES = ['-x', '--xmlout']
+ # ZYPPER_LOCK is not affected by --root
ZYPPER_LOCK = '/var/run/zypp.pid'
TAG_RELEASED = 'zypper/released'
TAG_BLOCKED = 'zypper/blocked'
@@ -107,7 +108,6 @@ class _Zypper(object):
'''
Constructor
'''
- self.__called = False
self._reset()

def _reset(self):
@@ -129,6 +129,10 @@ class _Zypper(object):
self.__refresh = False
self.__ignore_repo_failure = False
self.__systemd_scope = False
+ self.__root = None
+
+ # Call status
+ self.__called = False

def __call__(self, *args, **kwargs):
'''
@@ -136,11 +140,17 @@ class _Zypper(object):
:param kwargs:
:return:
'''
+ # Reset after the call
+ if self.__called:
+ self._reset()
+
# Ignore exit code for 106 (repo is not available)
if 'no_repo_failure' in kwargs:
self.__ignore_repo_failure = kwargs['no_repo_failure']
if 'systemd_scope' in kwargs:
self.__systemd_scope = kwargs['systemd_scope']
+ if 'root' in kwargs:
+ self.__root = kwargs['root']
return self

def __getattr__(self, item):
@@ -153,7 +163,6 @@ class _Zypper(object):
# Reset after the call
if self.__called:
self._reset()
- self.__called = False

if item == 'xml':
self.__xml = True
@@ -284,6 +293,8 @@ class _Zypper(object):
self.__cmd.append('--xmlout')
if not self.__refresh and '--no-refresh' not in args:
self.__cmd.append('--no-refresh')
+ if self.__root:
+ self.__cmd.extend(['--root', self.__root])

self.__cmd.extend(args)
kwargs['output_loglevel'] = 'trace'
@@ -442,7 +453,7 @@ def _clean_cache():
__context__.pop(cache_name, None)


-def list_upgrades(refresh=True, **kwargs):
+def list_upgrades(refresh=True, root=None, **kwargs):
'''
List all available package upgrades on this system

@@ -451,6 +462,9 @@ def list_upgrades(refresh=True, **kwargs):
If set to False it depends on zypper if a refresh is
executed.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -458,7 +472,7 @@ def list_upgrades(refresh=True, **kwargs):
salt '*' pkg.list_upgrades
'''
if refresh:
- refresh_db()
+ refresh_db(root)

ret = dict()
cmd = ['list-updates']
@@ -506,6 +520,9 @@ def info_installed(*names, **kwargs):
:param all_versions:
Include information for all versions of the packages installed on the minion.

+ :param root:
+ Operate on a different root directory.
+
CLI example:

.. code-block:: bash
@@ -546,6 +563,9 @@ def info_available(*names, **kwargs):
If set to False it depends on zypper if a refresh is
executed or not.

+ root
+ operate on a different root directory.
+
CLI example:

.. code-block:: bash
@@ -560,9 +580,11 @@ def info_available(*names, **kwargs):
else:
names = sorted(list(set(names)))

+ root = kwargs.get('root', None)
+
# Refresh db before extracting the latest package
if kwargs.get('refresh', True):
- refresh_db()
+ refresh_db(root)

pkg_info = []
batch = names[:]
@@ -571,7 +593,8 @@ def info_available(*names, **kwargs):
# Run in batches
while batch:
pkg_info.extend(re.split(r"Information for package*",
- __zypper__.nolock.call('info', '-t', 'package', *batch[:batch_size])))
+ __zypper__(root=root).nolock.call('info', '-t', 'package',
+ *batch[:batch_size])))
batch = batch[batch_size:]

for pkg_data in pkg_info:
@@ -631,6 +654,9 @@ def latest_version(*names, **kwargs):
If set to False it depends on zypper if a refresh is
executed or not.

+ root
+ operate on a different root directory.
+
CLI example:

.. code-block:: bash
@@ -673,6 +699,9 @@ def upgrade_available(name, **kwargs):
If set to False it depends on zypper if a refresh is
executed or not.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -689,6 +718,9 @@ def version(*names, **kwargs):
installed. If more than one package name is specified, a dict of
name/version pairs is returned.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -699,7 +731,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs) or {}


-def version_cmp(ver1, ver2, ignore_epoch=False):
+def version_cmp(ver1, ver2, ignore_epoch=False, **kwargs):
'''
.. versionadded:: 2015.5.4

@@ -721,7 +753,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False):
return __salt__['lowpkg.version_cmp'](ver1, ver2, ignore_epoch=ignore_epoch)


-def list_pkgs(versions_as_list=False, **kwargs):
+def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
'''
List the packages currently installed as a dict. By default, the dict
contains versions as a comma separated string::
@@ -733,6 +765,13 @@ def list_pkgs(versions_as_list=False, **kwargs):

{'<package_name>': ['<version>', '<version>']}

+ root:
+ operate on a different root directory.
+
+ includes:
+ List of types of packages to include (package, patch, pattern, product)
+ By default packages are always included
+
attr:
If a list of package attributes is specified, returned value will
contain them in addition to version, eg.::
@@ -770,12 +809,18 @@ def list_pkgs(versions_as_list=False, **kwargs):
if attr is not None:
attr = salt.utils.args.split_input(attr)

+ includes = includes if includes else []
+
contextkey = 'pkg.list_pkgs'

+ # TODO(aplanas): this cached value depends on the parameters
if contextkey not in __context__:
ret = {}
- cmd = ['rpm', '-qa', '--queryformat',
- salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n']
+ cmd = ['rpm']
+ if root:
+ cmd.extend(['--root', root])
+ cmd.extend(['-qa', '--queryformat',
+ salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'])
output = __salt__['cmd.run'](cmd,
python_shell=False,
output_loglevel='trace')
@@ -810,6 +855,28 @@ def list_pkgs(versions_as_list=False, **kwargs):
continue
_ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])

+ for include in includes:
+ if include in ('pattern', 'patch'):
+ if include == 'pattern':
+ pkgs = list_installed_patterns(root=root)
+ elif include == 'patch':
+ pkgs = list_installed_patches(root=root)
+ else:
+ pkgs = []
+ for pkg in pkgs:
+ pkg_extended_name = '{}:{}'.format(include, pkg)
+ info = info_available(pkg_extended_name,
+ refresh=False,
+ root=root)
+ _ret[pkg_extended_name] = [{
+ 'epoch': None,
+ 'version': info[pkg]['version'],
+ 'release': None,
+ 'arch': info[pkg]['arch'],
+ 'install_date': None,
+ 'install_date_time_t': None,
+ }]
+
__context__[contextkey] = _ret

return __salt__['pkg_resource.format_pkg_list'](
@@ -861,6 +928,9 @@ def list_repo_pkgs(*args, **kwargs):
When ``True``, the return data for each package will be organized by
repository.

+ root
+ operate on a different root directory.
+
CLI Examples:

.. code-block:: bash
@@ -893,7 +963,8 @@ def list_repo_pkgs(*args, **kwargs):
return True
return False

- for node in __zypper__.xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
+ root = kwargs.get('root') or None
+ for node in __zypper__(root=root).xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
pkginfo = dict(node.attributes.items())
try:
if pkginfo['kind'] != 'package':
@@ -935,23 +1006,27 @@ def list_repo_pkgs(*args, **kwargs):
return byrepo_ret


-def _get_configured_repos():
+def _get_configured_repos(root=None):
'''
Get all the info about repositories from the configurations.
'''

+ repos = os.path.join(root, os.path.relpath(REPOS, os.path.sep)) if root else REPOS
repos_cfg = configparser.ConfigParser()
- repos_cfg.read([REPOS + '/' + fname for fname in os.listdir(REPOS) if fname.endswith(".repo")])
+ if os.path.exists(repos):
+ repos_cfg.read([repos + '/' + fname for fname in os.listdir(repos) if fname.endswith(".repo")])
+ else:
+ log.warning('Repositories not found in {}'.format(repos))

return repos_cfg


-def _get_repo_info(alias, repos_cfg=None):
+def _get_repo_info(alias, repos_cfg=None, root=None):
'''
Get one repo meta-data.
'''
try:
- meta = dict((repos_cfg or _get_configured_repos()).items(alias))
+ meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
meta['alias'] = alias
for key, val in six.iteritems(meta):
if val in ['0', '1']:
@@ -963,51 +1038,60 @@ def _get_repo_info(alias, repos_cfg=None):
return {}


-def get_repo(repo, **kwargs): # pylint: disable=unused-argument
+def get_repo(repo, root=None, **kwargs): # pylint: disable=unused-argument
'''
Display a repo.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash

salt '*' pkg.get_repo alias
'''
- return _get_repo_info(repo)
+ return _get_repo_info(repo, root=root)


-def list_repos():
+def list_repos(root=None, **kwargs):
'''
Lists all repos.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash

salt '*' pkg.list_repos
'''
- repos_cfg = _get_configured_repos()
+ repos_cfg = _get_configured_repos(root=root)
all_repos = {}
for alias in repos_cfg.sections():
- all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)
+ all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)

return all_repos


-def del_repo(repo):
+def del_repo(repo, root=None):
'''
Delete a repo.

+ root
+ operate on a different root directory.
+
CLI Examples:

.. code-block:: bash

salt '*' pkg.del_repo alias
'''
- repos_cfg = _get_configured_repos()
+ repos_cfg = _get_configured_repos(root=root)
for alias in repos_cfg.sections():
if alias == repo:
- doc = __zypper__.xml.call('rr', '--loose-auth', '--loose-query', alias)
+ doc = __zypper__(root=root).xml.call('rr', '--loose-auth', '--loose-query', alias)
msg = doc.getElementsByTagName('message')
if doc.getElementsByTagName('progress') and msg:
return {
@@ -1046,6 +1130,9 @@ def mod_repo(repo, **kwargs):
If set to True, automatically trust and import public GPG key for
the repository.

+ root
+ operate on a different root directory.
+
Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a
URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).
@@ -1058,7 +1145,8 @@ def mod_repo(repo, **kwargs):
salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
'''

- repos_cfg = _get_configured_repos()
+ root = kwargs.get('root') or None
+ repos_cfg = _get_configured_repos(root=root)
added = False

# An attempt to add new one?
@@ -1078,7 +1166,7 @@ def mod_repo(repo, **kwargs):

# Is there already such repo under different alias?
for alias in repos_cfg.sections():
- repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)
+ repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)

# Complete user URL, in case it is not
new_url = _urlparse(url)
@@ -1100,17 +1188,17 @@ def mod_repo(repo, **kwargs):
)

# Add new repo
- __zypper__.xml.call('ar', url, repo)
+ __zypper__(root=root).xml.call('ar', url, repo)

# Verify the repository has been added
- repos_cfg = _get_configured_repos()
+ repos_cfg = _get_configured_repos(root=root)
if repo not in repos_cfg.sections():
raise CommandExecutionError(
'Failed add new repository \'{0}\' for unspecified reason. '
'Please check zypper logs.'.format(repo))
added = True

- repo_info = _get_repo_info(repo)
+ repo_info = _get_repo_info(repo, root=root)
if (
not added and 'baseurl' in kwargs and
not (kwargs['baseurl'] == repo_info['baseurl'])
@@ -1119,8 +1207,8 @@ def mod_repo(repo, **kwargs):
# we need to remove the repository and add it again with the new baseurl
repo_info.update(kwargs)
repo_info.setdefault('cache', False)
- del_repo(repo)
- return mod_repo(repo, **repo_info)
+ del_repo(repo, root=root)
+ return mod_repo(repo, root=root, **repo_info)

# Modify added or existing repo according to the options
cmd_opt = []
@@ -1153,7 +1241,7 @@ def mod_repo(repo, **kwargs):

if cmd_opt:
cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
- __zypper__.refreshable.xml.call(*cmd_opt)
+ __zypper__(root=root).refreshable.xml.call(*cmd_opt)

comment = None
if call_refresh:
@@ -1161,23 +1249,26 @@ def mod_repo(repo, **kwargs):
# --gpg-auto-import-keys is not doing anything
# so we need to specifically refresh here with --gpg-auto-import-keys
refresh_opts = global_cmd_opt + ['refresh'] + [repo]
- __zypper__.xml.call(*refresh_opts)
+ __zypper__(root=root).xml.call(*refresh_opts)
elif not added and not cmd_opt:
comment = 'Specified arguments did not result in modification of repo'

- repo = get_repo(repo)
+ repo = get_repo(repo, root=root)
if comment:
repo['comment'] = comment

return repo


-def refresh_db():
+def refresh_db(root=None):
'''
Force a repository refresh by calling ``zypper refresh --force``, return a dict::

{'<database name>': Bool}

+ root
+ operate on a different root directory.
+
CLI Example:
.. code-block:: bash

@@ -1187,7 +1278,7 @@ def refresh_db():
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
ret = {}
- out = __zypper__.refreshable.call('refresh', '--force')
+ out = __zypper__(root=root).refreshable.call('refresh', '--force')

for line in out.splitlines():
if not line:
@@ -1206,6 +1297,12 @@ def refresh_db():
return ret


+def _find_types(pkgs):
+ '''From a list of package names, find the prefixes of the package types.'''
+ return sorted({pkg.split(':', 1)[0] for pkg in pkgs
+ if len(pkg.split(':', 1)) == 2})
+
+
def install(name=None,
refresh=False,
fromrepo=None,
@@ -1215,6 +1312,8 @@ def install(name=None,
skip_verify=False,
version=None,
ignore_repo_failure=False,
+ no_recommends=False,
+ root=None,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
@@ -1303,6 +1402,12 @@ def install(name=None,
Zypper returns error code 106 if one of the repositories are not available for various reasons.
In case to set strict check, this parameter needs to be set to True. Default: False.

+ no_recommends
+ Do not install recommended packages, only required ones.
+
+ root
+ operate on a different root directory.
+
diff_attr:
If a list of package attributes is specified, returned value will
contain them, eg.::
@@ -1342,7 +1447,7 @@ def install(name=None,
'arch': '<new-arch>'}}}
'''
if refresh:
- refresh_db()
+ refresh_db(root)

try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs, sources, **kwargs)
@@ -1352,7 +1457,7 @@ def install(name=None,
if pkg_params is None or len(pkg_params) == 0:
return {}

- version_num = Wildcard(__zypper__)(name, version)
+ version_num = Wildcard(__zypper__(root=root))(name, version)

if version_num:
if pkgs is None and sources is None:
@@ -1377,17 +1482,20 @@ def install(name=None,
targets.append(target)
elif pkg_type == 'advisory':
targets = []
- cur_patches = list_patches()
+ cur_patches = list_patches(root=root)
for advisory_id in pkg_params:
if advisory_id not in cur_patches:
raise CommandExecutionError('Advisory id "{0}" not found'.format(advisory_id))
else:
- targets.append(advisory_id)
+ targets.append('patch:{}'.format(advisory_id))
else:
targets = pkg_params

diff_attr = kwargs.get("diff_attr")
- old = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
+
+ includes = _find_types(targets)
+ old = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
+
downgrades = []
if fromrepo:
fromrepoopt = ['--force', '--force-resolution', '--from', fromrepo]
@@ -1406,10 +1514,10 @@ def install(name=None,
cmd_install.append('--download-only')
if fromrepo:
cmd_install.extend(fromrepoopt)
+ if no_recommends:
+ cmd_install.append('--no-recommends')

errors = []
- if pkg_type == 'advisory':
- targets = ["patch:{0}".format(t) for t in targets]

# Split the targets into batches of 500 packages each, so that
# the maximal length of the command line is not broken
@@ -1417,7 +1525,7 @@ def install(name=None,
while targets:
cmd = cmd_install + targets[:500]
targets = targets[500:]
- for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope).call(*cmd).splitlines():
+ for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope, root=root).call(*cmd).splitlines():
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
if match:
downgrades.append(match.group(1))
@@ -1425,12 +1533,17 @@ def install(name=None,
while downgrades:
cmd = cmd_install + ['--force'] + downgrades[:500]
downgrades = downgrades[500:]
- __zypper__(no_repo_failure=ignore_repo_failure).call(*cmd)
+ __zypper__(no_repo_failure=ignore_repo_failure, root=root).call(*cmd)

_clean_cache()
- new = list_pkgs(attr=diff_attr) if not downloadonly else list_downloaded()
+ new = list_pkgs(attr=diff_attr, root=root, includes=includes) if not downloadonly else list_downloaded(root)
ret = salt.utils.data.compare_dicts(old, new)

+ # If something else from packages are included in the search,
+ # better clean the cache.
+ if includes:
+ _clean_cache()
+
if errors:
raise CommandExecutionError(
'Problem encountered {0} package(s)'.format(
@@ -1448,6 +1561,8 @@ def upgrade(refresh=True,
fromrepo=None,
novendorchange=False,
skip_verify=False,
+ no_recommends=False,
+ root=None,
**kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
@@ -1487,6 +1602,12 @@ def upgrade(refresh=True,
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)

+ no_recommends
+ Do not install recommended packages, only required ones.
+
+ root
+ Operate on a different root directory.
+
Returns a dictionary containing the changes:

.. code-block:: python
@@ -1509,7 +1630,7 @@ def upgrade(refresh=True,
cmd_update.insert(0, '--no-gpg-checks')

if refresh:
- refresh_db()
+ refresh_db(root)

if dryrun:
cmd_update.append('--dry-run')
@@ -1530,16 +1651,20 @@ def upgrade(refresh=True,
else:
log.warning('Disabling vendor changes is not supported on this Zypper version')

+ if no_recommends:
+ cmd_update.append('--no-recommends')
+ log.info('Disabling recommendations')
+
if dryrun:
# Creates a solver test case for debugging.
log.info('Executing debugsolver and performing a dry-run dist-upgrade')
- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update + ['--debug-solver'])
+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update + ['--debug-solver'])

- old = list_pkgs()
+ old = list_pkgs(root=root)

- __zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update)
+ __zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update)
_clean_cache()
- new = list_pkgs()
+ new = list_pkgs(root=root)
ret = salt.utils.data.compare_dicts(old, new)

if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:
@@ -1560,7 +1685,7 @@ def upgrade(refresh=True,
return ret


-def _uninstall(name=None, pkgs=None):
+def _uninstall(name=None, pkgs=None, root=None):
'''
Remove and purge do identical things but with different Zypper commands,
this function performs the common logic.
@@ -1570,7 +1695,8 @@ def _uninstall(name=None, pkgs=None):
except MinionError as exc:
raise CommandExecutionError(exc)

- old = list_pkgs()
+ includes = _find_types(pkg_params.keys())
+ old = list_pkgs(root=root, includes=includes)
targets = []
for target in pkg_params:
# Check if package version set to be removed is actually installed:
@@ -1586,11 +1712,12 @@ def _uninstall(name=None, pkgs=None):

errors = []
while targets:
- __zypper__(systemd_scope=systemd_scope).call('remove', *targets[:500])
+ __zypper__(systemd_scope=systemd_scope, root=root).call('remove', *targets[:500])
targets = targets[500:]

_clean_cache()
- ret = salt.utils.data.compare_dicts(old, list_pkgs())
+ new = list_pkgs(root=root, includes=includes)
+ ret = salt.utils.data.compare_dicts(old, new)

if errors:
raise CommandExecutionError(
@@ -1627,7 +1754,7 @@ def normalize_name(name):
return name


-def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
+def remove(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@@ -1655,6 +1782,9 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.

+ root
+ Operate on a different root directory.
+
.. versionadded:: 0.16.0


@@ -1668,10 +1798,10 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
- return _uninstall(name=name, pkgs=pkgs)
+ return _uninstall(name=name, pkgs=pkgs, root=root)


-def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
+def purge(name=None, pkgs=None, root=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@@ -1700,6 +1830,9 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.

+ root
+ Operate on a different root directory.
+
.. versionadded:: 0.16.0


@@ -1713,13 +1846,16 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
- return _uninstall(name=name, pkgs=pkgs)
+ return _uninstall(name=name, pkgs=pkgs, root=root)


-def list_locks():
+def list_locks(root=None):
'''
List current package locks.

+ root
+ operate on a different root directory.
+
Return a dict containing the locked package with attributes::

{'<package>': {'case_sensitive': '<case_sensitive>',
@@ -1733,8 +1869,9 @@ def list_locks():
salt '*' pkg.list_locks
'''
locks = {}
- if os.path.exists(LOCKS):
- with salt.utils.files.fopen(LOCKS) as fhr:
+ _locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
+ try:
+ with salt.utils.files.fopen(_locks) as fhr:
items = salt.utils.stringutils.to_unicode(fhr.read()).split('\n\n')
for meta in [item.split('\n') for item in items]:
lock = {}
@@ -1743,15 +1880,22 @@ def list_locks():
lock.update(dict([tuple([i.strip() for i in element.split(':', 1)]), ]))
if lock.get('solvable_name'):
locks[lock.pop('solvable_name')] = lock
+ except IOError:
+ pass
+ except Exception:
+ log.warning('Detected a problem when accessing {}'.format(_locks))

return locks


-def clean_locks():
+def clean_locks(root=None):
'''
Remove unused locks that do not currently (with regard to repositories
used) lock any package.

+ root
+ Operate on a different root directory.
+
CLI Example:

.. code-block:: bash

@@ -1760,10 +1904,11 @@ def clean_locks():
'''
LCK = "removed"
out = {LCK: 0}
- if not os.path.exists("/etc/zypp/locks"):
+ locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
+ if not os.path.exists(locks):
return out

- for node in __zypper__.xml.call('cl').getElementsByTagName("message"):
+ for node in __zypper__(root=root).xml.call('cl').getElementsByTagName("message"):
text = node.childNodes[0].nodeValue.lower()
if text.startswith(LCK):
out[LCK] = text.split(" ")[1]
@@ -1776,6 +1921,9 @@ def unhold(name=None, pkgs=None, **kwargs):
'''
Remove specified package lock.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -1785,12 +1933,13 @@ def unhold(name=None, pkgs=None, **kwargs):
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
'''
ret = {}
+ root = kwargs.get('root')
if (not name and not pkgs) or (name and pkgs):
raise CommandExecutionError('Name or packages must be specified.')
elif name:
pkgs = [name]

- locks = list_locks()
+ locks = list_locks(root)
try:
pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
except MinionError as exc:
@@ -1807,15 +1956,18 @@ def unhold(name=None, pkgs=None, **kwargs):
ret[pkg]['comment'] = 'Package {0} unable to be unheld.'.format(pkg)

if removed:
- __zypper__.call('rl', *removed)
+ __zypper__(root=root).call('rl', *removed)

return ret


-def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
+def remove_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
'''
Remove specified package lock.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -1825,7 +1977,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
'''
salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use unhold() instead.')
- locks = list_locks()
+ locks = list_locks(root)
try:
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
except MinionError as exc:
@@ -1840,7 +1992,7 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
missing.append(pkg)

if removed:
- __zypper__.call('rl', *removed)
+ __zypper__(root=root).call('rl', *removed)

return {'removed': len(removed), 'not_found': missing}

@@ -1849,6 +2001,9 @@ def hold(name=None, pkgs=None, **kwargs):
'''
Add a package lock. Specify packages to lock by exact name.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -1863,12 +2018,13 @@ def hold(name=None, pkgs=None, **kwargs):
:return:
'''
ret = {}
+ root = kwargs.get('root')
if (not name and not pkgs) or (name and pkgs):
raise CommandExecutionError('Name or packages must be specified.')
elif name:
pkgs = [name]

- locks = list_locks()
+ locks = list_locks(root=root)
added = []
try:
pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
@@ -1884,15 +2040,18 @@ def hold(name=None, pkgs=None, **kwargs):
ret[pkg]['comment'] = 'Package {0} is already set to be held.'.format(pkg)

if added:
- __zypper__.call('al', *added)
+ __zypper__(root=root).call('al', *added)

return ret


-def add_lock(packages, **kwargs): # pylint: disable=unused-argument
+def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument
'''
Add a package lock. Specify packages to lock by exact name.

+ root
+ operate on a different root directory.
+
CLI Example:

.. code-block:: bash
@@ -1902,7 +2061,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
salt '*' pkg.add_lock pkgs='["foo", "bar"]'
'''
salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.')
- locks = list_locks()
+ locks = list_locks(root)
added = []
try:
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
@@ -1914,7 +2073,7 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
added.append(pkg)

if added:
- __zypper__.call('al', *added)
+ __zypper__(root=root).call('al', *added)

return {'added': len(added), 'packages': added}

@@ -1924,7 +2083,9 @@ def verify(*names, **kwargs):
Runs an rpm -Va on a system, and returns the results in a dict

Files with an attribute of config, doc, ghost, license or readme in the
- package header can be ignored using the ``ignore_types`` keyword argument
+ package header can be ignored using the ``ignore_types`` keyword argument.
+
+ The root parameter can also be passed via the keyword argument.

CLI Example:

@@ -1938,12 +2099,14 @@ def verify(*names, **kwargs):
return __salt__['lowpkg.verify'](*names, **kwargs)


-def file_list(*packages):
+def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of *every* file on the system's rpm database (not generally
recommended).

+ The root parameter can also be passed via the keyword argument.
+
CLI Examples:

.. code-block:: bash
@@ -1952,15 +2115,17 @@ def file_list(*packages):
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
- return __salt__['lowpkg.file_list'](*packages)
+ return __salt__['lowpkg.file_list'](*packages, **kwargs)


-def file_dict(*packages):
+def file_dict(*packages, **kwargs):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of *every* file on the system's
rpm database (not generally recommended).

+ The root parameter can also be passed via the keyword argument.
+
CLI Examples:

.. code-block:: bash
@@ -1969,7 +2134,7 @@ def file_dict(*packages):
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
- return __salt__['lowpkg.file_dict'](*packages)
+ return __salt__['lowpkg.file_dict'](*packages, **kwargs)


def modified(*packages, **flags):
@@ -2008,6 +2173,9 @@ def modified(*packages, **flags):
capabilities
Include only files where capabilities differ or not. Note: supported only on newer RPM versions.

+ root
+ operate on a different root directory.
+
CLI Examples:

.. code-block:: bash
@@ -2021,7 +2189,7 @@ def modified(*packages, **flags):
return __salt__['lowpkg.modified'](*packages, **flags)


-def owner(*paths):
+def owner(*paths, **kwargs):
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. If a single path is passed, a string will be returned,
@@ -2031,6 +2199,8 @@ def owner(*paths):
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.

+ The root parameter can also be passed via the keyword argument.
+
CLI Examples:

.. code-block:: bash
@@ -2038,26 +2208,69 @@ def owner(*paths):
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
- return __salt__['lowpkg.owner'](*paths)
+ return __salt__['lowpkg.owner'](*paths, **kwargs)


-def _get_patterns(installed_only=None):
- '''
- List all known patterns in repos.
- '''
+def _get_visible_patterns(root=None):
+ '''Get all available patterns in the repo that are visible.'''
patterns = {}
- for element in __zypper__.nolock.xml.call('se', '-t', 'pattern').getElementsByTagName('solvable'):
+ search_patterns = __zypper__(root=root).nolock.xml.call('se', '-t', 'pattern')
+ for element in search_patterns.getElementsByTagName('solvable'):
installed = element.getAttribute('status') == 'installed'
- if (installed_only and installed) or not installed_only:
- patterns[element.getAttribute('name')] = {
- 'installed': installed,
- 'summary': element.getAttribute('summary'),
+ patterns[element.getAttribute('name')] = {
+ 'installed': installed,
+ 'summary': element.getAttribute('summary'),
+ }
+ return patterns
+
+
+def _get_installed_patterns(root=None):
+ '''
+ List all installed patterns.
+ '''
+ # Some patterns are not visible (the `pattern-visible()` capability is
+ # not set), so they cannot be found via a normal `zypper se -t
+ # pattern`.
+ #
+ # Also, patterns are not directly searchable in the local rpmdb.
+ #
+ # The proposed solution is: first search all the packages that
+ # contain the 'pattern()' capability, and deduce the name of the
+ # pattern from this capability.
+ #
+ # For example:
+ #
+ # 'pattern() = base' -> 'base'
+ # 'pattern() = microos_defaults' -> 'microos_defaults'
+
+ def _pattern_name(capability):
+ '''Return the pattern name from a suitable capability.'''
+ return capability.split('=')[-1].strip()
|
|
+
|
|
+ cmd = ['rpm']
|
|
+ if root:
|
|
+ cmd.extend(['--root', root])
|
|
+ cmd.extend(['-q', '--provides', '--whatprovides', 'pattern()'])
|
|
+ # If no `pattern()`s are found, RPM returns `1`, but for us is not
|
|
+ # a real error.
|
|
+ output = __salt__['cmd.run'](cmd, ignore_retcode=True)
|
|
+
|
|
+ installed_patterns = [_pattern_name(line) for line in output.splitlines()
|
|
+ if line.startswith('pattern() = ')]
|
|
+
|
|
+ patterns = {k: v for k, v in _get_visible_patterns(root=root).items() if v['installed']}
|
|
+
|
|
+ for pattern in installed_patterns:
|
|
+ if pattern not in patterns:
|
|
+ patterns[pattern] = {
|
|
+ 'installed': True,
|
|
+ 'summary': 'Non-visible pattern',
|
|
}
|
|
|
|
return patterns
|
|
|
|
|
|
-def list_patterns(refresh=False):
|
|
+def list_patterns(refresh=False, root=None):
|
|
'''
|
|
List all known patterns from available repos.
|
|
|
|
@@ -2066,6 +2279,9 @@ def list_patterns(refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2073,27 +2289,30 @@ def list_patterns(refresh=False):
|
|
salt '*' pkg.list_patterns
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
- return _get_patterns()
|
|
+ return _get_visible_patterns(root=root)
|
|
|
|
|
|
-def list_installed_patterns():
|
|
+def list_installed_patterns(root=None):
|
|
'''
|
|
List installed patterns on the system.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.list_installed_patterns
|
|
'''
|
|
- return _get_patterns(installed_only=True)
|
|
+ return _get_installed_patterns(root=root)
|
|
|
|
|
|
def search(criteria, refresh=False, **kwargs):
|
|
'''
|
|
- List known packags, available to the system.
|
|
+ List known packages available to the system.
|
|
|
|
refresh
|
|
force a refresh if set to True.
|
|
@@ -2141,6 +2360,9 @@ def search(criteria, refresh=False, **kwargs):
|
|
details (bool)
|
|
Show version and repository
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2161,8 +2383,11 @@ def search(criteria, refresh=False, **kwargs):
|
|
'not_installed_only': '-u',
|
|
'details': '--details'
|
|
}
|
|
+
|
|
+ root = kwargs.get('root', None)
|
|
+
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
cmd = ['search']
|
|
if kwargs.get('match') == 'exact':
|
|
@@ -2177,7 +2402,7 @@ def search(criteria, refresh=False, **kwargs):
|
|
cmd.append(ALLOWED_SEARCH_OPTIONS.get(opt))
|
|
|
|
cmd.append(criteria)
|
|
- solvables = __zypper__.nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
|
|
+ solvables = __zypper__(root=root).nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable')
|
|
if not solvables:
|
|
raise CommandExecutionError(
|
|
'No packages found matching \'{0}\''.format(criteria)
|
|
@@ -2206,7 +2431,7 @@ def _get_first_aggregate_text(node_list):
|
|
return '\n'.join(out)
|
|
|
|
|
|
-def list_products(all=False, refresh=False):
|
|
+def list_products(all=False, refresh=False, root=None):
|
|
'''
|
|
List all available or installed SUSE products.
|
|
|
|
@@ -2218,6 +2443,9 @@ def list_products(all=False, refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
Includes handling for OEM products, which read the OEM productline file
|
|
and overwrite the release value.
|
|
|
|
@@ -2229,10 +2457,12 @@ def list_products(all=False, refresh=False):
|
|
salt '*' pkg.list_products all=True
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
ret = list()
|
|
- OEM_PATH = "/var/lib/suseRegister/OEM"
|
|
+ OEM_PATH = '/var/lib/suseRegister/OEM'
|
|
+ if root:
|
|
+ OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep))
|
|
cmd = list()
|
|
if not all:
|
|
cmd.append('--disable-repos')
|
|
@@ -2240,7 +2470,7 @@ def list_products(all=False, refresh=False):
|
|
if not all:
|
|
cmd.append('-i')
|
|
|
|
- product_list = __zypper__.nolock.xml.call(*cmd).getElementsByTagName('product-list')
|
|
+ product_list = __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('product-list')
|
|
if not product_list:
|
|
return ret # No products found
|
|
|
|
@@ -2282,6 +2512,9 @@ def download(*packages, **kwargs):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -2292,12 +2525,14 @@ def download(*packages, **kwargs):
|
|
if not packages:
|
|
raise SaltInvocationError('No packages specified')
|
|
|
|
+ root = kwargs.get('root', None)
|
|
+
|
|
refresh = kwargs.get('refresh', False)
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
pkg_ret = {}
|
|
- for dld_result in __zypper__.xml.call('download', *packages).getElementsByTagName("download-result"):
|
|
+ for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName("download-result"):
|
|
repo = dld_result.getElementsByTagName("repository")[0]
|
|
path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
|
|
pkg_info = {
|
|
@@ -2308,7 +2543,7 @@ def download(*packages, **kwargs):
|
|
key = _get_first_aggregate_text(
|
|
dld_result.getElementsByTagName('name')
|
|
)
|
|
- if __salt__['lowpkg.checksum'](pkg_info['path']):
|
|
+ if __salt__['lowpkg.checksum'](pkg_info['path'], root=root):
|
|
pkg_ret[key] = pkg_info
|
|
|
|
if pkg_ret:
|
|
@@ -2322,12 +2557,15 @@ def download(*packages, **kwargs):
|
|
)
|
|
|
|
|
|
-def list_downloaded():
|
|
+def list_downloaded(root=None):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
List prefetched packages downloaded by Zypper in the local disk.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI example:
|
|
|
|
.. code-block:: bash
|
|
@@ -2335,6 +2573,8 @@ def list_downloaded():
|
|
salt '*' pkg.list_downloaded
|
|
'''
|
|
CACHE_DIR = '/var/cache/zypp/packages/'
|
|
+ if root:
|
|
+ CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
|
|
|
|
ret = {}
|
|
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
|
|
@@ -2351,12 +2591,14 @@ def list_downloaded():
|
|
return ret
|
|
|
|
|
|
-def diff(*paths):
|
|
+def diff(*paths, **kwargs):
|
|
'''
|
|
Return a formatted diff between current files and original in a package.
|
|
NOTE: this function includes all files (configuration and not), but does
|
|
not work on binary content.
|
|
|
|
+ The root parameter can also be passed as a keyword argument.
|
|
+
|
|
:param path: Full path to the installed file
|
|
:return: Difference string or raises an exception if examined file is binary.
|
|
|
|
@@ -2370,7 +2612,7 @@ def diff(*paths):
|
|
|
|
pkg_to_paths = {}
|
|
for pth in paths:
|
|
- pth_pkg = __salt__['lowpkg.owner'](pth)
|
|
+ pth_pkg = __salt__['lowpkg.owner'](pth, **kwargs)
|
|
if not pth_pkg:
|
|
ret[pth] = os.path.exists(pth) and 'Not managed' or 'N/A'
|
|
else:
|
|
@@ -2379,7 +2621,7 @@ def diff(*paths):
|
|
pkg_to_paths[pth_pkg].append(pth)
|
|
|
|
if pkg_to_paths:
|
|
- local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys())
|
|
+ local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys(), **kwargs)
|
|
for pkg, files in six.iteritems(pkg_to_paths):
|
|
for path in files:
|
|
ret[path] = __salt__['lowpkg.diff'](
|
|
@@ -2390,12 +2632,12 @@ def diff(*paths):
|
|
return ret
|
|
|
|
|
|
-def _get_patches(installed_only=False):
|
|
+def _get_patches(installed_only=False, root=None):
|
|
'''
|
|
List all known patches in repos.
|
|
'''
|
|
patches = {}
|
|
- for element in __zypper__.nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
|
|
+ for element in __zypper__(root=root).nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'):
|
|
installed = element.getAttribute('status') == 'installed'
|
|
if (installed_only and installed) or not installed_only:
|
|
patches[element.getAttribute('name')] = {
|
|
@@ -2406,7 +2648,7 @@ def _get_patches(installed_only=False):
|
|
return patches
|
|
|
|
|
|
-def list_patches(refresh=False):
|
|
+def list_patches(refresh=False, root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
@@ -2417,6 +2659,9 @@ def list_patches(refresh=False):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2424,33 +2669,39 @@ def list_patches(refresh=False):
|
|
salt '*' pkg.list_patches
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
- return _get_patches()
|
|
+ return _get_patches(root=root)
|
|
|
|
|
|
-def list_installed_patches():
|
|
+def list_installed_patches(root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2017.7.0
|
|
|
|
List installed advisory patches on the system.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
|
|
salt '*' pkg.list_installed_patches
|
|
'''
|
|
- return _get_patches(installed_only=True)
|
|
+ return _get_patches(installed_only=True, root=root)
|
|
|
|
|
|
-def list_provides(**kwargs):
|
|
+def list_provides(root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2018.3.0
|
|
|
|
List package provides of installed packages as a dict.
|
|
{'<provided_name>': ['<package_name>', '<package_name>', ...]}
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
CLI Examples:
|
|
|
|
.. code-block:: bash
|
|
@@ -2459,7 +2710,10 @@ def list_provides(**kwargs):
|
|
'''
|
|
ret = __context__.get('pkg.list_provides')
|
|
if not ret:
|
|
- cmd = ['rpm', '-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n']
|
|
+ cmd = ['rpm']
|
|
+ if root:
|
|
+ cmd.extend(['--root', root])
|
|
+ cmd.extend(['-qa', '--queryformat', '%{PROVIDES}_|-%{NAME}\n'])
|
|
ret = dict()
|
|
for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines():
|
|
provide, realname = line.split('_|-')
|
|
@@ -2475,7 +2729,7 @@ def list_provides(**kwargs):
|
|
return ret
|
|
|
|
|
|
-def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
+def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
|
|
'''
|
|
.. versionadded:: 2018.3.0
|
|
|
|
@@ -2489,6 +2743,9 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
If set to False (default) it depends on zypper if a refresh is
|
|
executed.
|
|
|
|
+ root
|
|
+ operate on a different root directory.
|
|
+
|
|
resolve_capabilities
|
|
If this option is set to True the input will be checked if
|
|
a package with this name exists. If not, this function will
|
|
@@ -2504,7 +2761,7 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
|
|
'''
|
|
if refresh:
|
|
- refresh_db()
|
|
+ refresh_db(root)
|
|
|
|
ret = list()
|
|
for pkg in pkgs:
|
|
@@ -2517,12 +2774,12 @@ def resolve_capabilities(pkgs, refresh, **kwargs):
|
|
|
|
if kwargs.get('resolve_capabilities', False):
|
|
try:
|
|
- search(name, match='exact')
|
|
+ search(name, root=root, match='exact')
|
|
except CommandExecutionError:
|
|
# no package with such a name found
|
|
# search for a package which provides this name
|
|
try:
|
|
- result = search(name, provides=True, match='exact')
|
|
+ result = search(name, root=root, provides=True, match='exact')
|
|
if len(result) == 1:
|
|
name = next(iter(result.keys()))
|
|
elif len(result) > 1:
|
|
diff --git a/salt/states/btrfs.py b/salt/states/btrfs.py
|
|
new file mode 100644
|
|
index 0000000000..af78c8ae00
|
|
--- /dev/null
|
|
+++ b/salt/states/btrfs.py
|
|
@@ -0,0 +1,385 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+'''
|
|
+:maintainer: Alberto Planas <aplanas@suse.com>
|
|
+:maturity: new
|
|
+:depends: None
|
|
+:platform: Linux
|
|
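+
+A minimal usage sketch (the device and subvolume names below are
+illustrative, not part of this module):
+
+.. code-block:: yaml
+
+    data_subvolume:
+      btrfs.subvolume_created:
+        - name: data
+        - device: /dev/sda1
+        - set_default: True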
+'''
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import functools
|
|
+import logging
|
|
+import os.path
|
|
+import tempfile
|
|
+import traceback
|
|
+
|
|
+from salt.exceptions import CommandExecutionError
+from salt.ext import six  # used by __mount_device's error handling
|
|
+
|
|
+log = logging.getLogger(__name__)
|
|
+
|
|
+__virtualname__ = 'btrfs'
|
|
+
|
|
+
|
|
+def _mount(device, use_default):
|
|
+ '''
|
|
+ Mount the device in a temporary place.
|
|
+ '''
|
|
+ opts = 'subvol=/' if not use_default else 'defaults'
|
|
+ dest = tempfile.mkdtemp()
|
|
+ res = __states__['mount.mounted'](dest, device=device, fstype='btrfs',
|
|
+ opts=opts, persist=False)
|
|
+ if not res['result']:
|
|
+ log.error('Cannot mount device %s in %s', device, dest)
|
|
+ _umount(dest)
|
|
+ return None
|
|
+ return dest
|
|
+
|
|
+
|
|
+def _umount(path):
|
|
+ '''
|
|
+ Umount and clean the temporary place.
|
|
+ '''
|
|
+ __states__['mount.unmounted'](path)
|
|
+ __utils__['files.rm_rf'](path)
|
|
+
|
|
+
|
|
+def _is_default(path, dest, name):
|
|
+ '''
|
|
+ Check if the subvolume is the current default.
|
|
+ '''
|
|
+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
|
|
+ def_id = __salt__['btrfs.subvolume_get_default'](dest)['id']
|
|
+ return subvol_id == def_id
|
|
+
|
|
+
|
|
+def _set_default(path, dest, name):
|
|
+ '''
|
|
+ Set the subvolume as the current default.
|
|
+ '''
|
|
+ subvol_id = __salt__['btrfs.subvolume_show'](path)[name]['subvolume id']
|
|
+ return __salt__['btrfs.subvolume_set_default'](subvol_id, dest)
|
|
+
|
|
+
|
|
+def _is_cow(path):
|
|
+ '''
|
|
+ Check if the subvolume is copy on write
|
|
+ '''
|
|
+ dirname = os.path.dirname(path)
|
|
+ return 'C' not in __salt__['file.lsattr'](dirname)[path]
|
|
+
|
|
+
|
|
+def _unset_cow(path):
|
|
+ '''
|
|
+ Disable the copy on write in a subvolume
|
|
+ '''
|
|
+ return __salt__['file.chattr'](path, operator='add', attributes='C')
|
|
+
|
|
+
|
|
+def __mount_device(action):
|
|
+ '''
|
|
+ Small decorator to make sure that the mount and umount happen in
|
|
+ a transactional way.
|
|
+ '''
|
|
+ @functools.wraps(action)
|
|
+ def wrapper(*args, **kwargs):
|
|
+ name = kwargs['name']
|
|
+ device = kwargs['device']
|
|
+ use_default = kwargs.get('use_default', False)
|
|
+
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': ['Some error happened during the operation.'],
|
|
+ }
|
|
+ try:
|
|
+ if device:
|
|
+ dest = _mount(device, use_default)
|
|
+ if not dest:
|
|
+ msg = 'Device {} cannot be mounted'.format(device)
|
|
+ ret['comment'].append(msg)
|
|
+ kwargs['__dest'] = dest
|
|
+ ret = action(*args, **kwargs)
|
|
+ except Exception:
|
|
+ tb = six.text_type(traceback.format_exc())
|
|
+ log.exception('Exception captured in wrapper %s', tb)
|
|
+ ret['comment'].append(tb)
|
|
+ finally:
|
|
+ if device:
|
|
+ _umount(dest)
|
|
+ return ret
|
|
+ return wrapper
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def subvolume_created(name, device, qgroupids=None, set_default=False,
|
|
+ copy_on_write=True, force_set_default=True,
|
|
+ __dest=None):
|
|
+ '''
|
|
+ Makes sure that a btrfs subvolume is present.
|
|
+
|
|
+ name
|
|
+ Name of the subvolume to add
|
|
+
|
|
+ device
|
|
+ Device where to create the subvolume
|
|
+
|
|
+ qgroupids
|
|
+ Add the newly created subvolume to a qgroup. This parameter
|
|
+ is a list
|
|
+
|
|
+ set_default
|
|
+ If True, this new subvolume will be set as default when
|
|
+ mounted, unless subvol option in mount is used
|
|
+
|
|
+ copy_on_write
|
|
+ If false, set the subvolume with chattr +C
|
|
+
|
|
+ force_set_default
|
|
+ If false and the subvolume is already present, it will not
|
|
+ force it as default if ``set_default`` is True
|
|
+
|
|
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+ path = os.path.join(__dest, name)
|
|
+
|
|
+ exists = __salt__['btrfs.subvolume_exists'](path)
|
|
+ if exists:
|
|
+ ret['comment'].append('Subvolume {} already present'.format(name))
|
|
+
|
|
+ # Resolve first the test case. The check is not complete, but at
|
|
+ # least we will report if a subvolume needs to be created. It can
|
|
+ # happen that the subvolume is there, but we also need to set it
|
|
+ # as default, or persist in fstab.
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if not exists:
|
|
+ ret['comment'].append('Subvolume {} will be created'.format(name))
|
|
+ return ret
|
|
+
|
|
+ if not exists:
|
|
+ # Create the directories where the subvolume lives
|
|
+ _path = os.path.dirname(path)
|
|
+ res = __states__['file.directory'](_path, makedirs=True)
|
|
+ if not res['result']:
|
|
+ ret['comment'].append('Error creating {} directory'.format(_path))
|
|
+ return ret
|
|
+
|
|
+ try:
|
|
+ __salt__['btrfs.subvolume_create'](name, dest=__dest,
|
|
+ qgroupids=qgroupids)
|
|
+ except CommandExecutionError:
|
|
+ ret['comment'].append('Error creating subvolume {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ ret['changes'][name] = 'Created subvolume {}'.format(name)
|
|
+
|
|
+ # If the volume was already present, we can opt out of the check for
|
|
+ # default subvolume.
|
|
+ if (not exists or (exists and force_set_default)) and \
|
|
+ set_default and not _is_default(path, __dest, name):
|
|
+ ret['changes'][name + '_default'] = _set_default(path, __dest, name)
|
|
+
|
|
+ if not copy_on_write and _is_cow(path):
|
|
+ ret['changes'][name + '_no_cow'] = _unset_cow(path)
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def subvolume_deleted(name, device, commit=False, __dest=None):
|
|
+ '''
|
|
+ Makes sure that a btrfs subvolume is removed.
|
|
+
|
|
+ name
|
|
+ Name of the subvolume to remove
|
|
+
|
|
+ device
|
|
+ Device where to remove the subvolume
|
|
+
|
|
+ commit
|
|
+ Wait until the transaction is over
|
|
+
|
|
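+ A minimal usage sketch (the names and device are illustrative):
+
+ .. code-block:: yaml
+
+     data_subvolume_removed:
+       btrfs.subvolume_deleted:
+         - name: data
+         - device: /dev/sda1
+         - commit: True
+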
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ path = os.path.join(__dest, name)
|
|
+
|
|
+ exists = __salt__['btrfs.subvolume_exists'](path)
|
|
+ if not exists:
|
|
+ ret['comment'].append('Subvolume {} already missing'.format(name))
|
|
+
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if exists:
|
|
+ ret['comment'].append('Subvolume {} will be removed'.format(name))
|
|
+ return ret
|
|
+
|
|
+ # If commit is set, we wait until all is over
|
|
+ commit = 'after' if commit else None
|
|
+
|
|
+ if exists:
|
|
+ try:
|
|
+ __salt__['btrfs.subvolume_delete'](path, commit=commit)
|
|
+ except CommandExecutionError:
|
|
+ ret['comment'].append('Error removing subvolume {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ ret['changes'][name] = 'Removed subvolume {}'.format(name)
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
+
|
|
+
|
|
+def _diff_properties(expected, current):
|
|
+ '''Calculate the difference between the current and the expected
|
|
+ properties
|
|
+
|
|
+ * 'expected' is expressed in a dictionary like: {'property': value}
|
|
+
|
|
+ * 'current' contains the same format returned by 'btrfs.properties'
|
|
+
|
|
+ If the property is not available, an exception will be raised.
|
|
+
|
|
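+ A worked example of the expected shapes (values are illustrative)::
+
+     expected = {'ro': 'true'}
+     current = {'ro': {'value': 'false'}}
+     _diff_properties(expected, current)  # -> {'ro': 'true'}
+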
+ '''
|
|
+ difference = {}
|
|
+ for _property, value in expected.items():
|
|
+ current_value = current[_property]['value']
|
|
+ if value is False and current_value == 'N/A':
|
|
+ needs_update = False
|
|
+ elif value != current_value:
|
|
+ needs_update = True
|
|
+ else:
|
|
+ needs_update = False
|
|
+ if needs_update:
|
|
+ difference[_property] = value
|
|
+ return difference
|
|
+
|
|
+
|
|
+@__mount_device
|
|
+def properties(name, device, use_default=False, __dest=None, **properties):
|
|
+ '''
|
|
+ Makes sure that a list of properties are set in a subvolume, file
|
|
+ or device.
|
|
+
|
|
+ name
|
|
+ Name of the object to change
|
|
+
|
|
+ device
|
|
+ Device where the object lives, if None, the device will be in
|
|
+ name
|
|
+
|
|
+ use_default
|
|
+ If True, this subvolume will be resolved to the default
|
|
+ subvolume assigned during the create operation
|
|
+
|
|
+ properties
|
|
+ Dictionary of properties
|
|
+
|
|
+ Valid properties are 'ro', 'label' or 'compression'. Check the
|
|
+ documentation to see where those properties are valid for each
|
|
+ object.
|
|
+
|
|
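+ A minimal usage sketch (the path, device and values are
+ illustrative):
+
+ .. code-block:: yaml
+
+     /var/lib/data:
+       btrfs.properties:
+         - device: /dev/sda1
+         - ro: true
+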
+ '''
|
|
+ ret = {
|
|
+ 'name': name,
|
|
+ 'result': False,
|
|
+ 'changes': {},
|
|
+ 'comment': [],
|
|
+ }
|
|
+
|
|
+ # 'name' will always have the name of the object that we want to
|
|
+ # change, but if the object is a device, we do not repeat it again
|
|
+ # in 'device'. This makes device sometimes optional.
|
|
+ if device:
|
|
+ if os.path.isabs(name):
|
|
+ path = os.path.join(__dest, os.path.relpath(name, os.path.sep))
|
|
+ else:
|
|
+ path = os.path.join(__dest, name)
|
|
+ else:
|
|
+ path = name
|
|
+
|
|
+ if not os.path.exists(path):
|
|
+ ret['comment'].append('Object {} not found'.format(name))
|
|
+ return ret
|
|
+
|
|
+ # Convert the booleans to lowercase
|
|
+ properties = {k: v if type(v) is not bool else str(v).lower()
|
|
+ for k, v in properties.items()}
|
|
+
|
|
+ current_properties = {}
|
|
+ try:
|
|
+ current_properties = __salt__['btrfs.properties'](path)
|
|
+ except CommandExecutionError as e:
|
|
+ ret['comment'].append('Error reading properties from {}'.format(name))
|
|
+ ret['comment'].append('Current error {}'.format(e))
|
|
+ return ret
|
|
+
|
|
+ try:
|
|
+ properties_to_set = _diff_properties(properties, current_properties)
|
|
+ except KeyError:
|
|
+ ret['comment'].append('Some property not found in {}'.format(name))
|
|
+ return ret
|
|
+
|
|
+ if __opts__['test']:
|
|
+ ret['result'] = None
|
|
+ if properties_to_set:
|
|
+ msg = 'Properties {} will be changed in {}'.format(
|
|
+ properties_to_set, name)
|
|
+ else:
|
|
+ msg = 'No properties will be changed in {}'.format(name)
|
|
+ ret['comment'].append(msg)
|
|
+ return ret
|
|
+
|
|
+ if properties_to_set:
|
|
+ _properties = ','.join(
|
|
+ '{}={}'.format(k, v) for k, v in properties_to_set.items())
|
|
+ __salt__['btrfs.properties'](path, set=_properties)
|
|
+
|
|
+ current_properties = __salt__['btrfs.properties'](path)
|
|
+ properties_failed = _diff_properties(properties, current_properties)
|
|
+ if properties_failed:
|
|
+ msg = 'Properties {} failed to be changed in {}'.format(
|
|
+ properties_failed, name)
|
|
+ ret['comment'].append(msg)
|
|
+ return ret
|
|
+
|
|
+ ret['comment'].append('Properties changed in {}'.format(name))
|
|
+ ret['changes'] = properties_to_set
|
|
+ else:
|
|
+ ret['comment'].append('Properties not changed in {}'.format(name))
|
|
+
|
|
+ ret['result'] = True
|
|
+ return ret
|
|
diff --git a/salt/states/file.py b/salt/states/file.py
|
|
index dd5bcec62a..0e925bb2ed 100644
|
|
--- a/salt/states/file.py
|
|
+++ b/salt/states/file.py
|
|
@@ -291,7 +291,11 @@ import shutil
|
|
import sys
|
|
import time
|
|
import traceback
|
|
-from collections import Iterable, Mapping, defaultdict
|
|
+try:
|
|
+ from collections.abc import Iterable, Mapping
|
|
+except ImportError:
|
|
+ from collections import Iterable, Mapping
|
|
+from collections import defaultdict
|
|
from datetime import datetime, date # python3 problem in the making?
|
|
|
|
# Import salt libs
|
|
diff --git a/salt/states/loop.py b/salt/states/loop.py
|
|
index 524fa56c1a..726c8c8016 100644
|
|
--- a/salt/states/loop.py
|
|
+++ b/salt/states/loop.py
|
|
@@ -185,6 +185,10 @@ def until_no_eval(
|
|
''.format(name, expected))
|
|
if ret['comment']:
|
|
return ret
|
|
+ if not m_args:
|
|
+ m_args = []
|
|
+ if not m_kwargs:
|
|
+ m_kwargs = {}
|
|
|
|
if init_wait:
|
|
time.sleep(init_wait)
|
|
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
|
|
index a13d418400..71ba29a27c 100644
|
|
--- a/salt/states/pkg.py
|
|
+++ b/salt/states/pkg.py
|
|
@@ -236,7 +236,7 @@ def _fulfills_version_spec(versions, oper, desired_version,
|
|
return False
|
|
|
|
|
|
-def _find_unpurge_targets(desired):
|
|
+def _find_unpurge_targets(desired, **kwargs):
|
|
'''
|
|
Find packages which are marked to be purged but can't yet be removed
|
|
because they are dependencies for other installed packages. These are the
|
|
@@ -245,7 +245,7 @@ def _find_unpurge_targets(desired):
|
|
'''
|
|
return [
|
|
x for x in desired
|
|
- if x in __salt__['pkg.list_pkgs'](purge_desired=True)
|
|
+ if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
|
|
]
|
|
|
|
|
|
@@ -260,7 +260,7 @@ def _find_download_targets(name=None,
|
|
Inspect the arguments to pkg.downloaded and discover what packages need to
|
|
be downloaded. Return a dict of packages to download.
|
|
'''
|
|
- cur_pkgs = __salt__['pkg.list_downloaded']()
|
|
+ cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
|
|
if pkgs:
|
|
to_download = _repack_pkgs(pkgs, normalize=normalize) # pylint: disable=not-callable
|
|
|
|
@@ -378,7 +378,7 @@ def _find_advisory_targets(name=None,
|
|
Inspect the arguments to pkg.patch_installed and discover what advisory
|
|
patches need to be installed. Return a dict of advisory patches to install.
|
|
'''
|
|
- cur_patches = __salt__['pkg.list_installed_patches']()
|
|
+ cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
|
|
if advisory_ids:
|
|
to_download = advisory_ids
|
|
else:
|
|
@@ -582,7 +582,7 @@ def _find_install_targets(name=None,
|
|
'minion log.'.format('pkgs' if pkgs
|
|
else 'sources')}
|
|
|
|
- to_unpurge = _find_unpurge_targets(desired)
|
|
+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
|
|
else:
|
|
if salt.utils.platform.is_windows():
|
|
pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) # pylint: disable=not-callable
|
|
@@ -602,7 +602,7 @@ def _find_install_targets(name=None,
|
|
else:
|
|
desired = {name: version}
|
|
|
|
- to_unpurge = _find_unpurge_targets(desired)
|
|
+ to_unpurge = _find_unpurge_targets(desired, **kwargs)
|
|
|
|
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
|
|
origin = bool(re.search('/', name))
|
|
@@ -761,7 +761,8 @@ def _find_install_targets(name=None,
|
|
verify_result = __salt__['pkg.verify'](
|
|
package_name,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options
|
|
+ verify_options=verify_options,
|
|
+ **kwargs
|
|
)
|
|
except (CommandExecutionError, SaltInvocationError) as exc:
|
|
failed_verify = exc.strerror
|
|
@@ -790,7 +791,9 @@ def _find_install_targets(name=None,
|
|
verify_result = __salt__['pkg.verify'](
|
|
package_name,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options)
|
|
+ verify_options=verify_options,
|
|
+ **kwargs
|
|
+ )
|
|
except (CommandExecutionError, SaltInvocationError) as exc:
|
|
failed_verify = exc.strerror
|
|
continue
|
|
@@ -1974,7 +1977,8 @@ def installed(
|
|
# have caught invalid arguments earlier.
|
|
verify_result = __salt__['pkg.verify'](reinstall_pkg,
|
|
ignore_types=ignore_types,
|
|
- verify_options=verify_options)
|
|
+ verify_options=verify_options,
|
|
+ **kwargs)
|
|
if verify_result:
|
|
failed.append(reinstall_pkg)
|
|
altered_files[reinstall_pkg] = verify_result
|
|
@@ -3038,7 +3042,7 @@ def uptodate(name, refresh=False, pkgs=None, **kwargs):
|
|
pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
|
|
try:
|
|
packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
|
|
- expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname)}
|
|
+ expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)}
|
|
for pkgname, pkgver in six.iteritems(packages)}
|
|
if isinstance(pkgs, list):
|
|
packages = [pkg for pkg in packages if pkg in pkgs]
|
|
@@ -3220,7 +3224,7 @@ def group_installed(name, skip=None, include=None, **kwargs):
|
|
.format(name, exc))
|
|
return ret
|
|
|
|
- failed = [x for x in targets if x not in __salt__['pkg.list_pkgs']()]
|
|
+ failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)]
|
|
if failed:
|
|
ret['comment'] = (
|
|
'Failed to install the following packages: {0}'
|
|
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
|
|
index f1ae3a0f6f..c39e857580 100644
|
|
--- a/salt/states/pkgrepo.py
|
|
+++ b/salt/states/pkgrepo.py
|
|
@@ -385,10 +385,7 @@ def managed(name, ppa=None, **kwargs):
|
|
kwargs.pop(kwarg, None)
|
|
|
|
try:
|
|
- pre = __salt__['pkg.get_repo'](
|
|
- repo,
|
|
- ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ pre = __salt__['pkg.get_repo'](repo=repo, **kwargs)
|
|
except CommandExecutionError as exc:
|
|
ret['result'] = False
|
|
ret['comment'] = \
|
|
@@ -504,10 +501,7 @@ def managed(name, ppa=None, **kwargs):
|
|
return ret
|
|
|
|
try:
|
|
- post = __salt__['pkg.get_repo'](
|
|
- repo,
|
|
- ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ post = __salt__['pkg.get_repo'](repo=repo, **kwargs)
|
|
if pre:
|
|
for kwarg in sanitizedkwargs:
|
|
if post.get(kwarg) != pre.get(kwarg):
|
|
@@ -600,9 +594,7 @@ def absent(name, **kwargs):
|
|
return ret
|
|
|
|
try:
|
|
- repo = __salt__['pkg.get_repo'](
|
|
- name, ppa_auth=kwargs.get('ppa_auth', None)
|
|
- )
|
|
+ repo = __salt__['pkg.get_repo'](name, **kwargs)
|
|
except CommandExecutionError as exc:
|
|
ret['result'] = False
|
|
ret['comment'] = \
|
|
diff --git a/salt/utils/oset.py b/salt/utils/oset.py
|
|
index acfd59b53b..cd4e88be40 100644
|
|
--- a/salt/utils/oset.py
|
|
+++ b/salt/utils/oset.py
|
|
@@ -22,7 +22,10 @@ Rob Speer's changes are as follows:
|
|
- added __getitem__
|
|
'''
|
|
from __future__ import absolute_import, unicode_literals, print_function
|
|
-import collections
|
|
+try:
|
|
+ from collections.abc import MutableSet
|
|
+except ImportError:
|
|
+ from collections import MutableSet
|
|
|
|
SLICE_ALL = slice(None)
|
|
__version__ = '2.0.1'
|
|
@@ -44,7 +47,7 @@ def is_iterable(obj):
|
|
return hasattr(obj, '__iter__') and not isinstance(obj, str) and not isinstance(obj, tuple)
|
|
|
|
|
|
-class OrderedSet(collections.MutableSet):
|
|
+class OrderedSet(MutableSet):
|
|
"""
|
|
An OrderedSet is a custom MutableSet that remembers its order, so that
|
|
every entry has an index that can be looked up.
|
|
diff --git a/tests/unit/modules/test_kubeadm.py b/tests/unit/modules/test_kubeadm.py
|
|
new file mode 100644
|
|
index 0000000000..a58f54f118
|
|
--- /dev/null
|
|
+++ b/tests/unit/modules/test_kubeadm.py
|
|
@@ -0,0 +1,1144 @@
|
|
+# -*- coding: utf-8 -*-
|
|
+#
|
|
+# Author: Alberto Planas <aplanas@suse.com>
|
|
+#
|
|
+# Copyright 2019 SUSE LINUX GmbH, Nuernberg, Germany.
|
|
+#
|
|
+# Licensed to the Apache Software Foundation (ASF) under one
|
|
+# or more contributor license agreements. See the NOTICE file
|
|
+# distributed with this work for additional information
|
|
+# regarding copyright ownership. The ASF licenses this file
|
|
+# to you under the Apache License, Version 2.0 (the
|
|
+# "License"); you may not use this file except in compliance
|
|
+# with the License. You may obtain a copy of the License at
|
|
+#
|
|
+# http://www.apache.org/licenses/LICENSE-2.0
|
|
+#
|
|
+# Unless required by applicable law or agreed to in writing,
|
|
+# software distributed under the License is distributed on an
|
|
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
+# KIND, either express or implied. See the License for the
|
|
+# specific language governing permissions and limitations
|
|
+# under the License.
|
|
+
|
|
+from __future__ import absolute_import, print_function, unicode_literals
|
|
+import pytest
|
|
+
|
|
+# Import Salt Testing Libs
|
|
+from tests.support.mixins import LoaderModuleMockMixin
|
|
+from tests.support.unit import TestCase, skipIf
|
|
+from tests.support.mock import (
|
|
+ MagicMock,
|
|
+ patch,
|
|
+ NO_MOCK,
|
|
+ NO_MOCK_REASON
|
|
+)
|
|
+
|
|
+import salt.modules.kubeadm as kubeadm
|
|
+from salt.exceptions import CommandExecutionError
|
|
+
|
|
+
|
|
+@skipIf(NO_MOCK, NO_MOCK_REASON)
|
|
+class KubeAdmTestCase(TestCase, LoaderModuleMockMixin):
|
|
+ '''
|
|
+ Test cases for salt.modules.kubeadm
|
|
+ '''
|
|
+
|
|
+ def setup_loader_modules(self):
|
|
+ return {
|
|
+ kubeadm: {
|
|
+ '__salt__': {},
|
|
+ '__utils__': {},
|
|
+ }
|
|
+ }
|
|
+
|
|
+ def test_version(self):
|
|
+ '''
|
|
+ Test kubeadm.version without parameters
|
|
+ '''
|
|
+ version = '{"clientVersion":{"major":"1"}}'
|
|
+ salt_mock = {
|
|
+ 'cmd.run_stdout': MagicMock(return_value=version),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.version() == {
|
|
+ 'clientVersion': {'major': '1'}
|
|
+ }
|
|
+ salt_mock['cmd.run_stdout'].assert_called_with(
|
|
+ ['kubeadm', 'version', '--output', 'json']
|
|
+ )
|
|
+
|
|
+ def test_version_params(self):
|
|
+ '''
|
|
+ Test kubeadm.version with parameters
|
|
+ '''
|
|
+ version = '{"clientVersion":{"major":"1"}}'
|
|
+ salt_mock = {
|
|
+ 'cmd.run_stdout': MagicMock(return_value=version),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.version(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == {
|
|
+ 'clientVersion': {'major': '1'}
|
|
+ }
|
|
+ salt_mock['cmd.run_stdout'].assert_called_with(
|
|
+ ['kubeadm', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt',
|
|
+ '--output', 'json']
|
|
+ )
|
|
+
|
|
+ def test_token_create(self):
|
|
+ '''
|
|
+ Test kubeadm.token_create without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_create() == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'create']
|
|
+ )
|
|
+
|
|
+ def test_token_create_params(self):
|
|
+ '''
|
|
+ Test kubeadm.token_create with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_create(token='token',
|
|
+ config='/kubeadm.cfg',
|
|
+ description='a description',
|
|
+ groups=['g:1', 'g:2'],
|
|
+ ttl='1h1m1s',
|
|
+ usages=['u1', 'u2'],
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'create', 'token',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--description', 'a description',
|
|
+ '--groups', '["g:1", "g:2"]',
|
|
+ '--ttl', '1h1m1s',
|
|
+ '--usages', '["u1", "u2"]',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_create_error(self):
|
|
+ '''
|
|
+ Test kubeadm.token_create error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_create()
|
|
+
|
|
+ def test_token_delete(self):
|
|
+ '''
|
|
+ Test kubeadm.token_delete without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'deleted'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_delete('token')
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'delete', 'token']
|
|
+ )
|
|
+
|
|
+ def test_token_delete_params(self):
|
|
+ '''
|
|
+ Test kubeadm.token_delete with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'deleted'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_delete('token',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt')
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'delete', 'token',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_delete_error(self):
|
|
+ '''
|
|
+ Test kubeadm.token_delete error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_delete('token')
|
|
+
|
|
+ def test_token_generate(self):
|
|
+ '''
|
|
+ Test kubeadm.token_generate without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_generate() == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'generate']
|
|
+ )
|
|
+
|
|
+ def test_token_generate_params(self):
|
|
+ '''
|
|
+ Test kubeadm.token_generate with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'token'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_generate(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'token'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'generate',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_generate_error(self):
|
|
+ '''
|
|
+ Test kubeadm.token_generate error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_generate()
|
|
+
|
|
+ def test_token_list(self):
|
|
+ '''
|
|
+ Test kubeadm.token_list without parameters
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'list']
|
|
+ )
|
|
+
|
|
+ def test_token_list_multiple_lines(self):
|
|
+ '''
|
|
+ Test kubeadm.token_list with multiple tokens
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }, {
|
|
+ 'h1': 'a', 'h2': 'b', 'h31 h32': 'c d', 'h4': 'e'
|
|
+ }]
|
|
+
|
|
+ def test_token_list_broken_lines(self):
|
|
+ '''
|
|
+ Test kubeadm.token_list with multiple tokens, one broken
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4\na b c d e'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.token_list() == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+
|
|
+ def test_token_list_params(self):
|
|
+ '''
|
|
+ Test kubeadm.token_list with parameters
|
|
+ '''
|
|
+ output = 'H1 H2 H31 H32 H4\n1 2 3.1 3.2 4'
|
|
+ result = {'retcode': 0, 'stdout': output}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ result = kubeadm.token_list(kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt')
|
|
+ assert result == [{
|
|
+ 'h1': '1', 'h2': '2', 'h31 h32': '3.1 3.2', 'h4': '4'
|
|
+ }]
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'token', 'list',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_token_list_error(self):
|
|
+ '''
|
|
+ Test kubeadm.token_list error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.token_list()
|
|
+
|
|
+ def test_alpha_certs_renew(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_certs_renew without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_certs_renew() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'certs', 'renew']
|
|
+ )
|
|
+
|
|
+ def test_alpha_certs_renew_params(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_certs_renew with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_certs_renew(rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'certs', 'renew',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_certs_renew_error(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_certs_renew error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_certs_renew()
|
|
+
|
|
+ def test_alpha_kubeconfig_user(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubeconfig_user without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubeconfig_user('user') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
|
|
+ '--client-name', 'user']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubeconfig_user_params(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubeconfig_user with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubeconfig_user(
|
|
+ 'user',
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ cert_dir='/pki',
|
|
+ org='org',
|
|
+ token='token',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubeconfig', 'user',
|
|
+ '--client-name', 'user',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--org', 'org',
|
|
+ '--token', 'token',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubeconfig_user_error(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubeconfig_user error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubeconfig_user('user')
|
|
+
|
|
+ def test_alpha_kubelet_config_download(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_download without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_download() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubelet_config_download_params(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_download with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_download(
|
|
+ kubeconfig='/kube.cfg',
|
|
+ kubelet_version='version',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'download',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--kubelet-version', 'version',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubelet_config_download_error(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_download error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubelet_config_download()
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_enable_dynamic without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ result = kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
|
|
+ assert result == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
|
|
+ '--node-name', 'node-1']
|
|
+ )
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic_params(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_enable_dynamic with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_kubelet_config_enable_dynamic(
|
|
+ 'node-1',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ kubelet_version='version',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'kubelet', 'config', 'enable-dynamic',
|
|
+ '--node-name', 'node-1',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--kubelet-version', 'version',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_kubelet_config_enable_dynamic_error(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_kubelet_config_enable_dynamic error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_kubelet_config_enable_dynamic('node-1')
|
|
+
|
|
+ def test_alpha_selfhosting_pivot(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_selfhosting_pivot without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_selfhosting_pivot() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force']
|
|
+ )
|
|
+
|
|
+ def test_alpha_selfhosting_pivot_params(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_selfhosting_pivot with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.alpha_selfhosting_pivot(
|
|
+ cert_dir='/pki',
|
|
+ config='/kubeadm.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ store_certs_in_secrets=True,
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'alpha', 'selfhosting', 'pivot', '--force',
|
|
+ '--store-certs-in-secrets',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_alpha_selfhosting_pivot_error(self):
|
|
+ '''
|
|
+ Test kubeadm.alpha_selfhosting_pivot error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.alpha_selfhosting_pivot()
|
|
+
|
|
+ def test_config_images_list(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_list without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_list() == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'list']
|
|
+ )
|
|
+
|
|
+ def test_config_images_list_params(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_list with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'image1\nimage2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_list(
|
|
+ config='/kubeadm.cfg',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'list',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_images_list_error(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_list error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_images_list()
|
|
+
|
|
+ def test_config_images_pull(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_pull without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0,
|
|
+ 'stdout': '[config/images] Pulled image1\n'
|
|
+ '[config/images] Pulled image2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_pull() == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'pull']
|
|
+ )
|
|
+
|
|
+ def test_config_images_pull_params(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_pull with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0,
|
|
+ 'stdout': '[config/images] Pulled image1\n'
|
|
+ '[config/images] Pulled image2\n'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_images_pull(
|
|
+ config='/kubeadm.cfg',
|
|
+ cri_socket='socket',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == ['image1', 'image2']
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'images', 'pull',
|
|
+ '--config', '/kubeadm.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_images_pull_error(self):
|
|
+ '''
|
|
+ Test kubeadm.config_images_pull error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_images_pull()
|
|
+
|
|
+ def test_config_migrate(self):
|
|
+ '''
|
|
+ Test kubeadm.config_migrate without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_migrate('/oldconfig.cfg') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'migrate',
|
|
+ '--old-config', '/oldconfig.cfg']
|
|
+ )
|
|
+
|
|
+ def test_config_migrate_params(self):
|
|
+ '''
|
|
+ Test kubeadm.config_migrate with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_migrate(
|
|
+ '/oldconfig.cfg',
|
|
+ new_config='/newconfig.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'migrate',
|
|
+ '--old-config', '/oldconfig.cfg',
|
|
+ '--new-config', '/newconfig.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_migrate_error(self):
|
|
+ '''
|
|
+ Test kubeadm.config_migrate error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_migrate('/oldconfig.cfg')
|
|
+
|
|
+ def test_config_print_init_defaults(self):
|
|
+ '''
|
|
+ Test kubeadm.config_print_init_defaults without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_init_defaults() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'init-defaults']
|
|
+ )
|
|
+
|
|
+ def test_config_print_init_defaults_params(self):
|
|
+ '''
|
|
+ Test kubeadm.config_print_init_defaults with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_init_defaults(
|
|
+ component_configs='component',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'init-defaults',
|
|
+ '--component-configs', 'component',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_print_init_defaults_error(self):
|
|
+ '''
|
|
+ Test kubeadm.config_print_init_defaults error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_print_init_defaults()
|
|
+
|
|
+ def test_config_print_join_defaults(self):
|
|
+ '''
|
|
+ Test kubeadm.config_print_join_defaults without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_join_defaults() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'join-defaults']
|
|
+ )
|
|
+
|
|
+ def test_config_print_join_defaults_params(self):
|
|
+ '''
|
|
+ Test kubeadm.config_print_join_defaults with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_print_join_defaults(
|
|
+ component_configs='component',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'print', 'join-defaults',
|
|
+ '--component-configs', 'component',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_print_join_defaults_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_print_join_defaults error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_print_join_defaults()
|
|
+
|
|
+ def test_config_upload_from_file(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_file('/config.cfg') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-file',
|
|
+ '--config', '/config.cfg']
|
|
+ )
|
|
+
|
|
+ def test_config_upload_from_file_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_file(
|
|
+ '/config.cfg',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-file',
|
|
+ '--config', '/config.cfg',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_upload_from_file_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_file error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_upload_from_file('/config.cfg')
|
|
+
|
|
+ def test_config_upload_from_flags(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_flags() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-flags']
|
|
+ )
|
|
+
|
|
+ def test_config_upload_from_flags_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_upload_from_flags(
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ apiserver_cert_extra_sans='sans',
|
|
+ cert_dir='/pki',
|
|
+ cri_socket='socket',
|
|
+ feature_gates='k=v',
|
|
+ kubernetes_version='version',
|
|
+ node_name='node-1',
|
|
+ pod_network_cidr='10.1.0.0/12',
|
|
+ service_cidr='10.2.0.0/12',
|
|
+ service_dns_domain='example.org',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'upload', 'from-flags',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--apiserver-cert-extra-sans', 'sans',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--node-name', 'node-1',
|
|
+ '--pod-network-cidr', '10.1.0.0/12',
|
|
+ '--service-cidr', '10.2.0.0/12',
|
|
+ '--service-dns-domain', 'example.org',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_upload_from_flags_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_upload_from_flags error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_upload_from_flags()
|
|
+
|
|
+ def test_config_view(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_view() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'view']
|
|
+ )
|
|
+
|
|
+ def test_config_view_params(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.config_view(
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'config', 'view',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_config_view_error(self):
|
|
+ '''
|
|
+ Test kuebadm.config_view error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.config_view()
|
|
+
|
|
+ def test_init(self):
|
|
+ '''
|
|
+ Test kuebadm.init without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.init() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'init']
|
|
+ )
|
|
+
|
|
+ def test_init_params(self):
|
|
+ '''
|
|
+ Test kuebadm.init with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.init(
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ apiserver_cert_extra_sans='sans',
|
|
+ cert_dir='/pki',
|
|
+ certificate_key='secret',
|
|
+ config='/config.cfg',
|
|
+ cri_socket='socket',
|
|
+ experimental_upload_certs=True,
|
|
+ feature_gates='k=v',
|
|
+ ignore_preflight_errors='all',
|
|
+ image_repository='example.org',
|
|
+ kubernetes_version='version',
|
|
+ node_name='node-1',
|
|
+ pod_network_cidr='10.1.0.0/12',
|
|
+ service_cidr='10.2.0.0/12',
|
|
+ service_dns_domain='example.org',
|
|
+ skip_certificate_key_print=True,
|
|
+ skip_phases='all',
|
|
+ skip_token_print=True,
|
|
+ token='token',
|
|
+ token_ttl='1h1m1s',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'init',
|
|
+ '--experimental-upload-certs',
|
|
+ '--skip-certificate-key-print',
|
|
+ '--skip-token-print',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--apiserver-cert-extra-sans', 'sans',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--certificate-key', 'secret',
|
|
+ '--config', '/config.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--feature-gates', 'k=v',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--image-repository', 'example.org',
|
|
+ '--kubernetes-version', 'version',
|
|
+ '--node-name', 'node-1',
|
|
+ '--pod-network-cidr', '10.1.0.0/12',
|
|
+ '--service-cidr', '10.2.0.0/12',
|
|
+ '--service-dns-domain', 'example.org',
|
|
+ '--skip-phases', 'all',
|
|
+ '--token', 'token',
|
|
+ '--token-ttl', '1h1m1s',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_init_error(self):
|
|
+ '''
|
|
+ Test kuebadm.init error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.init()
|
|
+
|
|
+ def test_join(self):
|
|
+ '''
|
|
+ Test kuebadm.join without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.join() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'join']
|
|
+ )
|
|
+
|
|
+ def test_join_params(self):
|
|
+ '''
|
|
+ Test kuebadm.join with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.join(
|
|
+ api_server_endpoint='10.160.65.165:6443',
|
|
+ apiserver_advertise_address='127.0.0.1',
|
|
+ apiserver_bind_port='1234',
|
|
+ certificate_key='secret',
|
|
+ config='/config.cfg',
|
|
+ cri_socket='socket',
|
|
+ discovery_file='/discovery.cfg',
|
|
+ discovery_token='token',
|
|
+ discovery_token_ca_cert_hash='type:value',
|
|
+ discovery_token_unsafe_skip_ca_verification=True,
|
|
+ experimental_control_plane=True,
|
|
+ ignore_preflight_errors='all',
|
|
+ node_name='node-1',
|
|
+ skip_phases='all',
|
|
+ tls_bootstrap_token='token',
|
|
+ token='token',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'join',
|
|
+ '10.160.65.165:6443',
|
|
+ '--discovery-token-unsafe-skip-ca-verification',
|
|
+ '--experimental-control-plane',
|
|
+ '--apiserver-advertise-address', '127.0.0.1',
|
|
+ '--apiserver-bind-port', '1234',
|
|
+ '--certificate-key', 'secret',
|
|
+ '--config', '/config.cfg',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--discovery-file', '/discovery.cfg',
|
|
+ '--discovery-token', 'token',
|
|
+ '--discovery-token-ca-cert-hash', 'type:value',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--node-name', 'node-1',
|
|
+ '--skip-phases', 'all',
|
|
+ '--tls-bootstrap-token', 'token',
|
|
+ '--token', 'token',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_join_error(self):
|
|
+ '''
|
|
+ Test kuebadm.join error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.join()
|
|
+
|
|
+ def test_reset(self):
|
|
+ '''
|
|
+ Test kuebadm.reset without parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.reset() == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'reset', '--force']
|
|
+ )
|
|
+
|
|
+ def test_reset_params(self):
|
|
+ '''
|
|
+ Test kuebadm.reset with parameters
|
|
+ '''
|
|
+ result = {'retcode': 0, 'stdout': 'stdout'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ assert kubeadm.reset(
|
|
+ cert_dir='/pki',
|
|
+ cri_socket='socket',
|
|
+ ignore_preflight_errors='all',
|
|
+ kubeconfig='/kube.cfg',
|
|
+ rootfs='/mnt') == 'stdout'
|
|
+ salt_mock['cmd.run_all'].assert_called_with(
|
|
+ ['kubeadm', 'reset', '--force',
|
|
+ '--cert-dir', '/pki',
|
|
+ '--cri-socket', 'socket',
|
|
+ '--ignore-preflight-errors', 'all',
|
|
+ '--kubeconfig', '/kube.cfg',
|
|
+ '--rootfs', '/mnt'])
|
|
+
|
|
+ def test_reset_error(self):
|
|
+ '''
|
|
+ Test kuebadm.reset error
|
|
+ '''
|
|
+ result = {'retcode': 1, 'stderr': 'error'}
|
|
+ salt_mock = {
|
|
+ 'cmd.run_all': MagicMock(return_value=result),
|
|
+ }
|
|
+ with patch.dict(kubeadm.__salt__, salt_mock):
|
|
+ with pytest.raises(CommandExecutionError):
|
|
+ assert kubeadm.reset()
|
|
diff --git a/tests/unit/modules/test_rpm_lowpkg.py b/tests/unit/modules/test_rpm_lowpkg.py
index 527c8d3bf8..54b81f6972 100644
--- a/tests/unit/modules/test_rpm_lowpkg.py
+++ b/tests/unit/modules/test_rpm_lowpkg.py
@@ -25,7 +25,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
     def setup_loader_modules(self):
         return {rpm: {'rpm': MagicMock(return_value=MagicMock)}}
 
-    # 'list_pkgs' function tests: 1
+    # 'list_pkgs' function tests: 2
 
     def test_list_pkgs(self):
        '''
@@ -34,13 +34,24 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         mock = MagicMock(return_value='')
         with patch.dict(rpm.__salt__, {'cmd.run': mock}):
             self.assertDictEqual(rpm.list_pkgs(), {})
+            self.assertFalse(_called_with_root(mock))
 
-    # 'verify' function tests: 1
+    def test_list_pkgs_root(self):
+        '''
+        Test if it lists the packages currently installed in a dict,
+        called with the root parameter
+        '''
+        mock = MagicMock(return_value='')
+        with patch.dict(rpm.__salt__, {'cmd.run': mock}):
+            rpm.list_pkgs(root='/')
+            self.assertTrue(_called_with_root(mock))
+
+    # 'verify' function tests: 2
 
     def test_verify(self):
         '''
-        Test if it runs an rpm -Va on a system,
-        and returns the results in a dict
+        Test if it runs an rpm -Va on a system, and returns the
+        results in a dict
         '''
         mock = MagicMock(return_value={'stdout': '',
                                        'stderr': '',
@@ -48,8 +59,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
                                        'pid': 12345})
         with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
             self.assertDictEqual(rpm.verify('httpd'), {})
+            self.assertFalse(_called_with_root(mock))
 
-    # 'file_list' function tests: 1
+    def test_verify_root(self):
+        '''
+        Test if it runs an rpm -Va on a system, and returns the
+        results in a dict, called with the root parameter
+        '''
+        mock = MagicMock(return_value={'stdout': '',
+                                       'stderr': '',
+                                       'retcode': 0,
+                                       'pid': 12345})
+        with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
+            rpm.verify('httpd', root='/')
+            self.assertTrue(_called_with_root(mock))
+
+    # 'file_list' function tests: 2
 
     def test_file_list(self):
         '''
@@ -59,8 +84,20 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(rpm.__salt__, {'cmd.run': mock}):
             self.assertDictEqual(rpm.file_list('httpd'),
                                  {'errors': [], 'files': []})
+            self.assertFalse(_called_with_root(mock))
+
+    def test_file_list_root(self):
+        '''
+        Test if it lists the files that belong to a package, using the
+        root parameter.
+        '''
+
+        mock = MagicMock(return_value='')
+        with patch.dict(rpm.__salt__, {'cmd.run': mock}):
+            rpm.file_list('httpd', root='/')
+            self.assertTrue(_called_with_root(mock))
 
-    # 'file_dict' function tests: 1
+    # 'file_dict' function tests: 2
 
     def test_file_dict(self):
         '''
@@ -70,6 +107,16 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(rpm.__salt__, {'cmd.run': mock}):
             self.assertDictEqual(rpm.file_dict('httpd'),
                                  {'errors': [], 'packages': {}})
+            self.assertFalse(_called_with_root(mock))
+
+    def test_file_dict_root(self):
+        '''
+        Test if it lists the files that belong to a package
+        '''
+        mock = MagicMock(return_value='')
+        with patch.dict(rpm.__salt__, {'cmd.run': mock}):
+            rpm.file_dict('httpd', root='/')
+            self.assertTrue(_called_with_root(mock))
 
     # 'owner' function tests: 1
 
@@ -83,6 +130,7 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         mock = MagicMock(return_value=ret)
         with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
             self.assertEqual(rpm.owner('/usr/bin/salt-jenkins-build'), '')
+            self.assertFalse(_called_with_root(mock))
 
         ret = {'/usr/bin/vim': 'vim-enhanced-7.4.160-1.e17.x86_64',
                '/usr/bin/python': 'python-2.7.5-16.e17.x86_64'}
@@ -91,8 +139,22 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
             self.assertDictEqual(rpm.owner('/usr/bin/python', '/usr/bin/vim'),
                                  ret)
+            self.assertFalse(_called_with_root(mock))
+
+    def test_owner_root(self):
+        '''
+        Test if it returns the name of the package that owns the file,
+        using the root parameter.
+        '''
+        self.assertEqual(rpm.owner(), '')
+
+        ret = 'file /usr/bin/salt-jenkins-build is not owned by any package'
+        mock = MagicMock(return_value=ret)
+        with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
+            rpm.owner('/usr/bin/salt-jenkins-build', root='/')
+            self.assertTrue(_called_with_root(mock))
 
-    # 'checksum' function tests: 1
+    # 'checksum' function tests: 2
 
     def test_checksum(self):
         '''
@@ -107,6 +169,17 @@ class RpmTestCase(TestCase, LoaderModuleMockMixin):
         mock = MagicMock(side_effect=[True, 0, True, 1, False, 0])
         with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
             self.assertDictEqual(rpm.checksum("file1.rpm", "file2.rpm", "file3.rpm"), ret)
+            self.assertFalse(_called_with_root(mock))
+
+    def test_checksum_root(self):
+        '''
+        Test if checksum validates as expected, using the root
+        parameter
+        '''
+        mock = MagicMock(side_effect=[True, 0])
+        with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
+            rpm.checksum("file1.rpm", root='/')
+            self.assertTrue(_called_with_root(mock))
 
     def test_version_cmp_rpm(self):
         '''
diff --git a/tests/unit/modules/test_systemd_service.py b/tests/unit/modules/test_systemd_service.py
index 13ddc394be..752fb1d659 100644
--- a/tests/unit/modules/test_systemd_service.py
+++ b/tests/unit/modules/test_systemd_service.py
@@ -7,6 +7,8 @@
 from __future__ import absolute_import, unicode_literals, print_function
 import os
 
+import pytest
+
 # Import Salt Testing Libs
 from tests.support.mixins import LoaderModuleMockMixin
 from tests.support.unit import TestCase
@@ -643,3 +645,54 @@ class SystemdScopeTestCase(TestCase, LoaderModuleMockMixin):
 
     def test_unmask_runtime(self):
         self._mask_unmask('unmask_', True)
+
+    def test_firstboot(self):
+        '''
+        Test service.firstboot without parameters
+        '''
+        result = {'retcode': 0, 'stdout': 'stdout'}
+        salt_mock = {
+            'cmd.run_all': MagicMock(return_value=result),
+        }
+        with patch.dict(systemd.__salt__, salt_mock):
+            assert systemd.firstboot()
+            salt_mock['cmd.run_all'].assert_called_with(['systemd-firstboot'])
+
+    def test_firstboot_params(self):
+        '''
+        Test service.firstboot with parameters
+        '''
+        result = {'retcode': 0, 'stdout': 'stdout'}
+        salt_mock = {
+            'cmd.run_all': MagicMock(return_value=result),
+        }
+        with patch.dict(systemd.__salt__, salt_mock):
+            assert systemd.firstboot(
+                locale='en_US.UTF-8',
+                locale_message='en_US.UTF-8',
+                keymap='jp',
+                timezone='Europe/Berlin',
+                hostname='node-001',
+                machine_id='1234567890abcdef',
+                root='/mnt')
+            salt_mock['cmd.run_all'].assert_called_with(
+                ['systemd-firstboot',
+                 '--locale', 'en_US.UTF-8',
+                 '--locale-message', 'en_US.UTF-8',
+                 '--keymap', 'jp',
+                 '--timezone', 'Europe/Berlin',
+                 '--hostname', 'node-001',
+                 '--machine-ID', '1234567890abcdef',
+                 '--root', '/mnt'])
+
+    def test_firstboot_error(self):
+        '''
+        Test service.firstboot error
+        '''
+        result = {'retcode': 1, 'stderr': 'error'}
+        salt_mock = {
+            'cmd.run_all': MagicMock(return_value=result),
+        }
+        with patch.dict(systemd.__salt__, salt_mock):
+            with pytest.raises(CommandExecutionError):
+                assert systemd.firstboot()
diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
index 956902eab3..3a6466f061 100644
--- a/tests/unit/modules/test_zypperpkg.py
+++ b/tests/unit/modules/test_zypperpkg.py
@@ -38,6 +38,9 @@ class ZyppCallMock(object):
         return self
 
     def __call__(self, *args, **kwargs):
+        # If the call is for a configuration modifier, we return self
+        if any(i in kwargs for i in ('no_repo_failure', 'systemd_scope', 'root')):
+            return self
         return MagicMock(return_value=self.__return_value)()
 
 
@@ -926,7 +929,7 @@ Repository 'DUMMY' not found by its alias, number, or URI.
                     'pico': '0.1.1',
                 }
 
-            def __call__(self):
+            def __call__(self, root=None, includes=None):
                 pkgs = self._pkgs.copy()
                 for target in self._packages:
                     if self._pkgs.get(target):
@@ -992,10 +995,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
         with zypper_patcher:
             zypper.mod_repo(name, **{'url': url})
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [call('ar', url, name)]
             )
-            self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
+            self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
 
     def test_repo_noadd_nomod_noref(self):
         '''
@@ -1017,8 +1020,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
             self.assertEqual(
                 out['comment'],
                 'Specified arguments did not result in modification of repo')
-            self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
-            self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
+            self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
+            self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
 
     def test_repo_noadd_modbaseurl_ref(self):
         '''
@@ -1046,9 +1049,11 @@ Repository 'DUMMY' not found by its alias, number, or URI.
                 'priority': 1,
                 'cache': False,
                 'keeppackages': False,
-                'type': 'rpm-md'}
-            self.assertTrue(zypper.mod_repo.call_count == 2)
-            self.assertTrue(zypper.mod_repo.mock_calls[1] == call(name, **expected_params))
+                'type': 'rpm-md',
+                'root': None,
+            }
+            self.assertEqual(zypper.mod_repo.call_count, 2)
+            self.assertEqual(zypper.mod_repo.mock_calls[1], call(name, **expected_params))
 
     def test_repo_add_mod_noref(self):
         '''
@@ -1064,10 +1069,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
         with zypper_patcher:
             zypper.mod_repo(name, **{'url': url, 'refresh': True})
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [call('ar', url, name)]
             )
-            zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
+            zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
                 'mr', '--refresh', name
             )
 
@@ -1086,8 +1091,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
             'salt.modules.zypperpkg', **self.zypper_patcher_config)
         with zypper_patcher:
             zypper.mod_repo(name, **{'url': url, 'refresh': True})
-            self.assertTrue(zypper.__zypper__.xml.call.call_count == 0)
-            zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
+            self.assertTrue(zypper.__zypper__(root=None).xml.call.call_count == 0)
+            zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
                 'mr', '--refresh', name
             )
 
@@ -1106,13 +1111,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
         with zypper_patcher:
             zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [
                     call('ar', url, name),
                     call('--gpg-auto-import-keys', 'refresh', name)
                 ]
             )
-            self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
+            self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
 
     def test_repo_noadd_nomod_ref(self):
         '''
@@ -1133,10 +1138,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
         with zypper_patcher:
             zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [call('--gpg-auto-import-keys', 'refresh', name)]
             )
-            self.assertTrue(zypper.__zypper__.refreshable.xml.call.call_count == 0)
+            self.assertTrue(zypper.__zypper__(root=None).refreshable.xml.call.call_count == 0)
 
     def test_repo_add_mod_ref(self):
         '''
@@ -1157,13 +1162,13 @@ Repository 'DUMMY' not found by its alias, number, or URI.
                 **{'url': url, 'refresh': True, 'gpgautoimport': True}
             )
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [
                     call('ar', url, name),
                     call('--gpg-auto-import-keys', 'refresh', name)
                 ]
            )
-            zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
+            zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
                 '--gpg-auto-import-keys', 'mr', '--refresh', name
             )
 
@@ -1189,10 +1194,10 @@ Repository 'DUMMY' not found by its alias, number, or URI.
                 **{'url': url, 'refresh': True, 'gpgautoimport': True}
             )
             self.assertEqual(
-                zypper.__zypper__.xml.call.call_args_list,
+                zypper.__zypper__(root=None).xml.call.call_args_list,
                 [call('--gpg-auto-import-keys', 'refresh', name)]
             )
-            zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
+            zypper.__zypper__(root=None).refreshable.xml.call.assert_called_once_with(
                 '--gpg-auto-import-keys', 'mr', '--refresh', name
             )
 
@@ -1369,3 +1374,58 @@ Repository 'DUMMY' not found by its alias, number, or URI.
         with self.assertRaises(CommandExecutionError):
             for op in ['>>', '==', '<<', '+']:
                 zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op))
+
+    @patch('salt.modules.zypperpkg._get_visible_patterns')
+    def test__get_installed_patterns(self, get_visible_patterns):
+        '''Test installed patterns in the system'''
+        get_visible_patterns.return_value = {
+            'package-a': {
+                'installed': True,
+                'summary': 'description a',
+            },
+            'package-b': {
+                'installed': False,
+                'summary': 'description b',
+            },
+        }
+
+        salt_mock = {
+            'cmd.run': MagicMock(return_value='''pattern() = package-a
+pattern-visible()
+pattern() = package-c'''),
+        }
+        with patch.dict('salt.modules.zypperpkg.__salt__', salt_mock):
+            assert zypper._get_installed_patterns() == {
+                'package-a': {
+                    'installed': True,
+                    'summary': 'description a',
+                },
+                'package-c': {
+                    'installed': True,
+                    'summary': 'Non-visible pattern',
+                },
+            }
+
+    @patch('salt.modules.zypperpkg._get_visible_patterns')
+    def test_list_patterns(self, get_visible_patterns):
+        '''Test available patterns in the repo'''
+        get_visible_patterns.return_value = {
+            'package-a': {
+                'installed': True,
+                'summary': 'description a',
+            },
+            'package-b': {
+                'installed': False,
+                'summary': 'description b',
+            },
+        }
+        assert zypper.list_patterns() == {
+            'package-a': {
+                'installed': True,
+                'summary': 'description a',
+            },
+            'package-b': {
+                'installed': False,
+                'summary': 'description b',
+            },
+        }
diff --git a/tests/unit/states/test_btrfs.py b/tests/unit/states/test_btrfs.py
new file mode 100644
index 0000000000..3f45ed94f9
--- /dev/null
+++ b/tests/unit/states/test_btrfs.py
@@ -0,0 +1,782 @@
+# -*- coding: utf-8 -*-
+#
+# Author: Alberto Planas <aplanas@suse.com>
+#
+# Copyright 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+'''
+:maintainer: Alberto Planas <aplanas@suse.com>
+:platform: Linux
+'''
+# Import Python Libs
+from __future__ import absolute_import, print_function, unicode_literals
+# Import Salt Testing Libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.unit import skipIf, TestCase
+from tests.support.mock import (
+    MagicMock,
+    NO_MOCK,
+    NO_MOCK_REASON,
+    patch,
+)
+
+from salt.exceptions import CommandExecutionError
+import salt.states.btrfs as btrfs
+
+import pytest
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class BtrfsTestCase(TestCase, LoaderModuleMockMixin):
+    '''
+    Test cases for salt.states.btrfs
+    '''
+
+    def setup_loader_modules(self):
+        return {
+            btrfs: {
+                '__salt__': {},
+                '__states__': {},
+                '__utils__': {},
+            }
+        }
+
+    @patch('salt.states.btrfs._umount')
+    @patch('tempfile.mkdtemp')
+    def test__mount_fails(self, mkdtemp, umount):
+        '''
+        Test mounting a device in a temporary place.
+        '''
+        mkdtemp.return_value = '/tmp/xxx'
+        states_mock = {
+            'mount.mounted': MagicMock(return_value={'result': False}),
+        }
+        with patch.dict(btrfs.__states__, states_mock):
+            assert btrfs._mount('/dev/sda1', use_default=False) is None
+            mkdtemp.assert_called_once()
+            states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
+                                                            device='/dev/sda1',
+                                                            fstype='btrfs',
+                                                            opts='subvol=/',
+                                                            persist=False)
+            umount.assert_called_with('/tmp/xxx')
+
+    @patch('salt.states.btrfs._umount')
+    @patch('tempfile.mkdtemp')
+    def test__mount(self, mkdtemp, umount):
+        '''
+        Test mounting a device in a temporary place.
+        '''
+        mkdtemp.return_value = '/tmp/xxx'
+        states_mock = {
+            'mount.mounted': MagicMock(return_value={'result': True}),
+        }
+        with patch.dict(btrfs.__states__, states_mock):
+            assert btrfs._mount('/dev/sda1', use_default=False) == '/tmp/xxx'
+            mkdtemp.assert_called_once()
+            states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
+                                                            device='/dev/sda1',
+                                                            fstype='btrfs',
+                                                            opts='subvol=/',
+                                                            persist=False)
+            umount.assert_not_called()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('tempfile.mkdtemp')
+    def test__mount_use_default(self, mkdtemp, umount):
+        '''
+        Test mounting a device in a temporary place.
+        '''
+        mkdtemp.return_value = '/tmp/xxx'
+        states_mock = {
+            'mount.mounted': MagicMock(return_value={'result': True}),
+        }
+        with patch.dict(btrfs.__states__, states_mock):
+            assert btrfs._mount('/dev/sda1', use_default=True) == '/tmp/xxx'
+            mkdtemp.assert_called_once()
+            states_mock['mount.mounted'].assert_called_with('/tmp/xxx',
+                                                            device='/dev/sda1',
+                                                            fstype='btrfs',
+                                                            opts='defaults',
+                                                            persist=False)
+            umount.assert_not_called()
+
+    def test__umount(self):
+        '''
+        Test umounting and cleaning the temporary place.
+        '''
+        states_mock = {
+            'mount.unmounted': MagicMock(),
+        }
+        utils_mock = {
+            'files.rm_rf': MagicMock(),
+        }
+        with patch.dict(btrfs.__states__, states_mock), \
+                patch.dict(btrfs.__utils__, utils_mock):
+            btrfs._umount('/tmp/xxx')
+            states_mock['mount.unmounted'].assert_called_with('/tmp/xxx')
+            utils_mock['files.rm_rf'].assert_called_with('/tmp/xxx')
+
+    def test__is_default_not_default(self):
+        '''
+        Test if the subvolume is the current default.
+        '''
+        salt_mock = {
+            'btrfs.subvolume_show': MagicMock(return_value={
+                '@/var': {'subvolume id': '256'},
+            }),
+            'btrfs.subvolume_get_default': MagicMock(return_value={
+                'id': '5',
+            }),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert not btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
+            salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
+            salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
+
+    def test__is_default(self):
+        '''
+        Test if the subvolume is the current default.
+        '''
+        salt_mock = {
+            'btrfs.subvolume_show': MagicMock(return_value={
+                '@/var': {'subvolume id': '256'},
+            }),
+            'btrfs.subvolume_get_default': MagicMock(return_value={
+                'id': '256',
+            }),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert btrfs._is_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
+            salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
+            salt_mock['btrfs.subvolume_get_default'].assert_called_with('/tmp/xxx')
+
+    def test__set_default(self):
+        '''
+        Test setting a subvolume as the current default.
+        '''
+        salt_mock = {
+            'btrfs.subvolume_show': MagicMock(return_value={
+                '@/var': {'subvolume id': '256'},
+            }),
+            'btrfs.subvolume_set_default': MagicMock(return_value=True),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert btrfs._set_default('/tmp/xxx/@/var', '/tmp/xxx', '@/var')
+            salt_mock['btrfs.subvolume_show'].assert_called_with('/tmp/xxx/@/var')
+            salt_mock['btrfs.subvolume_set_default'].assert_called_with('256', '/tmp/xxx')
+
+    def test__is_cow_not_cow(self):
+        '''
+        Test if the subvolume is copy on write.
+        '''
+        salt_mock = {
+            'file.lsattr': MagicMock(return_value={
+                '/tmp/xxx/@/var': ['C'],
+            }),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert not btrfs._is_cow('/tmp/xxx/@/var')
+            salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
+
+    def test__is_cow(self):
+        '''
+        Test if the subvolume is copy on write.
+        '''
+        salt_mock = {
+            'file.lsattr': MagicMock(return_value={
+                '/tmp/xxx/@/var': [],
+            }),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert btrfs._is_cow('/tmp/xxx/@/var')
+            salt_mock['file.lsattr'].assert_called_with('/tmp/xxx/@')
+
+    def test__unset_cow(self):
+        '''
+        Test disabling the subvolume as copy on write.
+        '''
+        salt_mock = {
+            'file.chattr': MagicMock(return_value=True),
+        }
+        with patch.dict(btrfs.__salt__, salt_mock):
+            assert btrfs._unset_cow('/tmp/xxx/@/var')
+            salt_mock['file.chattr'].assert_called_with('/tmp/xxx/@/var',
+                                                        operator='add',
+                                                        attributes='C')
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists(self, mount, umount):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1') == {
+                'name': '@/var',
+                'result': True,
+                'changes': {},
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_test(self, mount, umount):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': True,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1') == {
+                'name': '@/var',
+                'result': None,
+                'changes': {},
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._is_default')
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_was_default(self, mount, umount,
+                                                  is_default):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        is_default.return_value = True
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1',
+                                           set_default=True) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {},
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._set_default')
+    @patch('salt.states.btrfs._is_default')
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_set_default(self, mount, umount,
+                                                  is_default, set_default):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        is_default.return_value = False
+        set_default.return_value = True
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1',
+                                           set_default=True) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {
+                    '@/var_default': True
+                },
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._set_default')
+    @patch('salt.states.btrfs._is_default')
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_set_default_no_force(self,
+                                                           mount,
+                                                           umount,
+                                                           is_default,
+                                                           set_default):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        is_default.return_value = False
+        set_default.return_value = True
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1',
+                                           set_default=True,
+                                           force_set_default=False) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {},
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._is_cow')
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_no_cow(self, mount, umount, is_cow):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        is_cow.return_value = False
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1',
+                                           copy_on_write=False) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {},
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._unset_cow')
+    @patch('salt.states.btrfs._is_cow')
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_exists_unset_cow(self, mount, umount,
+                                                is_cow, unset_cow):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        is_cow.return_value = True
+        unset_cow.return_value = True
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=True),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1',
+                                           copy_on_write=False) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {
+                    '@/var_no_cow': True
+                },
+                'comment': ['Subvolume @/var already present'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created(self, mount, umount):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=False),
+            'btrfs.subvolume_create': MagicMock(),
+        }
+        states_mock = {
+            'file.directory': MagicMock(return_value={'result': True}),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__states__, states_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1') == {
+                'name': '@/var',
+                'result': True,
+                'changes': {
+                    '@/var': 'Created subvolume @/var'
+                },
+                'comment': [],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            salt_mock['btrfs.subvolume_create'].assert_called_once()
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_fails_directory(self, mount, umount):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=False),
+        }
+        states_mock = {
+            'file.directory': MagicMock(return_value={'result': False}),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__states__, states_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1') == {
+                'name': '@/var',
+                'result': False,
+                'changes': {},
+                'comment': ['Error creating /tmp/xxx/@ directory'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    def test_subvolume_created_fails(self, mount, umount):
+        '''
+        Test creating a subvolume.
+        '''
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.subvolume_exists': MagicMock(return_value=False),
+            'btrfs.subvolume_create': MagicMock(side_effect=CommandExecutionError),
+        }
+        states_mock = {
+            'file.directory': MagicMock(return_value={'result': True}),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__states__, states_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.subvolume_created(name='@/var',
+                                           device='/dev/sda1') == {
+                'name': '@/var',
+                'result': False,
+                'changes': {},
+                'comment': ['Error creating subvolume @/var'],
+            }
+            salt_mock['btrfs.subvolume_exists'].assert_called_with('/tmp/xxx/@/var')
+            salt_mock['btrfs.subvolume_create'].assert_called_once()
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    def test_diff_properties_fails(self):
+        '''
+        Test when diff_properties does not find a property
+        '''
+        expected = {
+            'wrong_property': True
+        }
+        current = {
+            'compression': {
+                'description': 'Set/get compression for a file or directory',
+                'value': 'N/A',
+            },
+            'label': {
+                'description': 'Set/get label of device.',
+                'value': 'N/A',
+            },
+            'ro': {
+                'description': 'Set/get read-only flag or subvolume',
+                'value': 'N/A',
+            },
+        }
+        with pytest.raises(Exception):
+            btrfs._diff_properties(expected, current)
+
+    def test_diff_properties_enable_ro(self):
+        '''
+        Test when diff_properties enables a single property
+        '''
+        expected = {
+            'ro': True
+        }
+        current = {
+            'compression': {
+                'description': 'Set/get compression for a file or directory',
+                'value': 'N/A',
+            },
+            'label': {
+                'description': 'Set/get label of device.',
+                'value': 'N/A',
+            },
+            'ro': {
+                'description': 'Set/get read-only flag or subvolume',
+                'value': 'N/A',
+            },
+        }
+        assert btrfs._diff_properties(expected, current) == {'ro': True}
+
+    def test_diff_properties_only_enable_ro(self):
+        '''
+        Test when only some of the expected properties need to change
+        '''
+        expected = {
+            'ro': True,
+            'label': 'mylabel'
+        }
+        current = {
+            'compression': {
+                'description': 'Set/get compression for a file or directory',
+                'value': 'N/A',
+            },
+            'label': {
+                'description': 'Set/get label of device.',
+                'value': 'mylabel',
+            },
+            'ro': {
+                'description': 'Set/get read-only flag or subvolume',
+                'value': 'N/A',
+            },
+        }
+        assert btrfs._diff_properties(expected, current) == {'ro': True}
+
+    def test_diff_properties_disable_ro(self):
+        '''
+        Test when diff_properties disables a single property
+        '''
+        expected = {
+            'ro': False
+        }
+        current = {
+            'compression': {
+                'description': 'Set/get compression for a file or directory',
+                'value': 'N/A',
+            },
+            'label': {
+                'description': 'Set/get label of device.',
+                'value': 'N/A',
+            },
+            'ro': {
+                'description': 'Set/get read-only flag or subvolume',
+                'value': True,
+            },
+        }
+        assert btrfs._diff_properties(expected, current) == {'ro': False}
+
+    def test_diff_properties_emty_na(self):
+        '''
+        Test when the property is already disabled as N/A
+        '''
+        expected = {
+            'ro': False
+        }
+        current = {
+            'compression': {
+                'description': 'Set/get compression for a file or directory',
+                'value': 'N/A',
+            },
+            'label': {
+                'description': 'Set/get label of device.',
+                'value': 'N/A',
+            },
+            'ro': {
+                'description': 'Set/get read-only flag or subvolume',
+                'value': 'N/A',
+            },
+        }
+        assert btrfs._diff_properties(expected, current) == {}
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    @patch('os.path.exists')
+    def test_properties_subvolume_not_exists(self, exists, mount, umount):
+        '''
+        Test when the subvolume is not present
+        '''
+        exists.return_value = False
+        mount.return_value = '/tmp/xxx'
+        assert btrfs.properties(name='@/var',
+                                device='/dev/sda1') == {
+            'name': '@/var',
+            'result': False,
+            'changes': {},
+            'comment': ['Object @/var not found'],
+        }
+        mount.assert_called_once()
+        umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    @patch('os.path.exists')
+    def test_properties_default_root_subvolume(self, exists, mount, umount):
+        '''
+        Test when the root subvolume resolves to another subvolume
+        '''
+        exists.return_value = False
+        mount.return_value = '/tmp/xxx'
+        assert btrfs.properties(name='/',
+                                device='/dev/sda1') == {
+            'name': '/',
+            'result': False,
+            'changes': {},
+            'comment': ['Object / not found'],
+        }
+        exists.assert_called_with('/tmp/xxx/.')
+
+    @patch('os.path.exists')
+    def test_properties_device_fail(self, exists):
+        '''
+        Test when we try to set a device that is not present
+        '''
+        exists.return_value = False
+        assert btrfs.properties(name='/dev/sda1',
+                                device=None) == {
+            'name': '/dev/sda1',
+            'result': False,
+            'changes': {},
+            'comment': ['Object /dev/sda1 not found'],
+        }
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    @patch('os.path.exists')
+    def test_properties_subvolume_fail(self, exists, mount, umount):
+        '''
+        Test setting a wrong property in a subvolume
+        '''
+        exists.return_value = True
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.properties': MagicMock(side_effect=[
+                {
+                    'ro': {
+                        'description': 'Set/get read-only flag or subvolume',
+                        'value': 'N/A',
+                    },
+                }
+            ]),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.properties(name='@/var',
+                                    device='/dev/sda1',
+                                    wrond_property=True) == {
+                'name': '@/var',
+                'result': False,
+                'changes': {},
+                'comment': ['Some property not found in @/var'],
+            }
+            salt_mock['btrfs.properties'].assert_called_with('/tmp/xxx/@/var')
+            mount.assert_called_once()
+            umount.assert_called_once()
+
+    @patch('salt.states.btrfs._umount')
+    @patch('salt.states.btrfs._mount')
+    @patch('os.path.exists')
+    def test_properties_enable_ro_subvolume(self, exists, mount, umount):
+        '''
+        Test setting a ro property in a subvolume
+        '''
+        exists.return_value = True
+        mount.return_value = '/tmp/xxx'
+        salt_mock = {
+            'btrfs.properties': MagicMock(side_effect=[
+                {
+                    'ro': {
+                        'description': 'Set/get read-only flag or subvolume',
+                        'value': 'N/A',
+                    },
+                },
+                None,
+                {
+                    'ro': {
+                        'description': 'Set/get read-only flag or subvolume',
+                        'value': 'true',
+                    },
+                }
+            ]),
+        }
+        opts_mock = {
+            'test': False,
+        }
+        with patch.dict(btrfs.__salt__, salt_mock), \
+                patch.dict(btrfs.__opts__, opts_mock):
+            assert btrfs.properties(name='@/var',
+                                    device='/dev/sda1', ro=True) == {
+                'name': '@/var',
+                'result': True,
+                'changes': {'ro': 'true'},
+                'comment': ['Properties changed in @/var'],
+            }
+            salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var')
+            salt_mock['btrfs.properties'].assert_any_call('/tmp/xxx/@/var',
+                                                          set='ro=true')
+            mount.assert_called_once()
+            umount.assert_called_once()
diff --git a/tests/unit/states/test_pkg.py b/tests/unit/states/test_pkg.py
index 174ab65ab8..38f72353fa 100644
--- a/tests/unit/states/test_pkg.py
+++ b/tests/unit/states/test_pkg.py
@@ -43,7 +43,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
             pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
         })
         upgrade = MagicMock(return_value=self.pkgs)
-        version = MagicMock(side_effect=lambda pkgname: self.pkgs[pkgname]['old'])
+        version = MagicMock(side_effect=lambda pkgname, **_: self.pkgs[pkgname]['old'])
 
         with patch.dict(pkg.__salt__,
                         {'pkg.list_upgrades': list_upgrades,
@@ -52,7 +52,6 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
 
         # Run state with test=false
         with patch.dict(pkg.__opts__, {'test': False}):
-
             ret = pkg.uptodate('dummy', test=True)
             self.assertTrue(ret['result'])
             self.assertDictEqual(ret['changes'], self.pkgs)
@@ -78,7 +77,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
             pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
         })
         upgrade = MagicMock(return_value=self.pkgs)
-        version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
+        version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
 
         with patch.dict(pkg.__salt__,
                         {'pkg.list_upgrades': list_upgrades,
@@ -157,7 +156,7 @@ class PkgTestCase(TestCase, LoaderModuleMockMixin):
             pkgname: pkgver['new'] for pkgname, pkgver in six.iteritems(self.pkgs)
         })
         upgrade = MagicMock(return_value={})
-        version = MagicMock(side_effect=lambda pkgname: pkgs[pkgname]['old'])
+        version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]['old'])
 
         with patch.dict(pkg.__salt__,
                         {'pkg.list_upgrades': list_upgrades,
diff --git a/tests/unit/test_loader.py b/tests/unit/test_loader.py
index c0877ff811..fe11cd0681 100644
--- a/tests/unit/test_loader.py
+++ b/tests/unit/test_loader.py
@@ -128,6 +128,97 @@ class LazyLoaderTest(TestCase):
         self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
 
 
+loader_template_module = '''
+import my_utils
+
+def run():
+    return my_utils.run()
+'''
+
+loader_template_utils = '''
+def run():
+    return True
+'''
+
+
+class LazyLoaderUtilsTest(TestCase):
+    '''
+    Test the loader
+    '''
+    module_name = 'lazyloaderutilstest'
+    utils_name = 'my_utils'
+
+    @classmethod
+    def setUpClass(cls):
+        cls.opts = salt.config.minion_config(None)
+        cls.opts['grains'] = salt.loader.grains(cls.opts)
+        if not os.path.isdir(TMP):
+            os.makedirs(TMP)
+
+    def setUp(self):
+        # Setup the module
+        self.module_dir = tempfile.mkdtemp(dir=TMP)
+        self.module_file = os.path.join(self.module_dir,
+                                        '{}.py'.format(self.module_name))
+        with salt.utils.files.fopen(self.module_file, 'w') as fh:
+            fh.write(salt.utils.stringutils.to_str(loader_template_module))
+            fh.flush()
+            os.fsync(fh.fileno())
+
+        self.utils_dir = tempfile.mkdtemp(dir=TMP)
+        self.utils_file = os.path.join(self.utils_dir,
+                                       '{}.py'.format(self.utils_name))
+        with salt.utils.files.fopen(self.utils_file, 'w') as fh:
+            fh.write(salt.utils.stringutils.to_str(loader_template_utils))
+            fh.flush()
+            os.fsync(fh.fileno())
+
+    def tearDown(self):
+        shutil.rmtree(self.module_dir)
+        if os.path.isdir(self.module_dir):
+            shutil.rmtree(self.module_dir)
+        shutil.rmtree(self.utils_dir)
+        if os.path.isdir(self.utils_dir):
+            shutil.rmtree(self.utils_dir)
+        del self.module_dir
+        del self.module_file
+        del self.utils_dir
+        del self.utils_file
+
+        if self.module_name in sys.modules:
+            del sys.modules[self.module_name]
+        if self.utils_name in sys.modules:
+            del sys.modules[self.utils_name]
+
+    @classmethod
+    def tearDownClass(cls):
+        del cls.opts
+
+    def test_utils_found(self):
+        '''
+        Test that the extra module directory is available for imports
+        '''
+        loader = salt.loader.LazyLoader(
+            [self.module_dir],
+            copy.deepcopy(self.opts),
+            tag='module',
+            extra_module_dirs=[self.utils_dir])
+        self.assertTrue(
+            inspect.isfunction(
+                loader[self.module_name + '.run']))
+        self.assertTrue(loader[self.module_name + '.run']())
+
+    def test_utils_not_found(self):
+        '''
+        Test that the extra module directory is not available for imports
+        '''
+        loader = salt.loader.LazyLoader(
+            [self.module_dir],
+            copy.deepcopy(self.opts),
+            tag='module')
+        self.assertTrue(self.module_name + '.run' not in loader)
+
+
 class LazyLoaderVirtualEnabledTest(TestCase):
     '''
     Test the base loader of salt.
@@ -1078,8 +1169,9 @@ class LoaderGlobalsTest(ModuleCase):
 
         # Now, test each module!
         for item in global_vars:
-            for name in names:
-                self.assertIn(name, list(item.keys()))
+            if item['__name__'].startswith('salt.loaded'):
+                for name in names:
+                    self.assertIn(name, list(item.keys()))
 
     def test_auth(self):
         '''
-- 
2.16.4
