Accepting request 900349 from Base:System

OBS-URL: https://build.opensuse.org/request/show/900349
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/lvm2?expand=0&rev=147
This commit is contained in:
Dominique Leuenberger 2021-06-19 21:01:46 +00:00 committed by Git OBS Bridge
commit d176024635
56 changed files with 8265 additions and 1505 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,708 @@
From d02f5392a0ed8a5f6b7aa14642db1f46ac0682d8 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Fri, 7 May 2021 10:25:13 +0800
Subject: [PATCH 02/33] lvmlockd: idm: Hook Seagate IDM wrapper APIs
To allow the IDM locking scheme be used by users, this patch hooks the
IDM wrapper; it also introduces a new locking type "idm" and we can use
it for global lock with option '-g idm'.
To support IDM locking type, the main change in the data structure is to
add pvs path array. The pvs list is transferred from the lvm commands,
when lvmlockd core layer receives message, it extracts the message with
the keyword "path[idx]". Finally, the pv list will pass to IDM lock
manager as the target drives for sending IDM SCSI commands.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
daemons/lvmlockd/lvmlockd-core.c | 281 +++++++++++++++++++++++++++++++----
daemons/lvmlockd/lvmlockd-internal.h | 4 +-
2 files changed, 257 insertions(+), 28 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 238ec718b45a..ea76f2214b3e 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -421,6 +421,63 @@ struct lockspace *alloc_lockspace(void)
return ls;
}
+/*
+ * Allocate a zeroed array of "num" path pointers in pvs->path.
+ * Returns the array, or NULL when num is 0 or allocation fails.
+ * The caller owns the array and releases it with free_pvs_path().
+ */
+static char **alloc_pvs_path(struct pvs *pvs, int num)
+{
+ if (!num)
+ return NULL;
+
+ /*
+ * calloc() zero-initializes the slots (the original used
+ * malloc + memset) and additionally checks num * sizeof(char *)
+ * for multiplication overflow.
+ */
+ pvs->path = calloc(num, sizeof(char *));
+ return pvs->path;
+}
+
+/*
+ * Free every duplicated path string and then the path array itself;
+ * resets the freed pointers so the structure can be reused safely.
+ */
+static void free_pvs_path(struct pvs *pvs)
+{
+ int i;
+
+ /* Guard the loop: pvs->num may be set while path was never allocated. */
+ if (!pvs->path)
+ return;
+
+ for (i = 0; i < pvs->num; i++) {
+ if (!pvs->path[i])
+ continue;
+
+ free(pvs->path[i]);
+ pvs->path[i] = NULL;
+ }
+
+ /*
+ * Fix: the original wrote "if (!pvs->path) { free(pvs->path); ... }",
+ * so the array was only freed when it was NULL (a no-op) and was
+ * leaked in every real case.  Free it unconditionally.
+ */
+ free(pvs->path);
+ pvs->path = NULL;
+}
+
+/*
+ * Deep-copy the PV path list from src into dst.
+ * Entries that are NULL or the placeholder string "none" are skipped,
+ * so dst->num may end up smaller than src->num (the dst array is still
+ * sized for src->num slots; unused tail slots stay NULL).
+ * Returns dst->path on success; on failure everything copied so far is
+ * released via free_pvs_path() and NULL is returned.
+ */
+static char **alloc_and_copy_pvs_path(struct pvs *dst, struct pvs *src)
+{
+ int i;
+
+ if (!alloc_pvs_path(dst, src->num))
+ return NULL;
+
+ /* Recount while copying, since skipped entries shrink the list. */
+ dst->num = 0;
+ for (i = 0; i < src->num; i++) {
+ if (!src->path[i] || !strcmp(src->path[i], "none"))
+ continue;
+
+ dst->path[dst->num] = strdup(src->path[i]);
+ if (!dst->path[dst->num]) {
+ log_error("out of memory for copying pvs path");
+ goto failed;
+ }
+ dst->num++;
+ }
+ return dst->path;
+
+failed:
+ /* Unwind the partial copy; dst is left zeroed and reusable. */
+ free_pvs_path(dst);
+ return NULL;
+}
+
static struct action *alloc_action(void)
{
struct action *act;
@@ -510,6 +567,9 @@ static void free_action(struct action *act)
free(act->path);
act->path = NULL;
}
+
+ free_pvs_path(&act->pvs);
+
pthread_mutex_lock(&unused_struct_mutex);
if (unused_action_count >= MAX_UNUSED_ACTION) {
free(act);
@@ -564,9 +624,12 @@ static int setup_structs(void)
struct lock *lk;
int data_san = lm_data_size_sanlock();
int data_dlm = lm_data_size_dlm();
+ int data_idm = lm_data_size_idm();
int i;
resource_lm_data_size = data_san > data_dlm ? data_san : data_dlm;
+ resource_lm_data_size = resource_lm_data_size > data_idm ?
+ resource_lm_data_size : data_idm;
pthread_mutex_init(&unused_struct_mutex, NULL);
INIT_LIST_HEAD(&unused_action);
@@ -683,6 +746,8 @@ static const char *lm_str(int x)
return "dlm";
case LD_LM_SANLOCK:
return "sanlock";
+ case LD_LM_IDM:
+ return "idm";
default:
return "lm_unknown";
}
@@ -968,6 +1033,8 @@ static int lm_prepare_lockspace(struct lockspace *ls, struct action *act)
rv = lm_prepare_lockspace_dlm(ls);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_prepare_lockspace_sanlock(ls);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_prepare_lockspace_idm(ls);
else
return -1;
@@ -984,6 +1051,8 @@ static int lm_add_lockspace(struct lockspace *ls, struct action *act, int adopt)
rv = lm_add_lockspace_dlm(ls, adopt);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_add_lockspace_sanlock(ls, adopt);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_add_lockspace_idm(ls, adopt);
else
return -1;
@@ -1000,6 +1069,8 @@ static int lm_rem_lockspace(struct lockspace *ls, struct action *act, int free_v
rv = lm_rem_lockspace_dlm(ls, free_vg);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_rem_lockspace_sanlock(ls, free_vg);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_rem_lockspace_idm(ls, free_vg);
else
return -1;
@@ -1017,6 +1088,9 @@ static int lm_lock(struct lockspace *ls, struct resource *r, int mode, struct ac
rv = lm_lock_dlm(ls, r, mode, vb_out, adopt);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_lock_sanlock(ls, r, mode, vb_out, retry, adopt);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_lock_idm(ls, r, mode, vb_out, act->lv_uuid,
+ &act->pvs, adopt);
else
return -1;
@@ -1034,6 +1108,8 @@ static int lm_convert(struct lockspace *ls, struct resource *r,
rv = lm_convert_dlm(ls, r, mode, r_version);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_convert_sanlock(ls, r, mode, r_version);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_convert_idm(ls, r, mode, r_version);
else
return -1;
@@ -1051,6 +1127,8 @@ static int lm_unlock(struct lockspace *ls, struct resource *r, struct action *ac
rv = lm_unlock_dlm(ls, r, r_version, lmu_flags);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_unlock_sanlock(ls, r, r_version, lmu_flags);
+ else if (ls->lm_type == LD_LM_IDM)
+ rv = lm_unlock_idm(ls, r, r_version, lmu_flags);
else
return -1;
@@ -1065,6 +1143,8 @@ static int lm_hosts(struct lockspace *ls, int notify)
return lm_hosts_dlm(ls, notify);
else if (ls->lm_type == LD_LM_SANLOCK)
return lm_hosts_sanlock(ls, notify);
+ else if (ls->lm_type == LD_LM_IDM)
+ return lm_hosts_idm(ls, notify);
return -1;
}
@@ -1074,6 +1154,8 @@ static void lm_rem_resource(struct lockspace *ls, struct resource *r)
lm_rem_resource_dlm(ls, r);
else if (ls->lm_type == LD_LM_SANLOCK)
lm_rem_resource_sanlock(ls, r);
+ else if (ls->lm_type == LD_LM_IDM)
+ lm_rem_resource_idm(ls, r);
}
static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset, int *sector_size, int *align_size)
@@ -1082,6 +1164,8 @@ static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset, int *s
return 0;
else if (ls->lm_type == LD_LM_SANLOCK)
return lm_find_free_lock_sanlock(ls, free_offset, sector_size, align_size);
+ else if (ls->lm_type == LD_LM_IDM)
+ return 0;
return -1;
}
@@ -1690,8 +1774,8 @@ static int res_update(struct lockspace *ls, struct resource *r,
}
/*
- * There is nothing to deallocate when freeing a dlm LV, the LV
- * will simply be unlocked by rem_resource.
+ * For DLM and IDM locking scheme, there is nothing to deallocate when freeing a
+ * LV, the LV will simply be unlocked by rem_resource.
*/
static int free_lv(struct lockspace *ls, struct resource *r)
@@ -1700,6 +1784,8 @@ static int free_lv(struct lockspace *ls, struct resource *r)
return lm_free_lv_sanlock(ls, r);
else if (ls->lm_type == LD_LM_DLM)
return 0;
+ else if (ls->lm_type == LD_LM_IDM)
+ return 0;
else
return -EINVAL;
}
@@ -2758,6 +2844,8 @@ out_act:
ls->drop_vg = drop_vg;
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
global_dlm_lockspace_exists = 0;
+ if (ls->lm_type == LD_LM_IDM && !strcmp(ls->name, gl_lsname_idm))
+ global_idm_lockspace_exists = 0;
/*
* Avoid a name collision of the same lockspace is added again before
@@ -2849,6 +2937,8 @@ static void gl_ls_name(char *ls_name)
memcpy(ls_name, gl_lsname_dlm, MAX_NAME);
else if (gl_use_sanlock)
memcpy(ls_name, gl_lsname_sanlock, MAX_NAME);
+ else if (gl_use_idm)
+ memcpy(ls_name, gl_lsname_idm, MAX_NAME);
else
memset(ls_name, 0, MAX_NAME);
}
@@ -2877,9 +2967,20 @@ static int add_lockspace_thread(const char *ls_name,
strncpy(ls->name, ls_name, MAX_NAME);
ls->lm_type = lm_type;
- if (act)
+ if (act) {
ls->start_client_id = act->client_id;
+ /*
+ * Copy the PV list into the lockspace structure so it
+ * can be used for VG locking with the idm scheme.
+ */
+ if (lm_type == LD_LM_IDM &&
+ !alloc_and_copy_pvs_path(&ls->pvs, &act->pvs)) {
+ free(ls);
+ return -ENOMEM;
+ }
+ }
+
if (vg_uuid)
strncpy(ls->vg_uuid, vg_uuid, 64);
@@ -2906,6 +3007,18 @@ static int add_lockspace_thread(const char *ls_name,
pthread_mutex_lock(&lockspaces_mutex);
ls2 = find_lockspace_name(ls->name);
if (ls2) {
+ /*
+ * If find an existed lockspace, we need to update the PV list
+ * based on the latest information, and release for the old
+ * PV list in case it keeps stale information.
+ */
+ free_pvs_path(&ls2->pvs);
+ if (lm_type == LD_LM_IDM &&
+ !alloc_and_copy_pvs_path(&ls2->pvs, &ls->pvs)) {
+ log_debug("add_lockspace_thread %s fails to allocate pvs", ls->name);
+ rv = -ENOMEM;
+ }
+
if (ls2->thread_stop) {
log_debug("add_lockspace_thread %s exists and stopping", ls->name);
rv = -EAGAIN;
@@ -2918,6 +3031,7 @@ static int add_lockspace_thread(const char *ls_name,
}
pthread_mutex_unlock(&lockspaces_mutex);
free_resource(r);
+ free_pvs_path(&ls->pvs);
free(ls);
return rv;
}
@@ -2931,6 +3045,8 @@ static int add_lockspace_thread(const char *ls_name,
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
global_dlm_lockspace_exists = 1;
+ if (ls->lm_type == LD_LM_IDM && !strcmp(ls->name, gl_lsname_idm))
+ global_idm_lockspace_exists = 1;
list_add_tail(&ls->list, &lockspaces);
pthread_mutex_unlock(&lockspaces_mutex);
@@ -2941,6 +3057,7 @@ static int add_lockspace_thread(const char *ls_name,
list_del(&ls->list);
pthread_mutex_unlock(&lockspaces_mutex);
free_resource(r);
+ free_pvs_path(&ls->pvs);
free(ls);
return rv;
}
@@ -2949,16 +3066,15 @@ static int add_lockspace_thread(const char *ls_name,
}
/*
- * There is no add_sanlock_global_lockspace or
- * rem_sanlock_global_lockspace because with sanlock,
- * the global lockspace is one of the vg lockspaces.
+ * There is no variant for sanlock because, with sanlock, the global
+ * lockspace is one of the vg lockspaces.
*/
-
-static int add_dlm_global_lockspace(struct action *act)
+static int add_global_lockspace(char *ls_name, int lm_type,
+ struct action *act)
{
int rv;
- if (global_dlm_lockspace_exists)
+ if (global_dlm_lockspace_exists || global_idm_lockspace_exists)
return 0;
/*
@@ -2966,9 +3082,9 @@ static int add_dlm_global_lockspace(struct action *act)
* lock request, insert an internal gl sh lock request?
*/
- rv = add_lockspace_thread(gl_lsname_dlm, NULL, NULL, LD_LM_DLM, NULL, act);
+ rv = add_lockspace_thread(ls_name, NULL, NULL, lm_type, NULL, act);
if (rv < 0)
- log_debug("add_dlm_global_lockspace add_lockspace_thread %d", rv);
+ log_debug("add_global_lockspace add_lockspace_thread %d", rv);
/*
* EAGAIN may be returned for a short period because
@@ -2981,12 +3097,12 @@ static int add_dlm_global_lockspace(struct action *act)
}
/*
- * If dlm gl lockspace is the only one left, then stop it.
- * This is not used for an explicit rem_lockspace action from
- * the client, only for auto remove.
+ * When DLM or IDM locking scheme is used for global lock, if the global
+ * lockspace is the only one left, then stop it. This is not used for
+ * an explicit rem_lockspace action from the client, only for auto
+ * remove.
*/
-
-static int rem_dlm_global_lockspace(void)
+static int rem_global_lockspace(char *ls_name)
{
struct lockspace *ls, *ls_gl = NULL;
int others = 0;
@@ -2994,7 +3110,7 @@ static int rem_dlm_global_lockspace(void)
pthread_mutex_lock(&lockspaces_mutex);
list_for_each_entry(ls, &lockspaces, list) {
- if (!strcmp(ls->name, gl_lsname_dlm)) {
+ if (!strcmp(ls->name, ls_name)) {
ls_gl = ls;
continue;
}
@@ -3026,6 +3142,26 @@ out:
return rv;
}
+/* Start the dlm global lockspace (thin wrapper over add_global_lockspace). */
+static int add_dlm_global_lockspace(struct action *act)
+{
+ return add_global_lockspace(gl_lsname_dlm, LD_LM_DLM, act);
+}
+
+/* Auto-remove the dlm global lockspace when it is the last one left. */
+static int rem_dlm_global_lockspace(void)
+{
+ return rem_global_lockspace(gl_lsname_dlm);
+}
+
+/* Start the idm global lockspace (thin wrapper over add_global_lockspace). */
+static int add_idm_global_lockspace(struct action *act)
+{
+ return add_global_lockspace(gl_lsname_idm, LD_LM_IDM, act);
+}
+
+/* Auto-remove the idm global lockspace when it is the last one left. */
+static int rem_idm_global_lockspace(void)
+{
+ return rem_global_lockspace(gl_lsname_idm);
+}
+
+
/*
* When the first dlm lockspace is added for a vg, automatically add a separate
* dlm lockspace for the global lock.
@@ -3051,6 +3187,9 @@ static int add_lockspace(struct action *act)
if (gl_use_dlm) {
rv = add_dlm_global_lockspace(act);
return rv;
+ } else if (gl_use_idm) {
+ rv = add_idm_global_lockspace(act);
+ return rv;
} else {
return -EINVAL;
}
@@ -3059,6 +3198,8 @@ static int add_lockspace(struct action *act)
if (act->rt == LD_RT_VG) {
if (gl_use_dlm)
add_dlm_global_lockspace(NULL);
+ else if (gl_use_idm)
+ add_idm_global_lockspace(NULL);
vg_ls_name(act->vg_name, ls_name);
@@ -3126,14 +3267,15 @@ static int rem_lockspace(struct action *act)
pthread_mutex_unlock(&lockspaces_mutex);
/*
- * The dlm global lockspace was automatically added when
- * the first dlm vg lockspace was added, now reverse that
- * by automatically removing the dlm global lockspace when
- * the last dlm vg lockspace is removed.
+ * For DLM and IDM locking scheme, the global lockspace was
+ * automatically added when the first vg lockspace was added,
+ * now reverse that by automatically removing the dlm global
+ * lockspace when the last vg lockspace is removed.
*/
-
if (rt == LD_RT_VG && gl_use_dlm)
rem_dlm_global_lockspace();
+ else if (rt == LD_RT_VG && gl_use_idm)
+ rem_idm_global_lockspace();
return 0;
}
@@ -3257,6 +3399,7 @@ static int for_each_lockspace(int do_stop, int do_free, int do_force)
if (ls->free_vg) {
/* In future we may need to free ls->actions here */
free_ls_resources(ls);
+ free_pvs_path(&ls->pvs);
free(ls);
free_count++;
}
@@ -3270,6 +3413,7 @@ static int for_each_lockspace(int do_stop, int do_free, int do_force)
if (!gl_type_static) {
gl_use_dlm = 0;
gl_use_sanlock = 0;
+ gl_use_idm = 0;
}
}
pthread_mutex_unlock(&lockspaces_mutex);
@@ -3345,6 +3489,9 @@ static int work_init_vg(struct action *act)
rv = lm_init_vg_sanlock(ls_name, act->vg_name, act->flags, act->vg_args);
else if (act->lm_type == LD_LM_DLM)
rv = lm_init_vg_dlm(ls_name, act->vg_name, act->flags, act->vg_args);
+ else if (act->lm_type == LD_LM_IDM)
+ /* Don't do anything for IDM when initializing a VG */
+ rv = 0;
else
rv = -EINVAL;
@@ -3448,6 +3595,8 @@ static int work_init_lv(struct action *act)
} else if (act->lm_type == LD_LM_DLM) {
return 0;
+ } else if (act->lm_type == LD_LM_IDM) {
+ return 0;
} else {
log_error("init_lv ls_name %s bad lm_type %d", ls_name, act->lm_type);
return -EINVAL;
@@ -3511,20 +3660,29 @@ static void *worker_thread_main(void *arg_in)
if (act->op == LD_OP_RUNNING_LM) {
int run_sanlock = lm_is_running_sanlock();
int run_dlm = lm_is_running_dlm();
+ int run_idm = lm_is_running_idm();
if (daemon_test) {
run_sanlock = gl_use_sanlock;
run_dlm = gl_use_dlm;
+ run_idm = gl_use_idm;
}
- if (run_sanlock && run_dlm)
+ /*
+ * It's not possible to enable multiple locking schemes
+ * for global lock, otherwise, it must be conflict and
+ * reports it!
+ */
+ if ((run_sanlock + run_dlm + run_idm) >= 2)
act->result = -EXFULL;
- else if (!run_sanlock && !run_dlm)
+ else if (!run_sanlock && !run_dlm && !run_idm)
act->result = -ENOLCK;
else if (run_sanlock)
act->result = LD_LM_SANLOCK;
else if (run_dlm)
act->result = LD_LM_DLM;
+ else if (run_idm)
+ act->result = LD_LM_IDM;
add_client_result(act);
} else if ((act->op == LD_OP_LOCK) && (act->flags & LD_AF_SEARCH_LS)) {
@@ -3812,6 +3970,9 @@ static int client_send_result(struct client *cl, struct action *act)
} else if (gl_use_dlm) {
if (!gl_lsname_dlm[0])
strcat(result_flags, "NO_GL_LS,");
+ } else if (gl_use_idm) {
+ if (!gl_lsname_idm[0])
+ strcat(result_flags, "NO_GL_LS,");
} else {
int found_lm = 0;
@@ -3819,6 +3980,8 @@ static int client_send_result(struct client *cl, struct action *act)
found_lm++;
if (lm_support_sanlock() && lm_is_running_sanlock())
found_lm++;
+ if (lm_support_idm() && lm_is_running_idm())
+ found_lm++;
if (!found_lm)
strcat(result_flags, "NO_GL_LS,NO_LM");
@@ -3994,11 +4157,13 @@ static int add_lock_action(struct action *act)
if (gl_use_sanlock && (act->op == LD_OP_ENABLE || act->op == LD_OP_DISABLE)) {
vg_ls_name(act->vg_name, ls_name);
} else {
- if (!gl_use_dlm && !gl_use_sanlock) {
+ if (!gl_use_dlm && !gl_use_sanlock && !gl_use_idm) {
if (lm_is_running_dlm())
gl_use_dlm = 1;
else if (lm_is_running_sanlock())
gl_use_sanlock = 1;
+ else if (lm_is_running_idm())
+ gl_use_idm = 1;
}
gl_ls_name(ls_name);
}
@@ -4046,6 +4211,17 @@ static int add_lock_action(struct action *act)
add_dlm_global_lockspace(NULL);
goto retry;
+ } else if (act->op == LD_OP_LOCK && act->rt == LD_RT_GL && act->mode != LD_LK_UN && gl_use_idm) {
+ /*
+ * Automatically start the idm global lockspace when
+ * a command tries to acquire the global lock.
+ */
+ log_debug("lockspace \"%s\" not found for idm gl, adding...", ls_name);
+ act->flags |= LD_AF_SEARCH_LS;
+ act->flags |= LD_AF_WAIT_STARTING;
+ add_idm_global_lockspace(NULL);
+ goto retry;
+
} else if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) {
log_debug("lockspace \"%s\" not found for unlock ignored", ls_name);
return -ENOLS;
@@ -4266,6 +4442,8 @@ static int str_to_lm(const char *str)
return LD_LM_SANLOCK;
if (!strcmp(str, "dlm"))
return LD_LM_DLM;
+ if (!strcmp(str, "idm"))
+ return LD_LM_IDM;
return -2;
}
@@ -4601,12 +4779,14 @@ static void client_recv_action(struct client *cl)
const char *vg_sysid;
const char *path;
const char *str;
+ struct pvs pvs;
+ char buf[11]; /* p a t h [ x x x x ] \0 */
int64_t val;
uint32_t opts = 0;
int result = 0;
int cl_pid;
int op, rt, lm, mode;
- int rv;
+ int rv, i;
buffer_init(&req.buffer);
@@ -4695,11 +4875,13 @@ static void client_recv_action(struct client *cl)
if (!cl->name[0] && cl_name)
strncpy(cl->name, cl_name, MAX_NAME);
- if (!gl_use_dlm && !gl_use_sanlock && (lm > 0)) {
+ if (!gl_use_dlm && !gl_use_sanlock && !gl_use_idm && (lm > 0)) {
if (lm == LD_LM_DLM && lm_support_dlm())
gl_use_dlm = 1;
else if (lm == LD_LM_SANLOCK && lm_support_sanlock())
gl_use_sanlock = 1;
+ else if (lm == LD_LM_IDM && lm_support_idm())
+ gl_use_idm = 1;
log_debug("set gl_use_%s", lm_str(lm));
}
@@ -4756,6 +4938,40 @@ static void client_recv_action(struct client *cl)
if (val)
act->host_id = val;
+ /* Create PV list for idm */
+ if (lm == LD_LM_IDM) {
+ memset(&pvs, 0x0, sizeof(pvs));
+
+ pvs.num = daemon_request_int(req, "path_num", 0);
+ log_error("pvs_num = %d", pvs.num);
+
+ if (!pvs.num)
+ goto skip_pvs_path;
+
+ /* Receive the pv list which is transferred from LVM command */
+ if (!alloc_pvs_path(&pvs, pvs.num)) {
+ log_error("fail to allocate pvs path");
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < pvs.num; i++) {
+ snprintf(buf, sizeof(buf), "path[%d]", i);
+ pvs.path[i] = (char *)daemon_request_str(req, buf, NULL);
+ }
+
+ if (!alloc_and_copy_pvs_path(&act->pvs, &pvs)) {
+ log_error("fail to allocate pvs path");
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ if (pvs.path)
+ free(pvs.path);
+ pvs.path = NULL;
+ }
+
+skip_pvs_path:
act->max_retries = daemon_request_int(req, "max_retries", DEFAULT_MAX_RETRIES);
dm_config_destroy(req.cft);
@@ -4777,6 +4993,12 @@ static void client_recv_action(struct client *cl)
goto out;
}
+ if (lm == LD_LM_IDM && !lm_support_idm()) {
+ log_debug("idm not supported");
+ rv = -EPROTONOSUPPORT;
+ goto out;
+ }
+
if (act->op == LD_OP_LOCK && act->mode != LD_LK_UN)
cl->lock_ops = 1;
@@ -5375,6 +5597,7 @@ static void adopt_locks(void)
}
list_del(&ls->list);
+ free_pvs_path(&ls->pvs);
free(ls);
}
@@ -5415,6 +5638,7 @@ static void adopt_locks(void)
if (rv < 0) {
log_error("Failed to create lockspace thread for VG %s", ls->vg_name);
list_del(&ls->list);
+ free_pvs_path(&ls->pvs);
free(ls);
free_action(act);
count_start_fail++;
@@ -5857,6 +6081,7 @@ static int main_loop(daemon_state *ds_arg)
}
strcpy(gl_lsname_dlm, S_NAME_GL_DLM);
+ strcpy(gl_lsname_idm, S_NAME_GL_IDM);
INIT_LIST_HEAD(&lockspaces);
pthread_mutex_init(&lockspaces_mutex, NULL);
@@ -6110,6 +6335,8 @@ int main(int argc, char *argv[])
gl_use_dlm = 1;
else if (lm == LD_LM_SANLOCK && lm_support_sanlock())
gl_use_sanlock = 1;
+ else if (lm == LD_LM_IDM && lm_support_idm())
+ gl_use_idm = 1;
else {
fprintf(stderr, "invalid gl-type option\n");
exit(EXIT_FAILURE);
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 06bf07eb59cf..ad32eb3a40e2 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -121,7 +121,7 @@ struct client {
#define DEFAULT_MAX_RETRIES 4
struct pvs {
- const char **path;
+ char **path;
int num;
};
@@ -338,7 +338,9 @@ EXTERN int gl_use_idm;
EXTERN int gl_vg_removed;
EXTERN char gl_lsname_dlm[MAX_NAME+1];
EXTERN char gl_lsname_sanlock[MAX_NAME+1];
+EXTERN char gl_lsname_idm[MAX_NAME+1];
EXTERN int global_dlm_lockspace_exists;
+EXTERN int global_idm_lockspace_exists;
EXTERN int daemon_test; /* run as much as possible without a live lock manager */
EXTERN int daemon_debug;
--
1.8.3.1

View File

@ -0,0 +1,301 @@
From ef1c57e68fa67dc0bc144b3a745bc456c3232d3e Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Fri, 7 May 2021 10:25:14 +0800
Subject: [PATCH 03/33] lib: locking: Add new type "idm"
We can consider the drive firmware a server to handle the locking
request from nodes, this essentially is a client-server model.
DLM uses the kernel as a central place to manage locks, so it also
complies with client-server model for locking operations. This is
why IDM and DLM are similar with each other for their wrappers.
This patch largely works by generalizing the DLM code paths and then
providing degeneralized functions as wrappers for both IDM and DLM.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/display/display.c | 4 +++
lib/locking/lvmlockd.c | 72 ++++++++++++++++++++++++++++++++++------
lib/metadata/metadata-exported.h | 1 +
lib/metadata/metadata.c | 12 ++++++-
4 files changed, 78 insertions(+), 11 deletions(-)
diff --git a/lib/display/display.c b/lib/display/display.c
index f0f03c0a5411..f9c9ef83667d 100644
--- a/lib/display/display.c
+++ b/lib/display/display.c
@@ -95,6 +95,8 @@ const char *get_lock_type_string(lock_type_t lock_type)
return "dlm";
case LOCK_TYPE_SANLOCK:
return "sanlock";
+ case LOCK_TYPE_IDM:
+ return "idm";
}
return "invalid";
}
@@ -111,6 +113,8 @@ lock_type_t get_lock_type_from_string(const char *str)
return LOCK_TYPE_DLM;
if (!strcmp(str, "sanlock"))
return LOCK_TYPE_SANLOCK;
+ if (!strcmp(str, "idm"))
+ return LOCK_TYPE_IDM;
return LOCK_TYPE_INVALID;
}
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 9a1b4f476f01..040c4246d718 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -553,7 +553,8 @@ static int _deactivate_sanlock_lv(struct cmd_context *cmd, struct volume_group *
return 1;
}
-static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+static int _init_vg(struct cmd_context *cmd, struct volume_group *vg,
+ const char *lock_type)
{
daemon_reply reply;
const char *reply_str;
@@ -569,7 +570,7 @@ static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
reply = _lockd_send("init_vg",
"pid = " FMTd64, (int64_t) getpid(),
"vg_name = %s", vg->name,
- "vg_lock_type = %s", "dlm",
+ "vg_lock_type = %s", lock_type,
NULL);
if (!_lockd_result(reply, &result, NULL)) {
@@ -589,10 +590,12 @@ static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
log_error("VG %s init failed: invalid parameters for dlm", vg->name);
break;
case -EMANAGER:
- log_error("VG %s init failed: lock manager dlm is not running", vg->name);
+ log_error("VG %s init failed: lock manager %s is not running",
+ vg->name, lock_type);
break;
case -EPROTONOSUPPORT:
- log_error("VG %s init failed: lock manager dlm is not supported by lvmlockd", vg->name);
+ log_error("VG %s init failed: lock manager %s is not supported by lvmlockd",
+ vg->name, lock_type);
break;
case -EEXIST:
log_error("VG %s init failed: a lockspace with the same name exists", vg->name);
@@ -616,7 +619,7 @@ static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
goto out;
}
- vg->lock_type = "dlm";
+ vg->lock_type = lock_type;
vg->lock_args = vg_lock_args;
if (!vg_write(vg) || !vg_commit(vg)) {
@@ -631,6 +634,16 @@ out:
return ret;
}
+static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _init_vg(cmd, vg, "dlm");
+}
+
+static int _init_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _init_vg(cmd, vg, "idm");
+}
+
static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, int lv_lock_count)
{
daemon_reply reply;
@@ -794,7 +807,7 @@ out:
/* called after vg_remove on disk */
-static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+static int _free_vg(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
@@ -820,16 +833,27 @@ static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
}
if (!ret)
- log_error("_free_vg_dlm lvmlockd result %d", result);
+ log_error("%s: lock type %s lvmlockd result %d",
+ __func__, vg->lock_type, result);
daemon_reply_destroy(reply);
return 1;
}
+static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _free_vg(cmd, vg);
+}
+
+static int _free_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _free_vg(cmd, vg);
+}
+
/* called before vg_remove on disk */
-static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+static int _busy_vg(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
@@ -864,13 +888,24 @@ static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
}
if (!ret)
- log_error("_busy_vg_dlm lvmlockd result %d", result);
+ log_error("%s: lock type %s lvmlockd result %d", __func__,
+ vg->lock_type, result);
out:
daemon_reply_destroy(reply);
return ret;
}
+static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _busy_vg(cmd, vg);
+}
+
+static int _busy_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return _busy_vg(cmd, vg);
+}
+
/* called before vg_remove on disk */
static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
@@ -976,6 +1011,8 @@ int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
return _init_vg_dlm(cmd, vg);
case LOCK_TYPE_SANLOCK:
return _init_vg_sanlock(cmd, vg, lv_lock_count);
+ case LOCK_TYPE_IDM:
+ return _init_vg_idm(cmd, vg);
default:
log_error("Unknown lock_type.");
return 0;
@@ -1017,7 +1054,8 @@ int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
* When removing (not changing), each LV is locked
* when it is removed, they do not need checking here.
*/
- if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK) {
+ if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK ||
+ lock_type_num == LOCK_TYPE_IDM) {
if (changing && !_lockd_all_lvs(cmd, vg)) {
log_error("Cannot change VG %s with active LVs", vg->name);
return 0;
@@ -1041,6 +1079,9 @@ int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
case LOCK_TYPE_SANLOCK:
/* returning an error will prevent vg_remove() */
return _free_vg_sanlock(cmd, vg);
+ case LOCK_TYPE_IDM:
+ /* returning an error will prevent vg_remove() */
+ return _busy_vg_idm(cmd, vg);
default:
log_error("Unknown lock_type.");
return 0;
@@ -1059,6 +1100,9 @@ void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg)
case LOCK_TYPE_DLM:
_free_vg_dlm(cmd, vg);
break;
+ case LOCK_TYPE_IDM:
+ _free_vg_idm(cmd, vg);
+ break;
default:
log_error("Unknown lock_type.");
}
@@ -2679,6 +2723,7 @@ int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg, struct logic
return 1;
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
+ case LOCK_TYPE_IDM:
break;
default:
log_error("lockd_init_lv: unknown lock_type.");
@@ -2821,6 +2866,8 @@ int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg, struct logic
lv->lock_args = "pending";
else if (!strcmp(vg->lock_type, "dlm"))
lv->lock_args = "dlm";
+ else if (!strcmp(vg->lock_type, "idm"))
+ lv->lock_args = "idm";
return 1;
}
@@ -2836,6 +2883,7 @@ int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
return 1;
case LOCK_TYPE_DLM:
case LOCK_TYPE_SANLOCK:
+ case LOCK_TYPE_IDM:
if (!lock_args)
return 1;
return _free_lv(cmd, vg, lv_name, lv_id, lock_args);
@@ -3007,6 +3055,10 @@ const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple
log_debug("lvmlockd found dlm");
lock_type = "dlm";
break;
+ case LOCK_TYPE_IDM:
+ log_debug("lvmlockd found idm");
+ lock_type = "idm";
+ break;
default:
log_error("Failed to find a running lock manager.");
break;
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index c6116350f643..52062a1002da 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -356,6 +356,7 @@ typedef enum {
LOCK_TYPE_CLVM = 1,
LOCK_TYPE_DLM = 2,
LOCK_TYPE_SANLOCK = 3,
+ LOCK_TYPE_IDM = 4,
} lock_type_t;
struct cmd_context;
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 002d80c010cb..110cbaed4e62 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -2235,6 +2235,13 @@ static int _validate_lv_lock_args(struct logical_volume *lv)
lv->vg->name, display_lvname(lv), lv->lock_args);
r = 0;
}
+
+ } else if (!strcmp(lv->vg->lock_type, "idm")) {
+ if (strcmp(lv->lock_args, "idm")) {
+ log_error(INTERNAL_ERROR "LV %s/%s has invalid lock_args \"%s\"",
+ lv->vg->name, display_lvname(lv), lv->lock_args);
+ r = 0;
+ }
}
return r;
@@ -2569,7 +2576,8 @@ int vg_validate(struct volume_group *vg)
r = 0;
}
- if (strcmp(vg->lock_type, "sanlock") && strcmp(vg->lock_type, "dlm")) {
+ if (strcmp(vg->lock_type, "sanlock") && strcmp(vg->lock_type, "dlm") &&
+ strcmp(vg->lock_type, "idm")) {
log_error(INTERNAL_ERROR "VG %s has unknown lock_type %s",
vg->name, vg->lock_type);
r = 0;
@@ -4355,6 +4363,8 @@ int is_lockd_type(const char *lock_type)
return 1;
if (!strcmp(lock_type, "sanlock"))
return 1;
+ if (!strcmp(lock_type, "idm"))
+ return 1;
return 0;
}
--
1.8.3.1

View File

@ -0,0 +1,412 @@
From affe1af148d5d939ffad7bde2ad51b0f386a44b7 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Fri, 7 May 2021 10:25:15 +0800
Subject: [PATCH 04/33] lib: locking: Parse PV list for IDM locking
For shared VG or LV locking, IDM locking scheme needs to use the PV
list associated with VG or LV for sending SCSI commands, thus it requires
to use some places to generate PV list.
In reviewing the flow for LVM commands, the best place to generate PV
list is in the locking lib. So this is why this patch parses PV list as
shown. It iterates over all the PV nodes one by one, and compare with
the VG name or LV prefix string. If any PV matches, then the PV is
added into the PV list. Finally the PV list is sent to lvmlockd daemon.
Here as mentioned, it compares LV prefix string with the format
"lv_name_", the reason is it needs to find out all relevant PVs, e.g.
for the thin pool, it has LVs for metadata, pool, error, and raw LV, so
we can use the prefix string to find out all PVs belonging to the thin
pool.
For the global lock, it's not covered in this patch. To avoid the egg
and chicken issue, we need to prepare the global lock ahead before any
locking can be used. So the global lock's PV list is established in
lvmlockd daemon by iterating all drives with partition labeled with
"propeller".
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/locking/lvmlockd.c | 258 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 241 insertions(+), 17 deletions(-)
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 040c4246d718..766be71badf3 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -25,6 +25,11 @@ static int _use_lvmlockd = 0; /* is 1 if command is configured to use lv
static int _lvmlockd_connected = 0; /* is 1 if command is connected to lvmlockd */
static int _lvmlockd_init_failed = 0; /* used to suppress further warnings */
+struct lvmlockd_pvs {
+ char **path;
+ int num;
+};
+
void lvmlockd_set_socket(const char *sock)
{
_lvmlockd_socket = sock;
@@ -178,18 +183,34 @@ static int _lockd_result(daemon_reply reply, int *result, uint32_t *lockd_flags)
return 1;
}
-static daemon_reply _lockd_send(const char *req_name, ...)
+static daemon_reply _lockd_send_with_pvs(const char *req_name,
+ const struct lvmlockd_pvs *lock_pvs, ...)
{
- va_list ap;
daemon_reply repl;
daemon_request req;
+ int i;
+ char key[32];
+ const char *val;
+ va_list ap;
req = daemon_request_make(req_name);
- va_start(ap, req_name);
+ va_start(ap, lock_pvs);
daemon_request_extend_v(req, ap);
va_end(ap);
+ /* Pass PV list */
+ if (lock_pvs && lock_pvs->num) {
+ daemon_request_extend(req, "path_num = " FMTd64,
+ (int64_t)(lock_pvs)->num, NULL);
+
+ for (i = 0; i < lock_pvs->num; i++) {
+ snprintf(key, sizeof(key), "path[%d] = %%s", i);
+ val = lock_pvs->path[i] ? lock_pvs->path[i] : "none";
+ daemon_request_extend(req, key, val, NULL);
+ }
+ }
+
repl = daemon_send(_lvmlockd, req);
daemon_request_destroy(req);
@@ -197,6 +218,166 @@ static daemon_reply _lockd_send(const char *req_name, ...)
return repl;
}
+#define _lockd_send(req_name, args...) \
+ _lockd_send_with_pvs(req_name, NULL, ##args)
+
+static int _lockd_retrive_vg_pv_num(struct volume_group *vg)
+{
+ struct pv_list *pvl;
+ int num = 0;
+
+ dm_list_iterate_items(pvl, &vg->pvs)
+ num++;
+
+ return num;
+}
+
+static void _lockd_retrive_vg_pv_list(struct volume_group *vg,
+ struct lvmlockd_pvs *lock_pvs)
+{
+ struct pv_list *pvl;
+ int pv_num, i;
+
+ memset(lock_pvs, 0x0, sizeof(*lock_pvs));
+
+ pv_num = _lockd_retrive_vg_pv_num(vg);
+ if (!pv_num) {
+ log_error("Fail to any PVs for VG %s", vg->name);
+ return;
+ }
+
+ /* Allocate buffer for PV list */
+ lock_pvs->path = zalloc(sizeof(*lock_pvs->path) * pv_num);
+ if (!lock_pvs->path) {
+ log_error("Fail to allocate PV list for VG %s", vg->name);
+ return;
+ }
+
+ i = 0;
+ dm_list_iterate_items(pvl, &vg->pvs) {
+ lock_pvs->path[i] = strdup(pv_dev_name(pvl->pv));
+ if (!lock_pvs->path[i]) {
+ log_error("Fail to allocate PV path for VG %s", vg->name);
+ goto fail;
+ }
+
+ log_debug("VG %s find PV device %s", vg->name, lock_pvs->path[i]);
+ i++;
+ }
+
+ lock_pvs->num = pv_num;
+ return;
+
+fail:
+ for (i = 0; i < pv_num; i++) {
+ if (!lock_pvs->path[i])
+ continue;
+ free(lock_pvs->path[i]);
+ }
+ free(lock_pvs->path);
+ return;
+}
+
+static int _lockd_retrive_lv_pv_num(struct volume_group *vg,
+ const char *lv_name)
+{
+ struct logical_volume *lv = find_lv(vg, lv_name);
+ struct pv_list *pvl;
+ int num;
+
+ if (!lv)
+ return 0;
+
+ num = 0;
+ dm_list_iterate_items(pvl, &vg->pvs) {
+ if (lv_is_on_pv(lv, pvl->pv))
+ num++;
+ }
+
+ return num;
+}
+
+static void _lockd_retrive_lv_pv_list(struct volume_group *vg,
+ const char *lv_name,
+ struct lvmlockd_pvs *lock_pvs)
+{
+ struct logical_volume *lv = find_lv(vg, lv_name);
+ struct pv_list *pvl;
+ int pv_num, i = 0;
+
+ memset(lock_pvs, 0x0, sizeof(*lock_pvs));
+
+ /* Cannot find any existed LV? */
+ if (!lv)
+ return;
+
+ pv_num = _lockd_retrive_lv_pv_num(vg, lv_name);
+ if (!pv_num) {
+ /*
+ * Fixup for 'lvcreate --type error -L1 -n $lv1 $vg', in this
+ * case, the drive path list is empty since it doesn't establish
+ * the structure 'pvseg->lvseg->lv->name'.
+ *
+ * So create drive path list with all drives in the VG.
+ */
+ log_error("Fail to find any PVs for %s/%s, try to find PVs from VG instead",
+ vg->name, lv_name);
+ _lockd_retrive_vg_pv_list(vg, lock_pvs);
+ return;
+ }
+
+ /* Allocate buffer for PV list */
+ lock_pvs->path = malloc(sizeof(*lock_pvs->path) * pv_num);
+ if (!lock_pvs->path) {
+ log_error("Fail to allocate PV list for %s/%s", vg->name, lv_name);
+ return;
+ }
+
+ dm_list_iterate_items(pvl, &vg->pvs) {
+ if (lv_is_on_pv(lv, pvl->pv)) {
+ lock_pvs->path[i] = strdup(pv_dev_name(pvl->pv));
+ if (!lock_pvs->path[i]) {
+ log_error("Fail to allocate PV path for LV %s/%s",
+ vg->name, lv_name);
+ goto fail;
+ }
+
+ log_debug("Find PV device %s for LV %s/%s",
+ lock_pvs->path[i], vg->name, lv_name);
+ i++;
+ }
+ }
+
+ lock_pvs->num = pv_num;
+ return;
+
+fail:
+ for (i = 0; i < pv_num; i++) {
+ if (!lock_pvs->path[i])
+ continue;
+ free(lock_pvs->path[i]);
+ lock_pvs->path[i] = NULL;
+ }
+ free(lock_pvs->path);
+ lock_pvs->path = NULL;
+ lock_pvs->num = 0;
+ return;
+}
+
+static void _lockd_free_pv_list(struct lvmlockd_pvs *lock_pvs)
+{
+ int i;
+
+ for (i = 0; i < lock_pvs->num; i++) {
+ free(lock_pvs->path[i]);
+ lock_pvs->path[i] = NULL;
+ }
+
+ free(lock_pvs->path);
+ lock_pvs->path = NULL;
+ lock_pvs->num = 0;
+}
+
/*
* result/lockd_flags are values returned from lvmlockd.
*
@@ -227,6 +408,7 @@ static int _lockd_request(struct cmd_context *cmd,
const char *lv_lock_args,
const char *mode,
const char *opts,
+ const struct lvmlockd_pvs *lock_pvs,
int *result,
uint32_t *lockd_flags)
{
@@ -251,7 +433,8 @@ static int _lockd_request(struct cmd_context *cmd,
cmd_name = "none";
if (vg_name && lv_name) {
- reply = _lockd_send(req_name,
+ reply = _lockd_send_with_pvs(req_name,
+ lock_pvs,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -271,7 +454,8 @@ static int _lockd_request(struct cmd_context *cmd,
req_name, mode, vg_name, lv_name, *result, *lockd_flags);
} else if (vg_name) {
- reply = _lockd_send(req_name,
+ reply = _lockd_send_with_pvs(req_name,
+ lock_pvs,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -288,7 +472,8 @@ static int _lockd_request(struct cmd_context *cmd,
req_name, mode, vg_name, *result, *lockd_flags);
} else {
- reply = _lockd_send(req_name,
+ reply = _lockd_send_with_pvs(req_name,
+ lock_pvs,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -1134,6 +1319,7 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
int host_id = 0;
int result;
int ret;
+ struct lvmlockd_pvs lock_pvs;
memset(uuid, 0, sizeof(uuid));
@@ -1169,7 +1355,28 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
host_id = find_config_tree_int(cmd, local_host_id_CFG, NULL);
}
- reply = _lockd_send("start_vg",
+ /*
+ * Create the VG's PV list when start the VG, the PV list
+ * is passed to lvmlockd, and the the PVs path will be used
+ * to send SCSI commands for idm locking scheme.
+ */
+ if (!strcmp(vg->lock_type, "idm")) {
+ _lockd_retrive_vg_pv_list(vg, &lock_pvs);
+ reply = _lockd_send_with_pvs("start_vg",
+ &lock_pvs,
+ "pid = " FMTd64, (int64_t) getpid(),
+ "vg_name = %s", vg->name,
+ "vg_lock_type = %s", vg->lock_type,
+ "vg_lock_args = %s", vg->lock_args ?: "none",
+ "vg_uuid = %s", uuid[0] ? uuid : "none",
+ "version = " FMTd64, (int64_t) vg->seqno,
+ "host_id = " FMTd64, (int64_t) host_id,
+ "opts = %s", start_init ? "start_init" : "none",
+ NULL);
+ _lockd_free_pv_list(&lock_pvs);
+ } else {
+ reply = _lockd_send_with_pvs("start_vg",
+ NULL,
"pid = " FMTd64, (int64_t) getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", vg->lock_type,
@@ -1179,6 +1386,7 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
"host_id = " FMTd64, (int64_t) host_id,
"opts = %s", start_init ? "start_init" : "none",
NULL);
+ }
if (!_lockd_result(reply, &result, &lockd_flags)) {
ret = 0;
@@ -1406,7 +1614,7 @@ int lockd_global_create(struct cmd_context *cmd, const char *def_mode, const cha
req:
if (!_lockd_request(cmd, "lock_gl",
NULL, vg_lock_type, NULL, NULL, NULL, NULL, mode, NULL,
- &result, &lockd_flags)) {
+ NULL, &result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
log_error("Global lock failed: check that lvmlockd is running.");
return 0;
@@ -1642,7 +1850,7 @@ int lockd_global(struct cmd_context *cmd, const char *def_mode)
if (!_lockd_request(cmd, "lock_gl",
NULL, NULL, NULL, NULL, NULL, NULL, mode, opts,
- &result, &lockd_flags)) {
+ NULL, &result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
/* We don't care if an unlock fails. */
@@ -1910,7 +2118,7 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
if (!_lockd_request(cmd, "lock_vg",
vg_name, NULL, NULL, NULL, NULL, NULL, mode, NULL,
- &result, &lockd_flags)) {
+ NULL, &result, &lockd_flags)) {
/*
* No result from lvmlockd, it is probably not running.
* Decide if it is ok to continue without a lock in
@@ -2170,6 +2378,7 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
uint32_t lockd_flags;
int refreshed = 0;
int result;
+ struct lvmlockd_pvs lock_pvs;
/*
* Verify that when --readonly is used, no LVs should be activated or used.
@@ -2235,13 +2444,28 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
retry:
log_debug("lockd LV %s/%s mode %s uuid %s", vg->name, lv_name, mode, lv_uuid);
- if (!_lockd_request(cmd, "lock_lv",
- vg->name, vg->lock_type, vg->lock_args,
- lv_name, lv_uuid, lock_args, mode, opts,
- &result, &lockd_flags)) {
- /* No result from lvmlockd, it is probably not running. */
- log_error("Locking failed for LV %s/%s", vg->name, lv_name);
- return 0;
+ /* Pass PV list for IDM lock type */
+ if (!strcmp(vg->lock_type, "idm")) {
+ _lockd_retrive_lv_pv_list(vg, lv_name, &lock_pvs);
+ if (!_lockd_request(cmd, "lock_lv",
+ vg->name, vg->lock_type, vg->lock_args,
+ lv_name, lv_uuid, lock_args, mode, opts,
+ &lock_pvs, &result, &lockd_flags)) {
+ _lockd_free_pv_list(&lock_pvs);
+ /* No result from lvmlockd, it is probably not running. */
+ log_error("Locking failed for LV %s/%s", vg->name, lv_name);
+ return 0;
+ }
+ _lockd_free_pv_list(&lock_pvs);
+ } else {
+ if (!_lockd_request(cmd, "lock_lv",
+ vg->name, vg->lock_type, vg->lock_args,
+ lv_name, lv_uuid, lock_args, mode, opts,
+ NULL, &result, &lockd_flags)) {
+ /* No result from lvmlockd, it is probably not running. */
+ log_error("Locking failed for LV %s/%s", vg->name, lv_name);
+ return 0;
+ }
}
/* The lv was not active/locked. */
--
1.8.3.1

View File

@ -0,0 +1,80 @@
From 8b904dc71143f4dc7553026f783aa1c0b1d4b954 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Fri, 7 May 2021 10:25:16 +0800
Subject: [PATCH 05/33] tools: Add support for "idm" lock type
This patch is to update the comment and code to support "idm" lock type
which is used for LVM toolkit.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/lvconvert.c | 2 ++
tools/toollib.c | 11 ++++++-----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index 8dd8a15c4054..71f7a7627fa1 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -3416,6 +3416,8 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
pool_lv->lock_args = "pending";
else if (!strcmp(vg->lock_type, "dlm"))
pool_lv->lock_args = "dlm";
+ else if (!strcmp(vg->lock_type, "idm"))
+ pool_lv->lock_args = "idm";
/* The lock_args will be set in vg_write(). */
}
}
diff --git a/tools/toollib.c b/tools/toollib.c
index 07f065322d94..f337f9fcf9d5 100644
--- a/tools/toollib.c
+++ b/tools/toollib.c
@@ -591,15 +591,15 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
* new VG, and is it compatible with current lvm.conf settings.
*
* The end result is to set vp_new->lock_type to:
- * none | clvm | dlm | sanlock.
+ * none | clvm | dlm | sanlock | idm.
*
* If 'vgcreate --lock-type <arg>' is set, the answer is given
- * directly by <arg> which is one of none|clvm|dlm|sanlock.
+ * directly by <arg> which is one of none|clvm|dlm|sanlock|idm.
*
* 'vgcreate --clustered y' is the way to create clvm VGs.
*
* 'vgcreate --shared' is the way to create lockd VGs.
- * lock_type of sanlock or dlm is selected based on
+ * lock_type of sanlock, dlm or idm is selected based on
* which lock manager is running.
*
*
@@ -646,7 +646,7 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
* - lvmlockd is used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type can be used
- * - vgcreate can create new VGs with lock_type sanlock or dlm
+ * - vgcreate can create new VGs with lock_type sanlock, dlm or idm
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' works
* - 'vgcreate' (neither option) creates a local VG
@@ -658,7 +658,7 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
lock_type = arg_str_value(cmd, locktype_ARG, "");
if (arg_is_set(cmd, shared_ARG) && !is_lockd_type(lock_type)) {
- log_error("The --shared option requires lock type sanlock or dlm.");
+ log_error("The --shared option requires lock type sanlock, dlm or idm.");
return 0;
}
@@ -697,6 +697,7 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
+ case LOCK_TYPE_IDM:
if (!use_lvmlockd) {
log_error("Using a shared lock type requires lvmlockd.");
return 0;
--
1.8.3.1

View File

@ -0,0 +1,34 @@
From 102294f9788f243509f1001a60924d6920fd9092 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Fri, 21 May 2021 10:56:37 +0800
Subject: [PATCH 06/33] configure: Add macro LOCKDIDM_SUPPORT
The macro LOCKDIDM_SUPPORT is missing from the configure.h.in file, thus when
executing the "configure" command, it has no chance to add this macro in the
automatically generated header include/configure.h.
This patch adds macro LOCKDIDM_SUPPORT into configure.h.in.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
include/configure.h.in | 3 +++
1 file changed, 3 insertions(+)
diff --git a/include/configure.h.in b/include/configure.h.in
index 671d201b2a7e..bcb282660694 100644
--- a/include/configure.h.in
+++ b/include/configure.h.in
@@ -561,6 +561,9 @@
/* Define to 1 to include code that uses lvmlockd sanlock option. */
#undef LOCKDSANLOCK_SUPPORT
+/* Define to 1 to include code that uses lvmlockd IDM option. */
+#undef LOCKDIDM_SUPPORT
+
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
#undef LSTAT_FOLLOWS_SLASHED_SYMLINK
--
1.8.3.1

View File

@ -0,0 +1,38 @@
From a65f8e0a62b9ab3c2fc909a63abfa0e933619a8c Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Wed, 19 May 2021 14:36:40 -0500
Subject: [PATCH 07/33] enable command syntax for thin and writecache
converting an LV with a writecache to thin pool data in
addition to previous attaching writecache to thin pool data
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/command-lines.in | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/command-lines.in b/tools/command-lines.in
index 1107c1e026e1..67c37ffd033b 100644
--- a/tools/command-lines.in
+++ b/tools/command-lines.in
@@ -534,7 +534,7 @@ RULE: all and lv_is_visible
---
-lvconvert --type thin-pool LV_linear_striped_raid_cache_error_zero
+lvconvert --type thin-pool LV_linear_striped_raid_cache_writecache_error_zero
OO: --stripes_long Number, --stripesize SizeKB,
OO_LVCONVERT_THINPOOL, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
@@ -566,7 +566,7 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
# This command syntax is deprecated, and the primary forms
# of creating a pool or swapping metadata should be used.
-lvconvert --thinpool LV_linear_striped_raid_cache_thinpool
+lvconvert --thinpool LV_linear_striped_raid_cache_writecache_thinpool
OO: --stripes_long Number, --stripesize SizeKB,
OO_LVCONVERT_THINPOOL, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
--
1.8.3.1

View File

@ -0,0 +1,161 @@
From 4a746f7ffcc8e61c9cb5ce9f9e8a061d1ef6b28e Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Mon, 24 May 2021 16:08:27 -0500
Subject: [PATCH 08/33] lvremove: fix removing thin pool with writecache on
data
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/metadata/lv_manip.c | 19 +++++++++
lib/metadata/metadata-exported.h | 2 +
lib/metadata/thin_manip.c | 12 ++++++
test/shell/lvremove-thindata-caches.sh | 71 ++++++++++++++++++++++++++++++++++
4 files changed, 104 insertions(+)
create mode 100644 test/shell/lvremove-thindata-caches.sh
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 508f78c132d0..37dd3611dde7 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -6692,6 +6692,25 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return_0;
}
+ /* if thin pool data lv is writecache, then detach and remove the writecache */
+ if (lv_is_thin_pool(lv)) {
+ struct logical_volume *data_lv = data_lv_from_thin_pool(lv);
+
+ if (data_lv && lv_is_writecache(data_lv)) {
+ struct logical_volume *cachevol_lv = first_seg(data_lv)->writecache;
+
+ if (!lv_detach_writecache_cachevol(data_lv, 1)) {
+ log_error("Failed to detach writecache from %s", display_lvname(data_lv));
+ return 0;
+ }
+
+ if (!lv_remove_single(cmd, cachevol_lv, force, 1)) {
+ log_error("Failed to remove cachevol %s.", display_lvname(cachevol_lv));
+ return 0;
+ }
+ }
+ }
+
if (lv_is_writecache(lv)) {
struct logical_volume *cachevol_lv = first_seg(lv)->writecache;
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 52062a1002da..9ac3c677ed82 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -928,6 +928,8 @@ int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
int vg_set_pool_metadata_spare(struct logical_volume *lv);
int vg_remove_pool_metadata_spare(struct volume_group *vg);
+struct logical_volume *data_lv_from_thin_pool(struct logical_volume *pool_lv);
+
int attach_thin_external_origin(struct lv_segment *seg,
struct logical_volume *external_lv);
int detach_thin_external_origin(struct lv_segment *seg);
diff --git a/lib/metadata/thin_manip.c b/lib/metadata/thin_manip.c
index 451c382600f7..6ce88bd3d6ee 100644
--- a/lib/metadata/thin_manip.c
+++ b/lib/metadata/thin_manip.c
@@ -21,6 +21,18 @@
#include "lib/config/defaults.h"
#include "lib/display/display.h"
+struct logical_volume *data_lv_from_thin_pool(struct logical_volume *pool_lv)
+{
+ struct lv_segment *seg_thinpool = first_seg(pool_lv);
+
+ if (!seg_thinpool || !seg_is_thin_pool(seg_thinpool)) {
+ log_error(INTERNAL_ERROR "data_lv_from_thin_pool arg not thin pool %s", pool_lv->name);
+ return NULL;
+ }
+
+ return seg_thinpool->areas[0].u.lv.lv;
+}
+
/* TODO: drop unused no_update */
int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
struct logical_volume *lv, uint32_t delete_id,
diff --git a/test/shell/lvremove-thindata-caches.sh b/test/shell/lvremove-thindata-caches.sh
new file mode 100644
index 000000000000..ba099c373b32
--- /dev/null
+++ b/test/shell/lvremove-thindata-caches.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017-2020 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux have_cache 1 10 0 || skip
+aux have_writecache 1 0 0 || skip
+which mkfs.xfs || skip
+
+aux prepare_devs 6 70 # want 64M of usable space from each dev
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
+
+# lv1 is thinpool LV: 128M
+# lv2 is fast LV: 64M
+# lv3 is thin LV: 1G
+
+#
+# Test lvremove of a thinpool that uses cache|writecache on data
+#
+
+# attach writecache to thinpool data
+lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
+lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
+lvcreate -n $lv2 -L64M -an $vg "$dev3"
+lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
+lvchange -ay $vg/$lv1
+lvs -a $vg
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
+lvremove -y $vg/$lv1
+
+# attach cache/writeback (cachevol) to thinpool data
+lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
+lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
+lvcreate -n $lv2 -L64M -an $vg "$dev3"
+lvconvert -y --type cache --cachevol $lv2 --cachemode writeback $vg/$lv1
+lvchange -ay $vg/$lv1
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
+lvremove -y $vg/$lv1
+
+# attach cache/writethrough (cachevol) to thinpool data
+lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
+lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
+lvcreate -n $lv2 -L64M -an $vg "$dev3"
+lvconvert -y --type cache --cachevol $lv2 --cachemode writethrough $vg/$lv1
+lvchange -ay $vg/$lv1
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
+lvremove -y $vg/$lv1
+
+# attach cache (cachepool) to thinpool data
+lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
+lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
+lvcreate -y --type cache-pool -n $lv2 -L64M --poolmetadataspare n $vg "$dev3" "$dev6"
+lvconvert -y --type cache --cachepool $lv2 --poolmetadataspare n $vg/$lv1
+lvchange -ay $vg/$lv1
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
+lvremove -y $vg/$lv1
+
+vgremove -f $vg
+
--
1.8.3.1

View File

@ -0,0 +1,37 @@
From b725b5ea6ecfeef428fd7ffcd6855a38378d761b Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Wed, 26 May 2021 00:19:28 +0200
Subject: [PATCH 09/33] vdo: fix preload of kvdo
Commit 5bf1dba9eb8a8b77410e386e59dadeb27801b14e broke load of kvdo
kernel module - correct it by loading kvdo instead of trying dm-vdo.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/activate/activate.c | 10 +++-------
1 files changed, 3 insertions(+), 7 deletions(-)
diff --git a/lib/activate/activate.c b/lib/activate/activate.c
index 71db98191506..6bda7385ba5c 100644
--- a/lib/activate/activate.c
+++ b/lib/activate/activate.c
@@ -574,13 +574,9 @@ int module_present(struct cmd_context *cmd, const char *target_name)
}
#ifdef MODPROBE_CMD
- if (strcmp(target_name, MODULE_NAME_VDO) == 0) {
- argv[1] = target_name; /* ATM kvdo is without dm- prefix */
- if ((ret = exec_cmd(cmd, argv, NULL, 0)))
- return ret;
- }
-
- if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
+ if (strcmp(target_name, TARGET_NAME_VDO) == 0)
+ argv[1] = MODULE_NAME_VDO; /* ATM kvdo is without dm- prefix */
+ else if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
log_error("module_present module name too long: %s",
target_name);
return 0;
--
1.8.3.1

View File

@ -0,0 +1,29 @@
From 247f69f9aafe731ef85268da1e6ce817295b265d Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Wed, 2 Jun 2021 10:51:12 -0500
Subject: [PATCH 10/33] writecache: fix lv_on_pmem
dev_is_pmem on pv->dev requires a pv segment or it could segfault.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/metadata/metadata.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 110cbaed4e62..0b284435b41b 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -4412,6 +4412,9 @@ int lv_on_pmem(struct logical_volume *lv)
dm_list_iterate_items(seg, &lv->segments) {
for (s = 0; s < seg->area_count; s++) {
+ if (seg_type(seg, s) != AREA_PV)
+ continue;
+
pv = seg_pv(seg, s);
if (dev_is_pmem(lv->vg->cmd->dev_types, pv->dev)) {
--
1.8.3.1

View File

@ -0,0 +1,36 @@
From e7f107c24666c8577f30e530b74f1ce0347e459b Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Wed, 2 Jun 2021 11:12:20 -0500
Subject: [PATCH 11/33] writecache: don't pvmove device used by writecache
The existing check didn't cover the unusual case where the
cachevol exists on the same device as the origin LV.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/pvmove.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/tools/pvmove.c b/tools/pvmove.c
index da635a662d4c..bb372f7dcaeb 100644
--- a/tools/pvmove.c
+++ b/tools/pvmove.c
@@ -387,6 +387,15 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
return NULL;
}
+ if (lv_is_writecache(lv)) {
+ struct logical_volume *lv_cachevol = first_seg(lv)->writecache;
+ if (lv_is_on_pvs(lv_cachevol, source_pvl)) {
+ log_error("Unable to move device used for writecache cachevol %s.", display_lvname(lv_cachevol));
+ return NULL;
+ }
+
+ }
+
if (lv_is_raid(lv) && lv_raid_has_integrity(lv)) {
log_error("Unable to pvmove device used for raid with integrity.");
return NULL;
--
1.8.3.1

View File

@ -0,0 +1,60 @@
From 2bce6faed017df8da3e659eff3f42f39d25c7f09 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Wed, 2 Jun 2021 16:29:54 -0500
Subject: [PATCH 12/33] pvchange: fix file locking deadlock
Calling clear_hint_file() to invalidate hints would acquire
the hints flock before the global flock which could cause deadlock.
The lock order requires the global lock to be taken first.
pvchange was always invalidating hints, which was unnecessary;
only invalidate hints when changing a PV uuid. Because of the
lock ordering, take the global lock before clear_hint_file which
locks the hints file.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/pvchange.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/tools/pvchange.c b/tools/pvchange.c
index d6e35d66f9cc..04cbb428dde1 100644
--- a/tools/pvchange.c
+++ b/tools/pvchange.c
@@ -248,7 +248,32 @@ int pvchange(struct cmd_context *cmd, int argc, char **argv)
set_pv_notify(cmd);
- clear_hint_file(cmd);
+ /*
+ * Changing a PV uuid is the only pvchange that invalidates hints.
+ * Invalidating hints (clear_hint_file) is called at the start of
+ * the command and takes the hints lock.
+ * The global lock must always be taken first, then the hints lock
+ * (the required lock ordering.)
+ *
+ * Because of these constraints, the global lock is taken ex here
+ * for any PV uuid change, even though the global lock is technically
+ * required only for changing an orphan PV (we don't know until later
+ * if the PV is an orphan). The VG lock is used when changing
+ * non-orphan PVs.
+ *
+ * For changes other than uuid on an orphan PV, the global lock is
+ * taken sh by process_each, then converted to ex in pvchange_single,
+ * which works because the hints lock is not held.
+ *
+ * (Eventually, perhaps always do lock_global(ex) here to simplify.)
+ */
+ if (arg_is_set(cmd, uuid_ARG)) {
+ if (!lock_global(cmd, "ex")) {
+ ret = ECMD_FAILED;
+ goto out;
+ }
+ clear_hint_file(cmd);
+ }
ret = process_each_pv(cmd, argc, argv, NULL, 0, READ_FOR_UPDATE, handle, _pvchange_single);
--
1.8.3.1

View File

@ -0,0 +1,222 @@
From c64dbc7ee80963a02f82f3257963f90b471fa90e Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:09 +0800
Subject: [PATCH 13/33] tests: Enable the testing for IDM locking scheme
This patch is to introduce testing option LVM_TEST_LOCK_TYPE_IDM, with
specifying this option, the Seagate IDM lock manager will be launched as
backend for testing. Also add the prepare and remove shell scripts for
IDM.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/Makefile.in | 9 +++++++++
test/lib/aux.sh | 25 +++++++++++++++++++++++++
test/lib/flavour-udev-lvmlockd-idm.sh | 5 +++++
test/lib/inittest.sh | 3 ++-
test/shell/aa-lvmlockd-idm-prepare.sh | 20 ++++++++++++++++++++
test/shell/lvmlockd-lv-types.sh | 6 ++++++
test/shell/zz-lvmlockd-idm-remove.sh | 29 +++++++++++++++++++++++++++++
7 files changed, 96 insertions(+), 1 deletion(-)
create mode 100644 test/lib/flavour-udev-lvmlockd-idm.sh
create mode 100644 test/shell/aa-lvmlockd-idm-prepare.sh
create mode 100644 test/shell/zz-lvmlockd-idm-remove.sh
diff --git a/test/Makefile.in b/test/Makefile.in
index e4cd3aac5116..662974be6ccb 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -85,6 +85,7 @@ help:
@echo " check_all_lvmpolld Run all tests with lvmpolld daemon."
@echo " check_lvmlockd_sanlock Run tests with lvmlockd and sanlock."
@echo " check_lvmlockd_dlm Run tests with lvmlockd and dlm."
+ @echo " check_lvmlockd_idm Run tests with lvmlockd and idm."
@echo " check_lvmlockd_test Run tests with lvmlockd --test."
@echo " run-unit-test Run only unit tests (root not needed)."
@echo " clean Clean dir."
@@ -169,6 +170,13 @@ check_lvmlockd_dlm: .tests-stamp
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
+check_lvmlockd_idm: .tests-stamp
+ VERBOSE=$(VERBOSE) ./lib/runner \
+ --testdir . --outdir $(LVM_TEST_RESULTS) \
+ --flavours udev-lvmlockd-idm --only shell/aa-lvmlockd-idm-prepare.sh,$(T),shell/zz-lvmlockd-idm-remove.sh --skip $(S)
+endif
+
+ifeq ("@BUILD_LVMLOCKD@", "yes")
check_lvmlockd_test: .tests-stamp
VERBOSE=$(VERBOSE) ./lib/runner \
--testdir . --outdir $(LVM_TEST_RESULTS) \
@@ -189,6 +197,7 @@ LIB_FLAVOURS = \
flavour-udev-lvmpolld\
flavour-udev-lvmlockd-sanlock\
flavour-udev-lvmlockd-dlm\
+ flavour-udev-lvmlockd-idm\
flavour-udev-lvmlockd-test\
flavour-udev-vanilla
diff --git a/test/lib/aux.sh b/test/lib/aux.sh
index 1a1f11a1d48f..97c7ac68b77b 100644
--- a/test/lib/aux.sh
+++ b/test/lib/aux.sh
@@ -119,6 +119,20 @@ prepare_sanlock() {
fi
}
+prepare_idm() {
+ if pgrep seagate_ilm; then
+ echo "Cannot run while existing seagate_ilm process exists"
+ exit 1
+ fi
+
+ seagate_ilm -D 0 -l 0 -L 7 -E 7 -S 7
+
+ if ! pgrep seagate_ilm; then
+ echo "Failed to start seagate_ilm"
+ exit 1
+ fi
+}
+
prepare_lvmlockd() {
if pgrep lvmlockd ; then
echo "Cannot run while existing lvmlockd process exists"
@@ -135,6 +149,11 @@ prepare_lvmlockd() {
echo "starting lvmlockd for dlm"
lvmlockd
+ elif test -n "$LVM_TEST_LOCK_TYPE_IDM"; then
+ # make check_lvmlockd_idm
+ echo "starting lvmlockd for idm"
+ lvmlockd -g idm
+
elif test -n "$LVM_TEST_LVMLOCKD_TEST_DLM"; then
# make check_lvmlockd_test
echo "starting lvmlockd --test (dlm)"
@@ -144,6 +163,12 @@ prepare_lvmlockd() {
# FIXME: add option for this combination of --test and sanlock
echo "starting lvmlockd --test (sanlock)"
lvmlockd --test -g sanlock -o 2
+
+ elif test -n "$LVM_TEST_LVMLOCKD_TEST_IDM"; then
+ # make check_lvmlockd_test
+ echo "starting lvmlockd --test (idm)"
+ lvmlockd --test -g idm
+
else
echo "not starting lvmlockd"
exit 0
diff --git a/test/lib/flavour-udev-lvmlockd-idm.sh b/test/lib/flavour-udev-lvmlockd-idm.sh
new file mode 100644
index 000000000000..e9f8908dfb49
--- /dev/null
+++ b/test/lib/flavour-udev-lvmlockd-idm.sh
@@ -0,0 +1,5 @@
+export LVM_TEST_LOCKING=1
+export LVM_TEST_LVMPOLLD=1
+export LVM_TEST_LVMLOCKD=1
+export LVM_TEST_LOCK_TYPE_IDM=1
+export LVM_TEST_DEVDIR=/dev
diff --git a/test/lib/inittest.sh b/test/lib/inittest.sh
index 0fd6517103a5..6b4bcb348010 100644
--- a/test/lib/inittest.sh
+++ b/test/lib/inittest.sh
@@ -40,6 +40,7 @@ LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD-}
LVM_TEST_DEVICES_FILE=${LVM_TEST_DEVICES_FILE-}
LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM-}
LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK-}
+LVM_TEST_LOCK_TYPE_IDM=${LVM_TEST_LOCK_TYPE_IDM-}
SKIP_WITHOUT_CLVMD=${SKIP_WITHOUT_CLVMD-}
SKIP_WITH_CLVMD=${SKIP_WITH_CLVMD-}
@@ -64,7 +65,7 @@ unset CDPATH
export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG
export LVM_TEST_LVMLOCKD LVM_TEST_LVMLOCKD_TEST
-export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK
+export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK LVM_TEST_LOCK_TYPE_IDM
export LVM_TEST_DEVICES_FILE
# grab some common utilities
. lib/utils
diff --git a/test/shell/aa-lvmlockd-idm-prepare.sh b/test/shell/aa-lvmlockd-idm-prepare.sh
new file mode 100644
index 000000000000..8faff3bc2c66
--- /dev/null
+++ b/test/shell/aa-lvmlockd-idm-prepare.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+test_description='Set up things to run tests with idm'
+
+. lib/inittest
+
+[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
+
+aux prepare_idm
+aux prepare_lvmlockd
diff --git a/test/shell/lvmlockd-lv-types.sh b/test/shell/lvmlockd-lv-types.sh
index 6138e5623d77..ee350b1c68a3 100644
--- a/test/shell/lvmlockd-lv-types.sh
+++ b/test/shell/lvmlockd-lv-types.sh
@@ -36,6 +36,12 @@ LOCKARGS2="dlm"
LOCKARGS3="dlm"
fi
+if test -n "$LVM_TEST_LOCK_TYPE_IDM" ; then
+LOCKARGS1="idm"
+LOCKARGS2="idm"
+LOCKARGS3="idm"
+fi
+
aux prepare_devs 5
vgcreate --shared $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
diff --git a/test/shell/zz-lvmlockd-idm-remove.sh b/test/shell/zz-lvmlockd-idm-remove.sh
new file mode 100644
index 000000000000..25943a579d7e
--- /dev/null
+++ b/test/shell/zz-lvmlockd-idm-remove.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+test_description='Remove the idm test setup'
+
+. lib/inittest
+
+[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
+
+# FIXME: collect debug logs (only if a test failed?)
+# lvmlockctl -d > lvmlockd-debug.txt
+# dlm_tool dump > dlm-debug.txt
+
+lvmlockctl --stop-lockspaces
+sleep 1
+killall lvmlockd
+sleep 1
+killall lvmlockd || true
+sleep 1
+killall seagate_ilm
--
1.8.3.1

View File

@ -0,0 +1,87 @@
From 759b0392d5f0f15e7e503ae1b3ef82ea7b4df0c1 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:10 +0800
Subject: [PATCH 14/33] tests: Support multiple backing devices
In the current implementation, the option "LVM_TEST_BACKING_DEVICE" only
supports specifying one backing device; this patch extends the
option to support multiple backing devices by using comma as separator,
e.g. below command specifies two backing devices:
make check_lvmlockd_idm LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3
This can allow the testing works on multiple drives and verify the
locking scheme if can work as expected for multiple drives case. For
example, for Seagate IDM locking scheme, if a VG uses two PVs, every PV
is resident on a drive, thus the locking operations will be sent to two
drives respectively; so the extension for "LVM_TEST_BACKING_DEVICE" can
help to verify different drive configurations for locking.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/lib/aux.sh | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/test/lib/aux.sh b/test/lib/aux.sh
index 97c7ac68b77b..a592dad813b7 100644
--- a/test/lib/aux.sh
+++ b/test/lib/aux.sh
@@ -901,11 +901,22 @@ prepare_backing_dev() {
local size=${1=32}
shift
+ if test -n "$LVM_TEST_BACKING_DEVICE"; then
+ IFS=',' read -r -a BACKING_DEVICE_ARRAY <<< "$LVM_TEST_BACKING_DEVICE"
+
+ for d in "${BACKING_DEVICE_ARRAY[@]}"; do
+ if test ! -b "$d"; then
+ echo "Device $d doesn't exist!"
+ return 1
+ fi
+ done
+ fi
+
if test -f BACKING_DEV; then
BACKING_DEV=$(< BACKING_DEV)
return 0
- elif test -b "$LVM_TEST_BACKING_DEVICE"; then
- BACKING_DEV=$LVM_TEST_BACKING_DEVICE
+ elif test -n "$LVM_TEST_BACKING_DEVICE"; then
+ BACKING_DEV=${BACKING_DEVICE_ARRAY[0]}
echo "$BACKING_DEV" > BACKING_DEV
return 0
elif test "${LVM_TEST_PREFER_BRD-1}" = "1" && \
@@ -953,7 +964,14 @@ prepare_devs() {
local dev="$DM_DEV_DIR/mapper/$name"
DEVICES[$count]=$dev
count=$(( count + 1 ))
- echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + ( header_shift * 2048 ) )) > "$name.table"
+ # If the backing device number can meet the requirement for PV devices,
+ # then allocate a dedicated backing device for PV; otherwise, rollback
+ # to use single backing device for device-mapper.
+ if [ -n "$LVM_TEST_BACKING_DEVICE" ] && [ $n -le ${#BACKING_DEVICE_ARRAY[@]} ]; then
+ echo 0 $size linear "${BACKING_DEVICE_ARRAY[$(( count - 1 ))]}" $(( header_shift * 2048 )) > "$name.table"
+ else
+ echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + ( header_shift * 2048 ) )) > "$name.table"
+ fi
dmsetup create -u "TEST-$name" "$name" "$name.table" || touch CREATE_FAILED &
test -f CREATE_FAILED && break;
done
@@ -971,6 +989,13 @@ prepare_devs() {
return $?
fi
+ for d in "${BACKING_DEVICE_ARRAY[@]}"; do
+ cnt=$((`blockdev --getsize64 $d` / 1024 / 1024))
+ cnt=$(( cnt < 1000 ? cnt : 1000 ))
+ dd if=/dev/zero of="$d" bs=1MB count=$cnt
+ wipefs -a "$d" 2>/dev/null || true
+ done
+
# non-ephemeral devices need to be cleared between tests
test -f LOOP -o -f RAMDISK || for d in "${DEVICES[@]}"; do
# ensure disk header is always zeroed
--
1.8.3.1

View File

@ -0,0 +1,65 @@
From 2097c27c05de30003850eb667b9cf21b3181fddf Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:11 +0800
Subject: [PATCH 15/33] tests: Cleanup idm context when prepare devices
For testing idm locking scheme, it's good to cleanup the idm context
before run the test cases. This can give a clean environment for the
testing.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/lib/aux.sh | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/test/lib/aux.sh b/test/lib/aux.sh
index a592dad813b7..bb189f466cef 100644
--- a/test/lib/aux.sh
+++ b/test/lib/aux.sh
@@ -897,6 +897,20 @@ wipefs_a() {
udev_wait
}
+cleanup_idm_context() {
+ local dev=$1
+
+ if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
+ sg_dev=`sg_map26 ${dev}`
+ echo "Cleanup IDM context for drive ${dev} ($sg_dev)"
+ sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
+ 88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
+ sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
+ 8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
+ rm /tmp/idm_tmp_data.bin
+ fi
+}
+
prepare_backing_dev() {
local size=${1=32}
shift
@@ -989,12 +1003,15 @@ prepare_devs() {
return $?
fi
- for d in "${BACKING_DEVICE_ARRAY[@]}"; do
- cnt=$((`blockdev --getsize64 $d` / 1024 / 1024))
- cnt=$(( cnt < 1000 ? cnt : 1000 ))
- dd if=/dev/zero of="$d" bs=1MB count=$cnt
- wipefs -a "$d" 2>/dev/null || true
- done
+ if [ -n "$LVM_TEST_BACKING_DEVICE" ]; then
+ for d in "${BACKING_DEVICE_ARRAY[@]}"; do
+ cnt=$((`blockdev --getsize64 $d` / 1024 / 1024))
+ cnt=$(( cnt < 1000 ? cnt : 1000 ))
+ dd if=/dev/zero of="$d" bs=1MB count=$cnt
+ wipefs -a "$d" 2>/dev/null || true
+ cleanup_idm_context "$d"
+ done
+ fi
# non-ephemeral devices need to be cleared between tests
test -f LOOP -o -f RAMDISK || for d in "${DEVICES[@]}"; do
--
1.8.3.1

View File

@ -0,0 +1,33 @@
From 5b361b197e1a3fbbc5419b9c46f19539c2b7305c Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:12 +0800
Subject: [PATCH 16/33] tests: Add checking for lvmlockd log
Add checking for lvmlockd log, this can be used for the test cases which
are interested in the interaction with lvmlockd.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/lib/check.sh | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/test/lib/check.sh b/test/lib/check.sh
index 8493bde83e1d..1f261940af3b 100644
--- a/test/lib/check.sh
+++ b/test/lib/check.sh
@@ -456,6 +456,11 @@ grep_dmsetup() {
grep -q "${@:3}" out || die "Expected output \"" "${@:3}" "\" from dmsetup $1 not found!"
}
+grep_lvmlockd_dump() {
+ lvmlockctl --dump | tee out
+ grep -q "${@:1}" out || die "Expected output \"" "${@:1}" "\" from lvmlockctl --dump not found!"
+}
+
#set -x
unset LVM_VALGRIND
"$@"
--
1.8.3.1

View File

@ -0,0 +1,83 @@
From fe660467fa4c943bb0ce928e7af65572e2ddeddc Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:13 +0800
Subject: [PATCH 17/33] tests: stress: Add single thread stress testing
This patch is to add the stress testing, which loops to create LV,
activate and deactivate LV in the single thread.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/stress_single_thread.sh | 59 ++++++++++++++++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 test/shell/stress_single_thread.sh
diff --git a/test/shell/stress_single_thread.sh b/test/shell/stress_single_thread.sh
new file mode 100644
index 000000000000..e18d4900b158
--- /dev/null
+++ b/test/shell/stress_single_thread.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux prepare_vg 3
+
+for i in {1..1000}
+do
+ # Create new logic volume and deactivate it
+ lvcreate -a n --zero n -l 1 -n foo $vg
+
+ # Set minor number
+ lvchange $vg/foo -My --major=255 --minor=123
+
+ # Activate logic volume
+ lvchange $vg/foo -a y
+
+ # Check device mapper
+ dmsetup info $vg-foo | tee info
+ grep -E "^Major, minor: *[0-9]+, 123" info
+
+ # Extend logic volume with 10%
+ lvextend -l+10 $vg/foo
+
+ # Deactivate logic volume
+ lvchange $vg/foo -a n
+
+ # Deactivate volume group
+ vgchange $vg -a n
+
+ # Activate volume group with shareable mode
+ vgchange $vg -a sy
+
+ # lvextend fails due to mismatched lock mode
+ not lvextend -l+10 $vg/foo
+
+ # Promote volume group to exclusive mode
+ vgchange $vg -a ey
+
+ lvreduce -f -l-4 $vg/foo
+
+ lvchange -an $vg/foo
+ lvremove $vg/foo
+done
+
+vgremove -ff $vg
--
1.8.3.1

View File

@ -0,0 +1,137 @@
From 692fe7bb31b6682151601f196e6274a3e8b772d5 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:14 +0800
Subject: [PATCH 18/33] tests: stress: Add multi-threads stress testing for
VG/LV
This patch is to add the stress testing, which launches two threads,
each thread creates LV, activate and deactivate LV in the loop; so this
can test for multi-threading in lvmlockd and its backend lock manager.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/stress_multi_threads_1.sh | 111 +++++++++++++++++++++++++++++++++++
1 file changed, 111 insertions(+)
create mode 100644 test/shell/stress_multi_threads_1.sh
diff --git a/test/shell/stress_multi_threads_1.sh b/test/shell/stress_multi_threads_1.sh
new file mode 100644
index 000000000000..c96fa244b1ba
--- /dev/null
+++ b/test/shell/stress_multi_threads_1.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux prepare_devs 6
+get_devs
+
+pvcreate -M2 "${DEVICES[@]}"
+
+vgcreate --shared -M2 "$vg1" "$dev1" "$dev2" "$dev3"
+vgcreate --shared -M2 "$vg2" "$dev4" "$dev5" "$dev6"
+
+test_vg_thread1()
+{
+ for i in {1..1000}
+ do
+ # Create new logic volume and deactivate it
+ lvcreate -a n --zero n -l 1 -n foo $vg1
+
+ # Set minor number
+ lvchange $vg1/foo -My --major=255 --minor=123
+
+ # Activate logic volume
+ lvchange $vg1/foo -a y
+
+ # Extend logic volume with 10%
+ lvextend -l+10 $vg1/foo
+
+ # Deactivate logic volume
+ lvchange $vg1/foo -a n
+
+ # Deactivate volume group
+ vgchange $vg1 -a n
+
+ # Activate volume group with shareable mode
+ vgchange $vg1 -a sy
+
+ # lvextend fails due to mismatched lock mode
+ not lvextend -l+10 $vg1/foo
+
+ # Promote volume group to exclusive mode
+ vgchange $vg1 -a ey
+
+ lvreduce -f -l-4 $vg1/foo
+
+ lvchange -an $vg1/foo
+ lvremove $vg1/foo
+ done
+}
+
+test_vg_thread2()
+{
+ for i in {1..1000}
+ do
+ # Create new logic volume and deactivate it
+ lvcreate -a n --zero n -l 1 -n foo $vg2
+
+ # Set minor number
+ lvchange $vg2/foo -My --major=255 --minor=124
+
+ # Activate logic volume
+ lvchange $vg2/foo -a y
+
+ # Extend logic volume with 10%
+ lvextend -l+10 $vg2/foo
+
+ # Deactivate logic volume
+ lvchange $vg2/foo -a n
+
+ # Deactivate volume group
+ vgchange $vg2 -a n
+
+ # Activate volume group with shareable mode
+ vgchange $vg2 -a sy
+
+ # lvextend fails due to mismatched lock mode
+ not lvextend -l+10 $vg2/foo
+
+ # Promote volume group to exclusive mode
+ vgchange $vg2 -a ey
+
+ lvreduce -f -l-4 $vg2/foo
+
+ lvchange -an $vg2/foo
+ lvremove $vg2/foo
+ done
+}
+
+test_vg_thread1 &
+WAITPID=$!
+
+test_vg_thread2 &
+WAITPID="$WAITPID "$!
+
+wait $WAITPID
+
+vgremove -ff $vg1
+vgremove -ff $vg2
--
1.8.3.1

View File

@ -0,0 +1,119 @@
From f83e11ff4332ce7ca24f6bfede4fe60c48123700 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:15 +0800
Subject: [PATCH 19/33] tests: stress: Add multi-threads stress testing for
PV/VG/LV
This patch is to add the stress testing, which launches three threads,
one thread is for creating/removing PV, one thread is for
creating/removing VG, and the last one thread is for LV operations.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/stress_multi_threads_2.sh | 93 ++++++++++++++++++++++++++++++++++++
1 file changed, 93 insertions(+)
create mode 100644 test/shell/stress_multi_threads_2.sh
diff --git a/test/shell/stress_multi_threads_2.sh b/test/shell/stress_multi_threads_2.sh
new file mode 100644
index 000000000000..a035b5727c97
--- /dev/null
+++ b/test/shell/stress_multi_threads_2.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux prepare_devs 8
+get_devs
+
+pvcreate -M2 "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
+
+test_vg_thread1()
+{
+ for i in {1..1000}
+ do
+ vgcreate --shared -M2 "$vg1" "$dev1" "$dev2" "$dev3"
+ vgremove -ff $vg1
+ done
+}
+
+test_vg_thread2()
+{
+ vgcreate --shared -M2 "$vg2" "$dev4" "$dev5" "$dev6"
+
+ for i in {1..1000}
+ do
+ # Create new logic volume and deactivate it
+ lvcreate -a n --zero n -l 1 -n foo $vg2
+
+ # Set minor number
+ lvchange $vg2/foo -My --major=255 --minor=124
+
+ # Activate logic volume
+ lvchange $vg2/foo -a y
+
+ # Extend logic volume with 10%
+ lvextend -l+10 $vg2/foo
+
+ # Deactivate logic volume
+ lvchange $vg2/foo -a n
+
+ # Deactivate volume group
+ vgchange $vg2 -a n
+
+ # Activate volume group with shareable mode
+ vgchange $vg2 -a sy
+
+ # lvextend fails due to mismatched lock mode
+ not lvextend -l+10 $vg2/foo
+
+ # Promote volume group to exclusive mode
+ vgchange $vg2 -a ey
+
+ lvreduce -f -l-4 $vg2/foo
+
+ lvchange -an $vg2/foo
+ lvremove $vg2/foo
+ done
+
+ vgremove -ff $vg2
+}
+
+test_vg_thread3()
+{
+ for i in {1..1000}
+ do
+ pvcreate -M2 "$dev7" "$dev8"
+ pvremove "$dev7"
+ pvremove "$dev8"
+ done
+}
+
+test_vg_thread1 &
+WAITPID=$!
+
+test_vg_thread2 &
+WAITPID="$WAITPID "$!
+
+test_vg_thread3 &
+WAITPID="$WAITPID "$!
+
+wait $WAITPID
--
1.8.3.1

View File

@ -0,0 +1,111 @@
From 8c7b2df41fdddcd1b1c504522ab79300882eb72f Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:16 +0800
Subject: [PATCH 20/33] tests: Support idm failure injection
When the drive failure occurs, the IDM lock manager and lvmlockd should
handle this case properly. E.g. when the IDM lock manager detects the
lease renewal failure caused by I/O errors, it should invoke the kill
path which is predefined by lvmlockd, so that the kill path program
(like lvmlockctl) can send requests to lvmlockd to stop and drop lock
for the relevant VG/LVs.
To verify the failure handling flow, this patch introduces an idm
failure injection program, it can input the "percentage" for drive
failures so that can emulate different failure cases.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/Makefile.in | 5 ++++
test/lib/idm_inject_failure.c | 55 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 60 insertions(+)
create mode 100644 test/lib/idm_inject_failure.c
diff --git a/test/Makefile.in b/test/Makefile.in
index 662974be6ccb..573df77a7ac8 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -171,6 +171,7 @@ endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
check_lvmlockd_idm: .tests-stamp
+ $(INSTALL_PROGRAM) lib/idm_inject_failure $(EXECDIR)
VERBOSE=$(VERBOSE) ./lib/runner \
--testdir . --outdir $(LVM_TEST_RESULTS) \
--flavours udev-lvmlockd-idm --only shell/aa-lvmlockd-idm-prepare.sh,$(T),shell/zz-lvmlockd-idm-remove.sh --skip $(S)
@@ -269,6 +270,10 @@ lib/securetest: lib/dmsecuretest.o .lib-dir-stamp
@echo " [CC] $@"
$(Q) $(CC) -g $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -o $@ $< -L$(interfacebuilddir) -ldevmapper $(LIBS)
+lib/idm_inject_failure: lib/idm_inject_failure.o .lib-dir-stamp
+ @echo " [CC] $@"
+ $(Q) $(CC) -g $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -o $@ $< $(INTERNAL_LIBS) $(LIBS) -lseagate_ilm
+
lib/runner.o: $(wildcard $(srcdir)/lib/*.h)
CFLAGS_runner.o += $(EXTRA_EXEC_CFLAGS)
diff --git a/test/lib/idm_inject_failure.c b/test/lib/idm_inject_failure.c
new file mode 100644
index 000000000000..4998b585af6e
--- /dev/null
+++ b/test/lib/idm_inject_failure.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020-2021 Seagate Ltd.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU Lesser General Public License v.2.1.
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/inotify.h>
+#include <uuid/uuid.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <ilm.h>
+
+int main(int argc, char *argv[])
+{
+ int pecent = atoi(argv[1]);
+ int ret, s;
+
+ ret = ilm_connect(&s);
+ if (ret == 0) {
+ printf("ilm_connect: SUCCESS\n");
+ } else {
+ printf("ilm_connect: FAIL\n");
+ exit(-1);
+ }
+
+ ret = ilm_inject_fault(s, pecent);
+ if (ret == 0) {
+ printf("ilm_inject_fault (100): SUCCESS\n");
+ } else {
+ printf("ilm_inject_fault (100): FAIL\n");
+ exit(-1);
+ }
+
+ ret = ilm_disconnect(s);
+ if (ret == 0) {
+ printf("ilm_disconnect: SUCCESS\n");
+ } else {
+ printf("ilm_disconnect: FAIL\n");
+ exit(-1);
+ }
+
+ return 0;
+}
--
1.8.3.1

View File

@ -0,0 +1,93 @@
From 874001ee6e0e7812e4e8d19994a1fd7de43d3249 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:17 +0800
Subject: [PATCH 21/33] tests: Add testing for lvmlockd failure
After the lvmlockd abnormally exits and relaunch the daemon, if LVM
commands continue to run, lvmlockd and the backend lock manager (e.g.
sanlock lock manager or IDM lock manager) should be able to continue
serving the requests from LVM commands.
This patch adds a test to emulate lvmlockd failure, and verify the LVM
commands after lvmlockd recovers back. Below is an example for testing
the case:
# make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdo3,/dev/sdp3,/dev/sdp4 \
LVM_TEST_FAILURE=1 T=lvmlockd_failure.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/lib/inittest.sh | 3 ++-
test/shell/lvmlockd_failure.sh | 37 +++++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+), 1 deletion(-)
create mode 100644 test/shell/lvmlockd_failure.sh
diff --git a/test/lib/inittest.sh b/test/lib/inittest.sh
index 6b4bcb348010..98a916ca6e10 100644
--- a/test/lib/inittest.sh
+++ b/test/lib/inittest.sh
@@ -31,6 +31,7 @@ LVM_TEST_BACKING_DEVICE=${LVM_TEST_BACKING_DEVICE-}
LVM_TEST_DEVDIR=${LVM_TEST_DEVDIR-}
LVM_TEST_NODEBUG=${LVM_TEST_NODEBUG-}
LVM_TEST_LVM1=${LVM_TEST_LVM1-}
+LVM_TEST_FAILURE=${LVM_TEST_FAILURE-}
# TODO: LVM_TEST_SHARED
SHARED=${SHARED-}
@@ -63,7 +64,7 @@ test -n "$SKIP_WITH_LVMLOCKD" && test -n "$LVM_TEST_LVMLOCKD" && initskip
unset CDPATH
-export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG
+export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG LVM_TEST_FAILURE
export LVM_TEST_LVMLOCKD LVM_TEST_LVMLOCKD_TEST
export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK LVM_TEST_LOCK_TYPE_IDM
export LVM_TEST_DEVICES_FILE
diff --git a/test/shell/lvmlockd_failure.sh b/test/shell/lvmlockd_failure.sh
new file mode 100644
index 000000000000..e0fccfb83b53
--- /dev/null
+++ b/test/shell/lvmlockd_failure.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020~2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_FAILURE" ] && skip;
+
+aux prepare_vg 3
+
+# Create new logic volume
+lvcreate -a ey --zero n -l 1 -n $lv1 $vg
+
+# Emulate lvmlockd abnormally exiting
+killall -9 lvmlockd
+
+systemctl start lvm2-lvmlockd
+
+vgchange --lock-start $vg
+
+lvchange -a n $vg/$lv1
+lvchange -a sy $vg/$lv1
+
+lvcreate -a ey --zero n -l 1 -n $lv2 $vg
+lvchange -a n $vg/$lv2
+
+vgremove -ff $vg
--
1.8.3.1

View File

@ -0,0 +1,94 @@
From fc0495ea04a96c0990726db98c80bd2732d3695a Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:18 +0800
Subject: [PATCH 22/33] tests: idm: Add testing for the fabric failure
When the fabric failure occurs, it will lose the connection with hosts
instantly, and after a while it can recovery back so that the hosts can
continue to access the drives.
For this case, the locking manager should be reliable for this case and
can dynamically handle this case and allows user to continue to use the
VG/LV with associated locking scheme.
This patch adds a test to emulate the fabric failure and verify LVM
commands for this case.  The testing usage is:
# make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdo3,/dev/sdp3,/dev/sdp4 \
LVM_TEST_FAILURE=1 T=idm_fabric_failure.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/idm_fabric_failure.sh | 58 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)
create mode 100644 test/shell/idm_fabric_failure.sh
diff --git a/test/shell/idm_fabric_failure.sh b/test/shell/idm_fabric_failure.sh
new file mode 100644
index 000000000000..e68d6ad07be0
--- /dev/null
+++ b/test/shell/idm_fabric_failure.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_FAILURE" ] && skip;
+
+aux prepare_devs 3
+aux extend_filter_LVMTEST
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
+
+# Create new logic volume
+lvcreate -a ey --zero n -l 50%FREE -n $lv1 $vg
+
+DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+DRIVE3=`dmsetup deps -o devname $dev3 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+
+HOST1=`readlink /sys/block/$DRIVE1 | awk -F'/' '{print $6}'`
+HOST2=`readlink /sys/block/$DRIVE2 | awk -F'/' '{print $6}'`
+HOST3=`readlink /sys/block/$DRIVE3 | awk -F'/' '{print $6}'`
+
+# Emulate fabric failure
+echo 1 > /sys/block/$DRIVE1/device/delete
+[ -f /sys/block/$DRIVE2/device/delete ] && echo 1 > /sys/block/$DRIVE2/device/delete
+[ -f /sys/block/$DRIVE3/device/delete ] && echo 1 > /sys/block/$DRIVE3/device/delete
+
+# Wait for 10s and will not lead to timeout
+sleep 10
+
+# Rescan drives so can probe the deleted drives and join back them
+echo "- - -" > /sys/class/scsi_host/${HOST1}/scan
+echo "- - -" > /sys/class/scsi_host/${HOST2}/scan
+echo "- - -" > /sys/class/scsi_host/${HOST3}/scan
+
+not check grep_lvmlockd_dump "S lvm_$vg kill_vg"
+
+# The previous device-mapper are removed, but LVM still can directly
+# access VGs from the specified physical drives. So enable drives
+# for these drives.
+aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|" "a|/dev/$DRIVE3*|"
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+lvcreate -a n --zero n -l 10 -n $lv2 $vg
+
+vgremove -ff $vg
--
1.8.3.1

View File

@ -0,0 +1,111 @@
From 91d3b568758cbb11ffd797c864d03f7f36426efc Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:19 +0800
Subject: [PATCH 23/33] tests: idm: Add testing for the fabric failure and
timeout
If the fabric is broken instantly, the drives connected on the fabric
will disappear from the system.  In the worst case, the lease times out
and the drives cannot recover.  So a new test is added to emulate
this scenario, it uses a drive for LVM operations and this drive is also
used for locking scheme; if the drive and all its associated paths (if
the drive supports multiple paths) are disconnected, the lock manager
should stop the lockspace for the VG/LVs.
And afterwards, if the drive recovers back, the VG/LV resident in the
drive should be operated properly. The test command is as below:
# make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdp3 LVM_TEST_FAILURE=1 \
T=idm_fabric_failure_timeout.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/idm_fabric_failure_timeout.sh | 74 ++++++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
create mode 100644 test/shell/idm_fabric_failure_timeout.sh
diff --git a/test/shell/idm_fabric_failure_timeout.sh b/test/shell/idm_fabric_failure_timeout.sh
new file mode 100644
index 000000000000..cf71f760970a
--- /dev/null
+++ b/test/shell/idm_fabric_failure_timeout.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
+[ -z "$LVM_TEST_FAILURE" ] && skip;
+
+aux prepare_devs 1
+aux extend_filter_LVMTEST
+
+DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+
+# The previous device-mapper are removed, but LVM still can directly
+# access VGs from the specified physical drives. So enable drives
+# for these drives.
+aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|"
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgcreate $SHARED $vg "$dev1"
+
+# Create new logic volume
+lvcreate -a ey --zero n -l 1 -n $lv1 $vg
+
+drive_list=($DRIVE1)
+
+# Find all drives with the same WWN and delete them from system,
+# so that we can emulate the same drive with multiple paths are
+# disconnected with system.
+drive_wwn=`udevadm info /dev/${DRIVE1} | awk -F= '/E: ID_WWN=/ {print $2}'`
+for dev in /dev/*; do
+ if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
+ wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
+ if [ "$wwn" = "$drive_wwn" ]; then
+ base_name="$(basename -- ${dev})"
+ drive_list+=("$base_name")
+ host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
+ fi
+ fi
+done
+
+for d in "${drive_list[@]}"; do
+ [ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
+done
+
+# Fail to create new logic volume
+not lvcreate -a n --zero n -l 1 -n $lv2 $vg
+
+# Wait for lock time out caused by drive failure
+sleep 70
+
+check grep_lvmlockd_dump "S lvm_$vg kill_vg"
+lvmlockctl --drop $vg
+
+# Rescan drives so can probe the deleted drives and join back them
+for h in "${host_list[@]}"; do
+ [ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
+done
+
+# After the drive is reconnected, $vg should be visible again.
+vgchange --lock-start
+vgremove -ff $vg
--
1.8.3.1

View File

@ -0,0 +1,115 @@
From 38abd6bb2c3c35ad476f11fd3cd4ee8d119e364d Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:20 +0800
Subject: [PATCH 24/33] tests: idm: Add testing for the fabric's half brain
failure
If the fabric is broken instantly and the partial drives connected on
the fabric disappear from the system.  For this case, according to the
locking algorithm in idm, the lease will not be lost, since half of the
drives are still alive and can renew the lease.  On the
other hand, since the VG lock requires to acquire the majority of drive
number, but half drives failure cannot achieve the majority, so it
cannot acquire the lock for VG and thus cannot change metadata for VG.
This patch is to add half brain failure for idm; the test command is as
below:
# make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdp3,/dev/sdo3 LVM_TEST_FAILURE=1 \
T=idm_fabric_failure_half_brain.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/idm_fabric_failure_half_brain.sh | 78 +++++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
create mode 100644 test/shell/idm_fabric_failure_half_brain.sh
diff --git a/test/shell/idm_fabric_failure_half_brain.sh b/test/shell/idm_fabric_failure_half_brain.sh
new file mode 100644
index 000000000000..c692a12ad9c4
--- /dev/null
+++ b/test/shell/idm_fabric_failure_half_brain.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
+[ -z "$LVM_TEST_FAILURE" ] && skip;
+
+aux prepare_devs 2
+aux extend_filter_LVMTEST
+
+DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+
+[ "$(basename -- $DRIVE1)" = "$(basename -- $DRIVE2)" ] && die "Need to pass two different drives!?"
+
+# The previous device-mapper are removed, but LVM still can directly
+# access VGs from the specified physical drives. So enable drives
+# for these drives.
+aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|"
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgcreate $SHARED $vg "$dev1" "$dev2"
+
+# Create new logic volume
+lvcreate -a ey --zero n -l 100%FREE -n $lv1 $vg
+
+drive_list=($DRIVE1)
+
+# Find all drives with the same WWN and delete them from system,
+# so that we can emulate the same drive with multiple paths are
+# disconnected with system.
+drive_wwn=`udevadm info /dev/${DRIVE1} | awk -F= '/E: ID_WWN=/ {print $2}'`
+for dev in /dev/*; do
+ if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
+ wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
+ if [ "$wwn" = "$drive_wwn" ]; then
+ base_name="$(basename -- ${dev})"
+ drive_list+=("$base_name")
+ host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
+ fi
+ fi
+done
+
+for d in "${drive_list[@]}"; do
+ [ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
+done
+
+# Fail to create a new logical volume
+not lvcreate -a n --zero n -l 1 -n $lv2 $vg
+
+# Wait for lock time out caused by drive failure
+sleep 70
+
+not check grep_lvmlockd_dump "S lvm_$vg kill_vg"
+
+# Rescan drives so can probe the deleted drives and join back them
+for h in "${host_list[@]}"; do
+ [ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
+done
+
+# After the drive is reconnected, $vg should be visible again.
+vgchange --lock-start
+lvremove -f $vg/$lv1
+lvcreate -a ey --zero n -l 1 -n $lv2 $vg
+vgremove -ff $vg
--
1.8.3.1

View File

@ -0,0 +1,117 @@
From 92b47d8eb8c4b717fd79d0b7c50ecac0dceb31a5 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:21 +0800
Subject: [PATCH 25/33] tests: idm: Add testing for IDM lock manager failure
If the IDM lock manager fails to access drives -- it might partially fail
to access drives (e.g. it fails to access one of three drives), or totally
fail to access all drives -- the lock manager should handle these cases
properly.  When the drives partially fail, if the lock manager can still
renew the lease for the locking, then it doesn't need to take any
action for the drive failure; otherwise, if it detects that it cannot
renew the locking majority, it needs to immediately kill the VG from
lvmlockd.
This patch adds the test for verification the IDM lock manager failure;
the command can be used as below:
# make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdp3,/dev/sdl3,/dev/sdq3 \
LVM_TEST_FAILURE=1 T=idm_ilm_failure.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/idm_ilm_failure.sh | 80 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 80 insertions(+)
create mode 100644 test/shell/idm_ilm_failure.sh
diff --git a/test/shell/idm_ilm_failure.sh b/test/shell/idm_ilm_failure.sh
new file mode 100644
index 000000000000..58bed270eaa7
--- /dev/null
+++ b/test/shell/idm_ilm_failure.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
+[ -z "$LVM_TEST_FAILURE" ] && skip;
+
+aux prepare_devs 3
+aux extend_filter_LVMTEST
+
+DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+DRIVE3=`dmsetup deps -o devname $dev3 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
+
+if [ "$DRIVE1" = "$DRIVE2" ] || [ "$DRIVE1" = "$DRIVE3" ] || [ "$DRIVE2" = "$DRIVE3" ]; then
+ die "Need to pass three different drives!?"
+fi
+
+# The previous device-mapper are removed, but LVM still can directly
+# access VGs from the specified physical drives. So enable drives
+# for these drives.
+aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|" "a|/dev/$DRIVE3*|"
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
+
+# Create new logic volume and deactivate it
+lvcreate -a y --zero n -l 1 -n $lv1 $vg
+
+# Inject failure 40% so cannot send partially request to drives
+idm_inject_failure 40
+
+# Wait for 40s, but the lock will not be time out
+sleep 40
+
+# Inject failure with 0% so can access drives
+idm_inject_failure 0
+
+# Deactivate logic volume due to locking failure
+lvchange $vg/$lv1 -a n
+
+# Inject failure 100% so cannot send request to drives
+idm_inject_failure 100
+
+# Wait for 70s but should have no any alive locks
+sleep 70
+
+# Inject failure with 0% so can access drives
+idm_inject_failure 0
+
+# Activate logic volume
+lvchange $vg/$lv1 -a y
+
+# Inject failure so cannot send request to drives
+idm_inject_failure 100
+
+# Wait for 70s but will not time out
+sleep 70
+
+# Inject failure with 0% so can access drives
+idm_inject_failure 0
+
+check grep_lvmlockd_dump "S lvm_$vg kill_vg"
+lvmlockctl --drop $vg
+
+vgchange --lock-start
+vgremove -f $vg
--
1.8.3.1

View File

@ -0,0 +1,177 @@
From e75bd71aaea6e092b93533bdc948fd527821d297 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:22 +0800
Subject: [PATCH 26/33] tests: multi-hosts: Add VG testing
This patch is to add VG testing on multiple hosts.  There are two
scripts: multi_hosts_vg_hosta.sh is used to create VGs on one host,
and the second script multi_hosts_vg_hostb.sh afterwards will acquire
the global lock and VG lock, and remove the VGs.  The testing flow
verifies the locking operations between two hosts with lvmlockd and
the backend locking manager.
On the host A:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hosta.sh
On the host B:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hostb.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/Makefile.in | 1 +
test/lib/inittest.sh | 2 ++
test/shell/multi_hosts_vg_hosta.sh | 45 +++++++++++++++++++++++++++++++++
test/shell/multi_hosts_vg_hostb.sh | 52 ++++++++++++++++++++++++++++++++++++++
4 files changed, 100 insertions(+)
create mode 100644 test/shell/multi_hosts_vg_hosta.sh
create mode 100644 test/shell/multi_hosts_vg_hostb.sh
diff --git a/test/Makefile.in b/test/Makefile.in
index 573df77a7ac8..cd134129be9e 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -93,6 +93,7 @@ help:
@echo -e "\nSupported variables:"
@echo " LVM_TEST_AUX_TRACE Set for verbose messages for aux scripts []."
@echo " LVM_TEST_BACKING_DEVICE Set device used for testing (see also LVM_TEST_DIR)."
+ @echo " LVM_TEST_MULTI_HOST Set multiple hosts used for testing."
@echo " LVM_TEST_CAN_CLOBBER_DMESG Allow to clobber dmesg buffer without /dev/kmsg. (1)"
@echo " LVM_TEST_DEVDIR Set to '/dev' to run on real /dev."
@echo " LVM_TEST_PREFER_BRD Prefer using brd (ramdisk) over loop for testing [1]."
diff --git a/test/lib/inittest.sh b/test/lib/inittest.sh
index 98a916ca6e10..4ca8ac59e957 100644
--- a/test/lib/inittest.sh
+++ b/test/lib/inittest.sh
@@ -32,6 +32,7 @@ LVM_TEST_DEVDIR=${LVM_TEST_DEVDIR-}
LVM_TEST_NODEBUG=${LVM_TEST_NODEBUG-}
LVM_TEST_LVM1=${LVM_TEST_LVM1-}
LVM_TEST_FAILURE=${LVM_TEST_FAILURE-}
+LVM_TEST_MULTI_HOST=${LVM_TEST_MULTI_HOST-}
# TODO: LVM_TEST_SHARED
SHARED=${SHARED-}
@@ -65,6 +66,7 @@ test -n "$SKIP_WITH_LVMLOCKD" && test -n "$LVM_TEST_LVMLOCKD" && initskip
unset CDPATH
export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG LVM_TEST_FAILURE
+export LVM_TEST_MULTI_HOST
export LVM_TEST_LVMLOCKD LVM_TEST_LVMLOCKD_TEST
export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK LVM_TEST_LOCK_TYPE_IDM
export LVM_TEST_DEVICES_FILE
diff --git a/test/shell/multi_hosts_vg_hosta.sh b/test/shell/multi_hosts_vg_hosta.sh
new file mode 100644
index 000000000000..15347490c8f4
--- /dev/null
+++ b/test/shell/multi_hosts_vg_hosta.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing, the paired scripts
+# are: multi_hosts_vg_hosta.sh / multi_hosts_vg_hostb.sh
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+i=0
+for d in "${BLKDEVS[@]}"; do
+ echo $i
+ i=$((i+1))
+ vgcreate $SHARED TESTVG$i $d
+ vgchange -a n TESTVG$i
+done
diff --git a/test/shell/multi_hosts_vg_hostb.sh b/test/shell/multi_hosts_vg_hostb.sh
new file mode 100644
index 000000000000..bab65b68b35e
--- /dev/null
+++ b/test/shell/multi_hosts_vg_hostb.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing, the paired scripts
+# are: multi_hosts_vg_hosta.sh / multi_hosts_vg_hostb.sh
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgchange --lock-start
+
+i=0
+for d in "${BLKDEVS[@]}"; do
+ i=$((i+1))
+ check vg_field TESTVG$i lv_count 0
+done
+
+i=0
+for d in "${BLKDEVS[@]}"; do
+ i=$((i+1))
+ vgchange -a ey TESTVG$i
+ vgremove -ff TESTVG$i
+done
--
1.8.3.1

View File

@ -0,0 +1,185 @@
From e9950efff1d8cad43d6aec38fa30ff8b801960fb Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:23 +0800
Subject: [PATCH 27/33] tests: multi-hosts: Add LV testing
This patch is to add LV testing on multiple hosts.  There are two
scripts: multi_hosts_lv_hosta.sh is used to create LVs on one host,
and the second script multi_hosts_lv_hostb.sh will acquire the
global lock and VG lock, and remove the VGs.  The testing flow verifies
the locking operations between two hosts with lvmlockd and the backend
locking manager.
On the host A:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hosta.sh
On the host B:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hostb.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/multi_hosts_lv_hosta.sh | 78 ++++++++++++++++++++++++++++++++++++++
test/shell/multi_hosts_lv_hostb.sh | 61 +++++++++++++++++++++++++++++
2 files changed, 139 insertions(+)
create mode 100644 test/shell/multi_hosts_lv_hosta.sh
create mode 100644 test/shell/multi_hosts_lv_hostb.sh
diff --git a/test/shell/multi_hosts_lv_hosta.sh b/test/shell/multi_hosts_lv_hosta.sh
new file mode 100644
index 000000000000..68404d251faa
--- /dev/null
+++ b/test/shell/multi_hosts_lv_hosta.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing, the paired scripts
+# are: multi_hosts_lv_hosta.sh / multi_hosts_lv_hostb.sh
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+BLKDEVS_NUM=${#BLKDEVS[@]}
+
+for d in "${BLKDEVS[@]}"; do
+ dd if=/dev/zero of="$d" bs=32k count=1
+ wipefs -a "$d" 2>/dev/null || true
+
+ sg_dev=`sg_map26 ${d}`
+ if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
+ echo "Cleanup IDM context for drive ${d} ($sg_dev)"
+ sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
+ 88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
+ sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
+ 8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
+ rm /tmp/idm_tmp_data.bin
+ fi
+done
+
+#aux prepare_pvs $BLKDEVS_NUM 6400
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ echo $i
+ d="dev$i"
+ vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
+
+ for j in {1..20}; do
+ lvcreate -a n --zero n -l 1 -n foo$j TESTVG$i
+ done
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ for j in {1..20}; do
+ lvchange -a ey TESTVG$i/foo$j
+ done
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ for j in {1..20}; do
+ lvchange -a n TESTVG$i/foo$j
+ done
+done
diff --git a/test/shell/multi_hosts_lv_hostb.sh b/test/shell/multi_hosts_lv_hostb.sh
new file mode 100644
index 000000000000..13efd1a6b5b8
--- /dev/null
+++ b/test/shell/multi_hosts_lv_hostb.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2020 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing, the paired scripts
+# are: multi_hosts_lv_hosta.sh / multi_hosts_lv_hostb.sh
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgchange --lock-start
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ for j in {1..20}; do
+ lvchange -a sy TESTVG$i/foo$j
+ done
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ for j in {1..20}; do
+ lvchange -a ey TESTVG$i/foo$j
+ done
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ for j in {1..20}; do
+ lvchange -a n TESTVG$i/foo$j
+ done
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ vgremove -f TESTVG$i
+done
--
1.8.3.1

View File

@ -0,0 +1,197 @@
From 0a4d6d9d1d8ef4e3fe141c757dd5aad4b48b316c Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:24 +0800
Subject: [PATCH 28/33] tests: multi-hosts: Test lease timeout with LV
exclusive mode
This patch is to test timeout handling after activating an LV with
exclusive mode.  It contains two scripts, for host A and host B
separately.
The script on host A first creates VGs and LVs based on the passed
backing devices; every backing device is used for a dedicated VG, and an
LV is created as well in the VG.  Afterwards, all LVs are activated by
host A, so host A acquires the lease for these LVs.  Then the test is
designed to fail on host A.
After host A fails, host B starts to run the paired testing script;
it first fails to activate the LVs since the locks are leased by
host A.  After lease expiration (after 70s), host B can acquire the
lease for the LVs and it can operate on the LVs and VGs.
On the host A:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
On the host B:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/multi_hosts_lv_ex_timeout_hosta.sh | 87 +++++++++++++++++++++++++++
test/shell/multi_hosts_lv_ex_timeout_hostb.sh | 56 +++++++++++++++++
2 files changed, 143 insertions(+)
create mode 100644 test/shell/multi_hosts_lv_ex_timeout_hosta.sh
create mode 100644 test/shell/multi_hosts_lv_ex_timeout_hostb.sh
diff --git a/test/shell/multi_hosts_lv_ex_timeout_hosta.sh b/test/shell/multi_hosts_lv_ex_timeout_hosta.sh
new file mode 100644
index 000000000000..c8be91ee35f4
--- /dev/null
+++ b/test/shell/multi_hosts_lv_ex_timeout_hosta.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing.
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+for d in "${BLKDEVS[@]}"; do
+ dd if=/dev/zero of="$d" bs=32k count=1
+ wipefs -a "$d" 2>/dev/null || true
+
+ sg_dev=`sg_map26 ${d}`
+ if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
+ echo "Cleanup IDM context for drive ${d} ($sg_dev)"
+ sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
+ 88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
+ sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
+ 8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
+ rm /tmp/idm_tmp_data.bin
+ fi
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
+ lvcreate -a n --zero n -l 1 -n foo TESTVG$i
+ lvchange -a ey TESTVG$i/foo
+done
+
+for d in "${BLKDEVS[@]}"; do
+ drive_wwn=`udevadm info $d | awk -F= '/E: ID_WWN=/ {print $2}'`
+ for dev in /dev/*; do
+ if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
+ wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
+ if [ "$wwn" = "$drive_wwn" ]; then
+ base_name="$(basename -- ${dev})"
+ drive_list+=("$base_name")
+ host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
+ fi
+ fi
+ done
+done
+
+for d in "${drive_list[@]}"; do
+ [ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
+done
+
+sleep 100
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ check grep_lvmlockd_dump "S lvm_TESTVG$i kill_vg"
+ lvmlockctl --drop TESTVG$i
+done
+
+# Rescan drives so can probe the deleted drives and join back them
+for h in "${host_list[@]}"; do
+ [ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
+done
diff --git a/test/shell/multi_hosts_lv_ex_timeout_hostb.sh b/test/shell/multi_hosts_lv_ex_timeout_hostb.sh
new file mode 100644
index 000000000000..f0273fa44758
--- /dev/null
+++ b/test/shell/multi_hosts_lv_ex_timeout_hostb.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing.
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgchange --lock-start
+
+vgdisplay
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ not lvchange -a ey TESTVG$i/foo
+done
+
+# Sleep for 70 seconds so the previous lease is expired
+sleep 70
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ lvchange -a ey TESTVG$i/foo
+ lvchange -a n TESTVG$i/foo
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ vgremove -f TESTVG$i
+done
--
1.8.3.1

View File

@ -0,0 +1,187 @@
From fe05828e7e4c78a1ed4430ce4057c785d0b898a0 Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Thu, 3 Jun 2021 17:59:25 +0800
Subject: [PATCH 29/33] tests: multi-hosts: Test lease timeout with LV
shareable mode
This patch is to test timeout handling after activating an LV with
shareable mode.  It has the same logic as the testing for LV exclusive
mode, except that it verifies the locking with shareable mode.
On the host A:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hosta.sh
On the host B:
make check_lvmlockd_idm \
LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hostb.sh
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/multi_hosts_lv_sh_timeout_hosta.sh | 87 +++++++++++++++++++++++++++
test/shell/multi_hosts_lv_sh_timeout_hostb.sh | 56 +++++++++++++++++
2 files changed, 143 insertions(+)
create mode 100644 test/shell/multi_hosts_lv_sh_timeout_hosta.sh
create mode 100644 test/shell/multi_hosts_lv_sh_timeout_hostb.sh
diff --git a/test/shell/multi_hosts_lv_sh_timeout_hosta.sh b/test/shell/multi_hosts_lv_sh_timeout_hosta.sh
new file mode 100644
index 000000000000..6b24f9290f1f
--- /dev/null
+++ b/test/shell/multi_hosts_lv_sh_timeout_hosta.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing.
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+for d in "${BLKDEVS[@]}"; do
+ dd if=/dev/zero of="$d" bs=32k count=1
+ wipefs -a "$d" 2>/dev/null || true
+
+ sg_dev=`sg_map26 ${d}`
+ if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
+ echo "Cleanup IDM context for drive ${d} ($sg_dev)"
+ sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
+ 88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
+ sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
+ 8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
+ rm /tmp/idm_tmp_data.bin
+ fi
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
+ lvcreate -a n --zero n -l 1 -n foo TESTVG$i
+ lvchange -a sy TESTVG$i/foo
+done
+
+for d in "${BLKDEVS[@]}"; do
+ drive_wwn=`udevadm info $d | awk -F= '/E: ID_WWN=/ {print $2}'`
+ for dev in /dev/*; do
+ if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
+ wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
+ if [ "$wwn" = "$drive_wwn" ]; then
+ base_name="$(basename -- ${dev})"
+ drive_list+=("$base_name")
+ host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
+ fi
+ fi
+ done
+done
+
+for d in "${drive_list[@]}"; do
+ [ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
+done
+
+sleep 100
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ check grep_lvmlockd_dump "S lvm_TESTVG$i kill_vg"
+ lvmlockctl --drop TESTVG$i
+done
+
+# Rescan drives so can probe the deleted drives and join back them
+for h in "${host_list[@]}"; do
+ [ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
+done
diff --git a/test/shell/multi_hosts_lv_sh_timeout_hostb.sh b/test/shell/multi_hosts_lv_sh_timeout_hostb.sh
new file mode 100644
index 000000000000..7aea2235dea1
--- /dev/null
+++ b/test/shell/multi_hosts_lv_sh_timeout_hostb.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 Seagate, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This testing script is for multi-hosts testing.
+#
+# On the host A:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
+# On the host B:
+# make check_lvmlockd_idm \
+# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
+# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
+
+IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
+
+for d in "${BLKDEVS[@]}"; do
+ aux extend_filter_LVMTEST "a|$d|"
+done
+
+aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
+
+vgchange --lock-start
+
+vgdisplay
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ lvchange -a sy TESTVG$i/foo
+done
+
+# Sleep for 70 seconds so the previous lease is expired
+sleep 70
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ lvchange -a ey TESTVG$i/foo
+ lvchange -a n TESTVG$i/foo
+done
+
+for i in $(seq 1 ${#BLKDEVS[@]}); do
+ vgremove -f TESTVG$i
+done
--
1.8.3.1

View File

@ -0,0 +1,30 @@
From c43f2f8ae08ed0555a300764c8644ea56f4f41e2 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Thu, 3 Jun 2021 14:44:55 -0500
Subject: [PATCH 30/33] fix empty mem pool leak
of "config" when LVM_SYSTEM_DIR=""
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/commands/toolcontext.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/commands/toolcontext.c b/lib/commands/toolcontext.c
index 296618686f7b..e2be89d0f480 100644
--- a/lib/commands/toolcontext.c
+++ b/lib/commands/toolcontext.c
@@ -966,8 +966,8 @@ static void _destroy_config(struct cmd_context *cmd)
/* CONFIG_FILE/CONFIG_MERGED_FILES */
if ((cft = remove_config_tree_by_source(cmd, CONFIG_MERGED_FILES)))
config_destroy(cft);
- else
- remove_config_tree_by_source(cmd, CONFIG_FILE);
+ else if ((cft = remove_config_tree_by_source(cmd, CONFIG_FILE)))
+ config_destroy(cft);
dm_list_iterate_items(cfl, &cmd->config_files)
config_destroy(cfl->cft);
--
1.8.3.1

View File

@ -0,0 +1,161 @@
From a7f334a53269ee6967417a01d37b7f9592637cfc Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Mon, 7 Jun 2021 12:11:12 -0500
Subject: [PATCH 31/33] tests: writecache-blocksize add dm-cache tests
Add the same tests for dm-cache as exist for dm-writecache,
dm-cache uses a different blocksize in a couple cases.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/writecache-blocksize.sh | 64 +++++++++++++++++++++++++++-----------
1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/test/shell/writecache-blocksize.sh b/test/shell/writecache-blocksize.sh
index a8bb1e49d752..2579ef7b7bac 100644
--- a/test/shell/writecache-blocksize.sh
+++ b/test/shell/writecache-blocksize.sh
@@ -10,7 +10,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-# Test writecache usage
+# Test dm-writecache and dm-cache with different block size combinations
SKIP_WITH_LVMPOLLD=1
@@ -94,6 +94,7 @@ _verify_data_on_lv() {
lvchange -an $vg/$lv1
}
+# Check that the LBS/PBS that were set up is accurately reported for the devs.
_check_env() {
check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
@@ -105,24 +106,33 @@ _check_env() {
blockdev --getpbsz "$dev2"
}
+#
+# _run_test $BS1 $BS2 $type $optname "..."
+#
+# $BS1: the xfs sectsz is verified to match $BS1, after mkfs
+# $BS2: the lv1 LBS is verified to match $BS2, after cache is added to lv1
+# $type is cache or writecache to use in lvconvert --type $type
+# $optname is either --cachevol or --cachepool to use in lvconvert
+# "..." a sector size option to use in mkfs.xfs
+#
+
_run_test() {
vgcreate $SHARED $vg "$dev1"
vgextend $vg "$dev2"
lvcreate -n $lv1 -l 8 -an $vg "$dev1"
lvcreate -n $lv2 -l 4 -an $vg "$dev2"
lvchange -ay $vg/$lv1
- mkfs.xfs -f $2 "$DM_DEV_DIR/$vg/$lv1" |tee out
+ mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
grep "sectsz=$1" out
_add_new_data_to_mnt
- lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
+ lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
- grep "$1" out
+ grep "$2" out
blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
_add_more_data_to_mnt
_verify_data_on_mnt
lvconvert --splitcache $vg/$lv1
check lv_field $vg/$lv1 segtype linear
- check lv_field $vg/$lv2 segtype linear
blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
_verify_data_on_mnt
@@ -136,7 +146,7 @@ _run_test() {
vgremove $vg
}
-# scsi_debug devices with 512 LBS 512 PBS
+# Setup: LBS 512, PBS 512
aux prepare_scsi_debug_dev 256 || skip
aux prepare_devs 2 64
@@ -150,43 +160,58 @@ vgremove -ff $vg
_check_env "512" "512"
-# lbs 512, pbs 512, xfs 512, wc 512
-_run_test 512 ""
+# lbs 512, pbs 512, xfs 512, wc bs 512
+_run_test 512 512 "writecache" "--cachevol" ""
+# lbs 512, pbs 512, xfs 512, cache bs 512
+_run_test 512 512 "cache" "--cachevol" ""
+_run_test 512 512 "cache" "--cachepool" ""
-# lbs 512, pbs 512, xfs -s 4096, wc 4096
-_run_test 4096 "-s size=4096"
+# lbs 512, pbs 512, xfs -s 4096, wc bs 4096
+_run_test 4096 4096 "writecache" "--cachevol" "-s size=4096"
+# lbs 512, pbs 512, xfs -s 4096, cache bs 512
+_run_test 4096 512 "cache" "--cachevol" "-s size=4096"
+_run_test 4096 512 "cache" "--cachepool" "-s size=4096"
aux cleanup_scsi_debug_dev
-# lbs=512, pbs=4096
+# Setup: LBS 512, PBS 4096
aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
aux prepare_devs 2 64
_check_env "512" "4096"
-# lbs 512, pbs 4k, xfs 4k, wc 4k
-_run_test 4096 ""
+# lbs 512, pbs 4k, xfs 4k, wc bs 4k
+_run_test 4096 4096 "writecache" "--cachevol" ""
+# lbs 512, pbs 4k, xfs 4k, cache bs 512
+_run_test 4096 512 "cache" "--cachevol" ""
+_run_test 4096 512 "cache" "--cachepool" ""
-# lbs 512, pbs 4k, xfs -s 512, wc 512
-_run_test 512 "-s size=512"
+# lbs 512, pbs 4k, xfs -s 512, wc bs 512
+_run_test 512 512 "writecache" "--cachevol" "-s size=512"
+# lbs 512, pbs 4k, xfs -s 512, cache bs 512
+_run_test 512 512 "cache" "--cachevol" "-s size=512"
+_run_test 512 512 "cache" "--cachepool" "-s size=512"
aux cleanup_scsi_debug_dev
-# scsi_debug devices with 4K LBS and 4K PBS
+# Setup: LBS 4096, PBS 4096
aux prepare_scsi_debug_dev 256 sector_size=4096
aux prepare_devs 2 64
_check_env "4096" "4096"
-# lbs 4k, pbs 4k, xfs 4k, wc 4k
-_run_test 4096 ""
+# lbs 4k, pbs 4k, xfs 4k, wc bs 4k
+_run_test 4096 4096 "writecache" "--cachevol" ""
+# lbs 4k, pbs 4k, xfs 4k, cache bs 4k
+_run_test 4096 4096 "cache" "--cachevol" ""
+_run_test 4096 4096 "cache" "--cachepool" ""
aux cleanup_scsi_debug_dev
-# scsi_debug devices with 512 LBS 512 PBS
+# Setup: LBS 512, PBS 512
aux prepare_scsi_debug_dev 256 || skip
aux prepare_devs 2 64
@@ -222,3 +247,4 @@ lvremove $vg/$lv2
vgremove $vg
aux cleanup_scsi_debug_dev
+
--
1.8.3.1

View File

@ -0,0 +1,528 @@
From ff677aa69f8fc31e5733b0650e2324c826ce0794 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Mon, 7 Jun 2021 12:12:33 -0500
Subject: [PATCH 32/33] tests: rename test
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/writecache-blocksize.sh | 250 -------------------------------
test/shell/writecache-cache-blocksize.sh | 250 +++++++++++++++++++++++++++++++
2 files changed, 250 insertions(+), 250 deletions(-)
delete mode 100644 test/shell/writecache-blocksize.sh
create mode 100644 test/shell/writecache-cache-blocksize.sh
diff --git a/test/shell/writecache-blocksize.sh b/test/shell/writecache-blocksize.sh
deleted file mode 100644
index 2579ef7b7bac..000000000000
--- a/test/shell/writecache-blocksize.sh
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
-#
-# This copyrighted material is made available to anyone wishing to use,
-# modify, copy, or redistribute it subject to the terms and conditions
-# of the GNU General Public License v.2.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-
-# Test dm-writecache and dm-cache with different block size combinations
-
-SKIP_WITH_LVMPOLLD=1
-
-. lib/inittest
-
-aux have_writecache 1 0 0 || skip
-which mkfs.xfs || skip
-
-mnt="mnt"
-mkdir -p $mnt
-
-awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
-awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
-awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
-
-# generate random data
-dd if=/dev/urandom of=randA bs=512K count=2
-dd if=/dev/urandom of=randB bs=512K count=3
-dd if=/dev/urandom of=randC bs=512K count=4
-
-_add_new_data_to_mnt() {
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
- # add original data
- cp randA $mnt
- cp randB $mnt
- cp randC $mnt
- mkdir $mnt/1
- cp fileA $mnt/1
- cp fileB $mnt/1
- cp fileC $mnt/1
- mkdir $mnt/2
- cp fileA $mnt/2
- cp fileB $mnt/2
- cp fileC $mnt/2
- sync
-}
-
-_add_more_data_to_mnt() {
- mkdir $mnt/more
- cp fileA $mnt/more
- cp fileB $mnt/more
- cp fileC $mnt/more
- cp randA $mnt/more
- cp randB $mnt/more
- cp randC $mnt/more
- sync
-}
-
-_verify_data_on_mnt() {
- diff randA $mnt/randA
- diff randB $mnt/randB
- diff randC $mnt/randC
- diff fileA $mnt/1/fileA
- diff fileB $mnt/1/fileB
- diff fileC $mnt/1/fileC
- diff fileA $mnt/2/fileA
- diff fileB $mnt/2/fileB
- diff fileC $mnt/2/fileC
-}
-
-_verify_more_data_on_mnt() {
- diff randA $mnt/more/randA
- diff randB $mnt/more/randB
- diff randC $mnt/more/randC
- diff fileA $mnt/more/fileA
- diff fileB $mnt/more/fileB
- diff fileC $mnt/more/fileC
-}
-
-_verify_data_on_lv() {
- lvchange -ay $vg/$lv1
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
- _verify_data_on_mnt
- rm $mnt/randA
- rm $mnt/randB
- rm $mnt/randC
- rm -rf $mnt/1
- rm -rf $mnt/2
- umount $mnt
- lvchange -an $vg/$lv1
-}
-
-# Check that the LBS/PBS that were set up is accurately reported for the devs.
-_check_env() {
-
- check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
- check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "$2"
-
- blockdev --getss "$dev1"
- blockdev --getpbsz "$dev1"
- blockdev --getss "$dev2"
- blockdev --getpbsz "$dev2"
-}
-
-#
-# _run_test $BS1 $BS2 $type $optname "..."
-#
-# $BS1: the xfs sectsz is verified to match $BS1, after mkfs
-# $BS2: the lv1 LBS is verified to match $BS2, after cache is added to lv1
-# $type is cache or writecache to use in lvconvert --type $type
-# $optname is either --cachevol or --cachepool to use in lvconvert
-# "..." a sector size option to use in mkfs.xfs
-#
-
-_run_test() {
- vgcreate $SHARED $vg "$dev1"
- vgextend $vg "$dev2"
- lvcreate -n $lv1 -l 8 -an $vg "$dev1"
- lvcreate -n $lv2 -l 4 -an $vg "$dev2"
- lvchange -ay $vg/$lv1
- mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
- grep "sectsz=$1" out
- _add_new_data_to_mnt
- lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
- blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
- grep "$2" out
- blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
- _add_more_data_to_mnt
- _verify_data_on_mnt
- lvconvert --splitcache $vg/$lv1
- check lv_field $vg/$lv1 segtype linear
- blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
- blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
- _verify_data_on_mnt
- _verify_more_data_on_mnt
- umount $mnt
- lvchange -an $vg/$lv1
- lvchange -an $vg/$lv2
- _verify_data_on_lv
- lvremove $vg/$lv1
- lvremove $vg/$lv2
- vgremove $vg
-}
-
-# Setup: LBS 512, PBS 512
-aux prepare_scsi_debug_dev 256 || skip
-aux prepare_devs 2 64
-
-# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE
-vgcreate $vg "$dev1"
-lvcreate -n $lv1 -L50 $vg
-mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
-blkid -c /dev/null "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip
-lvchange -an $vg
-vgremove -ff $vg
-
-_check_env "512" "512"
-
-# lbs 512, pbs 512, xfs 512, wc bs 512
-_run_test 512 512 "writecache" "--cachevol" ""
-# lbs 512, pbs 512, xfs 512, cache bs 512
-_run_test 512 512 "cache" "--cachevol" ""
-_run_test 512 512 "cache" "--cachepool" ""
-
-# lbs 512, pbs 512, xfs -s 4096, wc bs 4096
-_run_test 4096 4096 "writecache" "--cachevol" "-s size=4096"
-# lbs 512, pbs 512, xfs -s 4096, cache bs 512
-_run_test 4096 512 "cache" "--cachevol" "-s size=4096"
-_run_test 4096 512 "cache" "--cachepool" "-s size=4096"
-
-aux cleanup_scsi_debug_dev
-
-
-# Setup: LBS 512, PBS 4096
-aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
-aux prepare_devs 2 64
-
-_check_env "512" "4096"
-
-# lbs 512, pbs 4k, xfs 4k, wc bs 4k
-_run_test 4096 4096 "writecache" "--cachevol" ""
-# lbs 512, pbs 4k, xfs 4k, cache bs 512
-_run_test 4096 512 "cache" "--cachevol" ""
-_run_test 4096 512 "cache" "--cachepool" ""
-
-# lbs 512, pbs 4k, xfs -s 512, wc bs 512
-_run_test 512 512 "writecache" "--cachevol" "-s size=512"
-# lbs 512, pbs 4k, xfs -s 512, cache bs 512
-_run_test 512 512 "cache" "--cachevol" "-s size=512"
-_run_test 512 512 "cache" "--cachepool" "-s size=512"
-
-aux cleanup_scsi_debug_dev
-
-
-# Setup: LBS 4096, PBS 4096
-aux prepare_scsi_debug_dev 256 sector_size=4096
-aux prepare_devs 2 64
-
-_check_env "4096" "4096"
-
-# lbs 4k, pbs 4k, xfs 4k, wc bs 4k
-_run_test 4096 4096 "writecache" "--cachevol" ""
-# lbs 4k, pbs 4k, xfs 4k, cache bs 4k
-_run_test 4096 4096 "cache" "--cachevol" ""
-_run_test 4096 4096 "cache" "--cachepool" ""
-
-aux cleanup_scsi_debug_dev
-
-
-# Setup: LBS 512, PBS 512
-aux prepare_scsi_debug_dev 256 || skip
-aux prepare_devs 2 64
-
-_check_env "512" "512"
-
-vgcreate $SHARED $vg "$dev1"
-vgextend $vg "$dev2"
-lvcreate -n $lv1 -l 8 -an $vg "$dev1"
-lvcreate -n $lv2 -l 4 -an $vg "$dev2"
-lvconvert --yes --type writecache --cachevol $lv2 --cachesettings "block_size=4096" $vg/$lv1
-lvchange -ay $vg/$lv1
-mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
-grep "sectsz=4096" out
-_add_new_data_to_mnt
-blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
-grep 4096 out
-blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
-_add_more_data_to_mnt
-_verify_data_on_mnt
-lvconvert --splitcache $vg/$lv1
-check lv_field $vg/$lv1 segtype linear
-check lv_field $vg/$lv2 segtype linear
-blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
-blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
-_verify_data_on_mnt
-_verify_more_data_on_mnt
-umount $mnt
-lvchange -an $vg/$lv1
-lvchange -an $vg/$lv2
-_verify_data_on_lv
-lvremove $vg/$lv1
-lvremove $vg/$lv2
-vgremove $vg
-
-aux cleanup_scsi_debug_dev
-
diff --git a/test/shell/writecache-cache-blocksize.sh b/test/shell/writecache-cache-blocksize.sh
new file mode 100644
index 000000000000..2579ef7b7bac
--- /dev/null
+++ b/test/shell/writecache-cache-blocksize.sh
@@ -0,0 +1,250 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test dm-writecache and dm-cache with different block size combinations
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux have_writecache 1 0 0 || skip
+which mkfs.xfs || skip
+
+mnt="mnt"
+mkdir -p $mnt
+
+awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
+awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
+awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
+
+# generate random data
+dd if=/dev/urandom of=randA bs=512K count=2
+dd if=/dev/urandom of=randB bs=512K count=3
+dd if=/dev/urandom of=randC bs=512K count=4
+
+_add_new_data_to_mnt() {
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+
+ # add original data
+ cp randA $mnt
+ cp randB $mnt
+ cp randC $mnt
+ mkdir $mnt/1
+ cp fileA $mnt/1
+ cp fileB $mnt/1
+ cp fileC $mnt/1
+ mkdir $mnt/2
+ cp fileA $mnt/2
+ cp fileB $mnt/2
+ cp fileC $mnt/2
+ sync
+}
+
+_add_more_data_to_mnt() {
+ mkdir $mnt/more
+ cp fileA $mnt/more
+ cp fileB $mnt/more
+ cp fileC $mnt/more
+ cp randA $mnt/more
+ cp randB $mnt/more
+ cp randC $mnt/more
+ sync
+}
+
+_verify_data_on_mnt() {
+ diff randA $mnt/randA
+ diff randB $mnt/randB
+ diff randC $mnt/randC
+ diff fileA $mnt/1/fileA
+ diff fileB $mnt/1/fileB
+ diff fileC $mnt/1/fileC
+ diff fileA $mnt/2/fileA
+ diff fileB $mnt/2/fileB
+ diff fileC $mnt/2/fileC
+}
+
+_verify_more_data_on_mnt() {
+ diff randA $mnt/more/randA
+ diff randB $mnt/more/randB
+ diff randC $mnt/more/randC
+ diff fileA $mnt/more/fileA
+ diff fileB $mnt/more/fileB
+ diff fileC $mnt/more/fileC
+}
+
+_verify_data_on_lv() {
+ lvchange -ay $vg/$lv1
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+ _verify_data_on_mnt
+ rm $mnt/randA
+ rm $mnt/randB
+ rm $mnt/randC
+ rm -rf $mnt/1
+ rm -rf $mnt/2
+ umount $mnt
+ lvchange -an $vg/$lv1
+}
+
+# Check that the LBS/PBS that were set up is accurately reported for the devs.
+_check_env() {
+
+ check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
+ check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "$2"
+
+ blockdev --getss "$dev1"
+ blockdev --getpbsz "$dev1"
+ blockdev --getss "$dev2"
+ blockdev --getpbsz "$dev2"
+}
+
+#
+# _run_test $BS1 $BS2 $type $optname "..."
+#
+# $BS1: the xfs sectsz is verified to match $BS1, after mkfs
+# $BS2: the lv1 LBS is verified to match $BS2, after cache is added to lv1
+# $type is cache or writecache to use in lvconvert --type $type
+# $optname is either --cachevol or --cachepool to use in lvconvert
+# "..." a sector size option to use in mkfs.xfs
+#
+
+_run_test() {
+ vgcreate $SHARED $vg "$dev1"
+ vgextend $vg "$dev2"
+ lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+ lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+ lvchange -ay $vg/$lv1
+ mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
+ grep "sectsz=$1" out
+ _add_new_data_to_mnt
+ lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
+ blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
+ grep "$2" out
+ blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+ _add_more_data_to_mnt
+ _verify_data_on_mnt
+ lvconvert --splitcache $vg/$lv1
+ check lv_field $vg/$lv1 segtype linear
+ blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
+ blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+ _verify_data_on_mnt
+ _verify_more_data_on_mnt
+ umount $mnt
+ lvchange -an $vg/$lv1
+ lvchange -an $vg/$lv2
+ _verify_data_on_lv
+ lvremove $vg/$lv1
+ lvremove $vg/$lv2
+ vgremove $vg
+}
+
+# Setup: LBS 512, PBS 512
+aux prepare_scsi_debug_dev 256 || skip
+aux prepare_devs 2 64
+
+# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE
+vgcreate $vg "$dev1"
+lvcreate -n $lv1 -L50 $vg
+mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
+blkid -c /dev/null "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip
+lvchange -an $vg
+vgremove -ff $vg
+
+_check_env "512" "512"
+
+# lbs 512, pbs 512, xfs 512, wc bs 512
+_run_test 512 512 "writecache" "--cachevol" ""
+# lbs 512, pbs 512, xfs 512, cache bs 512
+_run_test 512 512 "cache" "--cachevol" ""
+_run_test 512 512 "cache" "--cachepool" ""
+
+# lbs 512, pbs 512, xfs -s 4096, wc bs 4096
+_run_test 4096 4096 "writecache" "--cachevol" "-s size=4096"
+# lbs 512, pbs 512, xfs -s 4096, cache bs 512
+_run_test 4096 512 "cache" "--cachevol" "-s size=4096"
+_run_test 4096 512 "cache" "--cachepool" "-s size=4096"
+
+aux cleanup_scsi_debug_dev
+
+
+# Setup: LBS 512, PBS 4096
+aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
+aux prepare_devs 2 64
+
+_check_env "512" "4096"
+
+# lbs 512, pbs 4k, xfs 4k, wc bs 4k
+_run_test 4096 4096 "writecache" "--cachevol" ""
+# lbs 512, pbs 4k, xfs 4k, cache bs 512
+_run_test 4096 512 "cache" "--cachevol" ""
+_run_test 4096 512 "cache" "--cachepool" ""
+
+# lbs 512, pbs 4k, xfs -s 512, wc bs 512
+_run_test 512 512 "writecache" "--cachevol" "-s size=512"
+# lbs 512, pbs 4k, xfs -s 512, cache bs 512
+_run_test 512 512 "cache" "--cachevol" "-s size=512"
+_run_test 512 512 "cache" "--cachepool" "-s size=512"
+
+aux cleanup_scsi_debug_dev
+
+
+# Setup: LBS 4096, PBS 4096
+aux prepare_scsi_debug_dev 256 sector_size=4096
+aux prepare_devs 2 64
+
+_check_env "4096" "4096"
+
+# lbs 4k, pbs 4k, xfs 4k, wc bs 4k
+_run_test 4096 4096 "writecache" "--cachevol" ""
+# lbs 4k, pbs 4k, xfs 4k, cache bs 4k
+_run_test 4096 4096 "cache" "--cachevol" ""
+_run_test 4096 4096 "cache" "--cachepool" ""
+
+aux cleanup_scsi_debug_dev
+
+
+# Setup: LBS 512, PBS 512
+aux prepare_scsi_debug_dev 256 || skip
+aux prepare_devs 2 64
+
+_check_env "512" "512"
+
+vgcreate $SHARED $vg "$dev1"
+vgextend $vg "$dev2"
+lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+lvconvert --yes --type writecache --cachevol $lv2 --cachesettings "block_size=4096" $vg/$lv1
+lvchange -ay $vg/$lv1
+mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out
+grep "sectsz=4096" out
+_add_new_data_to_mnt
+blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
+grep 4096 out
+blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+_add_more_data_to_mnt
+_verify_data_on_mnt
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
+blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+_verify_data_on_mnt
+_verify_more_data_on_mnt
+umount $mnt
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+_verify_data_on_lv
+lvremove $vg/$lv1
+lvremove $vg/$lv2
+vgremove $vg
+
+aux cleanup_scsi_debug_dev
+
--
1.8.3.1

View File

@ -0,0 +1,255 @@
From 9759f915e78044667681de395bf2694bd5c7f393 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Mon, 7 Jun 2021 15:40:40 -0500
Subject: [PATCH 33/33] tests: add writecache-cache-blocksize-2
inconsistent physical block size of devs used
for main LV and cache
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
test/shell/writecache-cache-blocksize-2.sh | 232 +++++++++++++++++++++++++++++
1 file changed, 232 insertions(+)
create mode 100644 test/shell/writecache-cache-blocksize-2.sh
diff --git a/test/shell/writecache-cache-blocksize-2.sh b/test/shell/writecache-cache-blocksize-2.sh
new file mode 100644
index 000000000000..af4f60e1dcf8
--- /dev/null
+++ b/test/shell/writecache-cache-blocksize-2.sh
@@ -0,0 +1,232 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test dm-writecache and dm-cache with different block size combinations
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux have_writecache 1 0 0 || skip
+which mkfs.xfs || skip
+
+mnt="mnt"
+mkdir -p $mnt
+
+awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
+awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
+awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
+
+# generate random data
+dd if=/dev/urandom of=randA bs=512K count=2
+dd if=/dev/urandom of=randB bs=512K count=3
+dd if=/dev/urandom of=randC bs=512K count=4
+
+_add_new_data_to_mnt() {
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+
+ # add original data
+ cp randA $mnt
+ cp randB $mnt
+ cp randC $mnt
+ mkdir $mnt/1
+ cp fileA $mnt/1
+ cp fileB $mnt/1
+ cp fileC $mnt/1
+ mkdir $mnt/2
+ cp fileA $mnt/2
+ cp fileB $mnt/2
+ cp fileC $mnt/2
+ sync
+}
+
+_add_more_data_to_mnt() {
+ mkdir $mnt/more
+ cp fileA $mnt/more
+ cp fileB $mnt/more
+ cp fileC $mnt/more
+ cp randA $mnt/more
+ cp randB $mnt/more
+ cp randC $mnt/more
+ sync
+}
+
+_verify_data_on_mnt() {
+ diff randA $mnt/randA
+ diff randB $mnt/randB
+ diff randC $mnt/randC
+ diff fileA $mnt/1/fileA
+ diff fileB $mnt/1/fileB
+ diff fileC $mnt/1/fileC
+ diff fileA $mnt/2/fileA
+ diff fileB $mnt/2/fileB
+ diff fileC $mnt/2/fileC
+}
+
+_verify_more_data_on_mnt() {
+ diff randA $mnt/more/randA
+ diff randB $mnt/more/randB
+ diff randC $mnt/more/randC
+ diff fileA $mnt/more/fileA
+ diff fileB $mnt/more/fileB
+ diff fileC $mnt/more/fileC
+}
+
+_verify_data_on_lv() {
+ lvchange -ay $vg/$lv1
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+ _verify_data_on_mnt
+ rm $mnt/randA
+ rm $mnt/randB
+ rm $mnt/randC
+ rm -rf $mnt/1
+ rm -rf $mnt/2
+ umount $mnt
+ lvchange -an $vg/$lv1
+}
+
+# Check that the LBS ($1) and PBS ($2) are accurately reported.
+_check_env() {
+
+ check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
+ check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "$2"
+
+ blockdev --getss "$dev1"
+ blockdev --getpbsz "$dev1"
+ blockdev --getss "$dev2"
+ blockdev --getpbsz "$dev2"
+}
+
+#
+# _run_test $BD1 $BD2 $type $optname "..."
+#
+# $BD1: device to place the main LV on
+# $BD2: device to place the cache on
+# $type is cache or writecache to use in lvconvert --type $type
+# $optname is either --cachevol or --cachepool to use in lvconvert
+# "..." a sector size option to use in mkfs.xfs
+#
+
+_run_test() {
+ vgcreate $SHARED $vg "$1"
+ vgextend $vg "$2"
+ lvcreate -n $lv1 -l 8 -an $vg "$1"
+ lvcreate -n $lv2 -l 4 -an $vg "$2"
+ lvchange -ay $vg/$lv1
+ mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
+ _add_new_data_to_mnt
+ lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
+
+ # TODO: check expected LBS of LV1
+ # blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
+ # grep "$N" out
+ # TODO: check expected PBS of LV1
+ # blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" |tee out
+ # grep "$N" out
+
+ _add_more_data_to_mnt
+ _verify_data_on_mnt
+ lvconvert --splitcache $vg/$lv1
+ check lv_field $vg/$lv1 segtype linear
+ blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
+ blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
+ _verify_data_on_mnt
+ _verify_more_data_on_mnt
+ umount $mnt
+ lvchange -an $vg/$lv1
+ lvchange -an $vg/$lv2
+ _verify_data_on_lv
+ lvremove $vg/$lv1
+ lvremove $vg/$lv2
+ vgremove $vg
+}
+
+# Setup: dev1 LBS 512, PBS 4096 (using scsi-debug)
+# dev2 LBS 512, PBS 4096 (using scsi-debug)
+# dev3 LBS 512, PBS 512 (using loop)
+# dev4 LBS 512, PBS 512 (using loop)
+#
+
+aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
+aux prepare_devs 2 64
+
+# loopa/loopb have LBS 512 PBS 512
+which fallocate || skip
+fallocate -l 64M loopa
+fallocate -l 64M loopb
+
+for i in {1..5}; do
+ LOOP1=$(losetup -f loopa --show || true)
+ test -n "$LOOP1" && break
+done
+for i in {1..5} ; do
+ LOOP2=$(losetup -f loopb --show || true)
+ test -n "$LOOP2" && break
+done
+
+# prepare devX mapping so it works for real & fake dev dir
+d=3
+for i in "$LOOP1" "$LOOP2"; do
+ echo "$i"
+ m=${i##*loop}
+ test -e "$DM_DEV_DIR/loop$m" || mknod "$DM_DEV_DIR/loop$m" b 7 "$m"
+ eval "dev$d=\"$DM_DEV_DIR/loop$m\""
+ d=$(( d + 1 ))
+done
+
+# verify dev1/dev2 have LBS 512 PBS 4096
+_check_env "512" "4096"
+
+# verify dev3/dev4 have LBS 512 PBS 512
+blockdev --getss "$LOOP1" | grep 512
+blockdev --getss "$LOOP2" | grep 512
+blockdev --getpbsz "$LOOP1" | grep 512
+blockdev --getpbsz "$LOOP2" | grep 512
+
+aux extend_filter "a|$dev3|" "a|$dev4|"
+
+# place main LV on dev1 with LBS 512, PBS 4096
+# and the cache on dev3 with LBS 512, PBS 512
+
+_run_test "$dev1" "$dev3" "writecache" "--cachevol" ""
+_run_test "$dev1" "$dev3" "cache" "--cachevol" ""
+_run_test "$dev1" "$dev3" "cache" "--cachepool" ""
+
+# place main LV on dev3 with LBS 512, PBS 512
+# and the cache on dev1 with LBS 512, PBS 4096
+
+_run_test "$dev3" "$dev1" "writecache" "--cachevol" ""
+_run_test "$dev3" "$dev1" "cache" "--cachevol" ""
+_run_test "$dev3" "$dev1" "cache" "--cachepool" ""
+
+# place main LV on dev1 with LBS 512, PBS 4096
+# and the cache on dev3 with LBS 512, PBS 512
+# and force xfs sectsz 512
+
+_run_test "$dev1" "$dev3" "writecache" "--cachevol" "-s size=512"
+_run_test "$dev1" "$dev3" "cache" "--cachevol" "-s size=512"
+_run_test "$dev1" "$dev3" "cache" "--cachepool" "-s size=512"
+
+# place main LV on dev3 with LBS 512, PBS 512
+# and the cache on dev1 with LBS 512, PBS 4096
+# and force xfs sectsz 4096
+
+_run_test "$dev3" "$dev1" "writecache" "--cachevol" "-s size=4096"
+_run_test "$dev3" "$dev1" "cache" "--cachevol" "-s size=4096"
+_run_test "$dev3" "$dev1" "cache" "--cachepool" "-s size=4096"
+
+
+losetup -d "$LOOP1" || true
+losetup -d "$LOOP2" || true
+rm loopa loopb
+
+aux cleanup_scsi_debug_dev
--
1.8.3.1

View File

@ -0,0 +1,42 @@
From 5e17203ff5dd4296760e5dad683e4cc84df2801d Mon Sep 17 00:00:00 2001
From: Leo Yan <leo.yan@linaro.org>
Date: Tue, 8 Jun 2021 14:45:09 +0800
Subject: [PATCH 1/8] lvmlockd: Fix the compilation warning
As the SUSE build tool reports the warning:
lvmlockd-core.c: In function 'client_thread_main':
lvmlockd-core.c:4959:37: warning: '%d' directive output may be truncated writing between 1 and 10 bytes into a region of size 6 [-Wformat-truncation=]
snprintf(buf, sizeof(buf), "path[%d]", i);
^~
lvmlockd-core.c:4959:31: note: directive argument in the range [0, 2147483647]
snprintf(buf, sizeof(buf), "path[%d]", i);
^~~~~~~~~~
To dismiss the compilation warning, enlarge the array "buf" to 17
bytes to support the maximum signed integer: string format 6 bytes + signed
integer 10 bytes + terminating char "\0".
Reported-by: Heming Zhao <heming.zhao@suse.com>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
daemons/lvmlockd/lvmlockd-core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index ea76f2214b3e..fef9589b9d98 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -4780,7 +4780,7 @@ static void client_recv_action(struct client *cl)
const char *path;
const char *str;
struct pvs pvs;
- char buf[11]; /* p a t h [ x x x x ] \0 */
+ char buf[17]; /* "path[%d]\0", %d outputs signed integer so max to 10 bytes */
int64_t val;
uint32_t opts = 0;
int result = 0;
--
1.8.3.1

View File

@ -0,0 +1,38 @@
From ca930bd936de2e7d4a83fa64add800baf6cfd116 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Tue, 8 Jun 2021 12:16:06 -0500
Subject: [PATCH 2/8] devices: don't use deleted loop backing file for device
id
check for "(deleted)" in the backing_file string and
fall back to devname for id.
$ cat /sys/block/loop0/loop/backing_file
/root/looptmp (deleted)
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/device/device_id.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/lib/device/device_id.c b/lib/device/device_id.c
index 67f72e51b1ba..1b98487ab3a6 100644
--- a/lib/device/device_id.c
+++ b/lib/device/device_id.c
@@ -325,8 +325,12 @@ const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, u
else if (idtype == DEV_ID_TYPE_MD_UUID)
_read_sys_block(cmd, dev, "md/uuid", sysbuf, sizeof(sysbuf));
- else if (idtype == DEV_ID_TYPE_LOOP_FILE)
+ else if (idtype == DEV_ID_TYPE_LOOP_FILE) {
_read_sys_block(cmd, dev, "loop/backing_file", sysbuf, sizeof(sysbuf));
+ /* if backing file is deleted, fall back to devname */
+ if (strstr(sysbuf, "(deleted)"))
+ sysbuf[0] = '\0';
+ }
else if (idtype == DEV_ID_TYPE_DEVNAME) {
if (!(idname = strdup(dev_name(dev))))
--
1.8.3.1

View File

@ -0,0 +1,80 @@
From df27392c8c9ec5d1efd92c2214805471473f2a06 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Tue, 8 Jun 2021 14:07:39 -0500
Subject: [PATCH 3/8] man/help: fix common option listing
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/command.c | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/tools/command.c b/tools/command.c
index 1de739aaa0de..7205969e1c2f 100644
--- a/tools/command.c
+++ b/tools/command.c
@@ -2146,6 +2146,7 @@ void print_usage_common_lvm(struct command_name *cname, struct command *cmd)
void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
{
int oo, opt_enum;
+ int found_common_command = 0;
/*
* when there's more than one variant, options that
@@ -2155,6 +2156,18 @@ void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
if (cname->variants < 2)
return;
+ for (opt_enum = 0; opt_enum < ARG_COUNT; opt_enum++) {
+ if (!cname->common_options[opt_enum])
+ continue;
+ if (_is_lvm_all_opt(opt_enum))
+ continue;
+ found_common_command = 1;
+ break;
+ }
+
+ if (!found_common_command)
+ return;
+
printf(" Common options for command:");
/* print options with short opts */
@@ -2213,7 +2226,7 @@ void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
printf(" ]");
}
- printf(".P\n");
+ printf("\n\n");
}
void print_usage_notes(struct command_name *cname)
@@ -2994,6 +3007,7 @@ static void _print_man_usage_common_cmd(struct command *cmd)
{
struct command_name *cname;
int i, sep, oo, opt_enum;
+ int found_common_command = 0;
if (!(cname = _find_command_name(cmd->name)))
return;
@@ -3001,6 +3015,18 @@ static void _print_man_usage_common_cmd(struct command *cmd)
if (cname->variants < 2)
return;
+ for (opt_enum = 0; opt_enum < ARG_COUNT; opt_enum++) {
+ if (!cname->common_options[opt_enum])
+ continue;
+ if (_is_lvm_all_opt(opt_enum))
+ continue;
+ found_common_command = 1;
+ break;
+ }
+
+ if (!found_common_command)
+ return;
+
printf("Common options for command:\n");
printf(".\n");
--
1.8.3.1

View File

@ -0,0 +1,417 @@
From ba3707d9539f9cc2e72c5368388ae795776379af Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Tue, 8 Jun 2021 19:02:07 +0200
Subject: [PATCH 4/8] archiving: take archive automatically
Instead of calling explicit archive with command processing logic,
move this step towards 1st. vg_write() call, which will automatically
store archive of committed metadata.
This slightly changes some error path where the error in archiving
was detected earlier in the command, while now some on going command
'actions' might have been, but will be simply scratched in case
of error (since even new metadata would not have been even written).
So general effect should be only some command message ordering.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/metadata/lv_manip.c | 15 ---------------
lib/metadata/metadata.c | 6 +++---
lib/metadata/pv_manip.c | 3 ---
lib/metadata/vg.c | 3 ---
tools/lvconvert.c | 30 ------------------------------
tools/pvchange.c | 2 --
tools/pvmove.c | 3 ---
tools/vgchange.c | 8 --------
tools/vgcreate.c | 3 ---
tools/vgexport.c | 4 ----
tools/vgextend.c | 6 ------
tools/vgimport.c | 3 ---
tools/vgimportclone.c | 3 ---
tools/vgreduce.c | 3 ---
tools/vgrename.c | 3 ---
15 files changed, 3 insertions(+), 92 deletions(-)
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 37dd3611dde7..899297f28498 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -4834,9 +4834,6 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
}
}
- if (update_mda && !archive(vg))
- return_0;
-
if (old_lv_is_historical) {
/*
* Historical LVs have neither sub LVs nor any
@@ -6146,9 +6143,6 @@ int lv_resize(struct logical_volume *lv,
if (!lockd_lv_resize(cmd, lock_lv, "ex", 0, lp))
return_0;
- if (!archive(vg))
- return_0;
-
/* Remove any striped raid reshape space for LV resizing */
if (aux_lv && first_seg(aux_lv)->reshape_len)
if (!lv_raid_free_reshape_space(aux_lv))
@@ -6719,9 +6713,6 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
- if (!archive(vg))
- return_0;
-
if (!lv_detach_writecache_cachevol(lv, 1)) {
log_error("Failed to detach writecache from %s", display_lvname(lv));
return 0;
@@ -6742,9 +6733,6 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
/* FIXME Review and fix the snapshot error paths! */
return_0;
- if (!archive(vg))
- return_0;
-
/* Special case removing a striped raid LV with allocated reshape space */
if (seg && seg->reshape_len) {
if (!(seg->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
@@ -8458,9 +8446,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
- if (!archive(vg))
- return_NULL;
-
if (pool_lv && segtype_is_thin_volume(create_segtype)) {
/* Ensure all stacked messages are submitted */
if ((pool_is_active(pool_lv) || is_change_activating(lp->activate)) &&
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 0b284435b41b..6852d2a2a7fe 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -614,9 +614,6 @@ int vg_remove_check(struct volume_group *vg)
return 0;
}
- if (!archive(vg))
- return 0;
-
return 1;
}
@@ -2978,6 +2975,9 @@ int vg_write(struct volume_group *vg)
if (vg->cmd->wipe_outdated_pvs)
_wipe_outdated_pvs(vg->cmd, vg);
+ if (!vg_is_archived(vg) && vg->vg_committed && !archive(vg->vg_committed))
+ return_0;
+
if (critical_section())
log_error(INTERNAL_ERROR
"Writing metadata in critical section.");
diff --git a/lib/metadata/pv_manip.c b/lib/metadata/pv_manip.c
index 5fd80a2ceca3..fd97bbbc2462 100644
--- a/lib/metadata/pv_manip.c
+++ b/lib/metadata/pv_manip.c
@@ -623,9 +623,6 @@ int pv_resize_single(struct cmd_context *cmd,
const char *vg_name = pv->vg_name;
int vg_needs_pv_write = 0;
- if (!archive(vg))
- goto out;
-
if (!(pv->fmt->features & FMT_RESIZE_PV)) {
log_error("Physical volume %s format does not support resizing.",
pv_name);
diff --git a/lib/metadata/vg.c b/lib/metadata/vg.c
index 3f9ec8d350fb..428e5dca79c6 100644
--- a/lib/metadata/vg.c
+++ b/lib/metadata/vg.c
@@ -694,9 +694,6 @@ int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
pvl = find_pv_in_vg(vg, name);
- if (!archive(vg))
- goto_bad;
-
log_verbose("Removing \"%s\" from volume group \"%s\"", name, vg->name);
if (pvl)
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index 71f7a7627fa1..c40031fe47a8 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -1861,9 +1861,6 @@ static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volu
}
}
- if (!archive(vg))
- return_0;
-
log_verbose("Splitting snapshot %s from its origin.", display_lvname(cow));
if (!vg_remove_snapshot(cow))
@@ -2796,9 +2793,6 @@ static int _lvconvert_to_thin_with_external(struct cmd_context *cmd,
if (!(lvc.segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_THIN)))
return_0;
- if (!archive(vg))
- return_0;
-
/*
* New thin LV needs to be created (all messages sent to pool) In this
* case thin volume is created READ-ONLY and also warn about not
@@ -2979,9 +2973,6 @@ static int _lvconvert_swap_pool_metadata(struct cmd_context *cmd,
return 0;
}
- if (!archive(vg))
- return_0;
-
/* Swap names between old and new metadata LV */
if (!detach_pool_metadata_lv(seg, &prev_metadata_lv))
@@ -3286,9 +3277,6 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
meta_readahead = arg_uint_value(cmd, readahead_ARG, cmd->default_settings.read_ahead);
meta_alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, ALLOC_INHERIT);
- if (!archive(vg))
- goto_bad;
-
if (!(metadata_lv = alloc_pool_metadata(lv,
meta_name,
meta_readahead,
@@ -3305,9 +3293,6 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
goto bad;
}
- if (!archive(vg))
- goto_bad;
-
if (zero_metadata) {
metadata_lv->status |= LV_ACTIVATION_SKIP;
if (!activate_lv(cmd, metadata_lv)) {
@@ -3556,9 +3541,6 @@ static int _cache_vol_attach(struct cmd_context *cmd,
if (!get_cache_params(cmd, &chunk_size, &cache_metadata_format, &cache_mode, &policy_name, &policy_settings))
goto_out;
- if (!archive(vg))
- goto_out;
-
/*
* lv/cache_lv keeps the same lockd lock it had before, the lock for
* lv_fast is kept but is not used while it's attached, and
@@ -5525,9 +5507,6 @@ static int _lvconvert_to_vdopool_single(struct cmd_context *cmd,
}
}
- if (!archive(vg))
- goto_out;
-
if (!convert_vdo_pool_lv(lv, &vdo_params, &lvc.virtual_extents, zero_vdopool))
goto_out;
@@ -6198,9 +6177,6 @@ int lvconvert_writecache_attach_single(struct cmd_context *cmd,
if (fast_name && !lockd_lv(cmd, lv_fast, "ex", 0))
goto_bad;
- if (!archive(vg))
- goto_bad;
-
/*
* lv keeps the same lockd lock it had before, the lock for
* lv_fast is kept but is not used while it's attached, and
@@ -6339,9 +6315,6 @@ static int _lvconvert_integrity_remove(struct cmd_context *cmd, struct logical_v
if (!lockd_lv(cmd, lv, "ex", 0))
return_0;
- if (!archive(vg))
- return_0;
-
if (lv_is_raid(lv))
ret = lv_remove_integrity_from_raid(lv);
if (!ret)
@@ -6371,9 +6344,6 @@ static int _lvconvert_integrity_add(struct cmd_context *cmd, struct logical_volu
} else
use_pvh = &vg->pvs;
- if (!archive(vg))
- return_0;
-
if (lv_is_partial(lv)) {
log_error("Cannot add integrity while LV is missing PVs.");
return 0;
diff --git a/tools/pvchange.c b/tools/pvchange.c
index 04cbb428dde1..8b4a0643d3cd 100644
--- a/tools/pvchange.c
+++ b/tools/pvchange.c
@@ -65,8 +65,6 @@ static int _pvchange_single(struct cmd_context *cmd, struct volume_group *vg,
"logical volumes", pv_name);
goto bad;
}
- if (!archive(vg))
- goto_bad;
} else {
if (tagargs) {
log_error("Can't change tag on Physical Volume %s not "
diff --git a/tools/pvmove.c b/tools/pvmove.c
index bb372f7dcaeb..ed92f3ce4a6d 100644
--- a/tools/pvmove.c
+++ b/tools/pvmove.c
@@ -709,9 +709,6 @@ static int _pvmove_setup_single(struct cmd_context *cmd,
vg, pv, pp->alloc)))
goto_out;
- if (!archive(vg))
- goto_out;
-
if (!(lv_mirr = _set_up_pvmove_lv(cmd, vg, source_pvl, lv_name,
allocatable_pvs, pp->alloc,
&lvs_changed, &exclusive)))
diff --git a/tools/vgchange.c b/tools/vgchange.c
index 032f3efcfd04..625b68d46b56 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -675,8 +675,6 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
for (i = 0; i < DM_ARRAY_SIZE(_vgchange_args); ++i) {
if (arg_is_set(cmd, _vgchange_args[i].arg)) {
- if (!archive(vg))
- return_ECMD_FAILED;
if (!_vgchange_args[i].fn(cmd, vg))
return_ECMD_FAILED;
}
@@ -1002,9 +1000,6 @@ static int _vgchange_locktype_single(struct cmd_context *cmd, const char *vg_nam
struct volume_group *vg,
struct processing_handle *handle)
{
- if (!archive(vg))
- return_ECMD_FAILED;
-
if (!_vgchange_locktype(cmd, vg))
return_ECMD_FAILED;
@@ -1201,9 +1196,6 @@ static int _vgchange_systemid_single(struct cmd_context *cmd, const char *vg_nam
struct volume_group *vg,
struct processing_handle *handle)
{
- if (!archive(vg))
- return_ECMD_FAILED;
-
if (!_vgchange_system_id(cmd, vg))
return_ECMD_FAILED;
diff --git a/tools/vgcreate.c b/tools/vgcreate.c
index f9c40e86d646..d6d6bb61ddc3 100644
--- a/tools/vgcreate.c
+++ b/tools/vgcreate.c
@@ -148,9 +148,6 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
}
}
- if (!archive(vg))
- goto_bad;
-
/* Store VG on disk(s) */
if (!vg_write(vg) || !vg_commit(vg))
goto_bad;
diff --git a/tools/vgexport.c b/tools/vgexport.c
index 86dad3abb432..15cc3dd75d56 100644
--- a/tools/vgexport.c
+++ b/tools/vgexport.c
@@ -45,10 +45,6 @@ static int vgexport_single(struct cmd_context *cmd __attribute__((unused)),
}
}
-
- if (!archive(vg))
- goto_bad;
-
vg->status |= EXPORTED_VG;
vg->system_id = NULL;
diff --git a/tools/vgextend.c b/tools/vgextend.c
index 04d37f886d4d..b0f49569f492 100644
--- a/tools/vgextend.c
+++ b/tools/vgextend.c
@@ -60,9 +60,6 @@ static int _vgextend_restoremissing(struct cmd_context *cmd __attribute__((unuse
int fixed = 0;
unsigned i;
- if (!archive(vg))
- return_0;
-
for (i = 0; i < pp->pv_count; i++)
if (_restore_pv(vg, pp->pv_names[i]))
fixed++;
@@ -99,9 +96,6 @@ static int _vgextend_single(struct cmd_context *cmd, const char *vg_name,
return ECMD_FAILED;
}
- if (!archive(vg))
- return_ECMD_FAILED;
-
if (!vg_extend_each_pv(vg, pp))
goto_out;
diff --git a/tools/vgimport.c b/tools/vgimport.c
index 0d8b0f215704..4b25b468f017 100644
--- a/tools/vgimport.c
+++ b/tools/vgimport.c
@@ -33,9 +33,6 @@ static int _vgimport_single(struct cmd_context *cmd,
goto bad;
}
- if (!archive(vg))
- goto_bad;
-
vg->status &= ~EXPORTED_VG;
if (!vg_is_shared(vg))
diff --git a/tools/vgimportclone.c b/tools/vgimportclone.c
index 1e6bb2d69d83..a4778277108e 100644
--- a/tools/vgimportclone.c
+++ b/tools/vgimportclone.c
@@ -110,9 +110,6 @@ static int _update_vg(struct cmd_context *cmd, struct volume_group *vg,
* Write changes.
*/
- if (!archive(vg))
- goto_bad;
-
if (vp->import_vg)
vg->status &= ~EXPORTED_VG;
diff --git a/tools/vgreduce.c b/tools/vgreduce.c
index 4a4202e8683f..c759c664301c 100644
--- a/tools/vgreduce.c
+++ b/tools/vgreduce.c
@@ -157,9 +157,6 @@ static int _vgreduce_repair_single(struct cmd_context *cmd, const char *vg_name,
return ECMD_PROCESSED;
}
- if (!archive(vg))
- return_ECMD_FAILED;
-
if (vp->force) {
if (!_make_vg_consistent(cmd, vg))
return_ECMD_FAILED;
diff --git a/tools/vgrename.c b/tools/vgrename.c
index f442f731fd4a..71b4e16774af 100644
--- a/tools/vgrename.c
+++ b/tools/vgrename.c
@@ -103,9 +103,6 @@ static int _vgrename_single(struct cmd_context *cmd, const char *vg_name,
dev_dir = cmd->dev_dir;
- if (!archive(vg))
- goto error;
-
if (!lockd_rename_vg_before(cmd, vg)) {
stack;
goto error;
--
1.8.3.1

View File

@ -0,0 +1,565 @@
From bb45e33518b56a06df8a52226e383ca9ce938d0d Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Tue, 8 Jun 2021 19:39:15 +0200
Subject: [PATCH 5/8] backup: automatically store data on vg_unlock
Previously there have been necessary explicit call of backup (often
either forgotten or over-used). With this patch the necessity to
store backup is remember at vg_commit and once the VG is unlocked,
the committed metadata are automatically store in backup file.
This may possibly alter some printed messages from command when the
backup is now taken later.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/format_text/archiver.c | 1 -
lib/locking/locking.h | 7 +++++--
lib/metadata/lv_manip.c | 17 +----------------
lib/metadata/metadata.c | 4 ++--
lib/metadata/pv_manip.c | 1 -
lib/metadata/raid_manip.c | 12 ------------
lib/metadata/vg.c | 11 +++++++++--
lib/metadata/vg.h | 1 +
tools/lvconvert.c | 25 -------------------------
tools/pvmove_poll.c | 3 ---
tools/toollib.c | 2 --
tools/vgchange.c | 6 ------
tools/vgcreate.c | 2 --
tools/vgexport.c | 2 --
tools/vgextend.c | 4 ----
tools/vgimport.c | 2 --
tools/vgimportdevices.c | 1 -
tools/vgreduce.c | 1 -
tools/vgrename.c | 2 --
19 files changed, 18 insertions(+), 86 deletions(-)
diff --git a/lib/format_text/archiver.c b/lib/format_text/archiver.c
index 68117f7dc38d..f1590b4604e0 100644
--- a/lib/format_text/archiver.c
+++ b/lib/format_text/archiver.c
@@ -279,7 +279,6 @@ int backup_locally(struct volume_group *vg)
int backup(struct volume_group *vg)
{
- vg->needs_backup = 0;
/* Unlock memory if possible */
memlock_unlock(vg->cmd);
diff --git a/lib/locking/locking.h b/lib/locking/locking.h
index 3e8ae6f0c27a..a60935d528f9 100644
--- a/lib/locking/locking.h
+++ b/lib/locking/locking.h
@@ -56,8 +56,11 @@ int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const str
#define unlock_vg(cmd, vg, vol) \
do { \
- if (is_real_vg(vol) && !sync_local_dev_names(cmd)) \
- stack; \
+ if (is_real_vg(vol)) { \
+ if (!sync_local_dev_names(cmd)) \
+ stack; \
+ vg_backup_if_needed(vg); \
+ } \
if (!lock_vol(cmd, vol, LCK_VG_UNLOCK, NULL)) \
stack; \
} while (0)
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 899297f28498..eb92d6eca275 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -6178,8 +6178,6 @@ int lv_resize(struct logical_volume *lv,
/* Update lvm pool metadata (drop messages). */
if (!update_pool_lv(lock_lv, 0))
goto_bad;
-
- backup(vg);
}
/* Check for over provisioning when extended */
@@ -7024,7 +7022,7 @@ no_remove:
static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
{
struct volume_group *vg = lv->vg;
- int do_backup = 0, r = 0;
+ int r = 0;
const struct logical_volume *lock_lv = lv_lock_holder(lv);
log_very_verbose("Updating logical volume %s on disk(s)%s.",
@@ -7048,8 +7046,6 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
return 0;
} else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implict vg_revert() */
- else
- do_backup = 1;
log_very_verbose("Updating logical volume %s in kernel.",
display_lvname(lock_lv));
@@ -7060,9 +7056,6 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
r = 0;
}
- if (do_backup && !critical_section())
- backup(vg);
-
return r;
}
@@ -8595,8 +8588,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
/* Pool created metadata LV, but better avoid recover when vg_write/commit fails */
return_NULL;
- backup(vg);
-
if (test_mode()) {
log_verbose("Test mode: Skipping activation, zeroing and signature wiping.");
goto out;
@@ -8607,8 +8598,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!lv_add_integrity_to_raid(lv, &lp->integrity_settings, lp->pvh, NULL))
goto revert_new_lv;
-
- backup(vg);
}
/* Do not scan this LV until properly zeroed/wiped. */
@@ -8708,7 +8697,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
goto revert_new_lv;
}
}
- backup(vg);
if (!lv_active_change(cmd, lv, lp->activate)) {
log_error("Failed to activate thin %s.", lv->name);
@@ -8829,7 +8817,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!vg_write(vg) || !vg_commit(vg))
return_NULL; /* Metadata update fails, deep troubles */
- backup(vg);
/*
* FIXME We do not actually need snapshot-origin as an active device,
* as virtual origin is already 'hidden' private device without
@@ -8873,8 +8860,6 @@ revert_new_lv:
!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
log_error("Manual intervention may be required to remove "
"abandoned LV(s) before retrying.");
- else
- backup(vg);
return NULL;
}
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 6852d2a2a7fe..d5b28a58f200 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -991,6 +991,7 @@ static void _vg_move_cached_precommitted_to_committed(struct volume_group *vg)
release_vg(vg->vg_committed);
vg->vg_committed = vg->vg_precommitted;
vg->vg_precommitted = NULL;
+ vg->needs_backup = 1;
}
int lv_has_unknown_segments(const struct logical_volume *lv)
@@ -3165,8 +3166,7 @@ int vg_commit(struct volume_group *vg)
dm_list_init(&vg->msg_list);
vg->needs_write_and_commit = 0;
}
- vg->needs_backup = 0;
- }
+ }
/* If at least one mda commit succeeded, it was committed */
return ret;
diff --git a/lib/metadata/pv_manip.c b/lib/metadata/pv_manip.c
index fd97bbbc2462..cfc983174623 100644
--- a/lib/metadata/pv_manip.c
+++ b/lib/metadata/pv_manip.c
@@ -687,7 +687,6 @@ int pv_resize_single(struct cmd_context *cmd,
"volume group \"%s\"", pv_name, vg_name);
goto out;
}
- backup(vg);
}
log_print_unless_silent("Physical volume \"%s\" changed", pv_name);
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index 74adf02315b0..f0d401cdedc3 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -2210,9 +2210,6 @@ static int _vg_write_lv_suspend_commit_backup(struct volume_group *vg,
} else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implicit vg_revert() */
- if (r && do_backup)
- backup(vg);
-
return r;
}
@@ -2221,8 +2218,6 @@ static int _vg_write_commit_backup(struct volume_group *vg)
if (!vg_write(vg) || !vg_commit(vg))
return_0;
- backup(vg);
-
return 1;
}
@@ -2847,7 +2842,6 @@ static int _raid_add_images(struct logical_volume *lv,
display_lvname(lv));
return 0;
}
- backup(lv->vg);
}
return 1;
@@ -3172,8 +3166,6 @@ static int _raid_remove_images(struct logical_volume *lv, int yes,
if (!lv_update_and_reload_origin(lv))
return_0;
- backup(lv->vg);
-
return 1;
}
@@ -3431,8 +3423,6 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name,
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
- backup(lv->vg);
-
return 1;
}
@@ -3915,8 +3905,6 @@ static int _eliminate_extracted_lvs_optional_write_vg(struct volume_group *vg,
if (vg_write_requested) {
if (!vg_write(vg) || !vg_commit(vg))
return_0;
-
- backup(vg);
}
/* Wait for events following any deactivation. */
diff --git a/lib/metadata/vg.c b/lib/metadata/vg.c
index 428e5dca79c6..85482552aefe 100644
--- a/lib/metadata/vg.c
+++ b/lib/metadata/vg.c
@@ -739,8 +739,6 @@ int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
goto bad;
}
- backup(vg);
-
log_print_unless_silent("Removed \"%s\" from volume group \"%s\"",
name, vg->name);
}
@@ -752,3 +750,12 @@ bad:
release_vg(orphan_vg);
return r;
}
+
+void vg_backup_if_needed(struct volume_group *vg)
+{
+ if (!vg || !vg->needs_backup)
+ return;
+
+ vg->needs_backup = 0;
+ backup(vg->vg_committed);
+}
diff --git a/lib/metadata/vg.h b/lib/metadata/vg.h
index 36d1ed1556c9..8ce57acdce01 100644
--- a/lib/metadata/vg.h
+++ b/lib/metadata/vg.h
@@ -170,6 +170,7 @@ uint32_t vg_mda_used_count(const struct volume_group *vg);
uint32_t vg_mda_copies(const struct volume_group *vg);
int vg_set_mda_copies(struct volume_group *vg, uint32_t mda_copies);
char *vg_profile_dup(const struct volume_group *vg);
+void vg_backup_if_needed(struct volume_group *vg);
/*
* Returns visible LV count - number of LVs from user perspective
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index c40031fe47a8..e19c445b17ee 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -1263,8 +1263,6 @@ static int _lvconvert_mirrors(struct cmd_context *cmd,
new_mimage_count, new_log_count, lp->pvh))
return_0;
- backup(lv->vg);
-
if (!lp->need_polling)
log_print_unless_silent("Logical volume %s converted.",
display_lvname(lv));
@@ -1866,8 +1864,6 @@ static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volu
if (!vg_remove_snapshot(cow))
return_0;
- backup(vg);
-
log_print_unless_silent("Logical Volume %s split from its origin.", display_lvname(cow));
return 1;
@@ -1941,8 +1937,6 @@ static int _lvconvert_split_and_keep_cachevol(struct cmd_context *cmd,
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
- backup(lv->vg);
-
return 1;
}
@@ -1989,8 +1983,6 @@ static int _lvconvert_split_and_keep_cachepool(struct cmd_context *cmd,
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
- backup(lv->vg);
-
log_print_unless_silent("Logical volume %s is not cached and %s is unused.",
display_lvname(lv), display_lvname(lv_fast));
@@ -2224,7 +2216,6 @@ static int _lvconvert_merge_old_snapshot(struct cmd_context *cmd,
/* Store and commit vg but skip starting the merge */
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
- backup(lv->vg);
} else {
/* Perform merge */
if (!lv_update_and_reload(origin))
@@ -2335,8 +2326,6 @@ static int _lvconvert_merge_thin_snapshot(struct cmd_context *cmd,
log_print_unless_silent("Merging of thin snapshot %s will occur on "
"next activation of %s.",
display_lvname(lv), display_lvname(origin));
- backup(lv->vg);
-
return 1;
}
@@ -2860,8 +2849,6 @@ revert_new_lv:
if (!lv_remove(thin_lv) || !vg_write(vg) || !vg_commit(vg))
log_error("Manual intervention may be required to remove "
"abandoned LV(s) before retrying.");
- else
- backup(vg);
return 0;
}
@@ -2999,7 +2986,6 @@ static int _lvconvert_swap_pool_metadata(struct cmd_context *cmd,
if (!vg_write(vg) || !vg_commit(vg))
return_0;
- backup(vg);
return 1;
}
@@ -3472,8 +3458,6 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
r = 1;
out:
- backup(vg);
-
if (r)
log_print_unless_silent("Converted %s to %s pool.",
converted_names, to_cachepool ? "cache" : "thin");
@@ -3509,8 +3493,6 @@ revert_new_lv:
if (!lv_remove(metadata_lv) || !vg_write(vg) || !vg_commit(vg))
log_error("Manual intervention may be required to remove "
"abandoned LV(s) before retrying.");
- else
- backup(vg);
}
return 0;
@@ -5701,8 +5683,6 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd,
if (!lv_detach_writecache_cachevol(lv, noflush))
return_0;
- backup(lv->vg);
-
log_print_unless_silent("Logical volume %s writecache has been detached.",
display_lvname(lv));
return 1;
@@ -5827,7 +5807,6 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
}
ret = 1;
- backup(vg);
out_release:
if (ret)
@@ -6320,8 +6299,6 @@ static int _lvconvert_integrity_remove(struct cmd_context *cmd, struct logical_v
if (!ret)
return_0;
- backup(vg);
-
log_print_unless_silent("Logical volume %s has removed integrity.", display_lvname(lv));
return 1;
}
@@ -6354,8 +6331,6 @@ static int _lvconvert_integrity_add(struct cmd_context *cmd, struct logical_volu
if (!ret)
return_0;
- backup(vg);
-
log_print_unless_silent("Logical volume %s has added integrity.", display_lvname(lv));
return 1;
}
diff --git a/tools/pvmove_poll.c b/tools/pvmove_poll.c
index d379596f2f73..751313cd7e7a 100644
--- a/tools/pvmove_poll.c
+++ b/tools/pvmove_poll.c
@@ -120,8 +120,5 @@ int pvmove_finish(struct cmd_context *cmd, struct volume_group *vg,
return 0;
}
- /* FIXME backup positioning */
- backup(vg);
-
return 1;
}
diff --git a/tools/toollib.c b/tools/toollib.c
index f337f9fcf9d5..338551015e7c 100644
--- a/tools/toollib.c
+++ b/tools/toollib.c
@@ -3224,8 +3224,6 @@ int process_each_lv_in_vg(struct cmd_context *cmd, struct volume_group *vg,
if (vg->needs_write_and_commit && (ret_max == ECMD_PROCESSED) &&
(!vg_write(vg) || !vg_commit(vg)))
ret_max = ECMD_FAILED;
- else if (vg->needs_backup)
- backup(vg);
if (lvargs_supplied) {
/*
diff --git a/tools/vgchange.c b/tools/vgchange.c
index 625b68d46b56..9f972acdb4da 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -684,8 +684,6 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
if (!vg_write(vg) || !vg_commit(vg))
return_ECMD_FAILED;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully changed", vg->name);
}
@@ -1006,8 +1004,6 @@ static int _vgchange_locktype_single(struct cmd_context *cmd, const char *vg_nam
if (!vg_write(vg) || !vg_commit(vg))
return_ECMD_FAILED;
- backup(vg);
-
/*
* When init_vg_sanlock is called for vgcreate, the lockspace remains
* started and lvmlock remains active, but when called for
@@ -1202,8 +1198,6 @@ static int _vgchange_systemid_single(struct cmd_context *cmd, const char *vg_nam
if (!vg_write(vg) || !vg_commit(vg))
return_ECMD_FAILED;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully changed", vg->name);
return ECMD_PROCESSED;
diff --git a/tools/vgcreate.c b/tools/vgcreate.c
index d6d6bb61ddc3..dde3f1eac279 100644
--- a/tools/vgcreate.c
+++ b/tools/vgcreate.c
@@ -167,8 +167,6 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
unlock_vg(cmd, vg, vp_new.vg_name);
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully created%s%s",
vg->name,
vg->system_id ? " with system ID " : "", vg->system_id ? : "");
diff --git a/tools/vgexport.c b/tools/vgexport.c
index 15cc3dd75d56..526ffed7db59 100644
--- a/tools/vgexport.c
+++ b/tools/vgexport.c
@@ -54,8 +54,6 @@ static int vgexport_single(struct cmd_context *cmd __attribute__((unused)),
if (!vg_write(vg) || !vg_commit(vg))
goto_bad;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully exported", vg->name);
return ECMD_PROCESSED;
diff --git a/tools/vgextend.c b/tools/vgextend.c
index b0f49569f492..0856b4c78d25 100644
--- a/tools/vgextend.c
+++ b/tools/vgextend.c
@@ -72,8 +72,6 @@ static int _vgextend_restoremissing(struct cmd_context *cmd __attribute__((unuse
if (!vg_write(vg) || !vg_commit(vg))
return_ECMD_FAILED;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully extended", vg_name);
return ECMD_PROCESSED;
@@ -116,8 +114,6 @@ static int _vgextend_single(struct cmd_context *cmd, const char *vg_name,
if (!vg_write(vg) || !vg_commit(vg))
goto_out;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully extended", vg_name);
ret = ECMD_PROCESSED;
out:
diff --git a/tools/vgimport.c b/tools/vgimport.c
index 4b25b468f017..84b76bd8d244 100644
--- a/tools/vgimport.c
+++ b/tools/vgimport.c
@@ -46,8 +46,6 @@ static int _vgimport_single(struct cmd_context *cmd,
if (!vg_write(vg) || !vg_commit(vg))
goto_bad;
- backup(vg);
-
log_print_unless_silent("Volume group \"%s\" successfully imported", vg->name);
return ECMD_PROCESSED;
diff --git a/tools/vgimportdevices.c b/tools/vgimportdevices.c
index af0e618aa932..1cf7ad31a827 100644
--- a/tools/vgimportdevices.c
+++ b/tools/vgimportdevices.c
@@ -72,7 +72,6 @@ static int _vgimportdevices_single(struct cmd_context *cmd,
if (updated_pvs) {
if (!vg_write(vg) || !vg_commit(vg))
goto_bad;
- backup(vg);
}
return ECMD_PROCESSED;
diff --git a/tools/vgreduce.c b/tools/vgreduce.c
index c759c664301c..f500b553add1 100644
--- a/tools/vgreduce.c
+++ b/tools/vgreduce.c
@@ -169,7 +169,6 @@ static int _vgreduce_repair_single(struct cmd_context *cmd, const char *vg_name,
return ECMD_FAILED;
}
- backup(vg);
return ECMD_PROCESSED;
}
diff --git a/tools/vgrename.c b/tools/vgrename.c
index 71b4e16774af..d627bd056d8e 100644
--- a/tools/vgrename.c
+++ b/tools/vgrename.c
@@ -141,8 +141,6 @@ static int _vgrename_single(struct cmd_context *cmd, const char *vg_name,
lockd_rename_vg_final(cmd, vg, 1);
- if (!backup(vg))
- stack;
if (!backup_remove(cmd, vg_name))
stack;
--
1.8.3.1

View File

@ -0,0 +1,41 @@
From 17b27464868ac7049624d9b90f68c59200866997 Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Wed, 9 Jun 2021 16:16:26 +0200
Subject: [PATCH 6/8] archive: avoid abuse of internal flag
Since archive is now postponned we use internal variable 'changed'
to mark we need to commit new metadata.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/vgchange.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/tools/vgchange.c b/tools/vgchange.c
index 9f972acdb4da..22038ba9b7da 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -640,6 +640,7 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
int ret = ECMD_PROCESSED;
unsigned i;
activation_change_t activate;
+ int changed = 0;
static const struct {
int arg;
@@ -677,10 +678,11 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
if (arg_is_set(cmd, _vgchange_args[i].arg)) {
if (!_vgchange_args[i].fn(cmd, vg))
return_ECMD_FAILED;
+ changed = 1;
}
}
- if (vg_is_archived(vg)) {
+ if (changed) {
if (!vg_write(vg) || !vg_commit(vg))
return_ECMD_FAILED;
--
1.8.3.1

View File

@ -0,0 +1,28 @@
From 8331321070899507b904d8a0ec78b413c826ae32 Mon Sep 17 00:00:00 2001
From: Wu Guanghao <wuguanghao3@huawei.com>
Date: Fri, 11 Jun 2021 10:18:56 -0500
Subject: [PATCH 7/8] pvck: add lock_global() before clean_hint_file()
Signed-off-by: Wu Guanghao <wuguanghao3@huawei.com>
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/pvck.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tools/pvck.c b/tools/pvck.c
index 74663ec43f65..aba6a9cc8787 100644
--- a/tools/pvck.c
+++ b/tools/pvck.c
@@ -3037,6 +3037,9 @@ int pvck(struct cmd_context *cmd, int argc, char **argv)
if (arg_is_set(cmd, repairtype_ARG) || arg_is_set(cmd, repair_ARG)) {
pv_name = argv[0];
+ if (!lock_global(cmd, "ex"))
+ return ECMD_FAILED;
+
clear_hint_file(cmd);
if (!setup_device(cmd, pv_name)) {
--
1.8.3.1

View File

@ -0,0 +1,230 @@
From 440d6ae79fb4df92c7992d3c1689ba4f2d242d6a Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Tue, 8 Jun 2021 14:49:34 -0500
Subject: [PATCH 8/8] lvmdevices: add deviceidtype option
When adding a device to the devices file with --adddev, lvm
by default chooses the best device ID type for the new device.
The new --deviceidtype option allows the user to override the
built in preference. This is useful if there's a problem with
the default type, or if a secondary type is preferrable.
If the specified deviceidtype does not produce a device ID,
then lvm falls back to the preference it would otherwise use.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/device/device_id.c | 32 ++++++++++++++----------
man/lvmdevices.8_des | 68 +++++++++++++++++++++++++++++++++++++++-----------
tools/args.h | 5 ++++
tools/command-lines.in | 1 +
tools/lvmdevices.c | 7 ++++--
5 files changed, 84 insertions(+), 29 deletions(-)
diff --git a/lib/device/device_id.c b/lib/device/device_id.c
index 1b98487ab3a6..f158e4f06dee 100644
--- a/lib/device/device_id.c
+++ b/lib/device/device_id.c
@@ -931,6 +931,7 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
/*
* Choose the device_id type for the device being added.
*
+ * 0. use an idtype specified by the user
* 1. use an idtype specific to a special/virtual device type
* e.g. loop, mpath, crypt, lvmlv, md, etc.
* 2. use an idtype specified by user option.
@@ -939,6 +940,24 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
* 5. use devname as the last resort.
*/
+ if (idtype_arg) {
+ if (!(idtype = idtype_from_str(idtype_arg)))
+ log_warn("WARNING: ignoring unknown device_id type %s.", idtype_arg);
+ else {
+ if (id_arg) {
+ if ((idname = strdup(id_arg)))
+ goto id_done;
+ log_warn("WARNING: ignoring device_id name %s.", id_arg);
+ }
+
+ if ((idname = device_id_system_read(cmd, dev, idtype)))
+ goto id_done;
+
+ log_warn("WARNING: ignoring deviceidtype %s which is not available for device.", idtype_arg);
+ idtype = 0;
+ }
+ }
+
if (MAJOR(dev->dev) == cmd->dev_types->device_mapper_major) {
if (_dev_has_mpath_uuid(cmd, dev, &idname)) {
idtype = DEV_ID_TYPE_MPATH_UUID;
@@ -972,19 +991,6 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
log_warn("Missing support for DRBD idtype");
}
- if (idtype_arg) {
- if (!(idtype = idtype_from_str(idtype_arg)))
- log_warn("WARNING: ignoring unknown device_id type %s.", idtype_arg);
- else {
- if (id_arg) {
- if (!(idname = strdup(id_arg)))
- stack;
- goto id_done;
- }
- goto id_name;
- }
- }
-
/*
* No device-specific, existing, or user-specified idtypes,
* so use first available of sys_wwid / sys_serial / devname.
diff --git a/man/lvmdevices.8_des b/man/lvmdevices.8_des
index 015aa1122731..2335456adbfd 100644
--- a/man/lvmdevices.8_des
+++ b/man/lvmdevices.8_des
@@ -9,18 +9,18 @@ remove it from the devices file with lvmdevices --deldev. The
vgimportdevices(8) command adds all PVs from a VG to the devices file,
and updates the VG metadata to include device IDs of the PVs.
.P
-Commands adding new devices to the devices file necessarily look outside
-the existing devices file to find the devices to add. pvcreate, vgcreate,
-and vgextend also look outside the devices file to create new PVs and add
-them to the devices file.
+Commands that add new devices to the devices file necessarily look outside
+the existing devices file to find the devices being added. pvcreate,
+vgcreate, and vgextend also look outside the devices file to create new
+PVs and add those PVs to the devices file.
.P
LVM records devices in the devices file using hardware-specific IDs, such
as the WWID, and attempts to use subsystem-specific IDs for virtual device
-types (which also aim to be as unique and stable as possible.)
-These device IDs are also written in the VG metadata. When no hardware or
+types (which also aim to be as unique and stable as possible.) These
+device IDs are also written in the VG metadata. When no hardware or
virtual ID is available, lvm falls back using the unstable device name as
-the device ID. When devnames are used, lvm performs extra scanning to
-find devices if their devname changes, e.g. after reboot.
+the device ID. When devnames are used as IDs, lvm performs extra scanning
+to find devices if their devname changes, e.g. after reboot.
.P
When proper device IDs are used, an lvm command will not look at devices
outside the devices file, but when devnames are used as a fallback, lvm
@@ -34,12 +34,13 @@ overriding the devices file. The listed devices act as a sort of devices
file in terms of limiting which devices lvm will see and use. Devices
that are not listed will appear to be missing to the lvm command.
.P
-Multiple devices files can be kept in \fI#DEFAULT_SYS_DIR#/devices\fP, which allows lvm
-to be used with different sets of devices, e.g. system devices do not need
-to be exposed to a specific application, and the application can use lvm on
-its own devices that are not exposed to the system. The option
---devicesfile <filename> is used to select the devices file to use with the
-command. Without the option set, the default system devices file is used.
+Multiple devices files can be kept in \fI#DEFAULT_SYS_DIR#/devices\fP, which
+allows lvm to be used with different sets of devices. For example, system
+devices do not need to be exposed to a specific application, and the
+application can use lvm on its own devices that are not exposed to the
+system. The option --devicesfile <filename> is used to select the devices
+file to use with the command. Without the option set, the default system
+devices file is used.
.P
Setting --devicesfile "" causes lvm to not use a devices file.
.P
@@ -59,3 +60,42 @@ if it does not yet exist.
.P
It is recommended to use lvm commands to make changes to the devices file to
ensure proper updates.
+.P
+The device ID and device ID type are included in the VG metadata and can
+be reported with pvs -o deviceid,deviceidtype. (Note that the lvmdevices
+command does not update VG metadata, but subsequent lvm commands modifying
+the metadata will include the device ID.)
+.P
+Possible device ID types are:
+.br
+.IP \[bu] 2
+.B sys_wwid
+uses the wwid reported by sysfs. This is the first choice for non-virtual
+devices.
+.IP \[bu] 2
+.B sys_serial
+uses the serial number reported by sysfs. This is the second choice for
+non-virtual devices.
+.IP \[bu] 2
+.B mpath_uuid
+is used for dm multipath devices, reported by sysfs.
+.IP \[bu] 2
+.B crypt_uuid
+is used for dm crypt devices, reported by sysfs.
+.IP \[bu] 2
+.B md_uuid
+is used for md devices, reported by sysfs.
+.B lvmlv_uuid
+is used if a PV is placed on top of an lvm LV, reported by sysfs.
+.IP \[bu] 2
+.B loop_file
+is used for loop devices, the backing file name reported by sysfs.
+.IP \[bu] 2
+.B devname
+the device name is used if no other type applies.
+.P
+
+The default choice for device ID type can be overridden using lvmdevices
+--addev --deviceidtype <type>. If the specified type is available for the
+device it will be used, otherwise the device will be added using the type
+that would otherwise be chosen.
diff --git a/tools/args.h b/tools/args.h
index 741c82b9f644..d4f23f849278 100644
--- a/tools/args.h
+++ b/tools/args.h
@@ -228,6 +228,11 @@ arg(detachprofile_ARG, '\0', "detachprofile", 0, 0, 0,
"Detaches a metadata profile from a VG or LV.\n"
"See \\fBlvm.conf\\fP(5) for more information about profiles.\n")
+arg(deviceidtype_ARG, '\0', "deviceidtype", string_VAL, 0, 0,
+ "The type of device ID to use for the device.\n"
+ "If the specified type is available for the device,\n"
+ "then it will override the default type that lvm would use.\n")
+
arg(devices_ARG, '\0', "devices", pv_VAL, ARG_GROUPABLE, 0,
"Devices that the command can use. This option can be repeated\n"
"or accepts a comma separated list of devices. This overrides\n"
diff --git a/tools/command-lines.in b/tools/command-lines.in
index 67c37ffd033b..8607305cbb84 100644
--- a/tools/command-lines.in
+++ b/tools/command-lines.in
@@ -1430,6 +1430,7 @@ ID: lvmdevices_update
DESC: Update the devices file to fix incorrect values.
lvmdevices --adddev PV
+OO: --deviceidtype String
ID: lvmdevices_edit
DESC: Add a device to the devices file.
diff --git a/tools/lvmdevices.c b/tools/lvmdevices.c
index 6b3e05683991..3448bdd14722 100644
--- a/tools/lvmdevices.c
+++ b/tools/lvmdevices.c
@@ -265,6 +265,7 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
if (arg_is_set(cmd, adddev_ARG)) {
const char *devname;
+ const char *deviceidtype;
if (!(devname = arg_str_value(cmd, adddev_ARG, NULL)))
goto_bad;
@@ -311,8 +312,10 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
dev_name(dev), dev_filtered_reason(dev));
}
- /* allow deviceidtype_ARG/deviceid_ARG ? */
- if (!device_id_add(cmd, dev, dev->pvid, NULL, NULL))
+ /* also allow deviceid_ARG ? */
+ deviceidtype = arg_str_value(cmd, deviceidtype_ARG, NULL);
+
+ if (!device_id_add(cmd, dev, dev->pvid, deviceidtype, NULL))
goto_bad;
if (!device_ids_write(cmd))
goto_bad;
--
1.8.3.1

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5ad1645a480440892e35f31616682acba0dc204ed049635d2df3e5a5929d0ed0
size 2510568

View File

@ -1,17 +0,0 @@
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)
iQIcBAABAgAGBQJfMBQmAAoJELkRJDHlCQOfTCoP/RF9P2RY7NXhXVxfpKbTqqhW
Xag1M5HAMmxflbiYI8Lrh1S7NrSHicWWitiHeKkYTwZxiC8E/HVwZq4UJyl5EDcA
F7FoZuVKB/NPVCjrnvDTwm9pZGZcYErufqb1sT/8cSCtr+vOvRQB5wAWtiu0lQA6
OgcqAzM6Vvx04DiufKYfGoii+VvvfmICtOcQtMBfXpMmp7MCtOlTVeMYCbyiKYr1
4YutnoB41lIyfARA6vu0E+VfbpgJX0KwJS01cWV5XES4kDGBdqqFPJVgagMRCCGo
ssBETAFybQBWVs1OUipIhiZAn1JGXmoZ10UIBPs1GBfKisz+NOr4UQtQV+hMKLex
Wx6fqRzZsof1hOLn/XO8h6626fDcf2YGV5ayIFAyv2IGpMJN0iqBkw4PHOtcP1ft
RqkjwWTm56q97eZN5o8clvAnIN6Anyyx8t1BJUWmZ/QTzMYC98CMZTa0/foq7kw4
qBrsqz1PmdKhWL8xtBdrEcwiuDyPaP/hfdfGEDNBiqVN9zVEVIfbJ2OK4xSUfIr5
hdyjYC+gGRQ/CX0o0YC8PQifzxXthw17XDiH15MhlplCrLJk2CwLMQqKLThe9ksE
3OcQnynS59gI2AfQdALVgSIvzUSMBY7V3I2H9kkBGhywJ64Ow2qpNWXd1wF9DDb5
6F0ElaB5hDrghbwMcFTg
=0jtu
-----END PGP SIGNATURE-----

3
LVM2.2.03.12.tgz Normal file
View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76075cf2a2dd12ef526a583f0b3c72a6deaa10691fab28216b0821d01cff0cea
size 2594040

17
LVM2.2.03.12.tgz.asc Normal file
View File

@ -0,0 +1,17 @@
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)
iQIcBAABAgAGBQJgla2zAAoJELkRJDHlCQOfqvYP/j1wFtzE+dX8NmNpMRsS53qh
7AwkYZH0h6VJfzXc2o7jt7Xfytj2zNzq9ZD+HPLPiQg5M2Aa2NlHwk3tIzCAVk5t
LA3GKM4GT8PL/gS2uYaYAzkKW5NWd9cAItkWGRaydgqonwq5ZVekKeSurpfzwp2F
lE+iVclkmerZqGwQriV3Z1alLdm6h0UbkfRemaD8mKA+1IHFeG6Tq/lZEGvjkXL3
IlTZfGF5Ddp2ZJmiIyQiYymiFWhS/XUNrXyVJlAysjtBuWEiywGSoDpf+GkMNTVg
jYmW0wvW5TxGi8yjD2wHK4turEhDyWdHftIaa21v7PD+DAHc5Vhr6QlcCCVb/s00
qSrZaMZJ4RGCnEWf2mZe4m2Ckg7o4owP5CtCHuA4yZ5/SGZ2OhIRGIM/RAcUgC/u
wrzVZOCB4iFnDp7tEFWRq3uwfrHzl5l886nFYt62DZyoG3HlZOpPt0c7Xk5vyNgC
uoIgHHcasTJw6+2prxJ+u6I7+FP0yygKOdY0g3u+WOwYAlXgr2N1M+xFC8Q0tA/I
OQDCc8cHAFdl7K+lGITWGICPIkArW2HJVex2vtHFD23waCoxnsGXLE4mi6IJ7NPl
uSSsnw2cWcBGlVvJdwDCem7Bb50g2VPocOEW4SVgGQdrl76r/Y0epqYNREMGA82i
r30zWBTHczgZfDEIPqqV
=OKc0
-----END PGP SIGNATURE-----

View File

@ -1,30 +0,0 @@
From b918afb693a62938e8189a5d07b13df8007ae062 Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Fri, 28 Aug 2020 19:15:42 +0200
Subject: [PATCH] tools: move struct element before variable lenght list
Move prio field before 'variable' struct array field.
Interesting why this has not been catched yet.
TODO: think about test case
---
tools/tools.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/tools.h b/tools/tools.h
index c3d780d..4b944e2 100644
--- a/tools/tools.h
+++ b/tools/tools.h
@@ -109,8 +109,8 @@ struct arg_values {
struct arg_value_group_list {
struct dm_list list;
- struct arg_values arg_values[0];
uint32_t prio;
+ struct arg_values arg_values[0];
};
#define PERMITTED_READ_ONLY 0x00000002
--
1.8.3.1

View File

@ -1,56 +0,0 @@
From cc2218b4014015bd2633454e683293851183e08c Mon Sep 17 00:00:00 2001
From: Zhao Heming <heming.zhao@suse.com>
Date: Fri, 21 Aug 2020 00:05:04 +0800
Subject: [PATCH] gcc: change zero-sized array to fexlible array
this patch makes gcc happy with compiling option: [-Wstringop-overflow=]
Signed-off-by: Zhao Heming <heming.zhao@suse.com>
---
device_mapper/libdm-common.c | 2 +-
lib/activate/fs.c | 2 +-
libdm/libdm-common.c | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/device_mapper/libdm-common.c b/device_mapper/libdm-common.c
index b6d08f5..9b87e4e 100644
--- a/device_mapper/libdm-common.c
+++ b/device_mapper/libdm-common.c
@@ -1445,7 +1445,7 @@ struct node_op_parms {
char *old_name;
int warn_if_udev_failed;
unsigned rely_on_udev;
- char names[0];
+ char names[];
};
static void _store_str(char **pos, char **ptr, const char *str)
diff --git a/lib/activate/fs.c b/lib/activate/fs.c
index b2c99fc..96f7df6 100644
--- a/lib/activate/fs.c
+++ b/lib/activate/fs.c
@@ -313,7 +313,7 @@ struct fs_op_parms {
char *lv_name;
char *dev;
char *old_lv_name;
- char names[0];
+ char names[];
};
static void _store_str(char **pos, char **ptr, const char *str)
diff --git a/libdm/libdm-common.c b/libdm/libdm-common.c
index 466c74b..a288297 100644
--- a/libdm/libdm-common.c
+++ b/libdm/libdm-common.c
@@ -1443,7 +1443,7 @@ struct node_op_parms {
char *old_name;
int warn_if_udev_failed;
unsigned rely_on_udev;
- char names[0];
+ char names[];
};
static void _store_str(char **pos, char **ptr, const char *str)
--
1.8.3.1

View File

@ -1,321 +0,0 @@
From fd96f1014b11d073d775ea4777f3b3ba2eb93520 Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Fri, 28 Aug 2020 19:15:01 +0200
Subject: [PATCH] gcc: zero-sized array to fexlible array C99
Switch remaining zero sized struct to flexible arrays to be C99
complient.
These simple rules should apply:
- The incomplete array type must be the last element within the structure.
- There cannot be an array of structures that contain a flexible array member.
- Structures that contain a flexible array member cannot be used as a member of another structure.
- The structure must contain at least one named member in addition to the flexible array member.
Although some of the code pieces should be still improved.
---
WHATS_NEW | 1 +
base/data-struct/hash.c | 2 +-
base/data-struct/radix-tree-adaptive.c | 2 +-
daemons/lvmlockd/lvmlockd-internal.h | 2 +-
device_mapper/all.h | 6 +++---
device_mapper/misc/dm-ioctl.h | 8 ++++----
lib/device/dev-cache.c | 2 +-
lib/format_text/layout.h | 6 +++---
lib/label/label.c | 2 +-
lib/metadata/lv_manip.c | 2 +-
libdm/datastruct/hash.c | 2 +-
libdm/libdevmapper.h | 6 +++---
libdm/libdm-stats.c | 2 +-
libdm/misc/dm-ioctl.h | 8 ++++----
tools/tools.h | 2 +-
15 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/base/data-struct/hash.c b/base/data-struct/hash.c
index 0a0541d..5ef5ed2 100644
--- a/base/data-struct/hash.c
+++ b/base/data-struct/hash.c
@@ -22,7 +22,7 @@ struct dm_hash_node {
void *data;
unsigned data_len;
unsigned keylen;
- char key[0];
+ char key[];
};
struct dm_hash_table {
diff --git a/base/data-struct/radix-tree-adaptive.c b/base/data-struct/radix-tree-adaptive.c
index b9ba417..3a46cc1 100644
--- a/base/data-struct/radix-tree-adaptive.c
+++ b/base/data-struct/radix-tree-adaptive.c
@@ -47,7 +47,7 @@ struct value_chain {
struct prefix_chain {
struct value child;
unsigned len;
- uint8_t prefix[0];
+ uint8_t prefix[];
};
struct node4 {
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 191c449..14bdfee 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -158,7 +158,7 @@ struct resource {
struct list_head locks;
struct list_head actions;
char lv_args[MAX_ARGS+1];
- char lm_data[0]; /* lock manager specific data */
+ char lm_data[]; /* lock manager specific data */
};
#define LD_LF_PERSISTENT 0x00000001
diff --git a/device_mapper/all.h b/device_mapper/all.h
index c3c6219..ace3b69 100644
--- a/device_mapper/all.h
+++ b/device_mapper/all.h
@@ -164,20 +164,20 @@ struct dm_info {
struct dm_deps {
uint32_t count;
uint32_t filler;
- uint64_t device[0];
+ uint64_t device[];
};
struct dm_names {
uint64_t dev;
uint32_t next; /* Offset to next struct from start of this struct */
- char name[0];
+ char name[];
};
struct dm_versions {
uint32_t next; /* Offset to next struct from start of this struct */
uint32_t version[3];
- char name[0];
+ char name[];
};
int dm_get_library_version(char *version, size_t size);
diff --git a/device_mapper/misc/dm-ioctl.h b/device_mapper/misc/dm-ioctl.h
index cc2374c..49954a7 100644
--- a/device_mapper/misc/dm-ioctl.h
+++ b/device_mapper/misc/dm-ioctl.h
@@ -183,7 +183,7 @@ struct dm_target_spec {
struct dm_target_deps {
uint32_t count; /* Array size */
uint32_t padding; /* unused */
- uint64_t dev[0]; /* out */
+ uint64_t dev[]; /* out */
};
/*
@@ -193,7 +193,7 @@ struct dm_name_list {
uint64_t dev;
uint32_t next; /* offset to the next record from
the _start_ of this */
- char name[0];
+ char name[];
};
/*
@@ -203,7 +203,7 @@ struct dm_target_versions {
uint32_t next;
uint32_t version[3];
- char name[0];
+ char name[];
};
/*
@@ -212,7 +212,7 @@ struct dm_target_versions {
struct dm_target_msg {
uint64_t sector; /* Device sector */
- char message[0];
+ char message[];
};
/*
diff --git a/lib/device/dev-cache.c b/lib/device/dev-cache.c
index c3f7c49..d4e2658 100644
--- a/lib/device/dev-cache.c
+++ b/lib/device/dev-cache.c
@@ -35,7 +35,7 @@ struct dev_iter {
struct dir_list {
struct dm_list list;
- char dir[0];
+ char dir[];
};
static struct {
diff --git a/lib/format_text/layout.h b/lib/format_text/layout.h
index c3dfe8e..df7ed3a 100644
--- a/lib/format_text/layout.h
+++ b/lib/format_text/layout.h
@@ -33,7 +33,7 @@ struct pv_header_extension {
uint32_t version;
uint32_t flags;
/* NULL-terminated list of bootloader areas */
- struct disk_locn bootloader_areas_xl[0];
+ struct disk_locn bootloader_areas_xl[];
} __attribute__ ((packed));
/* Fields with the suffix _xl should be xlate'd wherever they appear */
@@ -46,7 +46,7 @@ struct pv_header {
/* NULL-terminated list of data areas followed by */
/* NULL-terminated list of metadata area headers */
- struct disk_locn disk_areas_xl[0]; /* Two lists */
+ struct disk_locn disk_areas_xl[]; /* Two lists */
} __attribute__ ((packed));
/*
@@ -76,7 +76,7 @@ struct mda_header {
uint64_t start; /* Absolute start byte of mda_header */
uint64_t size; /* Size of metadata area */
- struct raw_locn raw_locns[0]; /* NULL-terminated list */
+ struct raw_locn raw_locns[]; /* NULL-terminated list */
} __attribute__ ((packed));
struct mda_header *raw_read_mda_header(const struct format_type *fmt,
diff --git a/lib/label/label.c b/lib/label/label.c
index 4d369d4..8e68f93 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -43,7 +43,7 @@ struct labeller_i {
struct dm_list list;
struct labeller *l;
- char name[0];
+ char name[];
};
static struct dm_list _labellers;
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index c47ec72..95ca2df 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -1798,7 +1798,7 @@ struct alloc_handle {
* Contains area_count lists of areas allocated to data stripes
* followed by log_area_count lists of areas allocated to log stripes.
*/
- struct dm_list alloced_areas[0];
+ struct dm_list alloced_areas[];
};
/*
diff --git a/libdm/datastruct/hash.c b/libdm/datastruct/hash.c
index 4c94003..726347e 100644
--- a/libdm/datastruct/hash.c
+++ b/libdm/datastruct/hash.c
@@ -20,7 +20,7 @@ struct dm_hash_node {
void *data;
unsigned data_len;
unsigned keylen;
- char key[0];
+ char key[];
};
struct dm_hash_table {
diff --git a/libdm/libdevmapper.h b/libdm/libdevmapper.h
index e9b1405..059b96f 100644
--- a/libdm/libdevmapper.h
+++ b/libdm/libdevmapper.h
@@ -165,20 +165,20 @@ struct dm_info {
struct dm_deps {
uint32_t count;
uint32_t filler;
- uint64_t device[0];
+ uint64_t device[];
};
struct dm_names {
uint64_t dev;
uint32_t next; /* Offset to next struct from start of this struct */
- char name[0];
+ char name[];
};
struct dm_versions {
uint32_t next; /* Offset to next struct from start of this struct */
uint32_t version[3];
- char name[0];
+ char name[];
};
int dm_get_library_version(char *version, size_t size);
diff --git a/libdm/libdm-stats.c b/libdm/libdm-stats.c
index 5379bed..f8d79d8 100644
--- a/libdm/libdm-stats.c
+++ b/libdm/libdm-stats.c
@@ -65,7 +65,7 @@ struct dm_histogram {
const struct dm_stats_region *region;
uint64_t sum; /* Sum of histogram bin counts. */
int nr_bins; /* Number of histogram bins assigned. */
- struct dm_histogram_bin bins[0];
+ struct dm_histogram_bin bins[];
};
/*
diff --git a/libdm/misc/dm-ioctl.h b/libdm/misc/dm-ioctl.h
index 52a7a93..55dee21 100644
--- a/libdm/misc/dm-ioctl.h
+++ b/libdm/misc/dm-ioctl.h
@@ -183,7 +183,7 @@ struct dm_target_spec {
struct dm_target_deps {
uint32_t count; /* Array size */
uint32_t padding; /* unused */
- uint64_t dev[0]; /* out */
+ uint64_t dev[]; /* out */
};
/*
@@ -193,7 +193,7 @@ struct dm_name_list {
uint64_t dev;
uint32_t next; /* offset to the next record from
the _start_ of this */
- char name[0];
+ char name[];
};
/*
@@ -203,7 +203,7 @@ struct dm_target_versions {
uint32_t next;
uint32_t version[3];
- char name[0];
+ char name[];
};
/*
@@ -212,7 +212,7 @@ struct dm_target_versions {
struct dm_target_msg {
uint64_t sector; /* Device sector */
- char message[0];
+ char message[];
};
/*
diff --git a/tools/tools.h b/tools/tools.h
index 4b944e2..befff57 100644
--- a/tools/tools.h
+++ b/tools/tools.h
@@ -110,7 +110,7 @@ struct arg_values {
struct arg_value_group_list {
struct dm_list list;
uint32_t prio;
- struct arg_values arg_values[0];
+ struct arg_values arg_values[];
};
#define PERMITTED_READ_ONLY 0x00000002
--
1.8.3.1

View File

@ -1,874 +0,0 @@
From 0a28e3c44b05470061f15516e1c89a84fa2e8569 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Thu, 1 Apr 2021 17:20:00 -0500
Subject: [PATCH] Add metadata-based autoactivation property for VG and LV
The autoactivation property can be specified in lvcreate
or vgcreate for new LVs/VGs, and the property can be changed
by lvchange or vgchange for existing LVs/VGs.
--setautoactivation y|n
enables|disables autoactivation of a VG or LV.
Autoactivation is enabled by default, which is consistent with
past behavior. The disabled state is stored as a new flag
in the VG metadata, and the absence of the flag allows
autoactivation.
If autoactivation is disabled for the VG, then no LVs in the VG
will be autoactivated (the LV autoactivation property will have
no effect.) When autoactivation is enabled for the VG, then
autoactivation can be controlled on individual LVs.
The state of this property can be reported for LVs/VGs using
the "-o autoactivation" option in lvs/vgs commands, which will
report "enabled", or "" for the disabled state.
Previous versions of lvm do not recognize this property. Since
autoactivation is enabled by default, the disabled setting will
have no effect in older lvm versions. If the VG is modified by
older lvm versions, the disabled state will also be dropped from
the metadata.
The autoactivation property is an alternative to using the lvm.conf
auto_activation_volume_list, which is still applied to to VGs/LVs
in addition to the new property.
If VG or LV autoactivation is disabled either in metadata or in
auto_activation_volume_list, it will not be autoactivated.
An autoactivation command will silently skip activating an LV
when the autoactivation property is disabled.
To determine the effective autoactivation behavior for a specific
LV, multiple settings would need to be checked:
the VG autoactivation property, the LV autoactivation property,
the auto_activation_volume_list. The "activation skip" property
would also be relevant, since it applies to both normal and auto
activation.
---
lib/config/config_settings.h | 40 ++--
lib/format_text/flags.c | 2 +
lib/metadata/lv_manip.c | 4 +
lib/metadata/metadata-exported.h | 6 +-
lib/metadata/metadata.h | 1 -
lib/report/columns.h | 2 +
lib/report/properties.c | 4 +
lib/report/report.c | 17 ++
test/shell/autoactivation-metadata.sh | 335 ++++++++++++++++++++++++++++++++++
tools/args.h | 28 ++-
tools/command-lines.in | 9 +-
tools/lvchange.c | 32 ++++
tools/lvcreate.c | 3 +
tools/vgchange.c | 31 ++++
tools/vgcreate.c | 3 +
15 files changed, 486 insertions(+), 31 deletions(-)
create mode 100644 test/shell/autoactivation-metadata.sh
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index a19402d9d..3946a7129 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -1119,10 +1119,12 @@ cfg(global_event_activation_CFG, "event_activation", global_CFG_SECTION, CFG_DEF
"Activate LVs based on system-generated device events.\n"
"When a device appears on the system, a system-generated event runs\n"
"the pvscan command to activate LVs if the new PV completes the VG.\n"
- "Use auto_activation_volume_list to select which LVs should be\n"
- "activated from these events (the default is all.)\n"
"When event_activation is disabled, the system will generally run\n"
- "a direct activation command to activate LVs in complete VGs.\n")
+ "a direct activation command to activate LVs in complete VGs.\n"
+ "Activation commands that are run by the system, either from events\n"
+ "or at fixed points during startup, use autoactivation (-aay). See\n"
+ "the --setautoactivation option or the auto_activation_volume_list\n"
+ "setting to configure autoactivation for specific VGs or LVs.\n")
cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 93), 0, vsn(2, 3, 0), NULL,
"This setting is no longer used.\n")
@@ -1402,22 +1404,22 @@ cfg_array(activation_volume_list_CFG, "volume_list", activation_CFG_SECTION, CFG
"#\n")
cfg_array(activation_auto_activation_volume_list_CFG, "auto_activation_volume_list", activation_CFG_SECTION, CFG_ALLOW_EMPTY | CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 2, 97), NULL, 0, NULL,
- "Only LVs selected by this list are auto-activated.\n"
- "This list works like volume_list, but it is used only by\n"
- "auto-activation commands. It does not apply to direct activation\n"
- "commands. If this list is defined, an LV is only auto-activated\n"
- "if it matches an entry in this list. If this list is undefined, it\n"
- "imposes no limits on LV auto-activation (all are allowed.) If this\n"
- "list is defined and empty, i.e. \"[]\", then no LVs are selected for\n"
- "auto-activation. An LV that is selected by this list for\n"
- "auto-activation, must also be selected by volume_list (if defined)\n"
- "before it is activated. Auto-activation is an activation command that\n"
- "includes the 'a' argument: --activate ay or -a ay. The 'a' (auto)\n"
- "argument for auto-activation is meant to be used by activation\n"
- "commands that are run automatically by the system, as opposed to LVM\n"
- "commands run directly by a user. A user may also use the 'a' flag\n"
- "directly to perform auto-activation. Also see pvscan(8) for more\n"
- "information about auto-activation.\n"
+ "A list of VGs or LVs that should be autoactivated.\n"
+ "Autoactivation is an activation command run with -aay,\n"
+ "i.e. vgchange -aay, lvchange -aay, or pvscan --cache -aay.\n"
+ "When this list is defined, an autoactivation command will only\n"
+ "activate LVs included in the list. If this list is undefined,\n"
+ "it has no effect. If this list is defined but empty, then no\n"
+ "LVs will be autoactivated. LVs can be included in the list by\n"
+ "LV name, VG name (applies to all LVs in the VG), or tag name.\n"
+ "VGs and LVs can also have an autoactivation property set in\n"
+ "metadata, see --setautoactivation. LVs included in this list\n"
+ "will not be autoactivated if the VG or LV autoactivation\n"
+ "property is disabled (see vgs or lvs \"-o autoactivation\").\n"
+ "The volume_list setting and the \"activation skip\" property\n"
+ "also apply to autoactivation.\n"
+ "The -aay option is meant to be used by activation commands that\n"
+ "are run automatically by the system, e.g. from systemd services.\n"
"#\n"
"Accepted values:\n"
" vgname\n"
diff --git a/lib/format_text/flags.c b/lib/format_text/flags.c
index 4cee14aa7..3890a40cc 100644
--- a/lib/format_text/flags.c
+++ b/lib/format_text/flags.c
@@ -35,6 +35,7 @@ static const struct flag _vg_flags[] = {
{LVM_READ, "READ", STATUS_FLAG},
{LVM_WRITE, "WRITE", STATUS_FLAG},
{LVM_WRITE_LOCKED, "WRITE_LOCKED", COMPATIBLE_FLAG},
+ {NOAUTOACTIVATE, "NOAUTOACTIVATE", COMPATIBLE_FLAG},
{CLUSTERED, "CLUSTERED", STATUS_FLAG},
{SHARED, "SHARED", STATUS_FLAG},
{PARTIAL_VG, NULL, 0},
@@ -70,6 +71,7 @@ static const struct flag _lv_flags[] = {
{LV_REMOVE_AFTER_RESHAPE, "REMOVE_AFTER_RESHAPE", SEGTYPE_FLAG},
{LV_WRITEMOSTLY, "WRITEMOSTLY", STATUS_FLAG},
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
+ {LV_NOAUTOACTIVATE, "NOAUTOACTIVATE", COMPATIBLE_FLAG},
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
{LV_METADATA_FORMAT, "METADATA_FORMAT", SEGTYPE_FLAG},
{LV_CACHE_VOL, "CACHE_VOL", COMPATIBLE_FLAG},
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index b38acf883..ff2a673eb 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -8569,6 +8569,10 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
lv_set_activation_skip(lv, lp->activation_skip & ACTIVATION_SKIP_SET,
lp->activation_skip & ACTIVATION_SKIP_SET_ENABLED);
+
+ if (lp->noautoactivate)
+ lv->status |= LV_NOAUTOACTIVATE;
+
/*
* Check for autoactivation.
* If the LV passes the auto activation filter, activate
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 8ae067ebc..c6116350f 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -57,7 +57,9 @@
#define ALLOCATABLE_PV UINT64_C(0x0000000000000008) /* PV */
#define ARCHIVED_VG ALLOCATABLE_PV /* VG, reuse same bit */
-//#define SPINDOWN_LV UINT64_C(0x0000000000000010) /* LV */
+#define LV_NOAUTOACTIVATE UINT64_C(0x0000000000000010) /* LV - also a PV flag */
+#define NOAUTOACTIVATE UINT64_C(0x0000000000000010) /* VG - also a PV flag */
+
//#define BADBLOCK_ON UINT64_C(0x0000000000000020) /* LV */
#define VISIBLE_LV UINT64_C(0x0000000000000040) /* LV */
#define FIXED_MINOR UINT64_C(0x0000000000000080) /* LV */
@@ -159,6 +161,7 @@
#define LV_CACHE_USES_CACHEVOL UINT64_C(0x4000000000000000) /* LV - also a PV flag */
+
/* Format features flags */
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
// #define FMT_MDAS 0x00000002U /* Proper metadata areas? */
@@ -972,6 +975,7 @@ struct lvcreate_params {
#define ACTIVATION_SKIP_SET_ENABLED 0x02 /* set the LV activation skip flag state to 'enabled' */
#define ACTIVATION_SKIP_IGNORE 0x04 /* request to ignore LV activation skip flag (if any) */
int activation_skip; /* activation skip flags */
+ int noautoactivate; /* 1 if --setautoactivation n */
activation_change_t activate; /* non-snapshot, non-mirror */
thin_discards_t discards; /* thin */
thin_zero_t zero_new_blocks;
diff --git a/lib/metadata/metadata.h b/lib/metadata/metadata.h
index 3ea77ce2e..dfd576e3c 100644
--- a/lib/metadata/metadata.h
+++ b/lib/metadata/metadata.h
@@ -55,7 +55,6 @@
/* May any free extents on this PV be used or must they be left free? */
-#define SPINDOWN_LV UINT64_C(0x00000010) /* LV */
#define BADBLOCK_ON UINT64_C(0x00000020) /* LV */
//#define VIRTUAL UINT64_C(0x00010000) /* LV - internal use only */
#define PRECOMMITTED UINT64_C(0x00200000) /* VG - internal use only */
diff --git a/lib/report/columns.h b/lib/report/columns.h
index 8d2f7a993..12b78b766 100644
--- a/lib/report/columns.h
+++ b/lib/report/columns.h
@@ -61,6 +61,7 @@ FIELD(LVS, lv, STR, "AllocPol", lvid, 10, lvallocationpolicy, lv_allocation_poli
FIELD(LVS, lv, BIN, "AllocLock", lvid, 10, lvallocationlocked, lv_allocation_locked, "Set if LV is locked against allocation changes.", 0)
FIELD(LVS, lv, BIN, "FixMin", lvid, 10, lvfixedminor, lv_fixed_minor, "Set if LV has fixed minor number assigned.", 0)
FIELD(LVS, lv, BIN, "SkipAct", lvid, 15, lvskipactivation, lv_skip_activation, "Set if LV is skipped on activation.", 0)
+FIELD(LVS, lv, BIN, "AutoAct", lvid, 0, lvautoactivation, lv_autoactivation, "Set if LV autoactivation is enabled.", 0)
FIELD(LVS, lv, STR, "WhenFull", lvid, 15, lvwhenfull, lv_when_full, "For thin pools, behavior when full.", 0)
FIELD(LVS, lv, STR, "Active", lvid, 0, lvactive, lv_active, "Active state of the LV.", 0)
FIELD(LVS, lv, BIN, "ActLocal", lvid, 10, lvactivelocally, lv_active_locally, "Set if the LV is active locally.", 0)
@@ -222,6 +223,7 @@ FIELD(VGS, vg, STR, "Attr", cmd, 5, vgstatus, vg_attr, "Various attributes - see
FIELD(VGS, vg, STR, "VPerms", cmd, 10, vgpermissions, vg_permissions, "VG permissions.", 0)
FIELD(VGS, vg, BIN, "Extendable", cmd, 0, vgextendable, vg_extendable, "Set if VG is extendable.", 0)
FIELD(VGS, vg, BIN, "Exported", cmd, 10, vgexported, vg_exported, "Set if VG is exported.", 0)
+FIELD(VGS, vg, BIN, "AutoAct", cmd, 0, vgautoactivation, vg_autoactivation, "Set if VG autoactivation is enabled.", 0)
FIELD(VGS, vg, BIN, "Partial", cmd, 10, vgpartial, vg_partial, "Set if VG is partial.", 0)
FIELD(VGS, vg, STR, "AllocPol", cmd, 10, vgallocationpolicy, vg_allocation_policy, "VG allocation policy.", 0)
FIELD(VGS, vg, BIN, "Clustered", cmd, 10, vgclustered, vg_clustered, "Set if VG is clustered.", 0)
diff --git a/lib/report/properties.c b/lib/report/properties.c
index f2174b83c..12ea890f4 100644
--- a/lib/report/properties.c
+++ b/lib/report/properties.c
@@ -269,6 +269,8 @@ GET_PV_STR_PROPERTY_FN(pv_device_id_type, pv->device_id_type)
#define _vg_extendable_get prop_not_implemented_get
#define _vg_exported_set prop_not_implemented_set
#define _vg_exported_get prop_not_implemented_get
+#define _vg_autoactivation_set prop_not_implemented_set
+#define _vg_autoactivation_get prop_not_implemented_get
#define _vg_partial_set prop_not_implemented_set
#define _vg_partial_get prop_not_implemented_get
#define _vg_allocation_policy_set prop_not_implemented_set
@@ -323,6 +325,8 @@ GET_PV_STR_PROPERTY_FN(pv_device_id_type, pv->device_id_type)
#define _lv_skip_activation_get prop_not_implemented_get
#define _lv_check_needed_set prop_not_implemented_set
#define _lv_check_needed_get prop_not_implemented_get
+#define _lv_autoactivation_set prop_not_implemented_set
+#define _lv_autoactivation_get prop_not_implemented_get
#define _lv_historical_set prop_not_implemented_set
#define _lv_historical_get prop_not_implemented_get
diff --git a/lib/report/report.c b/lib/report/report.c
index 2f5811a96..222d3f4b6 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -3573,6 +3573,15 @@ static int _vgexported_disp(struct dm_report *rh, struct dm_pool *mem,
return _binary_disp(rh, mem, field, exported, GET_FIRST_RESERVED_NAME(vg_exported_y), private);
}
+static int _vgautoactivation_disp(struct dm_report *rh, struct dm_pool *mem,
+ struct dm_report_field *field,
+ const void *data, void *private)
+{
+ const struct volume_group *vg = (const struct volume_group *)data;
+ int aa_yes = (vg->status & NOAUTOACTIVATE) ? 0 : 1;
+ return _binary_disp(rh, mem, field, aa_yes, "enabled", private);
+}
+
static int _vgpartial_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
@@ -3969,6 +3978,14 @@ static int _lvskipactivation_disp(struct dm_report *rh, struct dm_pool *mem,
return _binary_disp(rh, mem, field, skip_activation, "skip activation", private);
}
+static int _lvautoactivation_disp(struct dm_report *rh, struct dm_pool *mem,
+ struct dm_report_field *field,
+ const void *data, void *private)
+{
+ int aa_yes = (((const struct logical_volume *) data)->status & LV_NOAUTOACTIVATE) ? 0 : 1;
+ return _binary_disp(rh, mem, field, aa_yes, "enabled", private);
+}
+
static int _lvhistorical_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
diff --git a/test/shell/autoactivation-metadata.sh b/test/shell/autoactivation-metadata.sh
new file mode 100644
index 000000000..4ee2b75c0
--- /dev/null
+++ b/test/shell/autoactivation-metadata.sh
@@ -0,0 +1,335 @@
+
+SKIP_WITH_LVMPOLLD=1
+
+RUNDIR="/run"
+test -d "$RUNDIR" || RUNDIR="/var/run"
+PVS_ONLINE_DIR="$RUNDIR/lvm/pvs_online"
+VGS_ONLINE_DIR="$RUNDIR/lvm/vgs_online"
+PVS_LOOKUP_DIR="$RUNDIR/lvm/pvs_lookup"
+
+_clear_online_files() {
+ # wait till udev is finished
+ aux udev_wait
+ rm -f "$PVS_ONLINE_DIR"/*
+ rm -f "$VGS_ONLINE_DIR"/*
+ rm -f "$PVS_LOOKUP_DIR"/*
+}
+
+. lib/inittest
+
+aux prepare_devs 1
+
+#
+# test lvchange --setautoactivation
+#
+
+# default
+vgcreate $SHARED $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+
+lvchange -aay $vg/$lv1
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+lvchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+_clear_online_files
+
+# --aa=n
+lvchange --setautoactivation n $vg/$lv1
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation ""
+
+lvchange -aay $vg/$lv1
+check lv_field $vg/$lv1 lv_active ""
+lvchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+_clear_online_files
+
+# --aa=y
+lvchange --setautoactivation y $vg/$lv1
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+
+lvchange -aay $vg/$lv1
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+lvchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+_clear_online_files
+
+vgremove -y $vg
+
+#
+# test vgchange --setautoactivation
+#
+
+# default
+vgcreate $SHARED $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+
+# --aa=n
+vgchange --setautoactivation n $vg
+check vg_field $vg autoactivation ""
+check lv_field $vg/$lv1 autoactivation "enabled"
+
+lvchange -aay $vg/$lv1
+check lv_field $vg/$lv1 lv_active ""
+lvchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+_clear_online_files
+
+# --aa=y
+vgchange --setautoactivation y $vg
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+
+lvchange -aay $vg/$lv1
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+lvchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+lvchange -an $vg/$lv1
+_clear_online_files
+
+vgremove -y $vg
+
+#
+# test vgcreate --setautoactivation, lvcreate --setautoactivation
+#
+
+vgcreate $SHARED $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+lvcreate -n $lv2 -l1 --setautoactivation y -an $vg
+lvcreate -n $lv3 -l1 --setautoactivation n -an $vg
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+check lv_field $vg/$lv2 autoactivation "enabled"
+check lv_field $vg/$lv3 autoactivation ""
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+lvchange -aay $vg/$lv3
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+vgremove -y $vg
+_clear_online_files
+
+vgcreate $SHARED --setautoactivation y $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+lvcreate -n $lv2 -l1 --setautoactivation y -an $vg
+lvcreate -n $lv3 -l1 --setautoactivation n -an $vg
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+check lv_field $vg/$lv2 autoactivation "enabled"
+check lv_field $vg/$lv3 autoactivation ""
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+lvchange -aay $vg/$lv3
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active "active"
+check lv_field $vg/$lv3 lv_active ""
+vgchange -an $vg
+vgremove -y $vg
+_clear_online_files
+
+vgcreate $SHARED --setautoactivation n $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+lvcreate -n $lv2 -l1 --setautoactivation y -an $vg
+lvcreate -n $lv3 -l1 --setautoactivation n -an $vg
+check vg_field $vg autoactivation ""
+check lv_field $vg/$lv1 autoactivation "enabled"
+check lv_field $vg/$lv2 autoactivation "enabled"
+check lv_field $vg/$lv3 autoactivation ""
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+check lv_field $vg/$lv3 lv_active ""
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+lvchange -aay $vg/$lv3
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+check lv_field $vg/$lv3 lv_active ""
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+check lv_field $vg/$lv3 lv_active ""
+vgremove -y $vg
+_clear_online_files
+
+
+#
+# test combination of --aa and auto_activation_volume_list
+#
+
+vgcreate $SHARED $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+lvcreate -n $lv2 -l1 --setautoactivation n -an $vg
+check vg_field $vg autoactivation "enabled"
+check lv_field $vg/$lv1 autoactivation "enabled"
+check lv_field $vg/$lv2 autoactivation ""
+
+# list prevents all aa, metadata settings don't matter
+aux lvmconf "activation/auto_activation_volume_list = [ ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+# list allows all vg aa, metadata allows lv1 -> lv1 activated
+aux lvmconf "activation/auto_activation_volume_list = [ \"$vg\" ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+# list allows lv1, metadata allows lv1 -> lv1 activated
+aux lvmconf "activation/auto_activation_volume_list = [ \"$vg/$lv1\" ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active "active"
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+# list allows lv2, metadata allows lv1 -> nothing activated
+aux lvmconf "activation/auto_activation_volume_list = [ \"$vg/$lv2\" ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+vgremove -y $vg
+
+vgcreate $SHARED --setautoactivation n $vg "$dev1"
+lvcreate -n $lv1 -l1 -an $vg
+lvcreate -n $lv2 -l1 --setautoactivation n -an $vg
+check vg_field $vg autoactivation ""
+check lv_field $vg/$lv1 autoactivation "enabled"
+check lv_field $vg/$lv2 autoactivation ""
+
+# list prevents all aa, metadata settings don't matter
+aux lvmconf "activation/auto_activation_volume_list = [ ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+# list allows lv1, metadata disallows vg -> nothing activated
+aux lvmconf "activation/auto_activation_volume_list = [ \"$vg/$lv1\" ]"
+vgchange -aay $vg
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+lvchange -aay $vg/$lv1
+lvchange -aay $vg/$lv2
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+pvscan --cache -aay "$dev1"
+check lv_field $vg/$lv1 lv_active ""
+check lv_field $vg/$lv2 lv_active ""
+vgchange -an $vg
+_clear_online_files
+
+vgremove -y $vg
+
diff --git a/tools/args.h b/tools/args.h
index c1d32be32..3dcffe224 100644
--- a/tools/args.h
+++ b/tools/args.h
@@ -87,6 +87,17 @@ arg(atversion_ARG, '\0', "atversion", string_VAL, 0, 0,
"which does not contain any newer settings for which LVM would\n"
"issue a warning message when checking the configuration.\n")
+arg(setautoactivation_ARG, '\0', "setautoactivation", bool_VAL, 0, 0,
+ "Set the autoactivation property on a VG or LV.\n"
+ "Display the property with vgs or lvs \"-o autoactivation\".\n"
+ "When the autoactivation property is disabled, the VG or LV\n"
+ "will not be activated by a command doing autoactivation\n"
+ "(vgchange, lvchange, or pvscan using -aay.)\n"
+ "If autoactivation is disabled on a VG, no LVs will be autoactivated\n"
+ "in that VG, and the LV autoactivation property has no effect.\n"
+ "If autoactivation is enabled on a VG, autoactivation can be disabled\n"
+ "for individual LVs.\n")
+
arg(binary_ARG, '\0', "binary", 0, 0, 0,
"Use binary values \"0\" or \"1\" instead of descriptive literal values\n"
"for columns that have exactly two valid values to report (not counting\n"
@@ -960,12 +971,17 @@ arg(activate_ARG, 'a', "activate", activation_VAL, 0, 0,
"link and present this as the name of the device.\n"
"The location and name of the underlying device node may depend on\n"
"the distribution, configuration (e.g. udev), or release version.\n"
- "\\fBay\\fP specifies autoactivation, in which case an LV is activated\n"
- "only if it matches an item in lvm.conf activation/auto_activation_volume_list.\n"
- "If the list is not set, all LVs are considered to match, and if\n"
- "if the list is set but empty, no LVs match.\n"
- "Autoactivation should be used during system boot to make it possible\n"
- "to select which LVs should be automatically activated by the system.\n"
+ "\\fBay\\fP specifies autoactivation, which is used by system-generated\n"
+ "activation commands. By default, LVs are autoactivated.\n"
+ "An autoactivation property can be set on a VG or LV to disable autoactivation,\n"
+ "see --setautoactivation y|n in vgchange, lvchange, vgcreate, and lvcreate.\n"
+ "Display the property with vgs or lvs \"-o autoactivation\".\n"
+ "The lvm.conf auto_activation_volume_list includes names of VGs or LVs\n"
+ "that should be autoactivated, and anything not listed is not autoactivated.\n"
+ "When auto_activation_volume_list is undefined (the default), it has no effect.\n"
+ "If auto_activation_volume_list is defined and empty, no LVs are autoactivated.\n"
+ "Items included by auto_activation_volume_list will not be autoactivated if\n"
+ "the autoactivation property has been disabled.\n"
"See \\fBlvmlockd\\fP(8) for more information about activation options \\fBey\\fP and \\fBsy\\fP for shared VGs.\n"
"#lvcreate\n"
"Controls the active state of the new LV.\n"
diff --git a/tools/command-lines.in b/tools/command-lines.in
index 528811f52..0a3359630 100644
--- a/tools/command-lines.in
+++ b/tools/command-lines.in
@@ -224,7 +224,7 @@ OO_LVCHANGE_META: --addtag Tag, --deltag Tag,
--compression Bool, --deduplication Bool,
--detachprofile, --metadataprofile String, --profile String,
--permission Permission, --readahead Readahead, --setactivationskip Bool,
---errorwhenfull Bool, --discards Discards, --zero Bool,
+--setautoactivation Bool, --errorwhenfull Bool, --discards Discards, --zero Bool,
--cachemode CacheMode, --cachepolicy String, --cachesettings String,
--minrecoveryrate SizeKB, --maxrecoveryrate SizeKB,
--writebehind Number, --writemostly WriteMostlyPV, --persistent n
@@ -799,7 +799,7 @@ OO_LVCREATE: --addtag Tag, --alloc Alloc, --autobackup Bool, --activate Active,
--metadataprofile String, --minor Number, --monitor Bool, --name String, --nosync,
--noudevsync, --permission Permission, --persistent Bool, --readahead Readahead,
--reportformat ReportFmt, --setactivationskip Bool, --wipesignatures Bool,
---zero Bool
+--zero Bool, --setautoactivation Bool
OO_LVCREATE_CACHE: --cachemode CacheMode, --cachepolicy String, --cachesettings String,
--chunksize SizeKB, --cachemetadataformat CacheMetadataFormat
@@ -1662,7 +1662,8 @@ OO_VGCHANGE_META: --addtag Tag, --deltag Tag,
--logicalvolume Uint32, --maxphysicalvolumes Uint32, --alloc Alloc, --uuid,
--pvmetadatacopies MetadataCopiesPV, --vgmetadatacopies MetadataCopiesVG,
--physicalextentsize SizeMB, --resizeable Bool,
---profile String, --detachprofile, --metadataprofile String
+--profile String, --detachprofile, --metadataprofile String,
+--setautoactivation Bool
vgchange OO_VGCHANGE_META
OO: --poll Bool, OO_VGCHANGE
@@ -1742,7 +1743,7 @@ OO: --addtag Tag, --alloc Alloc, --autobackup Bool, --clustered Bool, --maxlogic
--physicalextentsize SizeMB, --force, --zero Bool, --labelsector Number,
--metadatasize SizeMB, --pvmetadatacopies MetadataCopiesPV, --vgmetadatacopies MetadataCopiesVG,
--reportformat ReportFmt, --dataalignment SizeKB, --dataalignmentoffset SizeKB,
---shared, --systemid String, --locktype LockType
+--shared, --systemid String, --locktype LockType, --setautoactivation Bool
ID: vgcreate_general
---
diff --git a/tools/lvchange.c b/tools/lvchange.c
index 0189c365e..8293f5035 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -215,6 +215,10 @@ static int _lvchange_activate(struct cmd_context *cmd, struct logical_volume *lv
!lv_passes_auto_activation_filter(cmd, lv))
return 1;
+ if ((activate == CHANGE_AAY) &&
+ ((lv->status & LV_NOAUTOACTIVATE) || (lv->vg->status & NOAUTOACTIVATE)))
+ return 1;
+
if (!lv_change_activate(cmd, lv, activate))
return_0;
@@ -1009,6 +1013,28 @@ static int _lvchange_activation_skip(struct logical_volume *lv, uint32_t *mr)
return 1;
}
+static int _lvchange_autoactivation(struct logical_volume *lv, uint32_t *mr)
+{
+	int aa_no_arg = !arg_int_value(lv->vg->cmd, setautoactivation_ARG, 0);
+	int aa_no_meta = (lv->status & LV_NOAUTOACTIVATE);
+
+	if ((aa_no_arg && aa_no_meta) || (!aa_no_arg && !aa_no_meta))
+		return 1;
+
+	if (aa_no_arg)
+		lv->status |= LV_NOAUTOACTIVATE;
+	else
+		lv->status &= ~LV_NOAUTOACTIVATE;
+
+	/* Args must match format order: first %s is the flag value, second the LV name */
+	log_verbose("Changing autoactivation flag to %s for LV %s.",
+		    aa_no_arg ? "no" : "yes", display_lvname(lv));
+
+	/* Request caller to commit+backup metadata */
+	*mr |= MR_COMMIT;
+
+	return 1;
+}
+
static int _lvchange_compression(struct logical_volume *lv, uint32_t *mr)
{
struct cmd_context *cmd = lv->vg->cmd;
@@ -1112,6 +1138,7 @@ static int _option_allows_group_commit(int opt_enum)
metadataprofile_ARG,
detachprofile_ARG,
setactivationskip_ARG,
+ setautoactivation_ARG,
-1
};
@@ -1250,6 +1277,11 @@ static int _lvchange_properties_single(struct cmd_context *cmd,
doit += _lvchange_activation_skip(lv, &mr);
break;
+ case setautoactivation_ARG:
+ docmds++;
+ doit += _lvchange_autoactivation(lv, &mr);
+ break;
+
case compression_ARG:
docmds++;
doit += _lvchange_compression(lv, &mr);
diff --git a/tools/lvcreate.c b/tools/lvcreate.c
index 1ce561fc3..a28f0931e 100644
--- a/tools/lvcreate.c
+++ b/tools/lvcreate.c
@@ -742,6 +742,9 @@ static int _read_activation_params(struct cmd_context *cmd,
if (arg_is_set(cmd, ignoreactivationskip_ARG))
lp->activation_skip |= ACTIVATION_SKIP_IGNORE;
+ if (arg_is_set(cmd, setautoactivation_ARG) && !arg_int_value(cmd, setautoactivation_ARG, 1))
+ lp->noautoactivate = 1;
+
return 1;
}
diff --git a/tools/vgchange.c b/tools/vgchange.c
index 58c8ddc84..032f3efcf 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -117,6 +117,10 @@ static int _activate_lvs_in_vg(struct cmd_context *cmd, struct volume_group *vg,
!lv_passes_auto_activation_filter(cmd, lv))
continue;
+ /* vg NOAUTOACTIVATE flag was already checked */
+ if ((activate == CHANGE_AAY) && (lv->status & LV_NOAUTOACTIVATE))
+ continue;
+
expected_count++;
if (!lv_change_activate(cmd, lv, activate)) {
@@ -209,6 +213,11 @@ int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
return 0;
}
+ if ((activate == CHANGE_AAY) && (vg->status & NOAUTOACTIVATE)) {
+ log_debug("Autoactivation is disabled for VG %s.", vg->name);
+ return 1;
+ }
+
/*
* Safe, since we never write out new metadata here. Required for
* partial activation to work.
@@ -317,6 +326,26 @@ static int _vgchange_resizeable(struct cmd_context *cmd,
return 1;
}
+static int _vgchange_autoactivation(struct cmd_context *cmd,
+ struct volume_group *vg)
+{
+ int aa_no_arg = !arg_int_value(cmd, setautoactivation_ARG, 0);
+ int aa_no_meta = (vg->status & NOAUTOACTIVATE) ? 1 : 0;
+
+ if ((aa_no_arg && aa_no_meta) || (!aa_no_arg && !aa_no_meta)) {
+ log_error("Volume group autoactivation is already %s.",
+ aa_no_arg ? "no" : "yes");
+ return 0;
+ }
+
+ if (aa_no_arg)
+ vg->status |= NOAUTOACTIVATE;
+ else
+ vg->status &= ~NOAUTOACTIVATE;
+
+ return 1;
+}
+
static int _vgchange_logicalvolume(struct cmd_context *cmd,
struct volume_group *vg)
{
@@ -619,6 +648,7 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
{ logicalvolume_ARG, &_vgchange_logicalvolume },
{ maxphysicalvolumes_ARG, &_vgchange_physicalvolumes },
{ resizeable_ARG, &_vgchange_resizeable },
+ { setautoactivation_ARG, &_vgchange_autoactivation },
{ deltag_ARG, &_vgchange_deltag },
{ addtag_ARG, &_vgchange_addtag },
{ physicalextentsize_ARG, &_vgchange_pesize },
@@ -707,6 +737,7 @@ int vgchange(struct cmd_context *cmd, int argc, char **argv)
arg_is_set(cmd, logicalvolume_ARG) ||
arg_is_set(cmd, maxphysicalvolumes_ARG) ||
arg_is_set(cmd, resizeable_ARG) ||
+ arg_is_set(cmd, setautoactivation_ARG) ||
arg_is_set(cmd, uuid_ARG) ||
arg_is_set(cmd, physicalextentsize_ARG) ||
arg_is_set(cmd, alloc_ARG) ||
diff --git a/tools/vgcreate.c b/tools/vgcreate.c
index 73066e9a4..f9c40e86d 100644
--- a/tools/vgcreate.c
+++ b/tools/vgcreate.c
@@ -118,6 +118,9 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
!vg_set_mda_copies(vg, vp_new.vgmetadatacopies))
goto_bad;
+ if (arg_is_set(cmd, setautoactivation_ARG) && !arg_int_value(cmd, setautoactivation_ARG, 1))
+ vg->status |= NOAUTOACTIVATE;
+
/* attach the pv's */
if (!vg_extend_each_pv(vg, &pp))
goto_bad;
--
2.12.3

View File

@ -39,9 +39,9 @@ index 78f506520c45..16e8536e8a2a 100644
+IMPORT{cmdline}="nolvm"
+ENV{nolvm}=="?*", GOTO="lvm_end"
+
# If the PV label got lost, inform lvmetad immediately.
# Detect the lost PV label by comparing previous ID_FS_TYPE value with current one.
# Detect removed PV label by comparing previous ID_FS_TYPE value with current one.
ENV{.ID_FS_TYPE_NEW}="$env{ID_FS_TYPE}"
IMPORT{db}="ID_FS_TYPE"
--
1.8.3.1

View File

@ -1,39 +0,0 @@
From 2be585b79c71b8f70c0252af5f09dbd5e6103030 Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Mon, 8 Feb 2021 16:28:18 +0100
Subject: [PATCH] pvscan: support disabled event_activation
In past we had this control with use_lvmetad check for
pvscan --cache -aay
Howerer this got lost with lvmetad removal commit:
117160b27e510dceb1ed6acf995115c040acd88d
When user sets lvm.conf global/event_activation=0
pvscan service will no longer auto activate any LVs on appeared PVs.
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
tools/pvscan.c | 6 ++++++
1 file changed, 6 insertions(+)
diff -Nupr a/tools/pvscan.c b/tools/pvscan.c
--- a/tools/pvscan.c 2021-04-23 11:12:35.352431602 +0800
+++ b/tools/pvscan.c 2021-04-23 11:18:29.194565976 +0800
@@ -1224,6 +1224,12 @@ int pvscan_cache_cmd(struct cmd_context
dm_list_init(&vgnames);
dm_list_init(&vglist);
+ if (do_activate &&
+ !find_config_tree_bool(cmd, global_event_activation_CFG, NULL)) {
+ log_verbose("Ignoring pvscan --cache -aay because event_activation is disabled.");
+ return ECMD_PROCESSED;
+ }
+
/*
* When systemd/udev run pvscan --cache commands, those commands
* should not wait on udev info since the udev info may not be
--
2.16.4

View File

@ -1,39 +0,0 @@
From a616abba03a35ec3064360ac8cab9ebb2203d645 Mon Sep 17 00:00:00 2001
From: David Teigland <teigland@redhat.com>
Date: Mon, 19 Apr 2021 13:29:17 -0500
Subject: [PATCH] config: improve description for event_activation
Signed-off-by: Heming Zhao <heming.zhao@suse.com>
---
lib/config/config_settings.h | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index 3946a7129343..d3a42a153e68 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -1117,13 +1117,14 @@ cfg(global_lvdisplay_shows_full_device_path_CFG, "lvdisplay_shows_full_device_pa
cfg(global_event_activation_CFG, "event_activation", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 1, vsn(2, 3, 1), 0, 0, NULL,
"Activate LVs based on system-generated device events.\n"
- "When a device appears on the system, a system-generated event runs\n"
- "the pvscan command to activate LVs if the new PV completes the VG.\n"
- "When event_activation is disabled, the system will generally run\n"
- "a direct activation command to activate LVs in complete VGs.\n"
- "Activation commands that are run by the system, either from events\n"
- "or at fixed points during startup, use autoactivation (-aay). See\n"
- "the --setautoactivation option or the auto_activation_volume_list\n"
+ "When a PV appears on the system, a system-generated uevent triggers\n"
+ "the lvm2-pvscan service which runs the pvscan --cache -aay command.\n"
+ "If the new PV completes a VG, pvscan autoactivates LVs in the VG.\n"
+ "When event_activation is disabled, the lvm2-activation services are\n"
+ "generated and run at fixed points during system startup. These\n"
+ "services run vgchange -aay to autoactivate LVs in VGs that happen\n"
+ "to be present at that point in time.\n"
+ "See the --setautoactivation option or the auto_activation_volume_list\n"
"setting to configure autoactivation for specific VGs or LVs.\n")
cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 93), 0, vsn(2, 3, 0), NULL,
--
2.16.4

View File

@ -8,6 +8,11 @@ Check: mount the device first and then run`btrfs filesystem scrub start
Reisze: find the mount point first and resize the filesystem after get
the device id since there are maybe several devices underneath btrfs
filesystem
---
by heming.zhao@suse.com 2021.6.8
modify patch according to upstream code
---
scripts/fsadm.sh | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 72 insertions(+), 3 deletions(-)
@ -26,9 +31,9 @@ index a09b91534..2a29d5ad5 100755
# 0 success
@@ -57,6 +58,7 @@ XFS_CHECK=xfs_check
# XFS_REPAIR -n is used when XFS_CHECK is not found
XFS_REPAIR=xfs_repair
CRYPTSETUP=cryptsetup
+BTRFS=btrfs
XFS_REPAIR="xfs_repair"
CRYPTSETUP="cryptsetup"
+BTRFS="btrfs"
# user may override lvm location by setting LVM_BINARY
LVM=${LVM_BINARY:-lvm}
@ -126,17 +131,17 @@ index a09b91534..2a29d5ad5 100755
####################
@@ -676,6 +738,7 @@ resize() {
"crypto_LUKS")
which "$CRYPTSETUP" > /dev/null 2>&1 || error "$CRYPTSETUP utility required to resize LUKS volume"
resize_luks $NEWSIZE ;;
+ "btrfs") resize_btrfs $NEWSIZE ;;
which "$CRYPTSETUP" >"$NULL" 2>&1 || error "$CRYPTSETUP utility required to resize LUKS volume"
CMD=resize_luks ;;
+ "btrfs") CMD=resize_btrfs ;;
*) error "Filesystem \"$FSTYPE\" on device \"$VOLUME\" is not supported by this tool." ;;
esac || error "Resize $FSTYPE failed."
test -z "$CRYPT_SHRINK" || resize_crypt "$VOLUME_ORIG"
esac
@@ -748,6 +811,12 @@ check() {
"crypto_LUKS")
which "$CRYPTSETUP" > /dev/null 2>&1 || error "$CRYPTSETUP utility required."
check_luks ;;
+ "btrfs") #mount the device first and then run scrub
which "$CRYPTSETUP" >"$NULL" 2>&1 || error "$CRYPTSETUP utility required."
check_luks || error "Crypto luks check failed."
;;
+ "btrfs") #mount the device first and then run scrub
+ MOUNTPOINT=$TEMPDIR
+ temp_mount || error "Cannot mount btrfs filesystem"
+ dry "$BTRFS" scrub start -B "$VOLUME"

268
lvm.conf
View File

@ -78,14 +78,14 @@ devices {
# routines to acquire this information. For example, this information
# is used to drive LVM filtering like MD component detection, multipath
# component detection, partition detection and others.
#
#
# Accepted values:
# none
# No external device information source is used.
# udev
# Reuse existing udev database records. Applicable only if LVM is
# compiled with udev support.
#
#
external_device_info_source = "udev"
# Configuration option devices/hints.
@ -94,13 +94,13 @@ devices {
# scanning, and will only scan the listed PVs. Removing the hint file
# will cause lvm to generate a new one. Disable hints if PVs will
# be copied onto devices using non-lvm commands, like dd.
#
#
# Accepted values:
# all
# Use all hints.
# none
# Use no hints.
#
#
# This configuration option has an automatic default value.
# hints = "all"
@ -118,12 +118,44 @@ devices {
# Prefer the name with the least number of slashes.
# Prefer a name that is a symlink.
# Prefer the path with least value in lexicographical order.
#
#
# Example
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option devices/use_devicesfile.
# Enable or disable the use of a devices file.
# When enabled, lvm will only use devices that
# are listed in the devices file. A devices file will
# be used, regardless of this setting, when the --devicesfile
# option is set to a specific file name.
# This configuration option has an automatic default value.
# use_devicesfile = 0
# Configuration option devices/devicesfile.
# The name of the system devices file, listing devices that LVM should use.
# This should not be used to select a non-system devices file.
# The --devicesfile option is intended for alternative devices files.
# This configuration option has an automatic default value.
# devicesfile = "system.devices"
# Configuration option devices/search_for_devnames.
# Look outside of the devices file for missing devname entries.
# A devname entry is used for a device that does not have a stable
# device id, e.g. wwid, so the unstable device name is used as
# the device id. After reboot, or if the device is reattached,
# the device name may change, in which case lvm will not find
# the expected PV on the device listed in the devices file.
# This setting controls whether lvm will search other devices,
# outside the devices file, to look for the missing PV on a
# renamed device. If "none", lvm will not look at other devices,
# and the PV may appear to be missing. If "auto", lvm will look
# at other devices, but only those that are likely to have the PV.
# If "all", lvm will look at all devices on the system.
# This configuration option has an automatic default value.
# search_for_devnames = "auto"
# Configuration option devices/filter.
# Limit the block devices that are used by LVM commands.
# This is a list of regular expressions used to accept or reject block
@ -139,7 +171,7 @@ devices {
# then the device is accepted. Be careful mixing 'a' and 'r' patterns,
# as the combination might produce unexpected results (test changes.)
# Run vgscan after changing the filter to regenerate the cache.
#
#
# Example
# Accept every block device:
# filter = [ "a|.*|" ]
@ -151,7 +183,7 @@ devices {
# filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
# Use anchors to be very specific:
# filter = [ "a|^/dev/hda8$|", "r|.*|" ]
#
#
# This configuration option has an automatic default value.
# filter = [ "a|.*|" ]
# Below filter was used in SUSE/openSUSE before lvm2-2.03. It conflicts
@ -172,10 +204,10 @@ devices {
# List of additional acceptable block device types.
# These are of device type names from /proc/devices, followed by the
# maximum number of partitions.
#
#
# Example
# types = [ "fd", 16 ]
#
#
# This configuration option is advanced.
# This configuration option does not have a default value defined.
@ -217,7 +249,7 @@ devices {
# Configuration option devices/md_component_checks.
# The checks LVM should use to detect MD component devices.
# MD component devices are block devices used by MD software RAID.
#
#
# Accepted values:
# auto
# LVM will skip scanning the end of devices when it has other
@ -228,7 +260,7 @@ devices {
# full
# LVM will scan the start and end of devices for MD superblocks.
# This requires an extra read at the end of devices.
#
#
# This configuration option has an automatic default value.
# md_component_checks = "auto"
@ -241,16 +273,16 @@ devices {
# Configuration option devices/md_chunk_alignment.
# Align the start of a PV data area with md device's stripe-width.
# This applies if a PV is placed directly on an md device.
# default_data_alignment will be overriden if it is not aligned
# default_data_alignment will be overridden if it is not aligned
# with the value detected for this setting.
# This setting is overriden by data_alignment_detection,
# This setting is overridden by data_alignment_detection,
# data_alignment, and the --dataalignment option.
md_chunk_alignment = 1
# Configuration option devices/default_data_alignment.
# Align the start of a PV data area with this number of MiB.
# Set to 1 for 1MiB, 2 for 2MiB, etc. Set to 0 to disable.
# This setting is overriden by data_alignment and the --dataalignment
# This setting is overridden by data_alignment and the --dataalignment
# option.
# This configuration option has an automatic default value.
# default_data_alignment = 1
@ -264,9 +296,9 @@ devices {
# preferred unit of receiving I/O, e.g. MD stripe width.
# minimum_io_size is used if optimal_io_size is undefined (0).
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
# default_data_alignment and md_chunk_alignment will be overriden
# default_data_alignment and md_chunk_alignment will be overridden
# if they are not aligned with the value detected for this setting.
# This setting is overriden by data_alignment and the --dataalignment
# This setting is overridden by data_alignment and the --dataalignment
# option.
data_alignment_detection = 1
@ -275,7 +307,7 @@ devices {
# When non-zero, this setting overrides default_data_alignment.
# Set to 0 to disable, in which case default_data_alignment
# is used to align the first PE in units of MiB.
# This setting is overriden by the --dataalignment option.
# This setting is overridden by the --dataalignment option.
data_alignment = 0
# Configuration option devices/data_alignment_offset_detection.
@ -286,7 +318,7 @@ devices {
# partitioning will have an alignment_offset of 3584 bytes (sector 7
# is the lowest aligned logical block, the 4KiB sectors start at
# LBA -1, and consequently sector 63 is aligned on a 4KiB boundary).
# This setting is overriden by the --dataalignmentoffset option.
# This setting is overridden by the --dataalignmentoffset option.
data_alignment_offset_detection = 1
# Configuration option devices/ignore_suspended_devices.
@ -370,7 +402,7 @@ allocation {
# defined here, it will check whether any of them are attached to the
# PVs concerned and then seek to match those PV tags between existing
# extents and new extents.
#
#
# Example
# Use the special tag "@*" as a wildcard to match any PV tag:
# cling_tag_list = [ "@*" ]
@ -378,7 +410,7 @@ allocation {
# PVs are tagged with either @site1 or @site2 to indicate where
# they are situated:
# cling_tag_list = [ "@site1", "@site2" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option allocation/maximise_cling.
@ -437,25 +469,25 @@ allocation {
# Configuration option allocation/cache_metadata_format.
# Sets default metadata format for new cache.
#
#
# Accepted values:
# 0 Automatically detected best available format
# 1 Original format
# 2 Improved 2nd. generation format
#
#
# This configuration option has an automatic default value.
# cache_metadata_format = 0
# Configuration option allocation/cache_mode.
# The default cache mode used for new cache.
#
#
# Accepted values:
# writethrough
# Data blocks are immediately written from the cache to disk.
# writeback
# Data blocks are written from the cache back to disk after some
# delay to improve performance.
#
#
# This setting replaces allocation/cache_pool_cachemode.
# This configuration option has an automatic default value.
# cache_mode = "writethrough"
@ -496,6 +528,13 @@ allocation {
# This configuration option has an automatic default value.
# thin_pool_metadata_require_separate_pvs = 0
# Configuration option allocation/thin_pool_crop_metadata.
# Older version of lvm2 cropped pool's metadata size to 15.81 GiB.
# This is slightly less than the actual maximum 15.88 GiB.
# For compatibility with older version and use of cropped size set to 1.
# This configuration option has an automatic default value.
# thin_pool_crop_metadata = 0
# Configuration option allocation/thin_pool_zero.
# Thin pool data chunks are zeroed before they are first used.
# Zeroing with a larger thin pool chunk size reduces performance.
@ -504,18 +543,18 @@ allocation {
# Configuration option allocation/thin_pool_discards.
# The discards behaviour of thin pool volumes.
#
#
# Accepted values:
# ignore
# nopassdown
# passdown
#
#
# This configuration option has an automatic default value.
# thin_pool_discards = "passdown"
# Configuration option allocation/thin_pool_chunk_size_policy.
# The chunk size calculation policy for thin pool volumes.
#
#
# Accepted values:
# generic
# If thin_pool_chunk_size is defined, use it. Otherwise, calculate
@ -527,7 +566,7 @@ allocation {
# the chunk size for performance based on device hints exposed in
# sysfs - the optimal_io_size. The chunk size is always at least
# 512KiB.
#
#
# This configuration option has an automatic default value.
# thin_pool_chunk_size_policy = "generic"
@ -773,7 +812,6 @@ log {
# Configuration option log/file.
# Write error and debug log messages to a file specified here.
# e.g. file = "/var/log/lvm2.log"
# This configuration option does not have a default value defined.
# Configuration option log/overwrite.
@ -965,7 +1003,7 @@ global {
# Configuration option global/mirror_segtype_default.
# The segment type used by the short mirroring option -m.
# The --type mirror|raid1 option overrides this setting.
#
#
# Accepted values:
# mirror
# The original RAID1 implementation from LVM/DM. It is
@ -985,16 +1023,16 @@ global {
# handling a failure. This mirror implementation is not
# cluster-aware and cannot be used in a shared (active/active)
# fashion in a cluster.
#
#
mirror_segtype_default = "raid1"
# Configuration option global/support_mirrored_mirror_log.
# Enable mirrored 'mirror' log type for testing.
#
#
# This type is deprecated to create or convert to but can
# be enabled to test that activation of existing mirrored
# logs and conversion to disk/core works.
#
#
# Not supported for regular operation!
# This configuration option has an automatic default value.
# support_mirrored_mirror_log = 0
@ -1005,7 +1043,7 @@ global {
# The --stripes/-i and --mirrors/-m options can both be specified
# during the creation of a logical volume to use both striping and
# mirroring for the LV. There are two different implementations.
#
#
# Accepted values:
# raid10
# LVM uses MD's RAID10 personality through DM. This is the
@ -1015,7 +1053,7 @@ global {
# is done by creating a mirror LV on top of striped sub-LVs,
# effectively creating a RAID 0+1 array. The layering is suboptimal
# in terms of providing redundancy and performance.
#
#
raid10_segtype_default = "raid10"
# Configuration option global/sparse_segtype_default.
@ -1023,7 +1061,7 @@ global {
# The --type snapshot|thin option overrides this setting.
# The combination of -V and -L options creates a sparse LV. There are
# two different implementations.
#
#
# Accepted values:
# snapshot
# The original snapshot implementation from LVM/DM. It uses an old
@ -1035,7 +1073,7 @@ global {
# bigger minimal chunk size (64KiB) and uses a separate volume for
# metadata. It has better performance, especially when more data
# is used. It also supports full snapshots.
#
#
sparse_segtype_default = "thin"
# Configuration option global/lvdisplay_shows_full_device_path.
@ -1049,12 +1087,15 @@ global {
# Configuration option global/event_activation.
# Activate LVs based on system-generated device events.
# When a device appears on the system, a system-generated event runs
# the pvscan command to activate LVs if the new PV completes the VG.
# Use auto_activation_volume_list to select which LVs should be
# activated from these events (the default is all.)
# When event_activation is disabled, the system will generally run
# a direct activation command to activate LVs in complete VGs.
# When a PV appears on the system, a system-generated uevent triggers
# the lvm2-pvscan service which runs the pvscan --cache -aay command.
# If the new PV completes a VG, pvscan autoactivates LVs in the VG.
# When event_activation is disabled, the lvm2-activation services are
# generated and run at fixed points during system startup. These
# services run vgchange -aay to autoactivate LVs in VGs that happen
# to be present at that point in time.
# See the --setautoactivation option or the auto_activation_volume_list
# setting to configure autoactivation for specific VGs or LVs.
# This configuration option has an automatic default value.
# event_activation = 1
@ -1087,6 +1128,17 @@ global {
# This configuration option has an automatic default value.
# sanlock_lv_extend = 256
# Configuration option global/lvmlockctl_kill_command.
# The command that lvmlockctl --kill should use to force LVs offline.
# The lvmlockctl --kill command is run when a shared VG has lost
# access to locks (e.g. when sanlock has lost access to storage.)
# An empty string means that there will be no automatic attempt by
# lvmlockctl --kill to forcibly shut down LVs in the VG, and the user
# can manually intervene as described in lvmlockd(8).
# The VG name will be appended to the command specified here.
# This configuration option has an automatic default value.
# lvmlockctl_kill_command = ""
# Configuration option global/thin_check_executable.
# The full path to the thin_check command.
# LVM uses this command to check that a thin metadata device is in a
@ -1133,20 +1185,20 @@ global {
# causing problems. Features include: block_size, discards,
# discards_non_power_2, external_origin, metadata_resize,
# external_origin_extend, error_if_no_space.
#
#
# Example
# thin_disabled_features = [ "discards", "block_size" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option global/cache_disabled_features.
# Features to not use in the cache driver.
# This can be helpful for testing, or to avoid using a feature that is
# causing problems. Features include: policy_mq, policy_smq, metadata2.
#
#
# Example
# cache_disabled_features = [ "policy_smq" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option global/cache_check_executable.
@ -1198,6 +1250,16 @@ global {
# This configuration option has an automatic default value.
# vdo_format_options = [ "" ]
# Configuration option global/vdo_disabled_features.
# Features to not use in the vdo driver.
# This can be helpful for testing, or to avoid using a feature that is
# causing problems. Features include: online_rename
#
# Example
# vdo_disabled_features = [ "online_rename" ]
#
# This configuration option does not have a default value defined.
# Configuration option global/fsadm_executable.
# The full path to the fsadm command.
# LVM uses this command to help with lvresize -r operations.
@ -1210,7 +1272,7 @@ global {
# or vgimport.) A VG on shared storage devices is accessible only to
# the host with a matching system ID. See 'man lvmsystemid' for
# information on limitations and correct usage.
#
#
# Accepted values:
# none
# The host has no system ID.
@ -1227,7 +1289,7 @@ global {
# file
# Use the contents of another file (system_id_file) to set the
# system ID.
#
#
system_id_source = "none"
# Configuration option global/system_id_file.
@ -1279,7 +1341,7 @@ activation {
# Configuration option activation/udev_sync.
# Use udev notifications to synchronize udev and LVM.
# The --nodevsync option overrides this setting.
# The --noudevsync option overrides this setting.
# When disabled, LVM commands will not wait for notifications from
# udev, but continue irrespective of any possible udev processing in
# the background. Only use this if udev is not running or has rules
@ -1353,7 +1415,7 @@ activation {
# If this list is defined, an LV is only activated if it matches an
# entry in this list. If this list is undefined, it imposes no limits
# on LV activation (all are allowed).
#
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@ -1367,30 +1429,30 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/auto_activation_volume_list.
# Only LVs selected by this list are auto-activated.
# This list works like volume_list, but it is used only by
# auto-activation commands. It does not apply to direct activation
# commands. If this list is defined, an LV is only auto-activated
# if it matches an entry in this list. If this list is undefined, it
# imposes no limits on LV auto-activation (all are allowed.) If this
# list is defined and empty, i.e. "[]", then no LVs are selected for
# auto-activation. An LV that is selected by this list for
# auto-activation, must also be selected by volume_list (if defined)
# before it is activated. Auto-activation is an activation command that
# includes the 'a' argument: --activate ay or -a ay. The 'a' (auto)
# argument for auto-activation is meant to be used by activation
# commands that are run automatically by the system, as opposed to LVM
# commands run directly by a user. A user may also use the 'a' flag
# directly to perform auto-activation. Also see pvscan(8) for more
# information about auto-activation.
#
# A list of VGs or LVs that should be autoactivated.
# Autoactivation is an activation command run with -aay,
# i.e. vgchange -aay, lvchange -aay, or pvscan --cache -aay.
# When this list is defined, an autoactivation command will only
# activate LVs included in the list. If this list is undefined,
# it has no effect. If this list is defined but empty, then no
# LVs will be autoactivated. LVs can be included in the list by
# LV name, VG name (applies to all LVs in the VG), or tag name.
# VGs and LVs can also have an autoactivation property set in
# metadata, see --setautoactivation. LVs included in this list
# will not be autoactivated if the VG or LV autoactivation
# property is disabled (see vgs or lvs "-o autoactivation").
# The volume_list setting and the "activation skip" property
# also apply to autoactivation.
# The -aay option is meant to be used by activation commands that
# are run automatically by the system, e.g. from systemd services.
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@ -1404,10 +1466,10 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/read_only_volume_list.
@ -1416,7 +1478,7 @@ activation {
# against this list, and if it matches, it is activated in read-only
# mode. This overrides the permission setting stored in the metadata,
# e.g. from --permission rw.
#
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@ -1430,10 +1492,10 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/raid_region_size.
@ -1456,13 +1518,13 @@ activation {
# Configuration option activation/readahead.
# Setting to use when there is no readahead setting in metadata.
#
#
# Accepted values:
# none
# Disable readahead.
# auto
# Use default value chosen by kernel.
#
#
# This configuration option has an automatic default value.
# readahead = "auto"
@ -1474,7 +1536,7 @@ activation {
# performed by dmeventd automatically, and the steps performed by the
# manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
#
#
# Accepted values:
# warn
# Use the system log to warn the user that a device in the RAID LV
@ -1485,7 +1547,7 @@ activation {
# allocate
# Attempt to use any extra physical volumes in the VG as spares and
# replace faulty devices.
#
#
raid_fault_policy = "warn"
# Configuration option activation/mirror_image_fault_policy.
@ -1497,7 +1559,7 @@ activation {
# determines the steps performed by dmeventd automatically, and the steps
# performed by the manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
#
#
# Accepted values:
# remove
# Simply remove the faulty device and run without it. If the log
@ -1522,7 +1584,7 @@ activation {
# the redundant nature of the mirror. This policy acts like
# 'remove' if no suitable device and space can be allocated for the
# replacement.
#
#
mirror_image_fault_policy = "remove"
# Configuration option activation/mirror_log_fault_policy.
@ -1537,26 +1599,26 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see snapshot_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# snapshot_autoextend_threshold = 70
#
#
snapshot_autoextend_threshold = 100
# Configuration option activation/snapshot_autoextend_percent.
# Auto-extending a snapshot adds this percent extra space.
# The amount of additional space added to a snapshot is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# snapshot_autoextend_percent = 20
#
#
snapshot_autoextend_percent = 20
# Configuration option activation/thin_pool_autoextend_threshold.
@ -1565,26 +1627,26 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see thin_pool_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# thin_pool_autoextend_threshold = 70
#
#
thin_pool_autoextend_threshold = 100
# Configuration option activation/thin_pool_autoextend_percent.
# Auto-extending a thin pool adds this percent extra space.
# The amount of additional space added to a thin pool is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# thin_pool_autoextend_percent = 20
#
#
thin_pool_autoextend_percent = 20
# Configuration option activation/vdo_pool_autoextend_threshold.
@ -1593,13 +1655,13 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see vdo_pool_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 10G
# VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
# 8.4G, it is extended to 14.4G:
# vdo_pool_autoextend_threshold = 70
#
#
# This configuration option has an automatic default value.
# vdo_pool_autoextend_threshold = 100
@ -1607,7 +1669,7 @@ activation {
# Auto-extending a VDO pool adds this percent extra space.
# The amount of additional space added to a VDO pool is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 10G
# VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
@ -1626,10 +1688,10 @@ activation {
# pages corresponding to lines that match are not pinned. On some
# systems, locale-archive was found to make up over 80% of the memory
# used by the process.
#
#
# Example
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
#
#
# This configuration option is advanced.
# This configuration option does not have a default value defined.
@ -1670,7 +1732,7 @@ activation {
# Configuration option activation/activation_mode.
# How LVs with missing devices are activated.
# The --activationmode option overrides this setting.
#
#
# Accepted values:
# complete
# Only allow activation of an LV if all of the Physical Volumes it
@ -1685,7 +1747,7 @@ activation {
# could cause data loss with a portion of the LV inaccessible.
# This setting should not normally be used, but may sometimes
# assist with data recovery.
#
#
activation_mode = "degraded"
# Configuration option activation/lock_start_list.
@ -1733,7 +1795,7 @@ activation {
# Configuration option metadata/pvmetadatacopies.
# Number of copies of metadata to store on each PV.
# The --pvmetadatacopies option overrides this setting.
#
#
# Accepted values:
# 2
# Two copies of the VG metadata are stored on the PV, one at the
@ -1743,7 +1805,7 @@ activation {
# 0
# No copies of VG metadata are stored on the PV. This may be
# useful for VGs containing large numbers of PVs.
#
#
# This configuration option is advanced.
# This configuration option has an automatic default value.
# pvmetadatacopies = 1
@ -1893,7 +1955,7 @@ activation {
# sequences are copied verbatim. Each special character sequence is
# introduced by the '%' character and such sequence is then
# substituted with a value as described below.
#
#
# Accepted values:
# %a
# The abbreviated name of the day of the week according to the
@ -2016,7 +2078,7 @@ activation {
# The timezone name or abbreviation.
# %%
# A literal '%' character.
#
#
# This configuration option has an automatic default value.
# time_format = "%Y-%m-%d %T %z"
@ -2285,12 +2347,12 @@ dmeventd {
# applied to the local machine as a 'host tag'. If this subsection is
# empty (has no host_list), then the subsection name is always applied
# as a 'host tag'.
#
#
# Example
# The host tag foo is given to all hosts, and the host tag
# bar is given to the hosts named machine1 and machine2.
# tags { foo { } bar { host_list = [ "machine1", "machine2" ] } }
#
#
# This configuration section has variable name.
# This configuration section has an automatic default value.
# tag {

View File

@ -1,3 +1,182 @@
-------------------------------------------------------------------
Tue Jun 8 03:00:00 UTC 2021 - heming.zhao@suse.com
- update lvm2 from LVM2.03.10 to LVM2.2.03.12 (bsc#1187010)
*** WHATS_NEW from 2.03.11 to 2.03.12 ***
Version 2.03.12 - 07th May 2021
===============================
Allow attaching cache to thin data volume.
Fix memleak when generating list of outdated pvs.
Better hyphenation usage in man pages.
Replace use of deprecated security_context_t with char*.
Configure supports AIO_LIBS and AIO_CFLAGS.
Improve build process for static builds.
New --setautoactivation option to modify LV or VG auto activation.
New metadata based autoactivation property for LVs and VGs.
Improve signal handling with lvmpolld.
Signal handler can interrupt command also for SIGTERM.
Lvreduce --yes support.
Add configure option --with/out-symvers for non-glibc builds.
Report error when the filesystem is missing on fsadm resized volume.
Handle better blockdev with --getsize64 support for fsadm.
Do not include editline/history.h when using editline library.
Support error and zero segtype for thin-pool data for testing.
Support mixed extension for striped, error and zero segtypes.
Support resize also for stacked virtual volumes.
Skip dm-zero devices just like with dm-error target.
Reduce ioctl() calls when checking target status.
Merge polling does not fail, when LV is found to be already merged.
Poll volumes with at least 100ms delays.
Do not flush dm cache when cached LV is going to be removed.
New lvmlockctl_kill_command configuration option.
Support interruption while waiting on device close before deactivation.
Flush thin-pool messages before removing more thin volumes.
Improve hash function with less collisions and make it faster.
Reduce ioctl count when deactivating volumes.
Reduce number of metadata parsing.
Enhance performance of lvremove and vgremove commands.
Support interruption when taking archive and backup.
Accelerate large lvremoves.
Speedup search for cached device nodes.
Speedup command initialization.
Add devices file feature, off by default for now.
Support extension of writecached volumes.
Fix problem with unbound variable usage within fsadm.
Fix IMSM MD RAID detection on 4k devices.
Check for presence of VDO target before starting any conversion.
Support metadata profiles with volume VDO pool conversions.
Support -Zn for conversion of already formatted VDO pools.
Avoid removing LVs on error path of lvconvert during creation volumes.
Fix crashing lvdisplay when thin volume was waiting for merge.
Support option --errorwhenfull when converting volume to thin-pool.
Improve thin-performance profile support conversion to thin-pool.
Add workaround to avoid read of internal 'converted' devices.
Prohibit merging snapshot into the read-only thick snapshot origin.
Restore support for flipping rw/r permissions for thin snapshot origin.
Support resize of cached volumes.
Disable autoactivation with global/event_activation=0.
Check if lvcreate passes read_only_volume_list with tags and skips zeroing.
Allocation prints better error when metadata cannot fit on a single PV.
Pvmove can better resolve full thin-pool tree move.
Limit pool metadata spare to 16GiB.
Improves conversion and allocation of pool metadata.
Support thin pool metadata 15.88GiB, adds 64MiB, thin_pool_crop_metadata=0.
Enhance lvdisplay to report raid available/partial.
Support online rename of VDO pools.
Improve removal of pmspare when last pool is removed.
Fix problem with wiping of converted LVs.
Fix memleak in scanning (2.03.11).
Fix corner case allocation for thin-pools.
Version 2.03.11 - 08th January 2021
===================================
Fix pvck handling MDA at offset different from 4096.
Partial or degraded activation of writecache is not allowed.
Enhance error handling for fsadm and handle correct fsck result.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
Support interruption when wiping LVs.
Support interruption for bcache waiting.
Fix bcache when device has too many failing writes.
Fix bcache waiting for IO completion with failing disks.
Configure use own python path name order to prefer using python3.
Add configure --enable-editline support as an alternative to readline.
Enhance reporting and error handling when creating thin volumes.
Enable vgsplit for VDO volumes.
Lvextend of vdo pool volumes ensure at least 1 new VDO slab is added.
Use revert_lv() on reload error path after vg_revert().
Configure --with-integrity enabled.
Restore lost signal blocking while VG lock is held.
Improve estimation of needed extents when creating thin-pool.
Use extra 1% when resizing thin-pool metadata LV with --use-policy.
Enhance --use-policy percentage rounding.
Configure --with-vdo and --with-writecache as internal segments.
Improving VDO man page examples.
Allow pvmove of writecache origin.
Report integrity fields.
Integrity volumes defaults to journal mode.
Switch code base to use flexible array syntax.
Fix 64bit math when calculation cachevol size.
Preserve uint32_t for seqno handling.
Switch from mmap to plain read when loading regular files.
Update lvmvdo man page and better explain DISCARD usage.
*** WHATS_NEW_DM from 1.02.175 to 1.02.177 ***
Version 1.02.177 - 07th May 2021
================================
Configure proceeds without libaio to allow build of device-mapper only.
Fix symbol versioning build with -O2 -flto.
Add dm_tree_node_add_thin_pool_target_v1 with crop_metadata support.
- Drop patches that have been merged into upstream
- bug-1175565_01-tools-move-struct-element-before-variable-lenght-lis.patch
- bug-1175565_02-gcc-change-zero-sized-array-to-fexlible-array.patch
- bug-1175565_03-gcc-zero-sized-array-to-fexlible-array-C99.patch
- bug-1178680_add-metadata-based-autoactivation-property-for-VG-an.patch
- bug-1185190_01-pvscan-support-disabled-event_activation.patch
- bug-1185190_02-config-improve-description-for-event_activation.patch
- Add patch
+ 0001-lvmlockd-idm-Introduce-new-locking-scheme.patch
+ 0002-lvmlockd-idm-Hook-Seagate-IDM-wrapper-APIs.patch
+ 0003-lib-locking-Add-new-type-idm.patch
+ 0004-lib-locking-Parse-PV-list-for-IDM-locking.patch
+ 0005-tools-Add-support-for-idm-lock-type.patch
+ 0006-configure-Add-macro-LOCKDIDM_SUPPORT.patch
+ 0007-enable-command-syntax-for-thin-and-writecache.patch
+ 0008-lvremove-fix-removing-thin-pool-with-writecache-on-d.patch
+ 0009-vdo-fix-preload-of-kvdo.patch
+ 0010-writecache-fix-lv_on_pmem.patch
+ 0011-writecache-don-t-pvmove-device-used-by-writecache.patch
+ 0012-pvchange-fix-file-locking-deadlock.patch
+ 0013-tests-Enable-the-testing-for-IDM-locking-scheme.patch
+ 0014-tests-Support-multiple-backing-devices.patch
+ 0015-tests-Cleanup-idm-context-when-prepare-devices.patch
+ 0016-tests-Add-checking-for-lvmlockd-log.patch
+ 0017-tests-stress-Add-single-thread-stress-testing.patch
+ 0018-tests-stress-Add-multi-threads-stress-testing-for-VG.patch
+ 0019-tests-stress-Add-multi-threads-stress-testing-for-PV.patch
+ 0020-tests-Support-idm-failure-injection.patch
+ 0021-tests-Add-testing-for-lvmlockd-failure.patch
+ 0022-tests-idm-Add-testing-for-the-fabric-failure.patch
+ 0023-tests-idm-Add-testing-for-the-fabric-failure-and-tim.patch
+ 0024-tests-idm-Add-testing-for-the-fabric-s-half-brain-fa.patch
+ 0025-tests-idm-Add-testing-for-IDM-lock-manager-failure.patch
+ 0026-tests-multi-hosts-Add-VG-testing.patch
+ 0027-tests-multi-hosts-Add-LV-testing.patch
+ 0028-tests-multi-hosts-Test-lease-timeout-with-LV-exclusi.patch
+ 0029-tests-multi-hosts-Test-lease-timeout-with-LV-shareab.patch
+ 0030-fix-empty-mem-pool-leak.patch
+ 0031-tests-writecache-blocksize-add-dm-cache-tests.patch
+ 0032-tests-rename-test.patch
+ 0033-tests-add-writecache-cache-blocksize-2.patch
+ 0034-lvmlockd-Fix-the-compilation-warning.patch
+ 0035-devices-don-t-use-deleted-loop-backing-file-for-devi.patch
+ 0036-man-help-fix-common-option-listing.patch
+ 0037-archiving-take-archive-automatically.patch
+ 0038-backup-automatically-store-data-on-vg_unlock.patch
+ 0039-archive-avoid-abuse-of-internal-flag.patch
+ 0040-pvck-add-lock_global-before-clean_hint_file.patch
+ 0041-lvmdevices-add-deviceidtype-option.patch
- Update patch
- bug-1184687_Add-nolvm-for-kernel-cmdline.patch
- fate-31841_fsadm-add-support-for-btrfs.patch
- lvm.conf
- trim tail space
- fix typo
- [new item] devices/use_devicesfile
- [new item] devices/devicesfile
- [new item] devices/search_for_devnames
- [new item] allocation/thin_pool_crop_metadata
- [new item] global/lvmlockctl_kill_command
- [new item] global/vdo_disabled_features
-------------------------------------------------------------------
Tue May 25 11:23:12 UTC 2021 - Wolfgang Frisch <wolfgang.frisch@suse.com>

View File

@ -1,5 +1,5 @@
#
# spec file
# spec file for package lvm2
#
# Copyright (c) 2021 SUSE LLC
#
@ -21,8 +21,8 @@
%define libname_event libdevmapper-event1_03
%define _udevdir %(pkg-config --variable=udevdir udev)
%define cmdlib liblvm2cmd2_03
%define lvm2_version 2.03.10
%define device_mapper_version 1.02.173
%define lvm2_version 2.03.12
%define device_mapper_version 1.02.177
%define thin_provisioning_version 0.7.0
%define _supportsanlock 0
%define dlm_version 4.0.9
@ -60,13 +60,50 @@ Source: ftp://sourceware.org/pub/lvm2/LVM2.%{version}.tgz
Source1: lvm.conf
Source42: ftp://sourceware.org/pub/lvm2/LVM2.%{version}.tgz.asc
Source99: baselibs.conf
# Upstream patches
Patch0001: bug-1175565_01-tools-move-struct-element-before-variable-lenght-lis.patch
Patch0002: bug-1175565_02-gcc-change-zero-sized-array-to-fexlible-array.patch
Patch0003: bug-1175565_03-gcc-zero-sized-array-to-fexlible-array-C99.patch
Patch0004: bug-1178680_add-metadata-based-autoactivation-property-for-VG-an.patch
Patch0005: bug-1185190_01-pvscan-support-disabled-event_activation.patch
Patch0006: bug-1185190_02-config-improve-description-for-event_activation.patch
Patch0001: 0001-lvmlockd-idm-Introduce-new-locking-scheme.patch
Patch0002: 0002-lvmlockd-idm-Hook-Seagate-IDM-wrapper-APIs.patch
Patch0003: 0003-lib-locking-Add-new-type-idm.patch
Patch0004: 0004-lib-locking-Parse-PV-list-for-IDM-locking.patch
Patch0005: 0005-tools-Add-support-for-idm-lock-type.patch
Patch0006: 0006-configure-Add-macro-LOCKDIDM_SUPPORT.patch
Patch0007: 0007-enable-command-syntax-for-thin-and-writecache.patch
Patch0008: 0008-lvremove-fix-removing-thin-pool-with-writecache-on-d.patch
Patch0009: 0009-vdo-fix-preload-of-kvdo.patch
Patch0010: 0010-writecache-fix-lv_on_pmem.patch
Patch0011: 0011-writecache-don-t-pvmove-device-used-by-writecache.patch
Patch0012: 0012-pvchange-fix-file-locking-deadlock.patch
Patch0013: 0013-tests-Enable-the-testing-for-IDM-locking-scheme.patch
Patch0014: 0014-tests-Support-multiple-backing-devices.patch
Patch0015: 0015-tests-Cleanup-idm-context-when-prepare-devices.patch
Patch0016: 0016-tests-Add-checking-for-lvmlockd-log.patch
Patch0017: 0017-tests-stress-Add-single-thread-stress-testing.patch
Patch0018: 0018-tests-stress-Add-multi-threads-stress-testing-for-VG.patch
Patch0019: 0019-tests-stress-Add-multi-threads-stress-testing-for-PV.patch
Patch0020: 0020-tests-Support-idm-failure-injection.patch
Patch0021: 0021-tests-Add-testing-for-lvmlockd-failure.patch
Patch0022: 0022-tests-idm-Add-testing-for-the-fabric-failure.patch
Patch0023: 0023-tests-idm-Add-testing-for-the-fabric-failure-and-tim.patch
Patch0024: 0024-tests-idm-Add-testing-for-the-fabric-s-half-brain-fa.patch
Patch0025: 0025-tests-idm-Add-testing-for-IDM-lock-manager-failure.patch
Patch0026: 0026-tests-multi-hosts-Add-VG-testing.patch
Patch0027: 0027-tests-multi-hosts-Add-LV-testing.patch
Patch0028: 0028-tests-multi-hosts-Test-lease-timeout-with-LV-exclusi.patch
Patch0029: 0029-tests-multi-hosts-Test-lease-timeout-with-LV-shareab.patch
Patch0030: 0030-fix-empty-mem-pool-leak.patch
Patch0031: 0031-tests-writecache-blocksize-add-dm-cache-tests.patch
Patch0032: 0032-tests-rename-test.patch
Patch0033: 0033-tests-add-writecache-cache-blocksize-2.patch
Patch0034: 0034-lvmlockd-Fix-the-compilation-warning.patch
Patch0035: 0035-devices-don-t-use-deleted-loop-backing-file-for-devi.patch
Patch0036: 0036-man-help-fix-common-option-listing.patch
Patch0037: 0037-archiving-take-archive-automatically.patch
Patch0038: 0038-backup-automatically-store-data-on-vg_unlock.patch
Patch0039: 0039-archive-avoid-abuse-of-internal-flag.patch
Patch0040: 0040-pvck-add-lock_global-before-clean_hint_file.patch
Patch0041: 0041-lvmdevices-add-deviceidtype-option.patch
# SUSE patches: 1000+ for LVM
# Never upstream
Patch1001: cmirrord_remove_date_time_from_compilation.patch
@ -131,6 +168,41 @@ Volume Manager.
%patch0004 -p1
%patch0005 -p1
%patch0006 -p1
%patch0007 -p1
%patch0008 -p1
%patch0009 -p1
%patch0010 -p1
%patch0011 -p1
%patch0012 -p1
%patch0013 -p1
%patch0014 -p1
%patch0015 -p1
%patch0016 -p1
%patch0017 -p1
%patch0018 -p1
%patch0019 -p1
%patch0020 -p1
%patch0021 -p1
%patch0022 -p1
%patch0023 -p1
%patch0024 -p1
%patch0025 -p1
%patch0026 -p1
%patch0027 -p1
%patch0028 -p1
%patch0029 -p1
%patch0030 -p1
%patch0031 -p1
%patch0032 -p1
%patch0033 -p1
%patch0034 -p1
%patch0035 -p1
%patch0036 -p1
%patch0037 -p1
%patch0038 -p1
%patch0039 -p1
%patch0040 -p1
%patch0041 -p1
%patch1001 -p1
%patch1002 -p1
%patch1003 -p1