Accepting request 84012 from network:ha-clustering:Factory

Update to version 1.1.6

OBS-URL: https://build.opensuse.org/request/show/84012
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/pacemaker?expand=0&rev=39
Sascha Peilicke 2011-09-21 15:14:27 +00:00 committed by Git OBS Bridge
commit e14c971d8a
25 changed files with 5255 additions and 5 deletions

acl_fix_d44ff2711662.patch

@@ -0,0 +1,163 @@
# HG changeset patch
# User Andrew Beekhof <andrew@beekhof.net>
# Date 1314251944 -36000
# Node ID d44ff2711662517d91b542b122218cffa2af3eb1
# Parent 4cc8fdf2827a31d41b48b8c97d784c75c9418eda
Low: cib: Remove the remaining uses of the xml_child_iter() macro
diff --git a/lib/cib/cib_acl.c b/lib/cib/cib_acl.c
--- a/lib/cib/cib_acl.c
+++ b/lib/cib/cib_acl.c
@@ -159,8 +159,7 @@ acl_check_diff(xmlNode *request, xmlNode
orig_diff = diff_xml_object_orig(current_cib, result_cib, FALSE, diff);
- xml_child_iter(
- orig_diff, diff_child,
+ for (diff_child = __xml_first_child(orig_diff); diff_child; diff_child = __xml_next(diff_child)) {
const char *tag = crm_element_name(diff_child);
GListPtr parsed_acl = NULL;
@@ -176,8 +175,7 @@ acl_check_diff(xmlNode *request, xmlNode
continue;
}
- xml_child_iter(
- diff_child, diff_cib,
+ for (diff_cib = __xml_first_child(diff_child); diff_cib; diff_cib = __xml_next(diff_cib)) {
GHashTable *xml_perms = NULL;
gen_xml_perms(diff_cib, parsed_acl, &xml_perms);
@@ -188,9 +186,9 @@ acl_check_diff(xmlNode *request, xmlNode
crm_warn("User '%s' doesn't have enough permission to modify the CIB objects", user);
goto done;
}
- );
+ }
free_acl(parsed_acl);
- );
+ }
done:
free_xml(orig_diff);
@@ -264,8 +262,7 @@ unpack_user_acl(xmlNode *xml_acls, const
return FALSE;
}
- xml_child_iter(
- xml_acls, xml_acl,
+ for (xml_acl = __xml_first_child(xml_acls); xml_acl; xml_acl = __xml_next(xml_acl)) {
const char *tag = crm_element_name(xml_acl);
const char *id = crm_element_value(xml_acl, XML_ATTR_ID);
@@ -276,7 +273,7 @@ unpack_user_acl(xmlNode *xml_acls, const
return TRUE;
}
}
- );
+ }
return FALSE;
}
@@ -296,8 +293,7 @@ user_match(const char *user, const char
static gboolean
unpack_acl(xmlNode *xml_acls, xmlNode *xml_acl, GListPtr *acl)
{
- xml_child_iter(
- xml_acl, acl_child,
+ for (acl_child = __xml_first_child(xml_acl); acl_child; acl_child = __xml_next(acl_child)) {
const char *tag = crm_element_name(acl_child);
if (crm_str_eq(XML_ACL_TAG_ROLE_REF, tag, TRUE)) {
@@ -316,8 +312,8 @@ unpack_acl(xmlNode *xml_acls, xmlNode *x
static gboolean
unpack_role_acl(xmlNode *xml_acls, const char *role, GListPtr *acl)
{
- xml_child_iter_filter(
- xml_acls, xml_acl, XML_ACL_TAG_ROLE,
+ for (xml_acl = __xml_first_child(xml_acls); xml_acl; xml_acl = __xml_next(xml_acl)) {
+ if(crm_str_eq(XML_ACL_TAG_ROLE, (const char *)xml_acl->name, TRUE)) {
const char *role_id = crm_element_value(xml_acl, XML_ATTR_ID);
if (role_id && crm_str_eq(role, role_id, TRUE)) {
@@ -325,7 +321,8 @@ unpack_role_acl(xmlNode *xml_acls, const
unpack_acl(xml_acls, xml_acl, acl);
return TRUE;
}
- );
+ }
+ }
return FALSE;
}
@@ -495,12 +492,11 @@ search_xml_children(GListPtr *children,
}
if(search_matches || match_found == 0) {
- xml_child_iter(
- root, child,
+ for (child = __xml_first_child(root); child; child = __xml_next(child)) {
match_found += search_xml_children(
children, child, tag, field, value,
search_matches);
- );
+ }
}
return match_found;
@@ -563,10 +559,9 @@ update_xml_perms(xmlNode *xml, acl_obj_t
crm_debug_3("Permission for element: element_mode=%s, tag=%s, id=%s",
perm->mode, crm_element_name(xml), crm_element_value(xml, XML_ATTR_ID));
- xml_child_iter(
- xml, child,
+ for (child = __xml_first_child(xml); child; child = __xml_next(child)) {
update_xml_children_perms(child, perm->mode, xml_perms);
- );
+ }
} else {
if (perm->attribute_perms == NULL
@@ -610,10 +605,9 @@ update_xml_children_perms(xmlNode *xml,
crm_debug_4("Permission for child element: element_mode=%s, tag=%s, id=%s",
mode, crm_element_name(xml), crm_element_value(xml, XML_ATTR_ID));
- xml_child_iter(
- xml, child,
+ for (child = __xml_first_child(xml); child; child = __xml_next(child)) {
update_xml_children_perms(child, mode, xml_perms);
- );
+ }
return TRUE;
}
@@ -647,12 +641,11 @@ acl_filter_xml(xmlNode *xml, GHashTable
xml_perm_t *perm = NULL;
int allow_counter = 0;
- xml_child_iter(
- xml, child,
+ for (child = __xml_first_child(xml); child; child = __xml_next(child)) {
if (acl_filter_xml(child, xml_perms) == FALSE) {
children_counter++;
}
- );
+ }
g_hash_table_lookup_extended(xml_perms, xml, NULL, (gpointer)&perm);
@@ -720,12 +713,11 @@ acl_check_diff_xml(xmlNode *xml, GHashTa
{
xml_perm_t *perm = NULL;
- xml_child_iter(
- xml, child,
+ for (child = __xml_first_child(xml); child; child = __xml_next(child)) {
if (acl_check_diff_xml(child, xml_perms) == FALSE) {
return FALSE;
}
- );
+ }
g_hash_table_lookup_extended(xml_perms, xml, NULL, (gpointer)&perm);

crm_deleteunmanaged.patch

@@ -0,0 +1,100 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313755383 -7200
# Node ID e8ea8fb95f310997995576ee831693b0d3b2736a
# Parent 0abb257259ed722abaa32a237c3c284c08ec0737
Medium: Shell: enable removal of unmanaged resources (bnc#696506)
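
The new is_rsc_managed helper added below walks a fixed precedence chain of CIB settings. As a rough, condensed sketch of that order, with the CIB lookups flattened into plain string arguments (function and argument names here are illustrative, not part of the patch):

    def is_managed(maintenance_mode, rsc_is_managed, rsc_default, legacy_default):
        # Hypothetical flat arguments standing in for the CIB lookups the patch
        # performs: cluster-wide maintenance-mode wins, then the resource's own
        # is-managed meta attribute, then rsc_defaults, then is-managed-default.
        def xs_true(value):
            return value is not None and value.lower() in ("true", "1")
        if xs_true(maintenance_mode):
            return False
        for value in (rsc_is_managed, rsc_default, legacy_default):
            if value is not None:
                return xs_true(value)
        return True
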
diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py
--- a/shell/modules/cibconfig.py
+++ b/shell/modules/cibconfig.py
@@ -2303,7 +2303,7 @@ class CibFactory(Singleton):
no_object_err(obj_id)
rc = False
continue
- if is_rsc_running(obj_id):
+ if is_rsc_managed(obj_id) and is_rsc_running(obj_id):
common_err("resource %s is running, can't delete it" % obj_id)
rc = False
else:
diff --git a/shell/modules/xmlutil.py b/shell/modules/xmlutil.py
--- a/shell/modules/xmlutil.py
+++ b/shell/modules/xmlutil.py
@@ -178,6 +178,34 @@ def shadowfile(name):
def shadow2doc(name):
return file2doc(shadowfile(name))
+def is_xs_boolean_true(bool):
+ return bool.lower() in ("true","1")
+def is_rsc_managed(id):
+ if not is_live_cib():
+ return False
+ rsc_node = rsc2node(id)
+ if not rsc_node:
+ return False
+ prop_node = get_properties_node(get_conf_elem(cibdump2doc("crm_config"), "crm_config"))
+ # maintenance-mode, if true, overrides all
+ attr = get_attr_value(prop_node, "maintenance-mode")
+ if attr and is_xs_boolean_true(attr):
+ return False
+ # then check the rsc is-managed meta attribute
+ rsc_meta_node = get_rsc_meta_node(rsc_node)
+ attr = get_attr_value(rsc_meta_node, "is-managed")
+ if attr:
+ return is_xs_boolean_true(attr)
+ # then rsc_defaults is-managed attribute
+ rsc_dflt_node = get_rscop_defaults_meta_node(get_conf_elem(cibdump2doc("rsc_defaults"), "rsc_defaults"))
+ attr = get_attr_value(rsc_dflt_node, "is-managed")
+ if attr:
+ return is_xs_boolean_true(attr)
+ # finally the is-managed-default property
+ attr = get_attr_value(prop_node, "is-managed-default")
+ if attr:
+ return is_xs_boolean_true(attr)
+ return True
def is_rsc_running(id):
if not is_live_cib():
return False
@@ -691,12 +719,20 @@ def silly_constraint(c_node,rsc_id):
def get_rsc_children_ids(node):
return [x.getAttribute("id") \
for x in node.childNodes if is_child_rsc(x)]
-def get_rscop_defaults_meta_node(node):
+def get_child_nvset_node(node, attr_set = "meta_attributes"):
+ if not node:
+ return None
for c in node.childNodes:
- if not is_element(c) or c.tagName != "meta_attributes":
+ if not is_element(c) or c.tagName != attr_set:
continue
return c
return None
+def get_rscop_defaults_meta_node(node):
+ return get_child_nvset_node(node)
+def get_rsc_meta_node(node):
+ return get_child_nvset_node(node)
+def get_properties_node(node):
+ return get_child_nvset_node(node, attr_set = "cluster_property_set")
def new_cib():
doc = xml.dom.minidom.Document()
@@ -727,12 +763,19 @@ def new_cib_element(node,tagname,id_pfx)
node.appendChild(newnode)
return newnode
def get_attr_in_set(node,attr):
+ if not node:
+ return None
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "nvpair" and c.getAttribute("name") == attr:
return c
return None
+def get_attr_value(node,attr):
+ n = get_attr_in_set(node,attr)
+ if not n:
+ return None
+ return n.getAttribute("value")
def set_attr(node,attr,value):
'''
Set an attribute in the attribute set.

@@ -0,0 +1,13 @@
Index: pacemaker/shell/modules/report.py
===================================================================
--- pacemaker.orig/shell/modules/report.py
+++ pacemaker/shell/modules/report.py
@@ -643,6 +643,8 @@ class Report(Singleton):
def set_source(self,src):
'Set our source.'
self.source = src
+ if self.source != "live":
+ self.reset_period()
def set_period(self,from_dt,to_dt):
'''
Set from/to_dt.

crm_history.patch (diff suppressed: too large to display)

@@ -0,0 +1,38 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314279513 -7200
# Node ID d21f988a419c0c7fa349c4e26f6b500944d91370
# Parent 709ef91cfada2822aca53dcef085ddb6952393c5
Low: Shell: look for log segments with more care and don't throw exception on seek (bnc#713939)
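
The fix guards the relative seek that used to raise once the file position was already within the first kilobyte. A minimal standalone sketch of that guard, assuming the log is opened in binary mode (Python restricts relative seeks on text-mode files); names are illustrative:

    import os

    def step_back(f, step=1000):
        # Go back roughly `step` bytes (about ten syslog lines); clamp at the
        # start of the file instead of letting the relative seek raise.
        if f.tell() < step:
            f.seek(0)        # already near the top of the file
            return False     # tell the caller we hit the edge
        f.seek(-step, os.SEEK_CUR)
        return True
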
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -72,8 +72,15 @@ def seek_to_edge(f, ts, to_end):
Linear search, but should be short.
'''
if not to_end:
+ beg = 0
while ts == get_timestamp(f):
- f.seek(-1000, 1) # go back 10 or so lines
+ if f.tell() < 1000:
+ f.seek(0) # otherwise, the seek below throws an exception
+ if beg > 0: # avoid infinite loop
+ return # goes all the way to the top
+ beg += 1
+ else:
+ f.seek(-1000, 1) # go back 10 or so lines
while True:
pos = f.tell()
s = f.readline()
@@ -86,8 +93,8 @@ def seek_to_edge(f, ts, to_end):
def log_seek(f, ts, to_end = False):
'''
f is an open log. Do binary search for the timestamp.
- Return the position of the (more or less) first line with a
- newer time.
+ Return the position of the (more or less) first line with an
+ earlier (or later) time.
'''
first = 0
f.seek(0,2)

@@ -0,0 +1,298 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314632951 -7200
# Node ID ccd0c1e1edf9f23cafb4363014acba755f1b4e25
# Parent d21f988a419c0c7fa349c4e26f6b500944d91370
Medium: Shell: several history improvements
- add more patterns for fencing
- handle better PE files number reaching limit
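
The documentation change in this patch notes that log messages from several nodes are merged and sorted on their syslog timestamps. As a rough, self-contained illustration of that merge (Python 3; the helper names and the single-year assumption are mine, not the shell's):

    import heapq
    import time

    def syslog_key(line):
        # Syslog timestamps such as "Aug 25 09:19:04" omit the year, so this
        # only orders lines correctly within a report that stays in one year.
        try:
            t = time.strptime(line[:15], "%b %d %H:%M:%S")
            return (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        except ValueError:
            return (0, 0, 0, 0, 0)   # malformed lines sort first

    def merged_log_lines(paths):
        # Each per-node log is already in time order, so a k-way merge suffices.
        files = [open(p, errors="replace") for p in paths]
        try:
            for line in heapq.merge(*files, key=syslog_key):
                yield line
        finally:
            for f in files:
                f.close()
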
diff --git a/doc/crm.8.txt b/doc/crm.8.txt
--- a/doc/crm.8.txt
+++ b/doc/crm.8.txt
@@ -2426,7 +2426,8 @@ Example:
The `latest` command shows a bit of recent history, more
precisely whatever happened since the last cluster change (the
-latest transition).
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
Usage:
...............
@@ -2540,10 +2541,13 @@ Example:
setnodes node_a node_b
...............
-[[cmdhelp_history_resource,resource failed actions]]
+[[cmdhelp_history_resource,resource events]]
==== `resource`
-Show status changes and any failures that happened on a resource.
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions.
Usage:
...............
@@ -2551,14 +2555,17 @@ Usage:
...............
Example:
...............
- resource mydb
+ resource bigdb public_ip
+ resource bigdb:0
+ resource bigdb:.
...............
[[cmdhelp_history_node,node events]]
==== `node`
Show important events that happened on a node. Important events
-are node lost and join, standby and online, and fence.
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
Usage:
...............
@@ -2572,7 +2579,17 @@ Example:
[[cmdhelp_history_log,log content]]
==== `log`
-Show logs for a node or combined logs of all nodes.
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulations supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog which
+normally has the maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the same way as they happened. Such close events
+may actually happen fairly often.
Usage:
...............
@@ -2634,8 +2651,8 @@ the transition are printed.
Usage:
...............
- transition [<number>|<file>] [nograph] [v...] [scores] [actions] [utilization]
- transition showdot [<number>|<file>]
+ transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>|<index>|<file>]
...............
Examples:
...............
diff --git a/shell/modules/log_patterns.py b/shell/modules/log_patterns.py
--- a/shell/modules/log_patterns.py
+++ b/shell/modules/log_patterns.py
@@ -12,34 +12,41 @@
# detail level 0 is the lowest, i.e. should match the least
# number of relevant messages
-# NB: If you modify this file, you must follow python syntax!
+# NB:
+# %% stands for whatever user input we get, for instance a
+# resource name or node name or just some regular expression
+# in optimal case, it should be surrounded by literals
+#
+# [Note that resources may contain clone numbers!]
log_patterns = {
"resource": (
( # detail 0
- "lrmd:.*rsc:%%.*(start|stop|promote|demote|migrate)",
- "lrmd:.*RA output:.*%%.*stderr",
- "lrmd:.*WARN:.*Managed.*%%.*exited",
+ "lrmd:.*rsc:%% (start|stop|promote|demote|migrate)",
+ "lrmd:.*RA output: .%%:.*:stderr",
+ "lrmd:.*WARN: Managed %%:.*exited",
),
( # detail 1
- "lrmd:.*rsc:%%.*(probe|notify)",
- "lrmd:.*info:.*Managed.*%%.*exited",
+ "lrmd:.*rsc:%%:.*(probe|notify)",
+ "lrmd:.*info: Managed %%:.*exited",
),
),
"node": (
( # detail 0
- "%%.*Corosync.Cluster.Engine",
- "%%.*Executive.Service.RELEASE",
- "%%.*crm_shutdown:.Requesting.shutdown",
- "%%.*pcmk_shutdown:.Shutdown.complete",
- "%%.*Configuration.validated..Starting.heartbeat",
- "pengine.*Scheduling Node %%",
- "te_fence_node.*Exec.*%%",
- "stonith-ng.*log_oper.*reboot.*%%",
- "stonithd.*to STONITH.*%%",
- "stonithd.*fenced node %%",
- "pcmk_peer_update.*(lost|memb): %%",
- "crmd.*ccm_event.*(NEW|LOST) %%",
+ " %% .*Corosync.Cluster.Engine",
+ " %% .*Executive.Service.RELEASE",
+ " %% .*crm_shutdown:.Requesting.shutdown",
+ " %% .*pcmk_shutdown:.Shutdown.complete",
+ " %% .*Configuration.validated..Starting.heartbeat",
+ "pengine.*Scheduling Node %% for STONITH",
+ "crmd.* tengine_stonith_callback: .* of %% failed",
+ "stonith-ng.*log_operation:.*host '%%'",
+ "te_fence_node: Exec.*on %% ",
+ "pe_fence_node: Node %% will be fenced",
+ "stonith-ng.*remote_op_timeout:.*for %% timed",
+ "stonithd.*Succeeded.*node %%:",
+ "pcmk_peer_update.*(lost|memb): %% ",
+ "crmd.*ccm_event.*(NEW|LOST):.* %% ",
),
( # detail 1
),
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -589,7 +589,7 @@ class Report(Singleton):
except IOError,msg:
common_err("open %s: %s"%(fl[0],msg))
continue
- pe_l = self.get_transitions([x for x in f], keep_pe_path = True)
+ pe_l = self.list_transitions([x for x in f], future_pe = True)
if pe_l:
l.append([node,pe_l])
return l
@@ -752,12 +752,13 @@ class Report(Singleton):
for n in self.cibnode_l:
self.nodecolor[n] = self.nodecolors[i]
i = (i+1) % len(self.nodecolors)
- def get_transitions(self, msg_l = None, keep_pe_path = False):
+ def list_transitions(self, msg_l = None, future_pe = False):
'''
- Get a list of transitions.
+ List transitions by reading logs.
Empty transitions are skipped.
- Some callers need original PE file path (keep_pe_path),
- otherwise we produce the path within the report.
+ Some callers need original PE file path (future_pe),
+ otherwise we produce the path within the report and check
+ if the transition files exist.
If the caller doesn't provide the message list, then we
build it from the collected log files (self.logobj).
Otherwise, we get matches for transition patterns.
@@ -786,11 +787,18 @@ class Report(Singleton):
continue
elif num_actions == -1: # couldn't find messages
common_warn("could not find number of actions for transition (%s)" % pe_base)
- common_debug("found PE input at %s: %s" % (node, pe_file))
- if keep_pe_path:
- pe_l.append(pe_file)
+ if not future_pe:
+ pe_l_file = os.path.join(self.loc, node, "pengine", pe_base)
+ if not os.path.isfile(pe_l_file):
+ warn_once("%s in the logs, but not in the report" % pe_l_file)
+ continue
else:
- pe_l.append(os.path.join(self.loc, node, "pengine", pe_base))
+ pe_l_file = "%s:%s" % (node, pe_file)
+ if pe_l_file in pe_l:
+ common_warn("duplicate %s, replacing older PE file" % pe_l_file)
+ pe_l.remove(pe_l_file)
+ common_debug("found PE input: %s" % pe_l_file)
+ pe_l.append(pe_l_file)
return pe_l
def report_setup(self):
if not self.loc:
@@ -802,11 +810,7 @@ class Report(Singleton):
self.set_node_colors()
self.logobj = LogSyslog(self.central_log, self.log_l, \
self.from_dt, self.to_dt)
- self.peinputs_l = self.get_transitions()
- for pe_input in self.peinputs_l:
- if not os.path.isfile(pe_input):
- warn_once("%s in the logs, but not in the report" % pe_input)
- self.peinputs_l.remove(pe_input)
+ self.peinputs_l = self.list_transitions()
def prepare_source(self):
'''
Unpack a hb_report tarball.
@@ -859,7 +863,7 @@ class Report(Singleton):
if not args:
re_l = mk_re_list(patt_l,"")
else:
- re_l = mk_re_list(patt_l,r'(%s)\W' % "|".join(args))
+ re_l = mk_re_list(patt_l,r'(%s)' % "|".join(args))
return re_l
def disp(self, s):
'color output'
@@ -886,11 +890,6 @@ class Report(Singleton):
self.error("no logs found")
return
self.display_logs(self.logobj.get_matches(re_l, log_l))
- def match_args(self, cib_l, args):
- for a in args:
- a_clone = re.sub(r':.*', '', a)
- if not (a in cib_l) and not (a_clone in cib_l):
- self.warn("%s not found in report, proceeding anyway" % a)
def get_desc_line(self,fld):
try:
f = open(self.desc)
@@ -923,8 +922,9 @@ class Report(Singleton):
'''
Show all events.
'''
- all_re_l = self.build_re("resource",self.cibrsc_l) + \
- self.build_re("node",self.cibnode_l)
+ all_re_l = self.build_re("resource", self.cibrsc_l) + \
+ self.build_re("node", self.cibnode_l) + \
+ self.build_re("events", [])
if not all_re_l:
self.error("no resources or nodes found")
return False
@@ -940,6 +940,7 @@ class Report(Singleton):
te_invoke_patt = transition_patt[0].replace("%%", pe_num)
run_patt = transition_patt[1].replace("%%", pe_num)
r = None
+ msg_l.reverse()
for msg in msg_l:
r = re.search(te_invoke_patt, msg)
if r:
@@ -1009,7 +1010,6 @@ class Report(Singleton):
expanded_l += self.cibgrp_d[a]
else:
expanded_l.append(a)
- self.match_args(self.cibrsc_l,expanded_l)
rsc_re_l = self.build_re("resource",expanded_l)
if not rsc_re_l:
return False
@@ -1020,7 +1020,6 @@ class Report(Singleton):
'''
if not self.prepare_source():
return False
- self.match_args(self.cibnode_l,args)
node_re_l = self.build_re("node",args)
if not node_re_l:
return False
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1877,16 +1877,16 @@ Examine Pacemaker's history: node and re
def _get_pe_byidx(self, idx):
l = crm_report.pelist()
if len(l) < abs(idx):
- common_err("pe input file for index %d not found" % (idx+1))
+ common_err("PE input file for index %d not found" % (idx+1))
return None
return l[idx]
def _get_pe_bynum(self, n):
l = crm_report.pelist([n])
if len(l) == 0:
- common_err("%s: PE file %d not found" % n)
+ common_err("PE file %d not found" % n)
return None
elif len(l) > 1:
- common_err("%s: PE file %d ambiguous" % n)
+ common_err("PE file %d ambiguous" % n)
return None
return l[0]
def transition(self,cmd,*args):

@@ -0,0 +1,20 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1312579593 -7200
# Node ID d0359dca5dba3fd6fee856d51cca5ee7ac752ee6
# Parent a7acb683b3568ca81d90472f770b0270270d5dfd
Low: Shell: relax host key checking in pssh
diff -r a7acb683b356 -r d0359dca5dba shell/modules/crm_pssh.py
--- a/shell/modules/crm_pssh.py Fri Aug 05 23:13:37 2011 +0200
+++ b/shell/modules/crm_pssh.py Fri Aug 05 23:26:33 2011 +0200
@@ -84,7 +84,8 @@ def do_pssh(l, opts):
hosts = []
for host, cmdline in l:
cmd = ['ssh', host, '-o', 'PasswordAuthentication=no',
- '-o', 'SendEnv=PSSH_NODENUM']
+ '-o', 'SendEnv=PSSH_NODENUM',
+ '-o', 'StrictHostKeyChecking=no']
if opts.options:
for opt in opts.options:
cmd += ['-o', opt]

@@ -0,0 +1,19 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1312580731 -7200
# Node ID 29fd4f04c01f92e54026d9d6bb54d617d8b1fdcd
# Parent d0359dca5dba3fd6fee856d51cca5ee7ac752ee6
Low: Shell: enforce remote report directory removal for history
diff -r d0359dca5dba -r 29fd4f04c01f shell/modules/report.py
--- a/shell/modules/report.py Fri Aug 05 23:26:33 2011 +0200
+++ b/shell/modules/report.py Fri Aug 05 23:45:31 2011 +0200
@@ -595,7 +595,7 @@ class Report(Singleton):
if ext_cmd_nosudo("mkdir -p %s" % os.path.dirname(d)) != 0:
return None
common_info("retrieving information from cluster nodes, please wait ...")
- rc = ext_cmd_nosudo("hb_report -f '%s' %s %s %s" %
+ rc = ext_cmd_nosudo("hb_report -Z -f '%s' %s %s %s" %
(self.from_dt.ctime(), to_option, nodes_option, d))
if rc != 0:
if os.path.isfile(tarball):

@@ -0,0 +1,558 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1312993121 -7200
# Node ID b3a014c0f85b2bbe1e6a2360c44fbbfc7ac27b73
# Parent a09974a06cdf6a3d73c3cdfa6e4d89d41e2ca9f0
Medium: Shell: improve peinputs and transition interface (bnc#710655,711060)
- allow specifying PE files as relative paths in order to
disambiguate between PE inputs with the same number
- remove peinputs "get" and "list" subcommands, just use 'v' for the
long listing
- remove transition "show" subcommand, if there is no subcommand
it is assumed that the user wants to do "show"
- detect (and ignore) empty transitions
- update completion tables
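
The peinputs number/range selection kept by this patch (for example "440:444 446") relies on pulling the number out of PE file names such as pe-input-440.bz2. A small sketch of that extraction and range check (helper names are mine, not the shell's):

    import re

    def pe_num(path):
        # Pull 440 out of ".../pengine/pe-input-440.bz2" (also pe-warn-/pe-error-).
        m = re.search(r"pe-[^-]+-([0-9]+)\.bz2$", path)
        return int(m.group(1)) if m else None

    def pe_in_range(path, bounds=None):
        # bounds is None (match everything) or an inclusive (low, high) pair.
        n = pe_num(path)
        if n is None:
            return False
        return bounds is None or bounds[0] <= n <= bounds[1]
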
diff --git a/doc/crm.8.txt b/doc/crm.8.txt
--- a/doc/crm.8.txt
+++ b/doc/crm.8.txt
@@ -2560,55 +2560,62 @@ Example:
Every event in the cluster results in generating one or more
Policy Engine (PE) files. These files describe future motions of
-resources. The files are listed along with the node where they
-were created (the DC at the time). The `get` subcommand will copy
-all PE input files to the current working directory (and use ssh
-if necessary).
+resources. The files are listed as full paths in the current
+report directory. Add `v` to also see the creation time stamps.
Usage:
...............
- peinputs list [{<range>|<number>} ...]
- peinputs get [{<range>|<number>} ...]
+ peinputs [{<range>|<number>} ...] [v]
range :: <n1>:<n2>
...............
Example:
...............
- peinputs get 440:444 446
+ peinputs
+ peinputs 440:444 446
+ peinputs v
...............
[[cmdhelp_history_transition,show transition]]
==== `transition`
-The `show` subcommand will print actions planned by the PE and
-run graphviz (`dotty`) to display a graphical representation. Of
-course, for the latter an X11 session is required. This command
-invokes `ptest(8)` in background.
+This command will print actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. Of course, for the latter an X11 session is required.
+This command invokes `ptest(8)` in background.
The `showdot` subcommand runs graphviz (`dotty`) to display a
graphical representation of the `.dot` file which has been
included in the report. Essentially, it shows the calculation
produced by `pengine` which is installed on the node where the
-report was produced.
+report was produced. In optimal case this output should not
+differ from the one produced by the locally installed `pengine`.
If the PE input file number is not provided, it defaults to the
last one, i.e. the last transition. If the number is negative,
then the corresponding transition relative to the last one is
chosen.
+If there are warning and error PE input files or different nodes
+were the DC in the observed timeframe, it may happen that PE
+input file numbers collide. In that case provide some unique part
+of the path to the file.
+
After the `ptest` output, logs about events that happened during
the transition are printed.
Usage:
...............
- transition show [<number>] [nograph] [v...] [scores] [actions] [utilization]
- transition showdot [<number>]
+ transition [<number>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>|<file>]
...............
Examples:
...............
- transition show
- transition show 444
- transition show -1
+ transition
+ transition 444
+ transition -1
+ transition pe-error-3.bz2
+ transition node-a/pengine/pe-input-2.bz2
transition showdot 444
...............
diff --git a/shell/modules/completion.py b/shell/modules/completion.py
--- a/shell/modules/completion.py
+++ b/shell/modules/completion.py
@@ -165,14 +165,14 @@ def report_node_list(idx,delimiter = Fal
if delimiter:
return ' '
return crm_report.node_list()
-def report_pe_cmd_list(idx,delimiter = False):
+def report_pe_list_transition(idx,delimiter = False):
if delimiter:
return ' '
- return ["list","get","show","showdot"]
-def report_pe_list(idx,delimiter = False):
+ return crm_report.peinputs_list() + ["showdot"]
+def report_pe_list_peinputs(idx,delimiter = False):
if delimiter:
return ' '
- return crm_report.peinputs_list()
+ return crm_report.peinputs_list() + ["v"]
#
# completion for primitives including help for parameters
@@ -484,7 +484,8 @@ completer_lists = {
"resource" : (report_rsc_list,loop),
"node" : (report_node_list,loop),
"log" : (report_node_list,loop),
- "peinputs" : (report_pe_cmd_list,report_pe_list,loop),
+ "peinputs" : (report_pe_list_peinputs,loop),
+ "transition" : (report_pe_list_transition,),
},
}
def get_completer_list(level,cmd):
diff --git a/shell/modules/crm_pssh.py b/shell/modules/crm_pssh.py
--- a/shell/modules/crm_pssh.py
+++ b/shell/modules/crm_pssh.py
@@ -156,6 +156,9 @@ def next_peinputs(node_pe_l, outdir, err
myopts = ["-q", "-o", outdir, "-e", errdir]
opts, args = parse_args(myopts)
l.append([node, cmdline])
+ if not l:
+ # is this a failure?
+ return True
return do_pssh(l, opts)
# vim:ts=4:sw=4:et:
diff --git a/shell/modules/log_patterns.py b/shell/modules/log_patterns.py
--- a/shell/modules/log_patterns.py
+++ b/shell/modules/log_patterns.py
@@ -62,8 +62,3 @@ log_patterns = {
),
),
}
-
-transition_patt = (
- "crmd: .* Processing graph.*derived from .*/pe-[^-]+-(%%)[.]bz2", # transition start
- "crmd: .* Transition.*Source=.*/pe-[^-]+-(%%)[.]bz2.: (Stopped|Complete|Terminated)", # and stop
-)
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -31,7 +31,7 @@ from term import TerminalController
from xmlutil import *
from utils import *
from msg import *
-from log_patterns import log_patterns, transition_patt
+from log_patterns import log_patterns
_NO_PSSH = False
try:
from crm_pssh import next_loglines, next_peinputs
@@ -297,8 +297,8 @@ def human_date(dt):
def is_log(p):
return os.path.isfile(p) and os.path.getsize(p) > 0
-def pe_file_in_range(pe_f, a, ext):
- r = re.search("pe-[^-]+-([0-9]+)[.]%s$" % ext, pe_f)
+def pe_file_in_range(pe_f, a):
+ r = re.search("pe-[^-]+-([0-9]+)[.]bz2$", pe_f)
if not r:
return None
if not a or (a[0] <= int(r.group(1)) <= a[1]):
@@ -325,6 +325,17 @@ def update_loginfo(rptlog, logfile, oldp
except IOError, msg:
common_err("couldn't the update %s.info: %s" % (rptlog, msg))
+# r.group(1) transition number (a different thing from file number)
+# r.group(2) contains full path
+# r.group(3) file number
+transition_patt = (
+ "crmd: .* do_te_invoke: Processing graph ([0-9]+) .*derived from (.*/pe-[^-]+-(%%)[.]bz2)", # transition start
+ "crmd: .* run_graph: Transition ([0-9]+).*Source=(.*/pe-[^-]+-(%%)[.]bz2).: (Stopped|Complete|Terminated)", # and stop
+# r.group(1) transition number
+# r.group(2) number of actions
+ "crmd: .* unpack_graph: Unpacked transition (%%): ([0-9]+) actions", # number of actions
+)
+
class Report(Singleton):
'''
A hb_report class.
@@ -346,6 +357,7 @@ class Report(Singleton):
self.desc = None
self.log_l = []
self.central_log = None
+ self.peinputs_l = []
self.cibgrp_d = {}
self.cibrsc_l = []
self.cibnode_l = []
@@ -363,7 +375,7 @@ class Report(Singleton):
return self.cibnode_l
def peinputs_list(self):
return [re.search("pe-[^-]+-([0-9]+)[.]bz2$", x).group(1)
- for x in self._file_list("bz2")]
+ for x in self.peinputs_l]
def unpack_report(self, tarball):
'''
Unpack hb_report tarball.
@@ -495,28 +507,26 @@ class Report(Singleton):
continue
u_dir = os.path.join(self.loc, node)
rc = ext_cmd_nosudo("tar -C %s -x < %s" % (u_dir,fl[0]))
- def find_new_peinputs(self, a):
+ def find_new_peinputs(self, node_l):
'''
- Get a list of pe inputs appearing in logs.
+ Get a list of pe inputs appearing in new logs.
+ The log is put in self.outdir/node by pssh.
'''
if not os.path.isdir(self.outdir):
return []
l = []
- trans_re_l = [x.replace("%%","") for x in transition_patt]
- for node,rptlog,logfile,nextpos in a:
- node_l = []
+ for node in node_l:
fl = glob.glob("%s/*%s*" % (self.outdir,node))
if not fl:
continue
- for s in file2list(fl[0]):
- r = re.search(trans_re_l[0], s)
- if not r:
- continue
- node_l.append(r.group(1))
- if node_l:
- common_debug("found new PE inputs %s at %s" %
- ([os.path.basename(x) for x in node_l], node))
- l.append([node,node_l])
+ try:
+ f = open(fl[0])
+ except IOError,msg:
+ common_err("open %s: %s"%(fl[0],msg))
+ continue
+ pe_l = self.get_transitions([x for x in f], keep_pe_path = True)
+ if pe_l:
+ l.append([node,pe_l])
return l
def update_live(self):
'''
@@ -544,7 +554,7 @@ class Report(Singleton):
rmdir_r(self.errdir)
rc1 = next_loglines(a, self.outdir, self.errdir)
self.append_newlogs(a)
- pe_l = self.find_new_peinputs(a)
+ pe_l = self.find_new_peinputs([x[0] for x in a])
rmdir_r(self.outdir)
rmdir_r(self.errdir)
rc2 = True
@@ -677,6 +687,55 @@ class Report(Singleton):
for n in self.cibnode_l:
self.nodecolor[n] = self.nodecolors[i]
i = (i+1) % len(self.nodecolors)
+ def get_transitions(self, msg_l = None, keep_pe_path = False):
+ '''
+ Get a list of transitions.
+ Empty transitions are skipped.
+ We use the unpack_graph message to see the number of
+ actions.
+ Some callers need original PE file path (keep_pe_path),
+ otherwise we produce the path within the report.
+ If the caller doesn't provide the message list, then we
+ build it from the collected log files (self.logobj).
+ Otherwise, we get matches for transition patterns.
+ '''
+ trans_re_l = [x.replace("%%", "[0-9]+") for x in transition_patt]
+ if not msg_l:
+ msg_l = self.logobj.get_matches(trans_re_l)
+ else:
+ re_s = '|'.join(trans_re_l)
+ msg_l = [x for x in msg_l if re.search(re_s, x)]
+ pe_l = []
+ for msg in msg_l:
+ msg_a = msg.split()
+ if len(msg_a) < 8:
+ # this looks too short
+ common_warn("log message <%s> unexpected format, please report a bug" % msg)
+ continue
+ if msg_a[7] in ("unpack_graph:","run_graph:"):
+ continue # we want another message
+ node = msg_a[3]
+ pe_file = msg_a[-1]
+ pe_base = os.path.basename(pe_file)
+ # check if there were any actions in this transition
+ r = re.search(trans_re_l[0], msg)
+ trans_num = r.group(1)
+ unpack_patt = transition_patt[2].replace("%%", trans_num)
+ num_actions = 0
+ for t in msg_l:
+ try:
+ num_actions = int(re.search(unpack_patt, t).group(2))
+ break
+ except: pass
+ if num_actions == 0: # empty transition
+ common_debug("skipping empty transition %s (%s)" % (trans_num, pe_base))
+ continue
+ common_debug("found PE input at %s: %s" % (node, pe_file))
+ if keep_pe_path:
+ pe_l.append(pe_file)
+ else:
+ pe_l.append(os.path.join(self.loc, node, "pengine", pe_base))
+ return pe_l
def report_setup(self):
if not self.loc:
return
@@ -687,6 +746,11 @@ class Report(Singleton):
self.set_node_colors()
self.logobj = LogSyslog(self.central_log, self.log_l, \
self.from_dt, self.to_dt)
+ self.peinputs_l = self.get_transitions()
+ for pe_input in self.peinputs_l:
+ if not os.path.isfile(pe_input):
+ warn_once("%s in the logs, but not in the report" % pe_input)
+ self.peinputs_l.remove(pe_input)
def prepare_source(self):
'''
Unpack a hb_report tarball.
@@ -821,16 +885,16 @@ class Report(Singleton):
Search for events within the given transition.
'''
pe_base = os.path.basename(pe_file)
- r = re.search("pe-[^-]+-([0-9]+)[.]bz2", pe_base)
+ r = re.search("pe-[^-]+-([0-9]+)[.]", pe_base)
pe_num = r.group(1)
trans_re_l = [x.replace("%%",pe_num) for x in transition_patt]
trans_start = self.logobj.search_logs(self.log_l, trans_re_l[0])
trans_end = self.logobj.search_logs(self.log_l, trans_re_l[1])
if not trans_start:
- common_warn("transition %s start not found in logs" % pe_base)
+ common_warn("start of transition %s not found in logs" % pe_base)
return False
if not trans_end:
- common_warn("transition %s end not found in logs" % pe_base)
+ common_warn("end of transition %s not found in logs (transition not complete yet?)" % pe_base)
return False
common_debug("transition start: %s" % trans_start[0])
common_debug("transition end: %s" % trans_end[0])
@@ -891,23 +955,23 @@ class Report(Singleton):
if not l:
return False
self.show_logs(log_l = l)
- def _file_list(self, ext, a = []):
- '''
- Return list of PE (or dot) files (abs paths) sorted by
- mtime.
- Input is a number or a pair of numbers representing
- range. Otherwise, all matching files are returned.
- '''
+ def pelist(self, a = []):
if not self.prepare_source():
return []
- if not isinstance(a,(tuple,list)) and a is not None:
+ if isinstance(a,(tuple,list)):
+ if len(a) == 1:
+ a.append(a[0])
+ elif a is not None:
a = [a,a]
- return sort_by_mtime([x for x in dirwalk(self.loc) \
- if pe_file_in_range(x,a,ext)])
- def pelist(self, a = []):
- return self._file_list("bz2", a)
+ return [x for x in self.peinputs_l \
+ if pe_file_in_range(x, a)]
def dotlist(self, a = []):
- return self._file_list("dot", a)
+ l = [x.replace("bz2","dot") for x in self.pelist(a)]
+ return [x for x in l if os.path.isfile(x)]
+ def find_pe_files(self, path):
+ 'Find a PE or dot file matching part of the path.'
+ pe_l = path.endswith(".dot") and self.dotlist() or self.pelist()
+ return [x for x in pe_l if x.endswith(path)]
def find_file(self, f):
return file_find_by_name(self.loc, f)
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1686,8 +1686,8 @@ Examine Pacemaker's history: node and re
self.cmd_table["resource"] = (self.resource,(1,),1,0)
self.cmd_table["node"] = (self.node,(1,),1,1)
self.cmd_table["log"] = (self.log,(0,),1,0)
- self.cmd_table["peinputs"] = (self.peinputs,(1,),1,0)
- self.cmd_table["transition"] = (self.transition,(1,),1,0)
+ self.cmd_table["peinputs"] = (self.peinputs,(0,),1,0)
+ self.cmd_table["transition"] = (self.transition,(0,),1,0)
self._set_source(options.history)
def _no_source(self):
common_error("we have no source set yet! please use the source command")
@@ -1831,64 +1831,83 @@ Examine Pacemaker's history: node and re
s = bz2.decompress(''.join(f))
f.close()
return run_ptest(s, nograph, scores, utilization, actions, verbosity)
- def peinputs(self,cmd,subcmd,*args):
- """usage: peinputs list [{<range>|<number>} ...]
- peinputs get [{<range>|<number>} ...]"""
- if subcmd not in ("get","list"):
- bad_usage(cmd,subcmd)
- return False
- if args:
+ def peinputs(self,cmd,*args):
+ """usage: peinputs [{<range>|<number>} ...] [v]"""
+ argl = list(args)
+ long = "v" in argl
+ if long:
+ argl.remove("v")
+ if argl:
l = []
- for s in args:
+ for s in argl:
a = convert2ints(s.split(':'))
- if len(a) == 2 and not check_range(a):
+ if a and len(a) == 2 and not check_range(a):
common_err("%s: invalid peinputs range" % a)
return False
l += crm_report.pelist(a)
else:
l = crm_report.pelist()
if not l: return False
- if subcmd == "list":
- s = get_stdout("ls -lrt %s" % ' '.join(l))
- page_string(s)
+ if long:
+ s = get_stdout("for f in %s; do ls -l $f; done" % ' '.join(l))
else:
- print '\n'.join(l)
- def transition(self,cmd,subcmd,*args):
- """usage: transition show [<number>] [nograph] [v...] [scores] [actions] [utilization]
- transition showdot [<number>]"""
- if subcmd not in ("show", "showdot"):
- bad_usage(cmd,subcmd)
- return False
- try: n = convert2ints(args[0])
- except: n = None
- startarg = 1
- if n is None:
- idx = -1
- startarg = 0 # peinput number missing
- elif n <= 0:
- idx = n - 1
- n = [] # to get all peinputs
- else:
- idx = 0
- if subcmd == "showdot":
+ s = '\n'.join(l)
+ page_string(s)
+ def transition(self,cmd,*args):
+ """usage: transition [<number>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>|<file>]"""
+ argl = list(args)
+ subcmd = "show"
+ if argl and argl[0] == "showdot":
if not user_prefs.dotty:
common_err("install graphviz to draw transition graphs")
return False
- l = crm_report.dotlist(n)
+ subcmd = "showdot"
+ argl.remove(subcmd)
+ f = None
+ startarg = 1
+ if argl and re.search('pe-', argl[0]):
+ l = crm_report.find_pe_files(argl[0])
+ if len(l) == 0:
+ common_err("%s: path not found" % argl[0])
+ return False
+ elif len(l) > 1:
+ common_err("%s: path ambiguous" % argl[0])
+ return False
+ f = l[0]
else:
- l = crm_report.pelist(n)
- if len(l) < abs(idx):
- common_err("pe input or dot file not found")
+ try: n = convert2ints(argl[0])
+ except: n = None
+ if n is None:
+ idx = -1
+ startarg = 0 # peinput number missing
+ elif n <= 0:
+ idx = n - 1
+ n = [] # to get all peinputs
+ else:
+ idx = 0
+ if subcmd == "showdot":
+ l = crm_report.dotlist(n)
+ else:
+ l = crm_report.pelist(n)
+ if len(l) < abs(idx):
+ if subcmd == "show":
+ common_err("pe input file not found")
+ else:
+ common_err("dot file not found")
+ return False
+ f = l[idx]
+ if not f:
return False
rc = True
if subcmd == "show":
- self.pe_file = l[idx]
+ self.pe_file = f # self.pe_file needed by self.ptest
rc = ptestlike(self.ptest,'vv',"%s %s" % \
- (cmd, subcmd), *args[startarg:])
- if rc:
- crm_report.show_transition_log(self.pe_file)
+ (cmd, subcmd), *argl[startarg:])
else:
- show_dot_graph(l[idx])
+ show_dot_graph(f.replace("bz2","dot"))
+ if rc:
+ crm_report.show_transition_log(f)
return rc
class TopLevel(UserInterface):
diff --git a/shell/modules/utils.py b/shell/modules/utils.py
--- a/shell/modules/utils.py
+++ b/shell/modules/utils.py
@@ -392,7 +392,7 @@ def run_ptest(graph_s, nograph, scores,
Pipe graph_s thru ptest(8). Show graph using dotty if requested.
'''
actions_filter = "grep LogActions: | grep -vw Leave"
- ptest = "ptest -X"
+ ptest = "2>&1 ptest -X"
if verbosity:
if actions:
verbosity = 'v' * max(3,len(verbosity))
@@ -408,7 +408,8 @@ def run_ptest(graph_s, nograph, scores,
dotfile = None
# ptest prints to stderr
if actions:
- ptest = "%s 2>&1 | %s" % (ptest, actions_filter)
+ ptest = "%s | %s" % (ptest, actions_filter)
+ common_debug("invoke: %s" % ptest)
print get_stdout(ptest, input_s = graph_s)
#page_string(get_stdout(ptest, input_s = graph_s))
if dotfile:
@@ -443,7 +444,7 @@ def check_range(a):
return False
if not isinstance(a[0],int) or not isinstance(a[1],int):
return False
- return (int(a[0]) < int(a[1]))
+ return (int(a[0]) <= int(a[1]))
def sort_by_mtime(l):
'Sort a (small) list of files by time mod.'

@@ -0,0 +1,23 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1312838871 -7200
# Node ID a09974a06cdf6a3d73c3cdfa6e4d89d41e2ca9f0
# Parent 29fd4f04c01f92e54026d9d6bb54d617d8b1fdcd
Low: Shell: avoid DeprecationWarning for BaseException.message
diff --git a/shell/modules/utils.py b/shell/modules/utils.py
--- a/shell/modules/utils.py
+++ b/shell/modules/utils.py
@@ -257,9 +257,9 @@ def acquire_lock(dir):
os.mkdir(os.path.join(dir,_LOCKDIR))
str2file("%d" % os.getpid(),os.path.join(dir,_LOCKDIR,_PIDF))
return True
- except OSError, e:
- if e.errno != os.errno.EEXIST:
- common_err("%s" % e.message)
+ except OSError as (errno, strerror):
+ if errno != os.errno.EEXIST:
+ common_err(strerror)
return False
time.sleep(0.1)
continue

@@ -0,0 +1,90 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313019300 -7200
# Node ID c3068d22de72d1ba616d43c808091bef830eb9f6
# Parent b3a014c0f85b2bbe1e6a2360c44fbbfc7ac27b73
Medium: Shell: improve capture log slices for transitions (bnc#710907)
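
The reworked log_seek below binary-searches byte offsets and then walks line by line to the edge of the matching timestamp. A self-contained sketch of the binary-search core, assuming the log is already sorted by time and that parse_ts turns one raw line (bytes) into a comparable timestamp or None; the function and its name are illustrative, not the shell's:

    import os

    def seek_first_at_or_after(path, ts, parse_ts):
        # Return a byte offset near the first line whose timestamp is >= ts.
        with open(path, "rb") as f:
            lo, hi = 0, os.fstat(f.fileno()).st_size
            while lo < hi:
                mid = (lo + hi) // 2
                f.seek(mid)
                f.readline()               # skip the partial line we landed in
                line = f.readline()
                t = parse_ts(line) if line else None
                if t is not None and t < ts:
                    lo = f.tell()          # the answer lies after this line
                else:
                    hi = mid               # the answer is at or before mid
            return lo
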
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -65,7 +65,25 @@ def syslog_ts(s):
common_warn("malformed line: %s" % s)
return None
-def log_seek(f, ts, endpos = False):
+def seek_to_edge(f, ts, to_end):
+ '''
+ f contains lines with exactly the timestamp ts.
+ Read forward (or backward) till we find the edge.
+ Linear search, but should be short.
+ '''
+ if not to_end:
+ while ts == get_timestamp(f):
+ f.seek(-1000, 1) # go back 10 or so lines
+ while True:
+ pos = f.tell()
+ s = f.readline()
+ curr_ts = syslog_ts(s)
+ if (to_end and curr_ts > ts) or \
+ (not to_end and curr_ts >= ts):
+ break
+ f.seek(pos)
+
+def log_seek(f, ts, to_end = False):
'''
f is an open log. Do binary search for the timestamp.
Return the position of the (more or less) first line with a
@@ -75,10 +93,11 @@ def log_seek(f, ts, endpos = False):
f.seek(0,2)
last = f.tell()
if not ts:
- return endpos and last or first
+ return to_end and last or first
badline = 0
maxbadline = 10
- common_debug("seek ts %s" % time.ctime(ts))
+ common_debug("seek %s:%s in %s" %
+ (time.ctime(ts), to_end and "end" or "start", f.name))
while first <= last:
# we can skip some iterations if it's about few lines
if abs(first-last) < 120:
@@ -98,9 +117,12 @@ def log_seek(f, ts, endpos = False):
elif log_ts < ts:
first = mid+1
else:
+ seek_to_edge(f, log_ts, to_end)
break
- common_debug("sought to %s" % time.ctime(log_ts))
- return f.tell()
+ fpos = f.tell()
+ common_debug("sought to %s (%d)" % (f.readline(), fpos))
+ f.seek(fpos)
+ return fpos
def get_timestamp(f):
'''
@@ -187,7 +209,7 @@ class LogSyslog(object):
for log in self.f:
f = self.f[log]
start = log_seek(f, self.from_ts)
- end = log_seek(f, self.to_ts, endpos = True)
+ end = log_seek(f, self.to_ts, to_end = True)
if start == -1 or end == -1:
bad_logs.append(log)
else:
diff --git a/shell/modules/utils.py b/shell/modules/utils.py
--- a/shell/modules/utils.py
+++ b/shell/modules/utils.py
@@ -413,7 +413,10 @@ def run_ptest(graph_s, nograph, scores,
print get_stdout(ptest, input_s = graph_s)
#page_string(get_stdout(ptest, input_s = graph_s))
if dotfile:
- show_dot_graph(dotfile)
+ if os.path.getsize(dotfile) > 0:
+ show_dot_graph(dotfile)
+ else:
+ common_warn("ptest produced empty dot file")
vars.tmpfiles.append(dotfile)
else:
if not nograph:

@@ -0,0 +1,245 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313081065 -7200
# Node ID 441f4448eba6eda1a2cf44d3d63a0db9f8d56a20
# Parent c3068d22de72d1ba616d43c808091bef830eb9f6
Medium: Shell: reimplement the history latest command (bnc#710958)
This command is going to show logs for the latest transition.
Basically, it's the same as "history transition", but it will
wait for the current (if any) transition to finish.
(Also, the horrible transition command arg parsing has been
improved.)
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -467,8 +467,7 @@ class Report(Singleton):
def is_last_live_recent(self):
'''
Look at the last live hb_report. If it's recent enough,
- return True. Return True also if self.to_dt is not empty
- (not an open end report).
+ return True.
'''
try:
last_ts = os.stat(self.desc).st_mtime
@@ -800,6 +799,7 @@ class Report(Singleton):
if self.source != "live":
self.error("refresh not supported")
return False
+ self.last_live_update = 0
self.loc = self.manage_live_report(force)
self.report_setup()
self.ready = self.check_report()
@@ -884,18 +884,10 @@ class Report(Singleton):
print "Nodes:",' '.join(self.cibnode_l)
print "Groups:",' '.join(self.cibgrp_d.keys())
print "Resources:",' '.join(self.cibrsc_l)
- def latest(self):
- '''
- Get the very latest cluster events, basically from the
- latest transition.
- Some transitions may be empty though.
- '''
def events(self):
'''
Show all events.
'''
- if not self.prepare_source():
- return False
all_re_l = self.build_re("resource",self.cibrsc_l) + \
self.build_re("node",self.cibnode_l)
if not all_re_l:
@@ -906,6 +898,8 @@ class Report(Singleton):
'''
Search for events within the given transition.
'''
+ if not self.prepare_source():
+ return False
pe_base = os.path.basename(pe_file)
r = re.search("pe-[^-]+-([0-9]+)[.]", pe_base)
pe_num = r.group(1)
@@ -926,6 +920,9 @@ class Report(Singleton):
self.warn("strange, no timestamps found")
return False
# limit the log scope temporarily
+ common_info("logs for transition %s (%s-%s)" %
+ (pe_file.replace(self.loc+"/",""), \
+ shorttime(start_ts), shorttime(end_ts)))
self.logobj.set_log_timeframe(start_ts, end_ts)
self.events()
self.logobj.set_log_timeframe(self.from_dt, self.to_dt)
@@ -994,6 +991,11 @@ class Report(Singleton):
'Find a PE or dot file matching part of the path.'
pe_l = path.endswith(".dot") and self.dotlist() or self.pelist()
return [x for x in pe_l if x.endswith(path)]
+ def pe2dot(self, f):
+ f = f.replace("bz2","dot")
+ if os.path.isfile(f):
+ return f
+ return None
def find_file(self, f):
return file_find_by_name(self.loc, f)
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1796,22 +1796,15 @@ Examine Pacemaker's history: node and re
return crm_report.info()
def latest(self,cmd):
"usage: latest"
- try:
- prev_level = levels.previous().myname()
- except:
- prev_level = ''
- if prev_level != "cibconfig":
- common_err("%s is available only when invoked from configure" % cmd)
- return False
- ts = cib_factory.last_commit_at()
- if not ts:
- common_err("no last commit time found")
- return False
if not wait4dc("transition", not options.batch):
return False
- self._set_source("live", ts)
+ self._set_source("live")
crm_report.refresh_source()
- return crm_report.events()
+ f = self._get_pe_byidx(-1)
+ if not f:
+ common_err("no transitions found")
+ return False
+ crm_report.show_transition_log(f)
def resource(self,cmd,*args):
"usage: resource <rsc> [<rsc> ...]"
return crm_report.resource(*args)
@@ -1853,6 +1846,30 @@ Examine Pacemaker's history: node and re
else:
s = '\n'.join(l)
page_string(s)
+ def _get_pe_byname(self, s):
+ l = crm_report.find_pe_files(s)
+ if len(l) == 0:
+ common_err("%s: path not found" % s)
+ return None
+ elif len(l) > 1:
+ common_err("%s: path ambiguous" % s)
+ return None
+ return l[0]
+ def _get_pe_byidx(self, idx):
+ l = crm_report.pelist()
+ if len(l) < abs(idx):
+ common_err("pe input file not found")
+ return None
+ return l[idx]
+ def _get_pe_bynum(self, n):
+ l = crm_report.pelist([n])
+ if len(l) == 0:
+ common_err("%s: PE file %d not found" % n)
+ return None
+ elif len(l) > 1:
+ common_err("%s: PE file %d ambiguous" % n)
+ return None
+ return l[0]
def transition(self,cmd,*args):
"""usage: transition [<number>|<file>] [nograph] [v...] [scores] [actions] [utilization]
transition showdot [<number>|<file>]"""
@@ -1864,48 +1881,35 @@ Examine Pacemaker's history: node and re
return False
subcmd = "showdot"
argl.remove(subcmd)
- f = None
- startarg = 1
- if argl and re.search('pe-', argl[0]):
- l = crm_report.find_pe_files(argl[0])
- if len(l) == 0:
- common_err("%s: path not found" % argl[0])
- return False
- elif len(l) > 1:
- common_err("%s: path ambiguous" % argl[0])
- return False
- f = l[0]
+ if argl:
+ if re.search('pe-', argl[0]):
+ f = self._get_pe_byname(argl[0])
+ argl.pop(0)
+ elif is_int(argl[0]):
+ n = int(argl[0])
+ if n <= 0:
+ f = self._get_pe_byidx(n-1)
+ else:
+ f = self._get_pe_bynum(n)
+ argl.pop(0)
+ else:
+ f = self._get_pe_byidx(-1)
else:
- try: n = convert2ints(argl[0])
- except: n = None
- if n is None:
- idx = -1
- startarg = 0 # peinput number missing
- elif n <= 0:
- idx = n - 1
- n = [] # to get all peinputs
- else:
- idx = 0
- if subcmd == "showdot":
- l = crm_report.dotlist(n)
- else:
- l = crm_report.pelist(n)
- if len(l) < abs(idx):
- if subcmd == "show":
- common_err("pe input file not found")
- else:
- common_err("dot file not found")
- return False
- f = l[idx]
+ f = self._get_pe_byidx(-1)
if not f:
return False
rc = True
if subcmd == "show":
self.pe_file = f # self.pe_file needed by self.ptest
+ common_info("running ptest with %s" % f)
rc = ptestlike(self.ptest,'vv',"%s %s" % \
- (cmd, subcmd), *argl[startarg:])
+ (cmd, subcmd), *argl)
else:
- show_dot_graph(f.replace("bz2","dot"))
+ f = crm_report.pe2dot(f)
+ if not f:
+ common_err("dot file not found in the report")
+ return False
+ show_dot_graph(f)
if rc:
crm_report.show_transition_log(f)
return rc
diff --git a/shell/modules/utils.py b/shell/modules/utils.py
--- a/shell/modules/utils.py
+++ b/shell/modules/utils.py
@@ -449,6 +449,9 @@ def check_range(a):
return False
return (int(a[0]) <= int(a[1]))
+def shorttime(ts):
+ return time.strftime("%X",time.localtime(ts))
+
def sort_by_mtime(l):
'Sort a (small) list of files by time mod.'
l2 = [(os.stat(x).st_mtime, x) for x in l]
@@ -489,6 +492,13 @@ def convert2ints(l):
else: # it's a string then
return int(l)
except: return None
+def is_int(s):
+ 'Check if the string can be converted to an integer.'
+ try:
+ i = int(s)
+ return True
+ except:
+ return False
def is_process(s):
proc = subprocess.Popen("ps -e -o pid,command | grep -qs '%s'" % s, \

@@ -0,0 +1,207 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313413824 -7200
# Node ID 3f3c348aaaed52383f6646b08899943aec8911f4
# Parent 441f4448eba6eda1a2cf44d3d63a0db9f8d56a20
Medium: Shell: relax transition acceptance
Sometimes logs are missing one or another transition-related
message. Try to be more forgiving in that case.
Also, print information about number of actions which were
completed, skipped, etc.
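
The action counts mentioned above come from the "Complete=5, Pending=1, ..." pairs in the crmd run_graph message; a minimal illustration of that parsing (function name is mine):

    import re

    def action_counts(msg):
        # "... Transition 399 (Complete=5, Pending=1, Fired=1, Skipped=0,
        #  Incomplete=3, Source=..." -> {"Complete": 5, "Pending": 1, ...}
        return {key: int(val)
                for key, val in re.findall(r"([A-Z][a-z]+)=([0-9]+)", msg)}
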
diff --git a/shell/modules/report.py b/shell/modules/report.py
--- a/shell/modules/report.py
+++ b/shell/modules/report.py
@@ -320,10 +320,8 @@ def is_log(p):
return os.path.isfile(p) and os.path.getsize(p) > 0
def pe_file_in_range(pe_f, a):
- r = re.search("pe-[^-]+-([0-9]+)[.]bz2$", pe_f)
- if not r:
- return None
- if not a or (a[0] <= int(r.group(1)) <= a[1]):
+ pe_num = get_pe_num(pe_f)
+ if not a or (a[0] <= int(pe_num) <= a[1]):
return pe_f
return None
@@ -347,6 +345,12 @@ def update_loginfo(rptlog, logfile, oldp
except IOError, msg:
common_err("couldn't the update %s.info: %s" % (rptlog, msg))
+def get_pe_num(pe_file):
+ try:
+ return re.search("pe-[^-]+-([0-9]+)[.]", pe_file).group(1)
+ except:
+ return "-1"
+
# r.group(1) transition number (a different thing from file number)
# r.group(2) contains full path
# r.group(3) file number
@@ -358,6 +362,40 @@ transition_patt = (
"crmd: .* unpack_graph: Unpacked transition (%%): ([0-9]+) actions", # number of actions
)
+def run_graph_msg_actions(msg):
+ '''
+ crmd: [13667]: info: run_graph: Transition 399 (Complete=5,
+ Pending=1, Fired=1, Skipped=0, Incomplete=3,
+ Source=...
+ '''
+ d = {}
+ s = msg
+ while True:
+ r = re.search("([A-Z][a-z]+)=([0-9]+)", s)
+ if not r:
+ return d
+ d[r.group(1)] = int(r.group(2))
+ s = s[r.end():]
+def transition_actions(msg_l, te_invoke_msg, pe_file):
+ '''
+ Get the number of actions for the transition.
+ '''
+ # check if there were any actions in this transition
+ pe_num = get_pe_num(pe_file)
+ te_invoke_patt = transition_patt[0].replace("%%", pe_num)
+ run_patt = transition_patt[1].replace("%%", pe_num)
+ r = re.search(te_invoke_patt, te_invoke_msg)
+ trans_num = r.group(1)
+ unpack_patt = transition_patt[2].replace("%%", trans_num)
+ for msg in msg_l:
+ try:
+ return int(re.search(unpack_patt, msg).group(2))
+ except:
+ if re.search(run_patt, msg):
+ act_d = run_graph_msg_actions(msg)
+ return sum(act_d.values())
+ return -1
+
class Report(Singleton):
'''
A hb_report class.
@@ -396,8 +434,7 @@ class Report(Singleton):
def node_list(self):
return self.cibnode_l
def peinputs_list(self):
- return [re.search("pe-[^-]+-([0-9]+)[.]bz2$", x).group(1)
- for x in self.peinputs_l]
+ return [get_pe_num(x) for x in self.peinputs_l]
def unpack_report(self, tarball):
'''
Unpack hb_report tarball.
@@ -712,8 +749,6 @@ class Report(Singleton):
'''
Get a list of transitions.
Empty transitions are skipped.
- We use the unpack_graph message to see the number of
- actions.
Some callers need original PE file path (keep_pe_path),
otherwise we produce the path within the report.
If the caller doesn't provide the message list, then we
@@ -738,19 +773,12 @@ class Report(Singleton):
node = msg_a[3]
pe_file = msg_a[-1]
pe_base = os.path.basename(pe_file)
- # check if there were any actions in this transition
- r = re.search(trans_re_l[0], msg)
- trans_num = r.group(1)
- unpack_patt = transition_patt[2].replace("%%", trans_num)
- num_actions = 0
- for t in msg_l:
- try:
- num_actions = int(re.search(unpack_patt, t).group(2))
- break
- except: pass
+ num_actions = transition_actions(msg_l, msg, pe_file)
if num_actions == 0: # empty transition
- common_debug("skipping empty transition %s (%s)" % (trans_num, pe_base))
+ common_debug("skipping empty transition (%s)" % pe_base)
continue
+ elif num_actions == -1: # couldn't find messages
+ common_warn("could not find number of actions for transition (%s)" % pe_base)
common_debug("found PE input at %s: %s" % (node, pe_file))
if keep_pe_path:
pe_l.append(pe_file)
@@ -894,6 +922,34 @@ class Report(Singleton):
self.error("no resources or nodes found")
return False
self.show_logs(re_l = all_re_l)
+ def get_transition_msgs(self, pe_file, msg_l = []):
+ if not msg_l:
+ trans_re_l = [x.replace("%%", "[0-9]+") for x in transition_patt]
+ msg_l = self.logobj.get_matches(trans_re_l)
+ te_invoke_msg = ""
+ run_msg = ""
+ unpack_msg = ""
+ pe_num = get_pe_num(pe_file)
+ te_invoke_patt = transition_patt[0].replace("%%", pe_num)
+ run_patt = transition_patt[1].replace("%%", pe_num)
+ r = None
+ for msg in msg_l:
+ r = re.search(te_invoke_patt, msg)
+ if r:
+ te_invoke_msg = msg
+ break
+ if not r:
+ return ["", "", ""]
+ trans_num = r.group(1)
+ unpack_patt = transition_patt[2].replace("%%", trans_num)
+ for msg in msg_l:
+ if re.search(run_patt, msg):
+ run_msg = msg
+ elif re.search(unpack_patt, msg):
+ unpack_msg = msg
+ if run_msg and unpack_msg:
+ break
+ return [unpack_msg, te_invoke_msg, run_msg]
def show_transition_log(self, pe_file):
'''
Search for events within the given transition.
@@ -901,28 +957,34 @@ class Report(Singleton):
if not self.prepare_source():
return False
pe_base = os.path.basename(pe_file)
- r = re.search("pe-[^-]+-([0-9]+)[.]", pe_base)
- pe_num = r.group(1)
- trans_re_l = [x.replace("%%",pe_num) for x in transition_patt]
- trans_start = self.logobj.search_logs(self.log_l, trans_re_l[0])
- trans_end = self.logobj.search_logs(self.log_l, trans_re_l[1])
- if not trans_start:
+ pe_num = get_pe_num(pe_base)
+ unpack_msg, te_invoke_msg, run_msg = self.get_transition_msgs(pe_file)
+ if not te_invoke_msg:
common_warn("start of transition %s not found in logs" % pe_base)
return False
- if not trans_end:
+ if not run_msg:
common_warn("end of transition %s not found in logs (transition not complete yet?)" % pe_base)
return False
- common_debug("transition start: %s" % trans_start[0])
- common_debug("transition end: %s" % trans_end[0])
- start_ts = syslog_ts(trans_start[0])
- end_ts = syslog_ts(trans_end[0])
+ common_debug("transition start: %s" % te_invoke_msg)
+ common_debug("transition end: %s" % run_msg)
+ start_ts = syslog_ts(te_invoke_msg)
+ end_ts = syslog_ts(run_msg)
if not start_ts or not end_ts:
self.warn("strange, no timestamps found")
return False
- # limit the log scope temporarily
+ act_d = run_graph_msg_actions(run_msg)
+ total = sum(act_d.values())
+ s = ""
+ for a in act_d:
+ if not act_d[a]:
+ continue
+ s = "%s %s=%d" % (s, a, act_d[a])
+ common_info("transition %s %d actions: %s" %
+ (pe_file.replace(self.loc+"/",""), total, s))
common_info("logs for transition %s (%s-%s)" %
(pe_file.replace(self.loc+"/",""), \
shorttime(start_ts), shorttime(end_ts)))
+ # limit the log scope temporarily
self.logobj.set_log_timeframe(start_ts, end_ts)
self.events()
self.logobj.set_log_timeframe(self.from_dt, self.to_dt)

@@ -0,0 +1,34 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313416746 -7200
# Node ID 3681d3471fdecde109ea7c25ab2ceb31e1e8646f
# Parent 3f3c348aaaed52383f6646b08899943aec8911f4
Low: Shell: update log patterns for history
diff --git a/shell/modules/log_patterns.py b/shell/modules/log_patterns.py
--- a/shell/modules/log_patterns.py
+++ b/shell/modules/log_patterns.py
@@ -3,7 +3,7 @@
# log pattern specification
#
# patterns are grouped one of several classes:
-# - resources: pertaining to a resource
+# - resource: pertaining to a resource
# - node: pertaining to a node
# - quorum: quorum changes
# - events: other interesting events (core dumps, etc)
@@ -17,12 +17,12 @@
log_patterns = {
"resource": (
( # detail 0
- "lrmd:.*rsc:%%.*(start|stop)",
+ "lrmd:.*rsc:%%.*(start|stop|promote|demote|migrate)",
"lrmd:.*RA output:.*%%.*stderr",
"lrmd:.*WARN:.*Managed.*%%.*exited",
),
( # detail 1
- "lrmd:.*rsc:%%.*probe",
+ "lrmd:.*rsc:%%.*(probe|notify)",
"lrmd:.*info:.*Managed.*%%.*exited",
),
),
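For orientation, a quick illustration of how such a `%%` pattern is consumed once a concrete resource id is known (a sketch only; the lrmd line is invented and the real matching is done by the report machinery in report.py):

import re

# One of the detail-0 "resource" patterns from the hunk above.
patt = "lrmd:.*rsc:%%.*(start|stop|promote|demote|migrate)"

# Hypothetical lrmd syslog line, format only approximated.
line = "Aug 29 10:15:03 node1 lrmd: [1111]: info: rsc:ip_0:14: start"

rsc_patt = patt.replace("%%", "ip_0")   # substitute the resource id
m = re.search(rsc_patt, line)
print(m.group(1) if m else "no match")  # -> start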

View File

@ -0,0 +1,47 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314196090 -7200
# Node ID 709ef91cfada2822aca53dcef085ddb6952393c5
# Parent 3a81b7eae66672dd9873fe6b53ee3c0da6fc87d7
Low: Shell: update pe not found message
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1822,7 +1822,6 @@ Examine Pacemaker's history: node and re
crm_report.refresh_source()
f = self._get_pe_byidx(-1)
if not f:
- common_err("no transitions found")
return False
crm_report.show_transition_log(f)
def resource(self,cmd,*args):
@@ -1878,7 +1877,7 @@ Examine Pacemaker's history: node and re
def _get_pe_byidx(self, idx):
l = crm_report.pelist()
if len(l) < abs(idx):
- common_err("pe input file not found")
+ common_err("pe input file for index %d not found" % (idx+1))
return None
return l[idx]
def _get_pe_bynum(self, n):
@@ -1913,7 +1912,8 @@ Examine Pacemaker's history: node and re
f = self._get_pe_bynum(n)
argl.pop(0)
else:
- f = self._get_pe_byidx(-1)
+ common_err("<%s> doesn't sound like a PE input" % argl[0])
+ return False
else:
f = self._get_pe_byidx(-1)
if not f:
@@ -1922,8 +1922,7 @@ Examine Pacemaker's history: node and re
if subcmd == "show":
self.pe_file = f # self.pe_file needed by self.ptest
common_info("running ptest with %s" % f)
- rc = ptestlike(self.ptest,'vv',"%s %s" % \
- (cmd, subcmd), *argl)
+ rc = ptestlike(self.ptest,'vv', cmd, *argl)
else:
f = crm_report.pe2dot(f)
if not f:
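The `%d` in the new message is the index the user asked for. Below is a reduced sketch of the indexing _get_pe_byidx() performs, assuming crm_report.pelist() returns the report's PE input paths ordered oldest to newest (the paths here are invented):

def get_pe_byidx(pe_list, idx):
    # Simplified stand-in: idx is an offset from the most recent PE input
    # (-1 == latest, -2 == the one before, ...).
    if len(pe_list) < abs(idx):
        print("pe input file for index %d not found" % (idx + 1))
        return None
    return pe_list[idx]

pe_list = ["report/node1/pengine/pe-input-438.bz2",
           "report/node1/pengine/pe-input-439.bz2",
           "report/node1/pengine/pe-input-440.bz2"]

print(get_pe_byidx(pe_list, -1))   # latest input: .../pe-input-440.bz2
print(get_pe_byidx(pe_list, -5))   # out of range: message, then None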

315
crm_history_peinputs.patch Normal file
View File

@ -0,0 +1,315 @@
changeset: 10788:6f9cc20dba0d
user: Dejan Muhamedagic <dejan@hello-penguin.com>
date: Mon Jul 18 12:35:57 2011 +0200
summary: Dev: Shell: spawn transition command from peinputs
diff -r b694b75d2e33 -r 6f9cc20dba0d doc/crm.8.txt
--- a/doc/crm.8.txt Mon Jul 18 12:35:57 2011 +0200
+++ b/doc/crm.8.txt Mon Jul 18 12:35:57 2011 +0200
@@ -2565,6 +2565,21 @@ were created (the DC at the time). The `
all PE input files to the current working directory (and use ssh
if necessary).
+Usage:
+...............
+ peinputs list [{<range>|<number>} ...]
+ peinputs get [{<range>|<number>} ...]
+
+ range :: <n1>:<n2>
+...............
+Example:
+...............
+ peinputs get 440:444 446
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
The `show` subcommand will print actions planned by the PE and
run graphviz (`dotty`) to display a graphical representation. Of
course, for the latter an X11 session is required. This command
@@ -2581,22 +2596,20 @@ last one, i.e. the last transition. If t
then the corresponding transition relative to the last one is
chosen.
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
Usage:
...............
- peinputs list [{<range>|<number>} ...]
- peinputs get [{<range>|<number>} ...]
- peinputs show [<number>] [nograph] [v...] [scores] [actions] [utilization]
- peinputs showdot [<number>]
-
- range :: <n1>:<n2>
+ transition show [<number>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>]
...............
-Example:
+Examples:
...............
- peinputs get 440:444 446
- peinputs show
- peinputs show 444
- peinputs show -1
- peinputs showdot 444
+ transition show
+ transition show 444
+ transition show -1
+ transition showdot 444
...............
=== `end` (`cd`, `up`)
diff -r b694b75d2e33 -r 6f9cc20dba0d shell/modules/log_patterns.py
--- a/shell/modules/log_patterns.py Mon Jul 18 12:35:57 2011 +0200
+++ b/shell/modules/log_patterns.py Mon Jul 18 12:35:57 2011 +0200
@@ -64,6 +64,6 @@ log_patterns = {
}
transition_patt = (
- "crmd: .* Processing graph.*derived from (.*bz2)", # transition start
- "crmd: .* Transition.*Source=(.*bz2): (Stopped|Complete|Terminated)", # and stop
+ "crmd: .* Processing graph.*derived from .*/pe-[^-]+-(%%)[.]bz2", # transition start
+ "crmd: .* Transition.*Source=.*/pe-[^-]+-(%%)[.]bz2.: (Stopped|Complete|Terminated)", # and stop
)
diff -r b694b75d2e33 -r 6f9cc20dba0d shell/modules/report.py
--- a/shell/modules/report.py Mon Jul 18 12:35:57 2011 +0200
+++ b/shell/modules/report.py Mon Jul 18 12:35:57 2011 +0200
@@ -177,8 +177,12 @@ class LogSyslog(object):
find out start/end file positions. Logs need to be
already open.
'''
- self.from_ts = convert_dt(from_dt)
- self.to_ts = convert_dt(to_dt)
+ if isinstance(from_dt, datetime.datetime):
+ self.from_ts = convert_dt(from_dt)
+ self.to_ts = convert_dt(to_dt)
+ else:
+ self.from_ts = from_dt
+ self.to_ts = to_dt
bad_logs = []
for log in self.f:
f = self.f[log]
@@ -498,13 +502,14 @@ class Report(Singleton):
if not os.path.isdir(self.outdir):
return []
l = []
+ trans_re_l = [x.replace("%%","") for x in transition_patt]
for node,rptlog,logfile,nextpos in a:
node_l = []
fl = glob.glob("%s/*%s*" % (self.outdir,node))
if not fl:
continue
for s in file2list(fl[0]):
- r = re.search(transition_patt[0], s)
+ r = re.search(trans_re_l[0], s)
if not r:
continue
node_l.append(r.group(1))
@@ -680,7 +685,8 @@ class Report(Singleton):
self.find_central_log()
self.read_cib()
self.set_node_colors()
- self.logobj = None
+ self.logobj = LogSyslog(self.central_log, self.log_l, \
+ self.from_dt, self.to_dt)
def prepare_source(self):
'''
Unpack a hb_report tarball.
@@ -740,6 +746,15 @@ class Report(Singleton):
try: clr = self.nodecolor[a[3]]
except: return s
return termctrl.render("${%s}%s${NORMAL}" % (clr,s))
+ def display_logs(self, l):
+ if not options.batch and sys.stdout.isatty():
+ page_string('\n'.join([ self.disp(x) for x in l ]))
+ else: # raw output
+ try: # in case user quits the next prog in pipe
+ for s in l: print s
+ except IOError, msg:
+ if not ("Broken pipe" in msg):
+ common_err(msg)
def show_logs(self, log_l = [], re_l = []):
'''
Print log lines, either matched by re_l or all.
@@ -749,18 +764,7 @@ class Report(Singleton):
if not self.central_log and not log_l:
self.error("no logs found")
return
- if not self.logobj:
- self.logobj = LogSyslog(self.central_log, log_l, \
- self.from_dt, self.to_dt)
- l = self.logobj.get_matches(re_l, log_l)
- if not options.batch and sys.stdout.isatty():
- page_string('\n'.join([ self.disp(x) for x in l ]))
- else: # raw output
- try: # in case user quits the next prog in pipe
- for s in l: print s
- except IOError, msg:
- if not ("Broken pipe" in msg):
- common_err(msg)
+ self.display_logs(self.logobj.get_matches(re_l, log_l))
def match_args(self, cib_l, args):
for a in args:
a_clone = re.sub(r':.*', '', a)
@@ -812,6 +816,34 @@ class Report(Singleton):
self.error("no resources or nodes found")
return False
self.show_logs(re_l = all_re_l)
+ def show_transition_log(self, pe_file):
+ '''
+ Search for events within the given transition.
+ '''
+ pe_base = os.path.basename(pe_file)
+ r = re.search("pe-[^-]+-([0-9]+)[.]bz2", pe_base)
+ pe_num = r.group(1)
+ trans_re_l = [x.replace("%%",pe_num) for x in transition_patt]
+ trans_start = self.logobj.search_logs(self.log_l, trans_re_l[0])
+ trans_end = self.logobj.search_logs(self.log_l, trans_re_l[1])
+ if not trans_start:
+ common_warn("transition %s start not found in logs" % pe_base)
+ return False
+ if not trans_end:
+ common_warn("transition %s end not found in logs" % pe_base)
+ return False
+ common_debug("transition start: %s" % trans_start[0])
+ common_debug("transition end: %s" % trans_end[0])
+ start_ts = syslog_ts(trans_start[0])
+ end_ts = syslog_ts(trans_end[0])
+ if not start_ts or not end_ts:
+ self.warn("strange, no timestamps found")
+ return False
+ # limit the log scope temporarily
+ self.logobj.set_log_timeframe(start_ts, end_ts)
+ self.events()
+ self.logobj.set_log_timeframe(self.from_dt, self.to_dt)
+ return True
def resource(self,*args):
'''
Show resource relevant logs.
diff -r b694b75d2e33 -r 6f9cc20dba0d shell/modules/ui.py.in
--- a/shell/modules/ui.py.in Mon Jul 18 12:35:57 2011 +0200
+++ b/shell/modules/ui.py.in Mon Jul 18 12:35:57 2011 +0200
@@ -1686,7 +1686,8 @@ Examine Pacemaker's history: node and re
self.cmd_table["resource"] = (self.resource,(1,),1,0)
self.cmd_table["node"] = (self.node,(1,),1,1)
self.cmd_table["log"] = (self.log,(0,),1,0)
- self.cmd_table["peinputs"] = (self.peinputs,(0,),1,0)
+ self.cmd_table["peinputs"] = (self.peinputs,(1,),1,0)
+ self.cmd_table["transition"] = (self.transition,(1,),1,0)
self._set_source(options.history)
def _no_source(self):
common_error("we have no source set yet! please use the source command")
@@ -1832,57 +1833,63 @@ Examine Pacemaker's history: node and re
return run_ptest(s, nograph, scores, utilization, actions, verbosity)
def peinputs(self,cmd,subcmd,*args):
"""usage: peinputs list [{<range>|<number>} ...]
- peinputs get [{<range>|<number>} ...]
- peinputs show [<number>] [nograph] [v...] [scores] [actions] [utilization]
- peinputs showdot [<number>]"""
- if subcmd in ("get","list"):
- if args:
- l = []
- for s in args:
- a = convert2ints(s.split(':'))
- if len(a) == 2 and not check_range(a):
- common_err("%s: invalid peinputs range" % a)
- return False
- l += crm_report.pelist(a)
- else:
- l = crm_report.pelist()
- if not l: return False
- if subcmd == "list":
- s = get_stdout("ls -lrt %s" % ' '.join(l))
- page_string(s)
- else:
- print '\n'.join(l)
- elif subcmd in ("show","showdot"):
- try: n = convert2ints(args[0])
- except: n = None
- startarg = 1
- if n is None:
- idx = -1
- startarg = 0 # peinput number missing
- elif n <= 0:
- idx = n - 1
- n = [] # to get all peinputs
- else:
- idx = 0
- if subcmd == "showdot":
- if not user_prefs.dotty:
- common_err("install graphviz to draw transition graphs")
+ peinputs get [{<range>|<number>} ...]"""
+ if subcmd not in ("get","list"):
+ bad_usage(cmd,subcmd)
+ return False
+ if args:
+ l = []
+ for s in args:
+ a = convert2ints(s.split(':'))
+ if len(a) == 2 and not check_range(a):
+ common_err("%s: invalid peinputs range" % a)
return False
- l = crm_report.dotlist(n)
- else:
- l = crm_report.pelist(n)
- if len(l) < abs(idx):
- common_err("pe input or dot file not found")
+ l += crm_report.pelist(a)
+ else:
+ l = crm_report.pelist()
+ if not l: return False
+ if subcmd == "list":
+ s = get_stdout("ls -lrt %s" % ' '.join(l))
+ page_string(s)
+ else:
+ print '\n'.join(l)
+ def transition(self,cmd,subcmd,*args):
+ """usage: transition show [<number>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>]"""
+ if subcmd not in ("show", "showdot"):
+ bad_usage(cmd,subcmd)
+ return False
+ try: n = convert2ints(args[0])
+ except: n = None
+ startarg = 1
+ if n is None:
+ idx = -1
+ startarg = 0 # peinput number missing
+ elif n <= 0:
+ idx = n - 1
+ n = [] # to get all peinputs
+ else:
+ idx = 0
+ if subcmd == "showdot":
+ if not user_prefs.dotty:
+ common_err("install graphviz to draw transition graphs")
return False
- if subcmd == "show":
- self.pe_file = l[idx]
- return ptestlike(self.ptest,'vv',"%s %s" % \
- (cmd, subcmd), *args[startarg:])
- else:
- show_dot_graph(l[idx])
+ l = crm_report.dotlist(n)
else:
- bad_usage(cmd,' '.join(subcmd,args))
+ l = crm_report.pelist(n)
+ if len(l) < abs(idx):
+ common_err("pe input or dot file not found")
return False
+ rc = True
+ if subcmd == "show":
+ self.pe_file = l[idx]
+ rc = ptestlike(self.ptest,'vv',"%s %s" % \
+ (cmd, subcmd), *args[startarg:])
+ if rc:
+ crm_report.show_transition_log(self.pe_file)
+ else:
+ show_dot_graph(l[idx])
+ return rc
class TopLevel(UserInterface):
'''
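For reference, a reduced sketch of how the `list`/`get` arguments of peinputs are taken apart; convert2ints() and check_range() below are simplified stand-ins for the shell's own helpers, and the real code hands each result to crm_report.pelist():

def convert2ints(parts):
    # Simplified: map the pieces of "n1:n2" (or a single "n") to ints.
    try:
        return [int(x) for x in parts]
    except ValueError:
        return None

def check_range(a):
    # Simplified sanity check for a two-element range.
    return a[0] <= a[1]

for s in ["440:444", "446", "junk"]:
    a = convert2ints(s.split(':'))
    if a is None:
        print("%s: invalid peinputs range" % s)
    elif len(a) == 2:
        print("range %d..%d (valid=%s)" % (a[0], a[1], check_range(a)))
    else:
        print("single PE input %d" % a[0])
# range 440..444 (valid=True)
# single PE input 446
# junk: invalid peinputs range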

12
crm_history_pssh.patch Normal file
View File

@ -0,0 +1,12 @@
Index: pacemaker/shell/modules/Makefile.am
===================================================================
--- pacemaker.orig/shell/modules/Makefile.am
+++ pacemaker/shell/modules/Makefile.am
@@ -35,6 +35,7 @@ modules = __init__.py \
ra.py \
report.py \
log_patterns.py \
+ crm_pssh.py \
singletonmixin.py \
template.py \
term.py \

View File

@ -0,0 +1,98 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313760016 -7200
# Node ID 3a81b7eae66672dd9873fe6b53ee3c0da6fc87d7
# Parent e8ea8fb95f310997995576ee831693b0d3b2736a
Medium: Shell: support for LRM secrets in resource level
diff --git a/doc/crm.8.txt b/doc/crm.8.txt
--- a/doc/crm.8.txt
+++ b/doc/crm.8.txt
@@ -869,6 +869,34 @@ Example:
param ip_0 show ip
...............
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than CIB
+in order to prevent accidental data exposure. Use the `secret`
+command to manage such parameters. `stash` and `unstash` move the
+value from the CIB and back to the CIB respectively. The `set`
+subcommand sets the parameter to the provided value. `delete`
+removes the parameter completely. `show` displays the value of
+the parameter from the local file. Use `check` to verify if the
+local file content is valid.
+
+Usage:
+...............
+ secret <rsc> set <param> <value>
+ secret <rsc> stash <param>
+ secret <rsc> unstash <param>
+ secret <rsc> delete <param>
+ secret <rsc> show <param>
+ secret <rsc> check <param>
+...............
+Example:
+...............
+ secret fence_1 show password
+ secret fence_1 stash password
+ secret fence_1 set password secret_value
+...............
+
[[cmdhelp_resource_meta,manage a meta attribute]]
==== `meta`
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -661,7 +661,8 @@ def manage_attr(cmd,attr_ext_commands,*a
else:
bad_usage(cmd,' '.join(args))
return False
- elif args[1] in ('delete','show'):
+ elif args[1] in ('delete','show') or \
+ (cmd == "secret" and args[1] in ('stash','unstash','check')):
if len(args) == 3:
if not is_name_sane(args[0]) \
or not is_name_sane(args[2]):
@@ -770,6 +771,14 @@ program.
'delete': "crm_resource -z -r '%s' -d '%s'",
'show': "crm_resource -z -r '%s' -g '%s'",
}
+ rsc_secret = {
+ 'set': "cibsecret set '%s' '%s' '%s'",
+ 'stash': "cibsecret stash '%s' '%s'",
+ 'unstash': "cibsecret unstash '%s' '%s'",
+ 'delete': "cibsecret delete '%s' '%s'",
+ 'show': "cibsecret get '%s' '%s'",
+ 'check': "cibsecret check '%s' '%s'",
+ }
rsc_refresh = "crm_resource -R"
rsc_refresh_node = "crm_resource -R -H '%s'"
rsc_reprobe = "crm_resource -P"
@@ -787,6 +796,7 @@ program.
self.cmd_table["migrate"] = (self.migrate,(1,4),0,1)
self.cmd_table["unmigrate"] = (self.unmigrate,(1,1),0,1)
self.cmd_table["param"] = (self.param,(3,4),1,1)
+ self.cmd_table["secret"] = (self.secret,(3,4),1,1)
self.cmd_table["meta"] = (self.meta,(3,4),1,1)
self.cmd_table["utilization"] = (self.utilization,(3,4),1,1)
self.cmd_table["failcount"] = (self.failcount,(3,4),0,0)
@@ -924,6 +934,16 @@ program.
param <rsc> show <param>"""
d = lambda: manage_attr(cmd,self.rsc_param,*args)
return d()
+ def secret(self,cmd,*args):
+ """usage:
+ secret <rsc> set <param> <value>
+ secret <rsc> stash <param>
+ secret <rsc> unstash <param>
+ secret <rsc> delete <param>
+ secret <rsc> show <param>
+ secret <rsc> check <param>"""
+ d = lambda: manage_attr(cmd,self.rsc_secret,*args)
+ return d()
def meta(self,cmd,*args):
"""usage:
meta <rsc> set <attr> <value>
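The rsc_secret mapping above is the whole trick: each `secret` subcommand is a format string for the external cibsecret tool, filled in by manage_attr(). A stand-alone sketch of just that formatting step (argument checking and command execution are omitted):

rsc_secret = {
    'set':     "cibsecret set '%s' '%s' '%s'",
    'stash':   "cibsecret stash '%s' '%s'",
    'unstash': "cibsecret unstash '%s' '%s'",
    'delete':  "cibsecret delete '%s' '%s'",
    'show':    "cibsecret get '%s' '%s'",
    'check':   "cibsecret check '%s' '%s'",
}

def secret_cmdline(rsc, subcmd, param, value=None):
    # Build the command line the shell would hand to its ext_cmd() helper.
    fmt = rsc_secret[subcmd]
    if subcmd == 'set':
        return fmt % (rsc, param, value)
    return fmt % (rsc, param)

print(secret_cmdline("fence_1", "stash", "password"))
# -> cibsecret stash 'fence_1' 'password'
print(secret_cmdline("fence_1", "set", "password", "secret_value"))
# -> cibsecret set 'fence_1' 'password' 'secret_value'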

View File

@ -0,0 +1,28 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314633641 -7200
# Node ID f77e52725f2d98c219d8b22208da0b89b3d42112
# Parent ccd0c1e1edf9f23cafb4363014acba755f1b4e25
Low: Shell: let the pager decide how to handle output smaller than terminal
Instead of trying to calculate the size of the output, which may
not be trivial, better let the pager deal with it. For instance,
less(1) can be configured to exit immediately on a
less-than-screenful of input (-F). IIRC, more(1) does that
automatically.
diff --git a/shell/modules/utils.py b/shell/modules/utils.py
--- a/shell/modules/utils.py
+++ b/shell/modules/utils.py
@@ -524,10 +524,7 @@ def page_string(s):
'Write string through a pager.'
if not s:
return
- w,h = get_winsize()
- if s.count('\n') < h:
- print s
- elif not user_prefs.pager or not sys.stdout.isatty() or options.batch:
+ if not user_prefs.pager or not sys.stdout.isatty() or options.batch:
print s
else:
opts = ""

34
crm_path_bnc712605.patch Normal file
View File

@ -0,0 +1,34 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1313589488 -7200
# Node ID 0abb257259ed722abaa32a237c3c284c08ec0737
# Parent 3681d3471fdecde109ea7c25ab2ceb31e1e8646f
Low: Shell: add crm execute directory to the PATH if not already present (bnc#712605)
Important if crm is run as non-root user. We use sys.argv[0],
but perhaps it'd be better to use autoconf @sbindir@ (or however
it's named) and set it in vars.sbindir.
diff --git a/shell/modules/main.py b/shell/modules/main.py
--- a/shell/modules/main.py
+++ b/shell/modules/main.py
@@ -16,6 +16,7 @@
#
import sys
+import os
import shlex
import getopt
@@ -205,7 +206,10 @@ vars = Vars.getInstance()
levels = Levels.getInstance()
# prefer the user set PATH
-os.putenv("PATH", "%s:%s" % (os.getenv("PATH"),vars.crm_daemon_dir))
+mybinpath = os.path.dirname(sys.argv[0])
+for p in mybinpath, vars.crm_daemon_dir:
+ if p not in os.environ["PATH"].split(':'):
+ os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], p)
def set_interactive():
'''Set the interactive option only if we're on a tty.'''

148
crm_site_9b07d41c73b4.patch Normal file
View File

@ -0,0 +1,148 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314872213 -7200
# Node ID 9b07d41c73b456e8189fea757a5c3d9e5b32512d
# Parent 825cb3e79d7bc1c4ac30468f8c028c9129d00541
High: Shell: geo-cluster support commands
diff --git a/doc/crm.8.txt b/doc/crm.8.txt
--- a/doc/crm.8.txt
+++ b/doc/crm.8.txt
@@ -1133,6 +1133,31 @@ Example:
status-attr node_1 show pingd
...............
+[[cmdhelp_site,site support]]
+=== `site`
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ ticket {grant|revoke|show|time|delete} <ticket>
+...............
+Example:
+...............
+ ticket grant ticket1
+...............
+
[[cmdhelp_options,user preferences]]
=== `options`
@@ -1652,6 +1677,8 @@ resource (or resources) if the ticket is
either `stop` or `demote` depending on whether a resource is
multi-state.
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
Usage:
...............
rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
diff --git a/shell/modules/completion.py b/shell/modules/completion.py
--- a/shell/modules/completion.py
+++ b/shell/modules/completion.py
@@ -173,6 +173,10 @@ def report_pe_list_peinputs(idx,delimite
if delimiter:
return ' '
return crm_report.peinputs_list() + ["v"]
+def ticket_cmd_list(idx,delimiter = False):
+ if delimiter:
+ return ' '
+ return ["grant","revoke","show","time","delete"]
#
# completion for primitives including help for parameters
@@ -488,6 +492,9 @@ completer_lists = {
"peinputs" : (report_pe_list_peinputs,loop),
"transition" : (report_pe_list_transition,),
},
+ "site" : {
+ "ticket" : (ticket_cmd_list,),
+ },
}
def get_completer_list(level,cmd):
'Return a list of completer functions.'
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1938,6 +1938,61 @@ Examine Pacemaker's history: node and re
crm_report.show_transition_log(f)
return rc
+class Site(UserInterface):
+ '''
+ The site class
+ '''
+ lvl_name = "site"
+ desc_short = "Geo-cluster support"
+ desc_long = """
+The site level.
+
+Geo-cluster related management.
+"""
+ crm_ticket = {
+ 'grant': "crm_ticket -t '%s' -v true",
+ 'revoke': "crm_ticket -t '%s' -v false",
+ 'delete': "crm_ticket -t '%s' -D",
+ 'show': "crm_ticket -t '%s' -G",
+ 'time': "crm_ticket -t '%s' -T",
+ }
+ def __init__(self):
+ UserInterface.__init__(self)
+ self.cmd_table["ticket"] = (self.ticket,(2,2),1,0)
+ def ticket(self, cmd, subcmd, ticket):
+ "usage: ticket {grant|revoke|show|time|delete} <ticket>"
+ try:
+ attr_cmd = self.crm_ticket[subcmd]
+ except:
+ bad_usage(cmd,'%s %s' % (subcmd, ticket))
+ return False
+ if not is_name_sane(ticket):
+ return False
+ if subcmd not in ("show", "time"):
+ return ext_cmd(attr_cmd % ticket) == 0
+ l = stdout2list(attr_cmd % ticket)
+ try:
+ val = l[0].split('=')[3]
+ except:
+ common_warn("apparently nothing to show for ticket %s" % ticket)
+ return False
+ if subcmd == "show":
+ if val == "false":
+ print "ticket %s is revoked" % ticket
+ elif val == "true":
+ print "ticket %s is granted" % ticket
+ else:
+ common_warn("unexpected value for ticket %s: %s" % (ticket, val))
+ return False
+ else: # time
+ if not is_int(val):
+ common_warn("unexpected value for ticket %s: %s" % (ticket, val))
+ return False
+ if val == "-1":
+ print "%s: no such ticket" % ticket
+ return False
+ print "ticket %s last time granted on %s" % (ticket, time.ctime(int(val)))
+
class TopLevel(UserInterface):
'''
The top level.
@@ -1959,6 +2014,7 @@ class TopLevel(UserInterface):
self.cmd_table['node'] = NodeMgmt
self.cmd_table['options'] = CliOptions
self.cmd_table['history'] = History
+ self.cmd_table['site'] = Site
self.cmd_table['status'] = (self.status,(0,5),0,0)
self.cmd_table['ra'] = RA
setup_aliases(self)
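As with the resource-level secrets, the ticket subcommands are format strings around the crm_ticket tool; grant/revoke/delete run the command as-is, while show and time additionally parse its output. Reproducing only the string mapping from the hunk above:

crm_ticket_cmds = {
    'grant':  "crm_ticket -t '%s' -v true",
    'revoke': "crm_ticket -t '%s' -v false",
    'delete': "crm_ticket -t '%s' -D",
    'show':   "crm_ticket -t '%s' -G",
    'time':   "crm_ticket -t '%s' -T",
}

for subcmd in ("grant", "show", "time"):
    print(crm_ticket_cmds[subcmd] % "ticket1")
# crm_ticket -t 'ticket1' -v true
# crm_ticket -t 'ticket1' -G
# crm_ticket -t 'ticket1' -T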

View File

@ -0,0 +1,326 @@
# HG changeset patch
# User Dejan Muhamedagic <dejan@hello-penguin.com>
# Date 1314783705 -7200
# Node ID 825cb3e79d7bc1c4ac30468f8c028c9129d00541
# Parent f77e52725f2d98c219d8b22208da0b89b3d42112
High: Shell: support for rsc_ticket
diff --git a/doc/crm.8.txt b/doc/crm.8.txt
--- a/doc/crm.8.txt
+++ b/doc/crm.8.txt
@@ -1639,6 +1639,34 @@ Example:
order o1 inf: A ( B C )
...............
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The `loss-policy` attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either `stop` or `demote` depending on whether a resource is
+multi-state.
+
+Usage:
+...............
+ rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+ loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+ rsc_ticket ticket-A_public-ip ticket-A: public-ip
+ rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+ rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+...............
+
+
[[cmdhelp_configure_property,set a cluster property]]
==== `property`
diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py
--- a/shell/modules/cibconfig.py
+++ b/shell/modules/cibconfig.py
@@ -1243,7 +1243,7 @@ class CibSimpleConstraint(CibObject):
if node.getElementsByTagName("resource_set"):
col = rsc_set_constraint(node,obj_type)
else:
- col = two_rsc_constraint(node,obj_type)
+ col = simple_rsc_constraint(node,obj_type)
if not col:
return None
symm = node.getAttribute("symmetrical")
@@ -1264,6 +1264,27 @@ class CibSimpleConstraint(CibObject):
remove_id_used_attributes(oldnode)
return headnode
+class CibRscTicket(CibSimpleConstraint):
+ '''
+ rsc_ticket constraint.
+ '''
+ def repr_cli_head(self,node):
+ obj_type = vars.cib_cli_map[node.tagName]
+ node_id = node.getAttribute("id")
+ s = cli_display.keyword(obj_type)
+ id = cli_display.id(node_id)
+ ticket = cli_display.ticket(node.getAttribute("ticket"))
+ if node.getElementsByTagName("resource_set"):
+ col = rsc_set_constraint(node,obj_type)
+ else:
+ col = simple_rsc_constraint(node,obj_type)
+ if not col:
+ return None
+ a = node.getAttribute("loss-policy")
+ if a:
+ col.append("loss-policy=%s" % a)
+ return "%s %s %s: %s" % (s,id,ticket,' '.join(col))
+
class CibProperty(CibObject):
'''
Cluster properties.
@@ -1371,6 +1392,7 @@ cib_object_map = {
"rsc_location": ( "location", CibLocation, "constraints" ),
"rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ),
"rsc_order": ( "order", CibSimpleConstraint, "constraints" ),
+ "rsc_ticket": ( "rsc_ticket", CibRscTicket, "constraints" ),
"cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ),
"rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ),
"op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ),
diff --git a/shell/modules/clidisplay.py b/shell/modules/clidisplay.py
--- a/shell/modules/clidisplay.py
+++ b/shell/modules/clidisplay.py
@@ -62,6 +62,8 @@ class CliDisplay(Singleton):
return self.otherword(4, s)
def score(self, s):
return self.otherword(5, s)
+ def ticket(self, s):
+ return self.otherword(5, s)
user_prefs = UserPrefs.getInstance()
vars = Vars.getInstance()
diff --git a/shell/modules/cliformat.py b/shell/modules/cliformat.py
--- a/shell/modules/cliformat.py
+++ b/shell/modules/cliformat.py
@@ -226,22 +226,25 @@ def rsc_set_constraint(node,obj_type):
action = n.getAttribute("action")
for r in n.getElementsByTagName("resource_ref"):
rsc = cli_display.rscref(r.getAttribute("id"))
- q = (obj_type == "colocation") and role or action
+ q = (obj_type == "order") and action or role
col.append(q and "%s:%s"%(rsc,q) or rsc)
cnt += 1
if not sequential:
col.append(")")
- if cnt <= 2: # a degenerate thingie
+ if (obj_type != "rsc_ticket" and cnt <= 2) or \
+ (obj_type == "rsc_ticket" and cnt <= 1): # a degenerate thingie
col.insert(0,"_rsc_set_")
return col
-def two_rsc_constraint(node,obj_type):
+def simple_rsc_constraint(node,obj_type):
col = []
if obj_type == "colocation":
col.append(mkrscrole(node,"rsc"))
col.append(mkrscrole(node,"with-rsc"))
- else:
+ elif obj_type == "order":
col.append(mkrscaction(node,"first"))
col.append(mkrscaction(node,"then"))
+ else: # rsc_ticket
+ col.append(mkrscrole(node,"rsc"))
return col
# this pre (or post)-processing is oversimplified
diff --git a/shell/modules/completion.py b/shell/modules/completion.py
--- a/shell/modules/completion.py
+++ b/shell/modules/completion.py
@@ -467,6 +467,7 @@ completer_lists = {
"location" : (null_list,rsc_id_list),
"colocation" : (null_list,null_list,rsc_id_list,loop),
"order" : (null_list,null_list,rsc_id_list,loop),
+ "rsc_ticket" : (null_list,null_list,rsc_id_list,loop),
"property" : (property_complete,loop),
"rsc_defaults" : (prim_complete_meta,loop),
"op_defaults" : (op_attr_list,loop),
diff --git a/shell/modules/parse.py b/shell/modules/parse.py
--- a/shell/modules/parse.py
+++ b/shell/modules/parse.py
@@ -178,6 +178,15 @@ def parse_op(s):
head_pl.append(["name",s[0]])
return cli_list
+def cli_parse_ticket(ticket,pl):
+ if ticket.endswith(':'):
+ ticket = ticket.rstrip(':')
+ else:
+ syntax_err(ticket, context = 'rsc_ticket')
+ return False
+ pl.append(["ticket",ticket])
+ return True
+
def cli_parse_score(score,pl,noattr = False):
if score.endswith(':'):
score = score.rstrip(':')
@@ -197,6 +206,7 @@ def cli_parse_score(score,pl,noattr = Fa
else:
pl.append(["score-attribute",score])
return True
+
def is_binary_op(s):
l = s.split(':')
if len(l) == 2:
@@ -302,13 +312,13 @@ def parse_location(s):
return False
return cli_list
-def cli_opt_symmetrical(p,pl):
+def cli_opt_attribute(type, p, pl, attr):
if not p:
return True
pl1 = []
cli_parse_attr([p],pl1)
- if len(pl1) != 1 or not find_value(pl1,"symmetrical"):
- syntax_err(p,context = "order")
+ if len(pl1) != 1 or not find_value(pl1, attr):
+ syntax_err(p,context = type)
return False
pl += pl1
return True
@@ -490,7 +500,33 @@ def parse_order(s):
resource_set_obj = ResourceSet(type,s[3:],cli_list)
if not resource_set_obj.parse():
return False
- if not cli_opt_symmetrical(symm,head_pl):
+ if not cli_opt_attribute(type, symm, head_pl, "symmetrical"):
+ return False
+ return cli_list
+
+def parse_rsc_ticket(s):
+ cli_list = []
+ head_pl = []
+ type = "rsc_ticket"
+ cli_list.append([s[0],head_pl])
+ if len(s) < 4:
+ syntax_err(s,context = "rsc_ticket")
+ return False
+ head_pl.append(["id",s[1]])
+ if not cli_parse_ticket(s[2],head_pl):
+ return False
+ # save loss-policy for later (if it exists)
+ loss_policy = ""
+ if is_attribute(s[len(s)-1],"loss-policy"):
+ loss_policy = s.pop()
+ if len(s) == 4:
+ if not cli_parse_rsc_role(s[3], head_pl):
+ return False
+ else:
+ resource_set_obj = ResourceSet(type, s[3:], cli_list)
+ if not resource_set_obj.parse():
+ return False
+ if not cli_opt_attribute(type, loss_policy, head_pl, attr = "loss-policy"):
return False
return cli_list
@@ -501,6 +537,8 @@ def parse_constraint(s):
return parse_colocation(s)
elif keyword_cmp(s[0], "order"):
return parse_order(s)
+ elif keyword_cmp(s[0], "rsc_ticket"):
+ return parse_rsc_ticket(s)
def parse_property(s):
cli_list = []
head_pl = []
@@ -708,6 +746,7 @@ class CliParser(object):
"colocation": (3,parse_constraint),
"collocation": (3,parse_constraint),
"order": (3,parse_constraint),
+ "rsc_ticket": (3,parse_constraint),
"monitor": (3,parse_op),
"node": (2,parse_node),
"property": (2,parse_property),
diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in
--- a/shell/modules/ui.py.in
+++ b/shell/modules/ui.py.in
@@ -1400,6 +1400,7 @@ cluster.
self.cmd_table["location"] = (self.conf_location,(2,),1,0)
self.cmd_table["colocation"] = (self.conf_colocation,(2,),1,0)
self.cmd_table["order"] = (self.conf_order,(2,),1,0)
+ self.cmd_table["rsc_ticket"] = (self.conf_rsc_ticket,(2,),1,0)
self.cmd_table["property"] = (self.conf_property,(1,),1,0)
self.cmd_table["rsc_defaults"] = (self.conf_rsc_defaults,(1,),1,0)
self.cmd_table["op_defaults"] = (self.conf_op_defaults,(1,),1,0)
@@ -1632,6 +1633,10 @@ cluster.
"""usage: order <id> score-type: <first-rsc>[:<action>] <then-rsc>[:<action>]
[symmetrical=<bool>]"""
return self.__conf_object(cmd,*args)
+ def conf_rsc_ticket(self,cmd,*args):
+ """usage: rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]"""
+ return self.__conf_object(cmd,*args)
def conf_property(self,cmd,*args):
"usage: property [$id=<set_id>] <option>=<value>"
return self.__conf_object(cmd,*args)
diff --git a/shell/modules/vars.py.in b/shell/modules/vars.py.in
--- a/shell/modules/vars.py.in
+++ b/shell/modules/vars.py.in
@@ -53,6 +53,7 @@ class Vars(Singleton):
"rsc_location": "location",
"rsc_colocation": "colocation",
"rsc_order": "order",
+ "rsc_ticket": "rsc_ticket",
"cluster_property_set": "property",
"rsc_defaults": "rsc_defaults",
"op_defaults": "op_defaults",
@@ -62,13 +63,13 @@ class Vars(Singleton):
container_tags = ("group", "clone", "ms", "master")
clonems_tags = ("clone", "ms", "master")
resource_tags = ("primitive","group","clone","ms","master")
- constraint_tags = ("rsc_location","rsc_colocation","rsc_order")
+ constraint_tags = ("rsc_location","rsc_colocation","rsc_order","rsc_ticket")
constraint_rsc_refs = ("rsc","with-rsc","first","then")
children_tags = ("group", "primitive")
nvpairs_tags = ("meta_attributes", "instance_attributes", "utilization")
defaults_tags = ("rsc_defaults","op_defaults")
resource_cli_names = ("primitive","group","clone","ms","master")
- constraint_cli_names = ("location","colocation","collocation","order")
+ constraint_cli_names = ("location","colocation","collocation","order","rsc_ticket")
nvset_cli_names = ("property","rsc_defaults","op_defaults")
op_cli_names = ("monitor", "start", "stop", "migrate_to", "migrate_from","promote","demote","notify")
ra_operations = ("probe", "monitor", "start", "stop",
diff --git a/shell/modules/xmlutil.py b/shell/modules/xmlutil.py
--- a/shell/modules/xmlutil.py
+++ b/shell/modules/xmlutil.py
@@ -520,7 +520,8 @@ def mss(node_list):
def constraints(node_list):
return filter_on_tag(node_list,"rsc_location") \
+ filter_on_tag(node_list,"rsc_colocation") \
- + filter_on_tag(node_list,"rsc_order")
+ + filter_on_tag(node_list,"rsc_order") \
+ + filter_on_tag(node_list,"rsc_ticket")
def properties(node_list):
return filter_on_tag(node_list,"cluster_property_set") \
+ filter_on_tag(node_list,"rsc_defaults") \
@@ -562,7 +563,8 @@ def constraints_cli(node_list):
return filter_on_type(node_list,"location") \
+ filter_on_type(node_list,"colocation") \
+ filter_on_type(node_list,"collocation") \
- + filter_on_type(node_list,"order")
+ + filter_on_type(node_list,"order") \
+ + filter_on_type(node_list,"rsc_ticket")
def properties_cli(cl):
return filter_on_type(cl,"property") \
+ filter_on_type(cl,"rsc_defaults") \
@@ -601,6 +603,8 @@ def referenced_resources(node):
elif xml_obj_type == "rsc_order":
node_list = node.getElementsByTagName("first") + \
node.getElementsByTagName("then")
+ elif xml_obj_type == "rsc_ticket":
+ node_list = node.getElementsByTagName("rsc")
return [x.getAttribute("id") for x in node_list]
def rename_id(node,old_id,new_id):
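To make the new grammar concrete, here is a very reduced model of parse_rsc_ticket(): it returns a plain dict instead of the shell's cli_list structure and only handles the single-resource form (resource sets are left to the real ResourceSet parser).

def parse_rsc_ticket_sketch(tokens):
    if len(tokens) < 4 or tokens[0] != "rsc_ticket":
        return None
    head = {"id": tokens[1]}
    ticket = tokens[2]
    if not ticket.endswith(':'):        # cli_parse_ticket: the ticket id must end in ':'
        return None
    head["ticket"] = ticket.rstrip(':')
    rest = tokens[3:]
    if rest and rest[-1].startswith("loss-policy="):
        head["loss-policy"] = rest.pop().split('=', 1)[1]
    if len(rest) == 1:
        rsc = rest[0].split(':')        # optional :<role> suffix
        head["rsc"] = rsc[0]
        if len(rsc) == 2:
            head["rsc-role"] = rsc[1]
    else:
        head["resource_set"] = rest     # the real parser builds a ResourceSet here
    return head

print(parse_rsc_ticket_sketch(
    "rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence".split()))
# id=ticket-A_bigdb, ticket=ticket-A, loss-policy=fence, rsc=bigdb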

View File

@ -1,3 +1,173 @@
-------------------------------------------------------------------
Tue Sep 20 14:27:47 UTC 2011 - tserong@suse.com
- Upgrade to 1.1.6.
- PE: Demote from Master does not clear previous errors
- crmd: Prevent secondary DC fencing resulting from CIB updates
that are lost due to elections
- crmd: Log duplicate DC detection as a WARNING not ERROR
- crmd: Bug lf#2632 - Correctly handle nodes that return faster
than stonith
- Core: Treat GNUTLS_E_UNEXPECTED_PACKET_LENGTH as normal
termination of a TLS session
- cib: Call gnutls_bye() and shutdown() when disconnecting from
remote TLS connections
- cib: Remove disconnected remote connections from mainloop
- cib: Attempt a graceful sign-off for remote TLS connections
- Core: Ensure there is sufficient space for EOS when building
short-form option strings (prevents segfault)
- Core: Fix variable expansion in pkg-config files
- PE: Resolve memory leak reported by valgrind
- PE: Fix memory leak for re-allocated resources reported by
valgrind
- PE: Improve the merging with template's operations
- crmd: Allow nodes to fence themselves if they're the last one
standing (lf#2584)
- stonith: Add an API call for listing installed agents
- stonith: Allow the fencing history to be queried
- stonith: Ensure completed operations are recorded as such in
the history
- stonith: Support --quiet to display just the seconds since
epoch at which a node was last shot
- stonith: Serialize actions for a given device
- stonith: Add missing entries to stonith_error2string() (missing
error messages)
- Shell: geo-cluster support commands
- Shell: support for rsc_ticket
- Shell: let the pager decide how to handle output smaller than
terminal
- Shell: look for log segments with more care and don't throw
exception on seek (bnc#713939)
- Shell: several history improvements
- tools: crm_attribute - Update tickets directly to cib instead
of through attrd
- Shell: support for LRM secrets in resource level
- Shell: enable removal of unmanaged resources (bnc#696506)
- Shell: add crm execute directory to the PATH if not already present (bnc#712605)
- Shell: update log patterns for history
- Shell: relax transition acceptance in history
- tools: crm_simulate - Send logging to stdout when additional detail is
requested so that grep works as intended
- PE: Prevent services being active if dependencies on clones are not
satisfied (bnc#707150)
- PE: Prevent resource shuffling when the 'utilization' placement
strategy is used
- PE: Implement resource template
- fencing: Add support for --reboot to stonith_admin
- Fencing: Downgrade an error message for a valid scenario
- crmd: Ensure we do not attempt to perform action on failed nodes
- Fencing: admin - Disable stderr logging by default
- Fencing: Fix use-of-NULL by g_hash_table_lookup
- Tools: Correctly log corosync node IDs (lf#2621)
- pacemaker:ping: Incorporate fping functionality from Dan Urist
- crmd: Recurring actions shouldn't cause the last non-recurring action
to be forgotten
- crmd: Do not wait for actions that were pending on dead nodes
- crmd: Cancel timers for actions that were pending on dead nodes
- PE: Correctly recognise which recurring operations are currently
active
- PE: Ensure role is preserved for unmanaged resources
- PE: Ensure unmanaged resources have the correct role set so the
correct monitor operation is chosen
- PE: Implement cluster ticket and deadman
- Build: Add crm_ticket into spec file
- Tools: crm_attribute - No need to determine the node for tickets
management
- Tools: crm_mon crm_resource - Display the "operation_key" instead of
the operation "id" if available (bnc#707201)
- CTS improvements.
- Shell: reimplement the history latest command (bnc#710958)
- Shell: Don't limit to last hour when examining hb_reports
- Shell: improve capture log slices for transitions (bnc#710907)
- Shell: allow specifying PE files as relative paths in order to
disambiguate between PE inputs with the same number (bnc#710655)
- Shell: remove peinputs "get" and "list" subcommands, just use 'v' for the
long listing (bnc#711060)
- Shell: remove transition "show" subcommand, if there is no subcommand
it is assumed that the user wants to do "show" (bnc#711060)
- Shell: detect (and ignore) empty transitions
- Shell: review transition interface (bnc#710655)
- Fencing: Improved pcmk_host_map parsing
- Fencing: Consolidate pcmk_host_map into run_stonith_agent so that it
is applied consistently
- Shell: Update regression tests for new stonith metadata
- pengine: Correctly determine the state of multi-state resources with a
partial operation history
- Core: Cancelled and pending operations do not count as failed
- PE: Ensure restarts due to definition changes cause the start action
to be re-issued not probes
- Fencing: Prevent use-of-NULL by g_hash_table_lookup
- Shell: Install support for pssh
- PE: Support of monitor op with role="Stopped"
- Shell: improve logic for when to use the pager
- Shell: don't print error on Broken pipe
- cib: Record and display the originator details of CIB changes for
schema "pacemaker-1.2"
- crmd: Display the name and value of the transient attribute(s) that
caused a new transition
- stonith: Correctly handle synchronous calls
- PE: A demote operation is implied by the stonith operation (lf#2606)
- PE: Group migration after failures and non-default utilization
policies (lf#2613, lf#2619)
- Shell: Include log file querying tools and "history" extension.
- Significant number of fixes for issues reported by Coverity.
- ais: Handle IPC error before checking for NULL data (bnc#702907)
- PE: save PE inputs again (bnc#703723)
- PE: Before migrating a utilization-using resource to a node, take off
the load which will no longer run there (lf#2599, bnc#695440)
- cib: Record and display the originator details of CIB changes
- cib: Update "cib-last-written" for live CIB on a config change
- cib: Filter out unnecessary "update-*" fields on digest calculating
- doc: man page for crm
- cib: Check the validation version before adding the originator details
of a CIB change
- PE: Don't log resource states unless debug is on
- Shell: implement -w,--wait option to wait for the transition to finish
- crmd: Store only the last and last failed operation in the CIB
- Core: Use pre-glib2-2.28 version of g_str_hash to ensure consistent
score calculation for clone instances
- Shell: repair template list command
- PE: Optimize the placement strategy and handle stickiness for
utilization-based placement correctly (bnc#669686)
- Shell: print error instead of warning and exit with 1 if a resource
cannot be deleted (bnc#680401)
- Tools: attrd - Do not initiate updates for deleted attributes
- Tools: Make progress when attrd_updater is called repeatedly within
the dampen interval but with the same value (lf#2528)
- tools: Do not pass bash specific switches to other SHELLs (lf#2556)
- crmd: Watch for config option changes from the CIB even if we're not
the DC (lf#2509)
- PE: Prevent shuffling by choosing the correct clone instance to stop
(lf#2574)
- PE: Avoid group restart when clone (re)starts on an unrelated node
(lf#2581, bnc#681595)
- PE: Set reasonable default limits for the number of input files saved
- tools: Do not recursively look for resource meta attributes (lf#2563)
- PE: Do not write out duplicated input files (lf#2582)
- cib: Use SIGPIPE for re-enabling disk writes after an error (lf#2229)
- crmd: Fail actions that were scheduled for a failed/fenced node
(lf#2559)
- Fencing: Clean up the metadata creation
- Stonith: Allow the fencing agent operations to be overridden on a
per-device basis (lf#2557)
- Tools: crm_resource - Indicate master/slave state when querying the
parent resource (lf#2477)
- Tools: crm_mon - Display the true number of configured resources
(lf#2549)
- RA: Fix handling of 'devices' in the HealthSMART agent
- Shell: print warning if start or stop interval is not zero
- Shell: Check for violations of uniqueness for instance parameters
during commit
- PE: Indicate resource failures even if on-fail="ignore"
- ais: Ensure we drain the corosync queue of messages when glib tells us
there is input
- crmd: Catch fence operations that claim to succeed but did not really
- Core: Use pre-glib2-2.28 version of g_str_hash to ensure consistent
score calculation for clone instances
- Shell: repair template list command
- Shell: node clearstate must flush LRM state too (bnc#653795)
- Upstream version cs: 9971ebba4494 (release 1.1.6)
-------------------------------------------------------------------
Fri Aug 5 06:09:30 UTC 2011 - tserong@novell.com

View File

@ -22,7 +22,7 @@
%define _libexecdir %{_libdir}
%endif
%define with_extra_warnings 0
%define with_debugging 0
%define with_debugging 1
%define with_ais_support 1
%define with_heartbeat_support 0
%define with_regression_tests 0
@ -38,8 +38,8 @@
Name: pacemaker
Summary: The Pacemaker scalable High-Availability cluster resource manager
Version: 1.1.5
Release: 6
Version: 1.1.6
Release: 0
License: GPLv2+ ; LGPLv2.1+
Url: http://www.clusterlabs.org
Group: Productivity/Clustering/HA
@ -47,6 +47,28 @@ Source: pacemaker.tar.bz2
Source2: %{doc_pkg}.tar.gz
Source100: pacemaker.rpmlintrc
Patch1: pacemaker-cts-startcmd.patch
Patch2: acl_fix_d44ff2711662.patch
Patch10: crm_history.patch
Patch11: crm_history_peinputs.patch
Patch12: crm_history_pssh.patch
Patch13: crm_history_1_d0359dca5dba.patch
Patch14: crm_history_2_29fd4f04c01f.patch
Patch15: crm_history_3_b3a014c0f85b.patch
Patch16: crm_history_4_a09974a06cdf.patch
Patch17: crm_history_5_c3068d22de72.patch
Patch18: crm_history-fix-hb_report-limit.patch
Patch19: crm_history_6_441f4448eba6.patch
Patch20: crm_history_7_3f3c348aaaed.patch
Patch21: crm_history_8_3681d3471fde.patch
Patch22: crm_path_bnc712605.patch
Patch23: crm_deleteunmanaged.patch
Patch24: crm_lrmsecrets_3a81b7eae666.patch
Patch25: crm_history_9_709ef91cfada.patch
Patch26: crm_history_10_d21f988a419c.patch
Patch27: crm_history_11_ccd0c1e1edf9.patch
Patch28: crm_pager_f77e52725f2d.patch
Patch29: crm_tickets_825cb3e79d7b.patch
Patch30: crm_site_9b07d41c73b4.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-build
AutoReqProv: on
Conflicts: heartbeat < 3.0
@ -71,6 +93,7 @@ Requires: heartbeat
BuildRequires: libbz2-devel
Suggests: graphviz
Recommends: libdlm resource-agents
Recommends: python-pssh
%if 0%{?suse_version} > 1100
BuildRequires: docbook-xsl-stylesheets
%endif
@ -156,6 +179,28 @@ Authors:
###########################################################
%setup -a 2 -n pacemaker -q
%patch1 -p1
%patch2 -p1 -R
%patch10 -p1
%patch11 -p1
%patch12 -p1
%patch13 -p1
%patch14 -p1
%patch15 -p1
%patch16 -p1
%patch17 -p1
%patch18 -p1
%patch19 -p1
%patch20 -p1
%patch21 -p1
%patch22 -p1
%patch23 -p1
%patch24 -p1
%patch25 -p1
%patch26 -p1
%patch27 -p1
%patch28 -p1
%patch29 -p1
%patch30 -p1
###########################################################
%build
@ -271,6 +316,7 @@ fi
%{_sbindir}/crm_resource
%{_sbindir}/crm_report
%{_sbindir}/crm_standby
%{_sbindir}/crm_ticket
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
@ -306,10 +352,14 @@ fi
%exclude %{pcmk_docdir}/AUTHORS
%exclude %{pcmk_docdir}/COPYING
%exclude %{pcmk_docdir}/COPYING.LIB
%exclude %{_libdir}/pkgconfig
%doc %{pcmk_docdir}
# %doc %{pcmk_docdir}/*.html
%doc %{_mandir}/man8/*.8*
%doc %{_mandir}/man7/*.7*
%doc %{pcmk_docdir}/README.hb2openais
%doc %{pcmk_docdir}/acls.txt
%doc %{pcmk_docdir}/crm_fencing.txt
%files -n libpacemaker3
%defattr(-,root,root)
@ -330,6 +380,7 @@ fi
%{_includedir}/pacemaker
# %{_includedir}/heartbeat/fencing
%{_libdir}/*.so
%{_libdir}/pkgconfig/*.pc
%if %with_gcov
%dir %{_var}/lib/pacemaker
/%{_var}/lib/pacemaker

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:daed563f1c16a25e00fa1253a85a7e5a1313a84bc15ff253d2365f5b67737844
size 23537762
oid sha256:148fdba7b16d81529ad9923bfe94241487e9a143753b785583d1850eaa902bb6
size 15452016