Tim Serong 2013-05-21 12:29:04 +00:00 committed by Git OBS Bridge
parent 10fe763ff4
commit 62ef318946
24 changed files with 2004 additions and 123 deletions

View File

@@ -1,44 +0,0 @@
commit 543b4337b42f64fa46e2c01b7eb466dc4bd7665b
Author: Gao,Yan <ygao@suse.com>
Date: Fri Nov 11 14:03:56 2011 +0800
Medium: stonith: Expose IDs of stonith resources to stonith agents through "$CRM_meta_st_device_id" environment variable
Index: pacemaker/fencing/commands.c
===================================================================
--- pacemaker.orig/fencing/commands.c
+++ pacemaker/fencing/commands.c
@@ -376,6 +376,8 @@ static stonith_device_t *build_device_fr
device->agent = crm_element_value_copy(dev, "agent");
device->namespace = crm_element_value_copy(dev, "namespace");
device->params = xml2list(dev);
+
+ g_hash_table_insert(device->params, crm_strdup(CRM_META "_" F_STONITH_DEVICE), crm_strdup(device->id));
device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
/* TODO: Hook up priority */
Index: pacemaker/lib/fencing/st_client.c
===================================================================
--- pacemaker.orig/lib/fencing/st_client.c
+++ pacemaker/lib/fencing/st_client.c
@@ -558,6 +558,8 @@ run_stonith_agent(const char *agent, con
} else {
/* child */
+ const char *st_dev_id_key = CRM_META "_" F_STONITH_DEVICE;
+ const char *st_dev_id_value = NULL;
close(1);
/* coverity[leaked_handle] False positive */
@@ -577,6 +579,11 @@ run_stonith_agent(const char *agent, con
close(p_read_fd);
close(p_write_fd);
+ st_dev_id_value = g_hash_table_lookup(device_args, st_dev_id_key);
+ if (st_dev_id_value) {
+ setenv(st_dev_id_key, st_dev_id_value, 1);
+ }
+
execlp(agent, agent, NULL);
exit(EXIT_FAILURE);
}

View File

@@ -0,0 +1,78 @@
commit 1e01228825eb8d4449edfbb1a1fa0c38fab4d5e6
Author: Gao,Yan <ygao@suse.com>
Date: Thu Sep 6 15:14:58 2012 +0800
Medium: stonith: Expose IDs of stonith resources to stonith agents through "$CRM_meta_st_device_id" environment variable
Index: pacemaker/fencing/commands.c
===================================================================
--- pacemaker.orig/fencing/commands.c
+++ pacemaker/fencing/commands.c
@@ -596,6 +596,7 @@ build_device_from_xml(xmlNode * msg)
device->id, device->on_target_actions);
}
+ g_hash_table_insert(device->params, strdup(CRM_META "_" F_STONITH_DEVICE), strdup(device->id));
device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
/* TODO: Hook up priority */
Index: pacemaker/lib/fencing/st_client.c
===================================================================
--- pacemaker.orig/lib/fencing/st_client.c
+++ pacemaker/lib/fencing/st_client.c
@@ -54,6 +54,7 @@ struct stonith_action_s {
char *action;
char *victim;
char *args;
+ char *dev_id;
int timeout;
int async;
void *userdata;
@@ -546,6 +547,7 @@ stonith_action_destroy(stonith_action_t
free(action->args);
free(action->action);
free(action->victim);
+ free(action->dev_id);
free(action);
}
@@ -573,6 +575,8 @@ stonith_action_create(const char *agent,
if (device_args) {
char buffer[512];
const char *value = NULL;
+ const char *st_dev_id_key = CRM_META "_" F_STONITH_DEVICE;
+ const char *st_dev_id_value = NULL;
snprintf(buffer, 511, "pcmk_%s_retries", _action);
value = g_hash_table_lookup(device_args, buffer);
@@ -580,6 +584,11 @@ stonith_action_create(const char *agent,
if (value) {
action->max_retries = atoi(value);
}
+
+ st_dev_id_value = g_hash_table_lookup(device_args, st_dev_id_key);
+ if (st_dev_id_value) {
+ action->dev_id = strdup(st_dev_id_value);
+ }
}
return action;
@@ -731,6 +740,7 @@ internal_stonith_action_execute(stonith_
if (!pid) {
/* child */
+ const char *st_dev_id_key = CRM_META "_" F_STONITH_DEVICE;
close(1);
/* coverity[leaked_handle] False positive */
@@ -750,6 +760,10 @@ internal_stonith_action_execute(stonith_
close(p_read_fd);
close(p_write_fd);
+ if (action->dev_id) {
+ setenv(st_dev_id_key, action->dev_id, 1);
+ }
+
/* keep retries from executing out of control */
if (is_retry) {
sleep(1);
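
The two hunks above cooperate: build_device_from_xml() stashes the device ID in the device's parameter table, and the client library copies it into the child's environment immediately before exec'ing the agent, so the agent can read $CRM_meta_st_device_id. A minimal standalone sketch of that setenv-before-exec pattern (hypothetical function name, plain POSIX calls rather than pacemaker internals):

#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical illustration of the pattern used above: export the
 * device ID to the child's environment, then exec the agent. */
static void exec_agent_with_device_id(const char *agent, const char *dev_id)
{
    pid_t pid = fork();

    if (pid == 0) {
        /* child: make the device ID visible to the agent */
        if (dev_id) {
            setenv("CRM_meta_st_device_id", dev_id, 1);
        }
        execlp(agent, agent, (char *)NULL);
        _exit(EXIT_FAILURE); /* only reached if exec fails */
    }
    /* the real code tracks the child and reads its output */
}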

View File

@@ -0,0 +1,72 @@
commit c9765144c0c23808b37adaf56c3305e2e2167173
Author: Gao,Yan <ygao@suse.com>
Date: Mon Dec 17 16:09:28 2012 +0800
Low: fencing: Suppress logging for stonith monitor actions (bnc#792124)
Index: pacemaker/fencing/commands.c
===================================================================
--- pacemaker.orig/fencing/commands.c
+++ pacemaker/fencing/commands.c
@@ -1296,7 +1296,13 @@ log_operation(async_command_t * cmd, int
/* Logging the whole string confuses syslog when the string is xml */
char *prefix = g_strdup_printf("%s:%d", cmd->device, pid);
- crm_log_output(rc == 0 ? LOG_INFO : LOG_WARNING, prefix, output);
+ int success_log_level = LOG_INFO;
+
+ if (safe_str_eq(cmd->action, "monitor") ||
+ safe_str_eq(cmd->action, "status")) {
+ success_log_level = LOG_DEBUG;
+ }
+ crm_log_output(rc==0?success_log_level:LOG_WARNING, prefix, output);
g_free(prefix);
}
}
@@ -1954,6 +1960,7 @@ stonith_command(crm_client_t * client, u
* by 0x40AD4F: stonith_command (commands.c:1891)
*
*/
+ int error_log_level = LOG_INFO;
if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_DEBUG_3)) {
is_reply = TRUE;
@@ -1973,7 +1980,16 @@ stonith_command(crm_client_t * client, u
rc = handle_request(client, id, flags, request, remote_peer);
}
- do_crm_log_unlikely(rc > 0 ? LOG_DEBUG : LOG_INFO, "Processed %s%s from %s: %s (%d)", op,
+ if (rc == -EINPROGRESS) {
+ xmlNode *op = get_xpath_object("//@"F_STONITH_ACTION, request, LOG_DEBUG_3);
+ const char *action = crm_element_value(op, F_STONITH_ACTION);
+
+ if (safe_str_eq(action, "monitor") ||
+ safe_str_eq(action, "status")) {
+ error_log_level = LOG_DEBUG;
+ }
+ }
+ do_crm_log_unlikely(rc > 0 ? LOG_DEBUG : error_log_level, "Processed %s%s from %s: %s (%d)", op,
is_reply ? " reply" : "", client ? client->name : remote_peer,
rc > 0 ? "" : pcmk_strerror(rc), rc);
Index: pacemaker/lib/fencing/st_client.c
===================================================================
--- pacemaker.orig/lib/fencing/st_client.c
+++ pacemaker/lib/fencing/st_client.c
@@ -561,8 +561,15 @@ stonith_action_create(const char *agent,
{
stonith_action_t *action;
+ int log_level = LOG_INFO;
+
+ if (safe_str_eq(_action, "monitor") ||
+ safe_str_eq(_action, "status")) {
+ log_level = LOG_DEBUG;
+ }
+
action = calloc(1, sizeof(stonith_action_t));
- crm_info("Initiating action %s for agent %s (target=%s)", _action, agent, victim);
+ do_crm_log(log_level, "Initiating action %s for agent %s (target=%s)", _action, agent, victim);
action->args = make_args(_action, victim, victim_nodeid, device_args, port_map);
action->agent = strdup(agent);
action->action = strdup(_action);
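
Every hunk in this patch applies the same rule: the high-frequency "monitor" and "status" actions are logged at a lower severity, while everything else keeps its previous level. A self-contained sketch of that selection (plain libc string comparison and syslog levels, not the safe_str_eq/do_crm_log wrappers):

#include <string.h>
#include <syslog.h>

/* Hypothetical helper mirroring the checks above: routine health
 * checks are demoted to debug, other actions stay at info. */
static int success_log_level(const char *action)
{
    if (action && (strcmp(action, "monitor") == 0 ||
                   strcmp(action, "status") == 0)) {
        return LOG_DEBUG;
    }
    return LOG_INFO;
}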

View File

@@ -0,0 +1,84 @@
commit e97f7eb36e1d2c3a8b241ba52e98b707da1eccf5
Author: Gao,Yan <ygao@suse.com>
Date: Mon Apr 1 14:19:47 2013 +0800
Log: tools: crm_mon - Save relevant cib XML into /tmp/cmon.* files if refreshing fails (bnc#800323)
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index da778de..c6af01b 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -85,6 +85,8 @@ const char *external_recipient = NULL;
cib_t *cib = NULL;
stonith_t *st = NULL;
xmlNode *current_cib = NULL;
+xmlNode *orig_cib = NULL;
+xmlNode *diff_cib = NULL;
gboolean one_shot = FALSE;
gboolean has_warnings = FALSE;
@@ -2130,10 +2132,12 @@ crm_diff_update(const char *event, xmlNode * msg)
static bool stale = FALSE;
print_dot();
+ diff_cib = copy_xml(msg);
if (current_cib != NULL) {
xmlNode *cib_last = current_cib;
+ orig_cib = copy_xml(cib_last);
current_cib = NULL;
rc = cib_apply_patch_event(msg, cib_last, &current_cib, LOG_DEBUG);
@@ -2196,6 +2200,38 @@ mon_refresh_display(gpointer user_data)
last_refresh = time(NULL);
if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
+ char *tmp_prefix = tempnam("/tmp", "cmon.");
+ char *real_filename = NULL;
+ char *orig_filename = NULL;
+ char *new_filename = NULL;
+ char *diff_filename = NULL;
+ xmlNode *real_cib = get_cib_copy(cib);
+
+ real_filename = g_strdup_printf("%s.real", tmp_prefix);
+ write_xml_file(real_cib, real_filename, FALSE);
+ free(real_filename);
+ free_xml(real_cib);
+ real_cib = NULL;
+
+ orig_filename = g_strdup_printf("%s.orig", tmp_prefix);
+ write_xml_file(orig_cib, orig_filename, FALSE);
+ free(orig_filename);
+ free_xml(orig_cib);
+ orig_cib = NULL;
+
+ new_filename = g_strdup_printf("%s.new", tmp_prefix);
+ write_xml_file(current_cib, new_filename, FALSE);
+ free(new_filename);
+
+ diff_filename = g_strdup_printf("%s.diff", tmp_prefix);
+ write_xml_file(diff_cib, diff_filename, FALSE);
+ free(diff_filename);
+ free_xml(diff_cib);
+ diff_cib = NULL;
+
+ print_as("Saved relevant XML into /tmp/%s* files", tmp_prefix);
+ free(tmp_prefix);
+
if (cib) {
cib->cmds->signoff(cib);
}
@@ -2207,6 +2243,12 @@ mon_refresh_display(gpointer user_data)
return FALSE;
}
+ free_xml(orig_cib);
+ orig_cib = NULL;
+
+ free_xml(diff_cib);
+ diff_cib = NULL;
+
set_working_set_defaults(&data_set);
data_set.input = cib_copy;
cluster_status(&data_set);
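
On a failed refresh the code above writes four files sharing one tempnam() prefix: the CIB as freshly fetched (.real), the copy taken before the diff was applied (.orig), the result after applying it (.new), and the diff itself (.diff). A rough sketch of the same write-snapshots-under-a-shared-prefix idea (hypothetical helper, plain stdio instead of write_xml_file):

#include <stdio.h>

/* Hypothetical: write one named snapshot under a shared prefix,
 * e.g. "/tmp/cmon.Xa21Tx" + "real" -> "/tmp/cmon.Xa21Tx.real". */
static void dump_snapshot(const char *prefix, const char *suffix,
                          const char *content)
{
    char path[256];
    FILE *fp;

    snprintf(path, sizeof(path), "%s.%s", prefix, suffix);
    fp = fopen(path, "w");
    if (fp != NULL) {
        fputs(content, fp);
        fclose(fp);
    }
}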

View File

@@ -0,0 +1,76 @@
commit b802c689cdad03a185c5c6689741e2a4db8d5924
Author: Gao,Yan <ygao@suse.com>
Date: Thu Mar 14 09:41:53 2013 +0800
Log: Change some messages to notice level (bnc#806256)
diff --git a/crmd/callbacks.c b/crmd/callbacks.c
index a7830d3..d80cff0 100644
--- a/crmd/callbacks.c
+++ b/crmd/callbacks.c
@@ -179,7 +179,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK);
if (alive && safe_str_eq(task, CRM_OP_FENCE)) {
- crm_info("Node return implies stonith of %s (action %d) completed", node->uname,
+ crm_notice("Node return implies stonith of %s (action %d) completed", node->uname,
down->id);
erase_status_tag(node->uname, XML_CIB_TAG_LRM, cib_scope_local);
erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
diff --git a/crmd/membership.c b/crmd/membership.c
index e435e21..b3c34ae 100644
--- a/crmd/membership.c
+++ b/crmd/membership.c
@@ -322,7 +322,7 @@ crm_update_quorum(gboolean quorum, gboolean force_update)
set_uuid(update, XML_ATTR_DC_UUID, fsa_our_uname);
fsa_cib_update(XML_TAG_CIB, update, call_options, call_id, NULL);
- crm_debug("Updating quorum status to %s (call=%d)", quorum ? "true" : "false", call_id);
+ crm_notice("Updating quorum status to %s (call=%d)", quorum ? "true" : "false", call_id);
fsa_register_cib_callback(call_id, FALSE, NULL, cib_quorum_update_complete);
free_xml(update);
}
diff --git a/crmd/tengine.c b/crmd/tengine.c
index 9ff458c..f22e87f 100644
--- a/crmd/tengine.c
+++ b/crmd/tengine.c
@@ -208,7 +208,7 @@ do_te_invoke(long long action,
destroy_graph(transition_graph);
transition_graph = unpack_graph(graph_data, graph_input);
CRM_CHECK(transition_graph != NULL, transition_graph = create_blank_graph(); return);
- crm_info("Processing graph %d (ref=%s) derived from %s", transition_graph->id, ref,
+ crm_notice("Processing graph %d (ref=%s) derived from %s", transition_graph->id, ref,
graph_input);
value = crm_element_value(graph_data, "failed-stop-offset");
diff --git a/fencing/commands.c b/fencing/commands.c
index 026371a..d1ad657 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -1098,9 +1098,11 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc
}
if (safe_str_eq(host, alias)) {
- crm_info("%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type);
+ do_crm_log(can ? LOG_INFO : LOG_NOTICE,
+ "%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type);
} else {
- crm_info("%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias,
+ do_crm_log(can ? LOG_INFO : LOG_NOTICE,
+ "%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias,
check_type);
}
diff --git a/fencing/remote.c b/fencing/remote.c
index 15a52b7..98dc1f9 100644
--- a/fencing/remote.c
+++ b/fencing/remote.c
@@ -340,7 +340,7 @@ remote_op_timeout(gpointer userdata)
return FALSE;
}
- crm_debug("Action %s (%s) for %s (%s) timed out",
+ crm_notice("Action %s (%s) for %s (%s) timed out",
op->action, op->id, op->target, op->client_name);
op->state = st_failed;
remote_op_done(op, NULL, -ETIME, FALSE);

View File

@@ -0,0 +1,129 @@
diff --git a/lib/common/xml.c b/lib/common/xml.c
index b6df79f..33a9abf 100644
--- a/lib/common/xml.c
+++ b/lib/common/xml.c
@@ -2275,6 +2275,7 @@ calculate_xml_digest_v1(xmlNode * input, gboolean sort, gboolean ignored)
return digest;
}
+#if 0
static char *
calculate_xml_digest_v2(xmlNode * source, gboolean do_filter)
{
@@ -2324,6 +2325,116 @@ calculate_xml_digest_v2(xmlNode * source, gboolean do_filter)
crm_trace("End digest");
return digest;
}
+#endif
+
+static void
+filter_xml(xmlNode *data, filter_t *filter, int filter_len, gboolean recursive)
+{
+ int lpc = 0;
+ xmlNode *child = NULL;
+
+ for(lpc = 0; lpc < filter_len; lpc++) {
+ xml_remove_prop(data, filter[lpc].string);
+ }
+
+ if(recursive == FALSE || filter_len == 0) {
+ return;
+ }
+
+ for(child = __xml_first_child(data); child != NULL; child = __xml_next(child)) {
+ filter_xml(child, filter, filter_len, recursive);
+ }
+}
+
+static char *
+calculate_xml_digest_v2(xmlNode *source, gboolean do_filter)
+{
+ char *digest = NULL;
+
+ int buffer_len = 0;
+ int filter_size = DIMOF(filter);
+
+ xmlDoc *doc = NULL;
+ xmlNode *copy = NULL;
+ xmlNode *input = source;
+ xmlBuffer *xml_buffer = NULL;
+ static struct qb_log_callsite *digest_cs = NULL;
+
+ crm_trace("Begin digest");
+ if(do_filter && BEST_EFFORT_STATUS) {
+ /* Exclude the status calculation from the digest
+ *
+ * This doesn't mean it wont be sync'd, we just wont be paranoid
+ * about it being an _exact_ copy
+ *
+ * We don't need it to be exact, since we throw it away and regenerate
+ * from our peers whenever a new DC is elected anyway
+ *
+ * Importantly, this reduces the amount of XML to copy+export as
+ * well as the amount of data for MD5 needs to operate on
+ */
+ xmlNode *child = NULL;
+ xmlAttrPtr pIter = NULL;
+ copy = create_xml_node(NULL, XML_TAG_CIB);
+ for(pIter = crm_first_attr(input); pIter != NULL; pIter = pIter->next) {
+ const char *p_name = (const char *)pIter->name;
+ const char *p_value = crm_attr_value(pIter);
+
+ xmlSetProp(copy, (const xmlChar*)p_name, (const xmlChar*)p_value);
+ }
+
+ xml_remove_prop(copy, XML_ATTR_ORIGIN);
+ xml_remove_prop(copy, XML_CIB_ATTR_WRITTEN);
+
+ /* We just did all the filtering */
+
+ for(child = __xml_first_child(input); child != NULL; child = __xml_next(child)) {
+ if(safe_str_neq(crm_element_name(child), XML_CIB_TAG_STATUS)) {
+ add_node_copy(copy, child);
+ }
+ }
+
+ } else if(do_filter) {
+ copy = copy_xml(input);
+ filter_xml(copy, filter, filter_size, TRUE);
+ input = copy;
+ }
+
+ crm_trace("Dumping");
+ doc = getDocPtr(input);
+ xml_buffer = xmlBufferCreate();
+
+ CRM_ASSERT(xml_buffer != NULL);
+ CRM_CHECK(doc != NULL, return NULL); /* doc will only be NULL if an_xml_node is */
+
+ buffer_len = xmlNodeDump(xml_buffer, doc, input, 0, FALSE);
+ CRM_CHECK(xml_buffer->content != NULL && buffer_len > 0, goto done);
+
+ digest = crm_md5sum((char *)xml_buffer->content);
+
+ if(digest_cs == NULL) {
+ digest_cs = qb_log_callsite_get(
+ __func__, __FILE__, "cib-digest", LOG_TRACE, __LINE__,
+ crm_trace_nonlog);
+ }
+ if (digest_cs && digest_cs->targets) {
+ char *trace_file = crm_concat("/tmp/cib-digest", digest, '-');
+ crm_trace("Saving %s.%s.%s to %s",
+ crm_element_value(input, XML_ATTR_GENERATION_ADMIN),
+ crm_element_value(input, XML_ATTR_GENERATION),
+ crm_element_value(input, XML_ATTR_NUMUPDATES),
+ trace_file);
+ save_xml_to_file(source, "digest input", trace_file);
+ free(trace_file);
+ }
+
+ done:
+ xmlBufferFree(xml_buffer);
+ free_xml(copy);
+
+ crm_trace("End digest");
+ return digest;
+}
char *
calculate_on_disk_digest(xmlNode * input)
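
The new filter_xml() helper is a straightforward recursive attribute scrub: remove each filtered property from the current node, then repeat for every child. A generic equivalent against plain libxml2 (xmlUnsetProp and the children/next pointers, instead of the crm wrappers used above):

#include <libxml/tree.h>

/* Hypothetical stand-in for filter_xml(): strip a list of attribute
 * names from a node and, optionally, from its whole subtree. */
static void strip_attrs(xmlNodePtr node, const char **names, int n,
                        int recursive)
{
    int i;
    xmlNodePtr child;

    for (i = 0; i < n; i++) {
        xmlUnsetProp(node, (const xmlChar *) names[i]);
    }
    if (!recursive) {
        return;
    }
    for (child = node->children; child != NULL; child = child->next) {
        if (child->type == XML_ELEMENT_NODE) {
            strip_attrs(child, names, n, recursive);
        }
    }
}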

View File

@@ -0,0 +1,28 @@
commit 907b91e7154d57f091af547aa2179e1433bc69eb
Author: Gao,Yan <ygao@suse.com>
Date: Thu Apr 18 16:00:02 2013 +0800
Log: fencing: Drop the severity of the messages on registering a stonith device
diff --git a/fencing/commands.c b/fencing/commands.c
index a58c880..c7d9d38 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -812,7 +812,7 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
stonith_device_t *device = build_device_from_xml(msg);
if ((dup = device_has_duplicate(device))) {
- crm_notice("Device '%s' already existed in device list (%d active devices)", device->id,
+ crm_info("Device '%s' already existed in device list (%d active devices)", device->id,
g_hash_table_size(device_list));
free_device(device);
device = dup;
@@ -832,7 +832,7 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
}
g_hash_table_replace(device_list, device->id, device);
- crm_notice("Added '%s' to the device list (%d active devices)", device->id,
+ crm_info("Added '%s' to the device list (%d active devices)", device->id,
g_hash_table_size(device_list));
}
if (desc) {

View File

@@ -0,0 +1,13 @@
diff --git a/fencing/main.c b/fencing/main.c
index 1a16b48..90787f8 100644
--- a/fencing/main.c
+++ b/fencing/main.c
@@ -761,7 +761,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg)
xmlNode *cib_last = local_cib;
local_cib = NULL;
- rc = (*cib_apply_patch_event)(msg, cib_last, &local_cib, LOG_DEBUG);
+ /*rc = (*cib_apply_patch_event)(msg, cib_last, &local_cib, LOG_DEBUG);*/
free_xml(cib_last);
switch (rc) {

View File

@@ -0,0 +1,26 @@
diff --git a/cib/io.c b/cib/io.c
index 26f0aea..1700967 100644
--- a/cib/io.c
+++ b/cib/io.c
@@ -681,6 +681,8 @@ write_cib_contents(gpointer p)
}
}
+ strip_text_nodes(cib_local);
+
tmp_cib = g_strdup_printf("%s/cib.XXXXXX", cib_root);
tmp_digest = g_strdup_printf("%s/cib.XXXXXX", cib_root);
diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c
index 6353d1d..2c21f02 100644
--- a/lib/cib/cib_utils.c
+++ b/lib/cib/cib_utils.c
@@ -491,7 +491,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
}
crm_trace("Massaging CIB contents");
- strip_text_nodes(scratch);
+ /*strip_text_nodes(scratch);*/
fix_plus_plus_recursive(scratch);
/* The diff calculation in cib_config_changed() accounts for 25% of the

View File

@@ -4,26 +4,25 @@ Date: Fri Nov 25 13:11:15 2011 +0100
Medium: RA: add NodeUtilization RA
diff --git a/extra/resources/Makefile.am b/extra/resources/Makefile.am
index bc35401..e38d64d 100644
--- a/extra/resources/Makefile.am
+++ b/extra/resources/Makefile.am
@@ -32,7 +32,8 @@ ocf_SCRIPTS = ClusterMon \
pingd \
Index: pacemaker/extra/resources/Makefile.am
===================================================================
--- pacemaker.orig/extra/resources/Makefile.am
+++ pacemaker/extra/resources/Makefile.am
@@ -33,7 +33,8 @@ ocf_SCRIPTS = ClusterMon \
Stateful \
SysInfo \
- SystemHealth
+ SystemHealth \
SystemHealth \
- remote
+ remote \
+ NodeUtilization
if BUILD_XML_HELP
man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7)
diff --git a/extra/resources/NodeUtilization b/extra/resources/NodeUtilization
new file mode 100644
index 0000000..a2a17c6
Index: pacemaker/extra/resources/NodeUtilization
===================================================================
--- /dev/null
+++ b/extra/resources/NodeUtilization
@@ -0,0 +1,229 @@
+++ pacemaker/extra/resources/NodeUtilization
@@ -0,0 +1,230 @@
+#!/bin/sh
+#
+#
@@ -54,8 +53,9 @@ index 0000000..a2a17c6
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
+. ${OCF_FUNCTIONS}
+: ${__OCF_ACTION=$1}
+
+#######################################################################
+

View File

@@ -0,0 +1,13 @@
diff --git a/tools/Makefile.am b/tools/Makefile.am
index e8d0587..89b0b07 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -33,7 +33,7 @@ pcmk_DATA = report.common report.collector
sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount
if BUILD_CIBSECRETS
-sbin_SCRIPTS += cibsecret
+#sbin_SCRIPTS += cibsecret
endif
EXTRA_DIST = $(sbin_SCRIPTS)

View File

@@ -0,0 +1,518 @@
commit 3738e2d5dd80146afb0427f96df786f8fa7f09b3
Author: Gao,Yan <ygao@suse.com>
Date: Mon Jan 7 03:01:40 2013 +0800
High: PE: cl#5130 - Improve the placement for colocated utilization resources
diff --git a/pengine/group.c b/pengine/group.c
index 823ea08..88f6a69 100644
--- a/pengine/group.c
+++ b/pengine/group.c
@@ -515,3 +515,62 @@ void
group_append_meta(resource_t * rsc, xmlNode * xml)
{
}
+
+GListPtr
+group_find_colocated_rscs(GListPtr colocated_rscs, resource_t * rsc,
+ resource_t * from_rsc, resource_t * orig_rsc)
+{
+ group_variant_data_t *group_data = NULL;
+
+ get_group_variant_data(group_data, rsc);
+ if (group_data->colocated ||
+ (rsc->parent &&
+ (rsc->parent->variant == pe_clone || rsc->parent->variant == pe_master))) {
+ GListPtr gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ resource_t *child_rsc = (resource_t *) gIter->data;
+
+ colocated_rscs = find_colocated_rscs(colocated_rscs, child_rsc, from_rsc, orig_rsc);
+ }
+
+ } else {
+ if (group_data->first_child) {
+ colocated_rscs = find_colocated_rscs(colocated_rscs, group_data->first_child, from_rsc, orig_rsc);
+ }
+ }
+
+ colocated_rscs = find_colocated_rscs(colocated_rscs, rsc, from_rsc, orig_rsc);
+
+ return colocated_rscs;
+}
+
+void
+group_unallocated_utilization_add(GHashTable * all_utilization, resource_t * rsc,
+ GListPtr all_rscs)
+{
+ group_variant_data_t *group_data = NULL;
+
+ get_group_variant_data(group_data, rsc);
+ if (group_data->colocated ||
+ (rsc->parent &&
+ (rsc->parent->variant == pe_clone || rsc->parent->variant == pe_master))) {
+ GListPtr gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ resource_t *child_rsc = (resource_t *) gIter->data;
+
+ if (is_set(child_rsc->flags, pe_rsc_provisional) &&
+ g_list_find(all_rscs, child_rsc) == FALSE) {
+ calculate_utilization(all_utilization, child_rsc->utilization, TRUE);
+ }
+ }
+
+ } else {
+ if (group_data->first_child &&
+ is_set(group_data->first_child->flags, pe_rsc_provisional) &&
+ g_list_find(all_rscs, group_data->first_child) == FALSE) {
+ calculate_utilization(all_utilization, group_data->first_child->utilization, TRUE);
+ }
+ }
+}
diff --git a/pengine/native.c b/pengine/native.c
index 47c12b4..cacc226 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -79,7 +79,7 @@ gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gb
struct capacity_data {
node_t *node;
- resource_t *rsc;
+ const char *rsc_id;
gboolean is_enough;
};
@@ -94,27 +94,119 @@ check_capacity(gpointer key, gpointer value, gpointer user_data)
remaining = crm_parse_int(g_hash_table_lookup(data->node->details->utilization, key), "0");
if (required > remaining) {
- pe_rsc_debug(data->rsc,
- "Node %s has no enough %s for resource %s: required=%d remaining=%d",
- data->node->details->uname, (char *)key, data->rsc->id, required, remaining);
+ crm_debug("Node %s has no enough %s for %s: required=%d remaining=%d",
+ data->node->details->uname, (char *)key, data->rsc_id, required, remaining);
data->is_enough = FALSE;
}
}
static gboolean
-have_enough_capacity(node_t * node, resource_t * rsc)
+have_enough_capacity(node_t * node, const char * rsc_id, GHashTable * utilization)
{
struct capacity_data data;
data.node = node;
- data.rsc = rsc;
+ data.rsc_id = rsc_id;
data.is_enough = TRUE;
- g_hash_table_foreach(rsc->utilization, check_capacity, &data);
+ g_hash_table_foreach(utilization, check_capacity, &data);
return data.is_enough;
}
+static GHashTable *
+sum_unallocated_utilization(resource_t * rsc, GListPtr colocated_rscs)
+{
+ GListPtr gIter = NULL;
+ GListPtr all_rscs = NULL;
+ GHashTable *all_utilization = g_hash_table_new_full(crm_str_hash, g_str_equal,
+ g_hash_destroy_str, g_hash_destroy_str);
+
+ all_rscs = g_list_copy(colocated_rscs);
+ if (g_list_find(all_rscs, rsc) == FALSE) {
+ all_rscs = g_list_append(all_rscs, rsc);
+ }
+
+ for (gIter = all_rscs; gIter != NULL; gIter = gIter->next) {
+ resource_t *listed_rsc = (resource_t *) gIter->data;
+
+ if(is_set(listed_rsc->flags, pe_rsc_provisional) == FALSE) {
+ continue;
+ }
+
+ pe_rsc_trace(rsc, "%s: Processing unallocated colocated %s", rsc->id, listed_rsc->id);
+
+ if (listed_rsc->variant == pe_native) {
+ pe_rsc_trace(rsc, "%s: Adding %s as colocated utilization", rsc->id, listed_rsc->id);
+ calculate_utilization(all_utilization, listed_rsc->utilization, TRUE);
+
+ } else if (listed_rsc->variant == pe_group) {
+ pe_rsc_trace(rsc, "%s: Adding %s as colocated utilization", rsc->id, listed_rsc->id);
+ group_unallocated_utilization_add(all_utilization, listed_rsc, all_rscs);
+
+ } else if (listed_rsc->variant == pe_clone ||
+ listed_rsc->variant == pe_master) {
+ GListPtr gIter1 = NULL;
+ gboolean existing = FALSE;
+ resource_t *first_child = (resource_t *) listed_rsc->children->data;
+
+ /* Check if there's any child already existing in the list */
+ gIter1 = listed_rsc->children;
+ for (; gIter1 != NULL; gIter1 = gIter1->next) {
+ resource_t *child = (resource_t *) gIter1->data;
+
+ if (g_list_find(all_rscs, child)) {
+ existing = TRUE;
+ break;
+ }
+ }
+
+ if (existing) {
+ continue;
+
+ } else if (first_child->variant == pe_native) {
+ pe_rsc_trace(rsc, "%s: Adding %s as colocated utilization",
+ rsc->id, ID(first_child->xml));
+ calculate_utilization(all_utilization, first_child->utilization, TRUE);
+
+ } else if (first_child->variant == pe_group) {
+ GListPtr gIter2 = NULL;
+ resource_t *match_group = NULL;
+
+ /* Check if there's any grandchild already existing in the list */
+ gIter2 = all_rscs;
+ for (; gIter2 != NULL; gIter2 = gIter2->next) {
+ resource_t *listed_native = (resource_t *) gIter2->data;
+
+ if (listed_native->variant == pe_native &&
+ listed_native->parent &&
+ listed_native->parent->parent == listed_rsc) {
+ match_group = listed_native->parent;
+ break;
+ }
+
+ if (match_group) {
+ if(is_set(match_group->flags, pe_rsc_provisional)) {
+ pe_rsc_trace(rsc, "%s: Adding %s as colocated utilization",
+ rsc->id, match_group->id);
+ group_unallocated_utilization_add(all_utilization, match_group, all_rscs);
+ }
+
+ } else {
+ pe_rsc_trace(rsc, "%s: Adding %s as colocated utilization",
+ rsc->id, ID(first_child->xml));
+ group_unallocated_utilization_add(all_utilization, first_child, all_rscs);
+ }
+ }
+ }
+ }
+ }
+
+ g_list_free(all_rscs);
+
+ return all_utilization;
+}
+
static gboolean
native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
@@ -136,15 +228,63 @@ native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_se
if (safe_str_neq(data_set->placement_strategy, "default")) {
GListPtr gIter = NULL;
+ GListPtr colocated_rscs = NULL;
+ gboolean any_capable = FALSE;
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- node_t *node = (node_t *) gIter->data;
+ colocated_rscs = find_colocated_rscs(colocated_rscs, rsc, NULL, rsc);
+ if (colocated_rscs) {
+ GHashTable *unallocated_utilization = NULL;
+ char *rscs_id = crm_concat(rsc->id, "and its colocated resources", ' ');
+ node_t *most_capable_node = NULL;
+
+ unallocated_utilization = sum_unallocated_utilization(rsc, colocated_rscs);
+
+ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ node_t *node = (node_t *) gIter->data;
+
+ if (have_enough_capacity(node, rscs_id, unallocated_utilization)) {
+ any_capable = TRUE;
+ }
+
+ if (most_capable_node == NULL ||
+ compare_capacity(node, most_capable_node) < 0) {
+ /* < 0 means 'node' is more capable */
+ most_capable_node = node;
+ }
+ }
+
+ if (any_capable) {
+ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ node_t *node = (node_t *) gIter->data;
+
+ if (have_enough_capacity(node, rscs_id, unallocated_utilization) == FALSE) {
+ pe_rsc_debug(rsc, "Resource %s and its colocated resources cannot be allocated to node %s: no enough capacity",
+ rsc->id, node->details->uname);
+ resource_location(rsc, node, -INFINITY, "__limit_utilization__", data_set);
+ }
+ }
+
+ } else if (prefer == NULL) {
+ prefer = most_capable_node;
+ }
+
+ if (unallocated_utilization) {
+ g_hash_table_destroy(unallocated_utilization);
+ }
- if (have_enough_capacity(node, rsc) == FALSE) {
- pe_rsc_debug(rsc,
- "Resource %s cannot be allocated to node %s: none of enough capacity",
- rsc->id, node->details->uname);
- resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set);
+ g_list_free(colocated_rscs);
+ free(rscs_id);
+ }
+
+ if (any_capable == FALSE) {
+ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ node_t *node = (node_t *) gIter->data;
+
+ if (have_enough_capacity(node, rsc->id, rsc->utilization) == FALSE) {
+ pe_rsc_debug(rsc, "Resource %s cannot be allocated to node %s: no enough capacity",
+ rsc->id, node->details->uname);
+ resource_location(rsc, node, -INFINITY, "__limit_utilization__", data_set);
+ }
}
}
dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes);
@@ -1279,14 +1419,14 @@ enum filter_colocation_res {
static enum filter_colocation_res
filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh,
- rsc_colocation_t * constraint)
+ rsc_colocation_t * constraint, gboolean preview)
{
if (constraint->score == 0) {
return influence_nothing;
}
/* rh side must be allocated before we can process constraint */
- if (is_set(rsc_rh->flags, pe_rsc_provisional)) {
+ if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
return influence_nothing;
}
@@ -1462,7 +1602,7 @@ native_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocatio
{
enum filter_colocation_res filter_results;
- filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint);
+ filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
switch (filter_results) {
case influence_rsc_priority:
@@ -3173,3 +3313,86 @@ native_append_meta(resource_t * rsc, xmlNode * xml)
free(name);
}
}
+
+static GListPtr
+colocated_rscs_append(GListPtr colocated_rscs, resource_t * rsc,
+ resource_t * from_rsc, resource_t * orig_rsc)
+{
+ if (rsc == NULL) {
+ return colocated_rscs;
+
+ /* Avoid searching loop */
+ } else if (rsc == orig_rsc) {
+ return colocated_rscs;
+
+ } else if (g_list_find(colocated_rscs, rsc)) {
+ return colocated_rscs;
+ }
+
+ crm_trace("%s: %s is supposed to be colocated with %s", orig_rsc->id, rsc->id, orig_rsc->id);
+ colocated_rscs = g_list_append(colocated_rscs, rsc);
+
+ if (rsc->variant == pe_group) {
+ /* Need to use group_variant_data */
+ colocated_rscs = group_find_colocated_rscs(colocated_rscs, rsc, from_rsc, orig_rsc);
+
+ } else {
+ colocated_rscs = find_colocated_rscs(colocated_rscs, rsc, from_rsc, orig_rsc);
+ }
+
+ return colocated_rscs;
+}
+
+GListPtr
+find_colocated_rscs(GListPtr colocated_rscs, resource_t * rsc,
+ resource_t * from_rsc, resource_t * orig_rsc)
+{
+ GListPtr gIter = NULL;
+
+ for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
+ rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
+ resource_t *rsc_rh = constraint->rsc_rh;
+
+ /* Avoid going back */
+ if (from_rsc && rsc_rh == from_rsc) {
+ continue;
+ }
+
+ /* Break colocation loop */
+ if (rsc_rh == orig_rsc) {
+ continue;
+ }
+
+ if (constraint->score == INFINITY &&
+ filter_colocation_constraint(rsc, rsc_rh, constraint, TRUE) == influence_rsc_location) {
+ colocated_rscs = colocated_rscs_append(colocated_rscs, rsc_rh, rsc, orig_rsc);
+ }
+ }
+
+ for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
+ rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
+ resource_t *rsc_lh = constraint->rsc_lh;
+
+ /* Avoid going back */
+ if (from_rsc && rsc_lh == from_rsc) {
+ continue;
+ }
+
+ /* Break colocation loop */
+ if (rsc_lh == orig_rsc) {
+ continue;
+ }
+
+ if (rsc_lh->variant <= pe_group && rsc->variant > pe_group) {
+ /* We do not know if rsc_lh will be colocated with orig_rsc in this case */
+ continue;
+ }
+
+ if (constraint->score == INFINITY &&
+ filter_colocation_constraint(rsc_lh, rsc, constraint, TRUE) == influence_rsc_location) {
+ colocated_rscs = colocated_rscs_append(colocated_rscs, rsc_lh, rsc, orig_rsc);
+ }
+ }
+
+ return colocated_rscs;
+}
diff --git a/pengine/utils.c b/pengine/utils.c
index abd416d..d8d8b81 100644
--- a/pengine/utils.c
+++ b/pengine/utils.c
@@ -163,7 +163,7 @@ do_compare_capacity2(gpointer key, gpointer value, gpointer user_data)
/* rc < 0 if 'node1' has more capacity remaining
* rc > 0 if 'node1' has less capacity remaining
*/
-static int
+int
compare_capacity(const node_t * node1, const node_t * node2)
{
struct compare_data data;
@@ -268,44 +268,41 @@ sort_node_weight(gconstpointer a, gconstpointer b, gpointer data)
}
struct calculate_data {
- node_t *node;
- gboolean allocate;
+ GHashTable *current_utilization;
+ gboolean plus;
};
static void
do_calculate_utilization(gpointer key, gpointer value, gpointer user_data)
{
- const char *capacity = NULL;
- char *remain_capacity = NULL;
+ const char *current = NULL;
+ char *result = NULL;
struct calculate_data *data = user_data;
- capacity = g_hash_table_lookup(data->node->details->utilization, key);
- if (capacity) {
- if (data->allocate) {
- remain_capacity = crm_itoa(crm_parse_int(capacity, "0") - crm_parse_int(value, "0"));
- } else {
- remain_capacity = crm_itoa(crm_parse_int(capacity, "0") + crm_parse_int(value, "0"));
- }
- g_hash_table_replace(data->node->details->utilization, strdup(key), remain_capacity);
+ current = g_hash_table_lookup(data->current_utilization, key);
+ if (data->plus) {
+ result = crm_itoa(crm_parse_int(current, "0") + crm_parse_int(value, "0"));
+ g_hash_table_replace(data->current_utilization, strdup(key), result);
+
+ } else if (current) {
+ result = crm_itoa(crm_parse_int(current, "0") - crm_parse_int(value, "0"));
+ g_hash_table_replace(data->current_utilization, strdup(key), result);
}
}
-/* Specify 'allocate' to TRUE when allocating
- * Otherwise to FALSE when deallocating
+/* Specify 'plus' to FALSE when allocating
+ * Otherwise to TRUE when deallocating
*/
-static void
-calculate_utilization(node_t * node, resource_t * rsc, gboolean allocate)
+void
+calculate_utilization(GHashTable * current_utilization,
+ GHashTable * utilization, gboolean plus)
{
struct calculate_data data;
- data.node = node;
- data.allocate = allocate;
+ data.current_utilization = current_utilization;
+ data.plus = plus;
- g_hash_table_foreach(rsc->utilization, do_calculate_utilization, &data);
-
- if (allocate) {
- dump_rsc_utilization(show_utilization ? 0 : utilization_log_level, __FUNCTION__, rsc, node);
- }
+ g_hash_table_foreach(utilization, do_calculate_utilization, &data);
}
void
@@ -321,7 +318,7 @@ native_deallocate(resource_t * rsc)
old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc);
old->details->num_resources--;
/* old->count--; */
- calculate_utilization(old, rsc, FALSE);
+ calculate_utilization(old->details->utilization, rsc->utilization, TRUE);
free(old);
}
}
@@ -388,7 +385,9 @@ native_assign_node(resource_t * rsc, GListPtr nodes, node_t * chosen, gboolean f
chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, rsc);
chosen->details->num_resources++;
chosen->count++;
- calculate_utilization(chosen, rsc, TRUE);
+ calculate_utilization(chosen->details->utilization, rsc->utilization, FALSE);
+ dump_rsc_utilization(show_utilization ? 0 : utilization_log_level, __FUNCTION__, rsc, chosen);
+
return TRUE;
}
diff --git a/pengine/utils.h b/pengine/utils.h
index 1939d40..f8c6b6d 100644
--- a/pengine/utils.h
+++ b/pengine/utils.h
@@ -55,6 +55,19 @@ extern gboolean can_run_any(GHashTable * nodes);
extern resource_t *find_compatible_child(resource_t * local_child, resource_t * rsc,
enum rsc_role_e filter, gboolean current);
+extern int compare_capacity(const node_t * node1, const node_t * node2);
+extern void calculate_utilization(GHashTable * current_utilization,
+ GHashTable * utilization, gboolean plus);
+
+extern GListPtr find_colocated_rscs(GListPtr colocated_rscs, resource_t * rsc,
+ resource_t * from_rsc, resource_t * orig_rsc);
+
+extern GListPtr group_find_colocated_rscs(GListPtr colocated_rscs, resource_t * rsc,
+ resource_t * from_rsc, resource_t * orig_rsc);
+
+extern void group_unallocated_utilization_add(GHashTable *all_utilization, resource_t * rsc,
+ GListPtr all_rscs);
+
# define STONITH_UP "stonith_up"
# define STONITH_DONE "stonith_complete"
# define ALL_STOPPED "all_stopped"
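
The capacity test this patch generalizes is per-attribute: a node can host a resource, or a whole set of colocated resources, only if for every utilization attribute the summed requirement does not exceed what the node has remaining; a single exhausted attribute disqualifies the node. A toy reduction of have_enough_capacity() using plain arrays instead of GHashTable:

#include <string.h>

struct attr {
    const char *name;
    int value;
};

/* Hypothetical reduction of the check above: compare each required
 * attribute against the node's remaining capacity. */
static int enough_capacity(const struct attr *required, int n_req,
                           const struct attr *remaining, int n_rem)
{
    int i, j;

    for (i = 0; i < n_req; i++) {
        int left = 0;

        for (j = 0; j < n_rem; j++) {
            if (strcmp(required[i].name, remaining[j].name) == 0) {
                left = remaining[j].value;
                break;
            }
        }
        if (required[i].value > left) {
            return 0; /* not enough of this attribute on the node */
        }
    }
    return 1;
}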

View File

@@ -1,43 +0,0 @@
commit 3e12eebb6cb2636054f42198222e52a28da40056
Author: Gao,Yan <ygao@suse.com>
Date: Fri May 18 10:48:49 2012 +0800
High: Core: Revert to the original process flags for the support of rolling-upgrade
diff --git a/include/crm/ais.h b/include/crm/ais.h
index cf00fff..f2615d6 100644
--- a/include/crm/ais.h
+++ b/include/crm/ais.h
@@ -107,22 +107,22 @@ enum crm_proc_flag {
crm_proc_none = 0x00000001,
/* 3 messaging types */
- crm_proc_heartbeat = 0x00000100,
- crm_proc_plugin = 0x00000200,
- crm_proc_cpg = 0x00000400,
+ crm_proc_heartbeat = 0x01000000,
+ crm_proc_plugin = 0x00000002,
+ crm_proc_cpg = 0x04000000,
- crm_proc_lrmd = 0x00001000,
- crm_proc_cib = 0x00002000,
- crm_proc_crmd = 0x00004000,
- crm_proc_attrd = 0x00008000,
+ crm_proc_lrmd = 0x00000010,
+ crm_proc_cib = 0x00000100,
+ crm_proc_crmd = 0x00000200,
+ crm_proc_attrd = 0x00001000,
crm_proc_pe = 0x00010000,
crm_proc_te = 0x00020000,
- crm_proc_stonithd = 0x00100000,
- crm_proc_stonith_ng= 0x00200000,
+ crm_proc_stonithd = 0x00002000,
+ crm_proc_stonith_ng= 0x00100000,
- crm_proc_mgmtd = 0x01000000,
+ crm_proc_mgmtd = 0x00040000,
};
/* *INDENT-ON* */

View File

@@ -0,0 +1,23 @@
commit 62545b8a202a5fef0aad187de096b0f938f4f832
Author: Gao,Yan <ygao@suse.com>
Date: Thu May 9 22:35:01 2013 +0800
Fix: crmd: Do not record pending delete operations in the CIB
Otherwise, it would get stuck in an endless transition loop.
diff --git a/crmd/te_actions.c b/crmd/te_actions.c
index c3e333d..f8179bb 100644
--- a/crmd/te_actions.c
+++ b/crmd/te_actions.c
@@ -464,7 +464,9 @@ te_rsc_command(crm_graph_t * graph, crm_action_t * action)
}
value = crm_meta_value(action->params, XML_OP_ATTR_PENDING);
- if (crm_is_true(value) && safe_str_neq(task, CRMD_ACTION_CANCEL)) {
+ if (crm_is_true(value)
+ && safe_str_neq(task, CRMD_ACTION_CANCEL)
+ && safe_str_neq(task, CRMD_ACTION_DELETE)) {
/* write a "pending" entry to the CIB, inhibit notification */
crm_debug("Recording pending op %s in the CIB", task_uuid);
cib_action_update(action, PCMK_LRM_OP_PENDING, PCMK_EXECRA_STATUS_UNKNOWN);

View File

@@ -0,0 +1,13 @@
Index: pacemaker/lib/lrmd/lrmd_client.c
===================================================================
--- pacemaker.orig/lib/lrmd/lrmd_client.c
+++ pacemaker/lib/lrmd/lrmd_client.c
@@ -846,7 +846,7 @@ list_stonith_agents(lrmd_list_t ** resou
stonith_key_value_t *stonith_resources = NULL;
stonith_key_value_t *dIter = NULL;
- stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, &stonith_resources, 0);
+ stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, "heartbeat", &stonith_resources, 0);
for (dIter = stonith_resources; dIter; dIter = dIter->next) {
rc++;

View File

@@ -0,0 +1,15 @@
diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am
index 0eb0219..2e3e90c 100644
--- a/lrmd/Makefile.am
+++ b/lrmd/Makefile.am
@@ -24,8 +24,8 @@ lrmdlibdir = $(CRM_DAEMON_DIR)
lrmdlib_PROGRAMS = lrmd lrmd_test
initdir = $(INITDIR)
-init_SCRIPTS = pacemaker_remote
-sbin_PROGRAMS = pacemaker_remoted
+#init_SCRIPTS = pacemaker_remote
+#sbin_PROGRAMS = pacemaker_remoted
if HAVE_SYSTEMD
systemdunit_DATA = pacemaker_remote.service

View File

@@ -0,0 +1,19 @@
commit f813880dd1b6d1614393128a7f5f745437bea121
Author: Gao,Yan <ygao@suse.com>
Date: Wed Mar 27 22:03:56 2013 +0800
Build: lrmd: Change the default directory for nagios plugins
diff --git a/configure.ac b/configure.ac
index 9ad7413..135a365 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1218,7 +1218,7 @@ AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins)
AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1)
if test x"$NAGIOS_PLUGIN_DIR" = x""; then
- NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
+ NAGIOS_PLUGIN_DIR="${prefix}/lib/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)

View File

@@ -0,0 +1,414 @@
commit 6b3407f69109e4ce2cec728817f271c7c2d88c16
Author: Gao,Yan <ygao@suse.com>
Date: Mon Jan 7 10:50:43 2013 +0800
Low: PE: cl#5128 - Support maintenance mode for a single node (regression tests)
diff --git a/pengine/regression.sh b/pengine/regression.sh
index 0f96239..47c1180 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -488,6 +488,8 @@ do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
echo ""
do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
+do_test node-maintenance-1 "cl#5128 - Node maintenance"
+do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
echo ""
do_test stopped-monitor-00 "Stopped Monitor - initial start"
diff --git a/pengine/test10/node-maintenance-1.dot b/pengine/test10/node-maintenance-1.dot
new file mode 100644
index 0000000..e7332ef
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.dot
@@ -0,0 +1,8 @@
+digraph "g" {
+"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"probe_complete node1" [ style=bold color="green" fontcolor="black"]
+"probe_complete node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_stop_0 node1" -> "all_stopped" [ style = bold]
+"rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/node-maintenance-1.exp b/pengine/test10/node-maintenance-1.exp
new file mode 100644
index 0000000..3c73791
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.exp
@@ -0,0 +1,49 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="9" operation="stop" operation_key="rsc1_stop_0" on_node="node1" on_node_uuid="node1">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="2" operation="cancel" operation_key="rsc2_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_call_id="3" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_operation="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <rsc_op id="6" operation="probe_complete" operation_key="probe_complete" on_node="node2" on_node_uuid="node2">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3" priority="1000000">
+ <action_set>
+ <rsc_op id="5" operation="probe_complete" operation_key="probe_complete" on_node="node1" on_node_uuid="node1">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <pseudo_event id="3" operation="all_stopped" operation_key="all_stopped">
+ <attributes crm_feature_set="3.0.7"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="stop" operation_key="rsc1_stop_0" on_node="node1" on_node_uuid="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/node-maintenance-1.scores b/pengine/test10/node-maintenance-1.scores
new file mode 100644
index 0000000..0b1e6dc
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.scores
@@ -0,0 +1,7 @@
+Allocation scores:
+native_color: rsc1 allocation score on node1: -1
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc2 allocation score on node1: 0
+native_color: rsc2 allocation score on node2: INFINITY
+native_color: rsc_stonith allocation score on node1: 0
+native_color: rsc_stonith allocation score on node2: 0
diff --git a/pengine/test10/node-maintenance-1.summary b/pengine/test10/node-maintenance-1.summary
new file mode 100644
index 0000000..85dfb46
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.summary
@@ -0,0 +1,25 @@
+
+Current cluster status:
+Node node2: maintenance
+Online: [ node1 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Started node1
+ rsc2 (ocf::pacemaker:Dummy): Started node2 (unmanaged)
+
+Transition Summary:
+ * Stop rsc1 (node1)
+
+Executing cluster transition:
+ * Resource action: rsc1 stop on node1
+ * Resource action: rsc2 cancel=10000 on node2
+ * Pseudo action: all_stopped
+
+Revised cluster status:
+Node node2: maintenance
+Online: [ node1 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Stopped
+ rsc2 (ocf::pacemaker:Dummy): Started node2 (unmanaged)
+
diff --git a/pengine/test10/node-maintenance-1.xml b/pengine/test10/node-maintenance-1.xml
new file mode 100644
index 0000000..783b0b5
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.xml
@@ -0,0 +1,80 @@
+<cib epoch="1" num_updates="28" admin_epoch="0" validate-with="pacemaker-1.1" cib-last-written="Wed Dec 26 15:15:32 2012" have-quorum="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="node1" type="normal" uname="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ <node id="node2" type="normal" uname="node2">
+ <instance_attributes id="nodes-node2">
+ <nvpair id="nodes-node2-maintenance" name="maintenance" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="rsc_stonith" type="null"/>
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc1-meta_attributes">
+ <nvpair id="rsc1-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc2-meta_attributes">
+ <nvpair id="rsc2-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="rsc1-loc" rsc="rsc1" node="node1" score="-1"/>
+ <rsc_location id="rsc2-loc" rsc="rsc2" node="node2" score="-1"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state id="node1" uname="node1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node1">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc_stonith_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="node2" uname="node2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node2">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/pengine/test10/node-maintenance-2.dot b/pengine/test10/node-maintenance-2.dot
new file mode 100644
index 0000000..59fa1dc
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.dot
@@ -0,0 +1,8 @@
+digraph "g" {
+"probe_complete node1" [ style=bold color="green" fontcolor="black"]
+"probe_complete node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold]
+"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/node-maintenance-2.exp b/pengine/test10/node-maintenance-2.exp
new file mode 100644
index 0000000..0156835
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.exp
@@ -0,0 +1,50 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="rsc1_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="7" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="7" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="rsc2_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3" priority="1000000">
+ <action_set>
+ <rsc_op id="4" operation="probe_complete" operation_key="probe_complete" on_node="node2" on_node_uuid="node2">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4" priority="1000000">
+ <action_set>
+ <rsc_op id="3" operation="probe_complete" operation_key="probe_complete" on_node="node1" on_node_uuid="node1">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/node-maintenance-2.scores b/pengine/test10/node-maintenance-2.scores
new file mode 100644
index 0000000..c3fe45f
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.scores
@@ -0,0 +1,7 @@
+Allocation scores:
+native_color: rsc1 allocation score on node1: -1
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc2 allocation score on node1: 0
+native_color: rsc2 allocation score on node2: 0
+native_color: rsc_stonith allocation score on node1: 0
+native_color: rsc_stonith allocation score on node2: 0
diff --git a/pengine/test10/node-maintenance-2.summary b/pengine/test10/node-maintenance-2.summary
new file mode 100644
index 0000000..9712d04
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.summary
@@ -0,0 +1,23 @@
+
+Current cluster status:
+Online: [ node1 node2 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Stopped
+ rsc2 (ocf::pacemaker:Dummy): Started node2
+
+Transition Summary:
+ * Start rsc1 (node2)
+
+Executing cluster transition:
+ * Resource action: rsc1 start on node2
+ * Resource action: rsc2 monitor=10000 on node2
+ * Resource action: rsc1 monitor=10000 on node2
+
+Revised cluster status:
+Online: [ node1 node2 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Started node2
+ rsc2 (ocf::pacemaker:Dummy): Started node2
+
diff --git a/pengine/test10/node-maintenance-2.xml b/pengine/test10/node-maintenance-2.xml
new file mode 100644
index 0000000..2f2f966
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.xml
@@ -0,0 +1,78 @@
+<cib epoch="1" num_updates="31" admin_epoch="0" validate-with="pacemaker-1.1" cib-last-written="Thu Jan 3 13:01:04 2013" have-quorum="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="node1" type="normal" uname="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ <node id="node2" type="normal" uname="node2">
+ <instance_attributes id="nodes-node2">
+ <nvpair id="nodes-node2-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="rsc_stonith" type="null"/>
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc1-meta_attributes">
+ <nvpair id="rsc1-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc2-meta_attributes">
+ <nvpair id="rsc2-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="rsc1-loc" rsc="rsc1" node="node1" score="-1"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state id="node1" uname="node1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node1">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc_stonith_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_stop_0" operation="stop" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="4" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="node2" uname="node2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node2">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
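Note on the fixture above: both nodes carry maintenance="false", which is why the transition summary schedules rsc1's start and the recurring monitors. A minimal illustration of driving the new code path (an edit of the fixture, not part of this patch) is flipping one node's nvpair:

    <nvpair id="nodes-node2-maintenance" name="maintenance" value="true"/>

With that change, resources active on node2 are left unmanaged and their recurring monitors are cancelled rather than scheduled.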


@@ -0,0 +1,226 @@
commit 5e1d788ff7ddf9b9ba3bdcf3ac1bde09c40055ee
Author: Gao,Yan <ygao@suse.com>
Date: Wed Dec 26 21:11:20 2012 +0800
High: PE: cl#5128 - Support maintenance mode for a single node
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 0dead57..84e6c0e 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -137,6 +137,8 @@ struct node_shared_s {
/*! cache of calculated rsc digests for this node. */
GHashTable *digest_cache;
+
+ gboolean maintenance;
};
struct node_s {
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 8585e99..29a0ad1 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -51,6 +51,10 @@ native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
}
+ if (rsc->variant == pe_native && node->details->maintenance) {
+ clear_bit(rsc->flags, pe_rsc_managed);
+ }
+
if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_info(rsc, "resource %s isnt managed", rsc->id);
resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index fcd830c..dccc62f 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -704,6 +704,11 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set)
this_node->details->standby = TRUE;
}
+ if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance"))) {
+ crm_info("Node %s is in maintenance-mode", this_node->details->uname);
+ this_node->details->maintenance = TRUE;
+ }
+
crm_trace("determining node state");
determine_online_status(state, this_node, data_set);
@@ -969,7 +974,8 @@ determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set
crm_info("Node %s is %s", this_node->details->uname,
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
- this_node->details->standby ? "standby" : "online");
+ this_node->details->standby ? "standby" :
+ this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("Node %s is offline", this_node->details->uname);
diff --git a/pengine/allocate.c b/pengine/allocate.c
index a20f4a6..246b513 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -368,7 +368,8 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki
is_probe = TRUE;
}
- if (interval > 0 && is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (interval > 0 &&
+ (is_set(data_set->flags, pe_flag_maintenance_mode) || node->details->maintenance)) {
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if (is_probe || safe_str_eq(task, RSC_START) || interval > 0
@@ -470,7 +471,8 @@ check_actions(pe_working_set_t * data_set)
if (node == NULL) {
continue;
- } else if (can_run_resources(node) == FALSE) {
+ /* Still need to check actions for a maintenance node to cancel existing monitor operations */
+ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: cant run resources", node->details->uname);
continue;
}
diff --git a/pengine/graph.c b/pengine/graph.c
index 8d90042..041fcd4 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -489,6 +489,10 @@ shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * d
} else if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe_rsc_trace(action->rsc, "Skipping %s: maintainence mode", action->uuid);
continue;
+ } else if (node->details->maintenance) {
+ pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode",
+ action->uuid, node->details->uname);
+ continue;
} else if (safe_str_neq(action->task, RSC_STOP)) {
continue;
} else if (is_not_set(action->rsc->flags, pe_rsc_managed)
diff --git a/pengine/native.c b/pengine/native.c
index 47c12b4..66d775a 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -771,7 +771,8 @@ RecurringOp(resource_t * rsc, action_t * start, node_t * node,
void
Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
- if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (is_not_set(data_set->flags, pe_flag_maintenance_mode) &&
+ (node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
@@ -1000,7 +1001,8 @@ RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
void
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
- if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (is_not_set(data_set->flags, pe_flag_maintenance_mode) &&
+ (node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
diff --git a/pengine/utils.c b/pengine/utils.c
index abd416d..7dfaf95 100644
--- a/pengine/utils.c
+++ b/pengine/utils.c
@@ -106,10 +106,11 @@ can_run_resources(const node_t * node)
#endif
if (node->details->online == FALSE
- || node->details->shutdown || node->details->unclean || node->details->standby) {
- crm_trace("%s: online=%d, unclean=%d, standby=%d",
+ || node->details->shutdown || node->details->unclean
+ || node->details->standby || node->details->maintenance) {
+ crm_trace("%s: online=%d, unclean=%d, standby=%d, maintenance=%d",
node->details->uname, node->details->online,
- node->details->unclean, node->details->standby);
+ node->details->unclean, node->details->standby, node->details->maintenance);
return FALSE;
}
return TRUE;
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 80a44c7..107a908 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -665,6 +665,7 @@ print_simple_status(pe_working_set_t * data_set)
GListPtr gIter = NULL;
int nodes_online = 0;
int nodes_standby = 0;
+ int nodes_maintenance = 0;
dc = data_set->dc_node;
@@ -677,6 +678,8 @@ print_simple_status(pe_working_set_t * data_set)
if (node->details->standby && node->details->online) {
nodes_standby++;
+ } else if (node->details->maintenance && node->details->online) {
+ nodes_maintenance++;
} else if (node->details->online) {
nodes_online++;
} else {
@@ -689,6 +692,9 @@ print_simple_status(pe_working_set_t * data_set)
if (nodes_standby > 0) {
print_as(", %d standby nodes", nodes_standby);
}
+ if (nodes_maintenance > 0) {
+ print_as(", %d maintenance nodes", nodes_maintenance);
+ }
print_as(", %d resources configured", count_resources(data_set, NULL));
}
@@ -1131,6 +1137,13 @@ print_status(pe_working_set_t * data_set)
node_mode = "OFFLINE (standby)";
}
+ } else if (node->details->maintenance) {
+ if (node->details->online) {
+ node_mode = "maintenance";
+ } else {
+ node_mode = "OFFLINE (maintenance)";
+ }
+
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
@@ -1358,6 +1371,7 @@ print_xml_status(pe_working_set_t * data_set)
fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false");
fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false");
fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false");
+ fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false");
fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false");
fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false");
fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false");
@@ -1510,6 +1524,12 @@ print_html_status(pe_working_set_t * data_set, const char *filename, gboolean we
} else if (node->details->standby) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"red\">OFFLINE (standby)</font>\n");
+ } else if (node->details->maintenance && node->details->online) {
+ fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
+ "<font color=\"blue\">maintenance</font>\n");
+ } else if (node->details->maintenance) {
+ fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
+ "<font color=\"red\">OFFLINE (maintenance)</font>\n");
} else if (node->details->online) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"green\">online</font>\n");
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index 6466b53..9e02a35 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -539,6 +539,13 @@ print_cluster_status(pe_working_set_t * data_set)
node_mode = "OFFLINE (standby)";
}
+ } else if (node->details->maintenance) {
+ if (node->details->online) {
+ node_mode = "maintenance";
+ } else {
+ node_mode = "OFFLINE (maintenance)";
+ }
+
} else if (node->details->online) {
node_mode = "online";
online_nodes = add_list_element(online_nodes, node->details->uname);
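A minimal usage sketch for the patch above (assuming the stock crm_attribute tool; the command itself is not part of the diff): the per-node flag read by unpack_status() is an ordinary node attribute, so a single node can be switched into maintenance mode with:

    crm_attribute --type nodes --node node1 --name maintenance --update true

On the next transition, can_run_resources() excludes node1, resources already running there stay put as unmanaged via native_add_running(), and their recurring monitors are cancelled through CancelXmlOp().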


@@ -0,0 +1,33 @@
diff --git a/pengine/Makefile.am b/pengine/Makefile.am
index 521b142..ffb8267 100644
--- a/pengine/Makefile.am
+++ b/pengine/Makefile.am
@@ -42,8 +42,16 @@ lib_LTLIBRARIES = libpengine.la
## binary progs
halib_PROGRAMS = pengine
+sbin_PROGRAMS = ptest
man7_MANS =
+man8_MANS =
+
+if BUILD_HELP
+man8_MANS += ptest.8
+ptest.8: ptest
+ $(HELP2MAN) --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/pengine/$<
+endif
if BUILD_XML_HELP
man7_MANS += pengine.7
@@ -72,6 +80,11 @@ pengine_LDADD = $(top_builddir)/lib/cib/libcib.la $(COMMONLIBS)
# libcib for get_object_root()
# $(top_builddir)/lib/hbclient/libhbclient.la
+ptest_SOURCES = ptest.c
+ptest_LDADD = $(top_builddir)/lib/cib/libcib.la \
+ $(top_builddir)/lib/transition/libtransitioner.la \
+ $(COMMONLIBS)
+
install-exec-local:
$(mkinstalldirs) $(DESTDIR)/$(PE_STATE_DIR)
-chown $(CRM_DAEMON_USER) $(DESTDIR)/$(PE_STATE_DIR)
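A hedged aside (flag names follow ptest's historical interface and should be treated as an assumption): with the ptest binary restored by this Makefile change, pengine fixtures such as the node-maintenance tests above can be replayed by hand, e.g.:

    ptest -x pengine/test10/node-maintenance-2.xml -s

which recomputes the allocation scores recorded in the matching .scores file.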


@@ -1,3 +1,147 @@
-------------------------------------------------------------------
Thu May 9 14:58:19 UTC 2013 - ygao@suse.com
- crmd: Do not record pending delete operations in the CIB
-------------------------------------------------------------------
Sat May 4 01:31:38 UTC 2013 - ygao@suse.com
- cib: Strip text nodes on writing CIB to disk (bnc#815447, bnc#809635)
- xml: Use xmlNodeDump() from libxml2 when calculating digest (bnc#809635)
-------------------------------------------------------------------
Fri May 3 08:35:25 UTC 2013 - ygao@suse.com
- PE: Ensure per-node resource parameters are used during probes
- cl#5154 - Do not expire failures when on-fail=block is present
- Upstream version cs: 2db99f16fe9b19a6320db62755f0f28ced8efe00
-------------------------------------------------------------------
Thu May 2 03:59:20 UTC 2013 - ygao@suse.com
- cl#5153 - Correctly display clone failcounts in crm_mon
- crmd: Simplify the logic for resetting node status in the CIB after fencing events (bnc#812913)
- crmd: cl#5152 - Correctly clean up fenced nodes during membership changes (bnc#812913)
- ipc: Reference the correct type when inspecting IPC messages
- Core: Ensure the last field in transition keys is 36 characters
- crm_mon: Check if a process can be daemonized before forking so the parent can report an error
- crm_mon: Ensure stale pid files are updated when a new process is started
- extra: add failure_score parameter into ping RA
- crm_resource: Allow --cleanup without a resource name
- Upstream version cs: 3297085490c2fe0af25fc805d2b53123da877f7a
-------------------------------------------------------------------
Thu Apr 18 08:08:51 UTC 2013 - ygao@suse.com
- fencing: Drop the severity of the messages on registering a stonith device (bnc#812269)
-------------------------------------------------------------------
Thu Apr 18 07:09:27 UTC 2013 - ygao@suse.com
- crmd: Ensure we return to a stable state if there have been too many fencing failures
- fencing: Prevent empty call-id during notification processing
- crm_resource: Make --refresh and --reprobe an alias for --cleanup [--resource name]
- crmd: Indicate completion of refresh to callers
- crmd: Indicate completion of re-probe to callers
- crm_resource: Allow individual resources to be reprobed
- ipc: Allow unprivileged clients to clean up after server failures
- crmd: Only perform a dry run for deletions if built with ACL support
- Upstream version cs: a92f3a0e327da2af171856c979fb8eaa00122534
-------------------------------------------------------------------
Wed Apr 17 05:34:18 UTC 2013 - ygao@suse.com
- crmd: Fixes hash table assert when resetting stonith failures
- crmd: Fixes memleak in failure cleanup
- crmd: Do not update fail-count and last-failure for old failures (bnc#804003)
- tools: cibadmin - Do not log for query commands by default (bnc#813045)
- services: Fix signal FD leak
- Upstream version cs: d63c13cbcde7663cfae9bb412583ad11914b6acc
-------------------------------------------------------------------
Thu Apr 4 03:42:40 UTC 2013 - ygao@suse.com
- tools: crm_mon - Prevent segfault on reconnecting cib (bnc#813131)
-------------------------------------------------------------------
Mon Apr 1 10:27:14 UTC 2013 - ygao@suse.com
- Fix: tools: crm_mon - Print a timing field only if its value is non-zero (bnc#809313)
-------------------------------------------------------------------
Mon Apr 1 07:44:56 UTC 2013 - ygao@suse.com
- tools: crm_mon - Save relevant cib XML into /tmp/cmon.* files if refreshing fails (bnc#800323)
-------------------------------------------------------------------
Fri Mar 29 08:06:16 UTC 2013 - ygao@suse.com
- fencing: Directly query the full cib on updating the cib cache (bnc#812269)
-------------------------------------------------------------------
Fri Mar 29 03:33:25 UTC 2013 - ygao@suse.com
- cib: Use unique error codes for write_cib_contents()
- Fix: Ensure custom error codes are less than 256
- corosync: cl#5148 - Correctly remove a node that used to have a different nodeid (bnc#805278)
- Upstream version cs: e8caee88c9e078fccf98a9da05543b73d4696f04
-------------------------------------------------------------------
Thu Mar 28 16:55:11 UTC 2013 - ygao@suse.com
- services: Improve the performance of synchronous actions (bnc#803969)
-------------------------------------------------------------------
Wed Mar 27 14:11:11 UTC 2013 - ygao@suse.com
- lrmd: Change the default directory for nagios plugins (bnc#812005)
-------------------------------------------------------------------
Wed Mar 27 08:00:17 UTC 2013 - ygao@suse.com
- xml: Prevent lockups by setting a more reliable buffer allocation strategy
- fencing: Fix memory leaks and invalid read
- fencing: Correctly unpack device parameters before using them
- lrmd: Avoid memory leak in resources_action_create()
- Skip WNOHANG when waiting after sending SIGKILL to child processes
- fencing: Fail the operation once all peers have been exhausted
- Fix several use-after-free and use-of-NULL
- fencing: Correctly filter devices when no nodes are configured yet
- ipc: Restore the ability for members of the haclient group to connect to the cluster
- Fencing: Re-use the policy engine libraries for filtering devices
- Upstream version cs: c7910371a5ada346fe7d1e1872ec9a40570018e6
-------------------------------------------------------------------
Mon Mar 25 03:05:54 UTC 2013 - ygao@suse.com
- corosync: Support "crm_node --remove" with a node name for corosync plugin (bnc#805278)
- Fix memory leaks in stonithd, crmd and lrmd
- services: Close the reading pipes when a synchronous action is finished
- Doc: Pacemaker Explained: Add nagios resource class
- Upstream version cs: cb7b3f48dea9eb1b6825242da2ef507887b409cc
-------------------------------------------------------------------
Tue Mar 12 11:39:23 UTC 2013 - ygao@suse.com
- Log: Change some messages to notice level (bnc#806256)
-------------------------------------------------------------------
Mon Mar 11 10:49:35 UTC 2013 - ygao@suse.com
- xml: Support nagios resource class in the 1.2 schema
- xml: Support resource container in the 1.2 schema
-------------------------------------------------------------------
Fri Mar 8 11:37:17 UTC 2013 - ygao@suse.com
- Update to version 1.1.9
- fencing: Only initiate topology based fencing if all topology queries have arrived
  AND we are still in the query phase
- fencing: Don't delay execution if all topology queries have arrived
- crmd: Bug cl#5144 - Do not update the expected status of failed nodes (bnc#807824)
- Core: Support PCMK_logpriority to set syslog priority
- Upstream version cs: 2a917ddb9d1a01f47db84b17468413522d63ddfa
-------------------------------------------------------------------
Fri Mar 1 10:31:54 UTC 2013 - coolo@suse.com


@@ -39,18 +39,32 @@ Name:           pacemaker
 Summary:        The Pacemaker scalable High-Availability cluster resource manager
 License:        GPL-2.0+ and LGPL-2.1+
 Group:          Productivity/Clustering/HA
-Version:        1.1.7
+Version:        1.1.9
 Release:        0
 Url:            http://www.clusterlabs.org
 Source:         pacemaker.tar.bz2
 Source2:        %{doc_pkg}.tar.gz
 Source100:      pacemaker.rpmlintrc
 Patch1:         pacemaker-cts-startcmd.patch
-Patch2:         pacemaker-daemon-symlinks.diff
-Patch3:         bug-728579_pacemaker-stonith-dev-id.diff
-Patch4:         pacemaker-NodeUtilization-RA.diff
-Patch5:         pacemaker-crm-proc-flag.patch
-Patch6:         pacemaker-resource.patch
+Patch2:         pacemaker-disable-listing-fence-agents.patch
+Patch3:         pacemaker-daemon-symlinks.patch
+Patch4:         bug-806256_pacemaker-log-level-notice.patch
+Patch5:         pacemaker-ptest-build.patch
+Patch6:         bug-728579_pacemaker-stonith-dev-id.patch
+Patch7:         bug-792124_pacemaker-stonith-monitor-log.patch
+Patch8:         pacemaker-NodeUtilization-RA.patch
+Patch9:         pacemaker-node-maintenance.patch
+Patch10:        pacemaker-node-maintenance-tests.patch
+Patch11:        pacemaker-colocated-utilization.patch
+Patch12:        pacemaker-lrmd-remote-disabled.patch
+Patch13:        pacemaker-cibsecret-tool-temp-disabled.patch
+Patch14:        pacemaker-nagios-plugin-dir.patch
+Patch15:        bug-812269_pacemaker-fencing-update-cib.patch
+Patch16:        bug-800323_pacemaker-crm_mon-save-cib.patch
+Patch17:        bug-812269_pacemaker-fencing-device-register-messages.patch
+Patch18:        bug-815447_pacemaker-cib-strip-text-nodes.patch
+Patch19:        bug-809635_pacemaker-xml-digest.patch
+Patch20:        pacemaker-crmd-pending-delete.patch
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 Provides:       pacemaker-ticket-support = 2.0
 Conflicts:      heartbeat < 3.0
@@ -58,6 +72,7 @@ Conflicts:      heartbeat < 3.0
 Conflicts:      libheartbeat2 < 3.0.0
 PreReq:         cluster-glue >= 1.0.6
 Requires:       libpacemaker3 = %{version}-%{release}
+Requires:       libqb
 Requires:       openais
 Requires:       python
 Requires:       python-curses
@@ -72,8 +87,10 @@ BuildRequires:  libgnutls-devel
 %else
 BuildRequires:  gnutls-devel
 %endif
+BuildRequires:  help2man
 BuildRequires:  libesmtp-devel
 BuildRequires:  libglue-devel
+BuildRequires:  libqb-devel
 BuildRequires:  libtool
 BuildRequires:  libxml2-devel
 BuildRequires:  libxslt-devel
@@ -82,7 +99,6 @@ BuildRequires:  net-snmp-devel
 BuildRequires:  pam-devel
 BuildRequires:  pkgconfig
 BuildRequires:  python-devel
-BuildRequires:  resource-agents
 BuildRequires:  swig
 %if %with_regression_tests
 BuildRequires:  openais
@@ -161,6 +177,7 @@ Group:          Development/Libraries/C and C++
 Requires:       %{name} = %{version}-%{release}
 Requires:       libglue-devel
 Requires:       libpacemaker3 = %{version}-%{release}
+Requires:       libqb-devel
 %if %with_regression_tests
 # For the regression tests, we can run them only if all pacemaker
 # packages are installed, so we pull that in here for the regression
@@ -194,6 +211,20 @@ Authors:
 %patch4 -p1
 %patch5 -p1
 %patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch16 -p1
+%patch17 -p1
+%patch18 -p1
+%patch19 -p1
+%patch20 -p1
 ###########################################################
 %build
@@ -208,6 +239,7 @@ export CFLAGS
     --with-gcov=1 \
 %endif
     --with-acl=true \
+    --with-cibsecrets=true \
     --enable-fatal-warnings=no
 make %{_smp_mflags}
@@ -220,15 +252,17 @@ fi
 %install
 export SUSE_ASNEEDED=0
 ###########################################################
-make DESTDIR=$RPM_BUILD_ROOT install
 if [ -e %{doc_pkg}/cibadmin.8 ]; then
     install -d $RPM_BUILD_ROOT/%{_mandir}/man8
     for file in `ls -1 %{doc_pkg}/*.8`; do
         install -p -m 644 $file $RPM_BUILD_ROOT/%{_mandir}/man8
     done
 fi
-chmod a+x $RPM_BUILD_ROOT/%{_libdir}/pacemaker/crm_primitive.py
-chmod a+x $RPM_BUILD_ROOT/%{_libdir}/pacemaker/hb2openais-helper.py
+make DESTDIR=$RPM_BUILD_ROOT install
+#chmod a+x $RPM_BUILD_ROOT/%{_libdir}/pacemaker/crm_primitive.py
+#chmod a+x $RPM_BUILD_ROOT/%{_libdir}/pacemaker/hb2openais-helper.py
 rm $RPM_BUILD_ROOT/%{_libdir}/service_crm.so
 # Don't want to ship this just yet:
@@ -299,7 +333,8 @@ fi
 %defattr(-,root,root)
 %dir %{_libdir}/pacemaker
 %dir %{_libdir}/heartbeat
-%dir %{_var}/lib/heartbeat
+#%dir %{_var}/lib/heartbeat
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
 %dir %{pcmk_docdir}
 %exclude %{_datadir}/pacemaker/tests
 %{_datadir}/pacemaker
@@ -321,14 +356,21 @@ fi
 %{_sbindir}/attrd_updater
 %{_sbindir}/ptest
 %{_sbindir}/crm_shadow
-%{_sbindir}/cibpipe
+#%{_sbindir}/cibpipe
 %{_sbindir}/crm_node
 %{_sbindir}/crm_simulate
 %{_sbindir}/fence_legacy
 %{_sbindir}/stonith_admin
+%{_sbindir}/crm_error
+#%{_sbindir}/cibsecret
+#%{_sbindir}/pacemaker_remoted
-%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm
-%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine
+#%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm
+#%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
+#%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
 %ghost %dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm
 %dir /usr/lib/ocf
 %dir /usr/lib/ocf/resource.d
@@ -339,11 +381,11 @@ fi
 %{_libexecdir}/lcrso
 %{_libexecdir}/lcrso/pacemaker.lcrso
 %endif
-%if !0%{?_without_heartbeat}
-%{_sbindir}/crm_uuid
-%else
-%exclude %{_sbindir}/crm_uuid
-%endif
+#%if !0%{?_without_heartbeat}
+#%{_sbindir}/crm_uuid
+#%else
+#%exclude %{_sbindir}/crm_uuid
+#%endif
 %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb
 # Packaged elsewhere
 %exclude %{pcmk_docdir}/AUTHORS
@@ -367,6 +409,8 @@ fi
 %{_libdir}/libpengine.so.*
 %{_libdir}/libtransitioner.so.*
 %{_libdir}/libstonithd.so.*
+%{_libdir}/libcrmservice.so.*
+%{_libdir}/liblrmd.so.*
 %files -n libpacemaker-devel
 %defattr(-,root,root)


@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38cef3a375dc4d083e66eb2bb5f92b1cdfddfac5e7b69dbfe45ff19d078e6438
-size 7888481
+oid sha256:209f22488ae245639dce98bb1431c9ab1f7acde22d5c7099f7680643c335aecf
+size 8684301