- cpg: Correctly free sent messages

- crm_resource: Present an error if an admin tries to --force-start an already active resource
- cluster: Correctly construct the header for compressed messages
- PE: Re-allow ordering constraints with fencing devices now that it is safe to do so
- cl#5170 - Correctly support on-fail=block for clones
- PE: Do the right thing when admins specify the internal resource instead of the clone
- fencing: Correctly detect existing device entries when registering a new one
- crm_node: Return 0 if --remove passed
- Remove the mainloop_trigger objects that are no longer needed.
- corosync: Ensure removed peers are erased from all caches
- cluster: Detect and warn about node names with capitals
- Upstream version cs: 9abe6876d50ff2879f17b08dfa1500817a7fbeba

- crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons

- lrmd: Prevent use-of-NULL in client library
- Upstream version cs: 9d656180294770e39deced79a134d4cf531b8159

- Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints
- Upstream version cs: 90b8afe77ab8a3ff2fd30826187fd7726aab4f04

- crm_mon: Bug cl#5167 - Only print "stopped" node list for incomplete clone sets
- Upstream version cs: d16f4c56b120c5ae761680cfb4d98922a185c6c2

- Fix: memory leak when score2char() is used
- Fencing: Avoid hashtable lookups in NULL
- Fencing: Force use of the calculated command for support ops
- Bug cl#5157 - Allow migration in the absence of some colocation constraints
- PE: Detect constraints with inappropriate actions (ie. promote for a clone)
- PE: Do the right thing when admins specify the internal resource instead of the clone

OBS-URL: https://build.opensuse.org/package/show/network:ha-clustering:Factory/pacemaker?expand=0&rev=47
This commit is contained in:
Yan Gao 2013-07-22 10:12:36 +00:00 committed by Git OBS Bridge
parent 62ef318946
commit fe46e06b0e
13 changed files with 257 additions and 755 deletions

View File

@ -4,11 +4,11 @@ Date: Thu Mar 14 09:41:53 2013 +0800
Log: Change some messages to notice level (bnc#806256)
diff --git a/crmd/callbacks.c b/crmd/callbacks.c
index a7830d3..d80cff0 100644
--- a/crmd/callbacks.c
+++ b/crmd/callbacks.c
@@ -179,7 +179,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
Index: pacemaker/crmd/callbacks.c
===================================================================
--- pacemaker.orig/crmd/callbacks.c
+++ pacemaker/crmd/callbacks.c
@@ -179,7 +179,7 @@ peer_update_callback(enum crm_status_typ
const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK);
if (alive && safe_str_eq(task, CRM_OP_FENCE)) {
@ -17,12 +17,12 @@ index a7830d3..d80cff0 100644
down->id);
erase_status_tag(node->uname, XML_CIB_TAG_LRM, cib_scope_local);
erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
diff --git a/crmd/membership.c b/crmd/membership.c
index e435e21..b3c34ae 100644
--- a/crmd/membership.c
+++ b/crmd/membership.c
@@ -322,7 +322,7 @@ crm_update_quorum(gboolean quorum, gboolean force_update)
set_uuid(update, XML_ATTR_DC_UUID, fsa_our_uname);
Index: pacemaker/crmd/membership.c
===================================================================
--- pacemaker.orig/crmd/membership.c
+++ pacemaker/crmd/membership.c
@@ -297,7 +297,7 @@ crm_update_quorum(gboolean quorum, gbool
crm_xml_add(update, XML_ATTR_DC_UUID, fsa_our_uuid);
fsa_cib_update(XML_TAG_CIB, update, call_options, call_id, NULL);
- crm_debug("Updating quorum status to %s (call=%d)", quorum ? "true" : "false", call_id);
@ -30,11 +30,11 @@ index e435e21..b3c34ae 100644
fsa_register_cib_callback(call_id, FALSE, NULL, cib_quorum_update_complete);
free_xml(update);
}
diff --git a/crmd/tengine.c b/crmd/tengine.c
index 9ff458c..f22e87f 100644
--- a/crmd/tengine.c
+++ b/crmd/tengine.c
@@ -208,7 +208,7 @@ do_te_invoke(long long action,
Index: pacemaker/crmd/tengine.c
===================================================================
--- pacemaker.orig/crmd/tengine.c
+++ pacemaker/crmd/tengine.c
@@ -204,7 +204,7 @@ do_te_invoke(long long action,
destroy_graph(transition_graph);
transition_graph = unpack_graph(graph_data, graph_input);
CRM_CHECK(transition_graph != NULL, transition_graph = create_blank_graph(); return);
@ -43,29 +43,11 @@ index 9ff458c..f22e87f 100644
graph_input);
value = crm_element_value(graph_data, "failed-stop-offset");
diff --git a/fencing/commands.c b/fencing/commands.c
index 026371a..d1ad657 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -1098,9 +1098,11 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc
}
if (safe_str_eq(host, alias)) {
- crm_info("%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type);
+ do_crm_log(can ? LOG_INFO : LOG_NOTICE,
+ "%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type);
} else {
- crm_info("%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias,
+ do_crm_log(can ? LOG_INFO : LOG_NOTICE,
+ "%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias,
check_type);
}
diff --git a/fencing/remote.c b/fencing/remote.c
index 15a52b7..98dc1f9 100644
--- a/fencing/remote.c
+++ b/fencing/remote.c
@@ -340,7 +340,7 @@ remote_op_timeout(gpointer userdata)
Index: pacemaker/fencing/remote.c
===================================================================
--- pacemaker.orig/fencing/remote.c
+++ pacemaker/fencing/remote.c
@@ -341,7 +341,7 @@ remote_op_timeout(gpointer userdata)
return FALSE;
}

View File

@ -4,20 +4,20 @@ Date: Thu Apr 18 16:00:02 2013 +0800
Log: fencing: Drop the severity of the messages on registering a stonith device
diff --git a/fencing/commands.c b/fencing/commands.c
index a58c880..c7d9d38 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -812,7 +812,7 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
stonith_device_t *device = build_device_from_xml(msg);
Index: pacemaker/fencing/commands.c
===================================================================
--- pacemaker.orig/fencing/commands.c
+++ pacemaker/fencing/commands.c
@@ -816,7 +816,7 @@ stonith_device_register(xmlNode * msg, c
if ((dup = device_has_duplicate(device))) {
dup = device_has_duplicate(device);
if (dup) {
- crm_notice("Device '%s' already existed in device list (%d active devices)", device->id,
+ crm_info("Device '%s' already existed in device list (%d active devices)", device->id,
g_hash_table_size(device_list));
free_device(device);
device = dup;
@@ -832,7 +832,7 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
@@ -839,7 +839,7 @@ stonith_device_register(xmlNode * msg, c
}
g_hash_table_replace(device_list, device->id, device);

View File

@ -0,0 +1,13 @@
diff --git a/configure.ac b/configure.ac
index 483f94d..0a3b9b5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@ dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
-AC_INIT(pacemaker, 1.1.9, pacemaker@oss.clusterlabs.org,,http://www.clusterlabs.org)
+AC_INIT(pacemaker, 1.1.10, pacemaker@oss.clusterlabs.org,,http://www.clusterlabs.org)
CRM_DTD_VERSION="1.2"
PCMK_FEATURES=""

View File

@ -0,0 +1,21 @@
diff --git a/configure.ac b/configure.ac
index 483f94d..8445c60 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1009,11 +1009,11 @@ AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
LIBQB_LOG=1
PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc"
-if
- ! pkg-config --atleast-version 0.13 libqb
-then
- AC_MSG_FAILURE(Version of libqb is too old: v0.13 or greater requried)
-fi
+#if
+# ! pkg-config --atleast-version 0.13 libqb
+#then
+# AC_MSG_FAILURE(Version of libqb is too old: v0.13 or greater requried)
+#fi
LIBS="$LIBS $libqb_LIBS"

View File

@ -1,23 +0,0 @@
commit 62545b8a202a5fef0aad187de096b0f938f4f832
Author: Gao,Yan <ygao@suse.com>
Date: Thu May 9 22:35:01 2013 +0800
Fix: crmd: Do not record pending delete operations in the CIB
Otherwise, it would get stuck in an endless transition loop.
diff --git a/crmd/te_actions.c b/crmd/te_actions.c
index c3e333d..f8179bb 100644
--- a/crmd/te_actions.c
+++ b/crmd/te_actions.c
@@ -464,7 +464,9 @@ te_rsc_command(crm_graph_t * graph, crm_action_t * action)
}
value = crm_meta_value(action->params, XML_OP_ATTR_PENDING);
- if (crm_is_true(value) && safe_str_neq(task, CRMD_ACTION_CANCEL)) {
+ if (crm_is_true(value)
+ && safe_str_neq(task, CRMD_ACTION_CANCEL)
+ && safe_str_neq(task, CRMD_ACTION_DELETE)) {
/* write a "pending" entry to the CIB, inhibit notification */
crm_debug("Recording pending op %s in the CIB", task_uuid);
cib_action_update(action, PCMK_LRM_OP_PENDING, PCMK_EXECRA_STATUS_UNKNOWN);

View File

@ -2,12 +2,12 @@ Index: pacemaker/lib/lrmd/lrmd_client.c
===================================================================
--- pacemaker.orig/lib/lrmd/lrmd_client.c
+++ pacemaker/lib/lrmd/lrmd_client.c
@@ -846,7 +846,7 @@ list_stonith_agents(lrmd_list_t ** resou
stonith_key_value_t *stonith_resources = NULL;
@@ -1815,7 +1815,7 @@ list_stonith_agents(lrmd_list_t ** resou
stonith_key_value_t *dIter = NULL;
if(stonith_api) {
- stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, &stonith_resources, 0);
+ stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, "heartbeat", &stonith_resources, 0);
stonith_api->cmds->free(stonith_api);
}
for (dIter = stonith_resources; dIter; dIter = dIter->next) {
rc++;

View File

@ -1,7 +1,7 @@
diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am
index 0eb0219..2e3e90c 100644
--- a/lrmd/Makefile.am
+++ b/lrmd/Makefile.am
Index: pacemaker/lrmd/Makefile.am
===================================================================
--- pacemaker.orig/lrmd/Makefile.am
+++ pacemaker/lrmd/Makefile.am
@@ -24,8 +24,8 @@ lrmdlibdir = $(CRM_DAEMON_DIR)
lrmdlib_PROGRAMS = lrmd lrmd_test
@ -11,5 +11,5 @@ index 0eb0219..2e3e90c 100644
+#init_SCRIPTS = pacemaker_remote
+#sbin_PROGRAMS = pacemaker_remoted
if HAVE_SYSTEMD
if BUILD_SYSTEMD
systemdunit_DATA = pacemaker_remote.service

View File

@ -1,414 +0,0 @@
commit 6b3407f69109e4ce2cec728817f271c7c2d88c16
Author: Gao,Yan <ygao@suse.com>
Date: Mon Jan 7 10:50:43 2013 +0800
Low: PE: cl#5128 - Support maintenance mode for a single node (regression tests)
diff --git a/pengine/regression.sh b/pengine/regression.sh
index 0f96239..47c1180 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -488,6 +488,8 @@ do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
echo ""
do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
+do_test node-maintenance-1 "cl#5128 - Node maintenance"
+do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
echo ""
do_test stopped-monitor-00 "Stopped Monitor - initial start"
diff --git a/pengine/test10/node-maintenance-1.dot b/pengine/test10/node-maintenance-1.dot
new file mode 100644
index 0000000..e7332ef
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.dot
@@ -0,0 +1,8 @@
+digraph "g" {
+"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"probe_complete node1" [ style=bold color="green" fontcolor="black"]
+"probe_complete node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_stop_0 node1" -> "all_stopped" [ style = bold]
+"rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/node-maintenance-1.exp b/pengine/test10/node-maintenance-1.exp
new file mode 100644
index 0000000..3c73791
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.exp
@@ -0,0 +1,49 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="9" operation="stop" operation_key="rsc1_stop_0" on_node="node1" on_node_uuid="node1">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="2" operation="cancel" operation_key="rsc2_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_call_id="3" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_operation="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <rsc_op id="6" operation="probe_complete" operation_key="probe_complete" on_node="node2" on_node_uuid="node2">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3" priority="1000000">
+ <action_set>
+ <rsc_op id="5" operation="probe_complete" operation_key="probe_complete" on_node="node1" on_node_uuid="node1">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <pseudo_event id="3" operation="all_stopped" operation_key="all_stopped">
+ <attributes crm_feature_set="3.0.7"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="stop" operation_key="rsc1_stop_0" on_node="node1" on_node_uuid="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/node-maintenance-1.scores b/pengine/test10/node-maintenance-1.scores
new file mode 100644
index 0000000..0b1e6dc
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.scores
@@ -0,0 +1,7 @@
+Allocation scores:
+native_color: rsc1 allocation score on node1: -1
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc2 allocation score on node1: 0
+native_color: rsc2 allocation score on node2: INFINITY
+native_color: rsc_stonith allocation score on node1: 0
+native_color: rsc_stonith allocation score on node2: 0
diff --git a/pengine/test10/node-maintenance-1.summary b/pengine/test10/node-maintenance-1.summary
new file mode 100644
index 0000000..85dfb46
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.summary
@@ -0,0 +1,25 @@
+
+Current cluster status:
+Node node2: maintenance
+Online: [ node1 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Started node1
+ rsc2 (ocf::pacemaker:Dummy): Started node2 (unmanaged)
+
+Transition Summary:
+ * Stop rsc1 (node1)
+
+Executing cluster transition:
+ * Resource action: rsc1 stop on node1
+ * Resource action: rsc2 cancel=10000 on node2
+ * Pseudo action: all_stopped
+
+Revised cluster status:
+Node node2: maintenance
+Online: [ node1 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Stopped
+ rsc2 (ocf::pacemaker:Dummy): Started node2 (unmanaged)
+
diff --git a/pengine/test10/node-maintenance-1.xml b/pengine/test10/node-maintenance-1.xml
new file mode 100644
index 0000000..783b0b5
--- /dev/null
+++ b/pengine/test10/node-maintenance-1.xml
@@ -0,0 +1,80 @@
+<cib epoch="1" num_updates="28" admin_epoch="0" validate-with="pacemaker-1.1" cib-last-written="Wed Dec 26 15:15:32 2012" have-quorum="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="node1" type="normal" uname="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ <node id="node2" type="normal" uname="node2">
+ <instance_attributes id="nodes-node2">
+ <nvpair id="nodes-node2-maintenance" name="maintenance" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="rsc_stonith" type="null"/>
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc1-meta_attributes">
+ <nvpair id="rsc1-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc2-meta_attributes">
+ <nvpair id="rsc2-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="rsc1-loc" rsc="rsc1" node="node1" score="-1"/>
+ <rsc_location id="rsc2-loc" rsc="rsc2" node="node2" score="-1"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state id="node1" uname="node1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node1">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc_stonith_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="node2" uname="node2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node2">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/pengine/test10/node-maintenance-2.dot b/pengine/test10/node-maintenance-2.dot
new file mode 100644
index 0000000..59fa1dc
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.dot
@@ -0,0 +1,8 @@
+digraph "g" {
+"probe_complete node1" [ style=bold color="green" fontcolor="black"]
+"probe_complete node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold]
+"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/node-maintenance-2.exp b/pengine/test10/node-maintenance-2.exp
new file mode 100644
index 0000000..0156835
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.exp
@@ -0,0 +1,50 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="rsc1_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="7" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="7" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="rsc2_monitor_10000" on_node="node2" on_node_uuid="node2">
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3" priority="1000000">
+ <action_set>
+ <rsc_op id="4" operation="probe_complete" operation_key="probe_complete" on_node="node2" on_node_uuid="node2">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4" priority="1000000">
+ <action_set>
+ <rsc_op id="3" operation="probe_complete" operation_key="probe_complete" on_node="node1" on_node_uuid="node1">
+ <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.7"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/node-maintenance-2.scores b/pengine/test10/node-maintenance-2.scores
new file mode 100644
index 0000000..c3fe45f
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.scores
@@ -0,0 +1,7 @@
+Allocation scores:
+native_color: rsc1 allocation score on node1: -1
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc2 allocation score on node1: 0
+native_color: rsc2 allocation score on node2: 0
+native_color: rsc_stonith allocation score on node1: 0
+native_color: rsc_stonith allocation score on node2: 0
diff --git a/pengine/test10/node-maintenance-2.summary b/pengine/test10/node-maintenance-2.summary
new file mode 100644
index 0000000..9712d04
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.summary
@@ -0,0 +1,23 @@
+
+Current cluster status:
+Online: [ node1 node2 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Stopped
+ rsc2 (ocf::pacemaker:Dummy): Started node2
+
+Transition Summary:
+ * Start rsc1 (node2)
+
+Executing cluster transition:
+ * Resource action: rsc1 start on node2
+ * Resource action: rsc2 monitor=10000 on node2
+ * Resource action: rsc1 monitor=10000 on node2
+
+Revised cluster status:
+Online: [ node1 node2 ]
+
+ rsc_stonith (stonith:null): Started node1
+ rsc1 (ocf::pacemaker:Dummy): Started node2
+ rsc2 (ocf::pacemaker:Dummy): Started node2
+
diff --git a/pengine/test10/node-maintenance-2.xml b/pengine/test10/node-maintenance-2.xml
new file mode 100644
index 0000000..2f2f966
--- /dev/null
+++ b/pengine/test10/node-maintenance-2.xml
@@ -0,0 +1,78 @@
+<cib epoch="1" num_updates="31" admin_epoch="0" validate-with="pacemaker-1.1" cib-last-written="Thu Jan 3 13:01:04 2013" have-quorum="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="node1" type="normal" uname="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ <node id="node2" type="normal" uname="node2">
+ <instance_attributes id="nodes-node2">
+ <nvpair id="nodes-node2-maintenance" name="maintenance" value="false"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="rsc_stonith" type="null"/>
+ <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc1-meta_attributes">
+ <nvpair id="rsc1-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <meta_attributes id="rsc2-meta_attributes">
+ <nvpair id="rsc2-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="rsc1-loc" rsc="rsc1" node="node1" score="-1"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state id="node1" uname="node1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node1">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc_stonith_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_stop_0" operation="stop" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="4" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="node2" uname="node2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
+ <lrm id="node2">
+ <lrm_resources>
+ <lrm_resource id="rsc_stonith" class="stonith" type="null">
+ <lrm_rsc_op id="rsc_stonith_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.5" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>

View File

@ -1,226 +0,0 @@
commit 5e1d788ff7ddf9b9ba3bdcf3ac1bde09c40055ee
Author: Gao,Yan <ygao@suse.com>
Date: Wed Dec 26 21:11:20 2012 +0800
High: PE: cl#5128 - Support maintenance mode for a single node
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 0dead57..84e6c0e 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -137,6 +137,8 @@ struct node_shared_s {
/*! cache of calculated rsc digests for this node. */
GHashTable *digest_cache;
+
+ gboolean maintenance;
};
struct node_s {
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 8585e99..29a0ad1 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -51,6 +51,10 @@ native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
}
+ if (rsc->variant == pe_native && node->details->maintenance) {
+ clear_bit(rsc->flags, pe_rsc_managed);
+ }
+
if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_info(rsc, "resource %s isnt managed", rsc->id);
resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index fcd830c..dccc62f 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -704,6 +704,11 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set)
this_node->details->standby = TRUE;
}
+ if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance"))) {
+ crm_info("Node %s is in maintenance-mode", this_node->details->uname);
+ this_node->details->maintenance = TRUE;
+ }
+
crm_trace("determining node state");
determine_online_status(state, this_node, data_set);
@@ -969,7 +974,8 @@ determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set
crm_info("Node %s is %s", this_node->details->uname,
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
- this_node->details->standby ? "standby" : "online");
+ this_node->details->standby ? "standby" :
+ this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("Node %s is offline", this_node->details->uname);
diff --git a/pengine/allocate.c b/pengine/allocate.c
index a20f4a6..246b513 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -368,7 +368,8 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki
is_probe = TRUE;
}
- if (interval > 0 && is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (interval > 0 &&
+ (is_set(data_set->flags, pe_flag_maintenance_mode) || node->details->maintenance)) {
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if (is_probe || safe_str_eq(task, RSC_START) || interval > 0
@@ -470,7 +471,8 @@ check_actions(pe_working_set_t * data_set)
if (node == NULL) {
continue;
- } else if (can_run_resources(node) == FALSE) {
+ /* Still need to check actions for a maintenance node to cancel existing monitor operations */
+ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: cant run resources", node->details->uname);
continue;
}
diff --git a/pengine/graph.c b/pengine/graph.c
index 8d90042..041fcd4 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -489,6 +489,10 @@ shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * d
} else if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe_rsc_trace(action->rsc, "Skipping %s: maintainence mode", action->uuid);
continue;
+ } else if (node->details->maintenance) {
+ pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode",
+ action->uuid, node->details->uname);
+ continue;
} else if (safe_str_neq(action->task, RSC_STOP)) {
continue;
} else if (is_not_set(action->rsc->flags, pe_rsc_managed)
diff --git a/pengine/native.c b/pengine/native.c
index 47c12b4..66d775a 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -771,7 +771,8 @@ RecurringOp(resource_t * rsc, action_t * start, node_t * node,
void
Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
- if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (is_not_set(data_set->flags, pe_flag_maintenance_mode) &&
+ (node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
@@ -1000,7 +1001,8 @@ RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
void
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
- if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (is_not_set(data_set->flags, pe_flag_maintenance_mode) &&
+ (node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
diff --git a/pengine/utils.c b/pengine/utils.c
index abd416d..7dfaf95 100644
--- a/pengine/utils.c
+++ b/pengine/utils.c
@@ -106,10 +106,11 @@ can_run_resources(const node_t * node)
#endif
if (node->details->online == FALSE
- || node->details->shutdown || node->details->unclean || node->details->standby) {
- crm_trace("%s: online=%d, unclean=%d, standby=%d",
+ || node->details->shutdown || node->details->unclean
+ || node->details->standby || node->details->maintenance) {
+ crm_trace("%s: online=%d, unclean=%d, standby=%d, maintenance=%d",
node->details->uname, node->details->online,
- node->details->unclean, node->details->standby);
+ node->details->unclean, node->details->standby, node->details->maintenance);
return FALSE;
}
return TRUE;
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 80a44c7..107a908 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -665,6 +665,7 @@ print_simple_status(pe_working_set_t * data_set)
GListPtr gIter = NULL;
int nodes_online = 0;
int nodes_standby = 0;
+ int nodes_maintenance = 0;
dc = data_set->dc_node;
@@ -677,6 +678,8 @@ print_simple_status(pe_working_set_t * data_set)
if (node->details->standby && node->details->online) {
nodes_standby++;
+ } else if (node->details->maintenance && node->details->online) {
+ nodes_maintenance++;
} else if (node->details->online) {
nodes_online++;
} else {
@@ -689,6 +692,9 @@ print_simple_status(pe_working_set_t * data_set)
if (nodes_standby > 0) {
print_as(", %d standby nodes", nodes_standby);
}
+ if (nodes_maintenance > 0) {
+ print_as(", %d maintenance nodes", nodes_maintenance);
+ }
print_as(", %d resources configured", count_resources(data_set, NULL));
}
@@ -1131,6 +1137,13 @@ print_status(pe_working_set_t * data_set)
node_mode = "OFFLINE (standby)";
}
+ } else if (node->details->maintenance) {
+ if (node->details->online) {
+ node_mode = "maintenance";
+ } else {
+ node_mode = "OFFLINE (maintenance)";
+ }
+
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
@@ -1358,6 +1371,7 @@ print_xml_status(pe_working_set_t * data_set)
fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false");
fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false");
fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false");
+ fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false");
fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false");
fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false");
fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false");
@@ -1510,6 +1524,12 @@ print_html_status(pe_working_set_t * data_set, const char *filename, gboolean we
} else if (node->details->standby) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"red\">OFFLINE (standby)</font>\n");
+ } else if (node->details->maintenance && node->details->online) {
+ fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
+ "<font color=\"blue\">maintenance</font>\n");
+ } else if (node->details->maintenance) {
+ fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
+ "<font color=\"red\">OFFLINE (maintenance)</font>\n");
} else if (node->details->online) {
fprintf(stream, "Node: %s (%s): %s", node->details->uname, node->details->id,
"<font color=\"green\">online</font>\n");
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index 6466b53..9e02a35 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -539,6 +539,13 @@ print_cluster_status(pe_working_set_t * data_set)
node_mode = "OFFLINE (standby)";
}
+ } else if (node->details->maintenance) {
+ if (node->details->online) {
+ node_mode = "maintenance";
+ } else {
+ node_mode = "OFFLINE (maintenance)";
+ }
+
} else if (node->details->online) {
node_mode = "online";
online_nodes = add_list_element(online_nodes, node->details->uname);

View File

@ -1,12 +0,0 @@
Index: pacemaker/mcp/pacemaker.c
===================================================================
--- pacemaker.orig/mcp/pacemaker.c
+++ pacemaker/mcp/pacemaker.c
@@ -20,6 +20,7 @@
#include <pacemaker.h>
#include <pwd.h>
+#include <sys/resource.h>
#include <crm/msg_xml.h>
#include <crm/common/ipc.h>

View File

@ -1,3 +1,166 @@
-------------------------------------------------------------------
Mon Jul 22 05:28:01 UTC 2013 - ygao@suse.com
- cpg: Correctly free sent messages
- crm_resource: Present an error if an admin tries to --force-start an already active resource
- cluster: Correctly construct the header for compressed messages
- PE: Re-allow ordering constraints with fencing devices now that it is safe to do so
- cl#5170 - Correctly support on-fail=block for clones
- PE: Do the right thing when admins specify the internal resource instead of the clone
- fencing: Correctly detect existing device entries when registering a new one
- crm_node: Return 0 if --remove passed
- remove the mainloop_trigger that are no longer needed.
- corosync: Ensure removed peers are erased from all caches
- cluster: Detect and warn about node names with capitals
- Upstream version cs: 9abe6876d50ff2879f17b08dfa1500817a7fbeba
-------------------------------------------------------------------
Thu Jul 11 00:30:44 UTC 2013 - ygao@suse.com
- crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons
-------------------------------------------------------------------
Wed Jul 10 12:43:46 UTC 2013 - ygao@suse.com
- lrmd: Prevent use-of-NULL in client library
- Upstream version cs: 9d656180294770e39deced79a134d4cf531b8159
-------------------------------------------------------------------
Wed Jul 10 08:33:31 UTC 2013 - ygao@suse.com
- Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints
- Upstream version cs: 90b8afe77ab8a3ff2fd30826187fd7726aab4f04
-------------------------------------------------------------------
Tue Jul 9 14:32:31 UTC 2013 - ygao@suse.com
- crm_mon: Bug cl#5167 - Only print "stopped" node list for incomplete clone sets
- Upstream version cs: d16f4c56b120c5ae761680cfb4d98922a185c6c2
-------------------------------------------------------------------
Thu Jul 4 14:09:36 UTC 2013 - ygao@suse.com
- Fix: memory leak when used score2char()
- Fencing: Avoid hashtable lookups in NULL
- Fencing: Force use of the calculated command for support ops
- Bug cl#5157 - Allow migration in the absence of some colocation constraints
- PE: Detect constraints with inappropriate actions (ie. promote for a clone)
- PE: Do the right thing when admins specify the internal resource instead of the clone
- Fencing: User-defined arguments always trump calculated ones
- cluster: Correctly remove duplicate peer entries
- Upstream version cs: 0dc3203917438057ab476a0f0aac4a1c8e27f97a
-------------------------------------------------------------------
Wed Jul 3 02:32:22 UTC 2013 - ygao@suse.com
- plugin: Fix memory leaks
- Upstream version cs: 5053ba656a854bdf4eacaf7d43b0dce1b7e58b9d
-------------------------------------------------------------------
Tue Jul 2 04:14:55 UTC 2013 - ygao@suse.com
- plugin: Handle messages in the common way in the plugin dispatch function by default
- Upstream version cs: 704f45e36594018358b0ea9d578291af1d52196e
-------------------------------------------------------------------
Sat Jun 29 18:02:34 UTC 2013 - lmb@suse.com
- plugin: Fix small memory leak
-------------------------------------------------------------------
Wed Jun 26 10:20:03 UTC 2013 - ygao@suse.com
- plugin: Change the function declaration in the plugin header
- crm_mon --neg-location drbd-fence-by-handler
- crm_mon -A: print_attr_msg needs to lookup parameters, not meta
- crm_mon -A: fix memleak, add g_free_list(attr_list)
- crmd: CID#1036761 Dereference null return value
- Corosync: CID#1036763 Dereference before null check
- Tools: CID#1036764 Unused pointer value, CID#1036762 Resource leak
- Tools: CID#1036760 Truncated stdio return value
- PE: Delete the old resource state on every node whenever the resource type is changed (bnc#822233)
- Upstream version cs: 61e4b8f62b109ca5b910efd38c7e79ef403030fd
-------------------------------------------------------------------
Fri Jun 21 04:44:12 UTC 2013 - ygao@suse.com
- crmd: Ensure operations for cleaned up resources don't block recovery (bnc#825536)
- logging: If SIGTRAP is sent before tracing is turned on, turn it on
- membership: Correctly merge partial entries in the peer cache
- attrd: Fixes deleted attributes during dc election
- crmd: Stop trying to fence when no devices exist capable of fencing a node
- PE: Allow active nodes in our current membership to be fenced without quorum
- PE: If fencing is unavailable or disabled, block further recovery for resources that fail to stop
- crm_resource: Avoid whitespace in transition keys when cleaning up and failing resources
- crmd: Everyone who gets a fencing notification should mark the node as down
- crmd: Initiate node shutdown if another node claims to have successfully fenced us
- crm_resource: Correctly implement --master
- crm_resource: Support OCF tracing when using --force-(check|start|stop)
- lrmd: Make lrmd and pacemaker_remoted in charge of setting up rsc state folder
- Allow uuid retrieval for nodes with no known name
- crmd: Update the status section with details of nodes for which we only know the nodeid
- crm_error: Add the ability to list and print error symbols
- crmd: Further improve clean shutdowns
- Core: Ensure the blackbox is saved on abnormal program termination
- crm_mon: Implement the ability to change the display options during operation.
- Upstream version cs: 6ea4b7ef34ce6e17f53f69d87a404d334db5d837
-------------------------------------------------------------------
Wed Jun 19 08:45:46 UTC 2013 - ygao@suse.com
- xml: Purge diff markers even if there's no digest (bnc#824097)
-------------------------------------------------------------------
Wed Jun 19 08:32:15 UTC 2013 - ygao@suse.com
- crmd: Prevent use-of-NULL during emergency shutdown
- crmd: Ensure all membership operations can complete while trying to cancel a transition
- Repair handling of attrd_update_delegate() return codes
- attrd: Correctly detect successful invocation of attrd_update()
- Convert all exit codes to positive errno values
- crm_attribute: Send details on duplicate values to stdout
- crm_attribute: Return the actual error code rather than the negative of it
- crm_resource: Implement the --master option for the --move,--ban,--clear commands
- crm_resource: Make --move without --host a true alias for --ban without --host
- crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove)
- cib: Allow multiple children to be removed from the object root (eg. constraints)
- crmd: Prevent use-after-free now that mainloop terminates naturally
- mainloop: Saner ordering of reference counting events in mainloop_destroy_trigger()
- Correctly calculate crm_system_name
- Core: Tolerate NULL in mainloop_destroy_trigger()
- crmd: Allow mainloop to terminate naturally on graceful shutdown
- crmd: Use standard error codes on exit
- Ensure anything we free at shutdown is set to NULL too
- Ensure crm_system_name is cleaned up at exit for valgrind
- crmd: Ensure timers are stopped at exit
- Fencing: Ensure API object is consistently free'd
- Clean up internal mainloop structures at exit
- Core: Correctly unreference GSource inputs
- crmd: Clean up more memory before exit
- crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns
- Upstream version cs: 47ed85ac6f4a9ac353945ae8bc4d7cea98b7cb7a
-------------------------------------------------------------------
Tue May 28 08:50:43 UTC 2013 - ygao@suse.com
- PE: Mark unrunnable stop actions as "blocked" (bnc#816511)
- crm_resource: Do not crash when calling --force-start on an invalid resource
- ping: Ensure OCF_RESKEY_failure_score has a value
- PE: Unlink pengine output files before writing.
- Fencing: Restore the ability to manually confirm that fencing completed
- Fix: Resolve several memory leaks and improper dereferencing of pointers
- PE: cl#5155 - Block the stop of resources if any depending resource is unmanaged (bnc#816511)
- crmd: Insert async failures so that they appear in the correct order
- cl#5151 - Ensure node names are consistently compared without case
- crmd: Block after 10 failed fencing regardless of crmd-transition-delay
- PE: Re-initiate _active_ recurring monitors that previously failed but have timed out
- cl#5133 - PE: Correctly observe on-fail=block for failed demote operation
- PE: Suppress meaningless IDs when displaying anonymous clone status
- PE: Display a list of nodes on which stopped anonymous clones are not active instead of meaningless clone IDs
- PE: Correctly handle resources that recover before we operate on them
- Core: Detect child processes that terminate before our SIGCHLD handler is installed
- Upstream version cs: 674e69854a0ce4636ee54e123e45b1612c8f1fd5
-------------------------------------------------------------------
Thu May 9 14:58:19 UTC 2013 - ygao@suse.com

View File

@ -53,18 +53,17 @@ Patch5: pacemaker-ptest-build.patch
Patch6: bug-728579_pacemaker-stonith-dev-id.patch
Patch7: bug-792124_pacemaker-stonith-monitor-log.patch
Patch8: pacemaker-NodeUtilization-RA.patch
Patch9: pacemaker-node-maintenance.patch
Patch10: pacemaker-node-maintenance-tests.patch
Patch11: pacemaker-colocated-utilization.patch
Patch12: pacemaker-lrmd-remote-disabled.patch
Patch13: pacemaker-cibsecret-tool-temp-disabled.patch
Patch14: pacemaker-nagios-plugin-dir.patch
Patch15: bug-812269_pacemaker-fencing-update-cib.patch
Patch16: bug-800323_pacemaker-crm_mon-save-cib.patch
Patch17: bug-812269_pacemaker-fencing-device-register-messages.patch
Patch18: bug-815447_pacemaker-cib-strip-text-nodes.patch
Patch19: bug-809635_pacemaker-xml-digest.patch
Patch20: pacemaker-crmd-pending-delete.patch
Patch9: pacemaker-colocated-utilization.patch
Patch10: pacemaker-lrmd-remote-disabled.patch
Patch11: pacemaker-cibsecret-tool-temp-disabled.patch
Patch12: pacemaker-nagios-plugin-dir.patch
Patch13: bug-812269_pacemaker-fencing-update-cib.patch
Patch14: bug-800323_pacemaker-crm_mon-save-cib.patch
Patch15: bug-812269_pacemaker-fencing-device-register-messages.patch
Patch16: bug-815447_pacemaker-cib-strip-text-nodes.patch
Patch17: bug-809635_pacemaker-xml-digest.patch
Patch18: pacemaker-configure-libqb-version.patch
Patch19: pacemaker-build-1.1.10-rc.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-build
Provides: pacemaker-ticket-support = 2.0
Conflicts: heartbeat < 3.0
@ -224,7 +223,6 @@ Authors:
%patch17 -p1
%patch18 -p1
%patch19 -p1
%patch20 -p1
###########################################################
%build

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:209f22488ae245639dce98bb1431c9ab1f7acde22d5c7099f7680643c335aecf
size 8684301
oid sha256:79d3de89e46bdc3c6054929a947ebe948442358e0aeba20b2c65a02b808f973d
size 8733062