pacemaker  2.0.4-2deceaa3ae
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
1 /*
2  * Copyright 2004-2020 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <crm/pengine/rules.h>
13 #include <crm/msg_xml.h>
14 #include <pacemaker-internal.h>
15 #include <crm/services.h>
16 
17 // The controller removes the resource from the CIB, making this redundant
18 // #define DELETE_THEN_REFRESH 1
19 
20 #define INFINITY_HACK (INFINITY * -100)
21 
22 #define VARIANT_NATIVE 1
23 #include <lib/pengine/variant.h>
24 
25 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
26  pe_working_set_t *data_set);
27 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
28  xmlNode *operation, pe_working_set_t *data_set);
29 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
30  pe_working_set_t *data_set);
31 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
32  xmlNode *operation, pe_working_set_t *data_set);
33 
34 void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
35 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
36 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
37 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
38 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
39 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
40  pe_working_set_t * data_set);
41 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
42 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
43 
44 /* This array says what the *next* role should be when transitioning from one
45  * role to another. For example going from Stopped to Master, the next role is
46  * RSC_ROLE_SLAVE, because the resource must be started before being promoted.
47  * The current state then becomes Started, which is fed into this array again,
48  * giving a next role of RSC_ROLE_MASTER.
49  */
50 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
51  /* Current state  Next state */
52  /*                 Unknown           Stopped           Started           Slave             Master */
53  /* Unknown */    { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED },
54  /* Stopped */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE },
55  /* Started */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
56  /* Slave */      { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
57  /* Master */     { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
58 };
59 
60 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
61  gboolean optional,
62  pe_working_set_t *data_set);
63 
64 // This array picks the function needed to transition from one role to another
65 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
66  /* Current state  Next state */
67  /*                 Unknown    Stopped    Started    Slave      Master */
68  /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
69  /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
70  /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
71  /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
72  /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , },
73 };
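/* Illustrative sketch (not part of the original file): how the two matrices
 * above combine. This mirrors the role-stepping loops in
 * native_create_actions() below; walk_roles_example() and its parameters are
 * illustrative names, not Pacemaker API.
 */
static void
walk_roles_example(pe_resource_t *rsc, pe_node_t *node, enum rsc_role_e target,
                   pe_working_set_t *data_set)
{
    enum rsc_role_e role = rsc->role;

    while (role != target) {
        // Next intermediate role, e.g. Stopped -> Master first yields Slave
        enum rsc_role_e next_role = rsc_state_matrix[role][target];

        // Schedule the action for this hop (StartRsc, PromoteRsc, ...)
        if (rsc_action_matrix[role][next_role](rsc, node, FALSE,
                                               data_set) == FALSE) {
            break;  // transition function refused; stop scheduling further hops
        }
        role = next_role;
    }
}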
74 
75 static gboolean
76 native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
77 {
78  GListPtr nodes = NULL;
79  pe_node_t *chosen = NULL;
80  pe_node_t *best = NULL;
81  int multiple = 1;
82  int length = 0;
83  gboolean result = FALSE;
84 
85  process_utilization(rsc, &prefer, data_set);
86 
87  if (is_not_set(rsc->flags, pe_rsc_provisional)) {
88  return rsc->allocated_to ? TRUE : FALSE;
89  }
90 
91  // Sort allowed nodes by weight
92  if (rsc->allowed_nodes) {
93  length = g_hash_table_size(rsc->allowed_nodes);
94  }
95  if (length > 0) {
96  nodes = g_hash_table_get_values(rsc->allowed_nodes);
97  nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
98 
99  // First node in sorted list has the best score
100  best = g_list_nth_data(nodes, 0);
101  }
102 
103  if (prefer && nodes) {
104  chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
105 
106  if (chosen == NULL) {
107  pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
108  prefer->details->uname, rsc->id);
109 
110  /* Favor the preferred node as long as its weight is at least as good as
111  * the best allowed node's.
112  *
113  * An alternative would be to favor the preferred node even if the best
114  * node is better, when the best node's weight is less than INFINITY.
115  */
116  } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
117  pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
118  chosen->details->uname, rsc->id);
119  chosen = NULL;
120 
121  } else if (!can_run_resources(chosen)) {
122  pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
123  chosen->details->uname, rsc->id);
124  chosen = NULL;
125 
126  } else {
127  pe_rsc_trace(rsc,
128  "Chose preferred node %s for %s (ignoring %d candidates)",
129  chosen->details->uname, rsc->id, length);
130  }
131  }
132 
133  if ((chosen == NULL) && nodes) {
134  /* Either there is no preferred node, or the preferred node is not
135  * available, but there are other nodes allowed to run the resource.
136  */
137 
138  chosen = best;
139  pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
140  chosen ? chosen->details->uname : "<none>", rsc->id, length);
141 
142  if (!pe_rsc_is_unique_clone(rsc->parent)
143  && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
144  /* If the resource is already running on a node, prefer that node if
145  * it is just as good as the chosen node.
146  *
147  * We don't do this for unique clone instances, because
148  * distribute_children() has already assigned instances to their
149  * running nodes when appropriate, and if we get here, we don't want
150  * remaining unallocated instances to prefer a node that's already
151  * running another instance.
152  */
153  pe_node_t *running = pe__current_node(rsc);
154 
155  if (running && (can_run_resources(running) == FALSE)) {
156  pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
157  rsc->id, running->details->uname);
158  } else if (running) {
159  for (GList *iter = nodes->next; iter; iter = iter->next) {
160  pe_node_t *tmp = (pe_node_t *) iter->data;
161 
162  if (tmp->weight != chosen->weight) {
163  // The nodes are sorted by weight, so no more are equal
164  break;
165  }
166  if (tmp->details == running->details) {
167  // Scores are equal, so prefer the current node
168  chosen = tmp;
169  }
170  multiple++;
171  }
172  }
173  }
174  }
175 
176  if (multiple > 1) {
177  static char score[33];
178  int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
179 
180  score2char_stack(chosen->weight, score, sizeof(score));
181  do_crm_log(log_level,
182  "Chose node %s for %s from %d nodes with score %s",
183  chosen->details->uname, rsc->id, multiple, score);
184  }
185 
186  result = native_assign_node(rsc, nodes, chosen, FALSE);
187  g_list_free(nodes);
188  return result;
189 }
190 
199 static int
200 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
201  const char *value)
202 {
203  GHashTableIter iter;
204  pe_node_t *node = NULL;
205  int best_score = -INFINITY;
206  const char *best_node = NULL;
207 
208  // Find best allowed node with matching attribute
209  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
210  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
211 
212  if ((node->weight > best_score) && can_run_resources(node)
213  && safe_str_eq(value, pe_node_attribute_raw(node, attr))) {
214 
215  best_score = node->weight;
216  best_node = node->details->uname;
217  }
218  }
219 
220  if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
221  if (best_node == NULL) {
222  crm_info("No allowed node for %s matches node attribute %s=%s",
223  rsc->id, attr, value);
224  } else {
225  crm_info("Allowed node %s for %s had best score (%d) "
226  "of those matching node attribute %s=%s",
227  best_node, rsc->id, best_score, attr, value);
228  }
229  }
230  return best_score;
231 }
232 
247 static void
248 add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
249  const char *attr, float factor,
250  bool only_positive)
251 {
252  GHashTableIter iter;
253  pe_node_t *node = NULL;
254 
255  if (attr == NULL) {
256  attr = CRM_ATTR_UNAME;
257  }
258 
259  // Iterate through each node
260  g_hash_table_iter_init(&iter, nodes);
261  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
262  float weight_f = 0;
263  int weight = 0;
264  int score = 0;
265  int new_score = 0;
266 
267  score = best_node_score_matching_attr(rsc, attr,
268  pe_node_attribute_raw(node, attr));
269 
270  if ((factor < 0) && (score < 0)) {
271  /* Negative preference for a node with a negative score
272  * should not become a positive preference.
273  *
274  * @TODO Consider filtering only if weight is -INFINITY
275  */
276  crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
277  node->details->uname, node->weight, factor, score);
278  continue;
279  }
280 
281  if (node->weight == INFINITY_HACK) {
282  crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
283  node->details->uname, node->weight, factor, score);
284  continue;
285  }
286 
287  weight_f = factor * score;
288 
289  // Round the number; see http://c-faq.com/fp/round.html
290  weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
291 
292  /* Small factors can obliterate the small scores that are often actually
293  * used in configurations. If the score and factor are nonzero, ensure
294  * that the result is nonzero as well.
295  */
296  if ((weight == 0) && (score != 0)) {
297  if (factor > 0.0) {
298  weight = 1;
299  } else if (factor < 0.0) {
300  weight = -1;
301  }
302  }
303 
304  new_score = pe__add_scores(weight, node->weight);
305 
306  if (only_positive && (new_score < 0) && (node->weight > 0)) {
307  crm_trace("%s: Filtering %d + %f * %d = %d "
308  "(negative disallowed, marking node unusable)",
309  node->details->uname, node->weight, factor, score,
310  new_score);
311  node->weight = INFINITY_HACK;
312  continue;
313  }
314 
315  if (only_positive && (new_score < 0) && (node->weight == 0)) {
316  crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
317  node->details->uname, node->weight, factor, score,
318  new_score);
319  continue;
320  }
321 
322  crm_trace("%s: %d + %f * %d = %d", node->details->uname,
323  node->weight, factor, score, new_score);
324  node->weight = new_score;
325  }
326 }
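/* Illustrative sketch (not part of the original file): the factor arithmetic
 * used above, in isolation. scaled_score_example() is an illustrative name.
 */
static int
scaled_score_example(float factor, int score)
{
    float weight_f = factor * score;

    // Round half away from zero; see http://c-faq.com/fp/round.html
    int weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));

    /* A small factor must not erase a nonzero score entirely:
     * e.g. factor 0.001 * score 100 = 0.1 rounds to 0, but becomes 1 here.
     */
    if ((weight == 0) && (score != 0)) {
        if (factor > 0.0) {
            weight = 1;
        } else if (factor < 0.0) {
            weight = -1;
        }
    }
    return weight;
}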
327 
328 static inline bool
329 is_nonempty_group(pe_resource_t *rsc)
330 {
331  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
332 }
333 
349 GHashTable *
350 pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,
351  GHashTable *nodes, const char *attr, float factor,
352  uint32_t flags)
353 {
354  GHashTable *work = NULL;
355 
356  // Avoid infinite recursion
357  if (is_set(rsc->flags, pe_rsc_merging)) {
358  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
359  return nodes;
360  }
361  set_bit(rsc->flags, pe_rsc_merging);
362 
363  if (is_set(flags, pe_weights_init)) {
364  if (is_nonempty_group(rsc)) {
365  GList *last = g_list_last(rsc->children);
366  pe_resource_t *last_rsc = last->data;
367 
368  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
369  "using last member %s (at %.6f)",
370  rhs, rsc->id, last_rsc->id, factor);
371  work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
372  flags);
373  } else {
374  work = pcmk__copy_node_table(nodes);
375  }
376  clear_bit(flags, pe_weights_init);
377 
378  } else if (is_nonempty_group(rsc)) {
379  /* The first member of the group will recursively incorporate any
380  * constraints involving other members (including the group internal
381  * colocation).
382  *
383  * @TODO The indirect colocations from the dependent group's other
384  * members will be incorporated at full strength rather than by
385  * factor, so the group's combined stickiness will be treated as
386  * (factor + (#members - 1)) * stickiness. It is questionable what
387  * the right approach should be.
388  */
389  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
390  "(at %.6f)", rhs, rsc->id, factor);
391  work = pcmk__copy_node_table(nodes);
392  work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
393  factor, flags);
394 
395  } else {
396  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
397  rhs, rsc->id, factor);
398  work = pcmk__copy_node_table(nodes);
399  add_node_scores_matching_attr(work, rsc, attr, factor,
400  is_set(flags, pe_weights_positive));
401  }
402 
403  if (can_run_any(work)) {
404  GListPtr gIter = NULL;
405  int multiplier = (factor < 0)? -1 : 1;
406 
407  if (is_set(flags, pe_weights_forward)) {
408  gIter = rsc->rsc_cons;
409  pe_rsc_trace(rsc,
410  "Checking additional %d optional '%s with' constraints",
411  g_list_length(gIter), rsc->id);
412 
413  } else if (is_nonempty_group(rsc)) {
414  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
415 
416  gIter = last_rsc->rsc_cons_lhs;
417  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
418  "constraints using last member %s",
419  g_list_length(gIter), rsc->id, last_rsc->id);
420 
421  } else {
422  gIter = rsc->rsc_cons_lhs;
423  pe_rsc_trace(rsc,
424  "Checking additional %d optional 'with %s' constraints",
425  g_list_length(gIter), rsc->id);
426  }
427 
428  for (; gIter != NULL; gIter = gIter->next) {
429  pe_resource_t *other = NULL;
430  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
431 
432  if (constraint->score == 0) {
433  continue;
434  }
435 
436  if (is_set(flags, pe_weights_forward)) {
437  other = constraint->rsc_rh;
438  } else {
439  other = constraint->rsc_lh;
440  }
441 
442  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
443  constraint->id, constraint->rsc_lh->id,
444  constraint->rsc_rh->id);
445  work = pcmk__native_merge_weights(other, rhs, work,
446  constraint->node_attribute,
447  multiplier * constraint->score / (float) INFINITY,
448  flags|pe_weights_rollback);
449  pe__show_node_weights(true, NULL, rhs, work);
450  }
451 
452  } else if (is_set(flags, pe_weights_rollback)) {
453  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
454  rhs, rsc->id);
455  g_hash_table_destroy(work);
456  clear_bit(rsc->flags, pe_rsc_merging);
457  return nodes;
458  }
459 
460 
461  if (is_set(flags, pe_weights_positive)) {
462  pe_node_t *node = NULL;
463  GHashTableIter iter;
464 
465  g_hash_table_iter_init(&iter, work);
466  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
467  if (node->weight == INFINITY_HACK) {
468  node->weight = 1;
469  }
470  }
471  }
472 
473  if (nodes) {
474  g_hash_table_destroy(nodes);
475  }
476 
477  clear_bit(rsc->flags, pe_rsc_merging);
478  return work;
479 }
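/* Illustrative sketch (not part of the original file): how a colocation score
 * becomes the factor for the recursive merge above, assuming Pacemaker's
 * INFINITY of 1000000. colocation_factor_example() is an illustrative name.
 */
static float
colocation_factor_example(float factor, const rsc_colocation_t *constraint)
{
    // A negative incoming factor inverts the dependent constraint's effect
    int multiplier = (factor < 0)? -1 : 1;

    // e.g. score 500000 -> 0.5; a mandatory INFINITY colocation -> 1.0
    return multiplier * constraint->score / (float) INFINITY;
}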
480 
481 static inline bool
482 node_has_been_unfenced(pe_node_t *node)
483 {
484  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
485 
486  return unfenced && strcmp("0", unfenced);
487 }
488 
489 static inline bool
490 is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
491 {
492  return is_set(rsc->flags, pe_rsc_fence_device)
493  && is_set(data_set->flags, pe_flag_enable_unfencing);
494 }
495 
496 pe_node_t *
497 pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
498  pe_working_set_t *data_set)
499 {
500  GListPtr gIter = NULL;
501 
502  if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
503  /* never allocate children on their own */
504  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
505  rsc->parent->id);
506  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
507  }
508 
509  if (is_not_set(rsc->flags, pe_rsc_provisional)) {
510  return rsc->allocated_to;
511  }
512 
513  if (is_set(rsc->flags, pe_rsc_allocating)) {
514  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
515  return NULL;
516  }
517 
518  set_bit(rsc->flags, pe_rsc_allocating);
519  pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes);
520 
521  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
522  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
523 
524  GHashTable *archive = NULL;
525  pe_resource_t *rsc_rh = constraint->rsc_rh;
526 
527  if (constraint->score == 0) {
528  continue;
529  }
530 
531  if (constraint->role_lh >= RSC_ROLE_MASTER
532  || (constraint->score < 0 && constraint->score > -INFINITY)) {
533  archive = pcmk__copy_node_table(rsc->allowed_nodes);
534  }
535 
536  pe_rsc_trace(rsc,
537  "%s: Allocating %s first (constraint=%s score=%d role=%s)",
538  rsc->id, rsc_rh->id, constraint->id,
539  constraint->score, role2text(constraint->role_lh));
540  rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
541  rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
542  if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
543  pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
544  g_hash_table_destroy(rsc->allowed_nodes);
545  rsc->allowed_nodes = archive;
546  archive = NULL;
547  }
548  if (archive) {
549  g_hash_table_destroy(archive);
550  }
551  }
552 
553  pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes);
554 
555  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
556  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
557 
558  if (constraint->score == 0) {
559  continue;
560  }
561  pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
562  constraint->id, constraint->rsc_lh->id,
563  constraint->rsc_rh->id);
564  rsc->allowed_nodes =
565  constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
566  constraint->node_attribute,
567  (float)constraint->score / INFINITY,
568  pe_weights_rollback);
569  }
570 
571  if (rsc->next_role == RSC_ROLE_STOPPED) {
572  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
573  /* make sure it doesn't come up again */
574  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
575 
576  } else if(rsc->next_role > rsc->role
577  && is_set(data_set->flags, pe_flag_have_quorum) == FALSE
578  && data_set->no_quorum_policy == no_quorum_freeze) {
579  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
580  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
581  rsc->next_role = rsc->role;
582  }
583 
584  pe__show_node_weights(!show_scores, rsc, __FUNCTION__, rsc->allowed_nodes);
585  if (is_set(data_set->flags, pe_flag_stonith_enabled)
586  && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
587  resource_location(rsc, NULL, -INFINITY, "__stonith_not_configured__", data_set);
588  }
589 
590  if (is_not_set(rsc->flags, pe_rsc_managed)) {
591  const char *reason = NULL;
592  pe_node_t *assign_to = NULL;
593 
594  rsc->next_role = rsc->role;
595  assign_to = pe__current_node(rsc);
596  if (assign_to == NULL) {
597  reason = "inactive";
598  } else if (rsc->role == RSC_ROLE_MASTER) {
599  reason = "master";
600  } else if (is_set(rsc->flags, pe_rsc_failed)) {
601  reason = "failed";
602  } else {
603  reason = "active";
604  }
605  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
606  (assign_to? assign_to->details->uname : "no node"), reason);
607  native_assign_node(rsc, NULL, assign_to, TRUE);
608 
609  } else if (is_set(data_set->flags, pe_flag_stop_everything)) {
610  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
611  native_assign_node(rsc, NULL, NULL, TRUE);
612 
613  } else if (is_set(rsc->flags, pe_rsc_provisional)
614  && native_choose_node(rsc, prefer, data_set)) {
615  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
616  rsc->allocated_to->details->uname);
617 
618  } else if (rsc->allocated_to == NULL) {
619  if (is_not_set(rsc->flags, pe_rsc_orphan)) {
620  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
621  } else if (rsc->running_on != NULL) {
622  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
623  }
624 
625  } else {
626  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
627  rsc->allocated_to->details->uname);
628  }
629 
630  clear_bit(rsc->flags, pe_rsc_allocating);
631 
632  if (rsc->is_remote_node) {
633  pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
634 
635  CRM_ASSERT(remote_node != NULL);
636  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
637  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
638  remote_node->details->id);
639  remote_node->details->online = TRUE;
640  /* We shouldn't consider an unseen remote node unclean if we are going
641  * to try to connect to it; otherwise we would get an unnecessary fence. */
642  if (remote_node->details->unseen == TRUE) {
643  remote_node->details->unclean = FALSE;
644  }
645 
646  } else {
647  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
648  remote_node->details->id, role2text(rsc->next_role),
649  (rsc->allocated_to? "" : "un"));
650  remote_node->details->shutdown = TRUE;
651  }
652  }
653 
654  return rsc->allocated_to;
655 }
656 
657 static gboolean
658 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
659 {
660  gboolean dup = FALSE;
661  const char *id = NULL;
662  const char *value = NULL;
663  xmlNode *operation = NULL;
664  guint interval2_ms = 0;
665 
666  CRM_ASSERT(rsc);
667  for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
668  operation = __xml_next_element(operation)) {
669 
670  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
671  value = crm_element_value(operation, "name");
672  if (safe_str_neq(value, name)) {
673  continue;
674  }
675 
676  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
677  interval2_ms = crm_parse_interval_spec(value);
678  if (interval_ms != interval2_ms) {
679  continue;
680  }
681 
682  if (id == NULL) {
683  id = ID(operation);
684 
685  } else {
686  pcmk__config_err("Operation %s is duplicate of %s (do not use "
687  "same name and interval combination more "
688  "than once per resource)", ID(operation), id);
689  dup = TRUE;
690  }
691  }
692  }
693 
694  return dup;
695 }
696 
697 static bool
698 op_cannot_recur(const char *name)
699 {
700  return safe_str_eq(name, RSC_STOP)
701  || safe_str_eq(name, RSC_START)
702  || safe_str_eq(name, RSC_DEMOTE)
703  || safe_str_eq(name, RSC_PROMOTE);
704 }
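/* Illustrative sketch (not part of the original file): recurring actions are
 * keyed by resource, action name, and interval in milliseconds, which is what
 * makes the duplicate check in is_op_dup() meaningful. monitor_key_example()
 * is an illustrative name; pcmk__op_key() is the real helper used below.
 */
static char *
monitor_key_example(pe_resource_t *rsc, guint interval_ms)
{
    // e.g. rsc->id "db" with a 10000ms interval -> "db_monitor_10000"
    return pcmk__op_key(rsc->id, RSC_STATUS, interval_ms);
}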
705 
706 static void
707 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
708  xmlNode * operation, pe_working_set_t * data_set)
709 {
710  char *key = NULL;
711  const char *name = NULL;
712  const char *role = NULL;
713  const char *interval_spec = NULL;
714  const char *node_uname = node? node->details->uname : "n/a";
715 
716  guint interval_ms = 0;
717  pe_action_t *mon = NULL;
718  gboolean is_optional = TRUE;
719  GListPtr possible_matches = NULL;
720 
721  CRM_ASSERT(rsc);
722 
723  /* Only process for the operations without role="Stopped" */
724  role = crm_element_value(operation, "role");
725  if (role && text2role(role) == RSC_ROLE_STOPPED) {
726  return;
727  }
728 
729  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
730  interval_ms = crm_parse_interval_spec(interval_spec);
731  if (interval_ms == 0) {
732  return;
733  }
734 
735  name = crm_element_value(operation, "name");
736  if (is_op_dup(rsc, name, interval_ms)) {
737  crm_trace("Not creating duplicate recurring action %s for %dms %s",
738  ID(operation), interval_ms, name);
739  return;
740  }
741 
742  if (op_cannot_recur(name)) {
743  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
744  ID(operation), name);
745  return;
746  }
747 
748  key = pcmk__op_key(rsc->id, name, interval_ms);
749  if (find_rsc_op_entry(rsc, key) == NULL) {
750  crm_trace("Not creating recurring action %s for disabled resource %s",
751  ID(operation), rsc->id);
752  free(key);
753  return;
754  }
755 
756  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
757  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
758 
759  if (start != NULL) {
760  pe_rsc_trace(rsc, "Marking %s %s due to %s",
761  key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
762  start->uuid);
763  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
764  } else {
765  pe_rsc_trace(rsc, "Marking %s optional", key);
766  is_optional = TRUE;
767  }
768 
769  /* start a monitor for an already active resource */
770  possible_matches = find_actions_exact(rsc->actions, key, node);
771  if (possible_matches == NULL) {
772  is_optional = FALSE;
773  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
774 
775  } else {
776  GListPtr gIter = NULL;
777 
778  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
779  pe_action_t *op = (pe_action_t *) gIter->data;
780 
781  if (is_set(op->flags, pe_action_reschedule)) {
782  is_optional = FALSE;
783  break;
784  }
785  }
786  g_list_free(possible_matches);
787  }
788 
789  if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
790  || (role != NULL && text2role(role) != rsc->next_role)) {
791  int log_level = LOG_TRACE;
792  const char *result = "Ignoring";
793 
794  if (is_optional) {
795  char *after_key = NULL;
796  pe_action_t *cancel_op = NULL;
797 
798  // It's running, so cancel it
799  log_level = LOG_INFO;
800  result = "Cancelling";
801  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
802 
803  switch (rsc->role) {
804  case RSC_ROLE_SLAVE:
805  case RSC_ROLE_STARTED:
806  if (rsc->next_role == RSC_ROLE_MASTER) {
807  after_key = promote_key(rsc);
808 
809  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
810  after_key = stop_key(rsc);
811  }
812 
813  break;
814  case RSC_ROLE_MASTER:
815  after_key = demote_key(rsc);
816  break;
817  default:
818  break;
819  }
820 
821  if (after_key) {
822  custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
823  pe_order_runnable_left, data_set);
824  }
825  }
826 
827  do_crm_log(log_level, "%s action %s (%s vs. %s)",
828  result, key, role ? role : role2text(RSC_ROLE_SLAVE),
829  role2text(rsc->next_role));
830 
831  free(key);
832  return;
833  }
834 
835  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
836  key = mon->uuid;
837  if (is_optional) {
838  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
839  }
840 
841  if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
842  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
843  node_uname, mon->uuid);
844  update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
845 
846  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
847  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
848  node_uname, mon->uuid);
849  update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
850 
851  } else if (is_set(mon->flags, pe_action_optional) == FALSE) {
852  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
853  mon->task, interval_ms / 1000, rsc->id, node_uname);
854  }
855 
856  if (rsc->next_role == RSC_ROLE_MASTER) {
857  char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
858 
859  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
860  free(running_master);
861  }
862 
863  if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
864  custom_action_order(rsc, start_key(rsc), NULL,
865  NULL, strdup(key), mon,
866  pe_order_implies_then | pe_order_runnable_left, data_set);
867 
868  custom_action_order(rsc, reload_key(rsc), NULL,
869  NULL, strdup(key), mon,
870  pe_order_implies_then | pe_order_runnable_left, data_set);
871 
872  if (rsc->next_role == RSC_ROLE_MASTER) {
873  custom_action_order(rsc, promote_key(rsc), NULL,
874  rsc, NULL, mon,
875  pe_order_optional | pe_order_runnable_left, data_set);
876 
877  } else if (rsc->role == RSC_ROLE_MASTER) {
878  custom_action_order(rsc, demote_key(rsc), NULL,
879  rsc, NULL, mon,
880  pe_order_optional | pe_order_runnable_left, data_set);
881  }
882  }
883 }
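/* Illustrative sketch (not part of the original file): RecurringOp() above
 * only schedules operations whose interval parses to a nonzero number of
 * milliseconds. wants_recurring_example() is an illustrative name;
 * crm_element_value() and crm_parse_interval_spec() are the real helpers
 * used above.
 */
static gboolean
wants_recurring_example(xmlNode *operation)
{
    const char *spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);

    /* e.g. "10s" -> 10000 (recurring); "0" or a missing value -> 0 (not) */
    return (spec != NULL) && (crm_parse_interval_spec(spec) > 0);
}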
884 
885 static void
886 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
887 {
888  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
889  (node == NULL || node->details->maintenance == FALSE)) {
890  xmlNode *operation = NULL;
891 
892  for (operation = __xml_first_child_element(rsc->ops_xml);
893  operation != NULL;
894  operation = __xml_next_element(operation)) {
895 
896  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
897  RecurringOp(rsc, start, node, operation, data_set);
898  }
899  }
900  }
901 }
902 
903 static void
904 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
905  xmlNode * operation, pe_working_set_t * data_set)
906 {
907  char *key = NULL;
908  const char *name = NULL;
909  const char *role = NULL;
910  const char *interval_spec = NULL;
911  const char *node_uname = node? node->details->uname : "n/a";
912 
913  guint interval_ms = 0;
914  GListPtr possible_matches = NULL;
915  GListPtr gIter = NULL;
916 
917  /* Only process for the operations with role="Stopped" */
918  role = crm_element_value(operation, "role");
919  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
920  return;
921  }
922 
923  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
924  interval_ms = crm_parse_interval_spec(interval_spec);
925  if (interval_ms == 0) {
926  return;
927  }
928 
929  name = crm_element_value(operation, "name");
930  if (is_op_dup(rsc, name, interval_ms)) {
931  crm_trace("Not creating duplicate recurring action %s for %dms %s",
932  ID(operation), interval_ms, name);
933  return;
934  }
935 
936  if (op_cannot_recur(name)) {
937  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
938  ID(operation), name);
939  return;
940  }
941 
942  key = pcmk__op_key(rsc->id, name, interval_ms);
943  if (find_rsc_op_entry(rsc, key) == NULL) {
944  crm_trace("Not creating recurring action %s for disabled resource %s",
945  ID(operation), rsc->id);
946  free(key);
947  return;
948  }
949 
950  // @TODO add support
951  if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
952  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
953  "not supported for anonymous clones)",
954  ID(operation));
955  return;
956  }
957 
958  pe_rsc_trace(rsc,
959  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
960  ID(operation), rsc->id, role2text(rsc->next_role));
961 
962  /* if the monitor exists on the node where the resource will be running, cancel it */
963  if (node != NULL) {
964  possible_matches = find_actions_exact(rsc->actions, key, node);
965  if (possible_matches) {
966  pe_action_t *cancel_op = NULL;
967 
968  g_list_free(possible_matches);
969 
970  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
971 
972  if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
973  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
974  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
975  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
976  pe_order_runnable_left, data_set);
977  }
978 
979  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
980  key, role, role2text(rsc->next_role), node_uname);
981  }
982  }
983 
984  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
985  pe_node_t *stop_node = (pe_node_t *) gIter->data;
986  const char *stop_node_uname = stop_node->details->uname;
987  gboolean is_optional = TRUE;
988  gboolean probe_is_optional = TRUE;
989  gboolean stop_is_optional = TRUE;
990  pe_action_t *stopped_mon = NULL;
991  char *rc_inactive = NULL;
992  GListPtr probe_complete_ops = NULL;
993  GListPtr stop_ops = NULL;
994  GListPtr local_gIter = NULL;
995 
996  if (node && safe_str_eq(stop_node_uname, node_uname)) {
997  continue;
998  }
999 
1000  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
1001  ID(operation), rsc->id, crm_str(stop_node_uname));
1002 
1003  /* start a monitor for an already stopped resource */
1004  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
1005  if (possible_matches == NULL) {
1006  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
1007  crm_str(stop_node_uname));
1008  is_optional = FALSE;
1009  } else {
1010  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1011  crm_str(stop_node_uname));
1012  is_optional = TRUE;
1013  g_list_free(possible_matches);
1014  }
1015 
1016  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1017 
1018  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
1019  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1020  free(rc_inactive);
1021 
1022  if (is_set(rsc->flags, pe_rsc_managed)) {
1023  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1024  FALSE);
1025  GListPtr pIter = NULL;
1026 
1027  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1028  pe_action_t *probe = (pe_action_t *) pIter->data;
1029 
1030  order_actions(probe, stopped_mon, pe_order_runnable_left);
1031  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1032  }
1033 
1034  g_list_free(probes);
1035  }
1036 
1037  if (probe_complete_ops) {
1038  g_list_free(probe_complete_ops);
1039  }
1040 
1041  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1042 
1043  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1044  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1045 
1046  if (is_set(stop->flags, pe_action_optional) == FALSE) {
1047  stop_is_optional = FALSE;
1048  }
1049 
1050  if (is_set(stop->flags, pe_action_runnable) == FALSE) {
1051  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1052  crm_str(stop_node_uname), stopped_mon->uuid);
1053  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1054  }
1055 
1056  if (is_set(rsc->flags, pe_rsc_managed)) {
1057  custom_action_order(rsc, stop_key(rsc), stop,
1058  NULL, strdup(key), stopped_mon,
1059  pe_order_implies_then | pe_order_runnable_left, data_set);
1060  }
1061 
1062  }
1063 
1064  if (stop_ops) {
1065  g_list_free(stop_ops);
1066  }
1067 
1068  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1069  && is_set(rsc->flags, pe_rsc_managed) == FALSE) {
1070  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1071  key, crm_str(stop_node_uname));
1072  update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
1073  }
1074 
1075  if (is_set(stopped_mon->flags, pe_action_optional)) {
1076  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1077  }
1078 
1079  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1080  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1081  crm_str(stop_node_uname), stopped_mon->uuid);
1082  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1083  }
1084 
1085  if (is_set(stopped_mon->flags, pe_action_runnable)
1086  && is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
1087  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1088  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1089  }
1090  }
1091 
1092  free(key);
1093 }
1094 
1095 static void
1096 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1097 {
1098  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
1099  (node == NULL || node->details->maintenance == FALSE)) {
1100  xmlNode *operation = NULL;
1101 
1102  for (operation = __xml_first_child_element(rsc->ops_xml);
1103  operation != NULL;
1104  operation = __xml_next_element(operation)) {
1105 
1106  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
1107  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1108  }
1109  }
1110  }
1111 }
1112 
1113 static void
1114 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
1115 {
1116  pe_action_t *migrate_to = NULL;
1117  pe_action_t *migrate_from = NULL;
1118  pe_action_t *start = NULL;
1119  pe_action_t *stop = NULL;
1120  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
1121 
1122  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
1123  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
1124  start = start_action(rsc, chosen, TRUE);
1125  stop = stop_action(rsc, current, TRUE);
1126 
1127  if (partial == FALSE) {
1128  migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1129  RSC_MIGRATE, current, TRUE, TRUE, data_set);
1130  }
1131 
1132  migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1133  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
1134 
1135  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
1136 
1137  set_bit(start->flags, pe_action_migrate_runnable);
1138  set_bit(stop->flags, pe_action_migrate_runnable);
1139 
1140  update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */
1141 
1142  /* order probes before migrations */
1143  if (partial) {
1144  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1145  migrate_from->needs = start->needs;
1146 
1147  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1148  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1149  NULL, pe_order_optional, data_set);
1150 
1151  } else {
1152  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1153  set_bit(migrate_to->flags, pe_action_migrate_runnable);
1154  migrate_to->needs = start->needs;
1155 
1156  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1157  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1158  NULL, pe_order_optional, data_set);
1159  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1160  NULL, rsc,
1161  pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1162  pe_order_optional | pe_order_implies_first_migratable,
1163  data_set);
1164  }
1165 
1166  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1167  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1168  pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left,
1169  data_set);
1170  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1171  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1172  pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left,
1173  data_set);
1174 
1175  }
1176 
1177  if (migrate_to) {
1178  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1179  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1180 
1181  /* Pacemaker Remote connections don't require pending to be recorded in
1182  * the CIB. We can reduce CIB writes by not setting PENDING for them.
1183  */
1184  if (rsc->is_remote_node == FALSE) {
1185  /* migrate_to takes place on the source node, but can
1186  * have an effect on the target node depending on how
1187  * the agent is written. Because of this, we have to maintain
1188  * a record that the migrate_to occurred, in case the source node
1189  * loses membership while the migrate_to action is still in-flight.
1190  */
1191  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
1192  }
1193  }
1194 
1195  if (migrate_from) {
1196  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1197  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1198  }
1199 }
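/* Illustrative sketch (not part of the original file): the ordering chain
 * that handle_migration_actions() builds for a full (non-partial) migration,
 * expressed with order_actions(), the low-level form of custom_action_order().
 * migration_order_example() is an illustrative name, and the flags here are
 * simplified; see the calls above for the exact combinations.
 */
static void
migration_order_example(pe_action_t *probe, pe_action_t *migrate_to,
                        pe_action_t *migrate_from, pe_action_t *stop,
                        pe_action_t *start)
{
    /* probe -> migrate_to -> migrate_from -> stop -> start (start is pseudo) */
    order_actions(probe, migrate_to, pe_order_optional);
    order_actions(migrate_to, migrate_from, pe_order_optional);
    order_actions(migrate_from, stop, pe_order_optional);
    order_actions(stop, start, pe_order_optional);
}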
1200 
1201 void
1202 native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
1203 {
1204  pe_action_t *start = NULL;
1205  pe_node_t *chosen = NULL;
1206  pe_node_t *current = NULL;
1207  gboolean need_stop = FALSE;
1208  bool need_promote = FALSE;
1209  gboolean is_moving = FALSE;
1210  gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;
1211 
1212  GListPtr gIter = NULL;
1213  unsigned int num_all_active = 0;
1214  unsigned int num_clean_active = 0;
1215  bool multiply_active = FALSE;
1216  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
1217  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
1218 
1219  CRM_ASSERT(rsc);
1220  chosen = rsc->allocated_to;
1221  if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
1222  rsc->next_role = RSC_ROLE_STARTED;
1223  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1224 
1225  } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
1226  rsc->next_role = RSC_ROLE_STOPPED;
1227  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1228  }
1229 
1230  pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
1231  role2text(rsc->role), role2text(rsc->next_role));
1232 
1233  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
1234 
1235  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
1236  pe_node_t *dangling_source = (pe_node_t *) gIter->data;
1237 
1238  pe_action_t *stop = stop_action(rsc, dangling_source, FALSE);
1239 
1240  set_bit(stop->flags, pe_action_dangle);
1241  pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
1242  rsc->id, dangling_source->details->uname);
1243 
1244  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
1245  DeleteRsc(rsc, dangling_source, FALSE, data_set);
1246  }
1247  }
1248 
1249  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
1250  && rsc->partial_migration_source && rsc->partial_migration_target
1251  && (current->details == rsc->partial_migration_source->details)
1252  && (chosen->details == rsc->partial_migration_target->details)) {
1253 
1254  /* The chosen node is still the migration target from a partial
1255  * migration. Attempt to continue the migration instead of recovering
1256  * by stopping the resource everywhere and starting it on a single node.
1257  */
1258  pe_rsc_trace(rsc,
1259  "Will attempt to continue with a partial migration to target %s from %s",
1260  rsc->partial_migration_target->details->id,
1261  rsc->partial_migration_source->details->id);
1262 
1263  } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
1264  /* If a resource has "requires" set to nothing or quorum, don't consider
1265  * it active on unclean nodes (similar to how all resources behave when
1266  * stonith-enabled is false). We can start such resources elsewhere
1267  * before fencing completes, and if we considered the resource active on
1268  * the failed node, we would attempt recovery for being active on
1269  * multiple nodes.
1270  */
1271  multiply_active = (num_clean_active > 1);
1272  } else {
1273  multiply_active = (num_all_active > 1);
1274  }
1275 
1276  if (multiply_active) {
1277  if (rsc->partial_migration_target && rsc->partial_migration_source) {
1278  // Migration was in progress, but we've chosen a different target
1279  crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
1280  rsc->id, rsc->partial_migration_target->details->uname,
1281  rsc->partial_migration_source->details->uname);
1282 
1283  } else {
1284  // Resource was incorrectly multiply active
1285  pe_proc_err("Resource %s is active on %u nodes (%s)",
1286  rsc->id, num_all_active,
1287  recovery2text(rsc->recovery_type));
1288  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
1289  }
1290 
1291  if (rsc->recovery_type == recovery_stop_start) {
1292  need_stop = TRUE;
1293  }
1294 
1295  /* If a partial migration is in progress but the migration target was
1296  * not chosen as the resource's destination, clear all partial migration
1297  * data. */
1298  rsc->partial_migration_source = rsc->partial_migration_target = NULL;
1299  allow_migrate = FALSE;
1300  }
1301 
1302  if (is_set(rsc->flags, pe_rsc_start_pending)) {
1303  start = start_action(rsc, chosen, TRUE);
1304  set_bit(start->flags, pe_action_print_always);
1305  }
1306 
1307  if (current && chosen && current->details != chosen->details) {
1308  pe_rsc_trace(rsc, "Moving %s", rsc->id);
1309  is_moving = TRUE;
1310  need_stop = TRUE;
1311 
1312  } else if (is_set(rsc->flags, pe_rsc_failed)) {
1313  if (is_set(rsc->flags, pe_rsc_stop)) {
1314  need_stop = TRUE;
1315  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
1316  } else {
1317  pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
1318  if (rsc->next_role == RSC_ROLE_MASTER) {
1319  need_promote = TRUE;
1320  }
1321  }
1322 
1323  } else if (is_set(rsc->flags, pe_rsc_block)) {
1324  pe_rsc_trace(rsc, "Block %s", rsc->id);
1325  need_stop = TRUE;
1326 
1327  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
1328  /* Recovery of a promoted resource */
1329  start = start_action(rsc, chosen, TRUE);
1330  if (is_set(start->flags, pe_action_optional) == FALSE) {
1331  pe_rsc_trace(rsc, "Forced start %s", rsc->id);
1332  need_stop = TRUE;
1333  }
1334  }
1335 
1336  pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
1337  role2text(rsc->role), role2text(rsc->next_role));
1338 
1339  /* Create any additional actions required when bringing resource down and
1340  * back up to same level.
1341  */
1342  role = rsc->role;
1343  while (role != RSC_ROLE_STOPPED) {
1344  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
1345  pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1346  rsc->id, need_stop ? " required" : "");
1347  if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
1348  break;
1349  }
1350  role = next_role;
1351  }
1352 
1353 
1354  while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
1355  bool required = need_stop;
1356 
1357  next_role = rsc_state_matrix[role][rsc->role];
1358  if ((next_role == RSC_ROLE_MASTER) && need_promote) {
1359  required = true;
1360  }
1361  pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1362  rsc->id, (required? " required" : ""));
1363  if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
1364  data_set) == FALSE) {
1365  break;
1366  }
1367  role = next_role;
1368  }
1369  role = rsc->role;
1370 
1371  /* Required steps from this role to the next */
1372  while (role != rsc->next_role) {
1373  next_role = rsc_state_matrix[role][rsc->next_role];
1374  pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
1375  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
1376  break;
1377  }
1378  role = next_role;
1379  }
1380 
1381  if(is_set(rsc->flags, pe_rsc_block)) {
1382  pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
1383 
1384  } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
1385  pe_rsc_trace(rsc, "Monitor ops for active resource");
1386  start = start_action(rsc, chosen, TRUE);
1387  Recurring(rsc, start, chosen, data_set);
1388  Recurring_Stopped(rsc, start, chosen, data_set);
1389  } else {
1390  pe_rsc_trace(rsc, "Monitor ops for inactive resource");
1391  Recurring_Stopped(rsc, NULL, NULL, data_set);
1392  }
1393 
1394  /* If we are stuck in a partial migration whose target no longer
1395  * matches the chosen node, a full stop/start is required.
1396  */
1397  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
1398  pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
1399  allow_migrate = FALSE;
1400 
1401  } else if (is_moving == FALSE ||
1402  is_not_set(rsc->flags, pe_rsc_managed) ||
1403  is_set(rsc->flags, pe_rsc_failed) ||
1404  is_set(rsc->flags, pe_rsc_start_pending) ||
1405  (current && current->details->unclean) ||
1406  rsc->next_role < RSC_ROLE_STARTED) {
1407 
1408  allow_migrate = FALSE;
1409  }
1410 
1411  if (allow_migrate) {
1412  handle_migration_actions(rsc, current, chosen, data_set);
1413  }
1414 }
1415 
1416 static void
1417 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1418 {
1419  GHashTableIter iter;
1420  pe_node_t *node = NULL;
1421  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1422  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1423  if (node->details->remote_rsc) {
1424  node->weight = -INFINITY;
1425  }
1426  }
1427 }
1428 
1443 static GList *
1444 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1445 {
1446  GList *allowed_nodes = NULL;
1447 
1448  if (rsc->allowed_nodes) {
1449  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1450  }
1451 
1452  if (is_set(data_set->flags, pe_flag_stdout)) {
1453  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1454  }
1455  return allowed_nodes;
1456 }
1457 
1458 void
1459 native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
1460 {
1461  /* This function is on the critical path and worth optimizing as much as possible */
1462 
1463  pe_resource_t *top = NULL;
1464  GList *allowed_nodes = NULL;
1465  bool check_unfencing = FALSE;
1466  bool check_utilization = FALSE;
1467 
1468  if (is_not_set(rsc->flags, pe_rsc_managed)) {
1469  pe_rsc_trace(rsc,
1470  "Skipping native constraints for unmanaged resource: %s",
1471  rsc->id);
1472  return;
1473  }
1474 
1475  top = uber_parent(rsc);
1476 
1477  // Whether resource requires unfencing
1478  check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
1479  && is_set(data_set->flags, pe_flag_enable_unfencing)
1480  && is_set(rsc->flags, pe_rsc_needs_unfencing);
1481 
1482  // Whether a non-default placement strategy is used
1483  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
1484  && safe_str_neq(data_set->placement_strategy, "default");
1485 
1486  // Order stops before starts (i.e. restart)
1487  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1488  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1489  pe_order_optional | pe_order_implies_then | pe_order_restart,
1490  data_set);
1491 
1492  // Promotable ordering: demote before stop, start before promote
1493  if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
1494  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
1495  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1496  pe_order_implies_first_master, data_set);
1497 
1498  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1499  rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
1500  pe_order_runnable_left, data_set);
1501  }
1502 
1503  // Don't clear resource history if probing on same node
1504  custom_action_order(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
1505  NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
1506  NULL, pe_order_same_node | pe_order_then_cancels_first,
1507  data_set);
1508 
1509  // Certain checks need allowed nodes
1510  if (check_unfencing || check_utilization || rsc->container) {
1511  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
1512  }
1513 
1514  if (check_unfencing) {
1515  /* Check if the node needs to be unfenced first */
1516 
1517  for (GList *item = allowed_nodes; item; item = item->next) {
1518  pe_node_t *node = item->data;
1519  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
1520 
1521  crm_debug("Ordering any stops of %s before %s, and any starts after",
1522  rsc->id, unfence->uuid);
1523 
1524  /*
1525  * It would be more efficient to order clone resources once,
1526  * rather than order each instance, but ordering the instance
1527  * allows us to avoid unnecessary dependencies that might conflict
1528  * with user constraints.
1529  *
1530  * @TODO: This constraint can still produce a transition loop if the
1531  * resource has a stop scheduled on the node being unfenced, and
1532  * there is a user ordering constraint to start some other resource
1533  * (which will be ordered after the unfence) before stopping this
1534  * resource. An example is "start some slow-starting cloned service
1535  * before stopping an associated virtual IP that may be moving to
1536  * it":
1537  * stop this -> unfencing -> start that -> stop this
1538  */
1539  custom_action_order(rsc, stop_key(rsc), NULL,
1540  NULL, strdup(unfence->uuid), unfence,
1541  pe_order_optional | pe_order_same_node, data_set);
1542 
1543  custom_action_order(NULL, strdup(unfence->uuid), unfence,
1544  rsc, start_key(rsc), NULL,
1545  pe_order_implies_then_on_node | pe_order_same_node,
1546  data_set);
1547  }
1548  }
1549 
1550  if (check_utilization) {
1551  GListPtr gIter = NULL;
1552 
1553  pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
1554  rsc->id, data_set->placement_strategy);
1555 
1556  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
1557  pe_node_t *current = (pe_node_t *) gIter->data;
1558 
1559  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1560  current->details->uname);
1561  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1562 
1563  if (load_stopped->node == NULL) {
1564  load_stopped->node = pe__copy_node(current);
1565  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1566  }
1567 
1568  custom_action_order(rsc, stop_key(rsc), NULL,
1569  NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
1570  }
1571 
1572  for (GList *item = allowed_nodes; item; item = item->next) {
1573  pe_node_t *next = item->data;
1574  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1575  next->details->uname);
1576  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1577 
1578  if (load_stopped->node == NULL) {
1579  load_stopped->node = pe__copy_node(next);
1580  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1581  }
1582 
1583  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1584  rsc, start_key(rsc), NULL, pe_order_load, data_set);
1585 
1586  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1587  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1588  NULL, pe_order_load, data_set);
1589 
1590  free(load_stopped_task);
1591  }
1592  }
1593 
1594  if (rsc->container) {
1595  pe_resource_t *remote_rsc = NULL;
1596 
1597  if (rsc->is_remote_node) {
1598  // rsc is the implicit remote connection for a guest or bundle node
1599 
1600  /* Do not allow a guest resource to live on a Pacemaker Remote node,
1601  * to avoid nesting remotes. However, allow bundles to run on remote
1602  * nodes.
1603  */
1604  if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1605  rsc_avoids_remote_nodes(rsc->container);
1606  }
1607 
1608  /* If someone cleans up a guest or bundle node's container, we will
1609  * likely schedule a (re-)probe of the container and recovery of the
1610  * connection. Order the connection stop after the container probe,
1611  * so that if we detect the container running, we will trigger a new
1612  * transition and avoid the unnecessary recovery.
1613  */
1614  new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP,
1615  pe_order_optional, data_set);
1616 
1617  /* A user can specify that a resource must start on a Pacemaker Remote
1618  * node by explicitly configuring it with the container=NODENAME
1619  * meta-attribute. This is of questionable merit, since location
1620  * constraints can accomplish the same thing. But we support it, so here
1621  * we check whether a resource (that is not itself a remote connection)
1622  * has container set to a remote node or guest node resource.
1623  */
1624  } else if (rsc->container->is_remote_node) {
1625  remote_rsc = rsc->container;
1626  } else {
1627  remote_rsc = pe__resource_contains_guest_node(data_set,
1628  rsc->container);
1629  }
1630 
1631  if (remote_rsc) {
1632  /* Force the resource on the Pacemaker Remote node instead of
1633  * colocating the resource with the container resource.
1634  */
1635  for (GList *item = allowed_nodes; item; item = item->next) {
1636  pe_node_t *node = item->data;
1637 
1638  if (node->details->remote_rsc != remote_rsc) {
1639  node->weight = -INFINITY;
1640  }
1641  }
1642 
1643  } else {
1644  /* This resource is either a filler for a container that does NOT
1645  * represent a Pacemaker Remote node, or a Pacemaker Remote
1646  * connection resource for a guest node or bundle.
1647  */
1648  int score;
1649 
1650  crm_trace("Order and colocate %s relative to its container %s",
1651  rsc->id, rsc->container->id);
1652 
1654  pcmk__op_key(rsc->container->id, RSC_START, 0),
1655  NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
1656  NULL,
1657  pe_order_implies_then | pe_order_runnable_left,
1658  data_set);
1659 
1660  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1661  rsc->container,
1662  pcmk__op_key(rsc->container->id, RSC_STOP, 0),
1663  NULL, pe_order_implies_first, data_set);
1664 
1665  if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1666  score = 10000; /* Highly preferred but not essential */
1667  } else {
1668  score = INFINITY; /* Force them to run on the same host */
1669  }
1670  rsc_colocation_new("resource-with-container", NULL, score, rsc,
1671  rsc->container, NULL, NULL, data_set);
1672  }
1673  }
1674 
1675  if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
1676  /* don't allow remote nodes to run stonith devices
1677  * or remote connection resources.*/
1678  rsc_avoids_remote_nodes(rsc);
1679  }
1680  g_list_free(allowed_nodes);
1681 }
1682 
1683 void
1684 native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1685  rsc_colocation_t *constraint,
1686  pe_working_set_t *data_set)
1687 {
1688  if (rsc_lh == NULL) {
1689  pe_err("rsc_lh was NULL for %s", constraint->id);
1690  return;
1691 
1692  } else if (constraint->rsc_rh == NULL) {
1693  pe_err("rsc_rh was NULL for %s", constraint->id);
1694  return;
1695  }
1696 
1697  if (constraint->score == 0) {
1698  return;
1699  }
1700  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1701  rsc_rh->id);
1702 
1703  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1704 }
1705 
1706 enum filter_colocation_res
1707 filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh,
1708  rsc_colocation_t * constraint, gboolean preview)
1709 {
1710  if (constraint->score == 0) {
1711  return influence_nothing;
1712  }
1713 
1714  /* rh side must be allocated before we can process constraint */
1715  if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
1716  return influence_nothing;
1717  }
1718 
1719  if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
1720  rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1721  && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1722 
1723  /* LH and RH resources have already been allocated, place the correct
1724  * priority on LH rsc for the given promotable clone resource role */
1725  return influence_rsc_priority;
1726  }
1727 
1728  if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1729  // Log an error if we violated a mandatory colocation constraint
1730  const pe_node_t *rh_node = rsc_rh->allocated_to;
1731 
1732  if (rsc_lh->allocated_to == NULL) {
1733  // Dependent resource isn't allocated, so constraint doesn't matter
1734  return influence_nothing;
1735  }
1736 
1737  if (constraint->score >= INFINITY) {
1738  // Dependent resource must colocate with rh_node
1739 
1740  if ((rh_node == NULL)
1741  || (rh_node->details != rsc_lh->allocated_to->details)) {
1742  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1743  rsc_lh->id, rsc_rh->id,
1744  rsc_lh->allocated_to->details->uname,
1745  (rh_node? rh_node->details->uname : "unallocated"));
1746  }
1747 
1748  } else if (constraint->score <= -INFINITY) {
1749  // Dependent resource must anti-colocate with rh_node
1750 
1751  if ((rh_node != NULL)
1752  && (rsc_lh->allocated_to->details == rh_node->details)) {
1753  crm_err("%s and %s must be anti-colocated but are allocated "
1754  "to the same node (%s)",
1755  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1756  }
1757  }
1758  return influence_nothing;
1759  }
1760 
1761  if (constraint->score > 0
1762  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1763  crm_trace("LH: Skipping constraint: \"%s\" state filter (next role is %s)",
1764  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1765  return influence_nothing;
1766  }
1767 
1768  if (constraint->score > 0
1769  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1770  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1771  return influence_nothing;
1772  }
1773 
1774  if (constraint->score < 0
1775  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1776  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1777  role2text(constraint->role_lh));
1778  return influence_nothing;
1779  }
1780 
1781  if (constraint->score < 0
1782  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1783  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1784  role2text(constraint->role_rh));
1785  return influence_nothing;
1786  }
1787 
1788  return influence_rsc_location;
1789 }
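
Note that the function only reports violations for mandatory scores; an advisory score may go unsatisfied without any error. A standalone sketch of the violation test, modeling INFINITY as Pacemaker's 1000000 score cap and reducing node identity to an integer (-1 for unallocated):

    #include <stdio.h>

    #define SCORE_INFINITY 1000000   /* Pacemaker caps scores at +/-1000000 */

    static const char *
    check_mandatory(int score, int lh_node, int rh_node)
    {
        if ((score >= SCORE_INFINITY) && (lh_node != rh_node)) {
            return "violated: dependent must be colocated but is not";
        }
        if ((score <= -SCORE_INFINITY) && (rh_node != -1)
            && (lh_node == rh_node)) {
            return "violated: resources must be anti-colocated";
        }
        return "ok (advisory scores are never violations)";
    }

    int
    main(void)
    {
        printf("%s\n", check_mandatory(SCORE_INFINITY, 1, 2));
        printf("%s\n", check_mandatory(-SCORE_INFINITY, 1, 1));
        printf("%s\n", check_mandatory(5000, 1, 2));
        return 0;
    }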
1790 
1791 static void
1792 influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1793 {
1794  const char *rh_value = NULL;
1795  const char *lh_value = NULL;
1796  const char *attribute = CRM_ATTR_ID;
1797  int score_multiplier = 1;
1798 
1799  if (constraint->score == 0) {
1800  return;
1801  }
1802  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1803  return;
1804  }
1805 
1806  if (constraint->node_attribute != NULL) {
1807  attribute = constraint->node_attribute;
1808  }
1809 
1810  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1811  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1812 
1813  if (!safe_str_eq(lh_value, rh_value)) {
1814  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1815  rsc_lh->priority = -INFINITY;
1816  }
1817  return;
1818  }
1819 
1820  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1821  return;
1822  }
1823 
1824  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1825  score_multiplier = -1;
1826  }
1827 
1828  rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
1829  rsc_lh->priority);
1830 }
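
The priority update relies on pe__add_scores(), which per Pacemaker's documented score arithmetic saturates at +/-1000000 and lets minus-INFINITY win over plus-INFINITY. A simplified standalone model of that rule (the real function also guards against integer overflow):

    #include <stdio.h>

    #define SCORE_INFINITY 1000000

    static int
    add_scores(int a, int b)
    {
        if ((a <= -SCORE_INFINITY) || (b <= -SCORE_INFINITY)) {
            return -SCORE_INFINITY;          /* minus infinity always wins */
        }
        if ((a >= SCORE_INFINITY) || (b >= SCORE_INFINITY)) {
            return SCORE_INFINITY;
        }
        int sum = a + b;                     /* clamp ordinary sums into range */
        if (sum > SCORE_INFINITY) {
            return SCORE_INFINITY;
        }
        if (sum < -SCORE_INFINITY) {
            return -SCORE_INFINITY;
        }
        return sum;
    }

    int
    main(void)
    {
        printf("%d\n", add_scores(SCORE_INFINITY, -SCORE_INFINITY)); /* -1000000 */
        printf("%d\n", add_scores(SCORE_INFINITY, -5000));           /* 1000000 */
        printf("%d\n", add_scores(200, 300));                        /* 500 */
        return 0;
    }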
1831 
1832 static void
1833 colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1834 {
1835  const char *attribute = CRM_ATTR_ID;
1836  const char *value = NULL;
1837  GHashTable *work = NULL;
1838  GHashTableIter iter;
1839  pe_node_t *node = NULL;
1840 
1841  if (constraint->score == 0) {
1842  return;
1843  }
1844  if (constraint->node_attribute != NULL) {
1845  attribute = constraint->node_attribute;
1846  }
1847 
1848  if (rsc_rh->allocated_to) {
1849  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1850 
1851  } else if (constraint->score < 0) {
1852  // Nothing to do (anti-colocation with something that is not running)
1853  return;
1854  }
1855 
1856  work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
1857 
1858  g_hash_table_iter_init(&iter, work);
1859  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1860  if (rsc_rh->allocated_to == NULL) {
1861  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
1862  constraint->id, rsc_lh->id, node->details->uname,
1863  constraint->score, rsc_rh->id);
1864  node->weight = pe__add_scores(-constraint->score, node->weight);
1865 
1866  } else if (safe_str_eq(pe_node_attribute_raw(node, attribute), value)) {
1867  if (constraint->score < CRM_SCORE_INFINITY) {
1868  pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
1869  constraint->id, rsc_lh->id,
1870  node->details->uname, constraint->score);
1871  node->weight = pe__add_scores(constraint->score, node->weight);
1872  }
1873 
1874  } else if (constraint->score >= CRM_SCORE_INFINITY) {
1875  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
1876  constraint->id, rsc_lh->id, node->details->uname,
1877  constraint->score, attribute);
1878  node->weight = pe__add_scores(-constraint->score, node->weight);
1879  }
1880  }
1881 
1882  if (can_run_any(work)
1883  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1884  g_hash_table_destroy(rsc_lh->allowed_nodes);
1885  rsc_lh->allowed_nodes = work;
1886  work = NULL;
1887 
1888  } else {
1889  pe_rsc_info(rsc_lh,
1890  "%s: Rolling back scores from %s (no available nodes)",
1891  rsc_lh->id, rsc_rh->id);
1892  }
1893 
1894  if (work) {
1895  g_hash_table_destroy(work);
1896  }
1897 }
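
Note the transaction pattern here: the score changes are applied to a copy of the allowed-node table, and the copy is committed only if some node can still run the resource or the constraint is mandatory (in which case an empty table is the truthful outcome). A toy rendering of that apply-then-maybe-rollback idea:

    #include <stdio.h>
    #include <string.h>

    #define N 3

    static int
    can_run_any(const int weights[N])
    {
        for (int i = 0; i < N; i++) {
            if (weights[i] >= 0) {
                return 1;
            }
        }
        return 0;
    }

    int
    main(void)
    {
        int allowed[N] = { 100, 50, 0 };
        int work[N];
        int penalty = -200;                  /* advisory anti-colocation score */

        memcpy(work, allowed, sizeof(work)); /* operate on a copy */
        for (int i = 0; i < N; i++) {
            work[i] += penalty;
        }

        if (can_run_any(work)) {
            memcpy(allowed, work, sizeof(allowed));  /* commit the copy */
        } else {
            printf("rolling back scores (no available nodes)\n");
        }

        for (int i = 0; i < N; i++) {
            printf("node%d: %d\n", i, allowed[i]);
        }
        return 0;
    }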
1898 
1899 void
1900 native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1901  rsc_colocation_t *constraint,
1902  pe_working_set_t *data_set)
1903 {
1904  enum filter_colocation_res filter_results;
1905 
1906  CRM_ASSERT(rsc_lh);
1907  CRM_ASSERT(rsc_rh);
1908  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1909  pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
1910  ((constraint->score >= 0)? "Colocating" : "Anti-colocating"),
1911  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1912 
1913  switch (filter_results) {
1914  case influence_rsc_priority:
1915  influence_priority(rsc_lh, rsc_rh, constraint);
1916  break;
1917  case influence_rsc_location:
1918  colocation_match(rsc_lh, rsc_rh, constraint);
1919  break;
1920  case influence_nothing:
1921  default:
1922  return;
1923  }
1924 }
1925 
1926 static gboolean
1927 filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1928 {
1929  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1930  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1931  role2text(rsc_ticket->role_lh));
1932  return FALSE;
1933  }
1934 
1935  return TRUE;
1936 }
1937 
1938 void
1939 rsc_ticket_constraint(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
1940 {
1941  if (rsc_ticket == NULL) {
1942  pe_err("rsc_ticket was NULL");
1943  return;
1944  }
1945 
1946  if (rsc_lh == NULL) {
1947  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1948  return;
1949  }
1950 
1951  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1952  return;
1953  }
1954 
1955  if (rsc_lh->children) {
1956  GListPtr gIter = rsc_lh->children;
1957 
1958  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1959 
1960  for (; gIter != NULL; gIter = gIter->next) {
1961  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1962 
1963  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1964  }
1965  return;
1966  }
1967 
1968  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1969  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1970  role2text(rsc_ticket->role_lh));
1971 
1972  if ((rsc_ticket->ticket->granted == FALSE)
1973  && (rsc_lh->running_on != NULL)) {
1974 
1975  GListPtr gIter = NULL;
1976 
1977  switch (rsc_ticket->loss_policy) {
1978  case loss_ticket_stop:
1979  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1980  break;
1981 
1982  case loss_ticket_demote:
1983  // Promotion score will be set to -INFINITY in promotion_order()
1984  if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
1985  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1986  }
1987  break;
1988 
1989  case loss_ticket_fence:
1990  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1991  return;
1992  }
1993 
1994  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1995 
1996  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
1997  pe_node_t *node = (pe_node_t *) gIter->data;
1998 
1999  pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
2000  }
2001  break;
2002 
2003  case loss_ticket_freeze:
2004  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2005  return;
2006  }
2007  if (rsc_lh->running_on != NULL) {
2008  clear_bit(rsc_lh->flags, pe_rsc_managed);
2009  set_bit(rsc_lh->flags, pe_rsc_block);
2010  }
2011  break;
2012  }
2013 
2014  } else if (rsc_ticket->ticket->granted == FALSE) {
2015 
2016  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
2017  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
2018  }
2019 
2020  } else if (rsc_ticket->ticket->standby) {
2021 
2022  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
2023  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
2024  }
2025  }
2026 }
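
For a currently running resource, the branches above reduce to a small decision table keyed on loss-policy (and, for promotable clones, whether the constraint names the master role). A compact standalone restatement with illustrative names, not Pacemaker's:

    #include <stdio.h>

    enum loss_policy { LOSS_STOP, LOSS_DEMOTE, LOSS_FENCE, LOSS_FREEZE };

    static const char *
    on_ticket_loss(enum loss_policy policy, int constrains_master_role)
    {
        switch (policy) {
            case LOSS_STOP:   return "ban resource from all nodes";
            case LOSS_DEMOTE: return constrains_master_role
                                     ? "demote only (promotion score -> -INFINITY)"
                                     : "ban resource from all nodes";
            case LOSS_FENCE:  return "ban resource and fence its nodes";
            case LOSS_FREEZE: return "mark unmanaged and blocked";
        }
        return "unknown policy";
    }

    int
    main(void)
    {
        printf("%s\n", on_ticket_loss(LOSS_DEMOTE, 1));
        printf("%s\n", on_ticket_loss(LOSS_FENCE, 0));
        return 0;
    }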
2027 
2028 enum pe_action_flags
2029 native_action_flags(pe_action_t * action, pe_node_t * node)
2030 {
2031  return action->flags;
2032 }
2033 
2034 static inline bool
2035 is_primitive_action(pe_action_t *action)
2036 {
2037  return action && action->rsc && (action->rsc->variant == pe_native);
2038 }
2039 
2051 static void
2052 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
2053  enum pe_action_flags filter)
2054 {
2055  const char *reason = NULL;
2056 
2057  CRM_ASSERT(is_primitive_action(first));
2058  CRM_ASSERT(is_primitive_action(then));
2059 
2060  // We need to update the action in two cases:
2061 
2062  // ... if 'then' is required
2063  if (is_set(filter, pe_action_optional)
2064  && is_not_set(then->flags, pe_action_optional)) {
2065  reason = "restart";
2066  }
2067 
2068  /* ... if 'then' is unrunnable start of managed resource (if a resource
2069  * should restart but can't start, we still want to stop)
2070  */
2071  if (is_set(filter, pe_action_runnable)
2072  && is_not_set(then->flags, pe_action_runnable)
2073  && is_set(then->rsc->flags, pe_rsc_managed)
2074  && safe_str_eq(then->task, RSC_START)) {
2075  reason = "stop";
2076  }
2077 
2078  if (reason == NULL) {
2079  return;
2080  }
2081 
2082  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
2083  first->uuid, then->uuid, reason);
2084 
2085  // Make 'first' required if it is runnable
2086  if (is_set(first->flags, pe_action_runnable)) {
2087  pe_action_implies(first, then, pe_action_optional);
2088  }
2089 
2090  // Make 'first' required if 'then' is required
2091  if (is_not_set(then->flags, pe_action_optional)) {
2092  pe_action_implies(first, then, pe_action_optional);
2093  }
2094 
2095  // Make 'first' unmigratable if 'then' is unmigratable
2096  if (is_not_set(then->flags, pe_action_migrate_runnable)) {
2097  pe_action_implies(first, then, pe_action_migrate_runnable);
2098  }
2099 
2100  // Make 'then' unrunnable if 'first' is required but unrunnable
2101  if (is_not_set(first->flags, pe_action_optional)
2102  && is_not_set(first->flags, pe_action_runnable)) {
2103  pe_action_implies(then, first, pe_action_runnable);
2104  }
2105 }
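
pe_action_implies(action, reason, flag) is taken here to clear the given flag on its first argument while crediting the second as the cause. Under that assumption, the restart logic above is plain bit propagation, as in this standalone sketch:

    #include <stdio.h>

    #define AF_OPTIONAL  (1u << 0)
    #define AF_RUNNABLE  (1u << 1)

    /* toy stand-in for pe_action_implies(): clear 'flag' on *act */
    static void
    imply_clear(unsigned *act, unsigned flag, const char *why)
    {
        if (*act & flag) {
            *act &= ~flag;
            printf("cleared 0x%x (%s)\n", flag, why);
        }
    }

    int
    main(void)
    {
        unsigned stop = AF_OPTIONAL | AF_RUNNABLE;   /* 'first' action */
        unsigned start = AF_RUNNABLE;                /* 'then': already required */

        /* restart case: a required start makes the preceding stop required too */
        if (!(start & AF_OPTIONAL)) {
            imply_clear(&stop, AF_OPTIONAL, "restart");
        }
        printf("stop is now %s\n", (stop & AF_OPTIONAL) ? "optional" : "required");
        return 0;
    }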
2106 
2107 enum pe_graph_flags
2108 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
2109  enum pe_action_flags flags, enum pe_action_flags filter,
2110  enum pe_ordering type, pe_working_set_t *data_set)
2111 {
2112  /* flags == get_action_flags(first, then_node) called from update_action() */
2113  enum pe_graph_flags changed = pe_graph_none;
2114  enum pe_action_flags then_flags = then->flags;
2115  enum pe_action_flags first_flags = first->flags;
2116 
2117  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2118  first->uuid, first->node ? first->node->details->uname : "[none]",
2119  first->flags, then->uuid, then->flags);
2120 
2121  if (type & pe_order_asymmetrical) {
2122  pe_resource_t *then_rsc = then->rsc;
2123  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2124 
2125  if (!then_rsc) {
2126  /* ignore */
2127  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
2128  /* Ignore. If 'then' is supposed to be stopped after 'first', but
2129  * 'then' is already stopped, there is nothing to be done when asymmetrical. */
2130  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2131  && safe_str_eq(then->task, RSC_START)
2132  && is_set(then->flags, pe_action_optional)
2133  && then->node
2134  && pcmk__list_of_1(then_rsc->running_on)
2135  && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
2136  /* Ignore. If 'then' is supposed to be started after 'first', but
2137  * 'then' is already started, there is nothing to be done when
2138  * asymmetrical -- unless the start is mandatory, which indicates
2139  * the resource is restarting, and the ordering is still needed.
2140  */
2141  } else if (!(first->flags & pe_action_runnable)) {
2142  /* prevent 'then' action from happening if 'first' is not runnable and
2143  * 'then' has not yet occurred. */
2144  pe_action_implies(then, first, pe_action_optional);
2145  pe_action_implies(then, first, pe_action_runnable);
2146 
2147  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2148  } else {
2149  /* ignore... then is allowed to start/stop if it wants to. */
2150  }
2151  }
2152 
2153  if (type & pe_order_implies_first) {
2154  if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
2155  // Needs is_set(first_flags, pe_action_optional) too?
2156  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2157  pe_action_implies(first, then, pe_action_optional);
2158  }
2159 
2160  if (is_set(flags, pe_action_migrate_runnable) &&
2161  is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
2162  is_set(then->flags, pe_action_optional) == FALSE) {
2163 
2164  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2165  first->uuid, then->uuid);
2166  pe_action_implies(first, then, pe_action_migrate_runnable);
2167  }
2168  }
2169 
2170  if (type & pe_order_implies_first_master) {
2171  if ((filter & pe_action_optional) &&
2172  ((then->flags & pe_action_optional) == FALSE) &&
2173  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2174  pe_action_implies(first, then, pe_action_optional);
2175 
2176  if (is_set(first->flags, pe_action_migrate_runnable) &&
2177  is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
2178 
2179  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
2180  pe_action_implies(first, then, pe_action_migrate_runnable);
2181  }
2182  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2183  }
2184  }
2185 
2186  if ((type & pe_order_implies_first_migratable)
2187  && is_set(filter, pe_action_optional)) {
2188 
2189  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2190  ((then->flags & pe_action_runnable) == FALSE)) {
2191 
2192  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid);
2193  pe_action_implies(first, then, pe_action_runnable);
2194  }
2195 
2196  if ((then->flags & pe_action_optional) == 0) {
2197  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2198  pe_action_implies(first, then, pe_action_optional);
2199  }
2200  }
2201 
2202  if ((type & pe_order_pseudo_left)
2203  && is_set(filter, pe_action_optional)) {
2204 
2205  if ((first->flags & pe_action_runnable) == FALSE) {
2206  pe_action_implies(then, first, pe_action_runnable);
2207  pe_action_implies(then, first, pe_action_pseudo);
2208  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2209  }
2210 
2211  }
2212 
2213  if (is_set(type, pe_order_runnable_left)
2214  && is_set(filter, pe_action_runnable)
2215  && is_set(then->flags, pe_action_runnable)
2216  && is_set(flags, pe_action_runnable) == FALSE) {
2217  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2218  pe_action_implies(then, first, pe_action_runnable);
2219  pe_action_implies(then, first, pe_action_migrate_runnable);
2220  }
2221 
2222  if (is_set(type, pe_order_implies_then)
2223  && is_set(filter, pe_action_optional)
2224  && is_set(then->flags, pe_action_optional)
2225  && is_set(flags, pe_action_optional) == FALSE) {
2226 
2227  /* in this case, treat migrate_runnable as if first is optional */
2228  if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
2229  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2230  pe_action_implies(then, first, pe_action_optional);
2231  }
2232  }
2233 
2234  if (is_set(type, pe_order_restart)) {
2235  handle_restart_ordering(first, then, filter);
2236  }
2237 
2238  if (then_flags != then->flags) {
2239  changed |= pe_graph_updated_then;
2240  pe_rsc_trace(then->rsc,
2241  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2242  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2243  then_flags, first->uuid, first->flags);
2244 
2245  if(then->rsc && then->rsc->parent) {
2246  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2247  update_action(then, data_set);
2248  }
2249  }
2250 
2251  if (first_flags != first->flags) {
2252  changed |= pe_graph_updated_first;
2253  pe_rsc_trace(first->rsc,
2254  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2255  first->uuid, first->node ? first->node->details->uname : "[none]",
2256  first->flags, first_flags, then->uuid, then->flags);
2257  }
2258 
2259  return changed;
2260 }
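
Callers re-run this update whenever it reports pe_graph_updated_first or pe_graph_updated_then, so flag changes ripple through ordering chains until nothing changes anymore. The convergence idea, reduced to a chain of runnable-left orderings in a standalone sketch:

    #include <stdio.h>

    int
    main(void)
    {
        /* action i may run only if action i-1 can (runnable_left ordering) */
        int runnable[4] = { 0, 1, 1, 1 };
        int changed = 1;

        while (changed) {                  /* iterate to a fixed point */
            changed = 0;
            for (int i = 1; i < 4; i++) {
                if (runnable[i] && !runnable[i - 1]) {
                    runnable[i] = 0;       /* 'then' loses runnable via 'first' */
                    changed = 1;
                }
            }
        }
        for (int i = 0; i < 4; i++) {
            printf("action %d: %s\n", i, runnable[i] ? "runnable" : "blocked");
        }
        return 0;
    }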
2261 
2262 void
2263 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
2264 {
2265  GListPtr gIter = NULL;
2266  GHashTableIter iter;
2267  pe_node_t *node = NULL;
2268 
2269  if (constraint == NULL) {
2270  pe_err("Constraint is NULL");
2271  return;
2272 
2273  } else if (rsc == NULL) {
2274  pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
2275  return;
2276  }
2277 
2278  pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
2279  role2text(constraint->role_filter), rsc->id);
2280 
2281  /* take "lifetime" into account */
2282  if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
2283  pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
2284  constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
2285  return;
2286  }
2287 
2288  if (constraint->node_list_rh == NULL) {
2289  pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
2290  return;
2291  }
2292 
2293  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2294  pe_node_t *node = (pe_node_t *) gIter->data;
2295  pe_node_t *other_node = NULL;
2296 
2297  other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2298 
2299  if (other_node != NULL) {
2300  pe_rsc_trace(rsc, "%s + %s: %d + %d",
2301  node->details->uname,
2302  other_node->details->uname, node->weight, other_node->weight);
2303  other_node->weight = pe__add_scores(other_node->weight,
2304  node->weight);
2305 
2306  } else {
2307  other_node = pe__copy_node(node);
2308 
2309  pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
2310  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2311  }
2312 
2313  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2314  if (constraint->discover_mode == pe_discover_exclusive) {
2315  rsc->exclusive_discover = TRUE;
2316  }
2317  /* exclusive > never > always... always is default */
2318  other_node->rsc_discover_mode = constraint->discover_mode;
2319  }
2320  }
2321 
2322  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
2323  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
2324  pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
2325  }
2326 }
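
The discover-mode merge above depends on the enum ordering noted in the comment (always < never < exclusive), so taking the per-node maximum keeps the strongest setting. Modeled standalone with assumed enum values:

    #include <stdio.h>

    enum discover_mode { DISCOVER_ALWAYS = 0, DISCOVER_NEVER = 1,
                         DISCOVER_EXCLUSIVE = 2 };   /* assumed relative order */

    int
    main(void)
    {
        enum discover_mode node_mode = DISCOVER_ALWAYS;       /* the default */
        enum discover_mode from_constraint = DISCOVER_EXCLUSIVE;

        if (node_mode < from_constraint) {
            node_mode = from_constraint;   /* exclusive > never > always */
        }
        printf("effective mode: %d\n", node_mode);
        return 0;
    }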
2327 
2328 void
2329 native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
2330 {
2331  GListPtr gIter = NULL;
2332 
2333  CRM_ASSERT(rsc);
2334  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2335 
2336  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2337  pe_action_t *action = (pe_action_t *) gIter->data;
2338 
2339  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2340  graph_element_from_action(action, data_set);
2341  }
2342 
2343  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2344  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2345 
2346  child_rsc->cmds->expand(child_rsc, data_set);
2347  }
2348 }
2349 
2350 #define log_change(a, fmt, args...) do { \
2351  if(a && a->reason && terminal) { \
2352  printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
2353  } else if(a && a->reason) { \
2354  crm_notice(fmt" \tdue to %s", ##args, a->reason); \
2355  } else if(terminal) { \
2356  printf(" * "fmt"\n", ##args); \
2357  } else { \
2358  crm_notice(fmt, ##args); \
2359  } \
2360  } while(0)
2361 
2362 #define STOP_SANITY_ASSERT(lineno) do { \
2363  if(current && current->details->unclean) { \
2364  /* It will be a pseudo op */ \
2365  } else if(stop == NULL) { \
2366  crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
2367  CRM_ASSERT(stop != NULL); \
2368  } else if(is_set(stop->flags, pe_action_optional)) { \
2369  crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
2370  CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
2371  } \
2372  } while(0)
2373 
2374 static void
2375 LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
2376 {
2377  int len = 0;
2378  char *reason = NULL;
2379  char *details = NULL;
2380  bool same_host = FALSE;
2381  bool same_role = FALSE;
2382  bool need_role = FALSE;
2383 
2384  static int rsc_width = 5;
2385  static int detail_width = 5;
2386 
2387  CRM_ASSERT(action);
2388  CRM_ASSERT(destination != NULL || origin != NULL);
2389 
2390  if(source == NULL) {
2391  source = action;
2392  }
2393 
2394  len = strlen(rsc->id);
2395  if(len > rsc_width) {
2396  rsc_width = len + 2;
2397  }
2398 
2399  if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
2400  need_role = TRUE;
2401  }
2402 
2403  if(origin != NULL && destination != NULL && origin->details == destination->details) {
2404  same_host = TRUE;
2405  }
2406 
2407  if(rsc->role == rsc->next_role) {
2408  same_role = TRUE;
2409  }
2410 
2411  if (need_role && (origin == NULL)) {
2412  /* Starting and promoting a promotable clone instance */
2413  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
2414 
2415  } else if (origin == NULL) {
2416  /* Starting a resource */
2417  details = crm_strdup_printf("%s", destination->details->uname);
2418 
2419  } else if (need_role && (destination == NULL)) {
2420  /* Stopping a promotable clone instance */
2421  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2422 
2423  } else if (destination == NULL) {
2424  /* Stopping a resource */
2425  details = crm_strdup_printf("%s", origin->details->uname);
2426 
2427  } else if (need_role && same_role && same_host) {
2428  /* Recovering, restarting or re-promoting a promotable clone instance */
2429  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2430 
2431  } else if (same_role && same_host) {
2432  /* Recovering or Restarting a normal resource */
2433  details = crm_strdup_printf("%s", origin->details->uname);
2434 
2435  } else if (need_role && same_role) {
2436  /* Moving a promotable clone instance */
2437  details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
2438 
2439  } else if (same_role) {
2440  /* Moving a normal resource */
2441  details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
2442 
2443  } else if (same_host) {
2444  /* Promoting or demoting a promotable clone instance */
2445  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
2446 
2447  } else {
2448  /* Moving and promoting/demoting */
2449  details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
2450  }
2451 
2452  len = strlen(details);
2453  if(len > detail_width) {
2454  detail_width = len;
2455  }
2456 
2457  if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
2458  reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
2459 
2460  } else if(source->reason) {
2461  reason = crm_strdup_printf(" due to %s", source->reason);
2462 
2463  } else if(is_not_set(action->flags, pe_action_runnable)) {
2464  reason = strdup(" blocked");
2465 
2466  } else {
2467  reason = strdup("");
2468  }
2469 
2470  if(terminal) {
2471  printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
2472  } else {
2473  crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
2474  }
2475 
2476  free(details);
2477  free(reason);
2478 }
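
LogAction() keeps its output aligned by widening the static rsc_width and detail_width columns whenever a longer name appears, then feeding them to printf through the '*' width specifier. The formatting trick in isolation, with made-up resource names:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *ids[] = { "ip", "database-master" };
        const char *details[] = { "node1 -> node2", "Slave -> Master node3" };
        int rsc_width = 5, detail_width = 5;

        for (int i = 0; i < 2; i++) {        /* widen columns to fit content */
            if ((int) strlen(ids[i]) > rsc_width) {
                rsc_width = (int) strlen(ids[i]) + 2;
            }
            if ((int) strlen(details[i]) > detail_width) {
                detail_width = (int) strlen(details[i]);
            }
        }
        for (int i = 0; i < 2; i++) {        /* '*' pulls width from an argument */
            printf(" * %-8s %-*s ( %*s )\n", "Move", rsc_width, ids[i],
                   detail_width, details[i]);
        }
        return 0;
    }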
2479 
2480 
2481 void
2482 LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
2483 {
2484  pe_node_t *next = NULL;
2485  pe_node_t *current = NULL;
2486  pe_node_t *start_node = NULL;
2487 
2488  pe_action_t *stop = NULL;
2489  pe_action_t *start = NULL;
2490  pe_action_t *demote = NULL;
2491  pe_action_t *promote = NULL;
2492 
2493  char *key = NULL;
2494  gboolean moving = FALSE;
2495  GListPtr possible_matches = NULL;
2496 
2497  if(rsc->variant == pe_container) {
2498  pcmk__bundle_log_actions(rsc, data_set, terminal);
2499  return;
2500  }
2501 
2502  if (rsc->children) {
2503  GListPtr gIter = NULL;
2504 
2505  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2506  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2507 
2508  LogActions(child_rsc, data_set, terminal);
2509  }
2510  return;
2511  }
2512 
2513  next = rsc->allocated_to;
2514  if (rsc->running_on) {
2515  current = pe__current_node(rsc);
2516  if (rsc->role == RSC_ROLE_STOPPED) {
2517  /*
2518  * This can occur when resources are being recovered
2519  * We fiddle with the current role in native_create_actions()
2520  */
2521  rsc->role = RSC_ROLE_STARTED;
2522  }
2523  }
2524 
2525  if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
2526  /* Don't log stopped orphans */
2527  return;
2528  }
2529 
2530  if (is_not_set(rsc->flags, pe_rsc_managed)
2531  || (current == NULL && next == NULL)) {
2532  pe_rsc_info(rsc, "Leave %s\t(%s%s)",
2533  rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
2534  pe_rsc_managed) ? " unmanaged" : "");
2535  return;
2536  }
2537 
2538  if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
2539  moving = TRUE;
2540  }
2541 
2542  possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
2543  if (possible_matches) {
2544  start = possible_matches->data;
2545  g_list_free(possible_matches);
2546  }
2547 
2548  if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
2549  start_node = NULL;
2550  } else {
2551  start_node = current;
2552  }
2553  possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
2554  if (possible_matches) {
2555  stop = possible_matches->data;
2556  g_list_free(possible_matches);
2557  }
2558 
2559  possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
2560  if (possible_matches) {
2561  promote = possible_matches->data;
2562  g_list_free(possible_matches);
2563  }
2564 
2565  possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
2566  if (possible_matches) {
2567  demote = possible_matches->data;
2568  g_list_free(possible_matches);
2569  }
2570 
2571  if (rsc->role == rsc->next_role) {
2572  pe_action_t *migrate_op = NULL;
2573 
2574  possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
2575  if (possible_matches) {
2576  migrate_op = possible_matches->data;
2577  }
2578 
2579  CRM_CHECK(next != NULL,);
2580  if (next == NULL) {
2581  } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
2582  LogAction("Migrate", rsc, current, next, start, NULL, terminal);
2583 
2584  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2585  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2586 
2587 
2588  } else if (start == NULL || is_set(start->flags, pe_action_optional)) {
2589  if ((demote != NULL) && (promote != NULL)
2590  && is_not_set(demote->flags, pe_action_optional)
2591  && is_not_set(promote->flags, pe_action_optional)) {
2592  LogAction("Re-promote", rsc, current, next, promote, demote,
2593  terminal);
2594  } else {
2595  pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id,
2596  role2text(rsc->role), next->details->uname);
2597  }
2598 
2599  } else if (is_not_set(start->flags, pe_action_runnable)) {
2600  LogAction("Stop", rsc, current, NULL, stop,
2601  (stop && stop->reason)? stop : start, terminal);
2602  STOP_SANITY_ASSERT(__LINE__);
2603 
2604  } else if (moving && current) {
2605  LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
2606  rsc, current, next, stop, NULL, terminal);
2607 
2608  } else if (is_set(rsc->flags, pe_rsc_failed)) {
2609  LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
2610  STOP_SANITY_ASSERT(__LINE__);
2611 
2612  } else {
2613  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2614  /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
2615  }
2616 
2617  g_list_free(possible_matches);
2618  return;
2619  }
2620 
2621  if(stop
2622  && (rsc->next_role == RSC_ROLE_STOPPED
2623  || (start && is_not_set(start->flags, pe_action_runnable)))) {
2624 
2625  GListPtr gIter = NULL;
2626 
2627  key = stop_key(rsc);
2628  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2629  pe_node_t *node = (pe_node_t *) gIter->data;
2630  pe_action_t *stop_op = NULL;
2631 
2632  possible_matches = find_actions(rsc->actions, key, node);
2633  if (possible_matches) {
2634  stop_op = possible_matches->data;
2635  g_list_free(possible_matches);
2636  }
2637 
2638  if (stop_op && (stop_op->flags & pe_action_runnable)) {
2639  STOP_SANITY_ASSERT(__LINE__);
2640  }
2641 
2642  LogAction("Stop", rsc, node, NULL, stop_op,
2643  (stop_op && stop_op->reason)? stop_op : start, terminal);
2644  }
2645 
2646  free(key);
2647 
2648  } else if (stop && is_set(rsc->flags, pe_rsc_failed)
2649  && is_set(rsc->flags, pe_rsc_stop)) {
2650  /* 'stop' may be NULL if the failure was ignored */
2651  LogAction("Recover", rsc, current, next, stop, start, terminal);
2652  STOP_SANITY_ASSERT(__LINE__);
2653 
2654  } else if (moving) {
2655  LogAction("Move", rsc, current, next, stop, NULL, terminal);
2656  STOP_SANITY_ASSERT(__LINE__);
2657 
2658  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2659  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2660 
2661  } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
2662  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2663  STOP_SANITY_ASSERT(__LINE__);
2664 
2665  } else if (rsc->role == RSC_ROLE_MASTER) {
2666  CRM_LOG_ASSERT(current != NULL);
2667  LogAction("Demote", rsc, current, next, demote, NULL, terminal);
2668 
2669  } else if(rsc->next_role == RSC_ROLE_MASTER) {
2670  CRM_LOG_ASSERT(next);
2671  LogAction("Promote", rsc, current, next, promote, NULL, terminal);
2672 
2673  } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
2674  LogAction("Start", rsc, current, next, start, NULL, terminal);
2675  }
2676 }
2677 
2678 gboolean
2679 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2680 {
2681  GListPtr gIter = NULL;
2682 
2683  CRM_ASSERT(rsc);
2684  pe_rsc_trace(rsc, "%s", rsc->id);
2685 
2686  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2687  pe_node_t *current = (pe_node_t *) gIter->data;
2688  pe_action_t *stop;
2689 
2690  if (rsc->partial_migration_target) {
2691  if (rsc->partial_migration_target->details == current->details) {
2692  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2693  next->details->uname, rsc->id);
2694  continue;
2695  } else {
2696  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2697  optional = FALSE;
2698  }
2699  }
2700 
2701  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2702  stop = stop_action(rsc, current, optional);
2703 
2704  if(rsc->allocated_to == NULL) {
2705  pe_action_set_reason(stop, "node availability", TRUE);
2706  }
2707 
2708  if (is_not_set(rsc->flags, pe_rsc_managed)) {
2709  update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2710  }
2711 
2712  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
2713  DeleteRsc(rsc, current, optional, data_set);
2714  }
2715 
2716  if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2717  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
2718 
2719  order_actions(stop, unfence, pe_order_implies_first);
2720  if (!node_has_been_unfenced(current)) {
2721  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2722  }
2723  }
2724  }
2725 
2726  return TRUE;
2727 }
2728 
2729 static void
2730 order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
2731  enum pe_ordering order, pe_working_set_t *data_set)
2732 {
2733  /* When unfencing is in use, we order unfence actions before any probe or
2734  * start of resources that require unfencing, and also of fence devices.
2735  *
2736  * This might seem to violate the principle that fence devices require
2737  * only quorum. However, fence agents that unfence often don't have enough
2738  * information to even probe or start unless the node is first unfenced.
2739  */
2740  if (is_unfence_device(rsc, data_set)
2741  || is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2742 
2743  /* Start with an optional ordering. Requiring unfencing would result in
2744  * the node being unfenced, and all its resources being stopped,
2745  * whenever a new resource is added -- which would be highly suboptimal.
2746  */
2747  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
2748 
2749  order_actions(unfence, action, order);
2750 
2751  if (!node_has_been_unfenced(node)) {
2752  // But unfencing is required if it has never been done
2753  char *reason = crm_strdup_printf("required by %s %s",
2754  rsc->id, action->task);
2755 
2756  trigger_unfencing(NULL, node, reason, NULL, data_set);
2757  free(reason);
2758  }
2759  }
2760 }
2761 
2762 gboolean
2763 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2764 {
2765  pe_action_t *start = NULL;
2766 
2767  CRM_ASSERT(rsc);
2768  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2769  start = start_action(rsc, next, TRUE);
2770 
2771  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2772 
2773  if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
2774  update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2775  }
2776 
2777 
2778  return TRUE;
2779 }
2780 
2781 gboolean
2782 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2783 {
2784  GListPtr gIter = NULL;
2785  gboolean runnable = TRUE;
2786  GListPtr action_list = NULL;
2787 
2788  CRM_ASSERT(rsc);
2789  CRM_CHECK(next != NULL, return FALSE);
2790  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2791 
2792  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2793 
2794  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2795  pe_action_t *start = (pe_action_t *) gIter->data;
2796 
2797  if (is_set(start->flags, pe_action_runnable) == FALSE) {
2798  runnable = FALSE;
2799  }
2800  }
2801  g_list_free(action_list);
2802 
2803  if (runnable) {
2804  promote_action(rsc, next, optional);
2805  return TRUE;
2806  }
2807 
2808  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2809 
2810  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2811 
2812  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2813  pe_action_t *promote = (pe_action_t *) gIter->data;
2814 
2815  update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2816  }
2817 
2818  g_list_free(action_list);
2819  return TRUE;
2820 }
2821 
2822 gboolean
2823 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2824 {
2825  GListPtr gIter = NULL;
2826 
2827  CRM_ASSERT(rsc);
2828  pe_rsc_trace(rsc, "%s", rsc->id);
2829 
2830 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2831  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2832  pe_node_t *current = (pe_node_t *) gIter->data;
2833 
2834  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2835  demote_action(rsc, current, optional);
2836  }
2837  return TRUE;
2838 }
2839 
2840 gboolean
2841 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2842 {
2843  CRM_ASSERT(rsc);
2844  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2845  CRM_CHECK(FALSE, return FALSE);
2846  return FALSE;
2847 }
2848 
2849 gboolean
2850 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2851 {
2852  CRM_ASSERT(rsc);
2853  pe_rsc_trace(rsc, "%s", rsc->id);
2854  return FALSE;
2855 }
2856 
2857 gboolean
2858 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2859 {
2860  if (is_set(rsc->flags, pe_rsc_failed)) {
2861  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2862  return FALSE;
2863 
2864  } else if (node == NULL) {
2865  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2866  return FALSE;
2867 
2868  } else if (node->details->unclean || node->details->online == FALSE) {
2869  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2870  node->details->uname);
2871  return FALSE;
2872  }
2873 
2874  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2875 
2876  delete_action(rsc, node, optional);
2877 
2878  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2879  optional ? pe_order_implies_then : pe_order_optional, data_set);
2880 
2881  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2882  optional ? pe_order_implies_then : pe_order_optional, data_set);
2883 
2884  return TRUE;
2885 }
2886 
2887 gboolean
2888 native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
2889  gboolean force, pe_working_set_t * data_set)
2890 {
2891  enum pe_ordering flags = pe_order_optional;
2892  char *key = NULL;
2893  pe_action_t *probe = NULL;
2894  pe_node_t *running = NULL;
2895  pe_node_t *allowed = NULL;
2896  pe_resource_t *top = uber_parent(rsc);
2897 
2898  static const char *rc_master = NULL;
2899  static const char *rc_inactive = NULL;
2900 
2901  if (rc_inactive == NULL) {
2902  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
2903  rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
2904  }
2905 
2906  CRM_CHECK(node != NULL, return FALSE);
2907  if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
2908  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2909  return FALSE;
2910  }
2911 
2912  if (pe__is_guest_or_remote_node(node)) {
2913  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2914 
2915  if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {
2916  pe_rsc_trace(rsc,
2917  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2918  rsc->id, node->details->id);
2919  return FALSE;
2920  } else if (pe__is_guest_node(node)
2921  && pe__resource_contains_guest_node(data_set, rsc)) {
2922  pe_rsc_trace(rsc,
2923  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2924  rsc->id, node->details->id);
2925  return FALSE;
2926  } else if (rsc->is_remote_node) {
2927  pe_rsc_trace(rsc,
2928  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2929  rsc->id, node->details->id);
2930  return FALSE;
2931  }
2932  }
2933 
2934  if (rsc->children) {
2935  GListPtr gIter = NULL;
2936  gboolean any_created = FALSE;
2937 
2938  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2939  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2940 
2941  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2942  || any_created;
2943  }
2944 
2945  return any_created;
2946 
2947  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2948  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2949  return FALSE;
2950  }
2951 
2952  if (is_set(rsc->flags, pe_rsc_orphan)) {
2953  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2954  return FALSE;
2955  }
2956 
2957  // Check whether resource is already known on node
2958  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2959  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2960  return FALSE;
2961  }
2962 
2963  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2964 
2965  if (rsc->exclusive_discover || top->exclusive_discover) {
2966  if (allowed == NULL) {
2967  /* exclusive discover is enabled and this node is not in the allowed list. */
2968  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2969  return FALSE;
2970  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2971  /* exclusive discover is enabled and this node is not marked
2972  * as a node this resource should be discovered on */
2973  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2974  return FALSE;
2975  }
2976  }
2977 
2978  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2979  /* If this node was allowed to host this resource it would
2980  * have been explicitly added to the 'allowed_nodes' list.
2981  * However it wasn't and the node has discovery disabled, so
2982  * no need to probe for this resource.
2983  */
2984  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2985  return FALSE;
2986  }
2987 
2988  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2989  /* this resource is marked as not needing to be discovered on this node */
2990  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2991  return FALSE;
2992  }
2993 
2994  if (pe__is_guest_node(node)) {
2995  pe_resource_t *remote = node->details->remote_rsc->container;
2996 
2997  if(remote->role == RSC_ROLE_STOPPED) {
2998  /* If the container is stopped, then we know anything that
2999  * might have been inside it is also stopped and there is
3000  * no need to probe.
3001  *
3002  * If we don't know the container's state on the target
3003  * either:
3004  *
3005  * - the container is running, the transition will abort
3006  * and we'll end up in a different case next time, or
3007  *
3008  * - the container is stopped
3009  *
3010  * Either way there is no need to probe.
3011  *
3012  */
3013  if(remote->allocated_to
3014  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
3015  /* For safety, we order the 'rsc' start after 'remote'
3016  * has been probed.
3017  *
3018  * Using 'top' helps for groups, but we may need to
3019  * follow the start's ordering chain backwards.
3020  */
3021  custom_action_order(remote,
3022  pcmk__op_key(remote->id, RSC_STATUS, 0),
3023  NULL, top,
3024  pcmk__op_key(top->id, RSC_START, 0), NULL,
3025  pe_order_optional, data_set);
3026  }
3027  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
3028  rsc->id, node->details->id, remote->id);
3029  return FALSE;
3030 
3031 /* Here we really want to check whether remote->stop is required,
3032  * but that information doesn't exist yet
3033  */
3034  } else if(node->details->remote_requires_reset
3035  || node->details->unclean
3036  || is_set(remote->flags, pe_rsc_failed)
3037  || remote->next_role == RSC_ROLE_STOPPED
3038  || (remote->allocated_to
3039  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
3040  ) {
3041  /* The container is stopping or restarting, don't start
3042  * 'rsc' until 'remote' stops as this also implies that
3043  * 'rsc' is stopped - avoiding the need to probe
3044  */
3045  custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
3046  NULL, top, pcmk__op_key(top->id, RSC_START, 0),
3047  NULL, pe_order_optional, data_set);
3048  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
3049  rsc->id, node->details->id, remote->id);
3050  return FALSE;
3051 /* } else {
3052  * The container is running so there is no problem probing it
3053  */
3054  }
3055  }
3056 
3057  key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
3058  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
3059  update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
3060 
3061  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
3062 
3063  /*
3064  * We need to know if it's running_on (not just known_on) this node
3065  * to correctly determine the target rc.
3066  */
3067  running = pe_find_node_id(rsc->running_on, node->details->id);
3068  if (running == NULL) {
3069  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
3070 
3071  } else if (rsc->role == RSC_ROLE_MASTER) {
3072  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
3073  }
3074 
3075  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
3076  is_set(probe->flags, pe_action_runnable), rsc->running_on);
3077 
3078  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
3079  top = rsc;
3080  } else {
3081  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
3082  }
3083 
3084  if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
3085  /* Prevent the start from occurring if rsc isn't active, but
3086  * don't cause it to stop if it was active already
3087  */
3088  flags |= pe_order_runnable_left;
3089  }
3090 
3091  custom_action_order(rsc, NULL, probe,
3092  top, pcmk__op_key(top->id, RSC_START, 0), NULL,
3093  flags, data_set);
3094 
3095  /* Before any reloads, if they exist */
3096  custom_action_order(rsc, NULL, probe,
3097  top, reload_key(rsc), NULL,
3098  pe_order_optional, data_set);
3099 
3100 #if 0
3101  // complete is always null currently
3102  if (!is_unfence_device(rsc, data_set)) {
3103  /* Normally rsc.start depends on probe complete which depends
3104  * on rsc.probe. But this can't be the case for fence devices
3105  * with unfencing, as it would create graph loops.
3106  *
3107  * So instead we explicitly order 'rsc.probe then rsc.start'
3108  */
3109  order_actions(probe, complete, pe_order_implies_then);
3110  }
3111 #endif
3112  return TRUE;
3113 }
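
The probe's expected result is carried in the target-rc meta-attribute set above: OCF exit code 7 (not running) when the resource is inactive on the probed node, 8 (running master) when a promoted instance is expected there, and the default of 0 otherwise. As a tiny standalone function:

    #include <stdio.h>

    enum { OCF_OK = 0, OCF_NOT_RUNNING = 7, OCF_RUNNING_MASTER = 8 };

    static int
    expected_probe_rc(int running_on_node, int is_master)
    {
        if (!running_on_node) {
            return OCF_NOT_RUNNING;      /* probe should find nothing */
        }
        if (is_master) {
            return OCF_RUNNING_MASTER;   /* promoted instance expected */
        }
        return OCF_OK;                   /* plain running instance expected */
    }

    int
    main(void)
    {
        printf("%d %d %d\n", expected_probe_rc(0, 0), expected_probe_rc(1, 0),
               expected_probe_rc(1, 1));
        return 0;
    }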
3114 
3115 /*!
3116  * \internal
3117  * \brief Check whether a resource is known on a particular node
3118  *
3119  * \param[in] rsc   Resource to check
3120  * \param[in] node  Node to check
3121  *
3122  * \return TRUE if resource (or parent if an anonymous clone) is known
3123  */
3124 static bool
3125 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
3126 {
3127  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
3128  return TRUE;
3129 
3130  } else if ((rsc->variant == pe_native)
3131  && pe_rsc_is_anon_clone(rsc->parent)
3132  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3133  /* We check only the parent, not the uber-parent, because we cannot
3134  * assume that the resource is known if it is in an anonymously cloned
3135  * group (which may be only partially known).
3136  */
3137  return TRUE;
3138  }
3139  return FALSE;
3140 }
3141 
3142 /*!
3143  * \internal
3144  * \brief Order a resource's start and promote actions relative to fencing
3145  *
3146  * \param[in] rsc         Resource to be ordered
3147  * \param[in] stonith_op  Fencing operation
3148  * \param[in] data_set    Cluster information
3149  */
3150 static void
3151 native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3152 {
3153  pe_node_t *target;
3154  GListPtr gIter = NULL;
3155 
3156  CRM_CHECK(stonith_op && stonith_op->node, return);
3157  target = stonith_op->node;
3158 
3159  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
3160  pe_action_t *action = (pe_action_t *) gIter->data;
3161 
3162  switch (action->needs) {
3163  case rsc_req_nothing:
3164  // Anything other than start or promote requires nothing
3165  break;
3166 
3167  case rsc_req_stonith:
3168  order_actions(stonith_op, action, pe_order_optional);
3169  break;
3170 
3171  case rsc_req_quorum:
3172  if (safe_str_eq(action->task, RSC_START)
3173  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
3174  && !rsc_is_known_on(rsc, target)) {
3175 
3176  /* If we don't know the status of the resource on the node
3177  * we're about to shoot, we have to assume it may be active
3178  * there. Order the resource start after the fencing. This
3179  * is analogous to waiting for all the probes for a resource
3180  * to complete before starting it.
3181  *
3182  * The most likely explanation is that the DC died and took
3183  * its status with it.
3184  */
3185  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
3186  target->details->uname);
3187  order_actions(stonith_op, action,
3188  pe_order_optional | pe_order_runnable_left);
3189  }
3190  break;
3191  }
3192  }
3193 }
3194 
3195 static void
3196 native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3197 {
3198  GListPtr gIter = NULL;
3199  GListPtr action_list = NULL;
3200  bool order_implicit = false;
3201 
3202  pe_resource_t *top = uber_parent(rsc);
3203  pe_action_t *parent_stop = NULL;
3204  pe_node_t *target;
3205 
3206  CRM_CHECK(stonith_op && stonith_op->node, return);
3207  target = stonith_op->node;
3208 
3209  /* Get a list of stop actions potentially implied by the fencing */
3210  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
3211 
3212  /* If resource requires fencing, implicit actions must occur after fencing.
3213  *
3214  * Implied stops and demotes of resources running on guest nodes are always
3215  * ordered after fencing, even if the resource does not require fencing,
3216  * because guest node "fencing" is actually just a resource stop.
3217  */
3218  if (is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) {
3219  order_implicit = true;
3220  }
3221 
3222  if (action_list && order_implicit) {
3223  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
3224  }
3225 
3226  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3227  pe_action_t *action = (pe_action_t *) gIter->data;
3228 
3229  // The stop would never complete, so convert it into a pseudo-action.
3230  update_action_flags(action, pe_action_pseudo | pe_action_runnable,
3231  __FUNCTION__, __LINE__);
3232 
3233  if (order_implicit) {
3234  update_action_flags(action, pe_action_implied_by_stonith,
3235  __FUNCTION__, __LINE__);
3236 
3237  /* Order the stonith before the parent stop (if any).
3238  *
3239  * Also order the stonith before the resource stop, unless the
3240  * resource is inside a bundle -- that would cause a graph loop.
3241  * We can rely on the parent stop's ordering instead.
3242  *
3243  * User constraints must not order a resource in a guest node
3244  * relative to the guest node container resource. The
3245  * pe_order_preserve flag marks constraints as generated by the
3246  * cluster and thus immune to that check (and is irrelevant if
3247  * target is not a guest).
3248  */
3249  if (!pe_rsc_is_bundled(rsc)) {
3250  order_actions(stonith_op, action, pe_order_preserve);
3251  }
3252  order_actions(stonith_op, parent_stop, pe_order_preserve);
3253  }
3254 
3255  if (is_set(rsc->flags, pe_rsc_failed)) {
3256  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3257  rsc->id, (order_implicit? "after" : "because"),
3258  target->details->uname);
3259  } else {
3260  crm_info("%s is implicit %s %s is fenced",
3261  action->uuid, (order_implicit? "after" : "because"),
3262  target->details->uname);
3263  }
3264 
3265  if (is_set(rsc->flags, pe_rsc_notify)) {
3266  /* Create a second notification that will be delivered
3267  * immediately after the node is fenced
3268  *
3269  * Basic problem:
3270  * - C is a clone active on the node to be shot and stopping on another
3271  * - R is a resource that depends on C
3272  *
3273  * + C.stop depends on R.stop
3274  * + C.stopped depends on STONITH
3275  * + C.notify depends on C.stopped
3276  * + C.healthy depends on C.notify
3277  * + R.stop depends on C.healthy
3278  *
3279  * The extra notification here changes
3280  * + C.healthy depends on C.notify
3281  * into:
3282  * + C.healthy depends on C.notify'
3283  * + C.notify' depends on STONITH'
3284  * thus breaking the loop
3285  */
3286  create_secondary_notification(action, rsc, stonith_op, data_set);
3287  }
3288 
3289 /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
3290 
3291  However given group(rA, rB) running on nodeX and B.stop has failed,
3292  A := stop healthy resource (rA.stop)
3293  B := stop failed resource (pseudo operation B.stop)
3294  C := stonith nodeX
3295  A requires B, B requires C, C requires A
3296  This loop would prevent the cluster from making progress.
3297 
3298  This block creates the "C requires A" dependency and therefore must (at least
3299  for now) be disabled.
3300 
3301  Instead, run the block above and treat all resources on nodeX as B would be
3302  (marked as a pseudo op depending on the STONITH).
3303 
3304  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3305 
3306  } else if(is_stonith == FALSE) {
3307  crm_info("Moving healthy resource %s"
3308  " off %s before fencing",
3309  rsc->id, node->details->uname);
3310 
3311  * stop healthy resources before the
3312  * stonith op
3313  *
3314  custom_action_order(
3315  rsc, stop_key(rsc), NULL,
3316  NULL,strdup(CRM_OP_FENCE),stonith_op,
3317  pe_order_optional, data_set);
3318 */
3319  }
3320 
3321  g_list_free(action_list);
3322 
3323  /* Get a list of demote actions potentially implied by the fencing */
3324  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3325 
3326  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3327  pe_action_t *action = (pe_action_t *) gIter->data;
3328 
3329  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3330  || is_set(rsc->flags, pe_rsc_failed)) {
3331 
3332  if (is_set(rsc->flags, pe_rsc_failed)) {
3333  pe_rsc_info(rsc,
3334  "Demote of failed resource %s is implicit after %s is fenced",
3335  rsc->id, target->details->uname);
3336  } else {
3337  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3338  action->uuid, target->details->uname);
3339  }
3340 
3341  /* The demote would never complete and is now implied by the
3342  * fencing, so convert it into a pseudo-action.
3343  */
3344  update_action_flags(action, pe_action_pseudo | pe_action_runnable,
3345  __FUNCTION__, __LINE__);
3346 
3347  if (pe_rsc_is_bundled(rsc)) {
3348  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3349 
3350  } else if (order_implicit) {
3351  order_actions(stonith_op, action, pe_order_preserve | pe_order_optional);
3352  }
3353  }
3354  }
3355 
3356  g_list_free(action_list);
3357 }
3358 
3359 void
3360 rsc_stonith_ordering(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3361 {
3362  if (rsc->children) {
3363  GListPtr gIter = NULL;
3364 
3365  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3366  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3367 
3368  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3369  }
3370 
3371  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3372  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3373 
3374  } else {
3375  native_start_constraints(rsc, stonith_op, data_set);
3376  native_stop_constraints(rsc, stonith_op, data_set);
3377  }
3378 }
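
For orientation, here is a hedged sketch of how this entry point is typically driven. The wrapper function and loop shape below are illustrative assumptions, not code from this file: once the scheduler decides a node must be fenced, it creates the fence action once and then applies rsc_stonith_ordering() to each top-level resource; the recursion above handles children and skips unmanaged resources.

#include <crm_internal.h>
#include <pacemaker-internal.h>

/* Hypothetical helper, for illustration only */
static void
order_all_against_fencing(pe_node_t *node, pe_working_set_t *data_set)
{
    GListPtr gIter = NULL;

    /* Create (or look up) the fence action for this node */
    pe_action_t *stonith_op = pe_fence_op(node, NULL, FALSE,
                                          "node is unclean",
                                          FALSE, data_set);

    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *rsc = (pe_resource_t *) gIter->data;

        /* Recurses into children; unmanaged resources are skipped inside */
        rsc_stonith_ordering(rsc, stonith_op, data_set);
    }
}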
3379 
3380 void
3382 {
3383  GListPtr gIter = NULL;
3384  pe_action_t *reload = NULL;
3385 
3386  if (rsc->children) {
3387  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3388  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3389 
3390  ReloadRsc(child_rsc, node, data_set);
3391  }
3392  return;
3393 
3394  } else if (rsc->variant > pe_native) {
3395  /* Complex resource with no children */
3396  return;
3397 
3398  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3399  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3400  return;
3401 
3402  } else if (is_set(rsc->flags, pe_rsc_failed)) {
 3403  /* We don't need to specify any particular actions here; normal failure
 3404  * recovery will apply.
3405  */
3406  pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
3407  return;
3408 
3409  } else if (is_set(rsc->flags, pe_rsc_start_pending)) {
3410  /* If a resource's configuration changed while a start was pending,
3411  * force a full restart.
3412  */
3413  pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
3414  stop_action(rsc, node, FALSE);
3415  return;
3416 
3417  } else if (node == NULL) {
3418  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3419  return;
3420  }
3421 
3422  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3423  set_bit(rsc->flags, pe_rsc_reload);
3424 
3425  reload = custom_action(
3426  rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
3427  pe_action_set_reason(reload, "resource definition change", FALSE);
3428 
3429  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
 3430  pe_order_optional|pe_order_then_cancels_first,
 3431  data_set);
3432  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
 3433  pe_order_optional|pe_order_then_cancels_first,
 3434  data_set);
3435 }
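
ReloadRsc orders the reload before any stop or demote of the same resource; because the ordering is optional and flagged pe_order_then_cancels_first, a stop or demote that does get scheduled cancels the now-pointless reload rather than waiting on it. The reload_key()/stop_key()/demote_key() macros build operation keys in the RESOURCE_ACTION_INTERVAL form. The standalone sketch below re-implements that naming convention with plain libc purely for illustration; it is not the real pcmk__op_key().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the RESOURCE_ACTION_INTERVAL key convention */
static char *
op_key(const char *rsc_id, const char *task, unsigned int interval_ms)
{
    size_t len = strlen(rsc_id) + strlen(task) + 32;
    char *key = malloc(len);

    snprintf(key, len, "%s_%s_%u", rsc_id, task, interval_ms);
    return key;
}

int
main(void)
{
    char *reload = op_key("myrsc", "reload", 0);
    char *stop = op_key("myrsc", "stop", 0);

    printf("%s\n%s\n", reload, stop);   /* myrsc_reload_0, myrsc_stop_0 */
    free(reload);
    free(stop);
    return 0;
}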
3436 
3437 void
3438 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
3439 {
3440  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3441  pe_resource_t *parent;
3442 
3443  if (value) {
3444  char *name = NULL;
3445 
 3446  name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
 3447  crm_xml_add(xml, name, value);
3448  free(name);
3449  }
3450 
3451  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3452  if (value) {
3453  char *name = NULL;
3454 
 3455  name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
 3456  crm_xml_add(xml, name, value);
3457  free(name);
3458  }
3459 
3460  for (parent = rsc; parent != NULL; parent = parent->parent) {
3461  if (parent->container) {
 3462  crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id);
 3463  }
3464  }
3465 }
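
native_append_meta() copies clone, remote-node, and container information into the action XML under CRM_meta_-prefixed attribute names. As a rough illustration of that naming rule (crm_meta_name() prefixes "CRM_meta_" and maps '-' to '_' so the result is usable as a shell variable), here is a simplified standalone re-implementation; it is a sketch, not the library function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for crm_meta_name(): prefix + '-' -> '_' mapping */
static char *
meta_name(const char *field)
{
    const char *prefix = "CRM_meta_";
    size_t len = strlen(prefix) + strlen(field) + 1;
    char *name = malloc(len);

    snprintf(name, len, "%s%s", prefix, field);
    for (char *p = name; *p != '\0'; ++p) {
        if (*p == '-') {
            *p = '_';
        }
    }
    return name;
}

int
main(void)
{
    char *clone = meta_name("clone");           /* XML_RSC_ATTR_INCARNATION */
    char *remote = meta_name("remote-node");    /* XML_RSC_ATTR_REMOTE_NODE */

    printf("%s\n%s\n", clone, remote);  /* CRM_meta_clone, CRM_meta_remote_node */
    free(clone);
    free(remote);
    return 0;
}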