def del_node(addr):
    '''
    Remove node from corosync
    '''
    f = open(conf()).read()
    p = Parser(f)
    nth = p.remove_section_where('nodelist.node', 'ring0_addr', addr)
    if nth == -1:
        return
    if p.count('nodelist.node') <= 2:
        p.set('quorum.two_node', '1')
    f = open(conf(), 'w')
    f.write(p.to_string())
    f.close()
    # check for running config
    try:
        nodes = utils.list_cluster_nodes()
    except Exception:
        nodes = []
    if nodes:
        utils.ext_cmd(["corosync-cmapctl", "-D",
                       "nodelist.node.%s.nodeid" % (nth)], shell=False)
        utils.ext_cmd(["corosync-cmapctl", "-D",
                       "nodelist.node.%s.ring0_addr" % (nth)], shell=False)

def cleanup_resource(rsc, node=''):
    if not utils.is_name_sane(rsc) or not utils.is_name_sane(node):
        return False
    if not node:
        rc = utils.ext_cmd(RscMgmt.rsc_cleanup_all % (rsc)) == 0
    else:
        rc = utils.ext_cmd(RscMgmt.rsc_cleanup % (rsc, node)) == 0
    return rc

def do_scores(self, context):
    "usage: scores"
    if utils.is_program('crm_simulate'):
        utils.ext_cmd('crm_simulate -sL')
    elif utils.is_program('ptest'):
        utils.ext_cmd('ptest -sL')
    else:
        context.fatal_error("Need crm_simulate or ptest in path to display scores")

def do_reprobe(self, context, *args):
    'usage: reprobe [<node>]'
    if len(args) == 1:
        if not utils.is_name_sane(args[0]):
            return False
        return utils.ext_cmd(self.rsc_reprobe_node % args[0]) == 0
    else:
        return utils.ext_cmd(self.rsc_reprobe) == 0

def do_status(self, context, rsc=None):
    "usage: status [<rsc>]"
    if rsc:
        if not utils.is_name_sane(rsc):
            return False
        return utils.ext_cmd(self.rsc_status % rsc) == 0
    else:
        return utils.ext_cmd(self.rsc_status_all) == 0

def add_node(name):
    '''
    Add node to corosync.conf
    '''
    coronodes = None
    nodes = None
    coronodes = utils.list_corosync_nodes()
    try:
        nodes = utils.list_cluster_nodes()
    except Exception:
        nodes = []
    ipaddr = get_ip(name)
    if name in coronodes or (ipaddr and ipaddr in coronodes):
        err_buf.warning("%s already in corosync.conf" % (name))
        return
    if name in nodes:
        err_buf.warning("%s already in configuration" % (name))
        return
    f = open(conf()).read()
    p = Parser(f)
    node_addr = name
    node_id = next_nodeid(p)
    p.add('nodelist',
          make_section('nodelist.node',
                       make_value('nodelist.node.ring0_addr', node_addr) +
                       make_value('nodelist.node.nodeid', str(node_id))))
    num_nodes = p.count('nodelist.node')
    if num_nodes > 2:
        p.remove('quorum.two_node')
    f = open(conf(), 'w')
    f.write(p.to_string())
    f.close()
    # update running config (if any)
    if nodes:
        utils.ext_cmd(["corosync-cmapctl", "-s",
                       "nodelist.node.%s.nodeid" % (num_nodes - 1),
                       "u32", str(node_id)], shell=False)
        utils.ext_cmd(["corosync-cmapctl", "-s",
                       "nodelist.node.%s.ring0_addr" % (num_nodes - 1),
                       "str", node_addr], shell=False)

def do_clearstate(self, context, node):
    'usage: clearstate <node>'
    if not utils.is_name_sane(node):
        return False
    if not config.core.force and \
            not utils.ask("Do you really want to drop state for node %s?" % node):
        return False
    if utils.is_pcmk_118():
        return utils.ext_cmd(self.node_clear_state_118 % node) == 0
    else:
        return utils.ext_cmd(self.node_clear_state % ("-M -c", node, node)) == 0 and \
            utils.ext_cmd(self.node_clear_state % ("-R", node, node)) == 0

def do_wait_for_startup(self, context, timeout='10'):
    "usage: wait_for_startup [<timeout>]"
    import time
    t0 = time.time()
    timeout = float(timeout)
    # discard crm_mon output; only the exit code matters here
    # (the original '2&>1 >/dev/null' redirection was broken shell)
    cmd = 'crm_mon -bD1 >/dev/null 2>&1'
    ret = utils.ext_cmd(cmd)
    while ret in (107, 64) and time.time() < t0 + timeout:
        time.sleep(1)
        ret = utils.ext_cmd(cmd)
    if ret != 0:
        context.fatal_error("Timed out waiting for cluster (rc = %s)" % (ret))

def do_delete(self, context, node):
    'usage: delete <node>'
    if not utils.is_name_sane(node):
        return False
    if not xmlutil.is_our_node(node):
        common_err("node %s not found in the CIB" % node)
        return False
    if not self._call_delnode(node):
        return False
    if utils.ext_cmd(self.node_delete % node) != 0 or \
            utils.ext_cmd(self.node_delete_status % node) != 0:
        common_err("%s removed from membership, but not from CIB!" % node)
        return False
    common_info("node %s deleted" % node)
    return True

def _use(self, name, withstatus):
    # Choose a shadow cib for further changes. If the name
    # provided is empty, then choose the live (cluster) cib.
    # Don't allow ' in shadow names
    if not name or name == "live":
        if withstatus:
            cib_status.load("live")
        if constants.tmp_cib:
            utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, utils.get_cib_in_use()))
            constants.tmp_cib = False
        utils.clear_cib_in_use()
    else:
        utils.set_cib_in_use(name)
        if withstatus:
            cib_status.load("shadow:%s" % name)
    return True

def do_online(self, context, node=None):
    'usage: online [<node>]'
    if not node:
        node = utils.this_node()
    if not utils.is_name_sane(node):
        return False
    return utils.ext_cmd(self.node_standby % (node, "off", "--lifetime='forever'")) == 0

def do_ticket(self, context, subcmd, ticket):
    "usage: ticket {grant|revoke|standby|activate|show|time|delete} <ticket>"
    base_cmd = "crm_ticket"
    if config.core.force:
        base_cmd += " --force"
    attr_cmd = _ticket_commands.get(subcmd)
    if not attr_cmd:
        context.fatal_error('Expected one of %s' % '|'.join(_ticket_commands.keys()))
    if not utils.is_name_sane(ticket):
        return False
    if subcmd not in ("show", "time"):
        return utils.ext_cmd(attr_cmd % (base_cmd, ticket)) == 0
    rc, l = utils.stdout2list(attr_cmd % (base_cmd, ticket))
    try:
        val = l[0]
    except IndexError:
        context.fatal_error("apparently nothing to show for ticket %s" % ticket)
    if subcmd == "show":
        _show(context, ticket, val)
    else:  # time
        _time(context, ticket, val)

def do_ready(self, context, node=None):
    'usage: ready [<node>]'
    if not node:
        node = utils.this_node()
    if not utils.is_name_sane(node):
        return False
    return utils.ext_cmd(self.node_maint % (node, "off")) == 0

def runop(self, op, nodes=None, local_only=False):
    '''
    Execute an operation.
    '''
    if not nodes or self.run_on_all(op):
        nodes = self.nodes
    self.last_op = op
    self.set_rscenv(op)
    real_op = (op == "probe" and "monitor" or op)
    cmd = self.exec_cmd(real_op)
    common_debug("running %s on %s" % (real_op, nodes))
    for attr in self.rscenv.keys():
        # shell doesn't allow "-" in var names
        envvar = attr.replace("-", "_")
        cmd = "%s=%s %s" % (envvar, quote(self.rscenv[attr]), cmd)
    if local_only:
        self.ec_l[this_node()] = ext_cmd(cmd)
    else:
        from crm_pssh import do_pssh_cmd
        statuses = do_pssh_cmd(cmd, nodes, self.outdir, self.errdir, self.timeout)
        for i in range(len(nodes)):
            try:
                self.ec_l[nodes[i]] = statuses[i]
            except:
                self.ec_l[nodes[i]] = self.undef
    return

def do_new(self, context, *args):
    "usage: new [<shadow_cib>] [withstatus] [force] [empty]"
    argl = list(args)
    opt_l = utils.fetch_opts(argl, ["force", "--force", "withstatus", "empty"])
    if len(argl) > 1:
        context.fatal_error("Unexpected argument(s): " + ' '.join(argl))
    name = None
    if argl:
        name = argl[0]
        if not utils.is_filename_sane(name):
            context.fatal_error("Bad filename: " + name)
        if name in (constants.tmp_cib_prompt, constants.live_cib_prompt):
            context.fatal_error("Shadow name '%s' is not allowed" % (name))
        del argl[0]
        constants.tmp_cib = False
    else:
        fd, fname = tmpfiles.create(dir=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
        name = os.path.basename(fname).replace("shadow.", "")
        constants.tmp_cib = True
    if "empty" in opt_l:
        new_cmd = "%s -e '%s'" % (self.extcmd, name)
    else:
        new_cmd = "%s -c '%s'" % (self.extcmd, name)
    if constants.tmp_cib or config.core.force or "force" in opt_l or "--force" in opt_l:
        new_cmd = "%s --force" % new_cmd
    if utils.ext_cmd(new_cmd) == 0:
        context.info("%s shadow CIB created" % name)
        self.do_use(context, name)
        if "withstatus" in opt_l:
            cib_status.load("shadow:%s" % name)

def do_new(self, context, *args):
    "usage: new [<shadow_cib>] [withstatus] [force] [empty]"
    argl = list(args)
    opt_l = utils.fetch_opts(argl, ["force", "--force", "withstatus", "empty"])
    if len(argl) > 1:
        context.fatal_error("Unexpected argument(s): " + ','.join(argl))
    name = None
    if argl:
        name = argl[0]
        if not utils.is_filename_sane(name):
            context.fatal_error("Bad filename: " + name)
        if name in (vars.tmp_cib_prompt, vars.live_cib_prompt):
            context.fatal_error("Shadow name '%s' is not allowed" % (name))
        del argl[0]
        vars.tmp_cib = False
    else:
        fd, fname = tmpfiles.create(dir=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
        name = os.path.basename(fname).replace("shadow.", "")
        vars.tmp_cib = True
    if "empty" in opt_l:
        new_cmd = "%s -e '%s'" % (self.extcmd, name)
    else:
        new_cmd = "%s -c '%s'" % (self.extcmd, name)
    if vars.tmp_cib or config.core.force or "force" in opt_l or "--force" in opt_l:
        new_cmd = "%s --force" % new_cmd
    if utils.ext_cmd(new_cmd) == 0:
        context.info("%s shadow CIB created" % name)
        self.do_use(context, name)
        if "withstatus" in opt_l:
            cib_status.load("shadow:%s" % name)

def _call_delnode(self, node):
    "Remove node (how depends on cluster stack)"
    rc = True
    if utils.cluster_stack() == "heartbeat":
        cmd = (self.hb_delnode % node)
    else:
        ec, s = utils.get_stdout("%s -p" % self.crm_node)
        if not s:
            common_err('%s -p could not list any nodes (rc=%d)' % (self.crm_node, ec))
            rc = False
        else:
            partition_l = s.split()
            if node in partition_l:
                common_err("according to %s, node %s is still active" % (self.crm_node, node))
                rc = False
        cmd = "%s --force -R %s" % (self.crm_node, node)
    if not rc:
        if config.core.force:
            common_info('proceeding with node %s removal' % node)
        else:
            return False
    ec = utils.ext_cmd(cmd)
    if ec != 0:
        common_warn('"%s" failed, rc=%d' % (cmd, ec))
        return False
    return True

def do_reset(self, context, name):
    "usage: reset <shadow_cib>"
    if not utils.is_filename_sane(name):
        context.fatal_error("Bad filename: " + name)
    if utils.ext_cmd("%s -r '%s'" % (self.extcmd, name)) == 0:
        context.info("copied live CIB to %s" % name)
    else:
        context.fatal_error("failed to copy live CIB to %s" % name)

def do_fence(self, context, node):
    'usage: fence <node>'
    if not utils.is_name_sane(node):
        return False
    if not config.core.force and \
            not utils.ask("Do you really want to shoot %s?" % node):
        return False
    return utils.ext_cmd(self.node_fence % (node)) == 0

def do_demote(self, context, rsc):
    "usage: demote <rsc>"
    if not utils.is_name_sane(rsc):
        return False
    if not xmlutil.RscState().is_ms(rsc):
        common_err("%s is not a master-slave resource" % rsc)
        return False
    return utils.ext_cmd(self.rsc_setrole % (rsc, "Slave")) == 0

def _dispatch_attr_cmd(cmd, attr_cmd, args):
    if args[1] == 'set':
        if len(args) != 4:
            raise ValueError("Expected 4 arguments to 'set'")
        if not utils.is_name_sane(args[0]) \
                or not utils.is_name_sane(args[2]) \
                or not utils.is_value_sane(args[3]):
            raise ValueError("Argument failed sanity check")
        return utils.ext_cmd(attr_cmd % (args[0], args[2], args[3])) == 0
    elif args[1] in ('delete', 'show') or \
            (cmd == "secret" and args[1] in ('stash', 'unstash', 'check')):
        if len(args) != 3:
            raise ValueError("Expected 3 arguments to " + args[1])
        if not utils.is_name_sane(args[0]) \
                or not utils.is_name_sane(args[2]):
            raise ValueError("Argument failed sanity check")
        return utils.ext_cmd(attr_cmd % (args[0], args[2])) == 0
    raise ValueError("Unknown command " + repr(args[1]))

def do_maintenance(self, context, resource, on_off='true'):
    'usage: maintenance <resource> [on|off|true|false]'
    on_off = on_off.lower()
    if on_off not in ('on', 'true', 'off', 'false'):
        context.fatal_error("Expected <resource> [on|off|true|false]")
    elif on_off in ('on', 'true'):
        on_off = 'true'
    else:
        on_off = 'false'
    return utils.ext_cmd(self.rsc_maintenance % (resource, on_off)) == 0

def do_delete(self, context, name):
    "usage: delete <shadow_cib>"
    if not utils.is_filename_sane(name):
        context.fatal_error("Bad filename: " + name)
    if utils.get_cib_in_use() == name:
        context.fatal_error("%s shadow CIB is in use" % name)
    if utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, name)) == 0:
        context.info("%s shadow CIB deleted" % name)
    else:
        context.fatal_error("failed to delete %s shadow CIB" % name)

def _dispatch_attr_cmd(cmd, attr_cmd, rsc, subcmd, attr, value):
    def sanity_check(arg):
        if not utils.is_name_sane(arg):
            raise ValueError("Expected valid name, got '%s'" % (arg))
    if subcmd == 'set':
        if value is None:
            raise ValueError("Missing value argument to set")
        sanity_check(rsc)
        sanity_check(attr)
        sanity_check(value)
        return utils.ext_cmd(attr_cmd % (rsc, attr, value)) == 0
    elif subcmd in ('delete', 'show') or \
            (cmd == "secret" and subcmd in ('stash', 'unstash', 'check')):
        if value is not None:
            raise ValueError("Too many arguments to %s" % (subcmd))
        sanity_check(rsc)
        sanity_check(attr)
        return utils.ext_cmd(attr_cmd % (rsc, attr)) == 0
    raise ValueError("Unknown command " + repr(subcmd))

def do_commit(self, context, name=None):
    "usage: commit [<shadow_cib>]"
    if name and not utils.is_filename_sane(name):
        context.fatal_error("Bad filename: " + name)
    if not name:
        name = utils.get_cib_in_use()
    if not name:
        context.fatal_error("There is nothing to commit")
    if utils.ext_cmd("%s -C '%s' --force" % (self.extcmd, name)) == 0:
        context.info("committed '%s' shadow CIB to the cluster" % name)
    else:
        context.fatal_error("failed to commit the %s shadow CIB" % name)
    if vars.tmp_cib:
        self._use('', '')

def do_commit(self, context, name=None):
    "usage: commit [<shadow_cib>]"
    if name and not utils.is_filename_sane(name):
        context.fatal_error("Bad filename: " + name)
    if not name:
        name = utils.get_cib_in_use()
    if not name:
        context.fatal_error("There is nothing to commit")
    if utils.ext_cmd("%s -C '%s' --force" % (self.extcmd, name)) == 0:
        context.info("committed '%s' shadow CIB to the cluster" % name)
    else:
        context.fatal_error("failed to commit the %s shadow CIB" % name)
    if constants.tmp_cib:
        self._use('', '')

def can_use_lrmadmin():
    from distutils import version
    # after this glue release all users can get meta-data and
    # similar from lrmd
    minimum_glue = "1.0.10"
    rc, glue_ver = get_stdout("%s -v" % lrmadmin_prog, stderr_on=False)
    if not glue_ver:  # lrmadmin probably not found
        return False
    v_min = version.LooseVersion(minimum_glue)
    v_this = version.LooseVersion(glue_ver)
    if v_this < v_min:
        return False
    if userdir.getuser() not in ("root", config.path.crm_daemon_user):
        return False
    if not (is_program(lrmadmin_prog) and is_process("lrmd")):
        return False
    return utils.ext_cmd(">/dev/null 2>&1 %s -C" % lrmadmin_prog) == 0

def do_ticket(self, context, subcmd, ticket):
    "usage: ticket {grant|revoke|standby|activate|show|time|delete} <ticket>"
    attr_cmd = _ticket_commands.get(subcmd)
    if not attr_cmd:
        context.fatal_error('Expected one of %s' % '|'.join(_ticket_commands.keys()))
    if not utils.is_name_sane(ticket):
        return False
    if subcmd not in ("show", "time"):
        return utils.ext_cmd(attr_cmd % ticket) == 0
    rc, l = utils.stdout2list(attr_cmd % ticket)
    try:
        val = l[0]
    except IndexError:
        context.fatal_error("apparently nothing to show for ticket %s" % ticket)
    if subcmd == "show":
        _show(context, ticket, val)
    else:  # time
        _time(context, ticket, val)

def _crm_simulate(self, cmd, nograph, scores, utilization, verbosity):
    if not self.origin:
        self.initialize()
    if verbosity:
        cmd = "%s -%s" % (cmd, verbosity.upper())
    if scores:
        cmd = "%s -s" % cmd
    if utilization:
        cmd = "%s -U" % cmd
    if config.core.dotty and not nograph:
        fd, dotfile = mkstemp()
        cmd = "%s -D %s" % (cmd, dotfile)
    else:
        dotfile = None
    rc = ext_cmd(cmd % self.source_file())
    if dotfile:
        show_dot_graph(dotfile)
    return rc == 0

def do_migrate(self, context, rsc, *args):
    """usage: migrate <rsc> [<node>] [<lifetime>] [force]"""
    if not utils.is_name_sane(rsc):
        return False
    node = None
    argl = list(args)
    force = "force" in utils.fetch_opts(argl, ["force"])
    lifetime = utils.fetch_lifetime_opt(argl)
    if len(argl) > 0:
        node = argl[0]
        if not xmlutil.is_our_node(node):
            context.fatal_error("Not our node: " + node)
    opts = ''
    if node:
        opts = "--node='%s'" % node
    if lifetime:
        opts = "%s --lifetime='%s'" % (opts, lifetime)
    if force or config.core.force:
        opts = "%s --force" % opts
    return utils.ext_cmd(self.rsc_migrate % (rsc, opts)) == 0

def do_standby(self, context, *args):
    'usage: standby [<node>] [<lifetime>]'
    argl = list(args)
    node = None
    lifetime = utils.fetch_lifetime_opt(argl, iso8601=False)
    if not argl:
        node = utils.this_node()
    elif len(argl) == 1:
        node = args[0]
        if not xmlutil.is_our_node(node):
            common_err("%s: node name not recognized" % node)
            return False
    else:
        syntax_err(args, context=context.get_command_name())
        return False
    opts = ''
    if lifetime:
        opts = "--lifetime='%s'" % lifetime
    else:
        opts = "--lifetime='forever'"
    return utils.ext_cmd(self.node_standby % (node, "on", opts)) == 0

def do_migrate(self, context, *args):
    """usage: migrate <rsc> [<node>] [<lifetime>] [force]"""
    argl = list(args)
    rsc = argl[0]
    if not utils.is_name_sane(rsc):
        return False
    del argl[0]
    node = None
    opt_l = utils.fetch_opts(argl, ["force"])
    lifetime = utils.fetch_lifetime_opt(argl)
    if len(argl) == 1:
        if xmlutil.is_our_node(argl[0]):
            node = argl[0]
        else:
            context.fatal_error("Not our node: " + argl[0])
    opts = ''
    if node:
        opts = "--node='%s'" % node
    if lifetime:
        opts = "%s --lifetime='%s'" % (opts, lifetime)
    if "force" in opt_l or config.core.force:
        opts = "%s --force" % opts
    return utils.ext_cmd(self.rsc_migrate % (rsc, opts)) == 0

def do_status(self, context, node=None):
    'usage: status [<node>]'
    a = node and ('--xpath "//nodes/node[@uname=\'%s\']"' % node) or \
        '-o nodes'
    return utils.ext_cmd("%s %s" % (xmlutil.cib_dump, a)) == 0