Example No. 1
def node_standby(argv, standby=True):
    if (len(argv) > 1) or (len(argv) > 0 and "--all" in utils.pcs_options):
        usage.node(["standby" if standby else "unstandby"])
        sys.exit(1)

    all_nodes = "--all" in utils.pcs_options
    node_list = [argv[0]] if argv else []
    wait = False
    timeout = None
    if "--wait" in utils.pcs_options:
        wait = True
        timeout = utils.pcs_options["--wait"]

    try:
        if wait:
            lib_pacemaker.ensure_resource_wait_support(utils.cmd_runner())
            valid_timeout = get_valid_timeout_seconds(timeout)
        if standby:
            lib_pacemaker.nodes_standby(utils.cmd_runner(), node_list,
                                        all_nodes)
        else:
            lib_pacemaker.nodes_unstandby(utils.cmd_runner(), node_list,
                                          all_nodes)
        if wait:
            lib_pacemaker.wait_for_resources(utils.cmd_runner(), valid_timeout)
    except LibraryError as e:
        utils.process_library_reports(e.args)
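The guard at the top enforces the command's contract: at most one node may be named, and an explicit node may not be combined with --all. A minimal, self-contained sketch of just that rule, assuming a plain dict in place of utils.pcs_options (function and variable names here are illustrative):

def validate_standby_args(argv, options):
    """Allow at most one node name, and never together with --all."""
    if len(argv) > 1 or (argv and "--all" in options):
        raise ValueError("specify a single node or --all, not both")
    return (argv[0] if argv else None), "--all" in options

print(validate_standby_args(["node1"], {}))      # ('node1', False)
print(validate_standby_args([], {"--all": ""}))  # (None, True)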
Example No. 2
File: node.py Project: idevat/pcs
def node_standby(argv, standby=True):
    if (len(argv) > 1) or (len(argv) > 0 and "--all" in utils.pcs_options):
        usage.node(["standby" if standby else "unstandby"])
        sys.exit(1)

    all_nodes = "--all" in utils.pcs_options
    node_list = [argv[0]] if argv else []
    wait = False
    timeout = None
    if "--wait" in utils.pcs_options:
        wait = True
        timeout = utils.pcs_options["--wait"]

    try:
        if wait:
            lib_pacemaker.ensure_resource_wait_support(utils.cmd_runner())
            valid_timeout = get_valid_timeout_seconds(timeout)
        if standby:
            lib_pacemaker.nodes_standby(
                utils.cmd_runner(), node_list, all_nodes
            )
        else:
            lib_pacemaker.nodes_unstandby(
                utils.cmd_runner(), node_list, all_nodes
            )
        if wait:
            lib_pacemaker.wait_for_resources(utils.cmd_runner(), valid_timeout)
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 3
def run_permission_add(argv):
    if len(argv) < 4:
        raise CmdLineInputError()
    role_id = argv.pop(0)
    permission_info_list = argv_to_permission_info_list(argv)

    cib = get_cib(get_cib_xml(utils.cmd_runner()))
    provide_role(cib, role_id)
    add_permissions_to_role(cib, role_id, permission_info_list)
    replace_cib_configuration(utils.cmd_runner(), cib)
Example No. 4
def run_create_role(argv):
    if len(argv) < 1:
        raise CmdLineInputError()
    role_id = argv.pop(0)
    description = ""
    desc_key = 'description='
    if argv and argv[0].startswith(desc_key) and len(argv[0]) > len(desc_key):
        description = argv.pop(0)[len(desc_key):]
    permission_info_list = argv_to_permission_info_list(argv)

    cib = get_cib(get_cib_xml(utils.cmd_runner()))
    create_role(cib, role_id, description)
    add_permissions_to_role(cib, role_id, permission_info_list)
    replace_cib_configuration(utils.cmd_runner(), cib)
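The description= handling only consumes argv[0] when the key actually carries a value, so a bare "description=" falls through to the permission parser. That step in isolation (pop_description is a hypothetical helper name):

def pop_description(argv):
    desc_key = "description="
    if argv and argv[0].startswith(desc_key) and len(argv[0]) > len(desc_key):
        return argv.pop(0)[len(desc_key):]
    return ""

args = ["description=read-only role", "read", "xpath", "/cib"]
print(pop_description(args))  # read-only role
print(args)                   # ['read', 'xpath', '/cib']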
Example No. 5
File: node.py Project: gmelikov/pcs
def node_pacemaker_status(lib, argv, dummy_modifiers):
    """
    Internal pcs-pcsd command
    """
    print(json.dumps(
        lib_pacemaker.get_local_node_status(utils.cmd_runner())
    ))
Example No. 6
File: node.py Project: idevat/pcs
def node_pacemaker_status():
    try:
        print(json.dumps(
            lib_pacemaker.get_local_node_status(utils.cmd_runner())
        ))
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 7
def cluster_local_node_status():
    """
    Return the status of the local cluster member.
    Example of output:
        {
            u'resources_running': 10,
            u'shutdown': False,
            u'name': 'cluster-node1',
            u'standby': False,
            u'standby_onfail': False,
            u'expected_up': True,
            u'is_dc': True,
            u'maintenance': False,
            u'online': True,
            u'offline': False,
            u'type': 'member',
            u'id': '1',
            u'pending': False,
            u'unclean': False
        }
    """
    try:
        node_status = lib_pacemaker.get_local_node_status(utils.cmd_runner())
    except LibraryError as exc:
        raise RuntimeError('Unable to get node status: {0}'.format('\n'.join(
            [item.message for item in exc.args])))
    return node_status
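A short sketch of consuming the dict this function returns, restricted to keys documented in the docstring above (the pacemaker query is replaced with sample data):

status = {
    "name": "cluster-node1", "online": True, "standby": False,
    "is_dc": True, "resources_running": 10,
}
if status["online"] and not status["standby"]:
    print("{0} is active and running {1} resources".format(
        status["name"], status["resources_running"]))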
Example No. 8
def node_pacemaker_status():
    try:
        print(
            json.dumps(lib_pacemaker.get_local_node_status(
                utils.cmd_runner())))
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 9
def get_fence_agent_info(lib, argv, modifiers):
    """
    Options: no options
    """
    del lib
    modifiers.ensure_only_supported()
    if len(argv) != 1:
        utils.err("One parameter expected")

    agent = argv[0]
    if not agent.startswith("stonith:"):
        utils.err("Invalid fence agent name")

    runner = utils.cmd_runner()

    try:
        metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
        info = metadata.get_full_info()
        info["name"] = "stonith:{0}".format(info["name"])
        print(json.dumps(info))
    except lib_ra.ResourceAgentError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_error_to_report_item(e)]
        )
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 10
def get_fence_agent_info(lib, argv, modifiers):
    """
    Options: no options
    """
    del lib
    modifiers.ensure_only_supported()
    if len(argv) != 1:
        utils.err("One parameter expected")

    agent = argv[0]
    if not agent.startswith("stonith:"):
        utils.err("Invalid fence agent name")

    runner = utils.cmd_runner()

    try:
        metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
        info = metadata.get_full_info()
        info["name"] = "stonith:{0}".format(info["name"])
        print(json.dumps(info))
    except lib_ra.ResourceAgentError as e:
        process_library_reports(
            [lib_ra.resource_agent_error_to_report_item(e)])
    except LibraryError as e:
        process_library_reports(e.args)
Example No. 11
def get_fence_agent_info(argv):
    if len(argv) != 1:
        utils.err("One parameter expected")

    agent = argv[0]
    if not agent.startswith("stonith:"):
        utils.err("Invalid fence agent name")

    runner = utils.cmd_runner()

    try:
        metadata_dom = lib_ra.get_fence_agent_metadata(
            runner,
            agent.split("stonith:", 1)[1]
        )
        metadata = lib_ra.get_agent_desc(metadata_dom)
        metadata["name"] = agent
        metadata["parameters"] = lib_ra.get_fence_agent_parameters(
            runner, metadata_dom
        )

        print(json.dumps(metadata))
    except lib_ra.ResourceAgentLibError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_lib_error_to_report_item(e)]
        )
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 12
def status_stonith_check():
    # We should read the default value from pacemaker. However, that may slow
    # pcs down as we need to run 'pengine metadata' to get it.
    stonith_enabled = True
    stonith_devices = []
    stonith_devices_id_action = []
    stonith_devices_id_method_cycle = []
    sbd_running = False

    cib = utils.get_cib_dom()
    for conf in cib.getElementsByTagName("configuration"):
        for crm_config in conf.getElementsByTagName("crm_config"):
            for nvpair in crm_config.getElementsByTagName("nvpair"):
                if (nvpair.getAttribute("name") == "stonith-enabled"
                        and is_false(nvpair.getAttribute("value"))):
                    stonith_enabled = False
                    break
            if not stonith_enabled:
                break
        for resource in conf.getElementsByTagName("primitive"):
            if resource.getAttribute("class") == "stonith":
                stonith_devices.append(resource)
                for attribs in resource.getElementsByTagName(
                        "instance_attributes"):
                    for nvpair in attribs.getElementsByTagName("nvpair"):
                        if (nvpair.getAttribute("name") == "action"
                                and nvpair.getAttribute("value")):
                            stonith_devices_id_action.append(
                                resource.getAttribute("id"))
                        if (nvpair.getAttribute("name") == "method"
                                and nvpair.getAttribute("value") == "cycle"):
                            stonith_devices_id_method_cycle.append(
                                resource.getAttribute("id"))

    if not utils.usefile:
        # check if SBD daemon is running
        try:
            sbd_running = utils.is_service_running(utils.cmd_runner(),
                                                   get_sbd_service_name())
        except LibraryError:
            pass

    if stonith_enabled and not stonith_devices and not sbd_running:
        print("WARNING: no stonith devices and stonith-enabled is not false")

    if stonith_devices_id_action:
        print(
            "WARNING: following stonith devices have the 'action' option set, "
            "it is recommended to set {0} instead: {1}".format(
                ", ".join(
                    ["'{0}'".format(x) for x in _STONITH_ACTION_REPLACED_BY]),
                ", ".join(sorted(stonith_devices_id_action))))
    if stonith_devices_id_method_cycle:
        print(
            "WARNING: following stonith devices have the 'method' option set "
            "to 'cycle' which is potentially dangerous, please consider using "
            "'onoff': {0}".format(", ".join(
                sorted(stonith_devices_id_method_cycle))))
Example No. 13
def _get_rule_status(rule_id, cib):
    _, _, retval = utils.cmd_runner().run(
        [settings.crm_rule, "--check", "--rule=" + rule_id, "-X-"], cib)
    translation_map = {
        CrmRuleReturnCode.IN_EFFECT.value: RULE_IN_EFFECT,
        CrmRuleReturnCode.EXPIRED.value: RULE_EXPIRED,
        CrmRuleReturnCode.TO_BE_IN_EFFECT.value: RULE_NOT_IN_EFFECT,
    }
    return translation_map.get(retval, RULE_UNKNOWN_STATUS)
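The dict-with-default lookup does all the work: known crm_rule exit codes map to symbolic states, and anything unexpected degrades to an unknown status instead of raising. The pattern in isolation (the numeric codes and state strings below are illustrative, not the pcs definitions):

RULE_IN_EFFECT, RULE_EXPIRED, RULE_NOT_IN_EFFECT, RULE_UNKNOWN_STATUS = (
    "in effect", "expired", "not yet in effect", "unknown"
)
translation_map = {
    0: RULE_IN_EFFECT,
    110: RULE_EXPIRED,
    111: RULE_NOT_IN_EFFECT,
}

def translate(retval):
    # Unknown exit codes (e.g. a crash) map to the fallback status rather
    # than raising KeyError.
    return translation_map.get(retval, RULE_UNKNOWN_STATUS)

print(translate(110))  # expired
print(translate(42))   # unknown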
Example No. 14
def node_pacemaker_status(lib, argv, modifiers):
    """
    Internal pcs-pcsd command
    """
    del lib
    del argv
    del modifiers
    print(json.dumps(
        lib_pacemaker.get_local_node_status(utils.cmd_runner())
    ))
Example No. 15
def wait_for_local_node_started(stop_at, interval):
    try:
        while True:
            time.sleep(interval)
            node_status = lib_pacemaker.get_local_node_status(
                utils.cmd_runner())
            if is_node_fully_started(node_status):
                return 0, "Started"
            if datetime.datetime.now() > stop_at:
                return 1, "Waiting timeout"
    except LibraryError as e:
        return 1, "Unable to get node status: {0}".format("\n".join(
            [build_report_message(item) for item in e.args]))
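The loop is a generic poll-until-deadline: sleep, re-check, and compare the clock against a precomputed stop time rather than counting iterations. A minimal sketch with the pacemaker status query swapped for an arbitrary check() callable:

import datetime
import time

def wait_until(check, stop_at, interval):
    while True:
        time.sleep(interval)
        if check():
            return 0, "Started"
        if datetime.datetime.now() > stop_at:
            return 1, "Waiting timeout"

stop_at = datetime.datetime.now() + datetime.timedelta(seconds=1)
print(wait_until(lambda: True, stop_at, 0.1))  # (0, 'Started')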
Example No. 16
def stonith_list_options(stonith_agent):
    runner = utils.cmd_runner()
    try:
        metadata = lib_ra.get_fence_agent_metadata(runner, stonith_agent)
        desc = lib_ra.get_agent_desc(metadata)
        params = lib_ra.get_fence_agent_parameters(runner, metadata)
        resource.resource_print_options(stonith_agent, desc, params)
    except lib_ra.ResourceAgentLibError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_lib_error_to_report_item(e)]
        )
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 17
def stonith_list_available(argv):
    if len(argv) != 0:
        filter_string = argv[0]
    else:
        filter_string = ""

    bad_fence_devices = [
        "kdump_send", "legacy", "na", "nss_wrapper", "pcmk", "vmware_helper",
        "ack_manual", "virtd", "sanlockd", "check", "tool", "node",
    ]
    fence_devices = sorted(glob.glob(utils.fence_bin + "fence_*"))
    for bfd in bad_fence_devices:
        try:
            fence_devices.remove(utils.fence_bin + "fence_"+bfd)
        except ValueError:
            continue

    if not fence_devices:
        utils.err(
            "No stonith agents available. Do you have fence agents installed?"
        )
    fence_devices_filtered = [fd for fd in fence_devices if filter_string in fd]
    if not fence_devices_filtered:
        utils.err("No stonith agents matching the filter.")

    for fd in fence_devices_filtered:
        sd = ""
        agent_name = os.path.basename(fd)
        if "--nodesc" not in utils.pcs_options:
            try:
                metadata = lib_ra.get_fence_agent_metadata(
                    utils.cmd_runner(), agent_name
                )
                shortdesc = lib_ra.get_agent_desc(metadata)["shortdesc"]
                if shortdesc:
                    sd = " - " + resource.format_desc(
                        len(agent_name) + 3, shortdesc
                    )
            except lib_ra.ResourceAgentLibError as e:
                utils.process_library_reports([
                    lib_ra.resource_agent_lib_error_to_report_item(
                        e, ReportItemSeverity.WARNING
                    )
                ])
            except LibraryError as e:
                utils.err(
                    e.args[-1].message, False
                )
                continue
        print(agent_name + sd)
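Agent discovery here is just a glob over fence_* executables, a deny-list of helper binaries, and a substring filter. That part in isolation (the directory and the abridged deny-list are assumptions; the real code uses utils.fence_bin and the full list above):

import glob
import os

fence_bin = "/usr/sbin/"  # assumed agent directory
excluded = {"kdump_send", "virtd", "check", "tool", "node"}  # abridged
agents = sorted(
    path for path in glob.glob(os.path.join(fence_bin, "fence_*"))
    if os.path.basename(path)[len("fence_"):] not in excluded
)
filter_string = "ipmi"
print([os.path.basename(a) for a in agents if filter_string in a])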
Example No. 18
def stonith_list_options(stonith_agent):
    runner = utils.cmd_runner()
    try:
        metadata = lib_ra.get_fence_agent_metadata(runner, stonith_agent)
        desc = lib_ra.get_agent_desc(metadata)
        params = lib_ra.get_fence_agent_parameters(runner, metadata)
        # Fence agents just list the actions, usually without any attributes.
        # We could print them but it wouldn't add any useful information.
        resource.resource_print_options(stonith_agent, desc, params, actions=[])
    except lib_ra.ResourceAgentLibError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_lib_error_to_report_item(e)]
        )
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 19
def get_fence_agent_info(argv):
    # This is used only by pcsd, will be removed in new architecture
    if len(argv) != 1:
        utils.err("One parameter expected")

    agent = argv[0]
    if not agent.startswith("stonith:"):
        utils.err("Invalid fence agent name")

    runner = utils.cmd_runner()

    try:
        metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
        info = metadata.get_full_info()
        info["name"] = "stonith:{0}".format(info["name"])
        print(json.dumps(info))
    except lib_ra.ResourceAgentError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_error_to_report_item(e)])
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 20
def stonith_create(argv):
    if len(argv) < 2:
        usage.stonith(["create"])
        sys.exit(1)

    stonith_id = argv.pop(0)
    stonith_type = argv.pop(0)
    st_values, op_values, meta_values = resource.parse_resource_options(
        argv, with_clone=False
    )

    try:
        metadata = lib_ra.StonithAgent(
            utils.cmd_runner(),
            stonith_type
        )
        if metadata.get_provides_unfencing():
            meta_values = [
                meta for meta in meta_values if not meta.startswith("provides=")
            ]
            meta_values.append("provides=unfencing")
    except lib_ra.ResourceAgentError as e:
        forced = utils.get_modificators().get("force", False)
        if forced:
            severity = ReportItemSeverity.WARNING
        else:
            severity = ReportItemSeverity.ERROR
        utils.process_library_reports([
            lib_ra.resource_agent_error_to_report_item(
                e, severity, not forced
            )
        ])
    except LibraryError as e:
        utils.process_library_reports(e.args)

    resource.resource_create(
        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values,
        group=utils.pcs_options.get("--group", None)
    )
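The meta-attribute rewrite deserves a close look: any user-supplied provides= value is discarded and provides=unfencing is pinned whenever the agent reports unfencing support. The list surgery on its own:

meta_values = ["target-role=Stopped", "provides=something"]
# Drop any user-supplied "provides=..." entry, then force the value the
# agent requires.
meta_values = [
    meta for meta in meta_values if not meta.startswith("provides=")
]
meta_values.append("provides=unfencing")
print(meta_values)  # ['target-role=Stopped', 'provides=unfencing']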
Example No. 21
def get_fence_agent_info(argv):
    # This is used only by pcsd, will be removed in new architecture
    if len(argv) != 1:
        utils.err("One parameter expected")

    agent = argv[0]
    if not agent.startswith("stonith:"):
        utils.err("Invalid fence agent name")

    runner = utils.cmd_runner()

    try:
        metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
        info = metadata.get_full_info()
        info["name"] = "stonith:{0}".format(info["name"])
        print(json.dumps(info))
    except lib_ra.ResourceAgentError as e:
        utils.process_library_reports(
            [lib_ra.resource_agent_error_to_report_item(e)]
        )
    except LibraryError as e:
        utils.process_library_reports(e.args)
Example No. 22
def node_pacemaker_status(lib, argv, modifiers):
    print(json.dumps(lib_pacemaker.get_local_node_status(utils.cmd_runner())))
Example No. 23
def nodes_status(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file - for config subcommand and not for both or corosync
      * --corosync_conf - only for config subcommand

    NOTE: modifiers check is in subcommand
    """
    del lib
    if len(argv) == 1 and (argv[0] == "config"):
        modifiers.ensure_only_supported("-f", "--corosync_conf")
        if utils.hasCorosyncConf():
            corosync_nodes, report_list = get_existing_nodes_names(
                utils.get_corosync_conf_facade())
            if report_list:
                process_library_reports(report_list)
        else:
            corosync_nodes = []
        try:
            pacemaker_nodes = sorted([
                node.attrs.name for node in ClusterState(
                    get_cluster_status_dom(
                        utils.cmd_runner())).node_section.nodes
                if node.attrs.type != "remote"
            ])
        except LibraryError as e:
            process_library_reports(e.args)
        print("Corosync Nodes:")
        if corosync_nodes:
            print(" " + " ".join(corosync_nodes))
        print("Pacemaker Nodes:")
        if pacemaker_nodes:
            print(" " + " ".join(pacemaker_nodes))

        return

    if len(argv) == 1 and (argv[0] == "corosync" or argv[0] == "both"):
        modifiers.ensure_only_supported()
        all_nodes, report_list = get_existing_nodes_names(
            utils.get_corosync_conf_facade())
        if report_list:
            process_library_reports(report_list)
        online_nodes = utils.getCorosyncActiveNodes()
        offline_nodes = []
        for node in all_nodes:
            if node not in online_nodes:
                offline_nodes.append(node)

        online_nodes.sort()
        offline_nodes.sort()
        print("Corosync Nodes:")
        print(" ".join([" Online:"] + online_nodes))
        print(" ".join([" Offline:"] + offline_nodes))
        if argv[0] != "both":
            sys.exit(0)

    modifiers.ensure_only_supported("-f")
    info_dom = utils.getClusterState()

    nodes = info_dom.getElementsByTagName("nodes")
    if nodes.length == 0:
        utils.err("No nodes section found")

    onlinenodes = []
    offlinenodes = []
    standbynodes = []
    standbynodes_with_resources = []
    maintenancenodes = []
    remote_onlinenodes = []
    remote_offlinenodes = []
    remote_standbynodes = []
    remote_standbynodes_with_resources = []
    remote_maintenancenodes = []
    for node in nodes[0].getElementsByTagName("node"):
        node_name = node.getAttribute("name")
        node_remote = node.getAttribute("type") == "remote"
        if node.getAttribute("online") == "true":
            if node.getAttribute("standby") == "true":
                is_running_resources = (node.getAttribute("resources_running")
                                        != "0")
                if node_remote:
                    if is_running_resources:
                        remote_standbynodes_with_resources.append(node_name)
                    else:
                        remote_standbynodes.append(node_name)
                else:
                    if is_running_resources:
                        standbynodes_with_resources.append(node_name)
                    else:
                        standbynodes.append(node_name)
            if node.getAttribute("maintenance") == "true":
                if node_remote:
                    remote_maintenancenodes.append(node_name)
                else:
                    maintenancenodes.append(node_name)
            if (node.getAttribute("standby") == "false"
                    and node.getAttribute("maintenance") == "false"):
                if node_remote:
                    remote_onlinenodes.append(node_name)
                else:
                    onlinenodes.append(node_name)
        else:
            if node_remote:
                remote_offlinenodes.append(node_name)
            else:
                offlinenodes.append(node_name)

    print("Pacemaker Nodes:")
    print(" ".join([" Online:"] + onlinenodes))
    print(" ".join([" Standby:"] + standbynodes))
    print(" ".join([" Standby with resource(s) running:"] +
                   standbynodes_with_resources))
    print(" ".join([" Maintenance:"] + maintenancenodes))
    print(" ".join([" Offline:"] + offlinenodes))

    print("Pacemaker Remote Nodes:")
    print(" ".join([" Online:"] + remote_onlinenodes))
    print(" ".join([" Standby:"] + remote_standbynodes))
    print(" ".join([" Standby with resource(s) running:"] +
                   remote_standbynodes_with_resources))
    print(" ".join([" Maintenance:"] + remote_maintenancenodes))
    print(" ".join([" Offline:"] + remote_offlinenodes))
Example No. 24
def cluster_destroy(argv):
    if argv:
        raise CmdLineInputError()
    if "--all" in utils.pcs_options:
        # destroy remote and guest nodes
        cib = None
        lib_env = utils.get_lib_env()
        try:
            cib = lib_env.get_cib()
        except LibraryError:
            warn("Unable to load CIB to get guest and remote nodes from it, "
                 "those nodes will not be deconfigured.")
        if cib is not None:
            try:
                all_remote_nodes = get_existing_nodes_names(cib=cib)
                if len(all_remote_nodes) > 0:
                    _destroy_pcmk_remote_env(lib_env,
                                             all_remote_nodes,
                                             skip_offline_nodes=True,
                                             allow_fails=True)
            except LibraryError as e:
                utils.process_library_reports(e.args)

        # destroy full-stack nodes
        destroy_cluster(utils.get_corosync_conf_facade().get_nodes_names())
    else:
        print("Shutting down pacemaker/corosync services...")
        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
            # Returns an error if a service is not running. It is safe to
            # ignore it since we want it not to be running anyway.
            utils.stop_service(service)
        print("Killing any remaining services...")
        kill_local_cluster_services()
        try:
            utils.disableServices()
        except:
            # previously errors were suppressed in here, let's keep it that way
            # for now
            pass
        try:
            disable_service(utils.cmd_runner(), lib_sbd.get_sbd_service_name())
        except:
            # it's not a big deal if sbd disable fails
            pass

        print("Removing all cluster configuration files...")
        dummy_output, dummy_retval = utils.run([
            "rm",
            "-f",
            settings.corosync_conf_file,
            settings.corosync_authkey_file,
            settings.pacemaker_authkey_file,
        ])
        state_files = [
            "cib.xml*", "cib-*", "core.*", "hostcache", "cts.*", "pe*.bz2",
            "cib.*"
        ]
        for name in state_files:
            dummy_output, dummy_retval = utils.run([
                "find", "/var/lib/pacemaker", "-name", name, "-exec", "rm",
                "-f", "{}", ";"
            ])
        try:
            qdevice_net.client_destroy()
        except:
            # errors from deleting other files are suppressed as well
            # we do not want to fail if qdevice was not set up
            pass
Example No. 25
def config_restore_local(infile_name, infile_obj):
    """
    Commandline options: no options
    """
    if (
        utils.is_service_running(utils.cmd_runner(), "corosync")
        or
        utils.is_service_running(utils.cmd_runner(), "pacemaker")
        or
        utils.is_service_running(utils.cmd_runner(), "pacemaker_remote")
    ):
        utils.err(
            "Cluster is currently running on this node. You need to stop "
                "the cluster in order to restore the configuration."
        )

    file_list = config_backup_path_list(with_uid_gid=True)
    tarball_file_list = []
    version = None
    tmp_dir = None
    try:
        tarball = tarfile.open(infile_name, "r|*", infile_obj)
        while True:
            # next(tarball) does not work in python2.6
            tar_member_info = tarball.next()
            if tar_member_info is None:
                break
            if tar_member_info.name == "version.txt":
                version_data = tarball.extractfile(tar_member_info)
                version = version_data.read()
                version_data.close()
                continue
            tarball_file_list.append(tar_member_info.name)
        tarball.close()

        required_file_list = [
            tar_path
            for tar_path, path_info in file_list.items()
                if path_info["required"]
        ]
        missing = set(required_file_list) - set(tarball_file_list)
        if missing:
            utils.err(
                "unable to restore the cluster, missing files in backup: %s"
                % ", ".join(missing)
            )

        config_backup_check_version(version)

        if infile_obj:
            infile_obj.seek(0)
        tarball = tarfile.open(infile_name, "r|*", infile_obj)
        while True:
            # next(tarball) does not work in python2.6
            tar_member_info = tarball.next()
            if tar_member_info is None:
                break
            extract_info = None
            path = tar_member_info.name
            while path:
                if path in file_list:
                    extract_info = file_list[path]
                    break
                path = os.path.dirname(path)
            if not extract_info:
                continue
            path_full = None
            if hasattr(extract_info.get("pre_store_call"), '__call__'):
                extract_info["pre_store_call"]()
            if "rename" in extract_info and extract_info["rename"]:
                if tmp_dir is None:
                    tmp_dir = tempfile.mkdtemp()
                tarball.extractall(tmp_dir, [tar_member_info])
                path_full = extract_info["path"]
                shutil.move(
                    os.path.join(tmp_dir, tar_member_info.name),
                    path_full
                )
            else:
                dir_path = os.path.dirname(extract_info["path"])
                tarball.extractall(dir_path, [tar_member_info])
                path_full = os.path.join(dir_path, tar_member_info.name)
            file_attrs = extract_info["attrs"]
            os.chmod(path_full, file_attrs["mode"])
            os.chown(path_full, file_attrs["uid"], file_attrs["gid"])
        tarball.close()
    except (tarfile.TarError, EnvironmentError, OSError) as e:
        utils.err("unable to restore the cluster: %s" % e)
    finally:
        if tmp_dir:
            shutil.rmtree(tmp_dir, ignore_errors=True)

    try:
        sig_path = os.path.join(settings.cib_dir, "cib.xml.sig")
        if os.path.exists(sig_path):
            os.remove(sig_path)
    except EnvironmentError as e:
        utils.err("unable to remove %s: %s" % (sig_path, e))
Example No. 26
File: config.py Project: ldming/pcs
def config_restore_local(infile_name, infile_obj):
    """
    Commandline options: no options
    """
    if (is_service_running(utils.cmd_runner(), "corosync")
            or is_service_running(utils.cmd_runner(), "pacemaker")
            or is_service_running(utils.cmd_runner(), "pacemaker_remote")):
        utils.err(
            "Cluster is currently running on this node. You need to stop "
            "the cluster in order to restore the configuration.")

    file_list = config_backup_path_list(with_uid_gid=True)
    tarball_file_list = []
    version = None
    tmp_dir = None
    try:
        tarball = tarfile.open(infile_name, "r|*", infile_obj)
        while True:
            # next(tarball) does not work in python2.6
            tar_member_info = tarball.next()
            if tar_member_info is None:
                break
            if tar_member_info.name == "version.txt":
                version_data = tarball.extractfile(tar_member_info)
                version = version_data.read()
                version_data.close()
                continue
            tarball_file_list.append(tar_member_info.name)
        tarball.close()

        required_file_list = [
            tar_path for tar_path, path_info in file_list.items()
            if path_info["required"]
        ]
        missing = set(required_file_list) - set(tarball_file_list)
        if missing:
            utils.err(
                "unable to restore the cluster, missing files in backup: %s" %
                ", ".join(missing))

        config_backup_check_version(version)

        if infile_obj:
            infile_obj.seek(0)
        tarball = tarfile.open(infile_name, "r|*", infile_obj)
        while True:
            # next(tarball) does not work in python2.6
            tar_member_info = tarball.next()
            if tar_member_info is None:
                break
            extract_info = None
            path = tar_member_info.name
            while path:
                if path in file_list:
                    extract_info = file_list[path]
                    break
                path = os.path.dirname(path)
            if not extract_info:
                continue
            path_full = None
            if hasattr(extract_info.get("pre_store_call"), "__call__"):
                extract_info["pre_store_call"]()
            if "rename" in extract_info and extract_info["rename"]:
                if tmp_dir is None:
                    tmp_dir = tempfile.mkdtemp()
                tarball.extractall(tmp_dir, [tar_member_info])
                path_full = extract_info["path"]
                shutil.move(os.path.join(tmp_dir, tar_member_info.name),
                            path_full)
            else:
                dir_path = os.path.dirname(extract_info["path"])
                tarball.extractall(dir_path, [tar_member_info])
                path_full = os.path.join(dir_path, tar_member_info.name)
            file_attrs = extract_info["attrs"]
            os.chmod(path_full, file_attrs["mode"])
            os.chown(path_full, file_attrs["uid"], file_attrs["gid"])
        tarball.close()
    except (tarfile.TarError, EnvironmentError, OSError) as e:
        utils.err("unable to restore the cluster: %s" % e)
    finally:
        if tmp_dir:
            shutil.rmtree(tmp_dir, ignore_errors=True)

    try:
        sig_path = os.path.join(settings.cib_dir, "cib.xml.sig")
        if os.path.exists(sig_path):
            os.remove(sig_path)
    except EnvironmentError as e:
        utils.err("unable to remove %s: %s" % (sig_path, e))
Example No. 27
def cluster_push(argv):
    if len(argv) > 2:
        usage.cluster(["cib-push"])
        sys.exit(1)

    filename = None
    scope = None
    timeout = None
    diff_against = None

    if "--wait" in utils.pcs_options:
        timeout = utils.validate_wait_get_timeout()
    for arg in argv:
        if "=" not in arg:
            filename = arg
        else:
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope":
                if "--config" in utils.pcs_options:
                    utils.err("Cannot use both scope and --config")
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    scope = arg_value
            elif arg_name == "diff-against":
                diff_against = arg_value
            else:
                usage.cluster(["cib-push"])
                sys.exit(1)
    if "--config" in utils.pcs_options:
        scope = "configuration"
    if diff_against and scope:
        utils.err("Cannot use both scope and diff-against")
    if not filename:
        usage.cluster(["cib-push"])
        sys.exit(1)

    try:
        new_cib_dom = xml.dom.minidom.parse(filename)
        if scope and not new_cib_dom.getElementsByTagName(scope):
            utils.err("unable to push cib, scope '%s' not present in new cib" %
                      scope)
    except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
        utils.err("unable to parse new cib: %s" % e)

    if diff_against:
        try:
            xml.dom.minidom.parse(diff_against)
        except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
            utils.err("unable to parse original cib: %s" % e)
        runner = utils.cmd_runner()
        command = [
            "crm_diff", "--original", diff_against, "--new", filename,
            "--no-version"
        ]
        patch, error, dummy_retval = runner.run(command)
        # dummy_retval == 1 means one of two things:
        # a) an error has occurred
        # b) --original and --new differ
        # therefore the return value alone cannot tell whether an error
        # occurred
        if error.strip():
            utils.err("unable to diff the CIBs:\n" + error)
        if not patch.strip():
            print(
                "The new CIB is the same as the original CIB, nothing to push."
            )
            sys.exit(0)

        command = ["cibadmin", "--patch", "--xml-pipe"]
        output, error, retval = runner.run(command, patch)
        if retval != 0:
            utils.err("unable to push cib\n" + error + output)

    else:
        command = ["cibadmin", "--replace", "--xml-file", filename]
        if scope:
            command.append("--scope=%s" % scope)
        output, retval = utils.run(command)
        if retval != 0:
            utils.err("unable to push cib\n" + output)

    print("CIB updated")

    if "--wait" not in utils.pcs_options:
        return
    cmd = ["crm_resource", "--wait"]
    if timeout:
        cmd.extend(["--timeout", str(timeout)])
    output, retval = utils.run(cmd)
    if retval != 0:
        msg = []
        if retval == settings.pacemaker_wait_timeout_status:
            msg.append("waiting timeout")
        if output:
            msg.append("\n" + output)
        utils.err("\n".join(msg).strip())
Example No. 28
def status_stonith_check(modifiers):
    """
    Commandline options:
      * -f - CIB file, to get stonith devices and the cluster property
        stonith-enabled from the CIB, to determine whether we are working with
        files or a live cluster
    """
    # pylint: disable=too-many-nested-blocks
    # We should read the default value from pacemaker. However, that may slow
    # pcs down as we need to run 'pacemaker-schedulerd metadata' to get it.
    warnings = []
    stonith_enabled = True
    stonith_devices = []
    stonith_devices_id_action = []
    stonith_devices_id_method_cycle = []
    sbd_running = False

    cib = utils.get_cib_dom()
    for conf in cib.getElementsByTagName("configuration"):
        for crm_config in conf.getElementsByTagName("crm_config"):
            for nvpair in crm_config.getElementsByTagName("nvpair"):
                if (nvpair.getAttribute("name") == "stonith-enabled"
                        and is_false(nvpair.getAttribute("value"))):
                    stonith_enabled = False
                    break
            if not stonith_enabled:
                break
        for resource_el in conf.getElementsByTagName("primitive"):
            if resource_el.getAttribute("class") == "stonith":
                stonith_devices.append(resource_el)
                for attribs in resource_el.getElementsByTagName(
                        "instance_attributes"):
                    for nvpair in attribs.getElementsByTagName("nvpair"):
                        if (nvpair.getAttribute("name") == "action"
                                and nvpair.getAttribute("value")):
                            stonith_devices_id_action.append(
                                resource_el.getAttribute("id"))
                        if (nvpair.getAttribute("name") == "method"
                                and nvpair.getAttribute("value") == "cycle"):
                            stonith_devices_id_method_cycle.append(
                                resource_el.getAttribute("id"))

    if not modifiers.is_specified("-f"):
        # check if SBD daemon is running
        try:
            sbd_running = utils.is_service_running(utils.cmd_runner(),
                                                   get_sbd_service_name())
        except LibraryError:
            pass

    if stonith_enabled and not stonith_devices and not sbd_running:
        warnings.append("No stonith devices and stonith-enabled is not false")

    if stonith_devices_id_action:
        warnings.append(
            "Following stonith devices have the 'action' option set, "
            "it is recommended to set {0} instead: {1}".format(
                ", ".join(
                    ["'{0}'".format(x) for x in _STONITH_ACTION_REPLACED_BY]),
                ", ".join(sorted(stonith_devices_id_action))))
    if stonith_devices_id_method_cycle:
        warnings.append(
            "Following stonith devices have the 'method' option set "
            "to 'cycle' which is potentially dangerous, please consider using "
            "'onoff': {0}".format(", ".join(
                sorted(stonith_devices_id_method_cycle))))
    return warnings
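The stonith-enabled detection is a plain DOM scan for one nvpair. The same check against a minimal CIB fragment, with is_false stubbed to the usual pacemaker falsy spellings (an assumption about the real helper):

import xml.dom.minidom

def is_false(value):
    return value.lower() in ("false", "off", "no", "n", "f", "0")

cib = xml.dom.minidom.parseString(
    '<crm_config><nvpair name="stonith-enabled" value="false"/></crm_config>'
)
stonith_enabled = True
for nvpair in cib.getElementsByTagName("nvpair"):
    if (nvpair.getAttribute("name") == "stonith-enabled"
            and is_false(nvpair.getAttribute("value"))):
        stonith_enabled = False
print(stonith_enabled)  # False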
Example No. 29
def full_status(lib, argv, modifiers):
    """
    Options:
      * --hide-inactive - hide inactive resources
      * --full - show full details, node attributes and failcount
      * -f - CIB file, crm_mon accepts CIB_file environment variable
      * --corosync_conf - file corocync.conf
      * --request-timeout - HTTP timeout for node authorization check
    """
    modifiers.ensure_only_supported(
        "--hide-inactive",
        "--full",
        "-f",
        "--corosync_conf",
        "--request-timeout",
    )
    if argv:
        raise CmdLineInputError()
    if (modifiers.is_specified("--hide-inactive")
            and modifiers.is_specified("--full")):
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = [
        os.path.join(settings.pacemaker_binaries, "crm_mon"), "--one-shot"
    ]
    if not modifiers.get("--hide-inactive"):
        monitor_command.append('--inactive')
    if modifiers.get("--full"):
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"])
        # by default, pending and failed actions are displayed
        # with --full, we display the whole history
        if is_fence_history_supported():
            monitor_command.append("--fence-history=3")

    stdout, stderr, retval = utils.cmd_runner().run(monitor_command)

    if retval != 0:
        utils.err("cluster is not currently running on this node")

    warnings = []
    if stderr.strip():
        for line in stderr.strip().splitlines():
            if line.startswith("DEBUG: "):
                if modifiers.get("--full"):
                    warnings.append(line)
            else:
                warnings.append(line)
    warnings.extend(status_stonith_check(modifiers))

    print("Cluster name: %s" % utils.getClusterName())
    if warnings:
        print()
        print("WARNINGS:")
        print("\n".join(warnings))
        print()
    print(stdout)

    if modifiers.get("--full"):
        tickets, retval = utils.run(["crm_ticket", "-L"])
        if retval != 0:
            print("WARNING: Unable to get information about tickets")
            print()
        elif tickets:
            print("Tickets:")
            print("\n".join(indent(tickets.split("\n"))))

    if not (modifiers.is_specified("-f")
            or modifiers.is_specified("--corosync_conf")):
        # do this only if in live environment
        if modifiers.get("--full"):
            print_pcsd_daemon_status(lib, modifiers)
            print()
        utils.serviceStatus("  ")
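The crm_mon invocation is assembled incrementally from the modifiers. The same assembly as a standalone function over a plain options dict (a sketch, not the pcs modifiers API):

import os

def build_monitor_command(options, binaries="/usr/sbin"):
    cmd = [os.path.join(binaries, "crm_mon"), "--one-shot"]
    if not options.get("--hide-inactive"):
        cmd.append("--inactive")
    if options.get("--full"):
        cmd.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"]
        )
    return cmd

print(build_monitor_command({"--full": True}))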
Example No. 30
def node_pacemaker_status(lib, argv, modifiers):
    print(json.dumps(
        lib_pacemaker.get_local_node_status(utils.cmd_runner())
    ))