Example #1
def node_clear(env, node_name, allow_clear_cluster_node=False):
    """
    Remove specified node from various cluster caches.

    LibraryEnvironment env -- provides all communication with externals
    string node_name -- name of the node to be removed from cluster caches
    bool allow_clear_cluster_node -- flag allows to clear node even if it's
        still in a cluster
    """
    mocked_envs = []
    if not env.is_cib_live:
        mocked_envs.append("CIB")
    if not env.is_corosync_conf_live:
        mocked_envs.append("COROSYNC_CONF")
    if mocked_envs:
        raise LibraryError(reports.live_environment_required(mocked_envs))

    current_nodes = get_existing_nodes_names(env.get_corosync_conf(),
                                             env.get_cib())
    if node_name in current_nodes:
        env.report_processor.process(
            reports.get_problem_creator(
                report_codes.FORCE_CLEAR_CLUSTER_NODE,
                allow_clear_cluster_node)(
                    reports.node_to_clear_is_still_in_cluster, node_name))

    remove_node(env.cmd_runner(), node_name)
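
The reports.get_problem_creator call above is pcs's forceable-error pattern: the same report is emitted as a warning when the matching force flag (allow_clear_cluster_node) is set, and as a fatal error otherwise. A minimal self-contained sketch of the idea (make_problem_creator and the report tuple shape are illustrative, not the real pcs API):

def make_problem_creator(force_code, is_forced):
    # returns a factory that downgrades an error to a warning when forced
    def create(report_builder, *args):
        severity = "WARNING" if is_forced else "ERROR"
        return (severity, force_code, report_builder(*args))
    return create

def node_still_in_cluster(node_name):
    return f"node '{node_name}' is still a part of the cluster"

creator = make_problem_creator("FORCE_CLEAR_CLUSTER_NODE", is_forced=True)
print(creator(node_still_in_cluster, "node1"))
# ('WARNING', 'FORCE_CLEAR_CLUSTER_NODE', "node 'node1' is still a part of the cluster")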
Example #2
def location_add(lib, argv, modifiers):
    """
    Options:
      * --force - allow unknown options, allow constraint for any resource type
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("--force", "-f")
    if len(argv) < 4:
        raise CmdLineInputError()

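    # positional arguments: <constraint id> <resource> <node> <score>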
    constraint_id = argv.pop(0)
    rsc_type, rsc_value = parse_args.parse_typed_arg(
        argv.pop(0), [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE)
    node = argv.pop(0)
    score = argv.pop(0)
    options = []
    # For now we only allow setting resource-discovery
    for arg in argv:
        if "=" not in arg:
            raise CmdLineInputError(f"bad option '{arg}'")
        name, value = arg.split("=", 1)
        if name != "resource-discovery" and not modifiers.get("--force"):
            utils.err(f"bad option '{name}', use --force to override")
        options.append((name, value))

    id_valid, id_error = utils.validate_xml_id(constraint_id, 'constraint id')
    if not id_valid:
        utils.err(id_error)

    if not utils.is_score(score):
        utils.err("invalid score '%s', use integer or INFINITY or -INFINITY" %
                  score)

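    # some features are only supported by newer CIB schema versions; upgrade
    # the CIB below when such a feature is requested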
    required_version = None
    if any(name == "resource-discovery" for name, dummy_value in options):
        required_version = (2, 2, 0)
    if rsc_type == RESOURCE_TYPE_REGEXP:
        required_version = (2, 6, 0)

    if required_version:
        dom = utils.cluster_upgrade_to_version(required_version)
    else:
        dom = utils.get_cib_dom()

    if rsc_type == RESOURCE_TYPE_RESOURCE:
        rsc_valid, rsc_error, dummy_correct_id = (
            utils.validate_constraint_resource(dom, rsc_value))
        if not rsc_valid:
            utils.err(rsc_error)

    # Verify that specified node exists in the cluster
    if not (modifiers.is_specified("-f") or modifiers.get("--force")):
        lib_env = utils.get_lib_env()
        existing_nodes = get_existing_nodes_names(
            corosync_conf=lib_env.get_corosync_conf(),
            cib=lib_env.get_cib(),
        )
        if node not in existing_nodes:
            raise error(f"Node '{node}' does not seem to be in the cluster"
                        ", use --force to override")
    else:
        warn(LOCATION_NODE_VALIDATION_SKIP_MSG)

    # Verify current constraint doesn't already exist
    # If it does we replace it with the new constraint
    dummy_dom, constraintsElement = getCurrentConstraints(dom)
    elementsToRemove = []
    # If the id matches, or the rsc & node match, then we replace/remove
    for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'):
        # pylint: disable=too-many-boolean-expressions
        if (rsc_loc.getAttribute("id") == constraint_id
                or (rsc_loc.getAttribute("node") == node and
                    ((RESOURCE_TYPE_RESOURCE == rsc_type
                      and rsc_loc.getAttribute("rsc") == rsc_value) or
                     (RESOURCE_TYPE_REGEXP == rsc_type
                      and rsc_loc.getAttribute("rsc-pattern") == rsc_value)))):
            elementsToRemove.append(rsc_loc)
    for etr in elementsToRemove:
        constraintsElement.removeChild(etr)

    element = dom.createElement("rsc_location")
    element.setAttribute("id", constraint_id)
    if rsc_type == RESOURCE_TYPE_RESOURCE:
        element.setAttribute("rsc", rsc_value)
    elif rsc_type == RESOURCE_TYPE_REGEXP:
        element.setAttribute("rsc-pattern", rsc_value)
    element.setAttribute("node", node)
    element.setAttribute("score", score)
    for option in options:
        element.setAttribute(option[0], option[1])
    constraintsElement.appendChild(element)

    utils.replace_cib_configuration(dom)
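
The parse_args.parse_typed_arg call near the top splits an optionally type-prefixed argument such as regexp%dummy.* into a (type, value) pair; the "%" delimiter matches the pcs command-line syntax. A rough self-contained sketch of such a helper (the implementation details are an assumption):

RESOURCE_TYPE_RESOURCE = "resource"
RESOURCE_TYPE_REGEXP = "regexp"

def parse_typed_arg(arg, allowed_types, default_type):
    # "value" -> (default_type, "value"); "type%value" -> ("type", "value")
    if "%" not in arg:
        return default_type, arg
    arg_type, value = arg.split("%", 1)
    if arg_type not in allowed_types:
        raise ValueError(f"'{arg_type}' is not an allowed type")
    return arg_type, value

print(parse_typed_arg(
    "regexp%dummy.*",
    [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
    RESOURCE_TYPE_RESOURCE,
))
# ('regexp', 'dummy.*')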
Example #3
def cluster_destroy(argv):
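    """
    Destroy cluster configuration on the local node. With --all in
    utils.pcs_options, also deconfigure remote and guest nodes and destroy
    the cluster on all of its nodes.
    """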
    if argv:
        raise CmdLineInputError()
    if "--all" in utils.pcs_options:
        # destroy remote and guest nodes
        cib = None
        lib_env = utils.get_lib_env()
        try:
            cib = lib_env.get_cib()
        except LibraryError:
            warn("Unable to load CIB to get guest and remote nodes from it, "
                 "those nodes will not be deconfigured.")
        if cib is not None:
            try:
                all_remote_nodes = get_existing_nodes_names(cib=cib)
                if all_remote_nodes:
                    _destroy_pcmk_remote_env(
                        lib_env,
                        all_remote_nodes,
                        skip_offline_nodes=True,
                        allow_fails=True,
                    )
            except LibraryError as e:
                utils.process_library_reports(e.args)

        # destroy full-stack nodes
        destroy_cluster(utils.get_corosync_conf_facade().get_nodes_names())
    else:
        print("Shutting down pacemaker/corosync services...")
        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
            # Returns an error if a service is not running. It is safe to
            # ignore it since we want it not to be running anyways.
            utils.stop_service(service)
        print("Killing any remaining services...")
        kill_local_cluster_services()
        try:
            utils.disableServices()
        except Exception:
            # previously errors were suppressed in here, let's keep it that way
            # for now
            pass
        try:
            disable_service(utils.cmd_runner(), lib_sbd.get_sbd_service_name())
        except Exception:
            # it's not a big deal if sbd disable fails
            pass

        print("Removing all cluster configuration files...")
        dummy_output, dummy_retval = utils.run([
            "rm",
            "-f",
            settings.corosync_conf_file,
            settings.corosync_authkey_file,
            settings.pacemaker_authkey_file,
        ])
        state_files = [
            "cib.xml*", "cib-*", "core.*", "hostcache", "cts.*", "pe*.bz2",
            "cib.*"
        ]
        for name in state_files:
            dummy_output, dummy_retval = utils.run([
                "find", "/var/lib/pacemaker", "-name", name, "-exec", "rm",
                "-f", "{}", ";"
            ])
        try:
            qdevice_net.client_destroy()
        except Exception:
            # errors from deleting other files are suppressed as well
            # we do not want to fail if qdevice was not set up
            pass
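
The state-file cleanup above shells out to find/rm once per pattern. For illustration only, here is a pure-Python sketch of the same step using pathlib (requires Python 3.8+ for unlink(missing_ok=True); this is not the pcs implementation):

from pathlib import Path

STATE_FILE_PATTERNS = (
    "cib.xml*", "cib-*", "core.*", "hostcache", "cts.*", "pe*.bz2", "cib.*",
)

def remove_state_files(base="/var/lib/pacemaker"):
    for pattern in STATE_FILE_PATTERNS:
        # Path.rglob recurses like "find <base> -name <pattern>"
        for path in Path(base).rglob(pattern):
            if path.is_file():
                path.unlink(missing_ok=True)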