Example #1
0
def node_add_guest(
    env, node_name, resource_id, options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides everything needed for communication
        with external systems
    string node_name -- name of the guest node
    string resource_id -- the resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag controlling whether to wait for the cluster to
        settle (pacemaker idle mechanism)
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # Report these only when the CIB is live; otherwise we have just
        # reported that the corosync checks are skipped, so we should not
        # complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib,
        existing_nodes_names,
        existing_nodes_addrs,
        node_name,
        options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)
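
For orientation, here is a minimal, hypothetical usage sketch of node_add_guest as defined above. The LibraryEnvironment instance env, the node name "guest-node1", and the resource id "vm-node1" are illustrative assumptions, not part of the original code; the keyword arguments follow the signature shown in the example.

# Hypothetical call: turn the existing resource "vm-node1" into a guest node.
# When "remote-addr" is omitted, the command defaults it to an address from
# known-hosts (or to the node name) and reports which address was used.
node_add_guest(
    env,                              # LibraryEnvironment (assumed to exist)
    node_name="guest-node1",          # name of the new guest node
    resource_id="vm-node1",           # existing resource to convert
    options={"remote-port": "3121"},  # optional guest node options
    skip_offline_nodes=True,          # tolerate offline cluster nodes
    wait=False,                       # do not wait for the cluster to settle
)
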
Example #2
0
def node_add_guest(
    env, node_name, resource_id, options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides everything needed for communication
        with external systems
    string node_name -- name of the guest node
    string resource_id -- the resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag controlling whether to wait for the cluster to
        settle (pacemaker idle mechanism)
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # Report these only when the CIB is live; otherwise we have just
        # reported that the corosync checks are skipped, so we should not
        # complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib,
        existing_nodes_names,
        existing_nodes_addrs,
        node_name,
        options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)
Example #3
0
def node_add_remote(
    env, node_name, node_addr, operations, meta_attributes, instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides everything needed for communication
        with external systems
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow operations that are not
        listed in the resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow instance
        attributes that are not listed in the resource agent metadata and
        allow required instance_attributes to be omitted
    bool use_default_operations -- if True, add operations specified in
        the resource agent metadata to the resource
    mixed wait -- a flag controlling whether to wait for the cluster to
        settle (pacemaker idle mechanism)
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # Report these only when the CIB is live; otherwise we have just
        # reported that the corosync checks are skipped, so we should not
        # complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes
    )
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # The same id conflict may be reported both by the node validation
        # above and by the resource creation. Until the validation is
        # separated from the resource creation, deduplicate the reports here.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and
                report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
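
A similar hypothetical usage sketch for node_add_remote follows. The env instance and the node name are again illustrative assumptions; passing node_addr=None exercises the defaulting branch above, which takes an address from known-hosts or falls back to the node name.

# Hypothetical call: create an ocf:pacemaker:remote resource and use it as
# a remote node, letting the address default and taking the operations from
# the resource agent metadata.
node_add_remote(
    env,                          # LibraryEnvironment (assumed to exist)
    node_name="remote-node1",     # name of the new remote node
    node_addr=None,               # default from known-hosts / node name
    operations=[],                # no explicitly entered operations
    meta_attributes={},
    instance_attributes={},
    use_default_operations=True,  # add operations from the agent metadata
    wait=False,
)
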
Example #4
0
def node_add_remote(
    env, node_name, node_addr, operations, meta_attributes, instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides everything needed for communication
        with external systems
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow operations that are not
        listed in the resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow instance
        attributes that are not listed in the resource agent metadata and
        allow required instance_attributes to be omitted
    bool use_default_operations -- if True, add operations specified in
        the resource agent metadata to the resource
    mixed wait -- a flag controlling whether to wait for the cluster to
        settle (pacemaker idle mechanism)
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # Report these only when the CIB is live; otherwise we have just
        # reported that the corosync checks are skipped, so we should not
        # complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes
    )
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # The same id conflict may be reported both by the node validation
        # above and by the resource creation. Until the validation is
        # separated from the resource creation, deduplicate the reports here.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and
                report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])