Code example #1
File: resource.py  Project: visioncritical/pcs
def _get_nodes_to_validate_against(env, tree):
    if not env.is_corosync_conf_live and env.is_cib_live:
        raise LibraryError(
            reports.live_environment_required(["COROSYNC_CONF"])
        )

    if not env.is_cib_live and env.is_corosync_conf_live:
        #we do not try to get corosync.conf from live cluster when cib is not
        #taken from live cluster
        return get_existing_nodes_names_addrs(cib=tree)

    return get_existing_nodes_names_addrs(env.get_corosync_conf(), cib=tree)
Code example #2
File: resource.py  Project: tomjelinek/pcs
def _get_nodes_to_validate_against(env, tree):
    if not env.is_corosync_conf_live and env.is_cib_live:
        raise LibraryError(
            reports.live_environment_required(["COROSYNC_CONF"])
        )

    if not env.is_cib_live and env.is_corosync_conf_live:
        #we do not try to get corosync.conf from live cluster when cib is not
        #taken from live cluster
        return get_existing_nodes_names_addrs(cib=tree)

    return get_existing_nodes_names_addrs(env.get_corosync_conf(), cib=tree)
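
For context, a minimal standalone sketch of the branch logic above. The FakeEnv stand-in and describe_branch helper are illustrative only; the real env and tree objects come from the pcs library and are not constructed here.

# Illustrative sketch: which branch of _get_nodes_to_validate_against is taken
# for each combination of the two "live" flags.
from collections import namedtuple

FakeEnv = namedtuple("FakeEnv", "is_corosync_conf_live is_cib_live")

def describe_branch(env):
    if not env.is_corosync_conf_live and env.is_cib_live:
        return "error: COROSYNC_CONF must be live when the CIB is live"
    if not env.is_cib_live and env.is_corosync_conf_live:
        return "node names and addresses are read from the CIB only"
    return "node names and addresses are read from corosync.conf and the CIB"

for flags in [(True, True), (False, True), (True, False), (False, False)]:
    print(flags, "->", describe_branch(FakeEnv(*flags)))
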
Code example #3
File: remote_node.py  Project: kmalyjur/pcs
def node_add_guest(
    env: LibraryEnvironment,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait: WaitType = False,
):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    corosync_conf: Optional[CorosyncConfigFacade]
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB, )))
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            if new_target:
                new_addr = new_target.first_addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source)))
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                new_addr = known_hosts[0].dest.addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source)))

    # validate inputs
    report_list = guest_node.validate_set_as_guest(cib, existing_nodes_names,
                                                   existing_nodes_addrs,
                                                   node_name, options)
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib"))

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, resource_id)
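
A hedged usage sketch for node_add_guest: the wrapper below only mirrors the signature documented above. The env argument is assumed to be an already-initialized LibraryEnvironment supplied by the caller, the import path is assumed from the usual pcs source layout, and the option values are placeholders.

# Illustrative only: "env" must be a pcs LibraryEnvironment provided by the caller.
from pcs.lib.commands.remote_node import node_add_guest

def add_guest_example(env):
    # Turn the existing resource "vm-guest1" into a guest node named "guest1".
    # Option keys follow the docstring above; values are placeholders.
    node_add_guest(
        env,
        node_name="guest1",
        resource_id="vm-guest1",
        options={
            "remote-addr": "192.0.2.10",      # omit to default from known-hosts
            "remote-port": "3121",
            "remote-connect-timeout": "60s",
        },
        skip_offline_nodes=False,
        wait=False,
    )
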
Code example #4
File: remote_node.py  Project: kmalyjur/pcs
def node_add_remote(
    env: LibraryEnvironment,
    node_name: str,
    node_addr: Optional[str],
    operations: Iterable[Mapping[str, str]],
    meta_attributes: Mapping[str, str],
    instance_attributes: Mapping[str, str],
    skip_offline_nodes: bool = False,
    allow_incomplete_distribution: bool = False,
    allow_pacemaker_remote_service_fail: bool = False,
    allow_invalid_operation: bool = False,
    allow_invalid_instance_attributes: bool = False,
    use_default_operations: bool = True,
    wait: WaitType = False,
):
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    env -- provides all for communication with externals
    node_name -- the name of the new node
    node_addr -- the address of the new node or None for default
    operations -- attributes for each entered operation
    meta_attributes -- attributes for primitive/meta_attributes
    instance_attributes -- attributes for primitive/instance_attributes
    skip_offline_nodes -- if True, ignore when some nodes are offline
    allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    allow_invalid_operation -- if True, allow to use operations that
        are not listed in a resource agent metadata
    allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow to
        omit required instance_attributes
    use_default_operations -- if True, add operations specified in
        a resource agent metadata to the resource
    wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib(
        minimal_version=get_required_cib_version_for_primitive(operations))
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf: Optional[CorosyncConfigFacade] = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB, )))
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    try:
        resource_agent_facade = ResourceAgentFacadeFactory(
            env.cmd_runner(),
            report_processor).facade_from_parsed_name(remote_node.AGENT_NAME)
    except ResourceAgentError as e:
        report_processor.report(resource_agent_error_to_report_item(e))
        raise LibraryError() from e

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            if new_target:
                node_addr = new_target.first_addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME)
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source)))
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                node_addr = known_hosts[0].dest.addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME)
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source)))

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent_facade.metadata,
        node_name,
        node_addr,
        instance_attributes,
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent_facade,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report_item in report_list + list(e.args):
            # pylint: disable=no-member
            dto_obj = report_item.message.to_dto()
            if dto_obj.code not in (
                    reports.codes.ID_ALREADY_EXISTS,
                    reports.codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report_item)
            elif ("id" in dto_obj.payload
                  and dto_obj.payload["id"] not in already_exists):
                unified_report_list.append(report_item)
                already_exists.append(dto_obj.payload["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib"))

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
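
Similarly, a hedged usage sketch for node_add_remote. Again, env is assumed to be a LibraryEnvironment supplied by the caller, the import path is assumed from the pcs source layout, and the operation and attribute values are placeholders.

# Illustrative only: "env" must be a pcs LibraryEnvironment provided by the caller.
from pcs.lib.commands.remote_node import node_add_remote

def add_remote_example(env):
    # Create an ocf:pacemaker:remote resource and use it as a remote node.
    node_add_remote(
        env,
        node_name="remote1",
        node_addr=None,                       # None -> defaulted from known-hosts
        operations=[{"name": "monitor", "interval": "60s"}],
        meta_attributes={},
        instance_attributes={},
        skip_offline_nodes=False,
        wait=False,
    )
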
Code example #5
def node_add_remote(
    env, node_name, node_addr, operations, meta_attributes, instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that
        are not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow to
        omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in
        a resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        #Check unique id conflict with check against nodes. Until validation
        #resource create is not separated, we need to make unique post
        #validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and
                report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
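
The except-LibraryError branch above merges the validation reports with the reports raised during resource creation and drops duplicate id-conflict reports. A standalone sketch of that de-duplication follows; the namedtuple is a stand-in for the pcs report item class, not the real type.

# Illustrative de-duplication of id-conflict reports, as done post-validation above.
from collections import namedtuple

Report = namedtuple("Report", "code info")
ID_ALREADY_EXISTS = "ID_ALREADY_EXISTS"

def dedup_reports(report_list):
    already_exists = []
    unified = []
    for report in report_list:
        if report.code != ID_ALREADY_EXISTS:
            unified.append(report)
        elif report.info.get("id") not in already_exists:
            unified.append(report)
            already_exists.append(report.info["id"])
    return unified

reports_in = [
    Report(ID_ALREADY_EXISTS, {"id": "node1"}),
    Report(ID_ALREADY_EXISTS, {"id": "node1"}),   # duplicate, dropped
    Report("OTHER", {}),
]
print(dedup_reports(reports_in))  # keeps the first "node1" report and the "OTHER" one
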
Code example #6
File: remote_node.py  Project: tomjelinek/pcs
def node_add_guest(
    env, node_name, resource_id, options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib,
        existing_nodes_names,
        existing_nodes_addrs,
        node_name,
        options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)
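
The remote-addr defaulting above follows one pattern: prefer an address from known-hosts, otherwise fall back to the node name. A standalone sketch of that fallback, with a plain dict standing in for the env.get_known_hosts() lookup:

# Illustrative only: the dict replaces the known-hosts lookup done via env.
def default_remote_addr(options, node_name, known_hosts):
    if options.get("remote-addr") is None:
        options["remote-addr"] = known_hosts.get(node_name, node_name)
    return options

print(default_remote_addr({}, "guest1", {"guest1": "192.0.2.10"}))
print(default_remote_addr({}, "guest2", {}))  # falls back to the node name
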
Code example #7
File: remote_node.py  Project: tomjelinek/pcs
def node_add_remote(
    env, node_name, node_addr, operations, meta_attributes, instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that
        are not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow to
        omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in
        a resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)

    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names, [node_name],
            skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes
    )
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        #Check unique id conflict with check against nodes. Until validation
        #resource create is not separated, we need to make unique post
        #validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and
                report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])