def node_add_remote(
    env: LibraryEnvironment,
    node_name: str,
    node_addr: Optional[str],
    operations: Iterable[Mapping[str, str]],
    meta_attributes: Mapping[str, str],
    instance_attributes: Mapping[str, str],
    skip_offline_nodes: bool = False,
    allow_incomplete_distribution: bool = False,
    allow_pacemaker_remote_service_fail: bool = False,
    allow_invalid_operation: bool = False,
    allow_invalid_instance_attributes: bool = False,
    use_default_operations: bool = True,
    wait: WaitType = False,
):
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    env -- provides all for communication with externals
    node_name -- the name of the new node
    node_addr -- the address of the new node or None for default
    operations -- attributes for each entered operation
    meta_attributes -- attributes for primitive/meta_attributes
    instance_attributes -- attributes for primitive/instance_attributes
    skip_offline_nodes -- if True, ignore when some nodes are offline
    allow_incomplete_distribution -- if True, allow this command to finish
        successfully even if file distribution did not succeed
    allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    allow_invalid_operation -- if True, allow to use operations that are not
        listed in a resource agent metadata
    allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    use_default_operations -- if True, add operations specified in a resource
        agent metadata to the resource
    wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    report_processor = env.report_processor
    cib = env.get_cib(
        minimal_version=get_required_cib_version_for_primitive(operations)
    )
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf: Optional[CorosyncConfigFacade] = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB,
                )
            )
        )
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor, env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            if new_target:
                node_addr = new_target.first_addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
                )
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source
                    )
                )
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                node_addr = known_hosts[0].dest.addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
                )
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source
                    )
                )
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes,
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report_item in report_list + list(e.args):
            # pylint: disable=no-member
            dto_obj = report_item.message.to_dto()
            if dto_obj.code not in (
                reports.codes.ID_ALREADY_EXISTS,
                reports.codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report_item)
            elif (
                "id" in dto_obj.payload
                and dto_obj.payload["id"] not in already_exists
            ):
                unified_report_list.append(report_item)
                already_exists.append(dto_obj.payload["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
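# Illustrative usage sketch (an assumption, not part of the pcs source): how a
# caller that already holds a LibraryEnvironment might invoke the
# type-annotated variant above. The env value, node name, and the
# operation/attribute contents below are hypothetical placeholders; only the
# keyword parameter names follow the signature shown.
def _example_add_remote_node(env: LibraryEnvironment) -> None:
    node_add_remote(
        env,
        node_name="remote-node-1",  # placeholder name of the new remote node
        node_addr=None,  # None lets the command pick a default address
        operations=[
            # each operation is a mapping of operation attributes (assumed keys)
            {"name": "monitor", "interval": "60s", "timeout": "30s"},
        ],
        meta_attributes={},
        instance_attributes={},
        skip_offline_nodes=False,
        allow_incomplete_distribution=False,
        allow_pacemaker_remote_service_fail=False,
        allow_invalid_operation=False,
        allow_invalid_instance_attributes=False,
        use_default_operations=True,
        wait=False,
    )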
def node_add_remote(
    env,
    host,
    node_name,
    operations,
    meta_attributes,
    instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that are
        not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in a
        resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    # TODO
    # * make the node name mandatory and the node address optional
    # * in this function interface and comment
    # * in cli - do not fill the addr if not specified
    # * in usage and man page
    # * get a target factory from lib.env
    # * use the factory to turn node names to targets
    # * this will create reports for unknown node names (not authenticated)
    # * if the node addr is not specified, use the first addr from the matching
    #   target
    # * pass the target to communication functions instead of the node name
    # * do not create targets again and again in each function
    _ensure_consistently_live_env(env)
    env.ensure_wait_satisfiable(wait)

    cib = env.get_cib()
    existing_nodes_names, existing_nodes_addrs = get_existing_nodes_names_addrs(
        env.get_corosync_conf(), cib
    )

    resource_agent = remote_node.get_agent(
        env.report_processor, env.cmd_runner()
    )

    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        host,
        instance_attributes,
    )

    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            host,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code != report_codes.ID_ALREADY_EXISTS:
                unified_report_list.append(report)
            elif report.info["id"] not in already_exists:
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    env.report_processor.process_list(report_list)

    _prepare_pacemaker_remote_environment(
        env,
        existing_nodes_names,
        node_name,
        skip_offline_nodes,
        allow_incomplete_distribution,
        allow_pacemaker_remote_service_fail,
    )
    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
def node_add_remote(
    env,
    host,
    node_name,
    operations,
    meta_attributes,
    instance_attributes,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env provides all for communication with externals
    list of dict operations contains attributes for each entered operation
    dict meta_attributes contains attributes for primitive/meta_attributes
    dict instance_attributes contains attributes for
        primitive/instance_attributes
    bool allow_incomplete_distribution -- is a flag for allowing this command
        to finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- is a flag for allowing this
        command to finish successfully even if starting/enabling
        pacemaker_remote did not succeed
    bool allow_invalid_operation is a flag for allowing to use operations that
        are not listed in a resource agent metadata
    bool allow_invalid_instance_attributes is a flag for allowing to use
        instance attributes that are not listed in a resource agent metadata
        or for allowing to not use the instance_attributes that are required
        in resource agent metadata
    bool use_default_operations is a flag controlling whether default cib
        operations (specified in a resource agent) are added
    mixed wait is a flag for controlling waiting for pacemaker idle mechanism
    """
    _ensure_consistently_live_env(env)
    env.ensure_wait_satisfiable(wait)

    cib = env.get_cib()
    current_nodes = get_nodes(env.get_corosync_conf(), cib)

    resource_agent = remote_node.get_agent(
        env.report_processor, env.cmd_runner()
    )

    report_list = remote_node.validate_create(
        current_nodes,
        resource_agent,
        host,
        node_name,
        instance_attributes,
    )

    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            host,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code != report_codes.ID_ALREADY_EXISTS:
                unified_report_list.append(report)
            elif report.info["id"] not in already_exists:
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    env.report_processor.process_list(report_list)

    _prepare_pacemaker_remote_environment(
        env,
        current_nodes,
        host,
        allow_incomplete_distribution,
        allow_pacemaker_remote_service_fail,
    )
    env.push_cib(cib, wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
def node_add_remote(
    env,
    node_name,
    node_addr,
    operations,
    meta_attributes,
    instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that are
        not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in a
        resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor, env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes,
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
def node_add_remote(
    env,
    node_name,
    node_addr,
    operations,
    meta_attributes,
    instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that are
        not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in a
        resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor, env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes,
    )

    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and report.info["id"] not in already_exists
            ):
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])