def _get_nodes_to_validate_against(env, tree):
    """
    Return the cluster nodes that validations should be run against.

    LibraryEnvironment env provides all for communication with externals
    etree tree -- cib tree to get nodes from

    Raise LibraryError when corosync.conf is mocked while the cib is live,
    since that combination gives no consistent source of node information.
    """
    corosync_is_live = env.is_corosync_conf_live
    cib_is_live = env.is_cib_live

    if cib_is_live and not corosync_is_live:
        raise LibraryError(
            reports.live_environment_required(["COROSYNC_CONF"])
        )

    if corosync_is_live and not cib_is_live:
        # we do not try to get corosync.conf from live cluster when cib is not
        # taken from live cluster
        return get_nodes(tree=tree)

    return get_nodes(env.get_corosync_conf(), tree)
def node_clear(env, node_name, allow_clear_cluster_node=False):
    """
    Remove specified node from various cluster caches.

    LibraryEnvironment env provides all for communication with externals
    string node_name
    bool allow_clear_cluster_node -- flag allows to clear node even if it's
        still in a cluster
    """
    # both the cib and corosync.conf must come from the live cluster
    mocked_envs = [
        env_label
        for env_label, is_live in (
            ("CIB", env.is_cib_live),
            ("COROSYNC_CONF", env.is_corosync_conf_live),
        )
        if not is_live
    ]
    if mocked_envs:
        raise LibraryError(reports.live_environment_required(mocked_envs))

    current_nodes = get_nodes(env.get_corosync_conf(), env.get_cib())
    node_still_in_cluster = (
        node_addresses_contain_name(current_nodes, node_name)
        or node_addresses_contain_host(current_nodes, node_name)
    )
    if node_still_in_cluster:
        # warn (or error unless forced) when the node still appears to be a
        # cluster member
        problem_creator = reports.get_problem_creator(
            report_codes.FORCE_CLEAR_CLUSTER_NODE,
            allow_clear_cluster_node,
        )
        env.report_processor.process(
            problem_creator(
                reports.node_to_clear_is_still_in_cluster,
                node_name,
            )
        )

    remove_node(env.cmd_runner(), node_name)
def node_add_guest(
    env, node_name, resource_id, options,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    """
    setup resource (resource_id) as guest node and setup node as guest

    LibraryEnvironment env provides all for communication with externals
    string resource_id -- specifies resource that should be guest node
    dict options could contain keys remote-node, remote-port, remote-addr,
        remote-connect-timeout
    bool allow_incomplete_distribution -- is a flag for allowing successfully
        finish this command even if is file distribution not succeeded
    bool allow_pacemaker_remote_service_fail -- is a flag for allowing
        successfully finish this command even if starting/enabling
        pacemaker_remote not succeeded
    mixed wait is flag for controlling waiting for pacemaker iddle mechanism
    """
    # refuse to run with a mocked cib or corosync.conf, and make sure waiting
    # is supported before doing any work
    _ensure_consistently_live_env(env)
    env.ensure_wait_satisfiable(wait)

    cib = env.get_cib()
    current_nodes = get_nodes(env.get_corosync_conf(), cib)

    report_list = guest_node.validate_set_as_guest(
        cib, current_nodes, node_name, options
    )
    try:
        resource_element = find_element_by_tag_and_id(
            primitive.TAG, get_resources(cib), resource_id
        )
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    except LibraryError as e:
        # collect the lookup failure as reports instead of raising, so all
        # validation problems are reported together below
        report_list.extend(e.args)

    # NOTE(review): if find_element_by_tag_and_id raised, resource_element is
    # unbound here — this appears safe only because process_list is expected
    # to raise on the collected error reports; confirm that guarantee holds
    env.report_processor.process_list(report_list)

    guest_node.set_as_guest(
        resource_element,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    # distribute authkey and start/enable pacemaker_remote on the new node
    _prepare_pacemaker_remote_environment(
        env,
        current_nodes,
        guest_node.get_host_from_options(node_name, options),
        allow_incomplete_distribution,
        allow_pacemaker_remote_service_fail,
    )

    env.push_cib(cib, wait)
    if wait:
        _ensure_resource_running(env, resource_id)
def node_add_remote(
    env, host, node_name, operations, meta_attributes, instance_attributes,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    """
    create resource ocf:pacemaker:remote and use it as remote node

    LibraryEnvironment env provides all for communication with externals
    list of dict operations contains attributes for each entered operation
    dict meta_attributes contains attributes for primitive/meta_attributes
    dict instance_attributes contains attributes for
        primitive/instance_attributes
    bool allow_incomplete_distribution -- is a flag for allowing successfully
        finish this command even if is file distribution not succeeded
    bool allow_pacemaker_remote_service_fail -- is a flag for allowing
        successfully finish this command even if starting/enabling
        pacemaker_remote not succeeded
    bool allow_invalid_operation is a flag for allowing to use operations that
        are not listed in a resource agent metadata
    bool allow_invalid_instance_attributes is a flag for allowing to use
        instance attributes that are not listed in a resource agent metadata
        or for allowing to not use the instance_attributes that are required in
        resource agent metadata
    bool use_default_operations is a flag for stopping stopping of adding
        default cib operations (specified in a resource agent)
    mixed wait is flag for controlling waiting for pacemaker iddle mechanism
    """
    # refuse to run with a mocked cib or corosync.conf, and make sure waiting
    # is supported before doing any work
    _ensure_consistently_live_env(env)
    env.ensure_wait_satisfiable(wait)

    cib = env.get_cib()
    current_nodes = get_nodes(env.get_corosync_conf(), cib)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    report_list = remote_node.validate_create(
        current_nodes,
        resource_agent,
        host,
        node_name,
        instance_attributes
    )

    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            host,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        # Merge the validation reports with the create failure, keeping only
        # the first ID_ALREADY_EXISTS report per conflicting id.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code != report_codes.ID_ALREADY_EXISTS:
                unified_report_list.append(report)
            elif report.info["id"] not in already_exists:
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    # NOTE(review): if remote_node.create raised, remote_resource_element is
    # unbound below — this appears safe only because process_list is expected
    # to raise on the collected error reports; confirm that guarantee holds
    env.report_processor.process_list(report_list)

    # distribute authkey and start/enable pacemaker_remote on the new node
    _prepare_pacemaker_remote_environment(
        env,
        current_nodes,
        host,
        allow_incomplete_distribution,
        allow_pacemaker_remote_service_fail,
    )
    env.push_cib(cib, wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])