def wait_for_idle(runner, timeout=None):
    """
    Run the pacemaker wait command; raise LibraryError if it fails.

    runner is a preconfigured object for running external programs
    string timeout is the waiting timeout
    """
    cmd = [__exec("crm_resource"), "--wait"]
    if timeout is not None:
        cmd.append(f"--timeout={timeout}")
    out, err, rc = runner.run(cmd)
    if rc == 0:
        return
    # Useful info goes to stderr - not only error messages, a list of
    # pending actions in case of timeout goes there as well. Stdout is
    # included just in case that ever changes.
    message = join_multilines([err, out])
    if rc == __EXITCODE_WAIT_TIMEOUT:
        raise LibraryError(reports.wait_for_idle_timed_out(message))
    raise LibraryError(reports.wait_for_idle_error(message))
def resource_cleanup(runner, resource=None, node=None, force=False):
    """
    Run 'crm_resource --cleanup'; return its combined output.

    Unless force is set, refuse a cluster-wide cleanup (neither resource nor
    node given) when the number of operations would be too time consuming.
    """
    if not (force or node or resource):
        summary = ClusterState(get_cluster_status_xml(runner)).summary
        operation_count = (
            summary.nodes.attrs.count * summary.resources.attrs.count
        )
        if operation_count > __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD:
            raise LibraryError(
                reports.resource_cleanup_too_time_consuming(
                    __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD
                )
            )

    cmd = [__exec("crm_resource"), "--cleanup"]
    for flag, value in (("--resource", resource), ("--node", node)):
        if value:
            cmd.extend([flag, value])

    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.resource_cleanup_error(
                join_multilines([err, out]), resource, node
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([out, err])
def resource_cleanup(
    runner: CommandRunner,
    resource: Optional[str] = None,
    node: Optional[str] = None,
    operation: Optional[str] = None,
    interval: Optional[str] = None,
    strict: bool = False,
):
    """
    Run 'crm_resource --cleanup' and return its combined output.

    resource -- limit cleanup to this resource
    node -- limit cleanup to this node
    operation -- limit cleanup to this operation
    interval -- limit cleanup to operations with this interval
    strict -- pass --force to crm_resource
    """
    cmd = [__exec("crm_resource"), "--cleanup"]
    option_pairs = (
        ("--resource", resource),
        ("--node", node),
        ("--operation", operation),
        ("--interval", interval),
    )
    for flag, value in option_pairs:
        if value:
            cmd.extend([flag, value])
    if strict:
        cmd.append("--force")

    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.resource_cleanup_error(
                join_multilines([err, out]), resource, node
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([out, err])
def wait_for_idle(runner, timeout=None):
    """
    Wait for the cluster to settle; raise LibraryError on failure.

    runner is a preconfigured object for running external programs
    string timeout is the waiting timeout
    """
    cmd = [__exec("crm_resource"), "--wait"]
    if timeout is not None:
        cmd.append("--timeout={0}".format(timeout))
    stdout, stderr, retval = runner.run(cmd)
    if retval != 0:
        # Useful info goes to stderr - not only error messages, a list of
        # pending actions in case of timeout goes there as well. Stdout is
        # joined in just in case that ever changes.
        message = join_multilines([stderr, stdout])
        report = (
            reports.wait_for_idle_timed_out(message)
            if retval == __EXITCODE_WAIT_TIMEOUT
            else reports.wait_for_idle_error(message)
        )
        raise LibraryError(report)
def resource_cleanup(
    runner, resource=None, node=None, operation=None, interval=None
):
    """
    Run 'crm_resource --cleanup' and return its combined output.
    """
    cmd = [__exec("crm_resource"), "--cleanup"]
    for flag, value in (
        ("--resource", resource),
        ("--node", node),
        ("--operation", operation),
        ("--interval", interval),
    ):
        if value:
            cmd.extend([flag, value])

    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.resource_cleanup_error(
                join_multilines([err, out]), resource, node
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([out, err])
def resource_refresh(
    runner: CommandRunner,
    resource: Optional[str] = None,
    node: Optional[str] = None,
    strict: bool = False,
    force: bool = False,
):
    """
    Run 'crm_resource --refresh' and return its combined output.

    Unless force is set, a cluster-wide refresh (neither resource nor node
    specified) is refused when it would involve too many operations.
    """
    if not (force or node or resource):
        summary = ClusterState(get_cluster_status_xml(runner)).summary
        operation_count = (
            summary.nodes.attrs.count * summary.resources.attrs.count
        )
        if operation_count > __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD:
            raise LibraryError(
                reports.resource_refresh_too_time_consuming(
                    __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD
                )
            )

    cmd = [__exec("crm_resource"), "--refresh"]
    for flag, value in (("--resource", resource), ("--node", node)):
        if value:
            cmd.extend([flag, value])
    if strict:
        cmd.append("--force")

    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.resource_refresh_error(
                join_multilines([err, out]), resource, node
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([out, err])
def get_cib_xml(runner, scope=None):
    """
    Load the CIB (or one section of it) as an XML string.
    """
    stdout, stderr, retval = get_cib_xml_cmd_results(runner, scope)
    if retval == 0:
        return stdout
    message = join_multilines([stderr, stdout])
    if scope and retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT:
        raise LibraryError(
            reports.cib_load_error_scope_missing(scope, message)
        )
    raise LibraryError(reports.cib_load_error(message))
def get_cib_xml(runner, scope=None):
    """
    Return the CIB, or the given scope of it, as an XML string.
    """
    out, err, rc = get_cib_xml_cmd_results(runner, scope)
    if rc != 0:
        combined = join_multilines([err, out])
        if rc == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope:
            raise LibraryError(
                reports.cib_load_error_scope_missing(scope, combined)
            )
        raise LibraryError(reports.cib_load_error(combined))
    return out
def get_cib_xml(runner, scope=None):
    """
    Query the local CIB via cibadmin; return it as an XML string.
    """
    command = [__exec("cibadmin"), "--local", "--query"]
    if scope:
        command.append(f"--scope={scope}")
    out, err, rc = runner.run(command)
    if rc == 0:
        return out
    message = join_multilines([err, out])
    if scope and rc == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT:
        raise LibraryError(
            reports.cib_load_error_scope_missing(scope, message)
        )
    raise LibraryError(reports.cib_load_error(message))
def resource_cleanup(runner, resource=None, node=None):
    """
    Run 'crm_resource --cleanup'; return its combined output.
    """
    cmd = [__exec("crm_resource"), "--cleanup"]
    for flag, value in (("--resource", resource), ("--node", node)):
        if value:
            cmd.extend([flag, value])
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.resource_cleanup_error(
                join_multilines([err, out]), resource, node
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([out, err])
def client_cert_request_to_pk12(runner, cert_request):
    """
    Transform a signed certificate request into a pk12 certificate which can
    be imported to nodes.

    cert_request signed certificate request
    """
    if not client_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    # save the signed certificate request, corosync tool only works with files
    tmpfile = _store_to_tmpfile(
        cert_request, reports.qdevice_certificate_import_error
    )
    # transform it
    out, err, rc = runner.run([__qdevice_certutil, "-M", "-c", tmpfile.name])
    tmpfile.close()  # temp file is deleted on close
    if rc != 0:
        raise LibraryError(
            reports.qdevice_certificate_import_error(
                join_multilines([err, out])
            )
        )
    # get resulting pk12, corosync tool only works with files
    return _get_output_certificate(
        out, reports.qdevice_certificate_import_error
    )
def client_setup(runner, ca_certificate):
    """
    Initialize qdevice client on the local host.

    ca_certificate qnetd CA certificate
    """
    client_destroy()
    # save CA certificate, corosync tool only works with files
    ca_file_path = os.path.join(
        settings.corosync_qdevice_net_client_certs_dir,
        settings.corosync_qdevice_net_client_ca_file_name,
    )
    try:
        if not os.path.exists(ca_file_path):
            os.makedirs(
                settings.corosync_qdevice_net_client_certs_dir, mode=0o700
            )
        with open(ca_file_path, "wb") as ca_file:
            ca_file.write(ca_certificate)
    except EnvironmentError as e:
        raise LibraryError(
            reports.qdevice_initialization_error(__model, e.strerror)
        )
    # initialize client's certificate storage
    out, err, rc = runner.run([__qdevice_certutil, "-i", "-c", ca_file_path])
    if rc != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([err, out])
            )
        )
def qdevice_sign_certificate_request(runner, cert_request, cluster_name):
    """
    Sign a client certificate request.

    cert_request certificate request data
    string cluster_name name of the cluster to which qdevice is being added
    """
    if not qdevice_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    # save the certificate request, corosync tool only works with files
    tmpfile = _store_to_tmpfile(
        cert_request, reports.qdevice_certificate_sign_error
    )
    # sign the request
    out, err, rc = runner.run(
        [__qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name]
    )
    tmpfile.close()  # temp file is deleted on close
    if rc != 0:
        raise LibraryError(
            reports.qdevice_certificate_sign_error(join_multilines([err, out]))
        )
    # get signed certificate, corosync tool only works with files
    return _get_output_certificate(
        out, reports.qdevice_certificate_sign_error
    )
def client_setup(runner, ca_certificate):
    """
    Initialize the qdevice client on the local host.

    ca_certificate qnetd CA certificate
    """
    client_destroy()
    # save CA certificate, corosync tool only works with files
    certs_dir = settings.corosync_qdevice_net_client_certs_dir
    ca_file_path = os.path.join(
        certs_dir, settings.corosync_qdevice_net_client_ca_file_name
    )
    try:
        if not os.path.exists(ca_file_path):
            os.makedirs(certs_dir, mode=0o700)
        with open(ca_file_path, "wb") as ca_file:
            ca_file.write(ca_certificate)
    except EnvironmentError as env_error:
        raise LibraryError(
            reports.qdevice_initialization_error(__model, env_error.strerror)
        )
    # initialize client's certificate storage
    out, err, rc = runner.run([__qdevice_certutil, "-i", "-c", ca_file_path])
    if rc != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([err, out])
            )
        )
def get_cluster_status_text(
    runner: CommandRunner,
    hide_inactive_resources: bool,
    verbose: bool,
) -> Tuple[str, List[str]]:
    """
    Get cluster status in plain text via crm_mon.

    Returns the status text and a list of warning lines taken from stderr
    (DEBUG lines are dropped unless verbose is set).
    """
    cmd = [__exec("crm_mon"), "--one-shot"]
    if not hide_inactive_resources:
        cmd.append("--inactive")
    if verbose:
        cmd.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"]
        )
        # by default, pending and failed actions are displayed
        # with verbose==True, we display the whole history
        if is_fence_history_supported_status(runner):
            cmd.append("--fence-history=3")
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise CrmMonErrorException(
            reports.cluster_state_cannot_load(join_multilines([err, out]))
        )

    warnings: List[str] = []
    if err.strip():
        warnings = [
            line
            for line in err.strip().splitlines()
            if verbose or not line.startswith("DEBUG: ")
        ]
    return out.strip(), warnings
def upgrade_cib(cib, runner):
    """
    Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded
    CIB as string. Raises LibraryError on any failure.

    cib -- cib etree
    runner -- CommandRunner
    """
    temp_file = None
    try:
        # cibadmin works on a CIB file selected via the CIB_file environment
        # variable, so serialize the etree into a temp file first
        temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
        temp_file.write(etree.tostring(cib).decode())
        temp_file.flush()
        stdout, stderr, retval = runner.run(
            [
                os.path.join(settings.pacemaker_binaries, "cibadmin"),
                "--upgrade",
                "--force"
            ],
            env_extend={"CIB_file": temp_file.name}
        )
        if retval != 0:
            # closing the NamedTemporaryFile deletes it
            temp_file.close()
            raise LibraryError(
                reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
            )
        # re-read the temp file: cibadmin appears to write the upgraded CIB
        # back into the file pointed to by CIB_file
        temp_file.seek(0)
        return etree.fromstring(temp_file.read())
    except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
        # covers temp-file I/O errors and unparsable cibadmin output
        raise LibraryError(reports.cib_upgrade_failed(str(e)))
    finally:
        if temp_file:
            temp_file.close()
def wait_for_resources(runner, timeout=None):
    """
    Wait for resource operations to finish; raise LibraryError on failure.
    """
    cmd = [__exec("crm_resource"), "--wait"]
    if timeout is not None:
        cmd.append(f"--timeout={timeout}")
    out, err, rc = runner.run(cmd)
    if rc == 0:
        return
    # Useful info goes to stderr - not only error messages, a list of
    # pending actions in case of timeout goes there as well. Stdout is
    # joined in just in case that ever changes.
    message = join_multilines([err, out])
    if rc == __EXITCODE_WAIT_TIMEOUT:
        raise LibraryError(reports.resource_wait_timed_out(message))
    raise LibraryError(reports.resource_wait_error(message))
def _ticket_operation(operation, env, ticket, site_ip):
    """
    Run a booth ticket operation against a site.

    operation -- booth subcommand to run
    env -- library environment providing the CIB and a command runner
    ticket -- ticket name
    site_ip -- site address; when falsy it is looked up from the CIB
    """
    if not env.is_cib_live:
        raise LibraryError(reports.live_environment_required(["CIB"]))

    if not site_ip:
        # derive the local site IP from the booth IP resource in the CIB;
        # exactly one bound address is expected
        found_ips = resource.find_bound_ip(
            get_resources(env.get_cib()),
            get_config_file_name(env.booth.name),
        )
        if len(found_ips) != 1:
            raise LibraryError(
                booth_reports.booth_cannot_determine_local_site_ip()
            )
        site_ip = found_ips[0]

    out, err, rc = env.cmd_runner().run(
        [settings.booth_binary, operation, "-s", site_ip, ticket]
    )
    if rc != 0:
        raise LibraryError(
            booth_reports.booth_ticket_operation_failed(
                operation, join_multilines([err, out]), site_ip, ticket
            )
        )
def get_local_node_name(runner):
    """
    Return the local node's name as reported by crm_node.
    """
    out, err, rc = runner.run([__exec("crm_node"), "--name"])
    if rc == 0:
        return out.strip()
    raise LibraryError(
        reports.pacemaker_local_node_name_not_found(
            join_multilines([err, out])
        )
    )
def _run_fence_history_command(runner, command, node=None):
    """
    Run 'stonith_admin --history' for one node, or '*' for all nodes.
    """
    target = node if node else "*"
    out, err, rc = runner.run(
        [__exec("stonith_admin"), "--history", target, command]
    )
    if rc != 0:
        raise FenceHistoryCommandErrorException(join_multilines([err, out]))
    return out.strip()
def get_cluster_status_xml(runner):
    """
    Return cluster status from crm_mon as an XML string.
    """
    out, err, rc = runner.run(
        [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
    )
    if rc == 0:
        return out
    raise CrmMonErrorException(
        reports.cluster_state_cannot_load(join_multilines([err, out]))
    )
def reload_config(runner):
    """
    Ask corosync to reload its configuration.
    """
    cfgtool = os.path.join(settings.corosync_binaries, "corosync-cfgtool")
    out, err, rc = runner.run([cfgtool, "-R"])
    message = join_multilines([err, out])
    # an "invalid option" response means the reload did not happen, so treat
    # it as an error even when the exit code is 0
    if rc != 0 or "invalid option" in message:
        raise LibraryError(reports.corosync_config_reload_error(message))
def get_cluster_status_xml(runner):
    """
    Load cluster status as XML via crm_mon.
    """
    cmd = [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
    stdout, stderr, retval = runner.run(cmd)
    if retval != 0:
        raise CrmMonErrorException(
            reports.cluster_state_cannot_load(
                join_multilines([stderr, stdout])
            )
        )
    return stdout
def reload_config(runner):
    """
    Tell corosync to reload its configuration.
    """
    stdout, stderr, retval = runner.run(
        [os.path.join(settings.corosync_binaries, "corosync-cfgtool"), "-R"]
    )
    message = join_multilines([stderr, stdout])
    # "invalid option" in the output means the reload was not performed, so
    # it is an error even with a zero exit code
    if retval != 0 or "invalid option" in message:
        raise LibraryError(reports.corosync_config_reload_error(message))
def get_local_node_name(runner):
    """
    Get the name of the local node via crm_node.
    """
    stdout, stderr, retval = runner.run([__exec("crm_node"), "--name"])
    if retval != 0:
        raise LibraryError(
            reports.pacemaker_local_node_name_not_found(
                join_multilines([stderr, stdout])
            )
        )
    return stdout.strip()
def get_cib_xml(runner, scope=None):
    """
    Query the local CIB (optionally a single scope) via cibadmin.
    """
    cmd = [__exec("cibadmin"), "--local", "--query"]
    if scope:
        cmd.append("--scope={0}".format(scope))
    out, err, rc = runner.run(cmd)
    if rc == 0:
        return out
    message = join_multilines([err, out])
    if scope and rc == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT:
        raise LibraryError(
            reports.cib_load_error_scope_missing(scope, message)
        )
    raise LibraryError(reports.cib_load_error(message))
def get_peers_status(runner, name=None):
    """
    Return the output of 'booth peers', optionally for a named config.
    """
    cmd = [settings.booth_binary, "peers"]
    if name:
        cmd.extend(["-c", name])
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.booth_peers_status_error(join_multilines([err, out]))
        )
    return out
def get_peers_status(runner, name=None):
    """
    Run 'booth peers' and return its stdout.
    """
    command = [settings.booth_binary, "peers"]
    if name:
        command += ["-c", name]
    stdout, stderr, return_value = runner.run(command)
    if return_value == 0:
        return stdout
    raise LibraryError(
        reports.booth_peers_status_error(join_multilines([stderr, stdout]))
    )
def get_daemon_status(runner, name=None):
    """
    Return the output of 'booth status', optionally for a named config.
    """
    cmd = [settings.booth_binary, "status"]
    if name:
        cmd.extend(["-c", name])
    out, err, rc = runner.run(cmd)
    # 7 means that there is no booth instance running
    if rc not in (0, 7):
        raise LibraryError(
            reports.booth_daemon_status_error(join_multilines([err, out]))
        )
    return out
def get_daemon_status(runner, name=None):
    """
    Run 'booth status' and return its stdout.
    """
    command = [settings.booth_binary, "status"]
    if name:
        command += ["-c", name]
    stdout, stderr, return_value = runner.run(command)
    # 7 means that there is no booth instance running
    if return_value in (0, 7):
        return stdout
    raise LibraryError(
        reports.booth_daemon_status_error(join_multilines([stderr, stdout]))
    )
def wait_for_resources(runner, timeout=None):
    """
    Run 'crm_resource --wait'; raise LibraryError on failure or timeout.
    """
    args = [__exec("crm_resource"), "--wait"]
    if timeout is not None:
        args.append("--timeout={0}".format(timeout))
    stdout, stderr, retval = runner.run(args)
    if retval != 0:
        # Useful info goes to stderr - not only error messages, a list of
        # pending actions in case of timeout goes there as well. Stdout is
        # joined in just in case that ever changes.
        message = join_multilines([stderr, stdout])
        report = (
            reports.resource_wait_timed_out(message)
            if retval == __EXITCODE_WAIT_TIMEOUT
            else reports.resource_wait_error(message)
        )
        raise LibraryError(report)
def qdevice_setup(runner):
    """
    Initialize qdevice on the local host.
    """
    if qdevice_initialized():
        raise LibraryError(reports.qdevice_already_initialized(__model))
    out, err, rc = runner.run([__qnetd_certutil, "-i"])
    if rc != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([err, out])
            )
        )
def get_status_text(runner, verbose=False):
    """
    Get quorum device client runtime status in plain text.

    bool verbose get more detailed output
    """
    cmd = [
        os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"),
        "-s",
    ]
    if verbose:
        cmd.append("-v")
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise LibraryError(
            reports.corosync_quorum_get_status_error(
                join_multilines([err, out])
            )
        )
    return out
def remove_node(runner, node_name):
    """
    Remove a node from pacemaker via crm_node.
    """
    out, err, rc = runner.run(
        [__exec("crm_node"), "--force", "--remove", node_name]
    )
    if rc != 0:
        raise LibraryError(
            reports.node_remove_in_pacemaker_failed(
                node_name, reason=join_multilines([err, out])
            )
        )
def qdevice_setup(runner):
    """
    Initialize qdevice on the local host.
    """
    # a non-empty certs dir means qdevice has already been set up here
    if external.is_dir_nonempty(
        settings.corosync_qdevice_net_server_certs_dir
    ):
        raise LibraryError(reports.qdevice_already_initialized(__model))
    out, err, rc = runner.run([__qnetd_certutil, "-i"])
    if rc != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([err, out])
            )
        )
def _upgrade_cib(runner):
    """
    Upgrade CIB to the latest schema available locally or clusterwise.

    CommandRunner runner
    """
    out, err, rc = runner.run([__exec("cibadmin"), "--upgrade", "--force"])
    # Already being on the latest available schema is not considered an
    # error here. We do not know what version is required; the caller does
    # and is responsible for dealing with it.
    if rc not in (0, __EXITCODE_CIB_SCHEMA_IS_THE_LATEST_AVAILABLE):
        raise LibraryError(
            reports.cib_upgrade_failed(join_multilines([err, out]))
        )
def _upgrade_cib(runner):
    """
    Upgrade CIB to the latest schema available locally or clusterwise.

    CommandRunner runner
    """
    out, err, rc = runner.run([__exec("cibadmin"), "--upgrade", "--force"])
    # If we are already on the latest schema available, cibadmin exits with
    # 0 and that is fine. We do not know what version is required anyway;
    # the caller knows and is responsible for dealing with it.
    if rc == 0:
        return
    raise LibraryError(
        reports.cib_upgrade_failed(join_multilines([err, out]))
    )
def remove_node(runner, node_name):
    """
    Remove the given node from pacemaker via 'crm_node --remove'.
    """
    cmd = [__exec("crm_node"), "--force", "--remove", node_name]
    stdout, stderr, retval = runner.run(cmd)
    if retval != 0:
        raise LibraryError(
            reports.node_remove_in_pacemaker_failed(
                [node_name], reason=join_multilines([stderr, stdout])
            )
        )
def _run_fence_history_command(runner, command, node=None):
    """
    Run a stonith_admin fence-history subcommand; '*' targets all nodes.
    """
    stdout, stderr, retval = runner.run(
        [__exec("stonith_admin"), "--history", node if node else "*", command]
    )
    if retval == 0:
        return stdout.strip()
    raise FenceHistoryCommandErrorException(
        join_multilines([stderr, stdout])
    )
def _upgrade_cib(runner):
    """
    Upgrade the CIB to the latest schema available locally or clusterwise.

    CommandRunner runner
    """
    cmd = [__exec("cibadmin"), "--upgrade", "--force"]
    stdout, stderr, retval = runner.run(cmd)
    # If we are already on the latest schema available, cibadmin exits with
    # 0 and that is fine. We do not know what version is required anyway;
    # the caller knows and is responsible for dealing with it.
    if retval != 0:
        raise LibraryError(
            reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
        )
def qdevice_status_generic_text(runner, verbose=False):
    """
    Get qdevice runtime status in plain text.

    bool verbose get more detailed output
    """
    tool_args = ["-s"]
    if verbose:
        tool_args.append("-v")
    out, err, rc = _qdevice_run_tool(runner, tool_args)
    if rc != 0:
        raise LibraryError(
            reports.qdevice_get_status_error(
                __model, join_multilines([err, out])
            )
        )
    return out
def _upgrade_cib(runner):
    """
    Upgrade the CIB to the latest schema available locally or clusterwise.

    CommandRunner runner
    """
    cmd = [__exec("cibadmin"), "--upgrade", "--force"]
    stdout, stderr, retval = runner.run(cmd)
    # Already being on the latest available schema is not an error here. We
    # do not know what version is required; the caller does and is
    # responsible for dealing with it.
    acceptable = (0, __EXITCODE_CIB_SCHEMA_IS_THE_LATEST_AVAILABLE)
    if retval not in acceptable:
        raise LibraryError(
            reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
        )
def kill_services(runner, services):
    """
    Kill specified services on the local system.

    CommandRunner runner
    iterable services service names

    Raises KillServicesError when killall produces an error message.
    """
    # Materialize the iterable once: the original consumed it twice (once
    # for the command line, once for the error report), which yields an
    # empty list the second time if a one-shot iterator is passed in.
    service_list = list(services)
    # make killall not report that a process is not running
    stdout, stderr, retval = runner.run(
        ["killall", "--quiet", "--signal", "9", "--"] + service_list
    )
    # If a process isn't running, killall will still return 1 even with
    # --quiet. We don't consider that an error, so we check for output text
    # as well. If it's empty, no actual error happened.
    if retval != 0:
        message = join_multilines([stderr, stdout])
        if message:
            raise KillServicesError(service_list, message)
def get_status_text(runner, verbose=False):
    """
    Get quorum device client runtime status in plain text.

    bool verbose get more detailed output
    """
    qdevice_tool = os.path.join(
        settings.corosync_binaries, "corosync-qdevice-tool"
    )
    cmd = [qdevice_tool, "-s"]
    if verbose:
        cmd.append("-v")
    stdout, stderr, retval = runner.run(cmd)
    if retval == 0:
        return stdout
    raise LibraryError(
        reports.corosync_quorum_get_status_error(
            join_multilines([stderr, stdout])
        )
    )
def client_generate_certificate_request(runner, cluster_name):
    """
    Create a certificate request which can be signed by the qnetd server.

    string cluster_name name of the cluster to which qdevice is being added
    """
    if not client_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    out, err, rc = runner.run([__qdevice_certutil, "-r", "-n", cluster_name])
    if rc != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([err, out])
            )
        )
    return _get_output_certificate(
        out,
        functools.partial(reports.qdevice_initialization_error, __model),
    )
def client_import_certificate_and_key(runner, pk12_certificate):
    """
    Import a qdevice client certificate into the local certificate storage.
    """
    if not client_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    # save the certificate, corosync tool only works with files
    tmpfile = _store_to_tmpfile(
        pk12_certificate, reports.qdevice_certificate_import_error
    )
    out, err, rc = runner.run([__qdevice_certutil, "-m", "-c", tmpfile.name])
    tmpfile.close()  # temp file is deleted on close
    if rc != 0:
        raise LibraryError(
            reports.qdevice_certificate_import_error(
                join_multilines([err, out])
            )
        )
def kill_services(runner, services):
    """
    Kill specified services on the local system.

    CommandRunner runner
    iterable services service names

    Raises KillServicesError when killall produces an error message.
    """
    # Materialize the iterable once: the original evaluated list(services)
    # twice, so a one-shot iterator would produce an empty service list in
    # the error report.
    service_list = list(services)
    # make killall not report that a process is not running
    stdout, stderr, retval = runner.run(
        ["killall", "--quiet", "--signal", "9", "--"] + service_list
    )
    # If a process isn't running, killall will still return 1 even with
    # --quiet. We don't consider that an error, so we check for output text
    # as well. If it's empty, no actual error happened.
    if retval != 0:
        message = join_multilines([stderr, stdout])
        if message:
            raise KillServicesError(service_list, message)
def qdevice_setup(runner):
    """
    Initialize qdevice on the local host.
    """
    # a non-empty certs dir means qdevice has already been initialized here
    if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir):
        raise LibraryError(reports.qdevice_already_initialized(__model))
    stdout, stderr, retval = runner.run([__qnetd_certutil, "-i"])
    if retval == 0:
        return
    raise LibraryError(
        reports.qdevice_initialization_error(
            __model, join_multilines([stderr, stdout])
        )
    )
def stop_service(runner, service, instance=None):
    """
    Stop the specified service on the local system.

    CommandRunner runner
    string service service name
    string instance instance name; it has no effect on non-systemd systems.
        If None, no instance name is used.
    """
    if is_systemctl():
        cmd = [_systemctl, "stop", _get_service_name(service, instance)]
    else:
        cmd = [_service, service, "stop"]
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise StopServiceError(
            service, join_multilines([err, out]), instance
        )
def qdevice_status_generic_text(runner, verbose=False):
    """
    Get qdevice runtime status in plain text.

    bool verbose get more detailed output
    """
    args = ["-s"] + (["-v"] if verbose else [])
    stdout, stderr, retval = _qdevice_run_tool(runner, args)
    if retval == 0:
        return stdout
    raise LibraryError(
        reports.qdevice_get_status_error(
            __model, join_multilines([stderr, stdout])
        )
    )
def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
    """
    Get qdevice runtime status for clusters in plain text.

    string cluster show information only about the specified cluster
    bool verbose get more detailed output
    """
    tool_args = ["-l"]
    if verbose:
        tool_args.append("-v")
    if cluster:
        tool_args.extend(["-c", cluster])
    out, err, rc = _qdevice_run_tool(runner, tool_args)
    if rc != 0:
        raise LibraryError(
            reports.qdevice_get_status_error(
                __model, join_multilines([err, out])
            )
        )
    return out
def stop_service(runner, service, instance=None):
    """
    Stop the specified service on the local system.

    CommandRunner runner
    string service service name
    string instance instance name; it has no effect on non-systemd systems.
        If None, no instance name is used.
    """
    command = (
        [_systemctl, "stop", _get_service_name(service, instance)]
        if is_systemctl()
        else [_service, service, "stop"]
    )
    stdout, stderr, retval = runner.run(command)
    if retval != 0:
        raise StopServiceError(
            service, join_multilines([stderr, stdout]), instance
        )
def client_generate_certificate_request(runner, cluster_name):
    """
    Create a certificate request which can be signed by the qnetd server.

    string cluster_name name of the cluster to which qdevice is being added
    """
    if not client_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    stdout, stderr, retval = runner.run(
        [__qdevice_certutil, "-r", "-n", cluster_name]
    )
    if retval != 0:
        raise LibraryError(
            reports.qdevice_initialization_error(
                __model, join_multilines([stderr, stdout])
            )
        )
    error_report = functools.partial(
        reports.qdevice_initialization_error, __model
    )
    return _get_output_certificate(stdout, error_report)
def client_import_certificate_and_key(runner, pk12_certificate):
    """
    Import the qdevice client certificate to the local certificate storage.
    """
    if not client_initialized():
        raise LibraryError(reports.qdevice_not_initialized(__model))
    # save the certificate, corosync tool only works with files
    tmpfile = _store_to_tmpfile(
        pk12_certificate, reports.qdevice_certificate_import_error
    )
    stdout, stderr, retval = runner.run(
        [__qdevice_certutil, "-m", "-c", tmpfile.name]
    )
    tmpfile.close()  # temp file is deleted on close
    if retval != 0:
        raise LibraryError(
            reports.qdevice_certificate_import_error(
                join_multilines([stderr, stdout])
            )
        )
def enable_service(runner, service, instance=None):
    """
    Enable the specified service on the local system.

    Raise EnableServiceError or LibraryError on failure.

    runner -- CommandRunner
    service -- name of service
    instance -- instance name; it has no effect on non-systemd systems. If
        None, no instance name is used.
    """
    if is_systemctl():
        cmd = [_systemctl, "enable", _get_service_name(service, instance)]
    else:
        cmd = [_chkconfig, service, "on"]
    out, err, rc = runner.run(cmd)
    if rc != 0:
        raise EnableServiceError(
            service, join_multilines([err, out]), instance
        )
def __nodes_standby_unstandby(
    runner, standby=True, node_list=None, all_nodes=False
):
    """
    Put nodes into standby (standby=True) or bring them back (standby=False).

    runner -- object for running external programs
    standby -- True to standby, False to unstandby
    node_list -- nodes to operate on; empty/None means the local node
    all_nodes -- operate on all nodes known to the cluster status
    """
    if node_list or all_nodes:
        # TODO once we switch to editing the CIB instead of running
        # crm_standby, we cannot always rely on getClusterState. If we're
        # not editing a CIB from a live cluster, there is no status.
        state = ClusterState(get_cluster_status_xml(runner)).node_section.nodes
        known_nodes = [node.attrs.name for node in state]

        if all_nodes:
            node_list = known_nodes
        elif node_list:
            # validate all requested nodes before running any command
            report = []
            for node in node_list:
                if node not in known_nodes:
                    report.append(reports.node_not_found(node))
            if report:
                raise LibraryError(*report)

    # TODO Edit the CIB directly instead of running commands for each node;
    # be aware remote nodes might not be in the CIB yet so we need to put
    # them there.
    cmd_template = [__exec("crm_standby")]
    cmd_template.extend(["-v", "on"] if standby else ["-D"])

    # one command per node, or a single command for the local node
    cmd_list = []
    if node_list:
        for node in node_list:
            cmd_list.append(cmd_template + ["-N", node])
    else:
        cmd_list.append(cmd_template)

    # run every command and collect failures so all are reported at once
    report = []
    for cmd in cmd_list:
        stdout, stderr, retval = runner.run(cmd)
        if retval != 0:
            report.append(
                reports.common_error(join_multilines([stderr, stdout]))
            )
    if report:
        raise LibraryError(*report)
def upgrade_cib(cib, runner):
    """
    Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded
    CIB as string. Raises LibraryError on any failure.

    cib -- cib etree
    runner -- CommandRunner
    """
    temp_file = None
    try:
        # cibadmin operates on the file named by the CIB_file environment
        # variable, so serialize the etree into a temp file first
        temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
        temp_file.write(etree.tostring(cib).decode())
        temp_file.flush()
        stdout, stderr, retval = runner.run(
            [
                os.path.join(settings.pacemaker_binaries, "cibadmin"),
                "--upgrade",
                "--force"
            ],
            env_extend={"CIB_file": temp_file.name}
        )
        if retval != 0:
            # closing the NamedTemporaryFile deletes it
            temp_file.close()
            raise LibraryError(
                reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
            )
        # re-read the temp file: cibadmin appears to write the upgraded CIB
        # back into the file pointed to by CIB_file
        temp_file.seek(0)
        return etree.fromstring(temp_file.read())
    except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
        # covers temp-file I/O errors and unparsable cibadmin output
        raise LibraryError(reports.cib_upgrade_failed(str(e)))
    finally:
        if temp_file:
            temp_file.close()