def test_unsupported_exception(self):
    """A generic Exception must propagate unchanged instead of being converted.

    Only NodeCommunicationException subclasses are turned into report items;
    anything else has to be re-raised as-is.
    """
    exc = Exception("test")
    with self.assertRaises(Exception) as context:
        lib.node_communicator_exception_to_report_item(exc)
    # The very same exception object must come out — not a copy or a wrapper.
    self.assertIs(context.exception, exc)
def is_node_online(node):
    """Check that *node* is reachable and authenticated; collect the result.

    Online nodes are appended to online_node_list; failures become report
    items queued in to_raise.
    """
    try:
        nodes_task.node_check_auth(lib_env.node_communicator(), node)
    except NodeConnectionException as e:
        # Connection failures may be softened to an "omitting node" report
        # when the caller asked to ignore offline nodes.
        if ignore_offline_nodes:
            to_raise.append(reports.omitting_node(node.label))
        else:
            to_raise.append(
                node_communicator_exception_to_report_item(
                    e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
                )
            )
    except NodeCommunicationException as e:
        to_raise.append(node_communicator_exception_to_report_item(e))
    else:
        online_node_list.append(node)
def is_node_online(node):
    """Record *node* as online, or queue an appropriate failure report."""
    try:
        nodes_task.node_check_auth(lib_env.node_communicator(), node)
        online_node_list.append(node)
    except NodeConnectionException as e:
        # Offline nodes are either merely noted or escalated to an error,
        # depending on the ignore_offline_nodes flag.
        report = (
            reports.omitting_node(node.label)
            if ignore_offline_nodes
            else node_communicator_exception_to_report_item(
                e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
            )
        )
        to_raise.append(report)
    except NodeCommunicationException as e:
        to_raise.append(node_communicator_exception_to_report_item(e))
def _add_device_model_net(
    lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
):
    """
    Set up cluster nodes for using qdevice model net.

    string qnetd_host address of qdevice provider (qnetd host)
    string cluster_name name of the cluster to which qdevice is being added
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    communicator = lib_env.node_communicator()
    runner = lib_env.cmd_runner()
    reporter = lib_env.report_processor

    reporter.process(reports.qdevice_certificate_distribution_started())

    # Step 1: obtain the CA certificate from the qnetd host.
    try:
        qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
            communicator, qnetd_host
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))

    # Step 2: initialize certificate storage on every cluster node.
    parallel_nodes_communication_helper(
        qdevice_net.remote_client_setup,
        [((communicator, node, qnetd_ca_cert), {}) for node in cluster_nodes],
        reporter,
        skip_offline_nodes
    )

    # Step 3: generate a certificate request locally, then have qnetd sign it.
    cert_request = qdevice_net.client_generate_certificate_request(
        runner, cluster_name
    )
    try:
        signed_certificate = qdevice_net.remote_sign_certificate_request(
            communicator, qnetd_host, cert_request, cluster_name
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))

    # Step 4: convert to pk12 format, which can be sent to the nodes.
    pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)

    # Step 5: import the final certificate on every node and report success.
    def import_and_report(reporter, communicator, node, pk12):
        qdevice_net.remote_client_import_certificate_and_key(
            communicator, node, pk12
        )
        reporter.process(
            reports.qdevice_certificate_accepted_by_node(node.label)
        )

    parallel_nodes_communication_helper(
        import_and_report,
        [((reporter, communicator, node, pk12), {}) for node in cluster_nodes],
        reporter,
        skip_offline_nodes
    )
def check_can_add_node_to_cluster(node_communicator, node, report_items):
    """Append reports to *report_items* if *node* cannot join a cluster.

    node_communicator -- NodeCommunicator
    node -- NodeAddresses of the candidate node
    report_items -- list collecting ReportItems; nothing is raised here
    """
    try:
        raw_response = node_communicator.call_node(
            node, "remote/node_available", data=None
        )
        availability_info = json.loads(raw_response)
        # An available node produces no report at all.
        if availability_info["node_available"]:
            return
        if availability_info.get("pacemaker_remote", False):
            report_items.append(
                reports.cannot_add_node_is_running_service(
                    node.label, "pacemaker_remote"
                )
            )
            return
        report_items.append(
            reports.cannot_add_node_is_in_cluster(node.label)
        )
    except NodeCommunicationException as e:
        report_items.append(
            node_communicator_exception_to_report_item(
                e, ReportItemSeverity.ERROR,
            )
        )
    except (ValueError, TypeError, KeyError):
        # the node answered with malformed or incomplete JSON
        report_items.append(reports.invalid_response_format(node.label))
def _parallel(*args, **kwargs):
    """Run func(), funneling node and library failures into report_list."""
    try:
        func(*args, **kwargs)
    except NodeCommunicationException as exception:
        report_list.append(
            node_communicator_exception_to_report_item(exception)
        )
    except LibraryError as error:
        # a LibraryError already carries ReportItems as its args
        report_list.extend(error.args)
def pull_config_from_node(communicator, node, name):
    """Download config (and its authfile, if any) of booth instance *name*.

    Returns a dictionary with format:
    {
        "config": {
            "name": <file name of config>,
            "data": <content of file>
        },
        "authfile": {
            "name": <file name of authfile, None if it doesn't exist>,
            "data": <base64 coded content of authfile>
        }
    }

    communicator -- NodeCommunicator
    node -- NodeAddresses
    name -- name of booth instance
    """
    try:
        raw_response = communicator.call_node(
            node,
            "remote/booth_get_config",
            NodeCommunicator.format_data_dict([("name", name)])
        )
        return json.loads(raw_response)
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except ValueError:
        # the node answered with something that is not valid JSON
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def _parallel(node):
    """Push corosync.conf to one node, recording success or failure reports."""
    try:
        corosync_live.set_remote_corosync_conf(
            node_communicator, node, config_text
        )
    except NodeCommunicationException as e:
        # Pair the raw communication error with a distribution-failed summary.
        report_items.extend([
            node_communicator_exception_to_report_item(
                e, failure_severity, failure_forceable
            ),
            reports.corosync_config_distribution_node_error(
                node.label, failure_severity, failure_forceable
            ),
        ])
    else:
        reporter.process(
            reports.corosync_config_accepted_by_node(node.label)
        )
def _parallel(*args, **kwargs):
    """Call func(), converting failures into entries of report_list."""
    try:
        func(*args, **kwargs)
    except NodeCommunicationException as communication_error:
        report_list.append(
            node_communicator_exception_to_report_item(communication_error)
        )
    except LibraryError as library_error:
        # LibraryError.args is the sequence of ReportItems it carries
        report_list.extend(library_error.args)
def get_sbd_config(node):
    """Fetch SBD config from *node*; record it or append warning reports."""
    try:
        raw_config = sbd.get_sbd_config(lib_env.node_communicator(), node)
        config_list.append({
            "node": node,
            "config": environment_file_to_dict(raw_config)
        })
        successful_node_list.append(node)
    except NodeCommandUnsuccessfulException as e:
        report_item_list.append(reports.unable_to_get_sbd_config(
            node.label, e.reason, Severities.WARNING
        ))
    except NodeCommunicationException as e:
        # the communication error and the "unable to get" summary go together
        report_item_list.append(node_communicator_exception_to_report_item(
            e, severity=Severities.WARNING
        ))
        report_item_list.append(reports.unable_to_get_sbd_config(
            node.label,
            "",  # reason is in the previous report item
            Severities.WARNING
        ))
def _parallel(node):
    """Verify corosync is not running on *node* and report the outcome."""
    def report_check_error():
        # shared failure report for both communication and parse problems
        report_items.append(
            reports.corosync_not_running_check_node_error(
                node.label, failure_severity, failure_forceable
            )
        )

    try:
        raw_status = node_communicator.call_node(node, "remote/status", None)
        if json.loads(raw_status)["corosync"]:
            report_items.append(
                reports.corosync_running_on_node_fail(node.label)
            )
        else:
            reporter.process(
                reports.corosync_not_running_on_node_ok(node.label)
            )
    except NodeCommunicationException as e:
        report_items.append(
            node_communicator_exception_to_report_item(
                e, failure_severity, failure_forceable
            )
        )
        report_check_error()
    except (ValueError, LookupError):
        report_check_error()
def pull_config_from_node(communicator, node, name):
    """Get config of booth instance *name* plus its authfile from *node*.

    Returns a dictionary with format:
    {
        "config": {
            "name": <file name of config>,
            "data": <content of file>
        },
        "authfile": {
            "name": <file name of authfile, None if it doesn't exist>,
            "data": <base64 coded content of authfile>
        }
    }

    communicator -- NodeCommunicator
    node -- NodeAddresses
    name -- name of booth instance
    """
    request_data = NodeCommunicator.format_data_dict([("name", name)])
    try:
        return json.loads(
            communicator.call_node(
                node, "remote/booth_get_config", request_data
            )
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except ValueError:
        # response was not parseable as JSON
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def get_sbd_status(node):
    """Query SBD service status on *node*; failures become warning reports."""
    try:
        # here we just need info about sbd service,
        # therefore watchdog and device list is empty
        response = sbd.check_sbd(lib_env.node_communicator(), node, "", [])
        status_list.append({
            "node": node.label,
            "status": json.loads(response)["sbd"]
        })
        successful_node_list.append(node)
    except NodeCommunicationException as e:
        report_item_list.append(
            node_communicator_exception_to_report_item(
                e, severity=Severities.WARNING
            )
        )
        report_item_list.append(
            reports.unable_to_get_sbd_status(
                node.label,
                "",  # reason is in the previous item; warning is implicit
            )
        )
    except (ValueError, KeyError) as e:
        report_item_list.append(
            reports.unable_to_get_sbd_status(node.label, str(e))
        )
def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
    """
    Drop the 'stonith-watchdog-timeout' cluster property on every node.

    Works even on nodes where the cluster is not currently running (the CIB
    file is edited directly). Failures from all nodes are collected and a
    single LibraryError carrying every ReportItem is raised at the end.

    node_communicator -- NodeCommunicator
    node_list -- NodeAddressesList
    """
    failures = []
    for node in node_list:
        try:
            remove_stonith_watchdog_timeout(node_communicator, node)
        except NodeCommunicationException as e:
            failures.append(node_communicator_exception_to_report_item(e))
    if failures:
        raise LibraryError(*failures)
def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
    """
    Remove cluster property 'stonith-watchdog-timeout' from all nodes in
    'node_list', even where the cluster is not running (the CIB file is
    edited directly). Errors from every node are gathered first; if any
    occurred, one LibraryError with all ReportItems is raised.

    node_communicator -- NodeCommunicator
    node_list -- NodeAddressesList
    """
    collected_reports = []
    for cluster_node in node_list:
        try:
            remove_stonith_watchdog_timeout(node_communicator, cluster_node)
        except NodeCommunicationException as e:
            collected_reports.append(
                node_communicator_exception_to_report_item(e)
            )
    if collected_reports:
        raise LibraryError(*collected_reports)
def get_sbd_status(node):
    """Query SBD status on *node*; any failure becomes a single report item."""
    try:
        raw = sbd.check_sbd(lib_env.node_communicator(), node, "")
        status_list.append({
            "node": node,
            "status": json.loads(raw)["sbd"]
        })
        successful_node_list.append(node)
    except NodeCommunicationException as e:
        # reuse the converted report's message text as the failure reason
        reason = node_communicator_exception_to_report_item(e).message
        report_item_list.append(
            reports.unable_to_get_sbd_status(node.label, reason)
        )
    except (ValueError, KeyError) as e:
        report_item_list.append(
            reports.unable_to_get_sbd_status(node.label, str(e))
        )
def test_transform_error_403(self):
    """403 exceptions map to NODE_COMMUNICATION_ERROR_PERMISSION_DENIED."""
    node, command, reason = "test_node", "test_command", "test_reason"
    expected = (
        severity.ERROR,
        report_codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED,
        {
            "node": node,
            "command": command,
            "reason": reason,
        }
    )
    assert_report_item_equal(
        lib.node_communicator_exception_to_report_item(
            lib.NodePermissionDeniedException(node, command, reason)
        ),
        expected
    )
def test_transform_error_other(self):
    """A plain NodeCommunicationException maps to NODE_COMMUNICATION_ERROR."""
    node, command, reason = "test_node", "test_command", "test_reason"
    expected = (
        severity.ERROR,
        report_codes.NODE_COMMUNICATION_ERROR,
        {
            "node": node,
            "command": command,
            "reason": reason,
        }
    )
    assert_report_item_equal(
        lib.node_communicator_exception_to_report_item(
            lib.NodeCommunicationException(node, command, reason)
        ),
        expected
    )
def test_transform_error_404(self):
    """404 exceptions map to NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND."""
    node, command, reason = "test_node", "test_command", "test_reason"
    expected = (
        severity.ERROR,
        report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
        {
            "node": node,
            "command": command,
            "reason": reason,
        }
    )
    assert_report_item_equal(
        lib.node_communicator_exception_to_report_item(
            lib.NodeUnsupportedCommandException(node, command, reason)
        ),
        expected
    )
def test_transform_error_400(self):
    """400 exceptions map to NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL."""
    node, command, reason = "test_node", "test_command", "test_reason"
    expected = (
        severity.ERROR,
        report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
        {
            "node": node,
            "command": command,
            "reason": reason,
        }
    )
    assert_report_item_equal(
        lib.node_communicator_exception_to_report_item(
            lib.NodeCommandUnsuccessfulException(node, command, reason)
        ),
        expected
    )
def get_sbd_status(node):
    """Read SBD daemon status from *node*; failures become warnings."""
    try:
        raw = sbd.check_sbd(lib_env.node_communicator(), node, "")
        status_list.append({
            "node": node,
            "status": json.loads(raw)["sbd"]
        })
        successful_node_list.append(node)
    except NodeCommunicationException as e:
        report_item_list.append(
            node_communicator_exception_to_report_item(
                e, severity=Severities.WARNING
            )
        )
        report_item_list.append(
            reports.unable_to_get_sbd_status(
                node.label,
                "",  # reason is in the previous item; warning is implicit
            )
        )
    except (ValueError, KeyError) as e:
        report_item_list.append(
            reports.unable_to_get_sbd_status(node.label, str(e))
        )
def node_add_outside_cluster(lib, argv, modifiers):
    """Ask an existing cluster node to add a new node — not implemented yet."""
    #pylint: disable=unreachable
    raise CmdLineInputError("not implemented") # TODO
    # Everything below is intentionally unreachable, kept for the future
    # implementation.
    if len(argv) != 2:
        raise CmdLineInputError(
            "Usage: pcs cluster node add-outside <node[,node-altaddr]> <cluster node>"
        )
    if len(modifiers["watchdog"]) > 1:
        raise CmdLineInputError("Multiple watchdogs defined")

    node_ring0, node_ring1 = utils.parse_multiring_node(argv[0])
    cluster_node = argv[1]

    data = [("new_nodename", node_ring0)]
    if node_ring1:
        data.append(("new_ring1addr", node_ring1))
    if modifiers["watchdog"]:
        data.append(("watchdog", modifiers["watchdog"][0]))
    if modifiers["device"]:
        # repeating the "devices[]" key is how arrays are sent over the wire
        data += [("devices[]", device) for device in modifiers["device"]]

    communicator = utils.get_lib_env().node_communicator()
    try:
        communicator.call_host(
            cluster_node,
            "remote/add_node_all",
            communicator.format_data_dict(data),
        )
    except NodeCommandUnsuccessfulException as e:
        print(e.reason)
    except NodeCommunicationException as e:
        process_library_reports(
            [node_communicator_exception_to_report_item(e)]
        )
def send_all_config_to_node(
    communicator,
    reporter,
    node,
    rewrite_existing=False,
    skip_wrong_config=False
):
    """
    Send all booth configs from the default booth config directory and their
    authfiles to the specified node.

    communicator -- NodeCommunicator
    reporter -- report processor
    node -- NodeAddress
    rewrite_existing -- if True overwrite files already present on the node
    skip_wrong_config -- if True skip local configs that are unreadable
    """
    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
    if not config_dict:
        return

    # Build the payload: each config, followed by its authfile if readable.
    file_list = []
    for config, config_data in sorted(config_dict.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(config_data)
            )
            file_list.append({
                "name": config,
                "data": config_data,
                "is_authfile": False
            })
            if authfile_path:
                content = booth_conf.read_authfile(reporter, authfile_path)
                if not content:
                    continue
                file_list.append({
                    "name": os.path.basename(authfile_path),
                    "data": base64.b64encode(content).decode("utf-8"),
                    "is_authfile": True
                })
        except LibraryError:
            reporter.process(reports.booth_skipping_config(
                config, "unable to parse config"
            ))

    data = [("data_json", json.dumps(file_list))]
    if rewrite_existing:
        data.append(("rewrite_existing", "1"))

    reporter.process(reports.booth_sending_local_configs_to_node(node.label))
    try:
        response = json.loads(communicator.call_node(
            node,
            "remote/booth_save_files",
            NodeCommunicator.format_data_dict(data)
        ))
        report_list = []
        # these are loop-invariant: severity/forceability depend only on
        # whether overwriting was requested
        existing_severity = (
            Severities.WARNING if rewrite_existing else Severities.ERROR
        )
        existing_forceable = (
            None if rewrite_existing else report_codes.FORCE_FILE_OVERWRITE
        )
        for file in response["existing"]:
            report_list.append(lib_reports.file_already_exists(
                None, file, existing_severity, existing_forceable, node.label
            ))
        for file, reason in response["failed"].items():
            report_list.append(reports.booth_config_not_saved(
                node.label, reason, file
            ))
        reporter.process_list(report_list)
        reporter.process(
            reports.booth_config_saved(node.label, response["saved"])
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except (KeyError, ValueError):
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def send_all_config_to_node(
    communicator,
    reporter,
    node,
    rewrite_existing=False,
    skip_wrong_config=False
):
    """
    Send all booth configs from the default booth config directory and their
    authfiles to the specified node.

    communicator -- NodeCommunicator
    reporter -- report processor
    node -- NodeAddress
    rewrite_existing -- if True overwrite files already present on the node
    skip_wrong_config -- if True skip local configs that are unreadable
    """
    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
    if not config_dict:
        return

    reporter.process(reports.booth_config_distribution_started())

    # Assemble the payload: every parsable config plus its readable authfile.
    file_list = []
    for config, config_data in sorted(config_dict.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(config_data)
            )
            file_list.append({
                "name": config,
                "data": config_data,
                "is_authfile": False
            })
            if authfile_path:
                content = booth_conf.read_authfile(reporter, authfile_path)
                if not content:
                    continue
                file_list.append({
                    "name": os.path.basename(authfile_path),
                    "data": base64.b64encode(content).decode("utf-8"),
                    "is_authfile": True
                })
        except LibraryError:
            reporter.process(reports.booth_skipping_config(
                config, "unable to parse config"
            ))

    data = [("data_json", json.dumps(file_list))]
    if rewrite_existing:
        data.append(("rewrite_existing", "1"))

    try:
        response = json.loads(communicator.call_node(
            node,
            "remote/booth_save_files",
            NodeCommunicator.format_data_dict(data)
        ))
        report_list = []
        for file in response["existing"]:
            # pre-existing files are only an error when overwrite was not
            # requested; then the error is forceable
            report_list.append(lib_reports.file_already_exists(
                None,
                file,
                Severities.WARNING if rewrite_existing else Severities.ERROR,
                (
                    None if rewrite_existing
                    else report_codes.FORCE_FILE_OVERWRITE
                ),
                node.label
            ))
        for file, reason in response["failed"].items():
            report_list.append(reports.booth_config_distribution_node_error(
                node.label, reason, file
            ))
        reporter.process_list(report_list)
        reporter.process(
            reports.booth_config_accepted_by_node(node.label, response["saved"])
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except (KeyError, ValueError):
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def _add_device_model_net(
    lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
):
    """
    Configure cluster nodes for qdevice model net and distribute certificates.

    string qnetd_host address of qdevice provider (qnetd host)
    string cluster_name name of the cluster to which qdevice is being added
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    communicator = lib_env.node_communicator()
    runner = lib_env.cmd_runner()
    reporter = lib_env.report_processor

    reporter.process(reports.qdevice_certificate_distribution_started())

    # fetch the qnetd CA certificate; a communication failure aborts the whole
    # operation
    try:
        qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
            communicator, qnetd_host
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))

    # initialize certificate storage on all cluster nodes in parallel
    parallel_nodes_communication_helper(
        qdevice_net.remote_client_setup,
        [((communicator, node, qnetd_ca_cert), {}) for node in cluster_nodes],
        reporter,
        skip_offline_nodes
    )

    # create a client certificate request and have the qnetd host sign it
    cert_request = qdevice_net.client_generate_certificate_request(
        runner, cluster_name
    )
    try:
        signed_certificate = qdevice_net.remote_sign_certificate_request(
            communicator, qnetd_host, cert_request, cluster_name
        )
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))

    # transform the signed certificate to pk12 format suitable for the nodes
    pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)

    def distribute_certificate(reporter, communicator, node, pk12):
        # import on one node, then acknowledge via the reporter
        qdevice_net.remote_client_import_certificate_and_key(
            communicator, node, pk12
        )
        reporter.process(
            reports.qdevice_certificate_accepted_by_node(node.label)
        )

    # push the final certificate to every node in parallel
    parallel_nodes_communication_helper(
        distribute_certificate,
        [((reporter, communicator, node, pk12), {}) for node in cluster_nodes],
        reporter,
        skip_offline_nodes
    )