def response_items_to_result(response_items, expected_keys, node_label):
    """
    Check format of response_items and return a dict where each raw result
    dict is transformed to a Result.

    E.g. {"file1": {"code": "success", "message": ""}}
        -> {"file1": Result("success", "")}

    dict response_items -- item name as key, dict with the result as value
    list expected_keys -- keys expected to be present in response_items
    string node_label -- node label for reporting an invalid format

    Raises LibraryError if the response does not have the expected format.
    """
    if set(expected_keys) != set(response_items.keys()):
        raise LibraryError(reports.invalid_response_format(node_label))
    for result in response_items.values():
        if (
            not isinstance(result, dict)
            or "code" not in result
            or "message" not in result
        ):
            raise LibraryError(reports.invalid_response_format(node_label))
    # dict comprehension instead of dict([...]) -- same mapping, clearer
    return {
        file_key: Result(raw_result["code"], raw_result["message"])
        for file_key, raw_result in response_items.items()
    }
def _process_response(self, response):
    """
    Process a single response; all resulting reports are only collected
    into self._report_items, nothing is sent outside.
    """
    report = self._get_response_report(response)
    if report:
        self._report_items.append(report)
        return
    target = response.request.target
    try:
        data = json.loads(response.data)
    except ValueError:
        self._report_items.append(
            reports.invalid_response_format(target.label)
        )
        return
    # "node_available" is a mandatory field
    if not (isinstance(data, dict) and "node_available" in data):
        self._report_items.append(
            reports.invalid_response_format(target.label)
        )
        return
    self._check_response(data, self._report_items, target.label)
def pull_config(env, node_name):
    """
    Fetch the booth config from the given node and store it on the local
    system, overwriting any existing files.

    env -- LibraryEnvironment
    node_name -- string, name of node from which config should be fetched
    """
    name = env.booth.name
    env.report_processor.process(
        booth_reports.booth_fetching_config_from_node_started(node_name, name)
    )
    com_cmd = BoothGetConfig(env.report_processor, name)
    com_cmd.set_targets([
        env.get_node_target_factory().get_target_from_hostname(node_name)
    ])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    output = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        env.booth.create_config(output["config"]["data"], True)
        authfile = output["authfile"]
        if authfile["name"] is not None and authfile["data"]:
            env.booth.set_key_path(
                os.path.join(settings.booth_config_dir, authfile["name"])
            )
            env.booth.create_key(
                base64.b64decode(authfile["data"].encode("utf-8")),
                True
            )
        env.report_processor.process(
            booth_reports.booth_config_accepted_by_node(name_list=[name])
        )
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _host_check_remote_node(host_info_dict): # Version of services may not be the same across the existing cluster # nodes, so it's not easy to make this check properly. report_list = [] required_service_list = ["pacemaker_remote"] required_as_stopped_service_list = (required_service_list + ["pacemaker", "corosync"]) for host_name, host_info in host_info_dict.items(): try: services = host_info["services"] missing_service_list = [ service for service in required_service_list if not services[service]["installed"] ] if missing_service_list: report_list.append( reports.service_not_installed(host_name, missing_service_list)) cannot_be_running_service_list = [ service for service in required_as_stopped_service_list if service in services and services[service]["running"] ] if cannot_be_running_service_list: report_list.append( reports.host_already_in_cluster_services( host_name, cannot_be_running_service_list, )) if host_info["cluster_configuration_exists"]: report_list.append( reports.host_already_in_cluster_config(host_name)) except (KeyError, TypeError): report_list.append(reports.invalid_response_format(host_name)) return report_list
def _process_response(self, response):
    """
    Turn one node's response into per-action success / failure reports.
    """
    report = self._get_response_report(response)
    if report:
        self._report(report)
        return
    target = response.request.target
    try:
        results = json.loads(response.data)
    except ValueError:
        self._report(reports.invalid_response_format(target.label))
        return
    results = node_communication_format.response_to_result(
        results,
        self._response_key,
        list(self._action_definition.keys()),
        target.label
    )
    for key, item_response in sorted(results.items()):
        # only successes are reported individually
        if self._is_success(item_response):
            report = self._success_report(target.label, key)
        else:
            report = self._failure_report(
                target.label,
                key,
                node_communication_format.get_format_result(
                    self._code_message_map
                )(item_response),
                **self._action_error_force
            )
        self._report(report)
def _process_response(self, response):
    """
    Handle one node's response to a corosync config reload request.

    Returns the next list of requests (the next node to try) unless the
    reload succeeded on this node.
    """
    report = response_to_report_item(
        response, severity=ReportItemSeverity.WARNING
    )
    node = response.request.target.label
    if report is not None:
        self.__has_failures = True
        self._report(report)
        return self._get_next_list()
    try:
        output = json.loads(response.data)
        if output["code"] == "reloaded":
            self.__was_successful = True
            self._report(reports.corosync_config_reloaded(node))
            return
        if output["code"] == "not_running":
            self._report(reports.corosync_config_reload_not_possible(node))
        else:
            self.__has_failures = True
            self._report(
                reports.corosync_config_reload_error(
                    output["message"],
                    node=node,
                    severity=ReportItemSeverity.WARNING,
                ))
    except (ValueError, LookupError):
        # An unparsable response is a failure as well; mark it so the
        # overall command does not end up looking successful. This makes
        # the handler consistent with the sibling reload handler which
        # sets the failure flag here.
        self.__has_failures = True
        self._report(
            reports.invalid_response_format(
                node,
                severity=ReportItemSeverity.WARNING,
            ))
    return self._get_next_list()
def pull_config_from_node(communicator, node, name):
    """
    Get the config of the specified booth instance from 'node', together
    with its authfile if there is one.

    The returned dictionary has this format:
    {
        "config": {
            "name": <file name of config>,
            "data": <content of file>
        },
        "authfile": {
            "name": <file name of authfile, None if it doesn't exist>,
            "data": <base64 coded content of authfile>
        }

    communicator -- NodeCommunicator
    node -- NodeAddresses
    name -- name of booth instance
    """
    try:
        response = communicator.call_node(
            node,
            "remote/booth_get_config",
            NodeCommunicator.format_data_dict([("name", name)])
        )
        return json.loads(response)
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except ValueError:
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def _process_response(self, response):
    """
    Evaluate one node's cluster-start status response and report the
    result; nodes which have not started yet are queued for another
    query round.
    """
    report = response_to_report_item(response)
    target = response.request.target
    if report is not None:
        if not response.was_connected:
            self._not_yet_started_target_list.append(target)
            report = response_to_report_item(
                response, severity=ReportItemSeverity.WARNING
            )
        self._report(report)
        return
    try:
        parsed_response = json.loads(response.data)
        # If the node is offline, we only get the "offline" key. Asking
        # for any other in that case results in KeyError which is not
        # what we want.
        still_pending = parsed_response.get("pending", True)
        is_online = parsed_response.get("online", False)
        if still_pending or not is_online:
            self._not_yet_started_target_list.append(target)
            return
        report = reports.cluster_start_success(target.label)
    except (json.JSONDecodeError, KeyError):
        report = reports.invalid_response_format(target.label)
    self._report(report)
def _process_response(self, response):
    """
    Check one node's SBD prerequisites from its response and report the
    findings.
    """
    report = response_to_report_item(response)
    if report:
        self._report(report)
        return
    report_list = []
    node_label = response.request.target.label
    try:
        data = json.loads(response.data)
        if not data["sbd"]["installed"]:
            report_list.append(reports.sbd_not_installed(node_label))
        if not data["watchdog"]["exist"]:
            report_list.append(reports.watchdog_not_found(
                node_label, data["watchdog"]["path"]
            ))
        for device in data.get("device_list", []):
            if not device["exist"]:
                report_list.append(reports.sbd_device_does_not_exist(
                    device["path"], node_label
                ))
            elif not device["block_device"]:
                report_list.append(reports.sbd_device_is_not_block_device(
                    device["path"], node_label
                ))
        # TODO maybe we can check whenever device is initialized by sbd (by
        # running 'sbd -d <dev> dump;')
    except (ValueError, KeyError, TypeError):
        report_list.append(reports.invalid_response_format(node_label))
    if report_list:
        self._report_list(report_list)
    else:
        self._report(
            reports.sbd_check_success(response.request.target.label)
        )
def _process_response(self, response):
    """
    Process one node's response to a booth config distribution request:
    report saved files, already-existing files (forceable) and failures.
    """
    report = self._get_response_report(response)
    if report is not None:
        self._report(report)
        return
    target = response.request.target
    try:
        parsed_data = json.loads(response.data)
        self._report(
            reports_booth.booth_config_accepted_by_node(
                target.label, list(parsed_data["saved"])
            ))
        for filename in list(parsed_data["existing"]):
            self._report(
                reports.file_already_exists(
                    "",
                    # TODO specify file type; this will be overhauled to
                    # a generic file transport framework anyway
                    filename,
                    severity=(ReportItemSeverity.WARNING
                        if self._rewrite_existing
                        else ReportItemSeverity.ERROR),
                    forceable=(None if self._rewrite_existing
                        else report_codes.FORCE_FILE_OVERWRITE),
                    node=target.label,
                ))
        # 'file_path' instead of 'file' so the builtin is not shadowed
        for file_path, reason in dict(parsed_data["failed"]).items():
            self._report(
                reports_booth.booth_config_distribution_node_error(
                    target.label, reason, file_path
                ))
    except (KeyError, TypeError, ValueError):
        self._report(reports.invalid_response_format(target.label))
def pull_config(env, node_name, name):
    """
    Fetch the config of the given booth instance from the specified node
    and save it on the local system, overwriting any existing files.

    env -- LibraryEnvironment
    node_name -- string, name of node from which config should be fetched
    name -- string, name of booth instance of which config should be fetched
    """
    env.report_processor.process(
        booth_reports.booth_fetching_config_from_node_started(node_name, name)
    )
    output = sync.pull_config_from_node(
        env.node_communicator(), NodeAddresses(node_name), name
    )
    try:
        env.booth.create_config(output["config"]["data"], True)
        authfile = output["authfile"]
        if authfile["name"] is not None and authfile["data"]:
            env.booth.set_key_path(os.path.join(
                settings.booth_config_dir, authfile["name"]
            ))
            env.booth.create_key(
                base64.b64decode(authfile["data"].encode("utf-8")),
                True
            )
        env.report_processor.process(
            booth_reports.booth_config_accepted_by_node(name_list=[name])
        )
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _process_response(self, response):
    """
    Turn one node's response into per-action success / failure reports,
    translating action keys for reporting.
    """
    report = self._get_response_report(response)
    if report:
        self._report(report)
        return
    target = response.request.target
    try:
        results = json.loads(response.data)
    except ValueError:
        self._report(reports.invalid_response_format(target.label))
        return
    results = node_communication_format.response_to_result(
        results,
        self._response_key,
        list(self._action_definition.keys()),
        target.label
    )
    for key, item_response in sorted(results.items()):
        # only successes are reported individually
        if self._is_success(item_response):
            report = self._success_report(
                target.label,
                self._action_key_to_report(key),
            )
        else:
            report = self._failure_report(
                target.label,
                self._action_key_to_report(key),
                node_communication_format.get_format_result(
                    self._code_message_map
                )(item_response),
                **self._action_error_force
            )
        self._report(report)
def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
    """
    Check that SBD can be enabled on the specified 'node'.

    Raises LibraryError if the check fails.
    Raises NodeCommunicationException if there is a communication issue.

    report_processor --
    node_communicator -- NodeCommunicator
    node -- NodeAddresses
    watchdog -- watchdog path
    """
    report_list = []
    try:
        data = json.loads(check_sbd(node_communicator, node, watchdog))
        if not data["sbd"]["installed"]:
            report_list.append(reports.sbd_not_installed(node.label))
        if not data["watchdog"]["exist"]:
            report_list.append(
                reports.watchdog_not_found(node.label, watchdog)
            )
    except (ValueError, KeyError):
        raise LibraryError(reports.invalid_response_format(node.label))
    if report_list:
        raise LibraryError(*report_list)
    report_processor.process(reports.sbd_check_success(node.label))
def pull_config(env, node_name, name):
    """
    Get the config of the given booth instance from the specified node and
    save it on the local system. Existing files are overwritten.

    env -- LibraryEnvironment
    node_name -- string, name of node from which config should be fetched
    name -- string, name of booth instance of which config should be fetched
    """
    env.report_processor.process(
        booth_reports.booth_fetching_config_from_node_started(node_name, name)
    )
    output = sync.pull_config_from_node(
        env.node_communicator(),
        NodeAddresses(node_name),
        name,
    )
    try:
        env.booth.create_config(output["config"]["data"], True)
        authfile = output["authfile"]
        if authfile["name"] is not None and authfile["data"]:
            key_path = os.path.join(
                settings.booth_config_dir, authfile["name"]
            )
            env.booth.set_key_path(key_path)
            env.booth.create_key(
                base64.b64decode(authfile["data"].encode("utf-8")),
                True,
            )
        env.report_processor.process(
            booth_reports.booth_config_accepted_by_node(name_list=[name])
        )
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def unpack_items_from_response(main_response, main_key, node_label):
    """
    Check format of main_response and return main_response[main_key].

    main_response must be a dict carrying, under 'main_key', a dict which
    maps item names to result dicts. E.g.
    {
        "files": {
            "file1": {"code": "success", "message": ""}
        }
    }

    string main_key -- name of key under which the results live
    string node_label -- node label for reporting an invalid format

    Raises LibraryError if the response does not have the expected format.
    """
    if (
        not isinstance(main_response, dict)
        or main_key not in main_response
        or not isinstance(main_response[main_key], dict)
    ):
        raise LibraryError(reports.invalid_response_format(node_label))
    return main_response[main_key]
def check_can_add_node_to_cluster(node_communicator, node, report_items):
    """
    Ask the given node whether it can be added to a cluster; append report
    items describing any obstacle to report_items.
    """
    try:
        response = node_communicator.call_node(
            node, "remote/node_available", data=None
        )
        availability_info = json.loads(response)
        if availability_info["node_available"]:
            return
        if availability_info.get("pacemaker_remote", False):
            report_items.append(reports.cannot_add_node_is_running_service(
                node.label, "pacemaker_remote"
            ))
            return
        report_items.append(
            reports.cannot_add_node_is_in_cluster(node.label)
        )
    except NodeCommunicationException as e:
        report_items.append(
            node_communicator_exception_to_report_item(
                e,
                ReportItemSeverity.ERROR,
            )
        )
    except (ValueError, TypeError, KeyError):
        report_items.append(reports.invalid_response_format(node.label))
def _process_response(self, response):
    """
    Handle one node's response to a corosync config reload request and
    return the next batch of requests unless the reload succeeded.
    """
    node = response.request.target.label
    report = response_to_report_item(
        response, severity=ReportItemSeverity.WARNING
    )
    if report is not None:
        self.__has_failures = True
        self._report(report)
        return self._get_next_list()
    try:
        output = json.loads(response.data)
        if output["code"] == "reloaded":
            self.__was_successful = True
            self._report(reports.corosync_config_reloaded(node))
            return []
        if output["code"] == "not_running":
            self._report(reports.corosync_config_reload_not_possible(node))
        else:
            self.__has_failures = True
            self._report(reports.corosync_config_reload_error(
                output["message"],
                node=node,
                severity=ReportItemSeverity.WARNING,
            ))
    except (ValueError, LookupError):
        self.__has_failures = True
        self._report(reports.invalid_response_format(
            node,
            severity=ReportItemSeverity.WARNING,
        ))
    return self._get_next_list()
def _process_response(self, response):
    """
    Store one node's parsed JSON response under its host name; report an
    invalid format if the response cannot be parsed.
    """
    report = self._get_response_report(response)
    if report:
        self._report(report)
        return
    host_name = response.request.target.label
    try:
        parsed = json.loads(response.data)
    except json.JSONDecodeError:
        self._report(reports.invalid_response_format(host_name))
    else:
        self._responses[host_name] = parsed
def _process_response(self, response):
    """
    Decode one node's base64 response and collect it together with its
    target; report an invalid format when decoding fails.
    """
    report = self._get_response_report(response)
    if report is not None:
        self._report(report)
        return
    target = response.request.target
    try:
        decoded = base64.b64decode(response.data)
    except (TypeError, binascii.Error):
        self._report(reports.invalid_response_format(target.label))
    else:
        self._output_data.append((target, decoded))
def _process_response(self, response):
    """
    Parse one node's JSON response and collect it together with its
    target; report an invalid format when parsing fails.
    """
    report = self._get_response_report(response)
    if report is not None:
        self._report(report)
        return
    target = response.request.target
    try:
        parsed = json.loads(response.data)
    except ValueError:
        self._report(reports.invalid_response_format(target.label))
    else:
        self._data.append((target, parsed))
def remote_qdevice_get_ca_certificate(node_communicator, host):
    """
    Connect to a qnetd host and fetch its CA certificate.

    string host -- address of the qnetd host

    Raises LibraryError if the response cannot be base64-decoded.
    """
    try:
        response = node_communicator.call_host(
            host,
            "remote/qdevice_net_get_ca_certificate",
            None
        )
        return base64.b64decode(response)
    except (TypeError, binascii.Error):
        raise LibraryError(reports.invalid_response_format(host))
def remote_qdevice_get_ca_certificate(node_communicator, host):
    """
    Connect to a qnetd host and get its CA certificate.

    string host -- address of the qnetd host

    Raises LibraryError if the response cannot be base64-decoded.
    """
    try:
        return base64.b64decode(node_communicator.call_host(
            host, "remote/qdevice_net_get_ca_certificate", None
        ))
    except (TypeError, binascii.Error):
        raise LibraryError(reports.invalid_response_format(host))
def config_text(env, name, node_name=None):
    """
    Get the booth configuration in raw format.

    string name -- name of booth instance whose config should be returned
    string node_name -- get the config from the specified node, or from the
        local host if None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()
    remote_data = sync.pull_config_from_node(
        env.node_communicator(), NodeAddresses(node_name), name
    )
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def pull_config(env, node_name, instance_name=None):
    """
    Fetch the booth config from the specified node and save it on the
    local system, overwriting any existing files.

    LibraryEnvironment env
    string node_name -- name of the node from which the config should be
        fetched
    string instance_name -- booth instance name
    """
    report_processor = SimpleReportProcessor(env.report_processor)
    booth_env = env.get_booth_env(instance_name)
    instance_name = booth_env.instance_name
    _ensure_live_env(env, booth_env)
    env.report_processor.process(
        booth_reports.booth_fetching_config_from_node_started(
            node_name, instance_name
        )
    )
    com_cmd = BoothGetConfig(env.report_processor, instance_name)
    com_cmd.set_targets([
        env.get_node_target_factory().get_target_from_hostname(node_name)
    ])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    output = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        # TODO adapt to new file transfer framework once it is written
        authfile = output["authfile"]
        if authfile["name"] is not None and authfile["data"]:
            authfile_name = authfile["name"]
            report_list = config_validators.check_instance_name(authfile_name)
            if report_list:
                raise LibraryError(*report_list)
            booth_key = FileInstance.for_booth_key(authfile_name)
            booth_key.write_raw(
                base64.b64decode(authfile["data"].encode("utf-8")),
                can_overwrite=True
            )
        booth_env.config.write_raw(
            output["config"]["data"].encode("utf-8"),
            can_overwrite=True
        )
        env.report_processor.process(
            booth_reports.booth_config_accepted_by_node(
                name_list=[instance_name]
            )
        )
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
    if report_processor.has_errors:
        raise LibraryError()
def config_text(env, name, node_name=None):
    """
    Get configuration in raw format.

    string name -- name of booth instance whose config should be returned
    string node_name -- get the config from specified node or local host if
        None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()
    communicator = env.node_communicator()
    remote_data = sync.pull_config_from_node(
        communicator, NodeAddresses(node_name), name
    )
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _process_response(self, response):
    """
    Check one node's response to a 'remove nodes from pacemaker' request
    and report a failure if the node did not succeed.
    """
    report = self._get_response_report(response)
    if report is not None:
        self._report(report)
        return
    node_label = response.request.target.label
    try:
        output = json.loads(response.data)
        if output["code"] != "success":
            self._report(reports.node_remove_in_pacemaker_failed(
                self._nodes_to_remove,
                node=node_label,
                reason=output["message"],
            ))
    except (KeyError, json.JSONDecodeError):
        self._report(reports.invalid_response_format(node_label))
def config_text(env, name, node_name=None):
    """
    Get configuration in raw format.

    string name -- name of booth instance whose config should be returned
    string node_name -- get the config from specified node or local host if
        None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()
    com_cmd = BoothGetConfig(env.report_processor, name)
    target = env.get_node_target_factory().get_target_from_hostname(node_name)
    com_cmd.set_targets([target])
    remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _process_response(self, response):
    """
    Check one node's response to a 'remove nodes from pacemaker' request
    and report a failure if the node did not succeed.
    """
    report = self._get_response_report(response)
    if report is not None:
        self._report(report)
        return
    node_label = response.request.target.label
    try:
        output = json.loads(response.data)
    except json.JSONDecodeError:
        self._report(reports.invalid_response_format(node_label))
        return
    try:
        if output["code"] != "success":
            self._report(
                reports.node_remove_in_pacemaker_failed(
                    self._nodes_to_remove,
                    node=node_label,
                    reason=output["message"],
                )
            )
    except KeyError:
        self._report(reports.invalid_response_format(node_label))
def remote_sign_certificate_request(node_communicator, host, cert_request, cluster_name):
    """
    Connect to a qnetd host and have it sign a node certificate request.

    string host -- address of the qnetd host
    cert_request -- certificate request to be signed
    string cluster_name -- name of the cluster to which qdevice is being added

    Raises LibraryError if the response cannot be base64-decoded.
    """
    try:
        # keep the encoding inside the try block so a bad cert_request is
        # reported as an invalid format as well
        data = external.NodeCommunicator.format_data_dict([
            ("certificate_request", base64.b64encode(cert_request)),
            ("cluster_name", cluster_name),
        ])
        return base64.b64decode(
            node_communicator.call_host(
                host, "remote/qdevice_net_sign_node_certificate", data
            )
        )
    except (TypeError, binascii.Error):
        raise LibraryError(reports.invalid_response_format(host))
def _process_response(self, response):
    """
    Check one node's SBD prerequisites (sbd installed, watchdog usable,
    devices present) from its response and report the findings.
    """
    report = response_to_report_item(response)
    if report:
        self._report(report)
        return
    node_label = response.request.target.label
    report_list = []
    try:
        data = json.loads(response.data)
        if not data["sbd"]["installed"]:
            report_list.append(reports.sbd_not_installed(node_label))
        if "watchdog" in data:
            watchdog = data["watchdog"]
            if not watchdog["exist"]:
                report_list.append(reports.watchdog_not_found(
                    node_label, watchdog["path"]
                ))
            elif not watchdog.get("is_supported", True):
                report_list.append(reports.sbd_watchdog_not_supported(
                    node_label, watchdog["path"]
                ))
        for device in data.get("device_list", []):
            if not device["exist"]:
                report_list.append(reports.sbd_device_does_not_exist(
                    device["path"], node_label
                ))
            elif not device["block_device"]:
                report_list.append(reports.sbd_device_is_not_block_device(
                    device["path"], node_label
                ))
        # TODO maybe we can check whenever device is initialized by sbd
        # (by running 'sbd -d <dev> dump;')
    except (ValueError, KeyError, TypeError):
        report_list.append(reports.invalid_response_format(node_label))
    if report_list:
        self._report_list(report_list)
    else:
        self._report(
            reports.sbd_check_success(response.request.target.label)
        )
def pull_config(env, node_name):
    """
    Get config from specified node and save it on the local system. It
    will rewrite existing files.

    env -- LibraryEnvironment
    node_name -- string, name of node from which config should be fetched
    """
    name = env.booth.name
    env.report_processor.process(
        booth_reports.booth_fetching_config_from_node_started(node_name, name))
    com_cmd = BoothGetConfig(env.report_processor, name)
    com_cmd.set_targets(
        [env.get_node_target_factory().get_target_from_hostname(node_name)])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    output = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        env.booth.create_config(output["config"]["data"], True)
        authfile = output["authfile"]
        if authfile["name"] is not None and authfile["data"]:
            env.booth.set_key_path(
                os.path.join(settings.booth_config_dir, authfile["name"]))
            env.booth.create_key(
                base64.b64decode(authfile["data"].encode("utf-8")), True)
        env.report_processor.process(
            booth_reports.booth_config_accepted_by_node(name_list=[name]))
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _process_response(self, response):
    """
    Handle one node's response to a cluster status request; on success
    store the status, otherwise report the failure and move on to the
    next node.
    """
    report = response_to_report_item(
        response, severity=ReportItemSeverity.WARNING
    )
    if report is not None:
        self._report(report)
        return self._get_next_list()
    node = response.request.target.label
    try:
        output = json.loads(response.data)
        if output["status"] == "success":
            self._was_successful = True
            self._cluster_status = output["data"]
            return []
        if output["status_msg"]:
            self._report(
                reports.node_communication_command_unsuccessful(
                    node,
                    response.request.action,
                    output["status_msg"],
                )
            )
        # TODO Node name should be added to each received report item and
        # those modified report items should be reported. That, however,
        # requires reports overhaul which would add possibility to add a
        # node name to any report item. Also, infos and warnings should not
        # be ignored.
        if output["report_list"]:
            for report_data in output["report_list"]:
                is_error = (
                    report_data["severity"] == ReportItemSeverity.ERROR
                )
                if is_error and report_data["report_text"]:
                    self._report(
                        reports.node_communication_command_unsuccessful(
                            node,
                            response.request.action,
                            report_data["report_text"],
                        )
                    )
    except (ValueError, LookupError, TypeError):
        self._report(
            reports.invalid_response_format(
                node,
                severity=ReportItemSeverity.WARNING,
            )
        )
    return self._get_next_list()
def _process_response(self, response):
    """
    Evaluate one node's cluster-start status response; queue nodes which
    have not started yet for another query round.
    """
    report = response_to_report_item(response)
    target = response.request.target
    if report is None:
        try:
            parsed_response = json.loads(response.data)
            # If the node is offline, we only get the "offline" key. Asking
            # for any other in that case results in KeyError which is not
            # what we want.
            still_pending = parsed_response.get("pending", True)
            is_online = parsed_response.get("online", False)
            if still_pending or not is_online:
                self._not_yet_started_target_list.append(target)
                return
            report = reports.cluster_start_success(target.label)
        except (json.JSONDecodeError, KeyError):
            report = reports.invalid_response_format(target.label)
    else:
        if not response.was_connected:
            self._not_yet_started_target_list.append(target)
            report = response_to_report_item(
                response, severity=ReportItemSeverity.WARNING
            )
    self._report(report)
def remote_sign_certificate_request(
    node_communicator, host, cert_request, cluster_name
):
    """
    Connect to a qdevice host and have it sign a node certificate request.

    string host -- address of the qnetd host
    cert_request -- certificate request to be signed
    string cluster_name -- name of the cluster to which qdevice is being added

    Raises LibraryError if the response cannot be base64-decoded.
    """
    try:
        signed = node_communicator.call_host(
            host,
            "remote/qdevice_net_sign_node_certificate",
            external.NodeCommunicator.format_data_dict([
                ("certificate_request", base64.b64encode(cert_request)),
                ("cluster_name", cluster_name),
            ])
        )
        return base64.b64decode(signed)
    except (TypeError, binascii.Error):
        raise LibraryError(reports.invalid_response_format(host))
def check_sbd_on_node(report_processor, node_communicator, node, watchdog, device_list):
    """
    Check that SBD can be enabled on the specified 'node'.

    Raises LibraryError if the check fails.
    Raises NodeCommunicationException if there is a communication issue.

    report_processor --
    node_communicator -- NodeCommunicator
    node -- NodeAddresses
    watchdog -- watchdog path
    device_list -- list of strings
    """
    report_list = []
    try:
        data = json.loads(
            check_sbd(node_communicator, node, watchdog, device_list)
        )
        if not data["sbd"]["installed"]:
            report_list.append(reports.sbd_not_installed(node.label))
        if not data["watchdog"]["exist"]:
            report_list.append(
                reports.watchdog_not_found(node.label, watchdog)
            )
        for device in data.get("device_list", []):
            if not device["exist"]:
                report_list.append(reports.sbd_device_does_not_exist(
                    device["path"], node.label
                ))
            elif not device["block_device"]:
                report_list.append(reports.sbd_device_is_not_block_device(
                    device["path"], node.label
                ))
        # TODO maybe we can check whenever device is initialized by sbd (by
        # running 'sbd -d <dev> dump;')
    except (ValueError, KeyError, TypeError):
        raise LibraryError(reports.invalid_response_format(node.label))
    if report_list:
        raise LibraryError(*report_list)
    report_processor.process(reports.sbd_check_success(node.label))
def config_text(env, node_name=None):
    """
    Get configuration in raw format.

    string node_name -- get the config from specified node or local host if
        None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()
    name = env.booth.name
    com_cmd = BoothGetConfig(env.report_processor, name)
    com_cmd.set_targets([
        env.get_node_target_factory().get_target_from_hostname(node_name)
    ])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def config_text(env: LibraryEnvironment, instance_name=None, node_name=None):
    """
    Get configuration in raw format.

    env
    string instance_name -- booth instance name
    string node_name -- get the config from specified node or local host if
        None
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    instance_name = booth_env.instance_name
    # It does not make any sense for the cli to read a ghost file and send it
    # to lib so that the lib could return it unchanged to cli. Just use 'cat'.
    # When node_name is specified, using ghost files doesn't make any sense
    # either.
    _ensure_live_env(env, booth_env)
    if node_name is None:
        try:
            return booth_env.config.read_raw()
        except RawFileError as e:
            report_processor.report(raw_file_error_report(e))
        if report_processor.has_errors:
            raise LibraryError()
    com_cmd = BoothGetConfig(env.report_processor, instance_name)
    com_cmd.set_targets([
        env.get_node_target_factory().get_target_from_hostname(node_name)
    ])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        # TODO switch to new file transfer commands (not implemented yet)
        # which send and receive configs as bytes instead of strings
        return remote_data["config"]["data"].encode("utf-8")
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def config_text(env, node_name=None):
    """
    Get configuration in raw format.

    string node_name -- get the config from specified node or local host if
        None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()
    name = env.booth.name
    com_cmd = BoothGetConfig(env.report_processor, name)
    target = env.get_node_target_factory().get_target_from_hostname(node_name)
    com_cmd.set_targets([target])
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def _host_check_remote_node(host_info_dict): # Version of services may not be the same across the existing cluster # nodes, so it's not easy to make this check properly. report_list = [] required_service_list = ["pacemaker_remote"] required_as_stopped_service_list = ( required_service_list + ["pacemaker", "corosync"] ) for host_name, host_info in host_info_dict.items(): try: services = host_info["services"] missing_service_list = [ service for service in required_service_list if not services[service]["installed"] ] if missing_service_list: report_list.append(reports.service_not_installed( host_name, missing_service_list )) cannot_be_running_service_list = [ service for service in required_as_stopped_service_list if service in services and services[service]["running"] ] if cannot_be_running_service_list: report_list.append( reports.host_already_in_cluster_services( host_name, cannot_be_running_service_list, ) ) if host_info["cluster_configuration_exists"]: report_list.append( reports.host_already_in_cluster_config(host_name) ) except (KeyError, TypeError): report_list.append(reports.invalid_response_format(host_name)) return report_list
def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
    """
    Check if SBD can be enabled on specified 'node'.
    Raises LibraryError if check fails.
    Raises NodeCommunicationException if there is communication issue.

    report_processor --
    node_communicator -- NodeCommunicator
    node -- NodeAddresses
    watchdog -- watchdog path
    """
    problems = []
    try:
        raw_response = check_sbd(node_communicator, node, watchdog)
        status = json.loads(raw_response)
        if not status["sbd"]["installed"]:
            problems.append(reports.sbd_not_installed(node.label))
        if not status["watchdog"]["exist"]:
            problems.append(reports.watchdog_not_found(node.label, watchdog))
    except (ValueError, KeyError):
        # unparsable JSON or missing keys -> malformed response
        raise LibraryError(reports.invalid_response_format(node.label))
    if problems:
        raise LibraryError(*problems)
    report_processor.process(reports.sbd_check_success(node.label))
def _host_check_cluster_setup(host_info_dict, force, check_services_versions=True):
    """
    Check that hosts are ready to have a new cluster set up on them.

    dict host_info_dict -- host name -> host info as reported by the host
    bool force -- downgrade "cluster already exists" errors to warnings
    bool check_services_versions -- also check service versions match across
        hosts
    Returns a list of report items; does not raise.
    """
    report_list = []
    # We only care about services which matter for creating a cluster. It does
    # not make sense to check e.g. booth when a) it will never be used b) it
    # will be used in a year - which means we should do the check in a year.
    service_version_dict = {
        "pacemaker": {},
        "corosync": {},
        "pcsd": {},
    }
    required_service_list = ["pacemaker", "corosync"]
    required_as_stopped_service_list = (
        required_service_list + ["pacemaker_remote"])
    report_severity = (
        ReportItemSeverity.ERROR if not force else ReportItemSeverity.WARNING)
    cluster_exists_on_nodes = False
    for host_name, host_info in host_info_dict.items():
        try:
            services = host_info["services"]
            if check_services_versions:
                for service, version_dict in service_version_dict.items():
                    version_dict[host_name] = services[service]["version"]
            missing_service_list = [
                service for service in required_service_list
                if not services[service]["installed"]
            ]
            if missing_service_list:
                report_list.append(
                    reports.service_not_installed(
                        host_name, missing_service_list))
            cannot_be_running_service_list = [
                service for service in required_as_stopped_service_list
                if service in services and services[service]["running"]
            ]
            if cannot_be_running_service_list:
                cluster_exists_on_nodes = True
                report_list.append(
                    reports.host_already_in_cluster_services(
                        host_name,
                        cannot_be_running_service_list,
                        severity=report_severity,
                    ))
            if host_info["cluster_configuration_exists"]:
                cluster_exists_on_nodes = True
                report_list.append(
                    reports.host_already_in_cluster_config(
                        host_name,
                        severity=report_severity,
                    ))
        except (KeyError, TypeError):
            # A malformed response may be missing keys (KeyError) or have a
            # wrong structure entirely (TypeError, e.g. a list where a dict
            # is expected). Catch both, consistently with
            # _host_check_remote_node.
            report_list.append(reports.invalid_response_format(host_name))

    if check_services_versions:
        for service, version_dict in service_version_dict.items():
            report_list.extend(
                _check_for_not_matching_service_versions(
                    service, version_dict))

    if cluster_exists_on_nodes and not force:
        # user may not be aware existing cluster will be overwritten
        report_list.append(reports.cluster_will_be_destroyed())
    return report_list
def send_all_config_to_node(
    communicator,
    reporter,
    node,
    rewrite_existing=False,
    skip_wrong_config=False
):
    """
    Send all booth configs from default booth config directory and their
    authfiles to specified node.

    communicator -- NodeCommunicator
    reporter -- report processor
    node -- NodeAddresses
    rewrite_existing -- if True rewrite existing file
    skip_wrong_config -- if True skip local configs that are unreadable
    """
    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
    if not config_dict:
        return
    file_list = []
    for config, config_data in sorted(config_dict.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(config_data)
            )
            file_list.append({
                "name": config,
                "data": config_data,
                "is_authfile": False
            })
            if authfile_path:
                content = booth_conf.read_authfile(reporter, authfile_path)
                if not content:
                    continue
                file_list.append({
                    "name": os.path.basename(authfile_path),
                    # binary authfile content is transported base64-encoded
                    "data": base64.b64encode(content).decode("utf-8"),
                    "is_authfile": True
                })
        except LibraryError:
            # skip configs we cannot parse but still send the rest
            reporter.process(reports.booth_skipping_config(
                config, "unable to parse config"
            ))
    data = [("data_json", json.dumps(file_list))]
    if rewrite_existing:
        data.append(("rewrite_existing", "1"))
    reporter.process(reports.booth_sending_local_configs_to_node(node.label))
    try:
        response = json.loads(communicator.call_node(
            node,
            "remote/booth_save_files",
            NodeCommunicator.format_data_dict(data)
        ))
        report_list = []
        for file in response["existing"]:
            report_list.append(lib_reports.file_already_exists(
                None,
                file,
                Severities.WARNING if rewrite_existing else Severities.ERROR,
                (
                    None if rewrite_existing
                    else report_codes.FORCE_FILE_OVERWRITE
                ),
                node.label
            ))
        for file, reason in response["failed"].items():
            report_list.append(reports.booth_config_not_saved(
                node.label, reason, file
            ))
        reporter.process_list(report_list)
        reporter.process(
            reports.booth_config_saved(node.label, response["saved"])
        )
    except NodeCommunicationException as e:
        # Fixed: the raise statement was split so that a bare `raise`
        # re-raised NodeCommunicationException and the LibraryError
        # expression was never reached; convert the communication error
        # into a LibraryError report instead.
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except (KeyError, ValueError):
        raise LibraryError(lib_reports.invalid_response_format(node.label))
def send_all_config_to_node(
    communicator,
    reporter,
    target,
    rewrite_existing=False,
    skip_wrong_config=False
):
    """
    Send every booth config from the default booth config directory, together
    with its authfile, to the specified node.

    communicator -- NodeCommunicator
    reporter -- report processor
    target -- node to send the configs to
    rewrite_existing -- if True rewrite existing file
    skip_wrong_config -- if True skip local configs that are unreadable
    """
    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
    if not config_dict:
        return
    reporter.process(reports.booth_config_distribution_started())

    # Collect config files plus their (base64-encoded) authfiles.
    file_list = []
    for config_name, config_data in sorted(config_dict.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(config_data)
            )
            file_list.append({
                "name": config_name,
                "data": config_data,
                "is_authfile": False
            })
            if authfile_path:
                authfile_content = booth_conf.read_authfile(
                    reporter, authfile_path
                )
                if not authfile_content:
                    continue
                file_list.append({
                    "name": os.path.basename(authfile_path),
                    "data": base64.b64encode(
                        authfile_content
                    ).decode("utf-8"),
                    "is_authfile": True
                })
        except LibraryError:
            # an unparsable config is skipped, the rest is still sent
            reporter.process(reports.booth_skipping_config(
                config_name, "unable to parse config"
            ))

    com_cmd = BoothSaveFiles(
        reporter, file_list, rewrite_existing=rewrite_existing
    )
    com_cmd.set_targets([target])
    response = run_and_raise(communicator, com_cmd)[0][1]

    try:
        report_list = []
        overwrite_severity = (
            Severities.WARNING if rewrite_existing else Severities.ERROR
        )
        overwrite_forceable = (
            None if rewrite_existing else report_codes.FORCE_FILE_OVERWRITE
        )
        for file_name in response["existing"]:
            report_list.append(lib_reports.file_already_exists(
                None,
                file_name,
                overwrite_severity,
                overwrite_forceable,
                target.label
            ))
        for file_name, reason in response["failed"].items():
            report_list.append(reports.booth_config_distribution_node_error(
                target.label, reason, file_name
            ))
        reporter.process_list(report_list)
        reporter.process(
            reports.booth_config_accepted_by_node(
                target.label, response["saved"]
            )
        )
    except (KeyError, ValueError):
        raise LibraryError(lib_reports.invalid_response_format(target.label))
def send_all_config_to_node(
    communicator,
    reporter,
    node,
    rewrite_existing=False,
    skip_wrong_config=False
):
    """
    Send all booth configs from default booth config directory and their
    authfiles to specified node.

    communicator -- NodeCommunicator
    reporter -- report processor
    node -- NodeAddresses
    rewrite_existing -- if True rewrite existing file
    skip_wrong_config -- if True skip local configs that are unreadable
    """
    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
    if not config_dict:
        return
    reporter.process(reports.booth_config_distribution_started())
    file_list = []
    for config, config_data in sorted(config_dict.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(config_data)
            )
            file_list.append({
                "name": config,
                "data": config_data,
                "is_authfile": False
            })
            if authfile_path:
                content = booth_conf.read_authfile(reporter, authfile_path)
                if not content:
                    continue
                file_list.append({
                    "name": os.path.basename(authfile_path),
                    # binary authfile content is transported base64-encoded
                    "data": base64.b64encode(content).decode("utf-8"),
                    "is_authfile": True
                })
        except LibraryError:
            # skip configs we cannot parse but still send the rest
            reporter.process(reports.booth_skipping_config(
                config, "unable to parse config"
            ))
    data = [("data_json", json.dumps(file_list))]
    if rewrite_existing:
        data.append(("rewrite_existing", "1"))
    try:
        response = json.loads(communicator.call_node(
            node,
            "remote/booth_save_files",
            NodeCommunicator.format_data_dict(data)
        ))
        report_list = []
        for file in response["existing"]:
            report_list.append(lib_reports.file_already_exists(
                None,
                file,
                Severities.WARNING if rewrite_existing else Severities.ERROR,
                (
                    None if rewrite_existing
                    else report_codes.FORCE_FILE_OVERWRITE
                ),
                node.label
            ))
        for file, reason in response["failed"].items():
            report_list.append(reports.booth_config_distribution_node_error(
                node.label, reason, file
            ))
        reporter.process_list(report_list)
        reporter.process(
            reports.booth_config_accepted_by_node(
                node.label, response["saved"]
            )
        )
    except NodeCommunicationException as e:
        # Fixed: the raise statement was split so that a bare `raise`
        # re-raised NodeCommunicationException and the LibraryError
        # expression was never reached; convert the communication error
        # into a LibraryError report instead.
        raise LibraryError(node_communicator_exception_to_report_item(e))
    except (KeyError, ValueError):
        raise LibraryError(lib_reports.invalid_response_format(node.label))