def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False):
    """
    Return a NodeAddressesList containing only the nodes that responded
    to an authentication check. Raises LibraryError on any failure.

    lib_env -- LibraryEnvironment
    node_list -- NodeAddressesList
    ignore_offline_nodes -- if True, offline nodes are just omitted from
        the returned list instead of producing an error
    """
    report_items = []
    online = NodeAddressesList()

    def check_node(node):
        try:
            nodes_task.node_check_auth(lib_env.node_communicator(), node)
            online.append(node)
        except NodeConnectionException as e:
            if ignore_offline_nodes:
                report_items.append(reports.omitting_node(node.label))
            else:
                report_items.append(
                    node_communicator_exception_to_report_item(
                        e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
                    )
                )
        except NodeCommunicationException as e:
            report_items.append(node_communicator_exception_to_report_item(e))

    tools.run_parallel(check_node, [([node], {}) for node in node_list])
    # raises LibraryError if any collected report item is an error
    lib_env.report_processor.process_list(report_items)
    return online
def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False):
    """
    Return NodeAddressesList of online nodes.
    Raises LibraryError on any failure.

    lib_env -- LibraryEnvironment
    node_list -- NodeAddressesList
    ignore_offline_nodes -- if True offline nodes are just omitted from
        returned list
    """
    collected_reports = []
    online_nodes = NodeAddressesList()

    def probe(node):
        try:
            nodes_task.node_check_auth(lib_env.node_communicator(), node)
        except NodeConnectionException as e:
            if not ignore_offline_nodes:
                collected_reports.append(
                    node_communicator_exception_to_report_item(
                        e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
                    )
                )
            else:
                # node is offline but the caller asked to skip such nodes
                collected_reports.append(reports.omitting_node(node.label))
            return
        except NodeCommunicationException as e:
            collected_reports.append(
                node_communicator_exception_to_report_item(e)
            )
            return
        online_nodes.append(node)

    params = [([node], {}) for node in node_list]
    tools.run_parallel(probe, params)
    lib_env.report_processor.process_list(collected_reports)
    return online_nodes
def get_cluster_sbd_config(lib_env):
    """
    Return a list of SBD configs gathered from all nodes in the cluster.

    Structure of the returned data:
    [
        {
            "node": <NodeAddress>,
            "config": <sbd_config_dict> or None if there was a failure,
        },
        ...
    ]

    If an error occurs while obtaining the config from some node, that
    node's "config" is None. If obtaining the config fails on every node,
    an empty list is returned.

    lib_env -- LibraryEnvironment
    """
    node_list = _get_cluster_nodes(lib_env)
    config_list = []
    successful_node_list = []
    report_item_list = []

    def get_sbd_config(node):
        # runs in parallel for each node; appends to the shared lists above
        try:
            config_list.append({
                "node": node,
                "config": environment_file_to_dict(
                    sbd.get_sbd_config(lib_env.node_communicator(), node)
                )
            })
            successful_node_list.append(node)
        except NodeCommandUnsuccessfulException as e:
            # the node responded but the command failed; report as warning
            report_item_list.append(reports.unable_to_get_sbd_config(
                node.label, e.reason, Severities.WARNING
            ))
        except NodeCommunicationException as e:
            report_item_list.append(node_communicator_exception_to_report_item(
                e, severity=Severities.WARNING
            ))
            report_item_list.append(reports.unable_to_get_sbd_config(
                node.label,
                "", #reason is in previous report item
                Severities.WARNING
            ))

    tools.run_parallel(get_sbd_config, [([node], {}) for node in node_list])
    lib_env.report_processor.process_list(report_item_list)

    # no node yielded a config at all
    if not len(config_list):
        return []

    # fill in a None config for every node that failed
    for node in node_list:
        if node not in successful_node_list:
            config_list.append({
                "node": node,
                "config": None
            })
    return config_list
def get_cluster_sbd_status(lib_env):
    """
    Return the status of the SBD service on every cluster node as a list:
    [
        {
            "node": <node label>,
            "status": {
                "installed": <boolean or None>,
                "enabled": <boolean or None>,
                "running": <boolean or None>
            }
        },
        ...
    ]
    For nodes where the status could not be obtained, all three status
    values are None.

    lib_env -- LibraryEnvironment
    """
    node_list = _get_cluster_nodes(lib_env)
    report_item_list = []
    successful_node_list = []
    status_list = []

    def get_sbd_status(node):
        # runs in parallel for each node; appends to the shared lists above
        try:
            status_list.append({
                "node": node.label,
                "status": json.loads(
                    # here we just need info about sbd service,
                    # therefore watchdog and device list is empty
                    sbd.check_sbd(lib_env.node_communicator(), node, "", []))["sbd"]
            })
            successful_node_list.append(node)
        except NodeCommunicationException as e:
            report_item_list.append(
                node_communicator_exception_to_report_item(
                    e, severity=Severities.WARNING))
            report_item_list.append(
                reports.unable_to_get_sbd_status(
                    node.label,
                    "", #reason is in previous report item
                    #warning is there implicit
                ))
        except (ValueError, KeyError) as e:
            # malformed JSON or missing "sbd" key in the node's response
            report_item_list.append(
                reports.unable_to_get_sbd_status(node.label, str(e)))

    tools.run_parallel(get_sbd_status, [([node], {}) for node in node_list])
    lib_env.report_processor.process_list(report_item_list)

    # fill in an all-None status for every node that failed
    for node in node_list:
        if node not in successful_node_list:
            status_list.append({
                "node": node.label,
                "status": {
                    "installed": None,
                    "enabled": None,
                    "running": None
                }
            })
    return status_list
def get_cluster_sbd_status(lib_env):
    """
    Return the status of the SBD service on every cluster node as a list:
    [
        {
            "node": <NodeAddress>,
            "status": {
                "installed": <boolean or None>,
                "enabled": <boolean or None>,
                "running": <boolean or None>
            }
        },
        ...
    ]
    For nodes where the status could not be obtained, all three status
    values are None.

    lib_env -- LibraryEnvironment
    """
    node_list = _get_cluster_nodes(lib_env)
    collected_reports = []
    queried_ok = []
    results = []

    def fetch_status(node):
        try:
            raw = sbd.check_sbd(lib_env.node_communicator(), node, "")
            results.append({
                "node": node,
                "status": json.loads(raw)["sbd"]
            })
            queried_ok.append(node)
        except NodeCommunicationException as e:
            collected_reports.append(
                node_communicator_exception_to_report_item(
                    e, severity=Severities.WARNING
                )
            )
            collected_reports.append(reports.unable_to_get_sbd_status(
                node.label,
                "", #reason is in previous report item
                #warning is there implicit
            ))
        except (ValueError, KeyError) as e:
            # malformed JSON or missing "sbd" key in the node's response
            collected_reports.append(
                reports.unable_to_get_sbd_status(node.label, str(e))
            )

    tools.run_parallel(fetch_status, [([node], {}) for node in node_list])
    lib_env.report_processor.process_list(collected_reports)

    unknown_status = {"installed": None, "enabled": None, "running": None}
    for node in node_list:
        if node not in queried_ok:
            results.append({"node": node, "status": dict(unknown_status)})
    return results
def test_parallelism(self):
    """run_parallel should overlap the sleeps instead of serializing them."""
    sleep_count = 5
    call_params = [[[delay + 1], {}] for delay in range(sleep_count)]
    started = time.time()
    # a parallel run should take roughly as long as the longest sleep
    # (sleep_count seconds), but less than the sum of all sleeps
    tools.run_parallel(time.sleep, call_params)
    elapsed = time.time() - started
    self.assertTrue(elapsed > sleep_count)
    self.assertTrue(elapsed < sum(range(1, sleep_count + 1)))
def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
    """
    Run func in parallel once per parameter tuple in param_list and raise
    a single LibraryError aggregating every failure.

    func -- callable to be run
    param_list -- list of tuples: (args, kwargs)
    """
    collected_reports = []

    def guarded(*args, **kwargs):
        # collect failures instead of letting them escape the worker
        try:
            func(*args, **kwargs)
        except NodeCommunicationException as e:
            collected_reports.append(
                node_communicator_exception_to_report_item(e)
            )
        except LibraryError as e:
            collected_reports.extend(e.args)

    tools.run_parallel(guarded, param_list)
    if collected_reports:
        raise LibraryError(*collected_reports)
def test_run_all(self):
    """run_parallel should invoke the callback once per parameter tuple."""
    data_list = [([i], {}) for i in range(5)]
    out_list = []
    tools.run_parallel(out_list.append, data_list)
    # completion order is nondeterministic, so compare the sorted output;
    # list(range(5)) is the idiomatic spelling of [i for i in range(5)]
    self.assertEqual(sorted(out_list), list(range(5)))
def test_run_all(self):
    """run_parallel should invoke the callback once per parameter tuple."""
    expected = list(range(5))
    collected = []
    params = [([value], {}) for value in expected]
    tools.run_parallel(collected.append, params)
    # completion order is nondeterministic, so compare the sorted output
    self.assertEqual(sorted(collected), expected)