class __MPIInterfaceImpl:
    """Implementation of the MPIInterface singleton interface.

    Wraps the inner cluster object (MPIInterfaceCore, equivalent to
    parallel_go) and keeps a direct reference to MPICommandClient for
    the methods that do not go through the inner cluster.
    """

    def __init__(self):
        # Reference to inner cluster object (equivalent to parallel_go)
        self._cluster = MPIInterfaceCore()
        # Direct reference to MPICommandClient for methods not resorting
        # to the inner cluster
        self.__command_client = MPICommandClient()

    def isClusterRunning(self):
        """Return True when the MPI command client lifecycle state is 1.

        NOTE(review): 'get_lifecyle_state' reproduces the upstream
        MPICommandClient method name (misspelled there); do not "fix"
        the spelling here or the call breaks.
        """
        # Idiomatic boolean return instead of if/else returning True/False.
        return self.__command_client.get_lifecyle_state() == 1

    def init_cluster(self, clusterfile=None, project=None):
        """Start the cluster; clusterfile/project are accepted but unused.

        NOTE: In the MPI framework the clusterfile is processed by mpirun,
        so it is not necessary to process and validate clusterfile here.
        """
        self.start_cluster()

    def start_cluster(self):
        """Start the inner MPI cluster."""
        # TODO: This should set OMP_NUM_THREADS as well
        self._cluster.start_cluster()

    def stop_cluster(self):
        """Stop the inner MPI cluster."""
        self._cluster.stop_cluster()

    def do_and_record(self, cmd, id=None, group='', subMS=''):
        """Submit cmd to the cluster and return the resulting job id.

        group and subMS are accepted for interface compatibility but are
        not used by the MPI implementation.
        """
        jobId = self._cluster.odo(cmd, id)
        return jobId

    def get_engine_store(self, id):
        """Return the current working directory of engine 'id'."""
        res = self._cluster.execute('os.getcwd()', id)
        return res[0]['ret']

    def get_status(self):
        """Build, print and return a job status table.

        Returns a list of rows (list of strings); the first row is the
        header. Jobs are listed running first, then those holding the
        queue, then timed-out ones, and finally the already processed
        ones.
        """
        command_request_list = self._cluster.get_command_request_list()
        command_response_list = self._cluster.get_command_response_list()
        server_status = self._cluster.get_server_status()

        # Show first jobs running, then the ones holding the queue and
        # finally the ones already processed
        status_list = ['request sent', 'holding queue', 'timeout',
                       'response received']

        # Generate job status table (header row first)
        job_status_list = [['JOB ID', 'SERVER', 'HOSTNAME', 'QUEUE STATUS',
                            'COMMAND', 'ELAPSED TIME (s)',
                            'EXECUTION STATUS']]
        for status in status_list:
            for jobId in command_request_list:
                if command_request_list[jobId]['status'] != status:
                    continue
                # Get server to access info from the server status
                server = command_request_list[jobId]['server']
                hostname = server_status[server]['processor']
                # Create one job status row
                job_status = [
                    str(jobId),
                    str(server),
                    hostname,
                    command_request_list[jobId]['status'],
                    command_request_list[jobId]['command'],
                    self._elapsed_time(status, jobId, server, server_status,
                                       command_response_list),
                    self._execution_status(status, jobId,
                                           command_response_list),
                ]
                job_status_list.append(job_status)

        # Print job status table
        MPIInterface.print_table(job_status_list)

        # Return job status table
        return job_status_list

    def _elapsed_time(self, status, jobId, server, server_status,
                      command_response_list):
        """Format the elapsed time string for a single job row."""
        if status == 'holding queue':
            # Not started yet, so no elapsed time applies
            return 'N/A'
        if status in ('request sent', 'timeout'):
            # Still running (or timed out): measure against wall clock
            start_time = server_status[server]['command_start_time']
            if start_time is not None:
                return "%.2f" % (time.time() - start_time)
            return 'unset'
        if status == 'response received':
            start_time = command_response_list[jobId]['command_start_time']
            stop_time = command_response_list[jobId]['command_stop_time']
            if start_time is not None and stop_time is not None:
                return "%.2f" % (stop_time - start_time)
            return 'unset'
        return ''

    def _execution_status(self, status, jobId, command_response_list):
        """Derive the execution status string for a single job row."""
        if status in ('request sent', 'holding queue'):
            return 'pending'
        if status == 'timeout':
            return 'timeout'
        if status == 'response received':
            if command_response_list[jobId]['successful']:
                ret = command_response_list[jobId]['ret']
                # Boolean returns are reported explicitly so the caller can
                # distinguish a job that completed with False
                if isinstance(ret, bool):
                    return 'completed - True' if ret is True \
                        else 'completed - False'
                return 'completed'
            return 'exception raised'
        return ''

    def set_log_level(self, log_level):
        """Forward the requested log level to the MPI command client."""
        self.__command_client.set_log_level(log_level)
class __MPIInterfaceImpl:
    """Implementation of the MPIInterface singleton interface.

    NOTE(review): this class appears to be defined twice in this file;
    this later definition shadows the earlier one at import time —
    confirm and deduplicate.
    """

    def __init__(self):
        # Reference to inner cluster object (equivalent to parallel_go)
        self._cluster = MPIInterfaceCore()
        # Direct reference to MPICommandClient for methods not resorting
        # to the inner cluster
        self.__command_client = MPICommandClient()

    def isClusterRunning(self):
        """Return True when the MPI command client lifecycle state is 1.

        NOTE(review): 'get_lifecyle_state' reproduces the upstream
        MPICommandClient method name (misspelled there); keep as-is.
        """
        # Idiomatic boolean return instead of if/else returning True/False.
        return self.__command_client.get_lifecyle_state() == 1

    def init_cluster(self, clusterfile=None, project=None):
        """Start the cluster; clusterfile/project are accepted but unused.

        NOTE: In the MPI framework the clusterfile is processed by mpirun,
        so it is not necessary to process and validate clusterfile here.
        """
        self.start_cluster()

    def start_cluster(self):
        """Start the inner MPI cluster."""
        # TODO: This should set OMP_NUM_THREADS as well
        self._cluster.start_cluster()

    def stop_cluster(self):
        """Stop the inner MPI cluster."""
        self._cluster.stop_cluster()

    def do_and_record(self, cmd, id=None, group='', subMS=''):
        """Submit cmd to the cluster and return the resulting job id.

        group and subMS are accepted for interface compatibility but are
        not used by the MPI implementation.
        """
        jobId = self._cluster.odo(cmd, id)
        return jobId

    def get_engine_store(self, id):
        """Return the current working directory of engine 'id'."""
        res = self._cluster.execute('os.getcwd()', id)
        return res[0]['ret']

    def get_status(self):
        """Build, print and return a job status table.

        Returns a list of rows (list of strings); the first row is the
        header.
        """
        command_request_list = self._cluster.get_command_request_list()
        command_response_list = self._cluster.get_command_response_list()
        server_status = self._cluster.get_server_status()

        # Show first jobs running, then the ones holding the queue and
        # finally the ones already processed
        status_list = ['request sent', 'holding queue', 'timeout',
                       'response received']

        # Generate job status table (header row first)
        job_status_list = [['JOB ID', 'SERVER', 'HOSTNAME', 'QUEUE STATUS',
                            'COMMAND', 'ELAPSED TIME (s)',
                            'EXECUTION STATUS']]
        for status in status_list:
            for jobId in command_request_list:
                # Generate job status list
                if command_request_list[jobId]['status'] == status:
                    # Get server to access info from the server status
                    server = command_request_list[jobId]['server']
                    hostname = server_status[server]['processor']
                    # Create job status info
                    job_status = []
                    job_status.append(str(jobId))
                    job_status.append(str(server))
                    job_status.append(hostname)
                    job_status.append(command_request_list[jobId]['status'])
                    job_status.append(command_request_list[jobId]['command'])
                    # Add run time
                    elapsed_time = ''
                    if status == status_list[1]:  # holding queue
                        elapsed_time = 'N/A'
                    elif status == status_list[0] or status == status_list[2]:
                        # request sent / timeout: measure against wall clock
                        start_time = server_status[server]['command_start_time']
                        if start_time is not None:
                            elapsed_time = "%.2f" % (time.time() - start_time)
                        else:
                            elapsed_time = 'unset'
                    elif status == status_list[3]:  # 'response received'
                        start_time = \
                            command_response_list[jobId]['command_start_time']
                        stop_time = \
                            command_response_list[jobId]['command_stop_time']
                        if start_time is not None and stop_time is not None:
                            elapsed_time = "%.2f" % (stop_time - start_time)
                        else:
                            elapsed_time = 'unset'
                    job_status.append(elapsed_time)
                    # Add job execution status
                    execution_status = ''
                    if status == status_list[0] or status == status_list[1]:
                        # request sent / holding queue
                        execution_status = 'pending'
                    elif status == status_list[2]:  # timeout
                        execution_status = 'timeout'
                    elif status == status_list[3]:  # response received
                        if command_response_list[jobId]['successful']:
                            ret = command_response_list[jobId]['ret']
                            # Boolean returns are reported explicitly so the
                            # caller can tell a job that completed with False
                            if isinstance(ret, bool):
                                if ret is True:
                                    execution_status = 'completed - True'
                                else:
                                    execution_status = 'completed - False'
                            else:
                                execution_status = 'completed'
                        else:
                            execution_status = 'exception raised'
                    job_status.append(execution_status)
                    # Append job status info to list
                    job_status_list.append(job_status)

        # Print job status table
        MPIInterface.print_table(job_status_list)

        # Return job status table
        return job_status_list

    def set_log_level(self, log_level):
        """Forward the requested log level to the MPI command client."""
        self.__command_client.set_log_level(log_level)