def _analyse(self, benchmark_id):
    ''' Executes `jube analyse` command

    Args:
        benchmark_id (int): id of the benchmark to be analyzed

    Returns:
        (str) Result directory absolute path

    Raises:
        RuntimeError
    '''
    outpath = self.jube_files.get_bench_outputdir()

    # Continue benchmark steps that were not already executed.
    # This is often mandatory to execute postprocessing steps.
    cmd_str = 'jube continue --hide-animation {} --id {}'.format(outpath,
                                                                 benchmark_id)
    ret_code, _, stderr = utils.run_cmd(cmd_str, self.benchmark_path)
    if ret_code:
        print(stderr)
        msg = 'Error when executing command: {}'.format(cmd_str)
        raise RuntimeError(msg)

    cmd_str = 'jube analyse {} --id {}'.format(outpath, benchmark_id)
    ret_code, _, stderr = utils.run_cmd(cmd_str, self.benchmark_path)
    if ret_code:
        print(stderr)
        msg = 'Error when executing command: {}'.format(cmd_str)
        raise RuntimeError(msg)
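# A minimal, hypothetical sketch (not part of the original module) of the
# utils.run_cmd contract that _analyse relies on: a (ret_code, stdout_lines,
# stderr) tuple, built here on the standard subprocess module.
import subprocess

def run_cmd_sketch(cmd_str, cwd):
    """Run a shell command in cwd and return (returncode, stdout lines, stderr)."""
    proc = subprocess.Popen(cmd_str, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout.splitlines(), stderr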
def get_available_nodes(self, slices_size=1):
    """ Returns a list of currently available nodes by slice of slices_size
    ex: for slices of size 4 ['cn[100-103]','cn[109,150-152]']

    Args:
        (int) slices_size: slices size

    Returns:
        (list) list of node ids
    """
    cmd_str = "sinfo -h -t IDLE"
    ret_code, stdout, _ = utils.run_cmd(cmd_str, os.getcwd())
    if ret_code:
        print("!!Warning: unclebench was not able to get available nodes")
        return []

    nodeset = NodeSet()
    for line in stdout:
        nodeset_str = re.split(r'\s+', line.strip())[5]
        nodeset.update(nodeset_str)

    split_c = int(len(nodeset) / slices_size)
    nodes_list = [str(ns) for ns in nodeset.split(split_c)]
    return nodes_list
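# Illustrative, self-contained example of the NodeSet slicing used above
# (assumes the ClusterShell library; the node names are made up).
from ClusterShell.NodeSet import NodeSet

example_nodes = NodeSet('cn[100-103,109,150-152]')  # 8 idle nodes
slices_size = 4
# Split into len(example_nodes) / slices_size groups of roughly slices_size nodes each.
groups = [str(ns) for ns in example_nodes.split(len(example_nodes) // slices_size)]
# groups -> ['cn[100-103]', 'cn[109,150-152]']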
def update_remote(self):
    ''' Update remote repository with local copy '''
    status, _, stderr = utils.run_cmd(self.push_command(), self.local_dir)
    if status:
        print(stderr)
        msg = 'Error when executing command: {}'.format(self.push_command())
        raise RuntimeError(msg)
    else:
        print('Local changes pushed to remote repository using Git')
def copy_remote_to_local(self):
    ''' Copies remote repository to local directory '''
    status, _, stderr = utils.run_cmd(self.clone_command(), self.local_dir)
    if status:
        print(stderr)
        msg = 'Error when executing command: {}'.format(self.clone_command())
        raise RuntimeError(msg)
    else:
        print('Cloning remote repository using Git')
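# Hypothetical sketches of the Git command builders used by update_remote and
# copy_remote_to_local; the real push_command/clone_command may differ, these
# only illustrate the kind of string handed to utils.run_cmd.
def push_command_sketch(remote='origin', branch='master'):
    return 'git push {} {}'.format(remote, branch)

def clone_command_sketch(remote_url, local_dir):
    return 'git clone {} {}'.format(remote_url, local_dir)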
def add_contents_to_local_repo(self, file_list, commit_msg=None):
    ''' Executes both add and commit operations '''
    status, _, stderr = utils.run_cmd(self.add_command(file_list),
                                      self.local_dir)
    if status:
        print(stderr)
        msg = 'Error when executing command: {}'.format(
            self.add_command(file_list))
        raise RuntimeError(msg)
    else:
        print('New contents added to repository using Git')

    status, _, stderr = utils.run_cmd(self.commit_command(commit_msg),
                                      self.local_dir)
    if status:
        print(stderr)
        msg = 'Error when executing command: {}'.format(
            self.commit_command(commit_msg))
        raise RuntimeError(msg)
    else:
        print('New contents committed to local repository using Git')
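# Hypothetical sketches of the add/commit command builders called above; the
# actual add_command/commit_command implementations may differ, and the
# fallback commit message is made up for illustration.
def add_command_sketch(file_list):
    return 'git add {}'.format(' '.join(file_list))

def commit_command_sketch(commit_msg=None):
    return 'git commit -m "{}"'.format(commit_msg or 'Automatic commit')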
def _extract_results(self, benchmark_id):  # pylint: disable=too-many-locals
    ''' Get result from a jube benchmark with its id and build a python result array

    Args:
        benchmark_id (int): id of the benchmark

    Returns:
        (list) result array
    '''
    outpath = self.jube_files.get_bench_outputdir()
    benchmark_runpath = os.path.join(self.benchmark_path, outpath,
                                     self.get_bench_rundir(benchmark_id, outpath))
    jube_xml_config = self._get_jubexmlconfig(benchmark_id)
    cvsfile = jube_xml_config.get_result_cvsfile()

    cmd_str = 'jube result {} --id {} -o {}'.format(outpath, benchmark_id, cvsfile)
    _, stdout, _ = utils.run_cmd(cmd_str, self.benchmark_path)

    result_array = []
    cvs_data = csv.reader(stdout)
    with open(os.path.join(benchmark_runpath, 'result/ubench_results.dat'),
              'w') as result_file:
        cvs_writer = csv.writer(result_file)
        for row in cvs_data:
            cvs_writer.writerow(row)

    jubecvsfile_path = os.path.join(benchmark_runpath, 'result',
                                    '{}.dat'.format(cvsfile))
    try:
        with open(jubecvsfile_path, 'r') as jubecsvfile:
            jubereader = csv.reader(jubecsvfile)
            for row in jubereader:
                if isinstance(row, list):
                    result_array.append(row)
    except IOError:
        print('JUBE csv file not found')  # pylint: disable=superfluous-parens

    return result_array
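# Minimal sketch of the csv handling above, with made-up JUBE result rows;
# csv.reader accepts any iterable of lines, such as the stdout list returned
# by utils.run_cmd.
import csv

stdout_lines = ['nodes,tasks,time', '1,32,12.5', '2,64,6.9']
example_result_array = [row for row in csv.reader(stdout_lines)]
# example_result_array -> [['nodes', 'tasks', 'time'], ['1', '32', '12.5'], ['2', '64', '6.9']]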
def get_job_info(self, job_id):
    """Return a list of dictionaries with job information for a given job id

    Args:
        (int) job_id: job id

    Returns:
        (list) list of dictionaries with job information
    """
    job_cmd = ('sacct --jobs={0} -n -p --format=JobName,Elapsed,NodeList,Submit,Start'
               .format(job_id))
    ret_code, stdout, stderr = utils.run_cmd(job_cmd, os.getcwd())
    if ret_code:
        print("!!Warning: unclebench was not able to get job information")
        print("!!Warning: {}".format(stderr))
        return []

    job_info = []
    for line in stdout:
        fields = line.split("|")
        if not fields or len(fields) < 5:
            continue
        job_name = fields[0]
        job_info_temp = {}
        if job_name != 'batch':
            job_info_temp['job_name'] = job_name
            job_info_temp['job_elasped'] = fields[1]
            job_info_temp['job_nodelist'] = [node for node in NodeSet(fields[2])]
            job_info_temp['job_submit_time'] = fields[3]
            job_info_temp['job_start_time'] = fields[4]
            job_info.append(job_info_temp)

    return job_info
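# Self-contained example of the parsing done above, using a made-up
# 'sacct -n -p' output line ("|"-separated fields with a trailing separator).
from ClusterShell.NodeSet import NodeSet

sample_line = 'bench_job|00:12:34|cn[100-101]|2020-01-01T10:00:00|2020-01-01T10:01:00|'
fields = sample_line.split('|')
example_job = {'job_name': fields[0],
               'job_elasped': fields[1],  # key spelling kept as in the code above
               'job_nodelist': [node for node in NodeSet(fields[2])],
               'job_submit_time': fields[3],
               'job_start_time': fields[4]}
# example_job['job_nodelist'] -> ['cn100', 'cn101']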
def get_files_from_tag(self, tag):
    ''' Returns files associated with a given tag '''
    _, files, _ = utils.run_cmd(self.show_command(tag), self.local_dir)
    return [os.path.join(self.local_dir, fl) for fl in files]
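# Hypothetical sketch of a show_command() that lists the files touched by the
# commit a tag points to, one path per output line as expected above; the real
# implementation may differ.
def show_command_sketch(tag):
    return 'git show --pretty=format: --name-only {}'.format(tag)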