def dump_test_orgz(self):
    """Dump per-test organization data to <run_dir>/test_organization.json.

    For each registered test directory this records its directory, test
    name (command file minus the 3-char extension), matching ``tags`` from
    ``self.run_test_list``, the test bench, and — when running under LSF —
    the numeric LSF job id.  Under LSF it additionally writes an executable
    <run_dir>/kill_plan.sh that ``bkill``s every collected job id.
    """
    # LSF is in play only when it is not disabled AND a submit command is set.
    use_lsf = not self.config['no_lsf'] and self.config['lsf_cmd']
    test_orgz_data = {}
    jobid_list = []
    if use_lsf:
        lsf_monitor = LSFMonitor()
    for test_id, (test_dir, cmd_file) in enumerate(self._test_dir_cmd.items()):
        test_name = cmd_file[0:-3]  # strip the 3-char file extension
        entry = {'dir': test_dir,
                 'name': test_name,
                 'test_bench': 'nvdla_utb'}
        # Copy the tags of the first run-list entry with a matching name.
        for item in self.run_test_list:
            if item['name'] == test_name:
                entry['tags'] = item['tags']
                break
        if use_lsf:
            # NOTE(review): assumes LSF knows a job for this command file;
            # job_id[0] raises IndexError otherwise — confirm upstream.
            job_id = lsf_monitor.get_job_by_name(
                os.path.join(test_dir, cmd_file))
            jobid_list += job_id
            entry['job_id'] = int(job_id[0])
        else:
            entry['job_id'] = ''
        test_orgz_data[test_id] = entry
    orgz_path = os.path.join(self.config['run_dir'], 'test_organization.json')
    with open(orgz_path, 'w') as test_orgz_fh:
        json.dump(test_orgz_data, test_orgz_fh, sort_keys=True, indent=4)
    if use_lsf:
        kill_plan = os.path.join(self.config['run_dir'], 'kill_plan.sh')
        with open(kill_plan, 'w') as fh:
            fh.write('bkill ' + ' '.join(str(x) for x in jobid_list))
        # chmod directly instead of shelling out: 'chmod 755 ' + path via a
        # shell breaks on paths containing spaces or shell metacharacters.
        os.chmod(kill_plan, 0o755)
def __load_json_file(self):
    """Load test organization and regression status data from the cwd.

    Populates ``self.test_orgz_data`` (sorted by test name) and
    ``self.regr_sts_data``; when the regression ran under LSF, seeds
    ``self.job_status`` with the initial status of every recorded job id.
    """
    with open('test_organization.json', 'r') as fh:
        raw = json.load(fh)
    # Re-key the dict in test-name order so later reports come out sorted.
    ordered = sorted(raw.items(), key=lambda pair: pair[1]['name'])
    self.test_orgz_data = dict(ordered)

    with open('regression_status.json', 'r') as fh:
        self.regr_sts_data = json.load(fh)

    if self.regr_sts_data['farm_type'] != 'LSF':
        return
    monitor = LSFMonitor()
    for entry in self.test_orgz_data.values():
        self.job_status.update(monitor.get_job_init_status([entry['job_id']]))
def __parse_regress_status(self):
    """Resolve the pass/fail status of every test in ``self.test_orgz_data``.

    Reads each test's STATUS file from its result directory and stores
    ``status`` / ``errinfo`` / ``syndrome`` (plus LSF resource figures when
    the farm type is LSF) back into ``self.test_orgz_data``.

    NOTE(review): this method changes the process cwd (``os.chdir``) for
    every test and never restores it — callers relying on cwd must beware.
    """
    if self.regr_sts_data['farm_type'] == 'LSF':
        # Refresh live LSF execution state before inspecting STATUS files.
        lsfm = LSFMonitor()
        lsfm.update_job_exec_status(self.job_status)
    for tid, info in self.test_orgz_data.items():
        # ~dump_trace_only~ means we only run trace_generator regression.
        if self.regr_sts_data['dump_trace_only'] == 'True':
            result_dir = os.path.join(info['dir'], info['name'])
        else:
            result_dir = info['dir']
        # Busy-wait until the result directory appears.
        # NOTE(review): unbounded — never returns if the dir is never
        # created (e.g. the job was killed before setup); confirm intended.
        while not os.path.isdir(result_dir):
            if self.verbose:
                print('[INFO] %s does not exists.' % result_dir)
            time.sleep(1)
        os.chdir(result_dir)
        status_file = 'STATUS'  # relative path: read from result_dir (cwd)
        errinfo = ''
        if not os.path.isfile(status_file):
            # No STATUS yet: the test has not started writing results.
            if self.verbose:
                print('[INFO] %s does not exists.' %
                      os.path.join(os.getcwd(), status_file))
            status = 'PENDING'
        else:
            # First line of STATUS is the verdict (e.g. PASS/FAIL/RUNNING).
            with open(status_file, 'r') as status_fh:
                status = status_fh.readline().rstrip('\n')
            if status == 'FAIL':
                # Prefer the last simulator output line as the error detail.
                if os.path.exists('testout'):
                    errinfo = self.__get_lastline('testout').rstrip('\n')
                else:
                    errinfo = 'trace_gen failed'
            elif status == 'RUNNING' and self.regr_sts_data[
                    'farm_type'] == 'LSF':
                # STATUS says RUNNING but LSF says the job died: mark KILLED
                # and surface the LSF syndrome as the error info.
                if self.job_status[info['job_id']]['status'] in ('EXIT',
                                                                 'EXPIRE'):
                    status = 'KILLED'
                    errinfo = self.job_status[info['job_id']]['syndrome']
        self.test_orgz_data[tid]['status'] = status
        self.test_orgz_data[tid]['errinfo'] = errinfo
        self.test_orgz_data[tid]['syndrome'] = ''
        if self.regr_sts_data['farm_type'] == "LSF":
            # Attach LSF accounting data for reporting.
            self.test_orgz_data[tid]['cputime'] = self.job_status[
                info['job_id']]['cputime_used']
            self.test_orgz_data[tid]['memsize'] = self.job_status[
                info['job_id']]['maxmem']
            self.test_orgz_data[tid]['cpulimit'] = self.job_status[
                info['job_id']]['runlimit']
            self.test_orgz_data[tid]['memlimit'] = self.job_status[
                info['job_id']]['memlimit']
            self.test_orgz_data[tid]['queue_type'] = self.job_status[
                info['job_id']]['queue_type']