def init_tables(g_vars, qaOpts):
    # Resolve the table directory (option TABLE_PATH; default:
    # <QA_RESULTS>/tables), collect every table-related option value into a
    # dict registered as option 'TABLES', and copy each table into the
    # resolved table path via cpTables().
    TP='TABLE_PATH'
    g_vars.table_path = qaOpts.getOpt(TP)
    tp_sz = len(g_vars.table_path)

    if tp_sz:
        # user-defined
        # strip a single trailing '/'
        if g_vars.table_path[tp_sz-1] == '/':
            g_vars.table_path = g_vars.table_path[:tp_sz-1]
    else:
        # default below the results dir; publish the choice back to the options
        g_vars.table_path = os.path.join(g_vars.res_dir_path, 'tables')
        qa_util.mkdirP(g_vars.table_path)
        qaOpts.setOpt(TP, g_vars.table_path)

    # Precedence of path search for tables:
    #
    #   tables/projects/${PROJECT}
    #   tables/projects
    #   tables/${PROJECT}
    #   tables
    # 1) default tables are provided in QA_SRC/tables/projects/PROJECT.
    # 2) a table of the same name provided in QA_SRC/tables gets
    #    priority, thus may be persistently modified.
    # 3) tables from 2) or 1) are copied to ${QA_Results}/tables.
    # 4) Option TABLE_AUTO_UPDATE is the default, i.e. tables in
    #    projects are updated and are applied.
    # 5) 4) also for option USE_STRICT.

    # covers the user-defined case, where the dir may not exist yet
    if not os.path.isdir(g_vars.table_path):
        qa_util.mkdirP(g_vars.table_path)

    # collect all table names in a list
    tables={}

    for key in qaOpts.dOpts.keys():
        tName = ''

        # project tables: any option whose key mentions TABLE or CHECK_LIST,
        # plus CF_* keys (excluding those whose 4th char is 'F')
        if key.find('TABLE') > -1:
            tName = qaOpts.getOpt(key)
        elif key.find('CHECK_LIST') > -1:
            tName = qaOpts.getOpt(key)
        elif key.find('CF_') > -1 and key[3] != 'F':
            tName = qaOpts.getOpt(key)

        if len(tName):
            # accept only plain names: letters, digits, '.', '_' and '-';
            # rejects anything containing a path separator
            regExp = re.match(r'^[a-zA-Z0-9\._-]*$', tName)
            if regExp:
                tables[key] = tName

    qaOpts.setOpt('TABLES', tables)

    # pDir caches already-probed source dirs across cpTables() calls
    # (semantics of cpTables not visible here — see its definition)
    pDir=[]
    for key in tables.keys():
        cpTables( key, tables[key], g_vars.table_path, qaOpts, pDir)

    return
def rsync_default_tables(g_vars, qaConf): # copy the default tables to the current session location if not os.path.isdir(g_vars.table_path): qa_util.mkdirP(g_vars.table_path) src_0 = os.path.join(qaConf.getOpt('QA_TABLES'), 'tables') if not src_0: src_0 = os.path.join(qaConf.getOpt('QA_SRC'), 'tables') prj = qaConf.getOpt('PROJECT') # '' wqhen not defined #elif qaConf.isOpt('DEFAULT_PROJECT'): # prj=qaConf.getOpt('DEFAULT_PROJECT') #else: src = '' # prevent a fatal state below if prj: # with trailing '/' src = os.path.join(src_0, 'projects', prj, '') if not (prj or os.path.isdir(src)): print 'no project specified' sys.exit(1) dest = g_vars.table_path rsync_cmd_0='rsync' + ' -lrtuz ' + ' --copy-links' \ + " --exclude='*~'" + " --exclude='.*'" + " --exclude='*_qa.conf'" \ + " --exclude='IS-ENES-Data.github.io'" rsync_cmd = rsync_cmd_0 + ' ' + src + ' ' + dest try: subprocess.call(rsync_cmd, shell=True) except: print 'could not rsync ' + src + ' --> ' + dest sys.exit(1) # special: CF tables if prj != 'CF': src = os.path.join(src_0, 'projects', 'CF', '') rsync_cmd = rsync_cmd_0 + ' ' + src + ' ' + dest try: subprocess.call(rsync_cmd, shell=True) except: print 'could not rsync ' + src + ' --> ' + dest sys.exit(1) return
def init_session(g_vars, qaConf):
    """Create the per-session log directory and record the call context.

    The session id is the current date stamp; SESSION and SESSION_LOGDIR
    are registered in qaConf. In SHOW mode nothing is created on disk.
    """
    today = qa_util.date()
    g_vars.curr_date = today
    g_vars.session = today

    logdir = os.path.join(g_vars.res_dir_path, 'session_logs', today)
    g_vars.session_logdir = logdir

    qaConf.addOpt('SESSION', g_vars.session)
    qaConf.addOpt('SESSION_LOGDIR', logdir)

    # show-only mode: no directories, no pid file
    if qaConf.isOpt('SHOW'):
        return

    qa_util.mkdirP(logdir)  # error --> exit

    # pid file: working dir plus the full command line, for reproducibility
    pid_file = os.path.join(logdir, 'pid.' + g_vars.pid)
    with open(pid_file, 'w') as fd:
        fd.write(os.getcwd() + '\n')
        fd.write(''.join([' ' + arg for arg in sys.argv]))
        fd.write(' --fpid ' + str(g_vars.pid) + '\n')

    return
def init_session(g_vars, qaOpts):
    """Initialise session id and log dir; write a pid file unless SHOW mode.

    Registers SESSION and SESSION_LOGDIR in qaOpts; the pid file records
    the working directory and the complete command line.
    """
    g_vars.curr_date = qa_util.date()
    g_vars.session = g_vars.curr_date
    g_vars.session_logdir = os.path.join(
        g_vars.res_dir_path, 'session_logs', g_vars.curr_date)

    for opt_key, opt_val in (('SESSION', g_vars.session),
                             ('SESSION_LOGDIR', g_vars.session_logdir)):
        qaOpts.setOpt(opt_key, opt_val)

    if not qaOpts.isOpt('SHOW'):
        qa_util.mkdirP(g_vars.session_logdir)  # error --> exit

        pid_path = os.path.join(g_vars.session_logdir, 'pid.' + g_vars.pid)
        with open(pid_path, 'w') as fd:
            fd.write(os.getcwd() + '\n')
            for arg in sys.argv:
                fd.write(' ' + arg)
            fd.write(' --fpid ' + str(g_vars.pid) + '\n')

    return
def summary(self, f_log, g_vars):
    """Extract annotations and atomic time ranges from a check log-file."""
    # sub-directories below check_logs
    base = g_vars.check_logs_path
    self.f_annot = os.path.join(base, 'Annotations')
    self.f_perd = os.path.join(base, 'Period')
    self.f_sum = os.path.join(base, 'Summary')
    for sub_dir in (self.f_annot, self.f_perd, self.f_sum):
        qa_util.mkdirP(sub_dir)

    # time range of atomic variables; to save memory, beg and end are
    # linked to the name by the index of the corresponding atomic
    # variable name in prd_name
    self.prd_name = []
    self.prd_beg = {}
    self.prd_end = {}

    # read and process the logfile block-wise
    with open(f_log, 'r') as fd:
        while True:
            blk = self.get_next_blk(fd=fd)
            if not blk:
                break

            for ix, line in enumerate(blk):
                words = line.split()
                if words[0] == 'file:':
                    fName = words[1]
                elif words[0] == 'period:':
                    # time ranges of atomic variables
                    self.update_period(fName, blk, ix)
                    continue

    return
def summary(self, f_log, g_vars):
    """Scan a check log-file for annotations and atomic time ranges."""
    # create the Annotations/Period/Summary sub-directories in check_logs
    self.f_annot = os.path.join(g_vars.check_logs_path, 'Annotations')
    qa_util.mkdirP(self.f_annot)
    self.f_perd = os.path.join(g_vars.check_logs_path, 'Period')
    qa_util.mkdirP(self.f_perd)
    self.f_sum = os.path.join(g_vars.check_logs_path, 'Summary')
    qa_util.mkdirP(self.f_sum)

    # beg/end of an atomic variable are addressed via the index of its
    # name in prd_name; keeps the dicts small
    self.prd_name = []
    self.prd_beg = {}
    self.prd_end = {}

    # process the logfile block by block until exhausted
    with open(f_log, 'r') as fd:
        blk = self.get_next_blk(fd=fd)
        while len(blk):
            i = 0
            while i < len(blk):
                words = blk[i].split()
                if words[0] == 'file:':
                    fName = words[1]
                elif words[0] == 'period:':
                    # time ranges of atomic variables
                    self.update_period(fName, blk, i)
                i += 1
            blk = self.get_next_blk(fd=fd)

    return
def run(log, g_vars, qaConf):
    # Initialise global run state from the configuration: thread count,
    # result/log paths, checksum settings, abort criteria, table setup and
    # the paths of the external checker executables. Exits on fatal
    # misconfiguration or a missing/non-executable binary.
    #g_vars.TTY = os.ttyname(0)

    # update external tables and in case of running qa_dkrz.py from
    # sources update C++ executables
    # run_install(qaConf)

    # NUM_EXEC_THREADS may be a list; the entries are summed up
    if qaConf.isOpt('NUM_EXEC_THREADS'):
        g_vars.thread_num = \
            sum( qa_util.mk_list(qaConf.getOpt('NUM_EXEC_THREADS')) )
    else:
        g_vars.thread_num = 1

    g_vars.res_dir_path = qaConf.getOpt('QA_RESULTS')
    g_vars.project_data_path = qaConf.getOpt('PROJECT_DATA')
    g_vars.prj_dp_len = len(g_vars.project_data_path)

    init_session(g_vars, qaConf)

    g_vars.check_logs_path = os.path.join(g_vars.res_dir_path, 'check_logs')

    # checksum configuration
    g_vars.cs_enable = False
    if qaConf.isOpt('CHECKSUM'):
        g_vars.cs_enable = True

        # NOTE(review): the inner test repeats the outer one and is thus
        # always true, so cs_type is always 'md5' and the else-branch is
        # unreachable; probably isOpt('CHECKSUM', True) was intended, as
        # in the qaOpts-based variant of run() — confirm.
        if qaConf.isOpt('CHECKSUM'):
            g_vars.cs_type = 'md5'
        else:
            g_vars.cs_type = qaConf.getOpt('CHECKSUM')

        cs_dir = qaConf.getOpt('CS_DIR')
        if len(cs_dir) == 0:
            cs_dir = 'cs_table'
        g_vars.cs_dir = os.path.join(g_vars.res_dir_path, cs_dir)

    qaConf.addOpt('LOG_FNAME_DIR', g_vars.check_logs_path)

    qa_util.mkdirP(g_vars.check_logs_path)  # error --> exit
    qa_util.mkdirP(os.path.join(g_vars.res_dir_path, 'data'))  # error --> exit

    # some more settings
    if not qaConf.isOpt('ZOMBIE_LIMIT'):
        qaConf.addOpt('ZOMBIE_LIMIT', 3600)

    # stand-alone checksumming implies CHECKSUM
    if not qaConf.isOpt('CHECKSUM'):
        if qaConf.isOpt('CS_STAND_ALONE') or qaConf.isOpt('CS_DIR'):
            qaConf.addOpt('CHECKSUM', True)

    # save current version id to the cfg-file
    '''
    if qaConf.isOpt('QA_VERSION'):
        qv=qaConf.getOpt('QA_VERSION')
    else:
        qv = qa_util.get_curr_revision(g_vars.qa_src, g_vars.isConda)

    qaConf.cfg.entry(key='QA_VERSION', value=qv)
    g_vars.qa_revision = qv
    '''

    # abort criteria: PROJECT is mandatory; data must come from either
    # PROJECT_DATA or SELECT_PATH_LIST (explicit filenames)
    isAbort = False
    abortStr = ''
    if len(qaConf.getOpt('PROJECT')) == 0:
        isAbort = True
        abortStr = 'PROJECT'
    elif not qaConf.isOpt('PROJECT_DATA'):
        if not qaConf.isOpt('SELECT_PATH_LIST'):
            isAbort = True
            abortStr = 'PROJECT_DATA or explicit filename'

    if isAbort:
        print abortStr + ' was not defined.'
        print ' Did you miss to provide any of the sufficient options,'
        print ' e.g. -f task-file, QA_CONF, -P PROJECT + filename?'
        sys.exit(1)

    # table path and copy of tables for operational runs
    init_tables(g_vars, qaConf)

    # unique exp_name and table_names are defined by indices of path components
    g_vars.drs_path_base = qaConf.getOpt('DRS_PATH_BASE')
    qa_util.get_experiment_name(g_vars, qaConf, isInit=True)
    qa_util.get_project_table_name(g_vars, qaConf, isInit=True)

    # enable clearance of logfile entries by the CLEAR option
    if qaConf.isOpt('CLEAR_LOGFILE'):
        g_vars.clear_logfile = True
    else:
        g_vars.clear_logfile = False

    g_vars.ignore_temp_files = qaConf.isOpt('IGNORE_TEMP_FILES')

    # external executables; the checker binary is project-specific
    g_vars.syncFilePrg = os.path.join(g_vars.qa_src, 'bin', 'syncFiles.x')
    g_vars.validNcPrg = os.path.join(g_vars.qa_src, 'bin', 'testValidNC.x')
    g_vars.checkPrg = os.path.join(g_vars.qa_src, 'bin',
                                   'qA-' + qaConf.getOpt('PROJECT') + '.x')

    if not os.access(g_vars.syncFilePrg, os.X_OK):
        print g_vars.syncFilePrg + ' is not executable'
        sys.exit(1)

    if not os.access(g_vars.validNcPrg, os.X_OK):
        print g_vars.validNcPrg + ' is not executable'
        sys.exit(1)

    if not os.access(g_vars.checkPrg, os.X_OK):
        print g_vars.checkPrg + ' is not executable'
        sys.exit(1)

    g_vars.anyProgress = False

    return
def init_tables(g_vars, qaConf):
    # Resolve TABLE_PATH (default: <QA_RESULTS>/tables), rsync the default
    # tables into it, collect all table-related option values into option
    # 'TABLES', and copy the tables via cpTables() — both for the genuine
    # project and, when PROJECT_VIRT is set, for the virtual project.
    TP = 'TABLE_PATH'
    g_vars.table_path = qaConf.getOpt(TP)
    tp_sz = len(g_vars.table_path)

    if tp_sz:
        # user-defined
        # strip a single trailing '/'
        if g_vars.table_path[tp_sz - 1] == '/':
            g_vars.table_path = g_vars.table_path[:tp_sz - 1]
    else:
        # default below the results dir; publish it back to the config
        g_vars.table_path = os.path.join(g_vars.res_dir_path, 'tables')
        qa_util.mkdirP(g_vars.table_path)
        qaConf.addOpt(TP, g_vars.table_path)

    # Precedence of path search for tables:
    #
    #   tables/${PROJECT_VIRT}
    #   tables/${PROJECT}
    #   tables/projects/${PROJECT}
    # 1) default tables are provided in QA_SRC/tables/projects/PROJECT
    # 2) do not edit default tables; they are overwritten by updates
    # 3) Option TABLE_AUTO_UPDATE would search for updates for
    #    projects/PROJECT
    # 4) option USE_STRICT discards non-default tables.

    # rsync of default tables
    rsync_default_tables(g_vars, qaConf)

    # collect all table names in a list
    tables = {}

    for key in qaConf.dOpts.keys():
        tName = ''

        # project tables: keys mentioning TABLE or CHECK_LIST, plus CF_*
        # keys (excluding those whose 4th char is 'F')
        if 'TABLE' in key:
            tName = qaConf.getOpt(key)
        elif 'CHECK_LIST' in key:
            tName = qaConf.getOpt(key)
        elif 'CF_' in key and key[3] != 'F':
            tName = qaConf.getOpt(key)

        if len(tName):
            # accept only plain names without path separators
            regExp = re.match(r'^[a-zA-Z0-9\._-]*$', tName)
            if regExp:
                tables[key] = tName

    qaConf.addOpt('TABLES', tables)

    prj_to = qaConf.getOpt('PROJECT')

    pDir = []
    for key in tables.keys():
        # for genuine projects: source and destination table name coincide
        cpTables(key, tables[key], tables[key],
                 g_vars.table_path, qaConf, prj_to, prj_to, pDir)

    if qaConf.isOpt('PROJECT_VIRT'):
        prj_from = qaConf.getOpt('PROJECT_VIRT')

        # find corresponding tables in the virtual project: table names
        # containing the genuine project's name are mapped to the virtual
        # project's name and taken only when such a file really exists
        vTables = {}
        pHT = os.path.join(qaConf.getOpt('QA_TABLES'), 'tables', prj_from)

        for key in tables.keys():
            name = tables[key]
            if prj_to in name:
                name = name.replace(prj_to, prj_from)

            if os.path.isfile(os.path.join(pHT, name)):
                vTables[key] = name

        pDir = [pHT]
        for key in vTables.keys():
            # copy the virtual project's table onto the genuine name
            cpTables(key, vTables[key], tables[key],
                     g_vars.table_path, qaConf, prj_from, prj_to, pDir)

    return
def prepareExample(qaConf): if not qaConf.isOpt('PROJECT'): qaConf.addOpt("PROJECT", 'CORDEX') if qaConf.isOpt('WORK'): currdir = qaConf.getOpt('WORK') else: currdir = qaConf.getOpt('CURR_DIR') #currdir=os.path.join(currdir, 'example') qaConf.dOpts['QA_RESULTS'] = os.path.join(currdir, 'results') if not qa_util.mkdirP(currdir): sys.exit(1) os.chdir(currdir) qa_util.rm_r('results', 'config.txt', 'qa-test.task') print 'make examples in ' + currdir print 'make qa_test.task' taskFile = os.path.join(QA_SRC, 'example', 'templates', 'qa-test.task') shutil.copy(taskFile, currdir) taskFile = 'qa-test.task' # replace templates within QA_SRC/example sub = [] repl = [] sub.append('PROJECT_DATA=data') repl.append('PROJECT_DATA=' + os.path.join(currdir, 'data')) sub.append('QA_RESULTS=results') repl.append('QA_RESULTS=' + os.path.join(currdir, 'results')) qa_util.f_str_replace(taskFile, sub, repl) if not qa_util.which("ncgen"): print "building data in example requires the ncgen utility" sys.exit(1) if not os.path.isdir(os.path.join(currdir, 'data')): # data print 'make data' p = os.path.join(QA_SRC, 'example', 'templates', 'data.tbz') subprocess.call(["tar", "--bzip2", "-xf", p]) for rs, ds, fs in os.walk('data'): for f in fs: nc_f = f[:len(f) - 3] + 'nc' t_f = os.path.join(rs, f) t_nc = os.path.join(rs, nc_f) try: subprocess.call(["ncgen", "-k", "3", "-o", t_nc, t_f]) except: print 'making of example failed' sys.exit(1) else: qa_util.rm_r(t_f) print 'run' + sys.argv[0] + " -m -f " + currdir + "/qa-test.task" qaConf = QaConfig(QA_SRC, ['-m', '-f', currdir + "/qa-test.task"]) return qaConf
def run(self, f_log):
    # Extract annotations and atomic time ranges from a log-file and
    # produce the Annotations/Period/Summary products. The parsing state
    # lives in instance attributes initialised below.
    log_path, log_name = os.path.split(f_log)
    log_name = log_name[0:-4]  # strip the 4-char extension (e.g. '.log')

    # sub-directories in check_logs
    self.f_annot = os.path.join(log_path, 'Annotations')
    self.f_perd = os.path.join(log_path, 'Period')
    self.sum_dir = os.path.join(log_path, 'Summary', log_name)
    qa_util.mkdirP(self.f_annot)
    qa_util.mkdirP(self.f_perd)
    qa_util.mkdirP(self.sum_dir)

    # f_perd now names the period file itself (was the directory above)
    self.f_perd = os.path.join(self.f_perd, log_name + '.period')

    # time range of atomic variables; in order to save mem,
    # beg and end, respectively, are linked to the name by the
    # index of the corresponding atomic variable name in var
    self.fName_ids=[]
    self.fName_dt_id={}  # each fName_id gets a list of dt ids
    self.path_ids=[]
    self.f_p_ids={}      # fName id --> path id
    self.f_items=[]
    self.p_items=[]
    self.p_drs=[]
    self.var_ids={}      # all [ids] sharing the same variable name

    self.atomicBeg=[]    # atomic time interval, begin
    self.atomicEnd=[]    # atomic time interval, end
    self.dt=[]           # time intervals of sub-temp files

    self.annot_capt=[]        # brief annotations
    self.annot_tag=[]         # corresponding impact-tag
    self.annot_scope=[]       # brief annotations
    self.annot_fName_id=[]    # for each var involved
    self.annot_path_id=[]
    self.annot_var_ix=[]      # only project variable names
    self.annot_fName_dt_id=[] # for each time interval of each var involved

    # count total occurrences (good and bad)
    self.file_count=0
    self.var_dt_count=[]  # for all frequencies

    # reading and processing of the logfile
    with open(f_log, 'r') as fd:
        while True:
            # read the lines of the next check
            blk = self.get_next_blk(fd=fd)
            sz = len(blk) - 1
            if sz == -1:
                break

            isMissPeriod=True  # fx or segmentation fault

            i=-1
            while i < sz:
                i = i+1
                # strip the leading YAML-ish ' -' decoration, split once
                words = blk[i].lstrip(' -').split(None,1)

                if words[0] == 'file:':
                    # fse contains ( var, StartTime, EndTime ); the
                    # times could be empty strings or EndTime could be empty
                    fse = qa_util.f_time_range(words[1])
                    self.set_curr_dt(fse)

                    fName_id_ix = self.decomposition(words[1], self.f_items,
                                                     self.fName_ids,
                                                     self.prj_fName_sep)
                    self.file_count += 1

                    # for counting atomic variable's sub_temps for all freqs
                    try:
                        self.fName_dt_id[fName_id_ix]
                    except:
                        self.fName_dt_id[fName_id_ix] = [self.dt_id]
                    else:
                        self.fName_dt_id[fName_id_ix].append(self.dt_id)

                    try:
                        vName = self.f_items[
                            self.fName_ids[fName_id_ix][self.prj_var_ix]]
                    except:
                        pass
                    else:
                        # dict of varNames to contained var_ids;
                        # append only when not already present
                        try:
                            self.var_ids[vName]
                        except:
                            self.var_ids[vName] = [fName_id_ix]
                        else:
                            try:
                                self.var_ids[vName].index(fName_id_ix)
                            except:
                                self.var_ids[vName].append(fName_id_ix)

                    if fName_id_ix > len(self.atomicBeg) - 1:
                        # init for a new variable
                        self.atomicBeg.append('')  # greater than any date
                        self.atomicEnd.append('')  # smaller than any date

                elif words[0] == 'data_path:':
                    path_id = self.decomposition(words[1], self.p_items,
                                                 self.path_ids,
                                                 self.prj_path_sep)
                    # in particular for paths with several variables
                    self.f_p_ids[fName_id_ix] = path_id

                elif words[0] == 'period:':
                    # time ranges of atomic variables
                    # indexed by self.curr_dt within the function
                    i = self.period_add(fName_id_ix, path_id, fse, blk, i)
                    isMissPeriod=False

                elif words[0] == 'event:':
                    # no explicit period seen so far: substitute from the
                    # filename's end time (if any)
                    if isMissPeriod and len(fse[2]):
                        self.subst_period(fName_id_ix, path_id, fse)
                        isMissPeriod=False

                    # annotation and associated indices of properties
                    i = self.annotation_add(path_id, fName_id_ix, blk, i)

                elif words[0] == 'status:':
                    if isMissPeriod and len(fse[2]):
                        self.subst_period(fName_id_ix, path_id, fse)

    # test for ragged time intervals of atomic variables for given frequency
    self.period_final()

    self.annotation_merge()

    return
def run(self, t_vars):
    # Run the compiled checker on one (sub-temporal) file, parse its
    # output and write a logfile entry. Returns False when the variable
    # is locked or the check failed (status > 1), True otherwise.
    os.environ["UDUNITS2_XML_PATH"] = self.g_vars.UDUNITS2_XML_PATH

    # an existing lock file marks the atomic variable as blocked
    qa_lock = os.path.join(t_vars.var_path,
                           'qa_lock_' + t_vars.fBase + '.txt')
    if os.path.isfile(qa_lock):
        return False

    self.nc_file = os.path.join(t_vars.data_path, t_vars.fName)
    self.printStatusLine(nc=self.nc_file)

    if self.qaConf.isOpt('DRY_RUN'):
        print self.nc_file
        return True

    if self.is_show_call:
        # show mode: dump the parameters and stop
        param = self.getParamStr(t_vars, '\n')
        fd = open('./param_file.txt', 'w')
        print >> fd, param
        print param
        sys.exit(1)
    else:
        param = self.getParamStr(t_vars, ' ')

    log_entry = {}
    qa_util.mkdirP(t_vars.var_path)

    cpp_run = self.g_vars.checkPrg + param

    set_qa_lock = False
    try:
        check_output = subprocess.check_output(cpp_run, shell=True,
                                               cwd=t_vars.var_path)
    except subprocess.CalledProcessError as e:
        istatus = e.returncode
        set_qa_lock = True

        # exit code 63 — treated as benign: proceed without logging
        if istatus == 63:
            return True

        check_output = e.output.strip()

        # atomic variable gets locked
        # (NOTE: redundant — set_qa_lock is already True at this point)
        if istatus > 1:
            set_qa_lock = True

        if istatus == 4:
            pass
            # EMERGENCY STOP
            # isSignalTerm = t
            # issueMail = t
        elif istatus > 4:
            # uncontroled system exception, e.g. segmentation fault
            log_entry['conclusion'] = 'segmentation fault'
            log_entry['is_event'] = True
            log_entry['event'] = []
            event = {}
            event['annotation'] = 'run-time error: segmentation fault'
            event['impact'] = 'L2'
            event['tag'] = 'S_1'
            log_entry['event'].append(event)
    else:
        istatus = 0

    if self.qaConf.isOpt('RUN_PREPARE'):
        # run and append output
        check_output += self.run_PrePARE(t_vars)

    entry_id = ''

    # prepare the logfile entry; status >= 5 means the checker output is
    # unusable (crash), so it is not parsed
    if istatus < 5:
        self.parse_output(check_output, log_entry)
        entry_id = self.log.append(entry_id,
                                   f=t_vars.fName,
                                   d_path=t_vars.data_path,
                                   r_path=t_vars.var_path,
                                   period=log_entry['period'],
                                   conclusion=log_entry['conclusion'],
                                   set_qa_lock=set_qa_lock)

    if 'is_event' in log_entry:
        entry_id = self.log.append(entry_id, is_events=True)

        for eve in log_entry['event']:
            entry_id = self.log.append(entry_id,
                                       annotation=eve['annotation'],
                                       impact=eve['impact'],
                                       tag=eve['tag'])
            if 'info' in eve:
                entry_id = self.log.append(entry_id, info=eve['info'])

    entry_id = self.log.append(entry_id, status=istatus)

    self.log.write_entry(entry_id, self.g_vars.check_logs_path,
                         t_vars.log_fname)

    if istatus > 1:
        proceed = False
    else:
        proceed = True

    return proceed  # true: proceed with next sub-temporal file
def run(log, g_vars, qaOpts, rawCfgPars, cfgFile):
    # Initialise global run state (qaOpts/cfg-file based variant):
    # tool availability, thread count, paths, checksum settings, revision
    # bookkeeping, table setup and checker executables. Exits when a
    # required binary is missing or not executable.
    #g_vars.TTY = os.ttyname(0)

    # are git and wget available?
    g_vars.NO_GIT=True
    if qa_util.which('git'):
        g_vars.NO_GIT = False

    g_vars.NO_WGET=True
    if qa_util.which('wget'):
        g_vars.NO_WGET = False

    # update external tables and in case of running qa_dkrz.py from
    # sources update C++ executables
    run_install(qaOpts, g_vars)

    # NUM_EXEC_THREADS may be a list; the entries are summed up
    if qaOpts.isOpt('NUM_EXEC_THREADS'):
        g_vars.thread_num = \
            sum( qa_util.mk_list(qaOpts.getOpt('NUM_EXEC_THREADS')) )
    else:
        g_vars.thread_num = 1

    g_vars.res_dir_path = qaOpts.getOpt('QA_RESULTS')
    g_vars.project_data_path = qaOpts.getOpt('PROJECT_DATA')
    g_vars.prj_dp_len = len(g_vars.project_data_path)

    init_session(g_vars, qaOpts)

    g_vars.check_logs_path = os.path.join(g_vars.res_dir_path, 'check_logs')

    # checksum configuration
    g_vars.cs_enable = False
    if qaOpts.isOpt('CHECKSUM'):
        g_vars.cs_enable = True

        # presumably isOpt(key, True) tests for the literal value True:
        # a plain boolean selects md5, any other value names the
        # checksum type itself — confirm with isOpt's implementation
        if qaOpts.isOpt('CHECKSUM', True):
            g_vars.cs_type = 'md5'
        else:
            g_vars.cs_type = qaOpts.getOpt('CHECKSUM')

        cs_dir = qaOpts.getOpt('CS_DIR')
        if len(cs_dir) == 0:
            cs_dir='cs_table'
        g_vars.cs_dir = os.path.join(g_vars.res_dir_path, cs_dir)

    qaOpts.setOpt('LOG_FNAME_DIR', g_vars.check_logs_path)

    qa_util.mkdirP(g_vars.check_logs_path)  # error --> exit
    qa_util.mkdirP(os.path.join(g_vars.res_dir_path, 'data'))  # error --> exit

    # some more settings
    if not qaOpts.isOpt('ZOMBIE_LIMIT'):
        qaOpts.setOpt('ZOMBIE_LIMIT', 3600)

    # stand-alone checksumming implies CHECKSUM
    if not qaOpts.isOpt('CHECKSUM'):
        if qaOpts.isOpt('CS_STAND_ALONE') or qaOpts.isOpt('CS_DIR'):
            qaOpts.setOpt('CHECKSUM', True)

    # save current version id to the cfg-file
    qv=qaOpts.getOpt('QA_REVISION')
    if len(qv) == 0:
        qv=qa_util.get_curr_revision(g_vars.qa_src, g_vars.isConda)

    qv = qa_util.cfg_parser(rawCfgPars, cfgFile,
                            section=g_vars.qa_src,
                            key='QA_REVISION', value=qv)
    qaOpts.setOpt('QA_REVISION', qv)
    g_vars.qa_revision = qv

    # table path and copy of tables for operational runs
    init_tables(g_vars, qaOpts)

    # unique exp_name and table_names are defined by indices of path components
    qa_util.get_experiment_name(g_vars, qaOpts, isInit=True)
    qa_util.get_project_table_name(g_vars, qaOpts, isInit=True)

    # enable clearance of logfile entries by the CLEAR option
    if qaOpts.isOpt('CLEAR_LOGFILE'):
        g_vars.clear_logfile = True
    else:
        g_vars.clear_logfile = False

    g_vars.ignore_temp_files = qaOpts.isOpt('IGNORE_TEMP_FILES')

    # external executables; the checker binary is project-specific
    g_vars.syncFilePrg = os.path.join(g_vars.qa_src, 'bin', 'syncFiles.x')
    g_vars.checkPrg = os.path.join(g_vars.qa_src, 'bin',
                                   'qA-' + qaOpts.getOpt('PROJECT_AS') + '.x')

    if not os.access(g_vars.syncFilePrg, os.X_OK):
        print g_vars.syncFilePrg + ' is not executable'
        sys.exit(1)

    if not os.access(g_vars.checkPrg, os.X_OK):
        print g_vars.checkPrg + ' is not executable'
        sys.exit(1)

    g_vars.anyProgress = False

    return
def run(self, t_vars):
    # Run the compiled checker on one (sub-temporal) file (qaOpts-based
    # variant), parse the output and write a logfile entry. Returns
    # False when the variable is locked or the check failed (status > 1).

    # an existing lock file marks the atomic variable as blocked
    qa_lock = os.path.join(t_vars.var_path,
                           'qa_lock_' + t_vars.fBase + '.txt')
    if os.path.isfile(qa_lock):
        return False

    self.nc_file = os.path.join(t_vars.data_path, t_vars.fName)

    if self.qaOpts.isOpt('DRY_RUN'):
        print self.nc_file
        return True

    param = self.getParamStr(t_vars)
    if self.is_show_call:
        print param

    log_entry={}
    qa_util.mkdirP(t_vars.var_path)

    cpp_run = self.g_vars.checkPrg + param

    set_qa_lock = False
    try:
        check_output = subprocess.check_output(cpp_run, shell=True,
                                               cwd=t_vars.var_path)
    except subprocess.CalledProcessError as e:
        istatus = e.returncode
        set_qa_lock = True

        # exit code 63 — treated as benign: proceed without logging
        if istatus == 63:
            return True

        check_output = e.output

        # atomic variable gets locked
        # (NOTE: redundant — set_qa_lock is already True at this point)
        if istatus > 1:
            set_qa_lock = True

        if istatus == 4:
            pass
            # EMERGENCY STOP
            # isSignalTerm = t
            # issueMail = t
        elif istatus > 4:
            # uncontroled system exception, e.g. segmentation fault
            log_entry['conclusion'] = 'segmentation fault'
            log_entry['is_event'] = True
            log_entry['event'] = []
            event = {}
            event['caption'] = 'run-time error: segmentation fault'
            event['impact'] = 'L2'
            event['tag'] ='S_1'
            log_entry['event'].append(event)
    else:
        istatus = 0

    entry_id=''

    # prepare the logfile entry; status >= 5 means the checker output is
    # unusable (crash), so it is not parsed
    if istatus < 5:
        self.parse_output(check_output, log_entry)
        entry_id = self.log.append( entry_id,
                                    f = t_vars.fName,
                                    d_path = t_vars.data_path,
                                    r_path = t_vars.var_path,
                                    conclusion = log_entry['conclusion'],
                                    set_qa_lock = set_qa_lock)

    if 'is_event' in log_entry.keys():
        entry_id = self.log.append(entry_id, is_events=True)

        for eve in log_entry['event']:
            entry_id = self.log.append( entry_id,
                                        caption = eve['caption'],
                                        impact = eve['impact'],
                                        tag = eve['tag'])
            if 'info' in eve.keys():
                entry_id = self.log.append(entry_id, info=eve['info'])

    entry_id = self.log.append(entry_id, status=istatus)

    self.log.write_entry(entry_id, self.g_vars.check_logs_path,
                         t_vars.log_fname)

    if istatus > 1:
        proceed = False
    else:
        proceed = True

    return proceed  # true: proceed with next sub-temporal file
def run(self, f_log):
    # Extract annotations and atomic time ranges from a log-file
    # (newer variant: file/path pairs keyed by 'fileId_pathId') and
    # produce the Annotations/Period/Tags products; finally sendMail().
    if len(f_log) == 0:
        return

    if not os.path.isfile(f_log):
        print('qa_summary: ' + f_log + ' : no such file')
        return

    self.logfile = f_log

    # extraction of annotations and atomic time ranges from log-files
    self.log_path, self.log_name = os.path.split(f_log)
    self.log_name = self.log_name[0:-4]  # strip 4-char extension

    # sub-directories in check_logs
    self.f_annot = os.path.join(self.log_path, 'Annotations')
    self.f_period = os.path.join(self.log_path, 'Period')
    self.tag_dir = os.path.join(self.log_path, 'Tags', self.log_name)
    qa_util.mkdirP(self.f_annot)
    qa_util.mkdirP(self.f_period)
    #qa_util.mkdirP(self.tag_dir)

    # time range of atomic variables; in order to save mem,
    # beg and end, respectively, are linked to the name by the
    # index of the corresponding atomic variable name in var
    self.fName_ids = []
    self.fName_dt_id = {}  # each fName_id gets a list of dt ids
    self.path_ids = []
    self.fp_ids = []       # combined 'fileId_pathId' keys
    self.f_items = []
    self.p_items = ['*']   # a placeholder

    self.atomicBeg = []    # atomic time interval: index by
    self.atomicEnd = []    # 'var_id'_'path_id'
    self.dt = []           # time intervals of sub-temp files

    self.annot_capt = []         # brief annotations
    self.annot_impact = []       # corresponding severity level
    self.annot_tag = []          # corresponding tag
    self.annot_fName_id = []     # for each var involved
    self.annot_path_id = []
    self.annot_fName_dt_id = []  # for each time interval of each var involved
    self.annot_example_capt = [] # example for grouped annotations
    self.annot_example_isGroup = []

    # count total occurrences (good and bad)
    self.file_count = 0

    # reading and processing of the logfile
    if not os.path.isfile(f_log):
        return

    line_num = 0
    isMissedStatus = False

    with open(f_log, 'r') as fd:
        while True:
            # the previous block ended without a 'status:' line
            if isMissedStatus:
                print 'incomplete log-file at line ' + str(line_num)
                sys.exit(1)

            # read the lines of the next check
            blk, ln = self.get_next_blk(fd=fd)
            line_num += ln
            #print line_num

            sz = len(blk) - 1
            if sz == -1:
                break

            isMissedPeriod = True  # fx or segmentation fault
            isMissedStatus = True

            i = 0
            while i < sz:
                i += 1
                # strip the leading ' -' decoration, split once
                words = blk[i].lstrip(' -').split(None, 1)

                if len(words) == 0:
                    # a string of just '-' would results in this
                    words = ['-----------']

                if words[0] == 'file:' or words[0] == 'data-set:':
                    # fse contains ( var, StartTime, EndTime ); the
                    # times could be empty strings or EndTime could be empty
                    fse = qa_util.f_time_range(words[1])
                    self.set_curr_dt(fse)

                    file_id = self.decomposition(words[1], self.f_items,
                                                 self.fName_ids,
                                                 self.prj_fName_sep)
                    self.file_count += 1

                elif words[0] == 'data_path:':
                    # used later
                    path_id = self.decomposition(words[1], self.p_items,
                                                 self.path_ids,
                                                 self.prj_data_sep)

                    # file/path pair: reuse an existing index or append
                    fp_id = str(file_id) + '_' + str(path_id)
                    try:
                        fp_ix = self.fp_ids.index(fp_id)
                    except:
                        fp_ix = len(self.fp_ids)
                        self.fp_ids.append(fp_id)

                    # for counting atomic variable's sub_temps for all freqs
                    try:
                        self.fName_dt_id[fp_ix]
                    except:
                        self.fName_dt_id[fp_ix] = [self.dt_id]
                    else:
                        self.fName_dt_id[fp_ix].append(self.dt_id)

                    if fp_ix > len(self.atomicBeg) - 1:
                        # init for a new variable
                        self.atomicBeg.append('')  # greater than any date
                        self.atomicEnd.append('')  # smaller than any date

                elif words[0] == 'period:':
                    # time ranges of atomic variables
                    # indexed by self.curr_dt within the function
                    i = self.period_add(fp_ix, fse, blk, i)
                    isMissedPeriod = False

                elif words[0] == 'event:':
                    # no explicit period so far: substitute from the
                    # filename's end time (if any)
                    if isMissedPeriod and len(fse[2]):
                        self.subst_period(fp_ix, fse)
                        isMissedPeriod = False

                    # annotation and associated indices of properties
                    i = self.annotation_add(file_id, path_id, blk, i)

                elif words[0] == 'status:':
                    isMissedStatus = False
                    if isMissedPeriod and len(fse[2]):
                        self.subst_period(fp_ix, fse)

    if self.file_count == 0:
        return

    # test for ragged time intervals of atomic variables for given frequency
    self.period_final()

    self.annot_synthetic_tag()
    self.annotation_merge()

    self.sendMail()

    return
def runExample(): if qaOpts.isOpt('EXAMPLE_PATH'): currdir = os.path.join( qaOpts.getOpt('EXAMPLE_PATH'), 'example') if currdir[0] != '/': currdir = os.path.join(os.getcwd(), currdir) else: currdir=os.path.join(QA_SRC, 'example') if not qa_util.mkdirP(currdir): print 'could not mkdir ' + dir + ', please use option --example=path' sys.exit(1) os.chdir(currdir) qa_util.rmR( 'results', 'config.txt', 'data', 'tables', 'qa-test.task' ) print 'make examples in ' + currdir print 'make qa_test.task' taskFile = os.path.join(QA_SRC, 'example', 'templates', 'qa-test.task') shutil.copy( taskFile, currdir) taskFile = 'qa-test.task' # replace templates sub=[] repl=[] sub.append('PROJECT_DATA=data') repl.append('PROJECT_DATA=' + os.path.join(currdir, 'data') ) sub.append('QA_RESULTS=results') repl.append('QA_RESULTS=' + os.path.join(currdir, 'results') ) qa_util.f_str_replace(taskFile, sub, repl) # data print 'make data' subprocess.call(["tar", "--bzip2", "-xf", \ os.path.join(QA_SRC,os.path.join('example', 'templates', 'data.tbz') ) ]) txtFs=[] for rs, ds, fs in os.walk('data'): for f in fs: if '.txt' in f: txtFs.append(os.path.join(rs,f)) if qa_util.which("ncgen"): for f in txtFs: nc_f = f[:len(f)-2] + 'nc' subprocess.call(["ncgen", "-k", "3", "-o", nc_f, f]) qa_util.rmR(f) else: print "building data in example requires the ncgen utility" print 'run' print os.path.join(QA_SRC, 'scripts', 'qa-dkrz') +\ " -m --work=" + currdir + '-f qa-test.task' subprocess.call([os.path.join(QA_SRC, 'scripts', 'qa-dkrz'), \ '--work=' + currdir, "-f", "qa-test.task"]) return