def __setup_directory(self):
    """Generate the test run directory with all required links and sub-directories.

    Starts from a clean slate: creates self.rundir, removes any pre-existing
    content, copies the namelists in (a real copy, not a link, because they
    may be modified later), copies optional auxiliary inputs, links the
    binary input fields, and creates an empty output/ folder.
    """
    self.logger.info('Creating directory for ' + self.name)

    # create run directory and move there
    status = system_command('/bin/mkdir -p ' + self.rundir, self.logger)
    status = change_dir(self.rundir, self.logger)

    # removal of all the possible pre-existing files
    status = system_command('/bin/rm -r -f *', self.logger)

    # explicit copy of the namelists (copy is required since we will
    # apply the change_par)
    status = system_command('/bin/cp -f ' + self.namelistdir + 'INPUT* .',
                            self.logger)

    # copy of the auxiliary input parameters if any exist
    # (idiom fix: an empty glob result is falsy — no need to compare to [])
    if glob.glob(os.path.join(dir_path(self.inputdir) + 'in_aux/', '*')):
        status = system_command(
            '/bin/cp -f -r ' + dir_path(self.inputdir) + 'in_aux/* ./',
            self.logger)

    # linking input binary fields
    status = system_command(
        '/bin/ln -s ' + dir_path(self.inputdir) + 'input .', self.logger)

    # generation of the output folder
    status = system_command('/bin/mkdir -p output', self.logger)
def write_result(self):
    """Print the result of the current test to stdout as well as the result file.

    The result file (self.conf.res_file, written in the test's run
    directory) is later read by dependent tests in prerun().
    """
    # print the final result
    self.logger.result(
        0, self.result,
        'RESULT %s/%s: %s' % (self.type, self.name, self.description))

    # change to run directory for this test
    status = change_dir(self.rundir, self.logger)

    # write in a file (this is used for test dependency);
    # the with-statement guarantees the file is closed even on a write error
    with open(self.conf.res_file, "w") as f:
        f.write(status_str(self.result))
def update_namelist(self):
    """copy back modified namelists into data folder"""
    # a "titular" test is one whose namelist dir path contains <type>/<name>;
    # bail out early for every other (derived) test
    name_pattern = '(.*)/' + self.type + '/' + self.name + '(.*)'
    if not re.match(name_pattern, self.namelistdir):
        raise SkipError('No test repository ' + 'data/' + self.type + '/' +
                        self.name)

    status = change_dir(self.rundir, self.logger)
    self.logger.important('Updating namelist data/' + self.type + '/' +
                          self.name)
    copy_cmd = 'cp INPUT* ' + self.namelistdir
    self.logger.debug('Executing: ' + copy_cmd)
    status = system_command(copy_cmd, self.logger)
    self.result = 0  # MATCH
def start(self):
    """Launch the test executable, possibly under MPI and/or via a wrapper script.

    Builds the run command from self.options.mpicmd and self.nprocs,
    redirects stdout/stderr to self.log_file, then executes it through
    system_command with the configured timeout.
    """
    self.logger.info('Starting test')

    # change to run directory for this test
    status = change_dir(self.rundir, self.logger)

    # generate launch command
    self.log_file = 'exe.log'
    redirect_output = '> %s 2>&1' % (self.log_file)
    if self.options.mpicmd == '':
        run_cmd = ''
    else:
        if '&NTASKS' in self.options.mpicmd:
            # special case when n nodes cannot be given as last argument of
            # mpicmd command, e.g. with mpirun_rsh
            run_cmd = self.options.mpicmd.replace('&NTASKS', str(self.nprocs))
        else:
            run_cmd = self.options.mpicmd + ' ' + str(self.nprocs)

    # writes the wrapper script in case a wrapper run of testsuite is required
    if self.options.use_wrappers:
        with open('wrapper.sh', 'w') as f:
            f.write('#!/bin/sh\n')
            f.write('./' + self.executable + redirect_output + '\n')
        # bugfix: 0755 is a Python-2-only octal literal (SyntaxError in
        # Python 3); 0o755 is valid in both.  os.chmod returns None and
        # raises OSError on failure, so the old "if status: raise" check
        # was dead code and has been removed.
        os.chmod('wrapper.sh', 0o755)
        run_cmd = run_cmd + ' ./' + 'wrapper.sh'
    else:
        run_cmd = run_cmd + ' ./' + self.executable + ' ' + redirect_output

    # displays the run command
    self.logger.info('Executing: ' + run_cmd)

    # executes the run command
    status = system_command(run_cmd,
                            self.logger,
                            issue_error=False,
                            timeout=self.options.timeout)
def check(self): """perform checks""" # change to base directory status = change_dir(self.basedir, self.logger) # scan for the checker within the xml tree checkerlist = [] checker_nodes = self.node.findall("checker") for el in checker_nodes: checkerlist.append(el.text) # assignement of the environment variables for checker write_environ(self) # traversing of the checkerlist summary_list = [] for checker in checkerlist: self.logger.debug(checker + ' START') # run checker and save result checker_result,soutput = system_command(os.path.join(os.path.dirname(__file__), "../checkers/")+checker, self.logger, \ return_output=True,throw_exception=False, \ issue_error=False) # print checker output for line in soutput.split('\n'): if not line == '': self.logger.chckinfo(line) summary_list.append(checker_result) # display the subsummary for the checkers self.logger.result(1, checker_result, checker) self.logger.debug(checker + ' END') # compute summary result self.result = max(summary_list) # in case of fail or crash signal a stop to the testsuite if self.result >= 20: raise StopError
def update_yufiles(self):
    """copy back YU* files into the data folder

    Should be used to generate new reference files"""
    # only update those test where self.refoutdir=self.namelistdir,
    # i.e. the namelist dir path contains <type>/<name>
    titular_pattern = '(.*)/' + self.type + '/' + self.name + '(.*)'
    if not re.match(titular_pattern, self.namelistdir):
        raise SkipError('No test repository ' + 'data/' + self.type + '/' +
                        self.name)

    status = change_dir(self.rundir, self.logger)
    if not os.path.exists(self.conf.yufile):
        raise SkipError('No file ' + self.conf.yufile + ' in ' + self.rundir)

    self.logger.info('Updating exe.log YU* ' + self.namelistdir)
    copy_cmd = 'cp exe.log YU* ' + self.namelistdir
    self.logger.debug('Executing: ' + copy_cmd)
    status = system_command(copy_cmd, self.logger)
    self.result = 0  # MATCH
def prerun(self):
    """Check dependencies and perform any prerun actions.

    Reads the result file of the test this one depends on (if any) and
    skips when that test crashed or left no result.  For restart tests,
    copies the restart files from the dependency's output directory.

    Raises:
        SkipError: dependency crashed / result file missing / restart
            preconditions not met.
    """
    # change to run directory
    status = change_dir(self.rundir, self.logger)

    # check whether dependencies have run
    # (idiom fix: compare to None with "is not None")
    if self.dependdir is not None:
        try:
            # with-statement closes the file even if readline() fails;
            # keep the try body minimal so only the I/O is guarded
            with open(self.dependdir + '/' + self.conf.res_file, "r") as f:
                dresult = f.readline()
        except IOError:
            raise SkipError('No result file %s for required test %s' \
                %(self.conf.res_file,self.dependdir))
        if dresult == 'CRASH':
            raise SkipError('Required test %s has crashed' %
                            (self.dependdir))

    # defines the procedures in case of restart test
    if 'restart' in self.prerun_actions:
        # tests with restart are not allowed with a too short number of
        # timesteps
        if self.options.steps is not None and self.options.steps < 60:
            raise SkipError('Restart is not compatible with short tests')
        # copy restart file
        status = system_command('/bin/cp ' + self.dependdir +
                                '/output/lr* ' + self.rundir + 'output/',
                                self.logger,
                                throw_exception=False)
        if status:
            raise SkipError('Problem with restart file from ' +
                            self.dependdir)
def main():
    """read configuration and then execute tests

    Driver for the testsuite: parses command-line options, loads the
    configuration file and the XML test list, then runs every test in
    sequence, writing a result file for each and stopping early on a
    StopError unless --force is given.
    """
    # definition of structure carrying global configuration
    # search for config file in current path, otherwise takes
    # default configuration file in testsuite source directory

    # parse command line arguments
    options = parse_cmdline()

    # redirect standard output (if required)
    logger = setup_logger(options)
    logger.important('TESTSUITE ' + __version__)

    # read configuration file: prefer the working directory, fall back to
    # the testsuite source directory
    if os.path.isfile(options.config_file):
        config_filepath = options.config_file
    elif os.path.isfile(
            os.path.join(os.path.dirname(__file__), options.config_file)):
        config_filepath = os.path.join(os.path.dirname(__file__),
                                       options.config_file)
    else:
        # logger not initialized at this stage, use print and exit
        print('Error: Missing configuration file ' + options.config_file)
        sys.exit(1)
    conf = parse_config_file(config_filepath, logger)

    # parse the .xml file which contains the test definitions
    root = parse_xmlfile(options.testlist, logger)

    # generate work directory (make relative paths absolute first)
    if not os.path.isabs(options.workdir):
        options.workdir = os.path.join(os.getcwd(), options.workdir)
    status = system_command('/bin/mkdir -p ' + options.workdir + '/',
                            logger,
                            throw_exception=False)
    if status:
        exit(status)

    # loops over all the tests
    stop = False
    for child in root.findall("test"):
        # create test object
        mytest = Test(child, options, conf, logger)
        if mytest.run_test():
            # run test
            try:
                # if upyufiles=True, no model run.
                if options.upyufiles:
                    logger.important('Update YU* files mode, no run')
                    mytest.update_yufiles()
                #
                elif options.update_thresholds:
                    logger.important(
                        'Updating the thresholds on the current runs')
                    mytest.options.tune_thresholds = True
                    mytest.log_file = 'exe.log'
                    mytest.check()
                # if upnamelist=True, no model run.
                elif options.upnamelist:
                    logger.important('Update namelist mode, no run')
                    mytest.options.pert = 0
                    mytest.update_namelist()  #copy back namelist in typedir
                # Special setup for ICON where only check is run
                elif options.icon:
                    logger.important('Running checks for ICON')
                    mytest.options.pert = 0
                    mytest.log_file = 'final_status.txt'
                    mytest.check()
                else:
                    if (mytest.options.tune_thresholds):
                        # threshold tuning: repeat the full run/check cycle
                        # for the requested number of iterations
                        mytest.options.pert = 0
                        for i in range(int(mytest.options.tuning_iterations)):
                            mytest.prepare(
                            )  # prepare test directory and update namelists
                            logger.important(
                                "Iteration number {0}".format(i + 1))
                            mytest.prerun(
                            )  # last preparations (dependencies must have finished)
                            mytest.start()  # start test
                            mytest.wait()  # wait for completion of test
                            mytest.check()  # call checkers for this test
                            mytest.options.reset_thresholds = False
                            # 1: Perturb only in the first timestep
                            # 2: Perturb in every iteration
                            mytest.options.pert = 2
                    else:
                        # normal single run of the test
                        mytest.options.pert = 0
                        mytest.prepare(
                        )  # prepare test directory and update namelists
                        mytest.prerun(
                        )  # last preparations (dependencies must have finished)
                        mytest.start()  # start test
                        mytest.wait()  # wait for completion of test
                        mytest.check()  # call checkers for this test
            except SkipError as smessage:
                mytest.result = 15  # SKIP
                logger.warning(smessage)
            except StopError as emessage:
                # a StopError with a message is a real error; an empty one
                # just signals the stop request
                if str(emessage).strip():
                    logger.error(emessage)
                if not options.force:
                    stop = True
            # write result
            mytest.write_result()
        # return into the base directory after each test
        status = change_dir(conf.basedir, logger)
        # exit if required
        if stop:
            break

    # end of testsuite std output
    logger.important('FINISHED')